seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt | api
string | string | string | string | string | string | int64 | string | string | string | int64 | string | string | list
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
16462145161
|
import cv2
import numpy as np
# np.set_printoptions(threshold=np.inf)
import time
from collections import deque
import threading
# Low Quality
# PAUSE_INDICATOR = (-1, 0)
# RESOLUTION = "480p15"
# FPS = 15
# Production quality
PAUSE_INDICATOR = (-1, 0)
RESOLUTION = "1440p60"
FPS = 60
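# PAUSE_INDICATOR indexes the pixel at (row -1, col 0), i.e. the bottom-left
# corner of each frame: a scene marks "pause after this run of frames" by
# painting that pixel red (B == 0, G == 0, R >= 224); see play() below.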
cv2.namedWindow("Frame", 0);
cv2.resizeWindow("Frame", 2560, 1440)
cv2.namedWindow("Frame", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("Frame",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
scenes = [
"TitleScreen",
"Outline",
"Newton",
"SchrodingerEquation",
"HarmonicOscillator",
"VMC",
"WhatToGuess",
"PsiDesign",
"NewIdea",
"NetworkDisplay",
"QDResults",
"HeliumResults",
"Conclusions",
"FutureProspects",
"ThankYou",
]
scenes = [
("media/videos/presentation/{}/" + s + ".mp4").format(RESOLUTION) for s in scenes
]
class Manager:
def __init__(self, scenes):
self.scenes = scenes
self.active_scene = cv2.VideoCapture(self.scenes[0])
self.forward = deque()
self.current_scene = 0
self.frame = 0
self.lock = threading.Lock()
self.last_scene = len(scenes) - 1
self.keep_running = True
def stop(self):
self.lock.acquire()
self.keep_running = False
self.active_scene.release()
self.lock.release()
def run(self):
while self.keep_running:
self.lock.acquire()
if len(self.forward) >= FPS:
self.lock.release()
time.sleep(1 / FPS / 5)
continue
if not self.active_scene.isOpened():
self.active_scene.release()
self.current_scene = min(self.last_scene, self.current_scene + 1)
self.active_scene = cv2.VideoCapture(self.scenes[self.current_scene])
if self.active_scene.isOpened():
ret, frame = self.active_scene.read()
if ret:
self.forward.append(frame)
else:
self.active_scene.release()
self.current_scene = min(self.last_scene, self.current_scene + 1)
self.active_scene = cv2.VideoCapture(
self.scenes[self.current_scene]
)
self.lock.release()
def next_frame(self):
self.lock.acquire()
frame = self.forward.popleft() if self.forward else None
self.lock.release()
return frame
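    # Design note: run() is the producer, keeping up to FPS decoded frames
    # (one second of video) buffered in self.forward on a background thread;
    # play() below consumes them on the main thread, so file I/O and decoding
    # never block the display loop.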
def play(self):
paused = False
indicator_present = False
t0 = 0
while True:
t1 = time.time()
            # 900 rather than 1000 ms/s presumably leaves headroom for decode
            # and display time; while paused, wait_time is 0 and waitKey(0)
            # blocks until the next keypress.
            wait_time = max(1, int(900 * (1 / FPS - (t1 - t0)))) * int(not paused)
key = cv2.waitKey(wait_time) & 0xFF
if key == ord("q"):
self.stop()
break
elif key == ord(" "):
paused = not paused
continue
            elif key == 83:  # right-arrow keycode (Linux): skip ahead half a second
for _ in range(FPS // 2):
frame = self.next_frame()
if frame is not None:
cv2.imshow("Frame", frame)
            elif key == 81:  # left-arrow keycode (Linux): jump back one scene
self.active_scene.release()
self.current_scene = max(0, self.current_scene - 1)
self.active_scene = cv2.VideoCapture(self.scenes[self.current_scene])
paused = False
elif key == ord('n'):
self.active_scene.release()
self.current_scene = min(self.last_scene, self.current_scene + 1)
self.active_scene = cv2.VideoCapture(self.scenes[self.current_scene])
paused = False
elif key != 0xFF:
print("\rUnknown key pressed:", key)
print(f"{1 / (t1 - t0):.2f}", end="\r")
if not paused:
frame = self.next_frame()
if frame is not None:
ind_pres = (
frame[PAUSE_INDICATOR][0] == 0
and frame[PAUSE_INDICATOR][1] == 0
and frame[PAUSE_INDICATOR][2] >= 224
)
if indicator_present and not ind_pres:
paused = True
indicator_present = ind_pres
cv2.imshow("Frame", frame)
t0 = t1
if __name__ == "__main__":
manager = Manager(scenes)
load_thread = threading.Thread(target=manager.run)
load_thread.start()
manager.play()
cv2.destroyAllWindows()
# i = 0
# paused = False
# direction = 1
# prev, prev_stop_frame, t0 = 0, 0, 0
# while True:
# frame = frames[i]
# cv2.imshow("Frame", frame)
# delta = time.time() - t0
# wait_time = max(1, int(1000 * (1 / FPS - delta))) * int(not paused)
# key = cv2.waitKey(wait_time) & 0xFF
# if key == ord(" "):
# paused = not paused
# prev_stop_frame = i
# elif key == 81:
# i = max(0, i - FPS // 2)
# elif key == 85:
# i = min(len(frames) - 1, i + FPS // 2)
# elif key == ord("q"):
# break
# elif key == ord("h"):
# direction = -1
# elif key == ord("l"):
# direction = 1
# elif (
# not paused
# and abs(i - prev_stop_frame) > FPS_SOURCE / 2
# and np.all(frame == prev)
# and False
# ):
# paused = True
# prev_stop_frame = i
# prev = frame
# t0 = time.time()
# i = max(0, min(len(frames) - 1, i + direction))
|
bsamseth/masters-presentation
|
player.py
|
player.py
|
py
| 5,552 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "cv2.namedWindow",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.resizeWindow",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.namedWindow",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cv2.WND_PROP_FULLSCREEN",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "cv2.setWindowProperty",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.WND_PROP_FULLSCREEN",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "cv2.WINDOW_FULLSCREEN",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "threading.Lock",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 152,
"usage_type": "call"
}
] |
15191375755
|
from typing import List  # LeetCode injects this at runtime; needed standalone
from collections import defaultdict, deque
class Solution:
def isBipartite(self, graph: List[List[int]]) -> bool:
color = defaultdict(int)
seen = set()
q = deque()
for node1 in range(len(graph)):
if node1 in seen:
continue
color[node1] = 1
parent_color = 1
q.extend([n for n in graph[node1] if n not in seen])
seen.add(node1)
while q:
                level_size = len(q)
                for _ in range(level_size):
node = q.popleft()
must_color = parent_color * -1 #invert color
if color[node] == 0:
color[node] = must_color
seen.add(node)
elif color[node] != must_color:
#print(node, graph[node], q, color[node], must_color)
return False
q.extend([n for n in graph[node] if n not in seen])
parent_color = parent_color * -1 #invert color
return True
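# Example (illustrative; outside LeetCode, List comes from typing as above):
#   Solution().isBipartite([[1, 3], [0, 2], [1, 3], [0, 2]])        # True: a 4-cycle
#   Solution().isBipartite([[1, 2, 3], [0, 2], [0, 1, 3], [0, 2]])  # False: contains a triangle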
|
Dumbris/leetcode
|
medium/785.is-graph-bipartite.py
|
785.is-graph-bipartite.py
|
py
| 1,085 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.defaultdict",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 7,
"usage_type": "call"
}
] |
43724375221
|
import sys
from PySide6.QtGui import QGuiApplication
from PySide6.QtQml import QQmlApplicationEngine, qmlRegisterType
from ppt import Maker
if __name__ == "__main__":
app = QGuiApplication(sys.argv)
qmlRegisterType(Maker, "ppt", 1, 0, "Maker")
engine = QQmlApplicationEngine()
engine.load('main.qml')
if not engine.rootObjects():
sys.exit(-1)
exit_code = app.exec()
del engine
sys.exit(exit_code)
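# Note (editorial sketch): qmlRegisterType(Maker, "ppt", 1, 0, "Maker")
# exposes the Python class to QML, so main.qml can do `import ppt 1.0` and
# instantiate `Maker { ... }`; deleting the engine before sys.exit() keeps it
# from outliving the QGuiApplication at shutdown.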
|
JunTae90/MinChae
|
main.py
|
main.py
|
py
| 400 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "PySide6.QtGui.QGuiApplication",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "ppt.Maker",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "sys.exit",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 15,
"usage_type": "call"
}
] |
8397431053
|
import os
from wsgiref.simple_server import make_server
from pyramid.config import Configurator
from colander import (
Boolean,
Integer,
Length,
MappingSchema,
OneOf,
SchemaNode,
SequenceSchema,
String
)
from deform import (
Form,
ValidationFailure,
widget
)
here = os.path.dirname(os.path.abspath(__file__))
class UserSchema(MappingSchema):
    name = SchemaNode(String(), description='Be comfortable here')
    surname = SchemaNode(String(), description='Be comfortable here')
    email = SchemaNode(String(), description='Be comfortable here')
def form_view(request):
schema = UserSchema()
myform = Form(schema, buttons=('submit',))
template_values = {}
template_values.update(myform.get_widget_resources())
if 'submit' in request.POST:
controls = request.POST.items()
try:
myform.validate(controls)
except ValidationFailure as e:
template_values['form'] = e.render()
else:
template_values['form'] = 'OK'
return template_values
template_values['form'] = myform.render()
return template_values
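# The unused make_server/Configurator imports suggest the usual Pyramid
# wiring (hypothetical sketch; not part of this excerpt):
#
# if __name__ == '__main__':
#     with Configurator() as config:
#         config.include('pyramid_chameleon')
#         config.add_route('form', '/')
#         config.add_view(form_view, route_name='form',
#                         renderer='templates/form.pt')
#         app = config.make_wsgi_app()
#     make_server('0.0.0.0', 8080, app).serve_forever()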
|
tennisracket/bonkh
|
bonkh/bonkh/app.py
|
app.py
|
py
| 1,227 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.dirname",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "colander.MappingSchema",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "colander.SchemaNode",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "colander.String",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "colander.SchemaNode",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "colander.String",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "colander.SchemaNode",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "colander.String",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "deform.Form",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "deform.ValidationFailure",
"line_number": 46,
"usage_type": "name"
}
] |
2845548081
|
from django.shortcuts import render
from .models import Directores, Peliculas
def index(request):
directores = Directores.objects.all()
return render(request, 'index.html', context={
'directores': directores,
})
# Capture the ids variable passed in the URL.
# The variable name must match the one declared in urls.py.
def director(request, ids):
dtor = Directores.objects.filter(id = ids)
foto =''
nombre = ''
apellido = ''
nacimiento= ''
muerte = ''
pais = ''
biografia = ''
peliculas = Peliculas.objects.filter(director = ids)
for director in dtor:
foto = director.foto
nombre = director.nombre
apellido = director.apellido
nacimiento = director.nacimiento
if director.fallecimiento:
muerte = f' † {director.fallecimiento}'
else:
muerte = ''
pais = director.pais
biografia = director.biografia
return render(request, 'Details.html',context={
'nombre': nombre,
'apellido': apellido,
'fechas': f'{nacimiento} {muerte}',
'foto': foto,
'pais': pais,
'bio': biografia,
'peliculas' : peliculas
})
def pelicula(request, ids):
    peliculas = Peliculas.objects.filter(id=ids)
    # Switch-style mapping to pick the correct genre name
genero = ''
for peli in peliculas:
if peli.genero == 'c':
genero = 'Comedia'
elif peli.genero == 'f':
genero = 'Ciencia Ficción'
elif peli.genero == 'd':
genero = 'Drama'
elif peli.genero == 't':
genero = 'Terror'
return render(request, 'pelis.html',context={
'peliculas': peliculas,
'genero': genero
})
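# For reference (hypothetical urls.py matching the comments above; the
# captured names must equal the view parameters):
#   path('director/<int:ids>/', views.director, name='director')
#   path('pelicula/<int:ids>/', views.pelicula, name='pelicula')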
|
Ranset/django_openbootcamp_exercise12
|
directores/views.py
|
views.py
|
py
| 1,805 |
python
|
es
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "models.Directores.objects.all",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "models.Directores.objects",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "models.Directores",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "models.Directores.objects.filter",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "models.Directores.objects",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "models.Directores",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "models.Peliculas.objects.filter",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "models.Peliculas.objects",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "models.Peliculas",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "models.Peliculas.objects.filter",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "models.Peliculas.objects",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "models.Peliculas",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 64,
"usage_type": "call"
}
] |
27250414296
|
import django
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
from drf_yasg.views import get_schema_view
from rest_framework.permissions import AllowAny
from drf_yasg import openapi
schema_url_v1_patterns = [
path(r'^api/auth/', include('authentication.urls')),
path(r'^api/community/', include('community.urls')),
path(r'^api/feature/', include('feature.urls')),
path(r'^api/account/', include('account.urls')),
path(r'^api/recommendation/', include('recommendation.urls')),
]
schema_view_v1 = get_schema_view(
openapi.Info(
title="NYOM Open API",
default_version='v1',
description="NYOM Open API Docs",
terms_of_service="https://www.google.com/policies/terms/",
license=openapi.License(name="License"),
),
validators=['flex'], #'ssv'],
public=True,
permission_classes=(AllowAny,),
patterns=schema_url_v1_patterns,
)
urlpatterns = [
path('admin/', admin.site.urls),
#path('', )
path('api/auth/', include('authentication.urls')),
path('api/community/', include('community.urls')),
path('api/feature/', include('feature.urls')),
path('api/account/', include('account.urls')),
path('api/recommendation/', include('recommendation.urls')),
# Auto DRF API docs
# path(r'swagger(?P<format>\.json|\.yaml)', schema_view_v1.without_ui(cache_timeout=0), name='schema-json'),
path(r'swagger', schema_view_v1.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
path(r'redoc', schema_view_v1.with_ui('redoc', cache_timeout=0), name='schema-redoc-v1'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
moon-hy/lunch-recommendation
|
config/urls.py
|
urls.py
|
py
| 1,754 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "drf_yasg.views.get_schema_view",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi.Info",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "drf_yasg.openapi.License",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "rest_framework.permissions.AllowAny",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.static.static",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.MEDIA_URL",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.MEDIA_ROOT",
"line_number": 48,
"usage_type": "attribute"
}
] |
41417945016
|
#!/usr/bin/env python3
# Cryptopals Challenge, Set 2, Challenge 12
# CJ Guttormsson
# 2017-01-03
import sys
sys.path.append('..')
from common import (get_random_key, base64_to_bytes, aes_128_ecb_encrypt,
guess_mode, pkcs7_pad)
import random
import itertools
#############
# CONSTANTS #
#############
# A random but constant key
UNKNOWN_KEY = get_random_key()
# The given data, that is not known in its decoded form
UNKNOWN_DATA = base64_to_bytes("""
Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkg
aGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBq
dXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUg
YnkK""")
#################
# NEW FUNCTIONS #
#################
def encrypt_ecb_with_unknowns(data: bytes) -> bytes:
"""Pad given data with the unknown data and encrypt with the unknown key"""
return aes_128_ecb_encrypt(data + UNKNOWN_DATA, UNKNOWN_KEY)
def guess_block_length(alg) -> int:
"""Given an encryption algorithm, guess the block length it uses."""
# Guess by observing how the size of the output text, which is always a
# multiple of the block length, changes.
last_length = len(alg(b''))
for data_length in itertools.count(1):
new_length = len(alg(b'\0' * data_length))
if new_length > last_length:
return new_length - last_length
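# Worked example (illustrative): here len(alg(b'')) is 144 (the 138 secret
# bytes pad to a multiple of 16); the length stays 144 while the pad shrinks,
# then jumps to 160 once an extra block is needed, so 160 - 144 = 16.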
def guess_unknown_string(alg) -> bytes:
"""Given the algorithm above, find the unknown data."""
assert guess_mode(alg) == 'ECB'
block_length = guess_block_length(alg)
# Guess one character at a time by shifting the unknown text so that only
# one unknown character is in the block we are looking at
known_bytes = b''
while True:
# figure out how much padding we need, and which block we're looking at
        empty_block = bytes(block_length - (len(known_bytes) % block_length) - 1)
        start = (len(known_bytes) // block_length) * block_length
# Create a lookup table for each possible byte (result block -> byte)
results = {}
for possible_byte in (bytes([b]) for b in range(256)):
            result = alg(empty_block + known_bytes + possible_byte)[start:start + block_length]
results[result] = possible_byte
# Look at what the answer should be, then use that to figure out
# which possible byte was correct
        expected_block = alg(empty_block)[start:start + block_length]
if expected_block in results:
known_bytes += results[expected_block]
else:
break
# We must remove the last byte, since it will always be an extraneous
# \x01. This happens because of pkcs7 padding (but doesn't extend to
# \x02 or further because that causes the \x01 to change, rendering
# all guessed blocks invalid).
return known_bytes[:-1]
########
# MAIN #
########
def main():
# Determine block length
block_length = guess_block_length(encrypt_ecb_with_unknowns)
assert block_length == 16
# Determine the algorithm being used
assert guess_mode(encrypt_ecb_with_unknowns) == 'ECB'
    # Recover the unknown appended data
assert guess_unknown_string(encrypt_ecb_with_unknowns) == UNKNOWN_DATA
print('Challenge 12 completed successfully.')
if __name__ == '__main__':
main()
|
cjguttormsson/cryptopals
|
set2/challenge12.py
|
challenge12.py
|
py
| 3,257 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "common.get_random_key",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "common.base64_to_bytes",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "common.aes_128_ecb_encrypt",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "itertools.count",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "common.guess_mode",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "common.guess_mode",
"line_number": 98,
"usage_type": "call"
}
] |
1112432236
|
import numpy as np
from PIL import Image
import re
from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS
import matplotlib.pyplot as plt
mask = plt.imread('b.jpg')
# Prepare the UTF-8 encoded text file
fo = open('input.txt', 'r', encoding='utf-8')
strThousand = fo.read().lower()
fo.close()
#print(strThousand)
print("222")
# Text preprocessing: strip punctuation and special characters
# (case was already lowered when the file was read)
for ch in ".\"',<":
    strThousand = strThousand.replace(ch, ' ')
print("222")
# Split the text into a list of words
strList = strThousand.split()
#print(len(strList), strList)
print("222")
# Count words using a set and a dict
strSet = set(strList)
exclude = {'a', 'the', 'and', 'i', 'you', 'in'}  # exclude function words: pronouns, articles, conjunctions, and other non-semantic words
strSet = strSet-exclude
print("222")
#print(len(strSet), strSet)
print("222")
strDict = {}
for word in strSet:
strDict[word] = strList.count(word)
#print(len(strDict), strDict)
wcList = list(strDict.items())
#print(wcList)
print("222")
wcList.sort(key=lambda x: x[1], reverse=True)  # sort by frequency, descending: list.sort(key=)
#print(wcList)
print("111")
for i in range(50):  # print the 50 most frequent words
print(wcList[i])
# generate() expects raw text; a word->count dict needs generate_from_frequencies()
my_wordcloud = WordCloud(scale=4, mask=mask, background_color='white',
                         max_words=50, max_font_size=60,
                         random_state=20).generate_from_frequencies(strDict)
# Display the generated word cloud
plt.imshow(my_wordcloud)
plt.axis("off")
plt.show()
# Save the generated image
my_wordcloud.to_file('result.jpg')
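# Editorial note: the imported-but-unused STOPWORDS could replace the
# hand-rolled `exclude` set, e.g. (hypothetical):
#   WordCloud(stopwords=STOPWORDS, ...).generate(strThousand)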
|
Wang993/code_
|
code/ENGLISH WORD FRUQUECY_wordcloud.py
|
ENGLISH WORD FRUQUECY_wordcloud.py
|
py
| 1,768 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "matplotlib.pyplot.imread",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "wordcloud.WordCloud",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 62,
"usage_type": "name"
}
] |
1492499597
|
import pymysql
from pymysql import connect
# from baiyu.function.zudai_to_fumudai import *
from baiyu.models import *
import datetime
class OpenDB(object):
def __init__(self):
        # Initialise the database connection
self.conn = connect(host='localhost', port=3306, user='root', password='123456', database='forecastsystem', charset='utf8')
self.cursor = self.conn.cursor()
def __enter__(self):
        # Return the cursor for executing statements
return self.cursor
def __exit__(self, exc_type, exc_val, exc_tb):
        # On exit, commit and close the cursor and connection
self.conn.commit()
self.cursor.close()
self.conn.close()
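# Usage sketch (illustrative): the context manager yields a raw cursor and
# commits on exit, so callers can write
#   with OpenDB() as cursor:
#       cursor.execute('SELECT 1')
#       rows = cursor.fetchall()
# Caveat (editorial): __exit__ commits unconditionally; inspecting exc_type
# and rolling back on error would be safer.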
'''
#Get the weekly totals of grandparent-generation chicken introductions; return type: list
#by sujie 2019/05/27
'''
def get_introduced_detail_all():
res = []
try:
db_res = IntroducedInfoDetail.objects.all()
for index in db_res:
res.append(index)
except Exception as e:
res = []
print(e)
return res
def get_introduced_detail_info(bird_type,nGen):
introduce_res = []
with OpenDB() as cursor:
sql = '''
SELECT
bi.id,
bi.YEAR,
bi.WeekNum,
bi.startDate,
bi.endDate,
bc.companyName,
bs.SpeciesName,
bf.feedWayName,
bi.RuSheNum,
bi.LivePeriod,
bi.nGeneration,
bi.qzhyFlag,
bi.Remark
FROM
baiyu_introducedinfodetail bi,
baiyu_companyinfo bc,
baiyu_feedway bf,
baiyu_speciesinfo bs
WHERE
bi.CompanyId = bc.id
AND bi.feedWayId = bf.id
AND bi.SpeciesId = bs.id
AND bi.nBirdsType = %d
AND bi.nGeneration = %d
''' % (bird_type,nGen)
try:
cursor.execute(sql)
db_res = cursor.fetchall()
for i in db_res:
introduce_res.append(i)
except Exception as e:
print('Error Reason is :',e)
print('standard_res',introduce_res)
return introduce_res
'''
#Get the record count of the grandparent-generation weekly introduction summary table; return type: int
#by sujie 2019/05/27
'''
def get_count_ProgenitorIntroduced(bird_type,nGen):
try:
count = IntroducedInfo.objects.filter(nBirdsType=bird_type,nGeneration=nGen).count()
except Exception as e:
count = 0
print('Error Reason is :',e)
return count
'''
#Get the commercial-generation weekly statistics; return type: list
#by sujie 2019/05/27
'''
def get_sWeekly_statistic():
res = []
try:
db_res = WeeklyStatisticTable.objects.all()
for index in db_res:
res.append(index)
except Exception as e:
print('Error Reason is :',e)
return res
'''
#Get the weekly standard parameters for grandparent and parent generation chickens; the generation argument is 1 for grandparent, 2 for parent, 3 for commercial; return type: list
#by sujie 2019/05/30
'''
def get_weekly_standard(nBirdsType,nGeneration):
standard_res = []
with OpenDB() as cursor:
sql = '''
SELECT
bws.WeekNum,
bws.siTaoRate,
bws.ChanDanRate,
bws.RuFuZhongDanRate,
bws.ShouJingRate,
bws.FuHuaRate,
bws.JianChuRate,
bws.SaleRate,
bws.nGeneration,
bs.SpeciesName,
bf.feedWayName,
bws.Remark
FROM
baiyu_weekstandardtable bws
JOIN baiyu_speciesinfo bs
JOIN baiyu_feedway bf
WHERE
bws.SpeciesId = bs.id
AND bws.feedWayId = bf.id
AND bws.nBirdsType = %d
AND bws.nGeneration = %d
''' % (nBirdsType,nGeneration)
try:
cursor.execute(sql)
db_res = cursor.fetchall()
print('****',db_res)
for i in db_res:
standard_res.append(i)
except Exception as e:
print('Error Reason is :',e)
return standard_res
'''
#Get the weekly standard parameters for parent-generation chickens; return type: list
#by sujie 2019/05/30
'''
# def get_fumudai_weekly_standard():
# res = []
# try:
# db_res = WeekStandardTable.objects.all()
# for index in db_res:
# res.append(index)
# except Exception as e:
# print('Error Reason is :',e)
# return res
'''
#Get the daily standard parameters for commercial-generation chickens; return type: list
#by sujie 2019/05/30
'''
def get_shangpindai_daily_standard(bird_type):
res = []
try:
db_res = DailyStandardTable.objects.all().values().filter(nBirdsType=bird_type)
for index in db_res:
res.append(index)
except Exception as e:
print('Error Reason is :',e)
return res
'''
#Get the yearly standard parameters for commercial-generation chickens; return type: list
#by sujie 2019/05/30
'''
def get_shangpindai_yearly_param():
res = []
try:
db_res = YearParameter.objects.all()
for index in db_res:
res.append(index)
except Exception as e:
print('Error Reason is :',e)
return res
'''
#Get the overall parameters for grandparent/parent generation chickens; return type: list
#by sujie 2019/05/30
'''
def get_all_param(bird_type,nGeneration):
res = []
try:
db_res = WholeParameter.objects.all().values().filter(nBirdsType=bird_type,nGeneration = nGeneration)
for index in db_res:
res.append(index)
except Exception as e:
print('Error Reason is :',e)
return res
'''
#Get the yearly culled-chicken meat parameters (grandparent and parent culls)
#by sujie 2019/05/30
'''
def get_tcjirou_param(bird_type,nGeneration):
res = []
try:
db_res = YearTaotaiJirouParam.objects.all().filter(nBirdsType=bird_type,nGeneration = nGeneration)
for index in db_res:
res.append(index)
except Exception as e:
print('Error Reason is :',e)
return res
'''
#Get the standard dates for each week
#by sujie 2019/06/04
'''
def get_weeklydate_standard():
res = []
try:
db_res = WeekDateStandard.objects.all()
for index in db_res:
res.append(index)
except Exception as e:
print('Error Reason is :',e)
return res
###################################################################
'''
#This part computes the various statistics. 2019-06-28 [email protected]
'''
###################################################################
'''
Get the introduction/housing date list; the arguments are the generation and the poultry type; returns a list.
get_Rushe_timeList(1,4) fetches the introduction dates of grandparent-generation white-feather meat ducks.
Records with flag 1 are excluded from the calculation; records with flag 0 participate.
'''
def get_Sum_Rushe_timeList(nGen,nBirdType,save_type):
time_list = []
try:
res = IntroducedInfo.objects.all().values('Year','WeekNum','LivePeriod','RuSheNum','flag').filter(nGeneration=nGen,nBirdsType=nBirdType,nDraftOrOriginal=save_type,flag=0)
for index in res:
time_list.append(index)
except Exception as e:
print('The Error Reason is :',e)
return time_list
def get_Rushe_timeList(nGen,nBirdType,year):
time_list = []
try:
res = IntroducedInfoDetail.objects.all().values('Year','WeekNum','LivePeriod','RuSheNum').filter(nGeneration=nGen,nBirdsType=nBirdType,Year=year)
for index in res:
time_list.append(index)
except Exception as e:
print('The Error Reason is :',e)
return time_list
'''
Given a week (year, week number) and a live period, this helper returns the weeks that follow within the live period.
Arguments: year, week number, live period. Return type: list of the year and week number of every week in the live period.
'''
def get_period_week(year,weekNum,livePeriod=1):
    # Data from before 2006 is excluded, so the calculation starts from week 1 of 2006
_year = 2006
time_list = []
try:
index = WeekDateStandard.objects.all().values('id').filter(Year=year,WeekNum=weekNum)[0]['id']
# print('index',index)
except Exception as e:
print('The Error Reason is :',e)
index = 1
limit_start = index - 2
limit_end = limit_start + livePeriod
# print(limit_start,limit_end,'qujian')
try:
res = WeekDateStandard.objects.all().values('Year','WeekNum').order_by('id')[limit_start:limit_end]
# print('res',res)
for item in res:
time_list.append(item)
except Exception as e:
print('The Error Reason is :',e)
time_list = []
return time_list
def get_correct_param_by_week(year,week):
    res = {}  # default, so the return below cannot raise NameError on failure
    try:
        res = WeekCorrectionFactor.objects.all().values().filter(Year=year,WeekNum=week)[0]
    except Exception as e:
        print(str(e))
    return res
'''
Insert each week's computed results into baiyu_weeklyCoreTable
Time: 2019-06-28
Author:[email protected]
'''
def insert_baiyu_weekly_core(Year,WeekNum,TotalYuChengCunLan):
pass
'''
Get the housing count for a given year and week number.
Arguments: year, week_num; return value: int
'''
def get_rushe_by_week(year,week_num):
try:
rusheNum = IntroducedInfoDetail.objects.values('RuSheNum').filter(Year=year,WeekNum=week_num)[0]['RuSheNum']
except Exception as e:
rusheNum = 0
print('get_rushe_by_week:The Error Reason is :',e)
return rusheNum
'''
#Get the death/cull rate for a given week of the production cycle
'''
# def calc_baiyu_weekly_median_core():
# rushe_DateList = get_Rushe_timeList()[0:3]
# for index in rushe_DateList:
# ## step 1: check whether any chicks were housed last week; if so, look up last week's death/cull rate
# # print(index['Year'],index['WeekNum'])
# last_week = get_period_week(index['Year'],index['WeekNum'],1)[0]
# print('last_week',last_week)
# last_week_rushe = get_rushe_by_week(last_week['Year'],last_week['WeekNum'])
# current_week_rushe = get_rushe_by_week(index['Year'],index['WeekNum'])
#
# sum_rushe = last_week_rushe + current_week_rushe
# print('Current Rushe is :',sum_rushe)
# ## step 2: check whether any chicks were housed this week; if so, add them directly to last week's count after death/cull
'''
#Get the death/cull rate for the given production week; the argument is the week number, the return value is the rate
author : [email protected]
date : 2019-07-02
'''
def get_sitaoRate_by_weekCount(week_num):
try:
res = WeekStandardTable.objects.values('siTaoRate').filter(WeekNum=week_num,nGeneration=1)[0]['siTaoRate']
except Exception as e:
res = 0
print('get_sitaoRate_by_weekCount:The Error Reason is:',e,'siTaoLv is invalid !!!')
return res
'''
#Get the weekly production metrics for the given production week; the argument is the week number, the return value is the metrics record
author : [email protected]
date : 2019-07-04
'''
def get_weekly_param_standard(week_num,bird_type,nGen,SpeciesId=6,feedWayId=1):
param_res = {
'SpeciesId' : 1,
'feedWayId' : 1,
'siTaoRate' : 0,
'ChanDanRate' : 0,
'RuFuZhongDanRate' : 0,
'ShouJingRate' : 0,
'FuHuaRate' : 0,
'JianChuRate' : 0,
'SaleRate' : 0
}
try:
param_res = WeekStandardTable.objects.values().filter(WeekNum=week_num,nGeneration=nGen,nBirdsType=bird_type)[0]
except Exception as e:
print('get_weekly_param_standard:The Error Reason is :',e)
return param_res
'''
Get the hatching-egg count for a given year and week
'''
def get_chandan_num(year,week_num,origin_year,origin_week,bird_type,nGen):
dan_num = 0
try:
dan_num = WeeklyIntroducedMedian.objects.values('TotalDan').filter(Year=year,WeekNum=week_num,originYear=origin_year,originWeek=origin_week,nBirdsType=bird_type,nGeneration=nGen)[0]['TotalDan']
except Exception as e:
print('get_chandan_num:The Error Reason is :',e)
return dan_num
'''
Get the weight and slaughter rate of culled chickens; the argument is the year, the return value is a dict
'''
def get_taotaiji_param(year,bird_type,nGen):
param = {}
try:
param = YearTaotaiJirouParam.objects.all().values().filter(nYear=year,nBirdsType=bird_type,nGeneration=nGen)[0]
except Exception as e:
print('get_taotaiji_param:The Error Reason is:',e)
return param
'''
Get the weight and slaughter rate of culled chickens for all years; the return value is a dict-like queryset
'''
def get_taotaiji_param_all(bird_type,nGen):
param = {}
try:
param = YearTaotaiJirouParam.objects.all().values().filter(nBirdsType=bird_type,nGeneration=nGen)
except Exception as e:
print('get_taotaiji_param:The Error Reason is:',e)
return param
# def calc_baiyu_weekly_median_core(bird_type,nGen,CompanyId,SpeciesId,feedWayId,chandan_interval=3):
# # now_time1 = datetime.datetime.now()
# rushe_DateList = get_Rushe_timeList(nGen,bird_type,2006)
# # now_time2 = datetime.datetime.now()
# ## clear existing rows from the baiyu_weeklyintroducedmedian table
# cleanup_weeklyintroducedmedian(bird_type,nGen)
#
# # med_core_data_list = []
#
# for index in rushe_DateList:
# # compute all data within the live period, from the starting year, week, and live period
# start_year = index['Year'] #2006
# start_week = index['WeekNum'] #2
# rushe_num = index['RuSheNum'] #15000
#
# for iWeek in range(index['LivePeriod']+3):
# shengchanWeek = iWeek+1
# param = get_weekly_param_standard(iWeek+1,bird_type,nGen,SpeciesId,feedWayId)
# ## from the starting year and week, derive the year and week of each subsequent week in the live period; e.g. three weeks after week 1 of 2006 is week 4 of 2006
# cur_year,cur_week = get_week_info_by_offset(start_year,start_week,iWeek)
# start_date,end_date = get_start_end_date_by_week(cur_year,cur_week)
# yuchengCunlan = 0
# chandanCunlan = 0
# chandanNum = 0
# chuji_num = 0
# real_sale_chuji_num = 0
# TaoTaiJiNum = 0
# taotai_jirou = 0
#
# if iWeek == index['LivePeriod']:
# TaoTaiJiNum = rushe_num
# else:
# TaoTaiJiNum = 0
#
# ### production weeks below 25 are the rearing stage; laying starts after week 25
# if iWeek < index['LivePeriod']:
# if shengchanWeek < 25: # the 25 should be fetched dynamically
# yuchengCunlan = rushe_num #15000
# chandanCunlan = 0
# else:
# yuchengCunlan = 0
# chandanCunlan = rushe_num
#
# ## compute the egg count from the laying-stage inventory
# chandanNum = round(chandanCunlan*param['ChanDanRate']/100*7)
#
# '''
# Fetch the egg count from three weeks earlier; the incubation period is 21 days, counted as 3 weeks
# '''
# _3week_before_year,_3week_before_weeknum, = get_week_info_by_offset(cur_year,cur_week,chandan_interval*(-1))
# dan_init = get_chandan_num(_3week_before_year, _3week_before_weeknum,start_year,start_week,bird_type,nGen)
# _3week_before_param = get_weekly_param_standard((shengchanWeek-chandan_interval),bird_type,nGen,SpeciesId,feedWayId)
# shoujing_rate = _3week_before_param['ShouJingRate']/100
# rufu_zhongdan_rate = _3week_before_param['RuFuZhongDanRate']/100
# fuhua_rate = _3week_before_param['FuHuaRate']/100
# jianchu_rate = param['JianChuRate']/100
# if nGen == 1:
# chuji_num = round(dan_init*shoujing_rate*rufu_zhongdan_rate*fuhua_rate*jianchu_rate*0.45)
# else:
# chuji_num = round(dan_init * shoujing_rate * rufu_zhongdan_rate * fuhua_rate * jianchu_rate)
# real_sale_chuji_num = chuji_num
#
#
# # print(rushe_num,cur_year,cur_week,shengchanWeek,yuchengCunlan,chandanCunlan,chandanNum,chuji_num,TaoTaiJiNum,taotai_jirou)
# taotaiji_param = get_taotaiji_param(cur_year,bird_type, nGen)
# taotai_jirou = TaoTaiJiNum * taotaiji_param['StandardTZ'] * taotaiji_param['TuZaiRate']*1.0/100
# insertDB_median_baiyu(
# originYear = start_year,
# originWeek = start_week,
# Year = cur_year,
# WeekNum = cur_week,
# startDate = start_date,
# endDate = end_date,
# CompanyId = CompanyId,
# SpeciesId = SpeciesId,
# feedWayId = feedWayId,
# shengchanWeek = shengchanWeek,
# TotalYuChengCunLan = yuchengCunlan,
# TotalChanDanCunLan = chandanCunlan,
# TotalDan = chandanNum,
# TotalChuJi = chuji_num,
# TotalFactSaleChuJi = real_sale_chuji_num,
# TaoTaiJiNum = TaoTaiJiNum,
# dTaoTaiJiRou = taotai_jirou,
# nBirdsType = bird_type,
# nGeneration = nGen,
# )
# rushe_num = round(rushe_num*(100-param['siTaoRate'])/100)
# # WeeklyIntroducedMedian.objects.bulk_create(med_core_data_list)
'''
Compute the rearing-stage and laying-stage inventories for the given year and week
'''
def get_data_from_median(year,week_num,bird_type,nGen):
TotalYuChengCunLan = 0
TotalChanDanCunLan = 0
TotalDan = 0
TotalChuJi = 0
TotalFactSaleChuJi = 0
TaoTaiJiNum = 0
dTaoTaiJiRou = 0
try:
res = WeeklyIntroducedMedian.objects.values().filter(Year=year,
WeekNum=week_num,
nBirdsType=bird_type,
nGeneration=nGen)
for index in res:
TotalYuChengCunLan += index['TotalYuChengCunLan']
TotalChanDanCunLan += index['TotalChanDanCunLan']
TotalDan += index['TotalDan']
TotalChuJi += index['TotalChuJi']
TotalFactSaleChuJi += index['TotalFactSaleChuJi']
TaoTaiJiNum += index['TaoTaiJiNum']
dTaoTaiJiRou += index['dTaoTaiJiRou']
except Exception as e:
print('get_data_total_from_median:The Error Reason is :',e)
return TotalYuChengCunLan,TotalChanDanCunLan,TotalDan,TotalChuJi,TotalFactSaleChuJi,TaoTaiJiNum,dTaoTaiJiRou
'''
Compute the summed rearing-stage and laying-stage inventories for the given year and week
'''
def get_data_sum_from_median(year,week_num,bird_type,nGen,save_type):
data_med = {
'TotalYuChengCunLan':0,
'TotalChanDanCunLan':0,
'TotalDan':0,
'TotalChuJi':0,
'TotalFactSaleChuJi':0,
'TaoTaiJiNum':0,
'dTaoTaiJiRou':0
}
try:
res = WeeklyIntroducedSumMedian.objects.values().filter(Year=year,WeekNum=week_num,nBirdsType=bird_type,nGeneration=nGen,nDraftOrOriginal=save_type)
for index in res:
data_med['TotalYuChengCunLan'] += index['TotalYuChengCunLan']
data_med['TotalChanDanCunLan'] += index['TotalChanDanCunLan']
data_med['TotalDan'] += index['TotalDan']
data_med['TotalChuJi'] += index['TotalChuJi']
data_med['TotalFactSaleChuJi'] += index['TotalFactSaleChuJi']
data_med['TaoTaiJiNum'] += index['TaoTaiJiNum']
data_med['dTaoTaiJiRou'] += index['dTaoTaiJiRou']
except Exception as e:
print('get_data_total_from_median:The Error Reason is :',e)
return data_med
'''
Given a year, a week number, and an offset (the number of weeks after the current week), get the year and week number at that offset
'''
def get_week_info_by_offset(current_year,current_weeknum,offset):
    # fall back to the input week so the return below cannot raise NameError
    dest_year, dest_weeknum = current_year, current_weeknum
    try:
index = WeekDateStandard.objects.all().values('id').filter(Year=current_year, WeekNum=current_weeknum)[0]['id']
except Exception as e:
print('The Error Reason is :', e)
index = 1
try:
res = WeekDateStandard.objects.values('Year','WeekNum').filter(id=index+offset)[0]
dest_year = res['Year']
dest_weeknum = res['WeekNum']
except Exception as e:
print(e)
return dest_year,dest_weeknum
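# Example (illustrative): get_week_info_by_offset(2006, 52, 2) finds the
# WeekDateStandard row id of (2006, week 52) and returns the (Year, WeekNum)
# stored two rows later, e.g. (2007, 2).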
'''
Insert data into the table that stores white-feather broiler intermediate values
'''
def insertDB_median_baiyu(**kwargs):
param_init = {
'originYear':0,
'originWeek':0,
'Year':2050,
'WeekNum':1,
'startDate':'2050-01-01',
'endDate':'2050-01-07',
'CompanyId':14,
'SpeciesId':6,
'feedWayId':1,
'shengchanWeek': 0,
'TotalYuChengCunLan':0,
'TotalChanDanCunLan':0,
'TotalDan':0,
'TotalChuJi':0,
'TotalFactSaleChuJi':0,
'TaoTaiJiNum':0,
'dTaoTaiJiRou':0,
'nGeneration':1,
'nDraftOrOriginal':1,
'nBirdsType':1,
'Remark':''
}
for key in kwargs:
if key in param_init.keys():
param_init[key] = kwargs[key]
else:
pass
WeeklyIntroducedMedian.objects.create(
originYear = param_init['originYear'],
originWeek = param_init['originWeek'],
Year = param_init['Year'],
WeekNum = param_init['WeekNum'],
startDate = param_init['startDate'],
endDate = param_init['endDate'],
CompanyId = param_init['CompanyId'],
SpeciesId = param_init['SpeciesId'],
feedWayId = param_init['feedWayId'],
shengchanWeek = param_init['shengchanWeek'],
TotalYuChengCunLan = param_init['TotalYuChengCunLan'],
TotalChanDanCunLan = param_init['TotalChanDanCunLan'],
TotalDan = param_init['TotalDan'],
TotalChuJi = param_init['TotalChuJi'],
TotalFactSaleChuJi = param_init['TotalFactSaleChuJi'],
TaoTaiJiNum = param_init['TaoTaiJiNum'],
dTaoTaiJiRou = param_init['dTaoTaiJiRou'],
nGeneration = param_init['nGeneration'],
nDraftOrOriginal = param_init['nDraftOrOriginal'],
nBirdsType = param_init['nBirdsType'],
Remark = param_init['Remark']
)
def cleanup_weeklyintroducedmedian(bird_type,nGen):
try:
res = WeeklyIntroducedMedian.objects.all().filter(nBirdsType=bird_type,nGeneration=nGen).delete()
except Exception as e:
print("insertDB_median_baiyu:The Error Reason is :", e)
return
def cleanup_weeklycoretable(bird_type,nGen,save_type):
try:
res = WeeklyCoreTable.objects.all().filter(nBirdsType=bird_type,nGeneration=nGen,nDraftOrOriginal=save_type).delete()
except Exception as e:
print("insertDB_median_baiyu:The Error Reason is :", e)
return
def insertDB_weekly_core_baiyu(**kwargs):
param_init = {
'Year':2050,
'WeekNum':1,
'startDate':'1000-01-01',
'endDate':'1000-01-01',
'TotalYuChengCunLan':0,
'TotalChanDanCunLan':0,
'TotalDan':0,
'TotalChuJi':0,
'TotalFactSaleChuJi':0,
'TaoTaiJiNum':0,
'dTaoTaiJiRou':0,
'nGeneration':1,
'nDraftOrOriginal':1,
'nBirdsType':1,
'Remark':''
}
for key in kwargs:
if key in param_init.keys():
param_init[key] = kwargs[key]
else:
pass
try:
WeeklyCoreTable.objects.create(
Year = param_init['Year'],
WeekNum = param_init['WeekNum'],
startDate = param_init['startDate'],
endDate = param_init['endDate'],
TotalYuChengCunLan = param_init['TotalYuChengCunLan'],
TotalChanDanCunLan = param_init['TotalChanDanCunLan'],
TotalDan = param_init['TotalDan'],
TotalChuJi = param_init['TotalChuJi'],
TotalFactSaleChuJi = param_init['TotalFactSaleChuJi'],
TaoTaiJiNum = param_init['TaoTaiJiNum'],
dTaoTaiJiRou = param_init['dTaoTaiJiRou'],
nGeneration = param_init['nGeneration'],
nDraftOrOriginal = param_init['nDraftOrOriginal'],
nBirdsType = param_init['nBirdsType'],
Remark = param_init['Remark']
)
except Exception as e:
print("The Error Reason is :", e)
'''
Get the start date and end date of the given week of the given year
'''
def get_start_end_date_by_week(year,week_num):
try:
res = WeekDateStandard.objects.values('startDate','endDate').filter(Year=year,WeekNum=week_num)[0]
start_date = res['startDate']
end_date = res['endDate']
except Exception as e:
start_date = '1000-01-01'
end_date = '1000-01-01'
print('get_start_end_date_by_week:The Error Reason is:',e)
return start_date,end_date
def get_company_info():
res = []
try:
res = CompanyInfo.objects.all().values()
except Exception as e:
print('get_company_info:The Error Reason is:',e)
return res
def get_company_info_by_id(company_id):
res = []
try:
res = CompanyInfo.objects.values().filter(id = company_id)
except Exception as e:
print(e)
return res
def get_feedway_info():
res = []
try:
res = FeedWay.objects.all().values()
except Exception as e:
print("get_feedway_info:The Error Reason is:",e)
return res
def get_species_info():
result = []
try:
result = SpeciesInfo.objects.all().values()
except Exception as e:
print('get_species_info:The Error Reason is:',e)
return result
'''
Get the number of day-old grandparent-generation chicks
'''
def get_zudai_chuji_info():
chuji_list = []
zd_res = WeeklyCoreTable.objects.values('Year','WeekNum','TotalFactSaleChuJi')
for index in zd_res:
# print(index['Year'],index['WeekNum'],index['TotalFactSaleChuJi'])
chuji_list.append(index)
return chuji_list
'''
Insert the grandparent generation's day-old chick counts as the parent generation's introductions
'''
def fumudai_introduced_info(*args):
pass
def insertDB_introduced_detail_info(**kwargs):
if not kwargs:
print('no valid kwargs')
return
else:
print('database would be updated')
init_param ={
'Year':0,
'WeekNum':0,
'startDate':'1000-1-1',
'endDate':'1000-1-1',
'CompanyId':0,
'SpeciesId':0,
'feedWayId':0,
'RuSheNum':0,
'LivePeriod':0,
'qzhyFlag':0,
'huanyuRate':0,
'qzhyStartWeek':0,
'HuanyuInterval':0,
'qzhyPeriod':0,
'nGeneration':0,
'nDraftOrOriginal':0,
'nBirdsType':0,
'Remark':0
}
for key in kwargs:
if key in init_param.keys():
init_param[key] = kwargs[key]
else:
            print('Keep the original key and value!')
IntroducedInfoDetail.objects.create(
Year=init_param['Year'],
WeekNum=init_param['WeekNum'],
startDate=init_param['startDate'],
endDate=init_param['endDate'],
CompanyId=init_param['CompanyId'],
SpeciesId=init_param['SpeciesId'],
feedWayId=init_param['feedWayId'],
RuSheNum=init_param['RuSheNum'],
LivePeriod=init_param['LivePeriod'],
qzhyFlag=init_param['qzhyFlag'],
huanyuRate=init_param['huanyuRate'],
qzhyStartWeek=init_param['qzhyStartWeek'],
HuanyuInterval=init_param['HuanyuInterval'],
qzhyPeriod=init_param['qzhyPeriod'],
nGeneration=init_param['nGeneration'],
nDraftOrOriginal=init_param['nDraftOrOriginal'],
nBirdsType=init_param['nBirdsType'],
Remark=init_param['Remark']
)
def insertDB_introduced_sum_info(**kwargs):
if not kwargs:
print('no valid kwargs')
return
else:
print('database would be updated')
init_param ={
'Year':0,
'WeekNum':0,
'startDate':'1000-1-1',
'endDate':'1000-1-1',
'RuSheNum':0,
'LivePeriod':0,
'nGeneration':0,
'nDraftOrOriginal':0,
'nBirdsType':0,
'Remark':0,
'flag':0
}
for key in kwargs:
if key in init_param.keys():
init_param[key] = kwargs[key]
else:
            print('Keep the original key and value!')
IntroducedInfo.objects.create(
Year=init_param['Year'],
WeekNum=init_param['WeekNum'],
startDate=init_param['startDate'],
endDate=init_param['endDate'],
RuSheNum=init_param['RuSheNum'],
LivePeriod=init_param['LivePeriod'],
nGeneration=init_param['nGeneration'],
nDraftOrOriginal=init_param['nDraftOrOriginal'],
nBirdsType=init_param['nBirdsType'],
Remark=init_param['Remark']
)
def clean_introducd_info(bird_type,nGen):
try:
res = IntroducedInfo.objects.all().filter(nBirdsType=bird_type,nGeneration=nGen).delete()
except Exception as e:
print("clean_introducd_info:The Error Reason is:",str(e))
return
'''
Slaughter-count and meat-volume statistics (per-detail rows)
'''
def insertDB_weekly_detail_statistics(**kwargs):
param_init = {
'Year': 0,
'WeekNum': 0,
'startDate': '1000-01-01',
'endDate': '1000-01-01',
'CunlLanNum35': 0,
'CunlLanNum42': 0,
'CunlLanNum49': 0,
'CunlLanNum56': 0,
'ChuLanRouJiNum35': 0,
'ChuLanRouJiNum42': 0,
'ChuLanRouJiNum49': 0,
'ChuLanRouJiNum56': 0,
'TotalChuLanRouJiNum': 0,
'HuoZhong35': 0,
'HuoZhong42': 0,
'HuoZhong49': 0,
'HuoZhong56': 0,
'TotalHuoZhong': 0,
'JiRou35': 0,
'JiRou42': 0,
'JiRou49': 0,
'JiRou56': 0,
'TotalJiRou': 0,
'JiXiong35': 0,
'JiXiong42': 0,
'JiXiong49': 0,
'JiXiong56': 0,
'TotalJiXiong': 0,
'JiChi35': 0,
'JiChi42': 0,
'JiChi49': 0,
'JiChi56': 0,
'TotalJiChi': 0,
'JiTui35': 0,
'JiTui42': 0,
'JiTui49': 0,
'JiTui56': 0,
'TotalJiTui': 0,
'JiGuJia35': 0,
'JiGuJia42': 0,
'JiGuJia49': 0,
'JiGuJia56': 0,
'TotalJiGuJia': 0,
'JiNeiZang35': 0,
'JiNeiZang42': 0,
'JiNeiZang49': 0,
'JiNeiZang56': 0,
'TotalJiNeiZang': 0,
'nDraftOrOriginal': 1,
'nBirdsType':1,
'Remark': '',
}
for key in kwargs:
if key in param_init.keys():
param_init[key] = kwargs[key]
else:
pass
WeeklyStatisticDetail.objects.create(
Year=param_init['Year'],
WeekNum=param_init['WeekNum'],
startDate=param_init['startDate'],
endDate=param_init['endDate'],
CunlLanNum35=param_init['CunlLanNum35'],
CunlLanNum42=param_init['CunlLanNum42'],
CunlLanNum49=param_init['CunlLanNum49'],
CunlLanNum56=param_init['CunlLanNum56'],
ChuLanRouJiNum35=param_init['ChuLanRouJiNum35'],
ChuLanRouJiNum42=param_init['ChuLanRouJiNum42'],
ChuLanRouJiNum49=param_init['ChuLanRouJiNum49'],
ChuLanRouJiNum56=param_init['ChuLanRouJiNum56'],
TotalChuLanRouJiNum=param_init['TotalChuLanRouJiNum'],
HuoZhong35=param_init['HuoZhong35'],
HuoZhong42=param_init['HuoZhong42'],
HuoZhong49=param_init['HuoZhong49'],
HuoZhong56=param_init['HuoZhong56'],
TotalHuoZhong=param_init['TotalHuoZhong'],
JiRou35=param_init['JiRou35'],
JiRou42=param_init['JiRou42'],
JiRou49=param_init['JiRou49'],
JiRou56=param_init['JiRou56'],
TotalJiRou=param_init['TotalJiRou'],
JiXiong35=param_init['JiXiong35'],
JiXiong42=param_init['JiXiong42'],
JiXiong49=param_init['JiXiong49'],
JiXiong56=param_init['JiXiong56'],
TotalJiXiong=param_init['TotalJiXiong'],
JiChi35=param_init['JiChi35'],
JiChi42=param_init['JiChi42'],
JiChi49=param_init['JiChi49'],
JiChi56=param_init['JiChi56'],
TotalJiChi=param_init['TotalJiChi'],
JiTui35=param_init['JiTui35'],
JiTui42=param_init['JiTui42'],
JiTui49=param_init['JiTui49'],
JiTui56=param_init['JiTui56'],
TotalJiTui=param_init['TotalJiTui'],
JiGuJia35=param_init['JiGuJia35'],
JiGuJia42=param_init['JiGuJia42'],
JiGuJia49=param_init['JiGuJia49'],
JiGuJia56=param_init['JiGuJia56'],
TotalJiGuJia=param_init['TotalJiGuJia'],
JiNeiZang35=param_init['JiNeiZang35'],
JiNeiZang42=param_init['JiNeiZang42'],
JiNeiZang49=param_init['JiNeiZang49'],
JiNeiZang56=param_init['JiNeiZang56'],
TotalJiNeiZang=param_init['TotalJiNeiZang'],
nDraftOrOriginal=param_init['nDraftOrOriginal'],
nBirdsType=param_init['nBirdsType'],
Remark=param_init['Remark']
)
'''
Slaughter-count and meat-volume statistics (weekly totals)
'''
def insertDB_weekly_statistics(**kwargs):
param_init = {
'Year': 0,
'WeekNum': 0,
'startDate': '1000-01-01',
'endDate': '1000-01-01',
'CunlLanNum35': 0,
'CunlLanNum42': 0,
'CunlLanNum49': 0,
'CunlLanNum56': 0,
'ChuLanRouJiNum35': 0,
'ChuLanRouJiNum42': 0,
'ChuLanRouJiNum49': 0,
'ChuLanRouJiNum56': 0,
'TotalChuLanRouJiNum': 0,
'HuoZhong35': 0,
'HuoZhong42': 0,
'HuoZhong49': 0,
'HuoZhong56': 0,
'TotalHuoZhong': 0,
'JiRou35': 0,
'JiRou42': 0,
'JiRou49': 0,
'JiRou56': 0,
'TotalJiRou': 0,
'JiXiong35': 0,
'JiXiong42': 0,
'JiXiong49': 0,
'JiXiong56': 0,
'TotalJiXiong': 0,
'JiChi35': 0,
'JiChi42': 0,
'JiChi49': 0,
'JiChi56': 0,
'TotalJiChi': 0,
'JiTui35': 0,
'JiTui42': 0,
'JiTui49': 0,
'JiTui56': 0,
'TotalJiTui': 0,
'JiGuJia35': 0,
'JiGuJia42': 0,
'JiGuJia49': 0,
'JiGuJia56': 0,
'TotalJiGuJia': 0,
'JiNeiZang35': 0,
'JiNeiZang42': 0,
'JiNeiZang49': 0,
'JiNeiZang56': 0,
'TotalJiNeiZang': 0,
'nDraftOrOriginal': 1,
'nBirdsType':1,
'Remark': '',
}
for key in kwargs:
if key in param_init.keys():
param_init[key] = kwargs[key]
else:
pass
WeeklyStatisticTable.objects.create(
Year=param_init['Year'],
WeekNum=param_init['WeekNum'],
startDate=param_init['startDate'],
endDate=param_init['endDate'],
CunlLanNum35=param_init['CunlLanNum35'],
CunlLanNum42=param_init['CunlLanNum42'],
CunlLanNum49=param_init['CunlLanNum49'],
CunlLanNum56=param_init['CunlLanNum56'],
ChuLanRouJiNum35=param_init['ChuLanRouJiNum35'],
ChuLanRouJiNum42=param_init['ChuLanRouJiNum42'],
ChuLanRouJiNum49=param_init['ChuLanRouJiNum49'],
ChuLanRouJiNum56=param_init['ChuLanRouJiNum56'],
TotalChuLanRouJiNum=param_init['TotalChuLanRouJiNum'],
HuoZhong35=param_init['HuoZhong35'],
HuoZhong42=param_init['HuoZhong42'],
HuoZhong49=param_init['HuoZhong49'],
HuoZhong56=param_init['HuoZhong56'],
TotalHuoZhong=param_init['TotalHuoZhong'],
JiRou35=param_init['JiRou35'],
JiRou42=param_init['JiRou42'],
JiRou49=param_init['JiRou49'],
JiRou56=param_init['JiRou56'],
TotalJiRou=param_init['TotalJiRou'],
JiXiong35=param_init['JiXiong35'],
JiXiong42=param_init['JiXiong42'],
JiXiong49=param_init['JiXiong49'],
JiXiong56=param_init['JiXiong56'],
TotalJiXiong=param_init['TotalJiXiong'],
JiChi35=param_init['JiChi35'],
JiChi42=param_init['JiChi42'],
JiChi49=param_init['JiChi49'],
JiChi56=param_init['JiChi56'],
TotalJiChi=param_init['TotalJiChi'],
JiTui35=param_init['JiTui35'],
JiTui42=param_init['JiTui42'],
JiTui49=param_init['JiTui49'],
JiTui56=param_init['JiTui56'],
TotalJiTui=param_init['TotalJiTui'],
JiGuJia35=param_init['JiGuJia35'],
JiGuJia42=param_init['JiGuJia42'],
JiGuJia49=param_init['JiGuJia49'],
JiGuJia56=param_init['JiGuJia56'],
TotalJiGuJia=param_init['TotalJiGuJia'],
JiNeiZang35=param_init['JiNeiZang35'],
JiNeiZang42=param_init['JiNeiZang42'],
JiNeiZang49=param_init['JiNeiZang49'],
JiNeiZang56=param_init['JiNeiZang56'],
TotalJiNeiZang=param_init['TotalJiNeiZang'],
nDraftOrOriginal=param_init['nDraftOrOriginal'],
nBirdsType=param_init['nBirdsType'],
Remark=param_init['Remark']
)
def get_xuanyongrate(bird_type,nGen):
result = 0
try:
result = WholeParameter.objects.all().values('XuanYongRate').filter(nBirdsType=bird_type,nGeneration=nGen)[0]['XuanYongRate']
except Exception as e:
        print('The error reason is:', str(e))
return result
# def calc_baiyu_weekly_median_core(bird_type,nGen,CompanyId,SpeciesId,feedWayId,chandan_interval=3):
#
# print("%"*20)
# param_all = get_weekly_param_standard_all(bird_type,nGen,SpeciesId,feedWayId)
# taotaiji_param_all = get_taotaiji_param_all(bird_type,nGen)
# xuanyongrate = get_xuanyongrate(bird_type,nGen)
# date_list = get_date_standard_list()
# print("%" * 20)
# rushe_DateList = get_Rushe_timeList(nGen,bird_type,2006)
#
# ## clear existing rows from the baiyu_weeklyintroducedmedian table
# cleanup_weeklyintroducedmedian(bird_type,nGen)
#
# med_core_data_list = []
#
# for index in rushe_DateList:
# # compute all data within the live period, from the starting year, week, and live period
# start_year = index['Year'] #2006
# start_week = index['WeekNum'] #2
# rushe_num = index['RuSheNum'] #15000
# dan_tmp_list = []
# for iWeek in range(index['LivePeriod']+chandan_interval):
# cur_year,cur_week,start_date,end_date = get_year_week_by_offset_function(date_list, start_year, start_week, iWeek)
# shengchanWeek = iWeek+1
# ## from the starting year and week, derive the year and week of each subsequent week in the live period; e.g. three weeks after week 1 of 2006 is week 4 of 2006
# yuchengCunlan = 0
# chandanCunlan = 0
# chandanNum = 0
# chuji_num = 0
# real_sale_chuji_num = 0
# TaoTaiJiNum = 0
# taotai_jirou = 0
#
# if iWeek == index['LivePeriod']:
# TaoTaiJiNum = rushe_num
# else:
# TaoTaiJiNum = 0
#
# if iWeek < index['LivePeriod']:
# ### production weeks below 25 are the rearing stage; laying starts after week 25
# if shengchanWeek < 25: # the 25 should be fetched dynamically
# yuchengCunlan = rushe_num #15000
# chandanCunlan = 0
# else:
# yuchengCunlan = 0
# chandanCunlan = rushe_num
#
# ## compute the egg count from the laying-stage inventory
# chandanNum = round(chandanCunlan*param_all[iWeek]['ChanDanRate']/100*7)
# dan_tmp_list.append(chandanNum)
#
# '''
# Fetch the egg count from three weeks earlier; the incubation period is 21 days, counted as 3 weeks
# '''
# if shengchanWeek < (25+ chandan_interval):
# dan_init = 0
# shoujing_rate = 0
# rufu_zhongdan_rate = 0
# fuhua_rate = 0
#
# else:
# dan_init = dan_tmp_list[iWeek-chandan_interval]
# shoujing_rate = param_all[iWeek-chandan_interval]['ShouJingRate'] / 100
# rufu_zhongdan_rate = param_all[iWeek-chandan_interval]['RuFuZhongDanRate']/100
# fuhua_rate = param_all[iWeek-chandan_interval]['FuHuaRate']/100
# jianchu_rate = param_all[iWeek]['JianChuRate']/100
# chuji_num = round(dan_init*shoujing_rate*rufu_zhongdan_rate*fuhua_rate*jianchu_rate*xuanyongrate/100)
# real_sale_chuji_num = chuji_num
#
#
# taotai_jirou = TaoTaiJiNum * taotaiji_param_all[index['Year']-1990]['StandardTZ'] * taotaiji_param_all[index['Year']-1990]['TuZaiRate']*1.0/100
#
# rushe_num = round(rushe_num*(100-param_all[iWeek]['siTaoRate'])/100)
#
# # print(index['Year'],index['WeekNum'],shengchanWeek,yuchengCunlan, chandanCunlan, chandanNum, chuji_num, real_sale_chuji_num,TaoTaiJiNum,taotai_jirou)
# item = WeeklyIntroducedMedian(
#
# originYear=start_year,
# originWeek=start_week,
# Year=cur_year,
# WeekNum=cur_week,
# startDate=start_date,
# endDate=end_date,
# CompanyId=CompanyId,
# SpeciesId=SpeciesId,
# feedWayId=feedWayId,
# shengchanWeek=shengchanWeek,
# TotalYuChengCunLan=yuchengCunlan,
# TotalChanDanCunLan=chandanCunlan,
# TotalDan=chandanNum,
# TotalChuJi=chuji_num,
# TotalFactSaleChuJi=real_sale_chuji_num,
# TaoTaiJiNum=TaoTaiJiNum,
# dTaoTaiJiRou=taotai_jirou,
# nBirdsType=bird_type,
# nGeneration=nGen,
# Remark=''
# )
# med_core_data_list.append(item)
# WeeklyIntroducedMedian.objects.bulk_create(med_core_data_list)
def get_weekly_param_standard_all(bird_type,nGen):
result = []
try:
res = WeekStandardTable.objects.all().values().filter(nBirdsType=bird_type,nGeneration=nGen)
result = res
except Exception as e:
pass
return result
def get_date_standard_list():
    res = []  # default, so the return below cannot raise NameError on failure
    try:
res = WeekDateStandard.objects.all().values('id', 'Year', 'WeekNum','startDate','endDate').filter(Year__gt=2000)
except Exception as e:
print(e)
return res
# def get_year_week_by_offset_function(list_res,year,week_num,offset):
#
# cur_year = '0000-00-00'
# cur_week = 0
# tmp = 0
# for index,elem in enumerate(list_res):
# if elem['Year'] == year and elem['WeekNum'] == week_num:
# tmp = index
# break
# cur_year = list_res[tmp + offset]['Year']
# cur_week = list_res[tmp + offset]['WeekNum']
# start_date = list_res[tmp + offset]['startDate']
# end_date = list_res[tmp + offset]['endDate']
# return cur_year,cur_week,start_date,end_date
# def gen_fumudai_introdeced_data(
# CompanyId = 1,
# SpeciesId = 1,
# feedWayId = 1,
# nGen=2,
# nDraftOrOriginal=1,
# nBirdsType=1,
# Remark=''
# ):
# ## step 0: delete all existing parent-generation housing rows from the table
# clean_introducd_detail_info(nBirdsType,nGen)
# ## step 1: get the parent-generation introduction date list
# time_res = IntroducedInfoDetail.objects.values('Year','WeekNum','LivePeriod').filter(nBirdsType=nBirdsType,nGeneration=nGen-1)
# ## step 2: insert data following the date list
# for index in time_res:
# startDate,endDate = get_start_end_date_by_week(index['Year'], index['WeekNum'])
# rushe_num = get_fumudai_rushe_num(index['Year'], index['WeekNum'], nBirdsType, nGen-1)
# print(startDate,endDate,rushe_num)
# insertDB_introduced_detail_info(
# Year=index['Year'],
# WeekNum=index['WeekNum'],
# startDate=startDate,
# endDate=endDate,
# CompanyId=CompanyId,
# SpeciesId=SpeciesId,
# feedWayId=feedWayId,
# RuSheNum=rushe_num, ## derived from the grandparent generation's actually-sold chick count
# LivePeriod=index['LivePeriod'], ## from the parent-generation weekly standard
# qzhyFlag= 0 ,
# huanyuRate= 0,
# qzhyStartWeek=0,
# HuanyuInterval=0,
# qzhyPeriod=0,
# nGeneration = nGen,
# nDraftOrOriginal = nDraftOrOriginal,
# nBirdsType = nBirdsType,
# Remark=Remark
# )
def clean_introducd_detail_info(nBirdsType,nGen):
try:
IntroducedInfoDetail.objects.filter(nBirdsType=nBirdsType,nGeneration=nGen).delete()
except Exception as e:
print(e)
def get_fumudai_rushe_num(year,week_num,bird_type,nGen):
RuSheNum = 0
try:
res = WeeklyCoreTable.objects.values('TotalFactSaleChuJi').filter(Year = year, WeekNum= week_num,nBirdsType=bird_type,nGeneration=nGen)
RuSheNum = res[0]['TotalFactSaleChuJi']
except Exception as e:
print('get_fumudai_rushe_num:The error reason is :',str(e))
return RuSheNum
def cleanup_weeklyintroducedmedian(bird_type,nGen):
try:
res = WeeklyIntroducedMedian.objects.all().filter(nBirdsType=bird_type,nGeneration=nGen).delete()
except Exception as e:
print('The Error Reason is :',str(e))
|
Suefly/BoyarForecastSystem
|
baiyu/db.py
|
db.py
|
py
| 45,424 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pymysql.connect",
"line_number": 11,
"usage_type": "call"
}
] |
71888988667
|
"""
需要备份的文件和目录由一个列表指定。
备份应该保存在主备份目录中。
文件备份成一个zip文件。
zip存档的名称是当前的日期和时间。
我们使用标准的zip命令,它通常默认地随Linux/Unix发行版提供。Windows用户可以使用Info-Zip程序。注意你可以使用任何地存档命令,
只要它有命令行界面就可以了,那样的话我们可以从我们的脚本中传递参数给它。
"""
import zipfile
def zip_files(files, zip_name):
    # Use a name that doesn't shadow the built-in zip()
    zf = zipfile.ZipFile(zip_name, 'w')
    for file in files:
        print('compressing', file)
        zf.write(file)
    zf.close()
    print('compressing finished')
files = ['D:\I\'M\\usedbypython\\1.txt']  # file locations; separate multiple files with ","
zip_file = 'D:\I\'M\\usedbypython\\2.zip'  # name of the zip archive
zip_files(files, zip_file)
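# A minimal sketch of the date/time-named archive the docstring above
# describes, using only the standard library; `backup_dir` is an assumed,
# illustrative path rather than part of the original script.
import os
import time
def backup_to_timestamped_zip(files, backup_dir='D:\\backup'):
    # Ensure the main backup directory exists, then name the archive after
    # the current date and time, e.g. 20240131120000.zip
    os.makedirs(backup_dir, exist_ok=True)
    zip_name = os.path.join(backup_dir, time.strftime('%Y%m%d%H%M%S') + '.zip')
    zip_files(files, zip_name)
    return zip_name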
|
fivespeedasher/Pieces
|
重要文件创建备份.py
|
重要文件创建备份.py
|
py
| 874 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "zipfile.ZipFile",
"line_number": 16,
"usage_type": "call"
}
] |
43633665683
|
# pylint: disable=no-self-use,invalid-name
from __future__ import division
from __future__ import absolute_import
import pytest
from allennlp.data.dataset_readers.conll2003 import Conll2003DatasetReader
from allennlp.common.util import ensure_list
from allennlp.common.testing import AllenNlpTestCase
class TestConll2003Reader(object):
@pytest.mark.parametrize(u"lazy", (True, False))
@pytest.mark.parametrize(u"coding_scheme", (u'IOB1', u'BIOUL'))
def test_read_from_file(self, lazy, coding_scheme):
conll_reader = Conll2003DatasetReader(lazy=lazy, coding_scheme=coding_scheme)
instances = conll_reader.read(unicode(AllenNlpTestCase.FIXTURES_ROOT / u'data' / u'conll2003.txt'))
instances = ensure_list(instances)
if coding_scheme == u'IOB1':
expected_labels = [u'I-ORG', u'O', u'I-PER', u'O', u'O', u'I-LOC', u'O']
else:
expected_labels = [u'U-ORG', u'O', u'U-PER', u'O', u'O', u'U-LOC', u'O']
fields = instances[0].fields
tokens = [t.text for t in fields[u'tokens'].tokens]
assert tokens == [u'U.N.', u'official', u'Ekeus', u'heads', u'for', u'Baghdad', u'.']
assert fields[u"tags"].labels == expected_labels
fields = instances[1].fields
tokens = [t.text for t in fields[u'tokens'].tokens]
assert tokens == [u'AI2', u'engineer', u'Joel', u'lives', u'in', u'Seattle', u'.']
assert fields[u"tags"].labels == expected_labels
|
plasticityai/magnitude
|
pymagnitude/third_party/allennlp/tests/data/dataset_readers/conll2003_dataset_reader_test.py
|
conll2003_dataset_reader_test.py
|
py
| 1,471 |
python
|
en
|
code
| 1,607 |
github-code
|
6
|
[
{
"api_name": "allennlp.data.dataset_readers.conll2003.Conll2003DatasetReader",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "allennlp.common.testing.AllenNlpTestCase.FIXTURES_ROOT",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "allennlp.common.testing.AllenNlpTestCase",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "allennlp.common.util.ensure_list",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 14,
"usage_type": "attribute"
}
] |
20573212978
|
from flask import Flask, send_file
app = Flask(__name__)
from tools.sound_file_generator import sound_generator
from tools.interval import Interval
@app.route("/")
def index ():
return "MusicApp is active"
@app.route("/audiofile/<note>")
def get_note_sound(note):
generator = sound_generator()
sound_url = generator.get_sound_url(note)
return send_file(sound_url)
@app.route("/intervals/<root>/<interval>/<category>")
def get_interval_sound(root,interval,category):
"""Get the interval parameters and returns an audio file
in -> string Root Note, string Interval, string category "Melodic" or "Harmonic"
out -> void """
generator = Interval()
interval_url = generator.get_interval_audio_url(root,interval,category)
return send_file(interval_url)
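# Hedged usage sketch (the note and interval names are illustrative, not
# values this API is known to accept):
#   GET /audiofile/C4            -> audio file for a single note
#   GET /intervals/C4/P5/Melodic -> interval audio, per the docstring above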
if __name__ =="__main__":
app.run()
|
DanieleSpera/EarTraningAPI
|
__init__.py
|
__init__.py
|
py
| 822 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "tools.sound_file_generator.sound_generator",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.send_file",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tools.interval.Interval",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "flask.send_file",
"line_number": 25,
"usage_type": "call"
}
] |
21217863407
|
import cgi
from http.server import BaseHTTPRequestHandler,HTTPServer
from db_setup import Base,Restaurant,MenuItem
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///restaurantmenu.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
class webserverHandler(BaseHTTPRequestHandler):
def do_GET(self):
try:
if self.path.endswith("/restaurant"):
self.send_response(200)
self.send_header('content-type','text/html')
self.end_headers()
output=""
                output +='<html><body><h1><a href="/restaurant/new">Create a new Restaurant</a></h1>'
output +='<h2>Restaurants List</h2>'
listOfRes = session.query(Restaurant).all()
for restaurant in listOfRes:
output+='<h3>%s</h3>' %(restaurant.name)
output+= '<br>'
editLink = "/restaurant/%s/edit" %(restaurant.id)
output+= '<a href="%s">Edit</a>' %(editLink)
output+='<br>'
deleteLink = "/restaurant/%s/delete" % restaurant.id
output+='<a href="%s">Delete</a>' % deleteLink
output += "</body></html>"
self.wfile.write(bytes(output,"UTF-8"))
return
if self.path.endswith("/delete"):
restaurantId=self.path.split("/")[2]
getResById = session.query(Restaurant).filter_by(id=restaurantId).one()
if(getResById != []):
self.send_response(200)
self.send_header('content-type', 'text/html')
self.end_headers()
output = ""
output += "<html><body>"
output += "<h2>Do you want to delete this Restaurant?</h2>"
output += "<form method='POST' enctype='multipart/form-data' action='/restaurant/%s/delete'>" \
"<input type='submit' value='Delete'></form>" % restaurantId
output += "</body></html>"
self.wfile.write(bytes(output,"utf-8"))
return
if self.path.endswith("/edit"):
restaurantId=self.path.split("/")[2]
getNameById = session.query(Restaurant).filter_by(id=restaurantId).one()
if(getNameById != []):
self.send_response(200)
self.send_header('content-type', 'text/html')
self.end_headers()
output=""
output+="<html><body>"
output+="<h2>%s</h2>" %(getNameById.name)
output+= "<form method='POST' enctype='multipart/form-data' action='/restaurant/%s/edit'>" \
"<input name='editedName' placeholder='Enter New Name' type='text'>" \
"<input type='submit' value='Rename'></form>" %(restaurantId)
output+="</body></html>"
self.wfile.write(bytes(output,"utf-8"))
return
if self.path.endswith("/restaurant/new"):
self.send_response(200)
self.send_header('content-type','text/html')
self.end_headers()
output=""
output += '<html><body>'
output += "<form method='POST' enctype='multipart/form-data' action='/restaurant/new'><h1>Make a New Restaurant</h1>" \
"<input name='nameOfRes' type='text'><input type='submit' value='Create'></form>"
output += "</body></html>"
self.wfile.write(bytes(output,"UTF-8"))
return
if self.path.endswith("/hello"):
self.send_response(200)
self.send_header('content-type','text/html')
self.end_headers()
output=""
                output=output+'<html><body><h1>Hello!</h1>'
                output += "<form method='POST' enctype='multipart/form-data' action='/hello'><h2>what would you like me to say?</h2>" \
"<input name='message' type='text'><input type='submit' value='Submit'></form>"
output += "</body></html>"
self.wfile.write(bytes(output,"UTF-8"))
print(output)
return
except IOError:
self.send_error(404,"FILE NOT FOUND %s" % self.path)
def do_POST(self):
try:
if self.path.endswith("/delete"):
restaurantId = self.path.split("/")[2]
deleteRes = session.query(Restaurant).filter_by(id=restaurantId).one()
if(deleteRes != []):
session.delete(deleteRes)
session.commit()
self.send_response(301)
self.send_header('Content-type', 'text/html')
self.send_header('Location', '/restaurant')
self.end_headers()
return
if self.path.endswith("/edit") :
restaurantId = self.path.split("/")[2]
ctype,pdict = cgi.parse_header(self.headers.get('content-type'))
pdict['boundary'] = bytes(pdict['boundary'],"utf-8")
fields=""
if(ctype=='multipart/form-data'):
fields = cgi.parse_multipart(self.rfile,pdict)
newName = fields.get('editedName')
newName = newName[0].decode("utf-8")
rename = session.query(Restaurant).filter_by(id=restaurantId).one()
if rename != []:
rename.name = newName
session.add(rename)
session.commit()
self.send_response(301)
self.send_header('Content-type', 'text/html')
self.send_header('Location', '/restaurant')
self.end_headers()
if(self.path.endswith("/restaurant/new")):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
pdict['boundary'] = bytes(pdict['boundary'], "UTF-8")
if (ctype == 'multipart/form-data'):
fields = cgi.parse_multipart(self.rfile, pdict)
nameOfRes = fields.get('nameOfRes')
ResToDb = Restaurant(name=nameOfRes[0].decode("utf-8"))
session.add(ResToDb)
session.commit()
self.send_response(301)
self.send_header('Content-type', 'text/html')
self.send_header('Location','/restaurant')
self.end_headers()
print("Output OK")
return
# output += "<h2>Okay,how about this:</h2>"
# output += "<h1>"
# self.wfile.write(bytes(output,"utf-8"))
# self.wfile.write(nameOfRes[0])
# output += ""
# output +=" </h1>"
# output += '''<form method='POST' enctype='multipart/form-data' action='/hello'><h2>What would you like me to say?</h2>
# <input name="message" type="text" ><input type="submit" value="Submit"> </form>'''
except:
pass
def main():
try:
port = 8080
server = HTTPServer(('',port),webserverHandler)
print(("web server running on port %s" % port))
server.serve_forever()
except KeyboardInterrupt:
print("^C entered, stopping web server...")
server.socket.close()
if __name__=='__main__':
main()
|
SelbayevAlmas/fullstack_foundations_example
|
myserver.py
|
myserver.py
|
py
| 7,756 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "sqlalchemy.create_engine",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "db_setup.Base.metadata",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "db_setup.Base",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "http.server.BaseHTTPRequestHandler",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "db_setup.Restaurant",
"line_number": 26,
"usage_type": "argument"
},
{
"api_name": "db_setup.Restaurant",
"line_number": 42,
"usage_type": "argument"
},
{
"api_name": "db_setup.Restaurant",
"line_number": 61,
"usage_type": "argument"
},
{
"api_name": "db_setup.Restaurant",
"line_number": 112,
"usage_type": "argument"
},
{
"api_name": "cgi.parse_header",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "cgi.parse_multipart",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "db_setup.Restaurant",
"line_number": 131,
"usage_type": "argument"
},
{
"api_name": "cgi.parse_header",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "cgi.parse_multipart",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "db_setup.Restaurant",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "http.server.HTTPServer",
"line_number": 175,
"usage_type": "call"
}
] |
27009683508
|
from sklearn.gaussian_process import GaussianProcessRegressor
def run(x_train, y_train, x_test, y_test,
kernel, alpha, optimizer, n_restarts_optimizer, normalize_y, copy_X_train, random_state
):
reg = GaussianProcessRegressor(kernel=kernel,
alpha=alpha,
optimizer=optimizer,
n_restarts_optimizer=n_restarts_optimizer,
normalize_y=normalize_y,
copy_X_train=copy_X_train,
random_state=random_state).fit(x_train, y_train)
return {'train_predict': reg.predict(x_train).tolist(),
'test_predict': reg.predict(x_test).tolist(),
'train_score': reg.score(x_train, y_train),
'test_score': reg.score(x_test, y_test),
'X_train_': reg.X_train_.tolist(),
'y_train_': reg.y_train_.tolist(),
'L_': reg.L_.tolist(),
'alpha_': reg.alpha_.tolist(),
'log_marginal_likelihood_value_': reg.log_marginal_likelihood_value_}
|
lisunshine1234/mlp-algorithm-python
|
machine_learning/regression/gaussian_processes/GaussianProcessRegressor/run.py
|
run.py
|
py
| 1,133 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sklearn.gaussian_process.GaussianProcessRegressor",
"line_number": 7,
"usage_type": "call"
}
] |
30107078628
|
""" Python Package Imports """
# Not Applicable
""" Django Package Support """
from django.contrib import admin
""" Internal Package Support """
""" -- IMPORTED AT APPROPRIATE SUBSECTION -- """
"""
event/admin.py
Author: Matthew J Swann
Version: 1.0
Last Update: 2014-06-05
Update by: Matthew J Swann
"""
class EventAdmin(admin.ModelAdmin):
list_display = ('id', 'date', 'company_tag', 'city','state')
list_filter = ('state',)
search_fields = ['date', 'company_tag', 'state', 'creator_tag', 'date_created']
ordering = ['date', 'city', 'state']
fieldsets = (
( 'Advanced options', {
'classes': ('wide', 'extrapretty'),
'fields' : ('date_created', 'creator_tag', 'company_tag', 'date', 'date_time_start',
'date_time_end',
'title',
'sessionStart', 'sessionEnd', 'addressLineOne', 'addressLineTwo', 'city',
'state', 'zipCode')
}),)
from Event.models import (
Event
)
admin.site.register(Event, EventAdmin)
|
mjs0031/view_trials
|
Event/admin.py
|
admin.py
|
py
| 1,197 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "Event.models.Event",
"line_number": 42,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 42,
"usage_type": "name"
}
] |
36052032500
|
import cv2
cap = cv2.VideoCapture(1,cv2.CAP_DSHOW)
if not cap.isOpened():
    print("Can't open camera")
exit(0)
cap.set(3,480)
cap.set(4,720)
cnt = 80
path = "Main_picture/"
ret,frame = cap.read()
H,W,_ = frame.shape
while True:
ret,frame = cap.read()
cv2.circle(frame,(W//2,H//2),5,(0,255,0),-1)
key = cv2.waitKey(30)
if ret:
if key == ord("s"):
cv2.imwrite(f"{path}Img_{cnt}.jpg",frame)
cnt += 1
    # Press ESC or q to quit
cv2.imshow("Result",frame)
if key == 27 or key == ord("q"): break
cap.release()
cv2.destroyAllWindows()
|
HieunnUTE/Rubik-solver-with-Image-processing
|
capture.py
|
capture.py
|
py
| 628 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "cv2.VideoCapture",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_DSHOW",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "cv2.circle",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 26,
"usage_type": "attribute"
}
] |
9074380203
|
import requests
import pandas as pd
from pytube import YouTube, Search
import os
from pathlib import Path
from .serializers import *
# Youtube credentials
YOUTUBE_KEY_API = 'YOUR_YOUTUBE_KEY_API'
# Setting url for videos and searching list
SEARCH_URL = 'https://www.googleapis.com/youtube/v3/search'
VIDEOS_URL = 'https://www.googleapis.com/youtube/v3/videos'
# Find the user's Downloads path
DOWNLOAD_PATH = str(Path.home() / 'Downloads')
ABSOLUTE_PATH = None #str(Path.absolute())
# SEARCH PARAMETERS
search_params = {
'key': YOUTUBE_KEY_API,
'q': '', # request.form.get('query')
'part': 'snippet',
'maxResults': 9,
'type': 'video'
}
# VIDEO PARAMETERS
video_params = {
'key': YOUTUBE_KEY_API,
'id': '', #','.join(video_ids),
'part': 'snippet,contentDetails',
'maxResults': 9
}
# Videos for testing
tiesto = [
{'video_id': 'nCg3ufihKyU', 'title': 'Tiësto - The Business (Official Music Video)', 'url': 'https://youtube.com/watch?v=nCg3ufihKyU', 'thumbnail': 'https://i.ytimg.com/vi/nCg3ufihKyU/sddefault.jpg?v=5f6cc459'},
{'video_id': 'taSubkjZUA4', 'title': "Tiësto & Karol G - Don't Be Shy (Official Music Video)", 'url': 'https://youtube.com/watch?v=taSubkjZUA4', 'thumbnail': 'https://i.ytimg.com/vi/taSubkjZUA4/sddefault.jpg?v=61151971'},
{'video_id': '1_4ELAxKrDc', 'title': 'Tiësto & Ava Max - The Motto (Official Music Video)', 'url': 'https://youtube.com/watch?v=1_4ELAxKrDc', 'thumbnail': 'https://i.ytimg.com/vi/1_4ELAxKrDc/sddefault.jpg?v=6183096b'},
{'video_id': '8R_4O3q92Lo', 'title': 'Tiësto - Live from Edge New York City', 'url': 'https://youtube.com/watch?v=8R_4O3q92Lo', 'thumbnail': 'https://i.ytimg.com/vi/8R_4O3q92Lo/sddefault.jpg'},
{'video_id': 'O1M2Dh94gMU', 'title': 'CLUBLIFE by Tiësto Episode 804', 'url': 'https://youtube.com/watch?v=O1M2Dh94gMU', 'thumbnail': 'https://i.ytimg.com/vi/O1M2Dh94gMU/hqdefault.jpg'},
{'video_id': 'r0bhF7SJLYQ', 'title': 'Tiësto & Charli XCX - Hot In It [Official Music Video]', 'url': 'https://youtube.com/watch?v=r0bhF7SJLYQ', 'thumbnail': 'https://i.ytimg.com/vi/r0bhF7SJLYQ/sddefault.jpg?v=62f5cd4d'},
{'video_id': 'nK-7S9HzFjo', 'title': 'TIËSTO MEGAMIX 2022 - Best Songs Of All Time', 'url': 'https://youtube.com/watch?v=nK-7S9HzFjo', 'thumbnail': 'https://i.ytimg.com/vi/nK-7S9HzFjo/sddefault.jpg'},
{'video_id': 'JqUqyUEwTMY', 'title': 'Tiësto - In Search Of Sunrise 4: Latin America CD1', 'url': 'https://youtube.com/watch?v=JqUqyUEwTMY', 'thumbnail': 'https://i.ytimg.com/vi/JqUqyUEwTMY/sddefault.jpg'},
{'video_id': 'kjdOBYTUOzY', 'title': 'TIËSTO @ 15 Years of Tomorrowland 2019 [full set]', 'url': 'https://youtube.com/watch?v=kjdOBYTUOzY', 'thumbnail': 'https://i.ytimg.com/vi/kjdOBYTUOzY/sddefault.jpg?v=5e2216ce'},
{'video_id': 'ontU9cOg354', 'title': 'Tiësto, Jonas Blue & Rita Ora - Ritual (Official Video)', 'url': 'https://youtube.com/watch?v=ontU9cOg354', 'thumbnail': 'https://i.ytimg.com/vi/ontU9cOg354/sddefault.jpg?v=5d0183d9'},
{'video_id': 'e94gack-DJk', 'title': 'Tiësto - Live @ Ultra Music Festival 2022', 'url': 'https://youtube.com/watch?v=e94gack-DJk', 'thumbnail': 'https://i.ytimg.com/vi/e94gack-DJk/sddefault.jpg'},
{'video_id': 'LqCcdtM7Qe4', 'title': 'Tiesto - Silence - Delerium featuring Sarah McLachlan', 'url': 'https://youtube.com/watch?v=LqCcdtM7Qe4', 'thumbnail': 'https://i.ytimg.com/vi/LqCcdtM7Qe4/hqdefault.jpg'},
{'video_id': 'b3mOLJvbBwQ', 'title': 'Tiësto feat. Nelly Furtado - Who Wants To Be Alone', 'url': 'https://youtube.com/watch?v=b3mOLJvbBwQ', 'thumbnail': 'https://i.ytimg.com/vi/b3mOLJvbBwQ/hqdefault.jpg'},
{'video_id': 'VlWOFJJIo9Y', 'title': 'DJ Tiesto - Insomnia', 'url': 'https://youtube.com/watch?v=VlWOFJJIo9Y', 'thumbnail': 'https://i.ytimg.com/vi/VlWOFJJIo9Y/hqdefault.jpg'},
{'video_id': 'Dr1nN__-2Po', 'title': 'Tiësto & KSHMR feat. Vassy - Secrets (Official Music Video)', 'url': 'https://youtube.com/watch?v=Dr1nN__-2Po', 'thumbnail': 'https://i.ytimg.com/vi/Dr1nN__-2Po/sddefault.jpg'},
{'video_id': '2EaE0_gQLw0', 'title': 'Tiësto - Adagio For Strings', 'url': 'https://youtube.com/watch?v=2EaE0_gQLw0', 'thumbnail': 'https://i.ytimg.com/vi/2EaE0_gQLw0/hqdefault.jpg'},
{'video_id': '8tIgN7eICn4', 'title': 'DJ Tiesto - Adagio For Strings', 'url': 'https://youtube.com/watch?v=8tIgN7eICn4', 'thumbnail': 'https://i.ytimg.com/vi/8tIgN7eICn4/hqdefault.jpg'},
{'video_id': '-qgzNwdkV4s', 'title': 'Dj Tiesto - Traffic!', 'url': 'https://youtube.com/watch?v=-qgzNwdkV4s', 'thumbnail': 'https://i.ytimg.com/vi/-qgzNwdkV4s/hqdefault.jpg'},
{'video_id': 'Jbh3GlrRcQ4', 'title': 'Tiësto ft. BT - Love Comes Again (Official Video)', 'url': 'https://youtube.com/watch?v=Jbh3GlrRcQ4', 'thumbnail': 'https://i.ytimg.com/vi/Jbh3GlrRcQ4/sddefault.jpg'}
]
# Main functions
def fetch_videos_from_yt(query, uat_data=None):
if uat_data:
return uat_data
videos = []
video_ids = []
search_params['q'] = query
video_params['q'] = query
    try:
        response = requests.get(SEARCH_URL, params=search_params)
        search_response = response.json()['items']
        for item in search_response:
            video_ids.append(item['id']['videoId'])
    except:
        print('Connection Error')
    if video_ids:
        # Query the videos endpoint with the ids gathered from the search
        video_params['id'] = ','.join(video_ids)
        response = requests.get(VIDEOS_URL, params=video_params)
        video_response = response.json()['items']
for video in video_response:
video_data = {
'video_id': video['id'],
'url': f'https://www.youtube.com/watch?v={video["id"]}',
'thumbnail': video['snippet']['thumbnails']['high']['url'],
'title': video['snippet']['title']
}
videos.append(video_data)
return videos
def fetch_videos(query, uat_data=None):
if uat_data:
return uat_data
else:
videos = []
search = Search(query)
for result in search.results:
video = {}
video['video_id'] = result.video_id
video['title'] = result.title
video['url'] = result.watch_url
video['thumbnail'] = YouTube(result.watch_url).thumbnail_url
videos.append(video)
return videos
def create_folders(video:bool=True):
path_to_file = DOWNLOAD_PATH + '/DJStudio'
file_exists = os.path.exists(path_to_file)
if not file_exists:
os.mkdir(path_to_file)
print(f'Created path: {path_to_file}', end='\n')
if video:
path_to_file = DOWNLOAD_PATH + '/DJStudio/VIDEO'
file_exists = os.path.exists(path_to_file)
if not file_exists:
os.mkdir(path_to_file)
print(f'Created path: {path_to_file}', end='\n')
else:
path_to_file = DOWNLOAD_PATH + '/DJStudio/MP3'
file_exists = os.path.exists(path_to_file)
if not file_exists:
os.mkdir(path_to_file)
print(f'Created path: {path_to_file}', end='\n')
return path_to_file
def download_video(video_id):
status = 'pending'
path_to_file = create_folders(video=True)
link=f"https://www.youtube.com/watch?v={video_id}"
try:
yt = YouTube(link)
except:
print("Connection Error")
try:
yt.streams.filter(progressive=True, file_extension="mp4").first().download(output_path=path_to_file)
status = 'approved'
except:
print("Some Error!")
status = 'rejected'
return status
def download_mp3(video_id):
status = 'pending'
video = f'https://www.youtube.com/watch?v={video_id}'
path_to_file = create_folders(video=False)
    try:
        audio_title = YouTube(video).title
        audio_mp4 = YouTube(video).streams.filter(only_audio=True).first().download(output_path=path_to_file)
        log = f' Downloaded: {audio_title}'
        status = 'approved'
    except:
        # audio_title may be unbound if the fetch failed, so report the URL
        log = f'Error: {video}'
        status = 'rejected'
    if status == 'approved':
        # Only rename when the download actually produced a file
        try:
            base, ext = os.path.splitext(audio_mp4)
            to_mp3 = base + '.mp3'
            os.rename(audio_mp4, to_mp3)
        except FileExistsError:
            os.remove(to_mp3)
            os.rename(audio_mp4, to_mp3)
            log = log.replace('Downloaded', 'Already exists')
    return status
def download_mp3_from_file(file):
df_videos = pd.read_excel(file)
column = df_videos.columns[0]
videos = df_videos[column].str.strip().tolist()
path_to_file = create_folders(video=False)
logs = []
for idx, video in enumerate(iterable=videos, start=1):
        try:
            audio_title = YouTube(video).title
            audio_mp4 = YouTube(video).streams.filter(only_audio=True).first().download(output_path=path_to_file)
            log = [idx, 'Downloaded', audio_title]
        except:
            # audio_title may be unbound if the fetch failed, so report the URL
            log = [idx, 'Error', video]
        if log[1] == 'Downloaded':
            # Only rename when this iteration's download produced a file
            try:
                base, ext = os.path.splitext(audio_mp4)
                to_mp3 = base + '.mp3'
                os.rename(audio_mp4, to_mp3)
            except FileExistsError:
                os.remove(to_mp3)
                os.rename(audio_mp4, to_mp3)
                log[1] = 'Already exists'
print(log[0], log[1], log[2])
logs.append(log)
return logs
def fetch_playlist(user_email):
user_playlist = Playlist.objects.all().filter(user_email=user_email)
playlist_serializer = PlaylistSerializer(user_playlist, many=True)
print('fetch_playlist: ', playlist_serializer.data)
return playlist_serializer.data
def add_to_playlist(video):
status = 'pending'
playlist_serializer = PlaylistSerializer(data=video)
if playlist_serializer.is_valid():
playlist_serializer.save()
status = 'approved'
else:
status = 'rejected'
return status
def delete_from_playlist(video_id, user_email):
status='pending'
try:
record = Playlist.objects.all().filter(video_id=video_id, user_email=user_email)
record.delete()
status = 'approved'
except Exception as e:
print(e)
status = 'rejected'
return status
def save_contact_message(contact_email, user_message, contact_date):
status = 'pending'
    # DRF serializers take input via data=, not field keyword arguments
    contact_serializer = ContactSerializer(data={'contact_email': contact_email, 'user_message': user_message, 'contact_date': contact_date})
if contact_serializer.is_valid():
contact_serializer.save()
status = 'approved'
else:
status = 'rejected'
return status
|
nikavgeros/DJ-Studio
|
backend/dj_studio/utils.py
|
utils.py
|
py
| 9,608 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pathlib.Path.home",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "requests.json",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "requests.json",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "pytube.Search",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "pytube.YouTube",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "pytube.YouTube",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "pytube.YouTube",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "pytube.YouTube",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "os.rename",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "os.rename",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "pytube.YouTube",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "pytube.YouTube",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 181,
"usage_type": "attribute"
},
{
"api_name": "os.rename",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "os.rename",
"line_number": 186,
"usage_type": "call"
}
] |
42483090749
|
# pylint: disable=redefined-outer-name, unused-argument
"""
test src/config.py
"""
from contextlib import contextmanager
import pytest
from src.config import ConfigLoader
@contextmanager
def mock_open(config_content):
"""
Create config from mock file
"""
try:
yield config_content
finally:
pass
@pytest.fixture
def mock_open_config(monkeypatch):
"""
Mock a config file
"""
config_content = """
key1: value1
key2: value2
"""
monkeypatch.setattr(
'builtins.open',
lambda *args,
**kwargs: mock_open(config_content))
@pytest.fixture
def config_loader(mock_open_config):
"""
Return an instance of mocked configloader
"""
return ConfigLoader()
def test_load_config(config_loader):
"""
Test that config is loaded and accessible
"""
# Load the config
config = config_loader.load_config()
# Assert that the config is loaded and of the expected type
assert isinstance(config, dict)
# Add additional assertions based on your config structure and content
assert "key1" in config
assert config["key1"] == "value1"
|
dom38/secret-distribution-operator
|
tests/config_test.py
|
config_test.py
|
py
| 1,161 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "contextlib.contextmanager",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "src.config.ConfigLoader",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 39,
"usage_type": "attribute"
}
] |
26555794429
|
from django.test import TestCase
from django.contrib.contenttypes.models import ContentType
from .models import RootObject, Uri
class ModelTestCase(TestCase):
    def setUp(self):
# Set up data for the whole TestCase
user_type = ContentType.objects.get(app_label="auth", model="user")
RootObject.objects.create(self_contenttype=user_type, name="foo")
RootObject.objects.create(self_contenttype=user_type)
def test_root_object(self):
rfoo = RootObject.objects.get(name="foo")
rnone = RootObject.objects.get(name="")
        self.assertEqual(str(rfoo), "foo")
        self.assertEqual(str(rnone), "no name provided")
    def test_uri(self):
        ufoo = Uri.objects.create()
        self.assertEqual(str(ufoo), "None")
|
acdh-oeaw/apis-core-rdf
|
apis_core/apis_metainfo/test_models.py
|
test_models.py
|
py
| 778 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "django.test.TestCase",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.contrib.contenttypes.models.ContentType.objects.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.contrib.contenttypes.models.ContentType.objects",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.contenttypes.models.ContentType",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "models.RootObject.objects.create",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.RootObject.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "models.RootObject",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "models.RootObject.objects.create",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "models.RootObject.objects",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "models.RootObject",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "models.RootObject.objects.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "models.RootObject.objects",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "models.RootObject",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "models.RootObject.objects.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "models.RootObject.objects",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "models.RootObject",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "models.Uri.objects.create",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "models.Uri.objects",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "models.Uri",
"line_number": 21,
"usage_type": "name"
}
] |
2831089261
|
import threading
from time import time
from time import sleep
import asyncio
import tornado.web
import tracemalloc
from hoverbotpy.controllers.constants import PORT
from hoverbotpy.drivers.driver_dummy import DummyHovercraftDriver
from hoverbotpy.drivers.threading_dummy import ThreadingDummy
from hoverbotpy.drivers.pi_pico_simple import SimpleFan
from hoverbotpy.drivers.pi_pico_pid import PIDCorrectedFan
tracemalloc.start()
TIMEOUT_TIME = .5  # seconds; compared against time.time() deltas in the watchdog
# Setup CLI arguments
import argparse
parser = argparse.ArgumentParser(
prog="WebController",
description="Web controller for PIE hovercraft.",
epilog="Written by Joseph Gilbert and Devlin Ih",
)
parser.add_argument(
"driver_type",
help=("Type of driver to use. Legal values:\n"
" dummy, dummy_threading, pico, pico_pid"),
)
args = parser.parse_args()
# Globals
# Why are these needed?
last_hover = 0
last_forward = 0
last_right = 0
last_left = 0
# Wish we were using Python 3.10 for pattern matching.
requested_driver = args.driver_type
if requested_driver == "dummy":
driver = DummyHovercraftDriver()
elif requested_driver == "threading_dummy":
driver = ThreadingDummy()
driver.run_loop()
elif requested_driver == "pico":
driver = SimpleFan()
elif requested_driver == "pico_pid":
driver = PIDCorrectedFan()
driver.run_loop()
else:
import sys
print(f"Error: {requested_driver} is not a valid driver type.")
sys.exit(-1)
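# A sketch of the Python 3.10+ structural pattern matching the comment above
# wishes for; kept commented out so the if/elif chain stays authoritative.
# match requested_driver:
#     case "dummy":
#         driver = DummyHovercraftDriver()
#     case "threading_dummy":
#         driver = ThreadingDummy()
#         driver.run_loop()
#     case "pico":
#         driver = SimpleFan()
#     case "pico_pid":
#         driver = PIDCorrectedFan()
#         driver.run_loop()
#     case _:
#         import sys
#         print(f"Error: {requested_driver} is not a valid driver type.")
#         sys.exit(-1)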
class Hover(tornado.web.RequestHandler):
def get(self):
global driver
global last_hover
print("hover click")
last_hover = time()
if driver.hover>0:
driver.set_hover_speed(0)
else:
driver.set_hover_speed(20)
pass
class Estop(tornado.web.RequestHandler):
def get(self):
global driver
driver.stop()
print("ESTOP ESTOP ESTOP")
class Forward(tornado.web.RequestHandler):
def get(self):
global last_forward
global driver
driver.set_forward_speed(60)
print("forward click")
print(driver.forward)
last_forward = time()
class NotForward(tornado.web.RequestHandler):
def get(self):
global last_forward
global driver
driver.set_forward_speed(0)
print("not forward click")
print(driver.forward)
last_forward = time()
class Reverse(tornado.web.RequestHandler):
def get(self):
global last_forward
global driver
driver.set_forward_speed(0)
print("rev click")
print(driver.forward)
#last_forward = time()#'''
class Right(tornado.web.RequestHandler):
def get(self):
global last_right
global driver
driver.set_steering_angle(-.75)
print("right click")
print(driver.steering)
last_right = time()
class NotRight(tornado.web.RequestHandler):
def get(self):
global last_right
global driver
driver.set_steering_angle(0)
print("not right click")
print(driver.steering)
last_right = time()
class Left(tornado.web.RequestHandler):
def get(self):
global last_left
global driver
driver.set_steering_angle(.75)
print("left click")
print(driver.steering)
last_left = time()
class NotLeft(tornado.web.RequestHandler):
def get(self):
global last_left
global driver
driver.set_steering_angle(0)
print("not left click")
print(driver.steering)
last_left = time()
class Index(tornado.web.RequestHandler):
def get(self):
#self.write("Hello, world")
self.render("web_controller.html")
def on_connection_close(self):
print("connection closed")
def make_app(): # might be better to use a websocket in future versions
return tornado.web.Application([
(r"/darkmode.css", tornado.web.StaticFileHandler,
{"path": "darkmode.css"},),
(r"/", Index),
(r"/hover/", Hover),
(r"/0_pressed/", Estop),
(r"/estop/", Estop),
(r"/forward/", Forward),
(r"/w_pressed/", Forward),
# there will be no half a pressed with this code
(r"/a_pressed/", Left),
(r"/d_pressed/", Right),
(r"/w_released/", NotForward),
# there will be no half a pressed with this code
(r"/a_released/", NotLeft),
(r"/d_released/", NotRight),
#(r"/h_pressed/", HoverToggle),
], debug=True)
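# A hedged sketch of the websocket alternative mentioned in the comment on
# make_app; ControlSocket and its message protocol are assumptions, not part
# of this project, so the sketch is left commented out.
# import tornado.websocket
# class ControlSocket(tornado.websocket.WebSocketHandler):
#     def on_message(self, message):
#         # e.g. "w_pressed" -> driver.set_forward_speed(60)
#         print("control message:", message)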
# async def
async def app_start():
app = make_app()
app.listen(PORT)
await asyncio.Event().wait()
async def web_app():
print("web server start")
app = make_app()
app.listen(PORT)
class WatchdogThread(threading.Thread):
def __init__(self, threadID, name, counter):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.counter = counter
def run(self):
print("watchdog thread started")
running = True
while running:
now = time()
# print(now)
if ((last_forward + TIMEOUT_TIME) < now) and driver.forward != 0:
print("forward timeout")
driver.set_forward_speed(0)
if (((last_left + TIMEOUT_TIME) < now) or ((last_right + TIMEOUT_TIME) < now))and driver.steering != 0:
print("turn timeout")
driver.set_steering_angle(0)
from hoverbotpy.drivers.driver_dummy import DummyHovercraftDriver
if __name__ == "__main__":
driver = DummyHovercraftDriver()
motor_watchdog_thread = WatchdogThread(1, "watchdog_1", 1)
    motor_watchdog_thread.daemon = True
motor_watchdog_thread.start()
asyncio.run(app_start())
|
olincollege/hoverbois
|
hoverbotpy/src/hoverbotpy/controllers/web_controller.py
|
web_controller.py
|
py
| 5,781 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "tracemalloc.start",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "hoverbotpy.drivers.driver_dummy.DummyHovercraftDriver",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "hoverbotpy.drivers.threading_dummy.ThreadingDummy",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "hoverbotpy.drivers.pi_pico_simple.SimpleFan",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "hoverbotpy.drivers.pi_pico_pid.PIDCorrectedFan",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "tornado.web.web",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "tornado.web",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "tornado.web.web",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "tornado.web",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "tornado.web.web",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "tornado.web",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "tornado.web.web",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "tornado.web",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "tornado.web.web",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "tornado.web",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "tornado.web.web",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "tornado.web",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "tornado.web.web",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "tornado.web",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "tornado.web.web",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "tornado.web",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "tornado.web.web",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "tornado.web",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "tornado.web.web",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "tornado.web",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "tornado.web.web.Application",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "tornado.web.web",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "tornado.web",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "tornado.web.web",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "tornado.web",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "hoverbotpy.controllers.constants.PORT",
"line_number": 184,
"usage_type": "argument"
},
{
"api_name": "asyncio.Event",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "hoverbotpy.controllers.constants.PORT",
"line_number": 191,
"usage_type": "argument"
},
{
"api_name": "threading.Thread",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "threading.Thread.__init__",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "hoverbotpy.drivers.driver_dummy.DummyHovercraftDriver",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "asyncio.run",
"line_number": 225,
"usage_type": "call"
}
] |
28960969271
|
import gpudb
import collections
import time
import pandas as pd
pd.options.display.max_columns = 100
pd.set_option('display.width', 10000)
# init
TABLE = "risk_inputs"
COLLECTION = "RISK"
NEW_TABLE = "bs_stream"
HOST = "<ipaddress>"
ENCODING = "binary"
PORT = "9191"
DATA_PACK = 1
INGEST_FREQ = 3
"Execute python scripts on Kinetica servers using"
"/opt/gpudb/bin/gpudb_python"
# Establish connection to database with necessary credentials
# Pull data from Kinetica and put it directly into a Pandas df
h_db = gpudb.GPUdb(
encoding=ENCODING,
host=HOST,
port=PORT,
username="<username>",
password="<password>")
if h_db.has_table(table_name=TABLE)['table_exists']:
print("Table successfully reached.")
else:
print("Table not found.")
# Pull data from Kinetica and put it directly into a Pandas df
data = h_db.get_records(table_name=TABLE,offset=0,limit=gpudb.GPUdb.END_OF_SET,encoding=ENCODING)
df = pd.DataFrame(gpudb.GPUdbRecord.decode_binary_data(data["type_schema"], data["records_binary"]))
# Column instantiation for the target table
columns = [
["symbol","string"],
["spot_price","float"],
["option_type","string"],
["exposure","string"],
["strike_price","float"],
["maturity_y","int"],
["maturity_m","int"],
["maturity_d","int"],
["calendar","string"],
["day_count","string"],
["risk_free_rate","float"],
["dividend_rate","float"],
["calc_dt_y","int"],
["calc_dt_m","int"],
["calc_dt_d","int"],
["volatility","float"]
]
# Clear the table at run time the create the table
no_error_option = {"no_error_if_not_exists": "true"}
h_db.clear_table(table_name=NEW_TABLE, options=no_error_option)
collection_option_object = gpudb.GPUdbTableOptions.default().collection_name(COLLECTION)
print("Table cleared")
try:
table_gps_obj = gpudb.GPUdbTable(
columns,
NEW_TABLE,
collection_option_object,
h_db
)
print("Table created succesfully")
except gpudb.GPUdbException as e:
print("Table creation failure: {}".format(str(e)))
print(df.head(5))
index = 0
h_db = gpudb.GPUdb(encoding=ENCODING,host=HOST,port=PORT)
# Implement the GpuDB table class instead of manual JSON
my_type = """
{
"type": "record",
"name": "type_name",
"fields": [
{"name": "symbol","type": "string"},
{"name": "spot_price","type": "float"},
{"name": "option_type","type": "string"},
{"name": "exposure","type": "string"},
{"name": "strike_price","type": "float"},
{"name": "maturity_y","type": "int"},
{"name": "maturity_m","type": "int"},
{"name": "maturity_d","type": "int"},
{"name": "calendar","type": "string"},
{"name": "day_count","type": "string"},
{"name": "risk_free_rate","type": "float"},
{"name": "dividend_rate","type": "float"},
{"name": "calc_dt_y","type": "int"},
{"name": "calc_dt_m","type": "int"},
{"name": "calc_dt_d","type": "int"},
{"name": "volatility","type": "float"}
]
}""".replace('\n', '').replace(' ', '')
def stream_ingest(df):
"""This method parses the df and inserts the data into Kinetica row by row
with a 3 second delay in between rows"""
global index
i=0
coords= []
datum = collections.OrderedDict()
for index, row in df.iterrows():
datum["symbol"]=str(df.iloc[index,0])
datum["spot_price"]=float(df.iloc[index,1])
datum["option_type"] = str(df.iloc[index, 4])
datum["exposure"] = str(df.iloc[index, 6])
datum["strike_price"] = float(df.iloc[index, 7])
datum["maturity_y"] = int(df.iloc[index, 8])
datum["maturity_m"] = int(df.iloc[index, 9])
datum["maturity_d"] = int(df.iloc[index, 10])
datum["calendar"] = str(df.iloc[index, 11])
datum["day_count"] = str(df.iloc[index, 12])
datum["risk_free_rate"] = float(df.iloc[index, 13])
datum["dividend_rate"] = float(df.iloc[index, 14])
datum["calc_dt_y"] = int(df.iloc[index, 15])
datum["calc_dt_m"] = int(df.iloc[index, 16])
datum["calc_dt_d"] = int(df.iloc[index, 17])
datum["volatility"] = float(df.iloc[index, 18])
coords.append(h_db.encode_datum(my_type, datum))
i= i + 1
# Pump data in batches
if i % DATA_PACK == 0:
response = h_db.insert_records(
table_name=NEW_TABLE,
data=coords,
list_encoding=ENCODING,
options={})
coords = []
time.sleep(INGEST_FREQ)
print(response)
# Flush the last batch
if i % DATA_PACK != 0:
response = h_db.insert_records(
table_name=NEW_TABLE,
data=coords,
list_encoding=ENCODING,
options={})
# 3 second delay to mimic real time ingest
time.sleep(INGEST_FREQ)
print(response)
return coords
if __name__ == "__main__":
stream_ingest(df)
|
nickalonso/Utilities
|
stream.py
|
stream.py
|
py
| 5,101 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.options",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pandas.set_option",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "gpudb.GPUdb",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "gpudb.GPUdb",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "gpudb.GPUdbRecord.decode_binary_data",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "gpudb.GPUdbRecord",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "gpudb.GPUdbTableOptions.default",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "gpudb.GPUdbTableOptions",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "gpudb.GPUdbTable",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "gpudb.GPUdbException",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "gpudb.GPUdb",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 152,
"usage_type": "call"
}
] |
15932153331
|
"""
A simple cache system for storing such things as project hierarchies and templates.
By default uses diskcache for simpler setup and backward compatibility
unless 'memcached' is set in the 'cache' section of the
config, in which case use that.
"""
import logging
from hydra_base import config as hydraconfig
import tempfile
log = logging.getLogger(__name__)
global cache
if hydraconfig.get('cache', 'type') != "memcached":
import diskcache as dc
cache = dc.Cache(tempfile.gettempdir())
elif hydraconfig.get('cache', 'type') == 'memcached':
try:
import pylibmc
cache = pylibmc.Client([hydraconfig.get('cache', 'host', '127.0.0.1')], binary=True)
except ModuleNotFoundError:
log.warning("Unable to find pylibmc. Defaulting to diskcache.")
import diskcache as dc
cache = dc.Cache(tempfile.gettempdir())
def clear_cache():
if hasattr(cache, 'flush_all'):
cache.flush_all() # memcache
else:
cache.clear() # diskcache
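# Hedged usage sketch: both diskcache.Cache and pylibmc.Client expose
# set()/get(), so callers can treat `cache` uniformly (the key and value
# here are illustrative).
# cache.set('template:1', template_dict)
# template_dict = cache.get('template:1')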
|
hydraplatform/hydra-base
|
hydra_base/lib/cache.py
|
cache.py
|
py
| 1,015 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "hydra_base.config.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "hydra_base.config",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "diskcache.Cache",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tempfile.gettempdir",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "hydra_base.config.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "hydra_base.config",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pylibmc.Client",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "hydra_base.config.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "hydra_base.config",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "diskcache.Cache",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "tempfile.gettempdir",
"line_number": 25,
"usage_type": "call"
}
] |
30513785226
|
from gaesessions import get_current_session
import logging
import levr_classes as levr
import levr_encrypt as enc
import base64
from google.appengine.api import urlfetch,taskqueue
import json
import urllib
from datetime import datetime, timedelta
def login_check(self):
''' for merchants ONLY
    check if logged in, and return the header data if so. if not, bounce to the login page'''
session = get_current_session()
logging.debug(session)
if session.has_key('loggedIn') == False or session['loggedIn'] == False:
#not logged in, bounce to login page
logging.info('Not logged in. . .Bouncing!')
self.redirect('/merchants/login')
elif session.has_key('loggedIn') == True and session['loggedIn'] == True:
uid = session['uid']
owner_of = session['owner_of']
logging.info(uid)
headerData = {
'loggedIn' : session['loggedIn'],
'uid' : enc.decrypt_key(uid),
'owner_of' : enc.decrypt_key(owner_of),
'validated' : session['validated']
}
#return user metadata.
return headerData
return
def login_check_mobile(self):
session = get_current_session()
logging.debug(session)
if session.has_key('loggedIn') == False or session['loggedIn'] == False:
#not logged in, bounce to login page
logging.info('Not logged in. . .Bouncing!')
self.redirect('/merchants/mobile/login')
elif session.has_key('loggedIn') == True and session['loggedIn'] == True:
uid = session['uid']
owner_of = session['owner_of']
meta = {
'uid' : enc.decrypt_key(uid),
'owner_of' : enc.decrypt_key(owner_of)
}
logging.info(meta)
#return user metadata.
return meta
def validated_check(user):
'''checks if this user has any linked businesses or not. does not yet return these businesses'''
'''num_bus = user.businesses.count()
if num_bus > 0:
return True
else:
return False'''
if user.verified_owner == True:
return True
else:
return False
def create_deal(deal,business,owner):
    '''deal: a deal object
    business: the business the deal is attached to
    owner: the merchant to be set as the owner of the deal'''
# TODO: remove this. Deals are being created in too many places and some are missing info
#init tags
tags = []
#add tags from the merchant
tags.extend(business.create_tags())
logging.info(tags)
#add tags from deal stuff
tags.extend(levr.tagger(deal.deal_text))
logging.info(tags)
tags.extend(levr.tagger(deal.description))
logging.info(tags)
deal.tags = tags
#add some other miscellaneous information
deal.origin = 'merchant'
deal.pin_color = 'green'
#copy info over from business
deal.business_name = business.business_name
deal.businessID = str(business.key())
deal.business = business
deal.geo_point = business.geo_point
deal.geo_hash = business.geo_hash
deal.deal_status='active'
deal.date_end = None
deal.put()
logging.info(levr.log_model_props(deal))
#fire off a task to do the image rotation stuff
task_params = {
'blob_key' : str(deal.img.key())
}
taskqueue.add(url='/tasks/checkImageRotationTask',payload=json.dumps(task_params))
return deal
def call_merchant(business):
#call the business
#twilio credentials
sid = 'AC4880dbd1ff355288728be2c5f5f7406b'
token = 'ea7cce49e3bb805b04d00f76253f9f2b'
twiliourl='https://api.twilio.com/2010-04-01/Accounts/AC4880dbd1ff355288728be2c5f5f7406b/Calls.json'
auth_header = 'Basic '+base64.b64encode(sid+':'+token)
logging.info(auth_header)
request = {'From':'+16173608582',
'To':'+16052610083',
'Url':'http://www.levr.com/merchants/verify/answer',
'StatusCallback':'http://www.levr.com/merchants/verify/statusCallback'}
result = urlfetch.fetch(url=twiliourl,
payload=urllib.urlencode(request),
method=urlfetch.POST,
headers={'Authorization':auth_header})
logging.info(levr.log_dict(result.__dict__))
def check_ua(self):
uastring = str(self.request.headers['user-agent'])
logging.info(uastring)
if 'mobile' in uastring.lower():
logging.info('Serving mobile version')
return 'mobile'
else:
logging.info('Serving desktop version')
return 'desktop'
def mobile_ua_bounce(self):
uastring = str(self.request.headers['user-agent'])
logging.info(uastring)
if 'mobile' in uastring.lower():
logging.info('Serving mobile version')
else:
        logging.info('Not a mobile device - bouncing to /merchants/mobileonly')
self.redirect('/merchants/mobileonly')
|
holmesal/levr-2
|
merchant_utils.py
|
merchant_utils.py
|
py
| 4,377 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "gaesessions.get_current_session",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "levr_encrypt.decrypt_key",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "levr_encrypt.decrypt_key",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "gaesessions.get_current_session",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "levr_encrypt.decrypt_key",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "levr_encrypt.decrypt_key",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "levr_classes.tagger",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "levr_classes.tagger",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "levr_classes.log_model_props",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "google.appengine.api.taskqueue.add",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "google.appengine.api.taskqueue",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "base64.b64encode",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "google.appengine.api.urlfetch.fetch",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "google.appengine.api.urlfetch",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "urllib.urlencode",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "google.appengine.api.urlfetch.POST",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "google.appengine.api.urlfetch",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "levr_classes.log_dict",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 162,
"usage_type": "call"
}
] |
24582961875
|
import os
# accessible as a variable in index.html:
from sqlalchemy import *
from sqlalchemy.pool import NullPool
from flask import Flask, request, render_template, g, redirect, Response
from flask import redirect, url_for
tmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
app = Flask(__name__, template_folder=tmpl_dir)
DATABASE_USERNAME = "yw3930"
DATABASE_PASSWRD = "3089"
DATABASE_HOST = "34.148.107.47" # change to 34.28.53.86 if you used database 2 for part 2
DATABASEURI = f"postgresql://{DATABASE_USERNAME}:{DATABASE_PASSWRD}@{DATABASE_HOST}/project1"
engine = create_engine(DATABASEURI)
#
# try:
# connection = engine.connect()
# # get column names and data for drug table
# select_query = "SELECT * FROM drug;"
# result = connection.execute(text(select_query))
# columns = result.keys()
# data = result.fetchall()
# print("Columns in drug table:")
# print(columns)
# print("Data in drug table:")
# for row in data:
# print(row)
#
# print('---------------------------------------')
#
# # get column names and data for pharmacy_storage table
# select_query = "SELECT * FROM pharmacy_storage;"
# result = connection.execute(text(select_query))
# columns = result.keys()
# data = result.fetchall()
# print("Columns in pharmacy_storage table:")
# print(columns)
# print("Data in pharmacy_storage table:")
# for row in data:
# print(row)
# connection.close()
#
# except Exception as e:
# print(f"Error connecting to database: {e}")
@app.before_request
def before_request():
"""
This function is run at the beginning of every web request
(every time you enter an address in the web browser).
    We use it to set up a database connection that can be used throughout the request.
The variable g is globally accessible.
"""
try:
g.conn = engine.connect()
    except Exception:
print("uh oh, problem connecting to database")
import traceback; traceback.print_exc()
g.conn = None
@app.teardown_request
def teardown_request(exception):
"""
At the end of the web request, this makes sure to close the database connection.
If you don't, the database could run out of memory!
"""
try:
g.conn.close()
except Exception as e:
pass
@app.route('/')
def home():
return render_template('home.html')
@app.route('/search', methods=['POST'])
def search():
drug_id = request.form['drug_id']
select_query = "SELECT * FROM pharmacy_storage WHERE drug_id = " + str(drug_id)
cursor = g.conn.execute(text(select_query))
if not cursor.rowcount:
return render_template("error.html", drug_id=drug_id)
else:
results = [result for result in cursor]
cursor.close()
# print(results)
return render_template("drug_information.html", drug_id=drug_id, drug_name=results[0][0], category=results[0][1], safety_stock=results[0][2], dosage=results[0][3])
@app.route('/add_drug', methods=['GET', 'POST'])
def add_drug():
if request.method == 'POST':
# get form data
drug_id = request.form['drug_id']
drug_name = request.form['drug_name']
quantity = int(request.form['quantity'])
expire_date = request.form['expire_date']
# check if drug exists in drug table
select_query = f"SELECT * FROM drug WHERE drug_id = '{drug_id}' and drug_name = '{drug_name}'"
cursor = g.conn.execute(text(select_query))
if not cursor.rowcount:
cursor.close()
error_message = f"Drug with ID '{drug_id}' or name '{drug_name}' does not exist in the database."
return render_template("error.html", error_message=error_message)
# check if drug exists in pharmacy storage
select_query = f"SELECT * FROM pharmacy_storage WHERE drug_id = '{drug_id}' and drug_name = '{drug_name}' and expire_date = '{expire_date}'"
cursor = g.conn.execute(text(select_query))
if cursor.rowcount:
# if drug exists in storage with same expiration date, add quantity
results = [result for result in cursor]
print(results)
new_quantity = int(results[0][5]) + int(quantity)
update_query = f"UPDATE pharmacy_storage SET quantity = {new_quantity} WHERE drug_id = '{drug_id}' and drug_name = '{drug_name}' and expire_date = '{expire_date}'"
g.conn.execute(text(update_query))
else:
# if drug doesn't exist in storage, add new record
insert_query = f"INSERT INTO pharmacy_storage (drug_id, drug_name, expire_date, quantity) VALUES ('{drug_id}', '{drug_name}', '{expire_date}', {quantity})"
g.conn.execute(text(insert_query))
cursor.close()
# render updated pharmacy storage page
select_query = "SELECT * FROM pharmacy_storage ORDER BY drug_id, drug_name, expire_date"
cursor = g.conn.execute(text(select_query))
storage_results = [result for result in cursor]
cursor.close()
g.conn.commit()
return render_template("pharmacy_storage.html", storage_results=storage_results)
else:
return render_template("add_drug.html")
@app.route('/take_drug', methods=['GET', 'POST'])
def take_drug():
if request.method == 'POST':
# get form data
drug_id = request.form['drug_id']
drug_name = request.form['drug_name']
quantity = int(request.form['quantity'])
expire_date = request.form['expire_date']
# check if drug exists in drug table
select_query = f"SELECT * FROM drug WHERE drug_id = '{drug_id}' and drug_name = '{drug_name}'"
cursor = g.conn.execute(text(select_query))
if not cursor.rowcount:
cursor.close()
error_message = f"Drug with ID '{drug_id}' or name '{drug_name}' does not exist in the database."
return render_template("error.html", error_message=error_message)
# check if drug exists in pharmacy storage
select_query = f"SELECT * FROM pharmacy_storage WHERE drug_id = '{drug_id}' and drug_name = '{drug_name}' and expire_date = '{expire_date}'"
cursor = g.conn.execute(text(select_query))
if not cursor.rowcount:
cursor.close()
error_message = f"Drug with ID '{drug_id}', name '{drug_name}', and expiration date '{expire_date}' does not exist in the pharmacy storage."
return render_template("error.html", error_message=error_message)
results = [result for result in cursor]
cursor.close()
# check if quantity is sufficient
if int(results[0][5]) < quantity:
error_message = f"Insufficient quantity of drug with ID '{drug_id}', name '{drug_name}', and expiration date '{expire_date}' in the pharmacy storage."
return render_template("error.html", error_message=error_message)
# calculate new quantity and update database
new_quantity = int(results[0][5]) - quantity
if new_quantity < 0:
error_message = f"Invalid quantity of drug with ID '{drug_id}', name '{drug_name}', and expiration date '{expire_date}'. Quantity after taking cannot be less than 0."
return render_template("error.html", error_message=error_message)
elif new_quantity == 0:
delete_query = f"DELETE FROM pharmacy_storage WHERE drug_id = '{drug_id}' and drug_name = '{drug_name}' and expire_date = '{expire_date}'"
g.conn.execute(text(delete_query))
else:
update_query = f"UPDATE pharmacy_storage SET quantity = {new_quantity} WHERE drug_id = '{drug_id}' and drug_name = '{drug_name}' and expire_date = '{expire_date}'"
g.conn.execute(text(update_query))
# render updated pharmacy storage page
select_query = "SELECT * FROM pharmacy_storage ORDER BY drug_id, drug_name, expire_date"
cursor = g.conn.execute(text(select_query))
storage_results = [result for result in cursor]
cursor.close()
g.conn.commit()
return render_template("pharmacy_storage.html", storage_results=storage_results)
else:
return render_template("take_drug.html")
@app.route('/pharmacy_storage')
def pharmacy_storage():
select_query = "SELECT * FROM pharmacy_storage;"
cursor = g.conn.execute(text(select_query))
storage_results = [result for result in cursor]
cursor.close()
return render_template('pharmacy_storage.html', storage_results=storage_results)
if __name__ == "__main__":
import click
@click.command()
@click.option('--debug', is_flag=True)
@click.option('--threaded', is_flag=True)
@click.argument('HOST', default='0.0.0.0')
@click.argument('PORT', default=8111, type=int)
def run(debug, threaded, host, port):
HOST, PORT = host, port
print("running on %s:%d" % (HOST, PORT))
app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)
run()
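    # Example invocations (the defaults come from the click arguments above):
    #   python server.py                        -> serves on 0.0.0.0:8111
    #   python server.py --debug 127.0.0.1 5000 -> debug server on localhost:5000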
|
YueWang417/w4111-proj1-group69
|
webserver/server.py
|
server.py
|
py
| 9,229 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.g.conn",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "traceback.print_exc",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "flask.g.conn",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "flask.g.conn.close",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "flask.g.conn",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "flask.g.conn.execute",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "flask.g.conn",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "flask.g.conn.execute",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "flask.g.conn",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "flask.g.conn.execute",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "flask.g.conn",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "flask.g.conn.execute",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "flask.g.conn",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "flask.g.conn.execute",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "flask.g.conn",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "flask.g.conn.execute",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "flask.g.conn",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "flask.g.conn.commit",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "flask.g.conn",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "flask.g.conn.execute",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "flask.g.conn",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "flask.g.conn.execute",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "flask.g.conn",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "flask.g.conn.execute",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "flask.g.conn",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "flask.g.conn.execute",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "flask.g.conn",
"line_number": 172,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "flask.g.conn.execute",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "flask.g.conn",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "flask.g.conn.commit",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "flask.g.conn",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "flask.g.conn.execute",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "flask.g.conn",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 186,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "click.command",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "click.argument",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "click.argument",
"line_number": 197,
"usage_type": "call"
}
] |
72698063549
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 21 10:29:50 2018
@author: qwzhou
"""
"""
=======================================
plot line and dash
=======================================
ASM across the site
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
from matplotlib.backends.backend_pdf import PdfPages
#x = np.linspace(0, 10, 500)
y = []
z = []
k = []
def readfile(filename, y, z, k):
nline=0
with open(filename, 'r') as fig:
for line in fig:
data = line.split()
if nline == 0:
y.append(list(map(float,data[1:])))
elif nline == 1:
z.append(list(map(float,data[1:])))
elif nline == 2:
k.append(list(map(float,data[1:])))
nline=nline+1
#
#
#readfile("/Users/qiangweizhou/K562.asmonGeneend.Methy.1.txt", y,z, k)
#readfile("/Users/qiangweizhou/IMR90.asmonGeneend.Methy.1.txt", y,z, k)
#readfile("/Users/qiangweizhou/HepG2.asmonGeneend.Methy.1.txt", y,z, k)
#readfile("/Users/qiangweizhou/A549.asmonGeneend.Methy.1.txt", y,z, k)
#readfile("/Users/qiangweizhou/HUES64.asmonGeneend.Methy.1.txt", y,z, k)
#readfile("/Users/qiangweizhou/GM12878.asmonGeneend.Methy.1.txt", y,z, k)
readfile("/Users/qiangweizhou/A549.asmonExpressionGene.Methy.1.txt.Aver", y,z, k)
readfile("/Users/qiangweizhou/A549.asmonUnGene.Methy.1.txt.Aver", y,z, k)
x = np.linspace(1, len(y[0]), len(y[0]))
label=['Expression', 'Unexpression']
#label=['K562', 'IMR90', 'HepG2', 'A549', 'HUES64', 'GM12878']
filename="ASMonGeneExpression.all"
filename2=filename + ".pdf"
nsample=2
legend=1
percentage=1
cutoff=6
######################################################
def find_matrix_max_value(data_matrix):
new_data=[]
for i in range(len(data_matrix)):
new_data.append(max(data_matrix[i]))
return max(new_data)
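# A hedged equivalent of the helper above (defined for illustration, not used
# by the script): the global maximum of a list of lists in one expression.
def find_matrix_max_value_oneline(data_matrix):
    return max(map(max, data_matrix))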
xlen=len(y[0])
print(xlen, xlen/2)
#######################################################
def plotline(x, y, title, label, nsample, legend, filename):
prosamp = 0
fig, ax = plt.subplots()
while prosamp < nsample:
y[prosamp] = [i*percentage for i in y[prosamp]]
#for i,item in enumerate(y[prosamp]):
# if item >6:
# y[prosamp][i]=6
ax.plot(x, y[prosamp], label=label[prosamp]) #,color="dodgerblue"
prosamp = prosamp +1
#dashes = [10, 5, 100, 5]
#line1.set_dashes(dashes) # dash line
# Remove the plot frame lines. They are unnecessary here.
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['right'].set_visible(False)
#ax.spines['left'].set_visible(False)
ax.xaxis.set_major_formatter(plt.FuncFormatter('{:.0f}'.format))
#ax.yaxis.set_major_formatter(plt.FuncFormatter('{:.1f}%'.format))
#plt.grid(True, 'major', 'y', ls='--', lw=.5, c='k', alpha=.3)
plt.tick_params(axis='both', which='both', bottom=False, top=False,
labelbottom=True, left=False, right=False, labelleft=True)
#ax.axes.get_xaxis().set_visible(False)
if legend == 1:
plt.legend(loc='best', prop={'size': 12}) # legend , loc is the legend location
#plt.axvline(x=xlen/2-1, ls="--", color='black')
plt.axhline(y=0, xmin=0.05, xmax=0.5, linewidth=8, color='gray')
plt.axhline(y=0, xmin=0.5, xmax=0.505, linewidth=8, color='k' )
plt.axhline(y=0, xmin=0.505, xmax=0.95, linewidth=8, color='gray')
scale_ls = [1,len(x)/2,len(x)]
index_ls = ['-200bp','Start', "+200bp"]
plt.xticks(scale_ls,index_ls,color='k', size=15)
ax.set_title(title,size=15)
ax.set_ylabel('ASM distribution',size=15)
#ax.set_ylabel('Methylation Level',size=15)
maxy = 100
    maxy = find_matrix_max_value(y) * 1.1
ax.set_ylim(0.0, maxy)
#plt.show()
#filename2=filename + ".png"
#plt.savefig(filename2, bbox_inches='tight')
#label = ['IMR90', 'A549', 'H1', 'GM12878', 'encodeA549']
pdf = PdfPages(filename2)
plotline(x, y, "ASM distribution", label, nsample, legend, filename+".CG")
#plotline(x, y, "CG methylation distribution", label, nsample, legend, filename+".CG")
legend=0
#plotline(x, z, "CHG methylation distribution", label, nsample, legend, filename+".CHG")
#plotline(x, k, "CHH methylation distribution", label, nsample, legend, filename+".CHH")
pdf.savefig()
pdf.close()
'''
fig, ax = plt.subplots()
line1, = ax.plot(x, k1, label='IMR90')
#dashes = [10, 5, 100, 5]
#line1.set_dashes(dashes) # dash line
line2, = ax.plot(x, k2, label='A549')
# several dash line example
#line3, = ax.plot(x, y3, ':', label='..style')
#line4, = ax.plot(x,-np.sin(x)/2, '-.', label='-.style')
#line5, = ax.plot(x,np.sin(x)/4, '--', label='--style')
#line6, = ax.plot(x,-np.sin(x)/4, '^', label='--style')
#plt.axis('off')
#plt.xticks([])
#plt.yticks([])
#ax.axes.get_yaxis().set_visible(False)
# Remove the plot frame lines. They are unnecessary here.
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['right'].set_visible(False)
#ax.spines['left'].set_visible(False)
#ax.axes.get_xaxis().set_visible(False)
plt.legend(loc='center right') # legend , loc is the legend location
plt.axhline(y=0, xmin=0.05, xmax=0.35, linewidth=8, color='gray')
plt.axhline(y=0, xmin=0.65, xmax=0.35, linewidth=8, color='k' )
plt.axhline(y=0, xmin=0.65, xmax=0.95, linewidth=8, color='gray')
scale_ls = [1,39,76,117]
index_ls = ['upstream','Start','End', "downstream"]
plt.xticks(scale_ls,index_ls,color='k', size=15)
#ax.set_title('Box plot')
ax.set_ylabel('Methylation Level',size=15)
maxy=max(k1)
if max(k2) > maxy:
maxy = max(k2)*1.1
else:
maxy = maxy*1.1
ax.set_ylim(0.0, maxy)
#plt.savefig("test.png")
plt.show()
'''
|
ZhouQiangwei/MethHaploScripts
|
plotASM-expressiongene.py
|
plotASM-expressiongene.py
|
py
| 5,901 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.linspace",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.FuncFormatter",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tick_params",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axhline",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axhline",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axhline",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "matplotlib.backends.backend_pdf.PdfPages",
"line_number": 116,
"usage_type": "call"
}
] |
30414879190
|
"""SQLAlchemy models for quiz and quiz questions"""
from datetime import datetime
from models.model import db
from models.quiz_attempt import QuestionAttempt
import sys
sys.path.append('../')
from generator.generator import create_quiz
def search_slug(context):
"""Turns the plant slug into a string suitable for Wikipedia or Google search"""
return context.get_current_parameters()['slug'].replace('-', '+')
def num_by_family(context):
"""Gives number to quiz based on how many quizzes of the same family
are already in the database"""
family = context.get_current_parameters()['family']
return len(Quiz.query.filter(Quiz.family==family).all()) + 1
class Quiz(db.Model):
"""Quiz"""
__tablename__ = 'quizzes'
id = db.Column(
db.Integer,
primary_key=True
)
num_questions = db.Column(db.Integer)
family = db.Column(
db.Text,
nullable=False
)
num_by_family = db.Column(
db.Integer,
default=num_by_family
)
created_on = db.Column(
db.DateTime,
default = datetime.utcnow
)
created_by = db.Column(
db.Text,
default = 'system'
)
questions = db.relationship(
'Question',
secondary="quiz_questions",
backref='part_of'
)
attempts = db.relationship(
'QuizAttempt',
backref='quiz'
)
@classmethod
def create(cls, family):
"""Create new quiz from identified family.
If error in quiz creation, return False"""
questions = create_quiz(family)
if not questions:
return False
quiz = Quiz(num_questions=10, family=family)
db.session.add(quiz)
db.session.commit()
for question in questions:
new_question = Question(**question)
new_question.family = family
db.session.add(new_question)
db.session.commit()
quiz.questions.append(new_question)
db.session.commit()
return quiz
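def _example_create_quiz():
    """A hedged usage sketch, not part of the original module: the family name
    is hypothetical, and Quiz.create() returns False when the generator cannot
    build a question set for it."""
    quiz = Quiz.create("Rosaceae")
    if quiz:
        print(quiz.num_by_family, len(quiz.questions))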
class Question(db.Model):
"""Quiz question"""
__tablename__ = 'questions'
id = db.Column(
db.Integer,
primary_key=True
)
url = db.Column(
db.Text
)
correct_answer = db.Column(
db.Text
)
wrong_answer_1 = db.Column(
db.Text
)
wrong_answer_2 = db.Column(
db.Text
)
wrong_answer_3 = db.Column(
db.Text
)
slug = db.Column(
db.Text
)
search_slug = db.Column(
db.Text,
default=search_slug
)
attempts = db.relationship(
'QuestionAttempt',
backref='question'
)
class QuizQuestion(db.Model):
"""Map quiz questions to a quiz"""
__tablename__ = 'quiz_questions'
id = db.Column(
db.Integer,
primary_key=True
)
question_id = db.Column(
db.Integer,
db.ForeignKey('questions.id', ondelete='cascade')
)
quiz_id = db.Column(
db.Integer,
db.ForeignKey('quizzes.id', ondelete='cascade')
)
|
lauramoon/capstone-1
|
models/quiz.py
|
quiz.py
|
py
| 3,101 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "models.model.db.Model",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "models.model.db",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "models.model.db.Column",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "models.model.db",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "models.model.db.Integer",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "models.model.db",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "models.model.db.Column",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "models.model.db",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "models.model.db.Integer",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "models.model.db.Column",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "models.model.db",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "models.model.db.Text",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "models.model.db",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "models.model.db.Column",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "models.model.db",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "models.model.db.Integer",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "models.model.db",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "models.model.db.Column",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "models.model.db",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "models.model.db.DateTime",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "models.model.db",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "models.model.db.Column",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "models.model.db",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "models.model.db.Text",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "models.model.db",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "models.model.db.relationship",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "models.model.db",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "models.model.db.relationship",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "models.model.db",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "generator.generator.create_quiz",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "models.model.db.session.add",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "models.model.db.session",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "models.model.db",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "models.model.db.session.commit",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "models.model.db.session",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "models.model.db",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "models.model.db.session.add",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "models.model.db.session",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "models.model.db",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "models.model.db.session.commit",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "models.model.db.session",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "models.model.db",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "models.model.db.session.commit",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "models.model.db.session",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "models.model.db",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "models.model.db.Model",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "models.model.db",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "models.model.db.Column",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "models.model.db",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "models.model.db.Integer",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "models.model.db",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "models.model.db.Column",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "models.model.db",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "models.model.db.Text",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "models.model.db",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "models.model.db.Column",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "models.model.db",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "models.model.db.Text",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "models.model.db",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "models.model.db.Column",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "models.model.db",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "models.model.db.Text",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "models.model.db",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "models.model.db.Column",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "models.model.db",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "models.model.db.Text",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "models.model.db",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "models.model.db.Column",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "models.model.db",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "models.model.db.Text",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "models.model.db",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "models.model.db.Column",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "models.model.db",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "models.model.db.Text",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "models.model.db",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "models.model.db.Column",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "models.model.db",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "models.model.db.Text",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "models.model.db",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "models.model.db.relationship",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "models.model.db",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "models.model.db.Model",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "models.model.db",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "models.model.db.Column",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "models.model.db",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "models.model.db.Integer",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "models.model.db",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "models.model.db.Column",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "models.model.db",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "models.model.db.Integer",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "models.model.db",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "models.model.db.ForeignKey",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "models.model.db",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "models.model.db.Column",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "models.model.db",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "models.model.db.Integer",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "models.model.db",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "models.model.db.ForeignKey",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "models.model.db",
"line_number": 154,
"usage_type": "name"
}
] |
30217438684
|
import seaborn as sns
import matplotlib.pyplot as plt
#%matplotlib inline
tips = sns.load_dataset('tips')
fluege = sns.load_dataset('flights')
### Matrix plots ###
# Heatmap
# For a heatmap to work well, your data should already be in matrix form. sns.heatmap() then handles the color-coding of that data
# Matrix of correlation data
tips.corr() # corr automatically detects numeric columns & drops categorical columns.
sns.heatmap(tips.corr()) # render the heatmap from the correlation matrix
plt.show()
sns.heatmap(tips.corr(),cmap='coolwarm',annot=True) # cmap: color scheme, annot: print the correlation value inside each cell
plt.show()
# Build a pivot table to arrange the data by year
pvfluege = fluege.pivot_table(values='passengers',index='month',columns='year') # values: passenger counts; index: the rows; columns: the columns
sns.heatmap(pvfluege,cmap='magma',linecolor='white',linewidths=1) # linecolor: separates the cells; linewidths: sets the line thickness.
plt.show()
# Clustermap: which categories (e.g. months in which people fly) lie close together
sns.clustermap(pvfluege,cmap='coolwarm',standard_scale=1) # standard_scale: the dimension (rows or columns) to standardize (subtract the minimum from each value, then divide each value by the maximum)
plt.show() # use 0 to standardize rows, 1 for columns.
# Regression plots:
sns.lmplot(x="total_bill",y="tip", data=tips) # compare bill and tip with respect to correlation
plt.show()
sns.lmplot(x='total_bill',y='tip',data=tips,hue='sex',palette='coolwarm', # hue: split by a categorical parameter
markers=['o','v'], # change the marker symbols
scatter_kws={'s':100}) # scatter_kws: set the size of the markers
plt.show()
sns.lmplot(x='total_bill',y='tip',data=tips, row='time', col='sex') # row & col build multiple regression plots in a grid.
plt.show()
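# A hedged addition, not part of the original tutorial: on newer pandas
# releases (>= 2.0) DataFrame.corr() no longer drops non-numeric columns
# silently, so the tips.corr() calls above need numeric_only=True there:
sns.heatmap(tips.corr(numeric_only=True), cmap='coolwarm', annot=True)
plt.show()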
|
ThePeziBear/MyPythonLibrary
|
Visualizing_Python/Seaborn/2_matrix_regression_function_seaborn.py
|
2_matrix_regression_function_seaborn.py
|
py
| 2,082 |
python
|
de
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "seaborn.load_dataset",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "seaborn.load_dataset",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "seaborn.heatmap",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "seaborn.heatmap",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "seaborn.heatmap",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "seaborn.clustermap",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "seaborn.lmplot",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "seaborn.lmplot",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "seaborn.lmplot",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
}
] |
10294260342
|
__url__ = "$URL: svn://gizmojo.org/pub/evoque/trunk/domain.py $"
__revision__ = "$Id: domain.py 1153 2009-01-20 11:43:21Z mario $"
import sys, logging
from evoque.collection import Collection
from evoque.evaluator import set_namespace_on_dict, xml
def get_log():
logging.basicConfig(level=logging.INFO,
format="%(asctime)s %(levelname)-5.5s [%(name)s] %(message)s")
return logging.getLogger("evoque")
# $begin{init}
class Domain(object):
""" A registry of collections """
def __init__(self, default_dir,
restricted=False, errors=3, log=get_log(),
# defaults for Collections
cache_size=0, auto_reload=60, slurpy_directives=True,
# defaults for Collections (and Templates)
quoting="xml", input_encoding="utf_8", filters=[]):
"""
default_dir: either(str, Collection)
abs path, or actual default collection instance
restricted: bool
restricted evaluation namespace
errors: int
["silent", "zero", "name", "render", "raise"]
log: the logging.getLogger("evoque") logger; may be pre-initialized
and passed in, or adjusted as needed after initialization.
            Default settings (via logging.basicConfig()) are:
handler=StreamHandler()
level=logging.INFO
format="%(asctime)s %(levelname)-5.5s [%(name)s] %(message)s"
# Defaults for Collections (and Templates)
cache_size: int
max number loaded templates in collection
0 means unlimited loaded templates
auto_reload: int
min seconds to wait to check if needs reloading
0 means check on every rendering
slurpy_directives: bool
consume all whitespace trailing a directive
quoting: either("xml", "str", type)
"xml" -> qpy.xml, "str" -> unicode
input_encoding: str
hint for how to decode template source
filters: [callable]
list of template post-evaluation filter functions
"""
# $end{init}
self.restricted = restricted
self.errors = errors
self.log = log # the logging.getLogger("evoque") logger
# defaults -- cascaded down to collections
self.cache_size = cache_size
self.auto_reload = auto_reload
self.slurpy_directives = slurpy_directives
self.quoting = quoting
self.input_encoding = input_encoding
self.filters = filters
#
self.collections = {} # by collection name
# default collection
if isinstance(default_dir, Collection):
self.collections[default_dir.name] = default_dir
self.default_collection = default_dir
else:
self.set_collection("", default_dir, cache_size, auto_reload,
slurpy_directives, quoting, input_encoding, filters)
self.default_collection = self.collections[""]
#
self.globals = {}
self.globals['xml'] = xml
if self.restricted:
restrict_execution_namespace(self.globals)
def set_on_globals(self, name, value):
""" (name:str, value:any) -> None
"""
self.globals[name] = value
def set_namespace_on_globals(self, name, obj, no_underscored=True):
""" (name:either(str, None), obj:any, no_underscored:bool) -> None
If name is None, the obj's namespace will be set onto top-level.
"""
set_namespace_on_dict(self.globals, name, obj, no_underscored)
def get_collection(self, name=None):
""" (name:either(None, str, Collection)) -> Collection
"""
if name is None:
return self.default_collection
if isinstance(name, Collection):
name = name.name
return self.collections[name]
def set_collection(self, name, dir,
cache_size=None, auto_reload=None, slurpy_directives=None,
quoting=None, input_encoding=None, filters=None):
""" (name:str, dir:str,
cache_size:int, auto_reload:int, slurpy_directives:bool,
quoting:either(str, type), input_encoding:str,
filters:either(None, [callable])) -> None
"""
if self.has_collection(name):
raise ValueError(
"Domain already has a collection named [%s]" % (name))
self.collections[name] = Collection(self, name, dir,
cache_size, auto_reload, slurpy_directives, quoting,
input_encoding, filters)
def has_collection(self, name):
""" (name:str -> bool
"""
return name in self.collections
def get_template(self, name, src=None, collection=None, raw=None,
data=None, quoting=None, input_encoding=None, filters=None):
""" Wraps Collection.get_template()
"""
return self.get_collection(collection).get_template(name,
src, raw, data, quoting, input_encoding, filters)
def set_template(self, name, src=None, collection=None, raw=None,
data=None, from_string=True,
quoting=None, input_encoding=None, filters=None):
""" Wraps Collection.set_template()
"""
self.get_collection(collection).set_template(name, src,
raw, data, from_string, quoting, input_encoding, filters)
def has_template(self, name, collection=None):
""" Wraps Collection.has_template()
"""
return self.get_collection(collection).has_template(name)
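def _example_domain_usage():
    """ A hedged usage sketch, not part of the original module: the directory
        and the template name/source are hypothetical placeholders.
    """
    domain = Domain("/tmp/templates")
    domain.set_template("hello", src="Hello ${name}!", from_string=True)
    return domain.has_template("hello")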
def restrict_execution_namespace(namespace):
""" (namespace:dict) -> None
Modifies the namespace dict parameter to add entries for builtins deemed
safe, and sets a dummy __builtins__ empty dict.
"""
# here type(__builtins__) is dict (not module as in the interpreter)
import inspect
    # In python 2.4, BaseException is not available
BaseException_ = __builtins__.get('BaseException', Exception)
for name, obj in __builtins__.items():
if name in DISALLOW_BUILTINS:
continue
if inspect.isbuiltin(obj) or \
(inspect.isclass(obj) and not issubclass(obj, BaseException_)) or \
(obj in (None, True, False)):
namespace[name] = obj
namespace["__builtins__"] = {}
# Potentially unsafe __builtins__ in python 2.5 that will be removed from
# execution namespace (in addition to all subclasses of BaseException).
# $begin{unsafe}
DISALLOW_BUILTINS = ["_", "__debug__", "__doc__", "__import__", "__name__",
"buffer", "callable", "classmethod", "coerce", "compile", "delattr", "dir",
"eval", "execfile", "exit", "file", "getattr", "globals", "hasattr", "id",
"input", "isinstance", "issubclass", "locals", "object", "open", "quit",
"raw_input", "reload", "setattr", "staticmethod", "super", "type", "vars"]
# $end{unsafe}
# why "object" is included, see:
# http://groups.google.com/group/comp.lang.python/msg/5639e1b5cdac3ac2
|
phonybone/Rnaseq
|
ext_libs/evoque/domain.py
|
domain.py
|
py
| 7,129 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "evoque.collection.Collection",
"line_number": 69,
"usage_type": "argument"
},
{
"api_name": "evoque.evaluator.xml",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "evoque.evaluator.set_namespace_on_dict",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "evoque.collection.Collection",
"line_number": 99,
"usage_type": "argument"
},
{
"api_name": "evoque.collection.Collection",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "inspect.isbuiltin",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "inspect.isclass",
"line_number": 158,
"usage_type": "call"
}
] |
29699780347
|
import pandas as pd
import numpy as np
import geopandas as gpd
import matplotlib.pyplot as plt
from scipy import ndimage
import geoplot
import matplotlib.pylab as pylab
# import geoplot as gp
from models import Simulation, NTAGraphNode, DiseaseModel
def read_hospital_data(filename):
df = pd.read_csv(filename, sep=",", header=None, encoding="ISO-8859-1")
# Remove extraneous commas/columns after last column
df = df.drop([1, 2, 3, 4], axis=1)
df.columns = [
"name",
"lat",
"long",
"bed_count",
]
df["lat"] = df.lat.astype(float)
df["long"] = df.long.astype(float)
return df
def read_nta_data(filename):
df = pd.read_csv(filename, sep=",", header=None, encoding="ISO-8859-1")
# Remove extraneous commas/columns after last column
df = df.loc[:, :6]
# Rename columns to use for indexing
df.columns = [
"borough",
"nta_code",
"nta_name",
"population",
"lat",
"long",
"hospitalization_rate",
]
df["lat"] = df.lat.astype(float)
df["long"] = df.long.astype(float)
df["hospitalization_rate"] = df.hospitalization_rate.astype(float)
return df
def write_parsed_data(df, filename):
with open(filename, "w") as f:
for row in df.itertuples(index=False, name=None):
f.write("{}\n".format(row))
def show_kdeplot(shape, gdf):
ax = geoplot.polyplot(shape, projection=geoplot.crs.AlbersEqualArea(), zorder=1)
geoplot.kdeplot(
gdf,
ax=ax,
shade=True,
cmap="Reds",
n_levels=16,
shade_lowest=True,
clip=shape.simplify(0.001, preserve_topology=False),
)
geoplot.pointplot(gdf, ax=ax, color="blue")
plt.show()
NTAs = read_nta_data("New York Pop NTA updated.csv")
hospitals = read_hospital_data("NYC Hospital Locations Filled.csv")
zipfile = "zip:///home/kevin/code/Comp-Epi-Project/shape/shapefile.zip"
shape = gpd.read_file(zipfile)
gdf = gpd.GeoDataFrame(
hospitals,
geometry=gpd.points_from_xy(hospitals["long"], hospitals["lat"]),
crs="epsg:4326",
)
gdf2 = gpd.GeoDataFrame(
NTAs, geometry=gpd.points_from_xy(NTAs["long"], NTAs["lat"]), crs="epsg:4326",
)
##################
# choropleth WIP #
##################
NTAs_d = dict(gdf2)
shape_d = dict(shape)
for i, ntacode in NTAs_d["nta_code"].items():
indexes = [k for k, v in shape_d["ntacode"].items() if ntacode == v]
if indexes:
assert len(indexes) == 1
index = indexes.pop()
NTAs_d["geometry"][i] = shape_d["geometry"][index]
else:
print(ntacode, shape_d["ntaname"][i])
new_NTAs = pd.DataFrame(NTAs_d)
gdf3 = gpd.GeoDataFrame(new_NTAs, geometry=new_NTAs.geometry)
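# A hedged alternative to the dict-based matching above (computed but not used
# below): GeoDataFrame inherits pandas' merge, so the NTA attributes can be
# joined onto the shapefile geometries directly via the NTA code columns.
merged = shape.merge(NTAs[["nta_code", "population", "hospitalization_rate"]], left_on="ntacode", right_on="nta_code")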
print(gdf3.head(100))
# show_kdeplot(shape, gdf)
geoplot.choropleth(
gdf3,
projection=geoplot.crs.AlbersEqualArea(),
hue="hospitalization_rate",
cmap="Greens",
legend=True,
edgecolor="black",
linewidth=1,
)
plt.show()
|
cwaldron97/Comp-Epi-Project
|
hospitals.py
|
hospitals.py
|
py
| 2,990 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "geoplot.polyplot",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "geoplot.crs.AlbersEqualArea",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "geoplot.crs",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "geoplot.kdeplot",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "geoplot.pointplot",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "geopandas.read_file",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "geopandas.GeoDataFrame",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "geopandas.points_from_xy",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "geopandas.GeoDataFrame",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "geopandas.points_from_xy",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "geopandas.GeoDataFrame",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "geoplot.choropleth",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "geoplot.crs.AlbersEqualArea",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "geoplot.crs",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 120,
"usage_type": "name"
}
] |
1706295460
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
# MED (minimum Euclidean distance) classifier
class Medclass:
    def __init__(self):
        self.center_dict = {} # class center points, keyed by label -> center_point(list)
        self.feature_number = 0 # feature dimensionality
        self.train_state = False # training state: True once training is done, False before
    def train(self, feature_set, label_set):
        new_label_set = {key: value for key, value in enumerate(label_set)} # turn the label set into a dict keyed by sample index: index -> label
        self.feature_number = len(feature_set[0])
        sample_num = len(label_set) # number of samples
        count = {} # number of samples in each class, label: count(int)
        # compute the center point of each class
        for index in range(sample_num):
            if new_label_set[index] not in count.keys():
                count[new_label_set[index]] = 1
            else:
                count[new_label_set[index]] += 1 # count the samples carrying this label
            if new_label_set[index] not in self.center_dict.keys():
                # copy, so that the accumulation below does not mutate the caller's feature_set
                self.center_dict[new_label_set[index]] = feature_set[index].copy()
            else:
                self.center_dict[new_label_set[index]] += feature_set[index]
        for _key_ in self.center_dict.keys():
            for _feature_ in range(self.feature_number):
                self.center_dict[_key_][_feature_] /= count[_key_]
        self.train_state = True
    # Predict classes for the given input; returns a dict of index -> predicted label
    def predict(self, feature_set):
        # first make sure the classifier has been trained
        if not self.train_state:
            return {}
        sample_num = len(feature_set)
        distance_to = {} # squared distance from one sample to each class center, label: float
        result = {} # classification results, index: label
        for _sample_ in range(sample_num):
            for _key_ in self.center_dict.keys():
                delta = feature_set[_sample_] - self.center_dict[_key_]
                distance_to[_key_] = np.dot(delta.T, delta)
            result[_sample_] = min(distance_to, key=distance_to.get) # key (i.e. label) with the smallest distance
        return result
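    # A hedged alternative, not part of the original class and not called by
    # this script: predict() expressed with numpy broadcasting.
    def predict_vectorized(self, feature_set):
        if not self.train_state:
            return {}
        labels = list(self.center_dict.keys())
        centers = np.stack([self.center_dict[k] for k in labels]) # (C, d) matrix of class centers
        diffs = np.asarray(feature_set)[:, None, :] - centers[None, :, :] # (N, C, d)
        d2 = (diffs ** 2).sum(axis=2) # squared Euclidean distances, shape (N, C)
        return {i: labels[int(j)] for i, j in enumerate(d2.argmin(axis=1))}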
    # Compute the prediction accuracy
def accuracy(self, feature_set, label_set):
if not self.train_state:
return 0.0
correct_num = 0
total_num = len(label_set)
predict = self.predict(feature_set)
for _sample_ in range(total_num):
if predict[_sample_] == label_set[_sample_]:
correct_num += 1
return correct_num / total_num
    # Compute the classifier's performance for a given positive class (accuracy, precision, recall, specificity, F1_Score)
def performance(self, feature_set, label_set, positive):
if not self.train_state:
return {}
total_num = len(label_set)
predict = self.predict(feature_set)
true_positive, false_positive, true_negative, false_negative = 0, 0, 0, 0
for _sample_ in range(total_num):
if predict[_sample_] == label_set[_sample_]:
if label_set[_sample_] == positive:
true_positive += 1
else:
true_negative += 1
else:
if label_set[_sample_] == positive:
false_negative += 1
else:
false_positive += 1
print("tp=",true_positive,"tn=",true_negative,"fn=",false_negative,"fp=",false_positive)
        accuracy = (true_positive + true_negative) / total_num # accuracy (correctly predicted samples over all samples)
        precision = true_positive / (true_positive + false_positive) # precision (share of predicted positives whose true value is positive)
        recall = true_positive / (true_positive + false_negative) # recall (share of true positives that are predicted positive)
        specificity = true_negative / (true_negative + false_positive) # specificity (share of true negatives that are predicted negative)
        F1_Score = (2 * precision * recall) / (precision + recall) # harmonic mean of precision and recall
        print("accuracy:", accuracy, "precision:", precision, "recall:", recall, "specificity:",specificity, "F1_Score:", F1_Score)
    # Get the center point of one class
def get_center(self, key):
if key in self.center_dict.keys():
return self.center_dict[key]
else:
return []
def get_center_dict(self):
return self.center_dict
#end
# Plot decision lines
# Show the decision line for a binary classification problem (class_1 vs class_2) on the 2-D plane
# feature is the sample feature set, label the corresponding labels; every pair of feature dimensions is compared, n is the number of feature dimensions
def show_decision_line(feature, label, med_classifier, class_1=0, class_2=0, n=0):
    plt.figure(figsize=(16, 12), dpi=80) # overall canvas size and resolution
img = [[] for i in range(n * n)]
for i in range(n):
for j in range(n):
img[i * n + j] = plt.subplot(n, n, i * n + j + 1)
center_1 = med_classifier.get_center(class_1)
center_2 = med_classifier.get_center(class_2)
            c_1 = [center_1[i], center_1[j]] # components i and j of the class_1 center point
            c_2 = [center_2[i], center_2[j]] # components i and j of the class_2 center point
            center_3 = [(c_1[0] + c_2[0]) / 2, (c_1[1] + c_2[1]) / 2] # midpoint of the segment joining the two centers
            k2, b2 = calculate_vertical_line(c_1, c_2) # slope and intercept of the perpendicular bisector of the two centers
            plt.scatter(feature[:, i], feature[:, j], c=label, s=20, marker='.') # scatter of the whole sample set on feature dimensions i and j
            plt.scatter(c_1[0], c_1[1], c='b', marker='x') # mark the class centers computed by the MED classifier
            plt.scatter(c_2[0], c_2[1], c='b', marker='x')
            plt.grid(True) # show grid lines
            plt.axis('equal') # equal scale on both axes
plt.axline(c_1, c_2, color='c', linestyle="--", label="connected line")
plt.axline(center_3, slope=k2, color='r', label="decision line")
if i == j:
                plt.legend() # show the legend on the diagonal subplots
            plt.xlabel("feature " + str(i))
            plt.ylabel("feature " + str(j))
    plt.tight_layout() # auto-adjust subplot sizes to reduce overlap
plt.show()
# Line through two points; returns slope and y-intercept (points on the 2-D plane, given as lists)
def calculate_connected_line(point_1, point_2):
if len(point_1) != 2 or len(point_2) != 2:
return None
k = (point_1[1] - point_2[1]) / (point_1[0] - point_2[0])
b = (point_1[0] * point_2[1] - point_2[0] * point_1[1]) / (point_1[0] - point_2[0])
return k, b
# Perpendicular bisector of two points; returns slope and y-intercept (points on the 2-D plane, given as lists)
def calculate_vertical_line(point_1, point_2):
if len(point_1) != 2 or len(point_2) != 2:
return None
k = -(point_1[0] - point_2[0]) / (point_1[1] - point_2[1])
b = (point_1[1] + point_2[1] + (point_1[0] + point_2[0]) * (point_1[0] - point_2[0]) / (point_1[1] - point_2[1]))/2
return k, b
# end of decision-line plotting
# feature holds the sample features, label the corresponding labels; m rows by n columns gives m*n subplots
def visualization(feature, label, m, n):
plt.figure(figsize=(10, 10), dpi=100)
img = [[] for i in range(m*n)]
for i in range(m):
for j in range(n):
img[i*n+j] = plt.subplot(m, n, i*n+j+1)
plt.xlabel("x"+str(i))
plt.ylabel("x"+str(j))
plt.xlim(-1, 9)
plt.ylim(-1, 9)
plt.scatter(feature[:, i], feature[:, j], s=5, c=label, marker='x')
            plt.grid(True) # show grid lines
    plt.tight_layout() # auto-adjust subplot sizes to reduce overlap
plt.show()
# feature holds the sample features, label the corresponding labels; m rows by n columns gives m*n subplots
def visualization_white(feature, label, m, n):
plt.figure(figsize=(10, 10), dpi=100)
img = [[] for i in range(m*n)]
for i in range(m):
for j in range(n):
img[i*n+j] = plt.subplot(m, n, i*n+j+1)
plt.xlabel("x"+str(i))
plt.ylabel("x"+str(j))
plt.xlim(-20, 20)
plt.ylim(-20, 20)
plt.scatter(feature[:, i], feature[:, j], s=5, c=label, marker='x')
            plt.grid(True) # show grid lines
    plt.tight_layout() # auto-adjust subplot sizes to reduce overlap
plt.show()
# Remove samples of a given class; returns two numpy arrays
def remove_from_data(feature, label, num):
new_feature = []
new_label = []
for index in range(len(label)):
if label[index] != num:
new_feature.append(feature[index])
new_label.append(label[index])
return np.asarray(new_feature), np.asarray(new_label)
# Feature whitening; returns the whitened matrix (as a numpy array)
# The argument is a numpy array whose layout is the transpose of the mathematical matrix
def whitening(data):
    Ex=np.cov(data,rowvar=False) # Ex is the covariance matrix of data
    print(Ex.shape)
    a, b = np.linalg.eig(Ex) # eigenvalues and eigenvectors of the original covariance matrix Ex
    # normalize the eigenvectors to unit length
modulus=[]
b=np.real(b)
for i in range(b.shape[1]):
sum=0
for j in range(b.shape[0]):
sum+=b[i][j]**2
modulus.append(sum)
modulus=np.asarray(modulus,dtype="float64")
b=b/modulus
    # diagonal matrix A
a=np.real(a)
A=np.diag(a**(-0.5))
W=np.dot(A,b.transpose())
X=np.dot(W,np.dot(Ex,W.transpose()))
for i in range(W.shape[0]):
for j in range(W.shape[1]):
if np.isnan(W[i][j]):
W[i][j]=0
print(W)
return np.dot(data,W)
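# A hedged textbook sketch (not used by this script): standard PCA whitening of
# centered data with covariance C = U diag(a) U^T uses W = diag(a**-0.5) @ U.T,
# which makes the whitened covariance W C W^T the identity. The function above
# follows a slightly different normalization.
def whiten_pca(data):
    a, U = np.linalg.eigh(np.cov(data, rowvar=False)) # eigendecomposition of the (symmetric) covariance
    W = np.diag(1.0 / np.sqrt(np.maximum(a, 1e-12))) @ U.T # clamp tiny eigenvalues for stability
    return (data - data.mean(axis=0)) @ W.T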
if __name__ == '__main__':
iris = datasets.load_iris()
iris_data = iris.data
iris_target = iris.target
iris_target_names=iris.target_names
print(iris)
    # visualization
    visualization(iris_data,iris_target,4,4)
    # remove the last class, which is not linearly separable from the rest
iris_data_linear, iris_target_linear = remove_from_data(iris_data, iris_target, 2)
visualization(iris_data_linear,iris_target_linear,4,4)
    # split into training and test sets
x_train, x_test, y_train, y_test = train_test_split(iris_data_linear, iris_target_linear, test_size=0.3)
meds=Medclass()
meds.train(x_train,y_train)
meds.performance(x_test, y_test, 0)
    # show pairwise plots of every feature pair with the decision line
show_decision_line(x_test, y_test, meds, class_1=0, class_2=1, n=4)
    # feature whitening
iris_data_white = whitening(iris_data)
print(iris_data_white)
visualization_white(iris_data_white,iris_target,4,4)
    # remove the linearly separable class
    #iris_data_nolinear, iris_target_nolinear = remove_from_data(iris_data, iris_target, 0) # without whitening
    #visualization(iris_data_nolinear,iris_target_nolinear,4,4)
    iris_data_nolinear, iris_target_nolinear = remove_from_data(iris_data_white, iris_target, 0) # whitened
visualization_white(iris_data_nolinear,iris_target_nolinear,4,4)
    # split into training and test sets
x_train, x_test, y_train, y_test = train_test_split(iris_data_nolinear, iris_target_nolinear, test_size=0.3)
meds2=Medclass()
meds2.train(x_train,y_train)
meds2.performance(x_test, y_test, 1)
    # show pairwise plots of every feature pair with the decision line
show_decision_line(x_test, y_test, meds2, class_1=1, class_2=2, n=4)
|
suudeer/iris-su2021
|
iris-su2021/iris/main.py
|
main.py
|
py
| 11,704 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.dot",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axline",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axline",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "numpy.asarray",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "numpy.cov",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.eig",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 197,
"usage_type": "attribute"
},
{
"api_name": "numpy.real",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "numpy.real",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "numpy.diag",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets.load_iris",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 252,
"usage_type": "call"
}
] |
45180348436
|
#determine whether a video belongs to the humor or the practical category
import numpy as np
from matplotlib import pyplot as plt
def traindataset():
    datagroup=np.loadtxt('C:\\Users\\Dell\\Desktop\\classification\\diabetes_train.txt',dtype=float,delimiter=',')
dataset=datagroup[:,1:]
label=datagroup[:,0]
return dataset,label
def testdataset():
    datagroup=np.loadtxt('C:\\Users\\Dell\\Desktop\\classification\\diabetes_test.txt',dtype=float,delimiter=',')
dataset=datagroup[:,1:]
label=datagroup[:,0]
return dataset,label
def K_classify(test,datagroup,label,k,p):#p is the decision threshold
    datasize=datagroup.shape[0]#number of rows in the training data
    test0=np.tile(test,(datasize,1))-datagroup#tile the test sample to the training-data shape, then subtract
    distance0=(test0**2).sum(1)#sum of squares
    distance=distance0**0.5#square root gives the Euclidean distance
    listsy=distance.argsort()#indices sorted by distance, ascending
    classcount={}#empty dict for per-class counts
num0=0
num1=0
for i in range(k):
label0=label[listsy[i]]
        classcount[label0]=classcount.get(label0,0)+1#count occurrences of each class
if label0==0:
num0+=1
else:
num1+=1
nums=num0+num1
if num1/nums >= p:
return 1
else:
return 0
if __name__=='__main__':
datagroup,label=traindataset()
test,truelabels=testdataset()
predict=[]
    Ps=[(n/100) for n in range(101)]#sweep the decision threshold
a=-1
b=0
AUC=0.0
TPR=[]
FPR=[]
for p in Ps:
for i in range(len(test)):
predict.append(K_classify(test[i],datagroup,label,150,p))
tp=0
fp=0
tn=0
fn=0
for j in range(len(test)):
if predict[j]==truelabels[j]:
if predict[j]==1:
tp+=1
else:
tn+=1
else:
if predict[j]==1:
fp+=1
else:
fn+=1
fpr=fp/(fp+tn)
tpr=tp/(tp+fn)
print(fpr)
        AUC+=(a+tpr)*(b-fpr)/2#accumulate trapezoid areas to compute AUC
a=tpr
b=fpr
TPR.append(tpr)
FPR.append(fpr)
del predict[:]
plt.plot(FPR,TPR)
plt.xlabel('FPR')
plt.ylabel('TPR')
    plt.title('ROC curve')
    plt.rcParams['font.sans-serif'] = ['SimHei']#font that can also render CJK glyphs
    plt.grid()#grid lines
x=[(n/10) for n in range(11)]
y=[(n/10) for n in range(11)]
plt.xticks(x)
plt.yticks(y)
print('AUC=',AUC)
plt.show()
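    # Illustrative cross-check (not part of the original script): compare the
    # hand-rolled trapezoid AUC against numpy's trapz on the same ROC points.
    # abs() accounts for FPR being traversed in descending order.
    print('AUC (np.trapz) =', abs(np.trapz(TPR, FPR)))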
|
lijiaming666/Python_demo
|
K近邻法+ROC.py
|
K近邻法+ROC.py
|
py
| 2,540 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.loadtxt",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.tile",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 87,
"usage_type": "name"
}
] |
2373356170
|
from huggingface_hub import login
from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaTokenizer
login(token='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')
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
input_text = "Hello, how are you?"
inputs = tokenizer.encode(input_text, return_tensors='pt')
outputs = model.generate(inputs, max_length=50, num_return_sequences=5, temperature=0.7, do_sample=True)  # sampling must be enabled for temperature and multiple return sequences to take effect
print("Generated text:")
for i, output in enumerate(outputs):
print(f"{i}: {tokenizer.decode(output)}")
|
mario872/Isaac-Voice-Assistant
|
main/Llama2.py
|
Llama2.py
|
py
| 612 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "huggingface_hub.login",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "transformers.AutoTokenizer.from_pretrained",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "transformers.AutoTokenizer",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "transformers.AutoModelForCausalLM.from_pretrained",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "transformers.AutoModelForCausalLM",
"line_number": 7,
"usage_type": "name"
}
] |
4357761100
|
from flask_wtf import Form
from wtforms import StringField, TextAreaField, SelectField
from wtforms import SubmitField, validators
# from wtforms.ext.sqlalchemy.fields import QuerySelectField
from ..models import Department, Service
class RequestForm(Form):
    '''This class creates a RequestForm
object.
'''
name = StringField('Request',
                       [validators.Required(message='We need a Request.'),
validators.Length(
max=70,
                           message='Your subject is a tad long.'
)
]
)
description = TextAreaField('Request Description',
[validators.required(
message='Please describe your Request.')])
priority = SelectField('Priority', choices=[
('high', 'High'), ('medium', 'Medium'), ('low', 'Low')])
department = SelectField('Department',
[validators.Required(
message='Department required.')],
coerce=int)
service = SelectField('Service',
[validators.Required(
message='Service required.')],
coerce=int)
submit = SubmitField('Post Request')
def __init__(self, *args, **kwargs):
super(RequestForm, self).__init__(*args, **kwargs)
self.department.choices = [
(dept.id, dept.name) for dept in Department.query.all()]
self.service.choices = [
(service.id, service.name) for service in Service.query.all()]
class CommentForm(Form):
'''This class creates a CommentForm
object
'''
comment = TextAreaField('Comment')
|
bazanovam/smartIT
|
app/request/forms.py
|
forms.py
|
py
| 1,819 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask_wtf.Form",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "wtforms.StringField",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.Required",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "wtforms.validators.Length",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "wtforms.TextAreaField",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.required",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "wtforms.SelectField",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "wtforms.SelectField",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.Required",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "wtforms.SelectField",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "wtforms.validators.Required",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "wtforms.validators",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "wtforms.SubmitField",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "models.Department.query.all",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "models.Department.query",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "models.Department",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "models.Service.query.all",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "models.Service.query",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "models.Service",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "flask_wtf.Form",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "wtforms.TextAreaField",
"line_number": 48,
"usage_type": "call"
}
] |
17767255083
|
from datetime import datetime
import uuid
class Order():
def __init__(self, order):
self.__dict__ = order
self.id = str(uuid.uuid4())
class RenderDishInfo():
def __init__(self, id, name, shelf, value, isPicked, isDecayed):
self.id = id
self.name = name
self.shelf = shelf
self.value = value
self.isPicked = isPicked
self.isDecayed = isDecayed
|
purifier1990/PythonLearn
|
kitchen/order.py
|
order.py
|
py
| 363 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "uuid.uuid4",
"line_number": 7,
"usage_type": "call"
}
] |
73758139386
|
"""Docstring"""
import os
import ezdxf
from .file_utilities import check_file
def check_file_and_folder(
path, save_path, save=True
) -> list[ezdxf.document.Drawing] | None:
"""
Handle file or folder to apply the cleaning process
"""
if os.path.isdir(path):
list_dir = os.listdir(path)
try:
os.mkdir(save_path)
except FileExistsError:
pass
docs = []
for name in list_dir:
npath = os.path.join(path, name)
docs.append(check_file_and_folder(npath, os.path.join(save_path, name)))
else:
if check_file(path):
doc = ezdxf.readfile(path)
clean(doc)
if save:
doc.saveas(save_path)
docs = [doc]
else:
docs = None
print(f"{path} is not a dxf file")
return docs
def clean(doc: ezdxf.document.Drawing) -> None:
"""Apply functions to clean the doc"""
remove_sw(doc)
def remove_sw(doc: ezdxf.document.Drawing) -> None:
"""Remove the text included by solidworks from the doc file"""
blocks = doc.blocks
for bloc in blocks:
for entity in bloc:
if entity.dxftype() == "MTEXT":
if "SOLIDWORKS" in entity.text:
bloc.delete_entity(entity)
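def _example_clean_folder():
    # Illustrative usage (hypothetical paths, not from the original module):
    # clean every dxf under "drawings/" and write results to "drawings_cleaned/".
    return check_file_and_folder("drawings", "drawings_cleaned", save=True)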
|
ldevillez/pySwTools
|
pyswtools/ready_dxf/dxf_utilities.py
|
dxf_utilities.py
|
py
| 1,326 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "os.path.isdir",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "file_utilities.check_file",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "ezdxf.readfile",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "ezdxf.document",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "ezdxf.document",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "ezdxf.document",
"line_number": 45,
"usage_type": "attribute"
}
] |
21485752439
|
# import heapq
from collections import deque
from typing import List
class Solution:
def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:
wordList = set(wordList)
if endWord not in wordList:return 0
n = len(wordList)
# def getdist(x, y):
# count = 0
# for i in range(len(x)):
# if x[i] != y[i]:
# count += 1
# return count
visited = [0] * n
queue = deque([])
queue.append(beginWord)
level = 1
while queue:
size = len(queue)
for i in range(size):
word = queue.popleft()
if word == endWord:
return level
else:
for j in range(len(word)):
temp = list(word)
for k in range(26):
if chr(ord('a') + k) != temp[j]:
temp[j] = chr(ord('a') + k)
newword = "".join(temp)
if newword in wordList:
queue.append(newword)
wordList.remove(newword)
level += 1
return 0
# My first reaction was BFS, but the first version hit TLE. I spent a long time suspecting my BFS
# was inefficient and tried heapq for a heuristic search, but it still timed out -- and unless the
# level comes first in the priority, the heuristic can even return wrong answers.
# After reading the solution, the key is how you compute the "differs by **exactly one character**"
# relation. My approach compared every pair of words, which costs wordlen * n * n.
# The clever way: for each word, enumerate all words at distance one and test membership in the
# remaining dict. Since a hash lookup is O(1), this costs only wordlen * 26 * n -- a big improvement
# when n is large.
# So the crucial point I missed: store the words in a hash set, whose O(1) lookup is excellent!
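# A sketch of the wildcard-bucket variant described above (illustrative, not
# part of the original solution): precompute patterns like "h*t" so each
# word's neighbors are found by O(wordlen) dict lookups instead of 26 probes.
from collections import defaultdict

def ladder_length_buckets(beginWord, endWord, wordList):
    words = set(wordList)
    if endWord not in words:
        return 0
    buckets = defaultdict(list)
    for w in words | {beginWord}:
        for j in range(len(w)):
            buckets[w[:j] + '*' + w[j + 1:]].append(w)
    queue = deque([(beginWord, 1)])
    seen = {beginWord}
    while queue:
        word, level = queue.popleft()
        if word == endWord:
            return level
        for j in range(len(word)):
            for nxt in buckets[word[:j] + '*' + word[j + 1:]]:
                if nxt not in seen:
                    seen.add(nxt)
                    queue.append((nxt, level + 1))
    return 0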
|
bboychencan/Algorithm
|
leetcode/127.py
|
127.py
|
py
| 2,238 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.deque",
"line_number": 17,
"usage_type": "call"
}
] |
26447487026
|
import os
import json
from PIL import Image
import numpy as np
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
import sys
from pcl_generator import PointCloudGenerator
from lidar_generator import PseudoLidarGenerator
def load_pose(extrinsics: dict) -> np.array:
"""
Load pose as 4x4 matrix from (x, y, z, yaw)
:param extrinsics: Dictionary with extrinsic parameters x, y, z and yaw angle.
:return T: 4x4 transformation matrix as numpy array
"""
sinval = np.sin(extrinsics['yaw'] * np.pi / 180.)
cosval = np.cos(extrinsics['yaw'] * np.pi / 180.)
Rz = np.array([[cosval, -sinval, 0],
[sinval, cosval, 0],
[0, 0, 1]])
t = np.array([extrinsics['x'], extrinsics['y'], extrinsics['z']])
T = np.eye(4)
T[:3, :3] = Rz
T[:3, 3] = Rz @ t
return T
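# Illustrative sanity check (not part of the original file): a 90 degree yaw
# maps the x axis onto y, and the translation is expressed in the rotated frame.
def _load_pose_check():
    T = load_pose({'x': 1.0, 'y': 0.0, 'z': 0.0, 'yaw': 90.0})
    assert np.allclose(T[:3, :3] @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])
    assert np.allclose(T[:3, 3], [0.0, 1.0, 0.0])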
class PseudoLidarData(Dataset):
"""
Adapted from https://github.com/autonomousvision/transfuser/blob/main/transfuser/data.py
"""
def __init__(self, root, config):
self.seq_len = config.seq_len
self.pred_len = config.pred_len
self.ignore_sides = config.ignore_sides
self.ignore_rear = config.ignore_rear
self.input_resolution = config.input_resolution
self.scale = config.scale
with open('pseudolidar/extrinsics.json', mode='r') as f:
extrinsics = json.load(f)
self.front_pose = load_pose(extrinsics['cam_front'])
self.left_pose = load_pose(extrinsics['cam_left'])
self.right_pose = load_pose(extrinsics['cam_right'])
self.rear_pose = load_pose(extrinsics['cam_rear'])
self.img_dims = config.image_dim
self.img_fov = config.fov
self.pc_generator = PointCloudGenerator(self.img_fov, self.img_dims)
self.lidar_generator = PseudoLidarGenerator([self.front_pose,
self.left_pose,
self.right_pose,
self.rear_pose])
self.lidar = []
self.front = []
self.left = []
self.right = []
self.rear = []
self.depth_front = []
self.depth_left = []
self.depth_right = []
self.depth_rear = []
self.x = []
self.y = []
self.x_command = []
self.y_command = []
self.theta = []
self.steer = []
self.throttle = []
self.brake = []
self.command = []
self.velocity = []
for sub_root in tqdm(root, file=sys.stdout):
preload_file = os.path.join(sub_root, 'pseudo_lidar_diag_pl_' +
str(self.seq_len)+'_'+str(self.pred_len)+'.npy')
# dump to npy if no preload
if not os.path.exists(preload_file):
preload_front = []
preload_left = []
preload_right = []
preload_rear = []
preload_lidar = []
preload_depth_front = []
preload_depth_left = []
preload_depth_right = []
preload_depth_rear = []
preload_x = []
preload_y = []
preload_x_command = []
preload_y_command = []
preload_theta = []
preload_steer = []
preload_throttle = []
preload_brake = []
preload_command = []
preload_velocity = []
# list sub-directories in root
root_files = os.listdir(sub_root)
routes = [folder for folder in root_files if not os.path.isfile(os.path.join(sub_root, folder))]
for route in routes:
route_dir = os.path.join(sub_root, route)
# subtract final frames (pred_len) since there are no future waypoints
# first frame of sequence not used
num_seq = (len(os.listdir(route_dir+"/rgb_front/"))-self.pred_len-2)//self.seq_len
for seq in range(num_seq):
fronts = []
lefts = []
rights = []
rears = []
lidars = []
depth_fronts = []
depth_lefts = []
depth_rights = []
depth_rears = []
xs = []
ys = []
thetas = []
# read files sequentially (past and current frames)
for i in range(self.seq_len):
# images
filename = f"{str(seq*self.seq_len+1+i).zfill(4)}.png"
fronts.append(route_dir+"/rgb_front/"+filename)
lefts.append(route_dir+"/rgb_left/"+filename)
rights.append(route_dir+"/rgb_right/"+filename)
rears.append(route_dir+"/rgb_rear/"+filename)
# point cloud
lidars.append(route_dir + f"/lidar/{str(seq*self.seq_len+1+i).zfill(4)}.npy")
depth_fronts.append(route_dir + f"/depth_front/{str(seq*self.seq_len+1+i).zfill(4)}.png")
depth_lefts.append(route_dir + f"/depth_left/{str(seq*self.seq_len+1+i).zfill(4)}.png")
depth_rights.append(route_dir + f"/depth_right/{str(seq*self.seq_len+1+i).zfill(4)}.png")
depth_rears.append(route_dir + f"/depth_rear/{str(seq*self.seq_len+1+i).zfill(4)}.png")
# position
with open(route_dir + f"/measurements/{str(seq*self.seq_len+1+i).zfill(4)}.json", "r") as read_file:
data = json.load(read_file)
xs.append(data['x'])
ys.append(data['y'])
thetas.append(data['theta'])
# get control value of final frame in sequence
preload_x_command.append(data['x_command'])
preload_y_command.append(data['y_command'])
preload_steer.append(data['steer'])
preload_throttle.append(data['throttle'])
preload_brake.append(data['brake'])
preload_command.append(data['command'])
preload_velocity.append(data['speed'])
# read files sequentially (future frames)
for i in range(self.seq_len, self.seq_len + self.pred_len):
# point cloud
lidars.append(route_dir + f"/lidar/{str(seq*self.seq_len+1+i).zfill(4)}.npy")
depth_fronts.append(route_dir + f"/depth_front/{str(seq*self.seq_len+1+i).zfill(4)}.npy")
depth_lefts.append(route_dir + f"/depth_left/{str(seq*self.seq_len+1+i).zfill(4)}.npy")
depth_rights.append(route_dir + f"/depth_right/{str(seq*self.seq_len+1+i).zfill(4)}.npy")
depth_rears.append(route_dir + f"/depth_rear/{str(seq*self.seq_len+1+i).zfill(4)}.npy")
# position
with open(route_dir + f"/measurements/{str(seq*self.seq_len+1+i).zfill(4)}.json", "r") as read_file:
data = json.load(read_file)
xs.append(data['x'])
ys.append(data['y'])
# fix for theta=nan in some measurements
if np.isnan(data['theta']):
thetas.append(0)
else:
thetas.append(data['theta'])
preload_front.append(fronts)
preload_left.append(lefts)
preload_right.append(rights)
preload_rear.append(rears)
preload_lidar.append(lidars)
preload_depth_front.append(depth_fronts)
preload_depth_left.append(depth_lefts)
preload_depth_right.append(depth_rights)
preload_depth_rear.append(depth_rears)
preload_x.append(xs)
preload_y.append(ys)
preload_theta.append(thetas)
# dump to npy
preload_dict = {}
preload_dict['front'] = preload_front
preload_dict['left'] = preload_left
preload_dict['right'] = preload_right
preload_dict['rear'] = preload_rear
preload_dict['lidar'] = preload_lidar
preload_dict['depth_front'] = preload_depth_front
preload_dict['depth_left'] = preload_depth_left
preload_dict['depth_right'] = preload_depth_right
preload_dict['depth_rear'] = preload_depth_rear
preload_dict['x'] = preload_x
preload_dict['y'] = preload_y
preload_dict['x_command'] = preload_x_command
preload_dict['y_command'] = preload_y_command
preload_dict['theta'] = preload_theta
preload_dict['steer'] = preload_steer
preload_dict['throttle'] = preload_throttle
preload_dict['brake'] = preload_brake
preload_dict['command'] = preload_command
preload_dict['velocity'] = preload_velocity
np.save(preload_file, preload_dict)
# load from npy if available
preload_dict = np.load(preload_file, allow_pickle=True)
self.front += preload_dict.item()['front']
self.left += preload_dict.item()['left']
self.right += preload_dict.item()['right']
self.rear += preload_dict.item()['rear']
self.lidar += preload_dict.item()['lidar']
self.depth_front += preload_dict.item()['depth_front']
self.depth_left += preload_dict.item()['depth_left']
self.depth_right += preload_dict.item()['depth_right']
self.depth_rear += preload_dict.item()['depth_rear']
self.x += preload_dict.item()['x']
self.y += preload_dict.item()['y']
self.x_command += preload_dict.item()['x_command']
self.y_command += preload_dict.item()['y_command']
self.theta += preload_dict.item()['theta']
self.steer += preload_dict.item()['steer']
self.throttle += preload_dict.item()['throttle']
self.brake += preload_dict.item()['brake']
self.command += preload_dict.item()['command']
self.velocity += preload_dict.item()['velocity']
print("Preloading " + str(len(preload_dict.item()['front'])) + " sequences from " + preload_file)
def __len__(self):
"""Returns the length of the dataset. """
return len(self.front)
def __getitem__(self, index):
"""Returns the item at index idx. """
data = dict()
data['fronts'] = []
data['lefts'] = []
data['rights'] = []
data['rears'] = []
data['lidars'] = []
seq_fronts = self.front[index]
seq_lefts = self.left[index]
seq_rights = self.right[index]
seq_rears = self.rear[index]
seq_lidars = self.lidar[index]
seq_depth_fronts = self.depth_front[index]
seq_depth_lefts = self.depth_left[index]
seq_depth_rights = self.depth_right[index]
seq_depth_rears = self.depth_rear[index]
seq_x = self.x[index]
seq_y = self.y[index]
seq_theta = self.theta[index]
full_lidar = []
pos = []
neg = []
for i in range(self.seq_len):
data['fronts'].append(torch.from_numpy(np.array(
scale_and_crop_image(Image.open(seq_fronts[i]), scale=self.scale, crop=self.input_resolution))))
if not self.ignore_sides:
data['lefts'].append(torch.from_numpy(np.array(
scale_and_crop_image(Image.open(seq_lefts[i]), scale=self.scale, crop=self.input_resolution))))
data['rights'].append(torch.from_numpy(np.array(
scale_and_crop_image(Image.open(seq_rights[i]), scale=self.scale, crop=self.input_resolution))))
if not self.ignore_rear:
data['rears'].append(torch.from_numpy(np.array(
scale_and_crop_image(Image.open(seq_rears[i]), scale=self.scale, crop=self.input_resolution))))
"""
lidar_unprocessed = np.load(seq_lidars[i])[..., :3] # lidar: XYZI
full_lidar.append(lidar_unprocessed)
"""
pc_front = self.pc_generator.generate(seq_depth_fronts[i], max_depth=0.03)
pc_left = self.pc_generator.generate(seq_depth_lefts[i], max_depth=0.03)
pc_right = self.pc_generator.generate(seq_depth_rights[i], max_depth=0.03)
pc_rear = self.pc_generator.generate(seq_depth_rears[i], max_depth=0.03)
pseudolidar = self.lidar_generator.generate([pc_front, pc_left, pc_right, pc_rear])
full_lidar.append(pseudolidar)
# fix for theta=nan in some measurements
if np.isnan(seq_theta[i]):
seq_theta[i] = 0.
ego_x = seq_x[i]
ego_y = seq_y[i]
ego_theta = seq_theta[i]
# future frames
for i in range(self.seq_len, self.seq_len + self.pred_len):
lidar_unprocessed = np.load(seq_lidars[i])
full_lidar.append(lidar_unprocessed)
# lidar and waypoint processing to local coordinates
waypoints = []
for i in range(self.seq_len + self.pred_len):
# waypoint is the transformed version of the origin in local coordinates
# we use 90-theta instead of theta
# LBC code uses 90+theta, but x is to the right and y is downwards here
local_waypoint = transform_2d_points(np.zeros((1, 3)),
np.pi/2-seq_theta[i], -seq_x[i], -seq_y[i], np.pi/2-ego_theta, -ego_x, -ego_y)
waypoints.append(tuple(local_waypoint[0, :2]))
# process only past lidar point clouds
if i < self.seq_len:
# convert coordinate frame of point cloud
"""
full_lidar[i][:, 1] *= -1 # inverts x, y
full_lidar[i] = transform_2d_points(full_lidar[i],
np.pi/2-seq_theta[i], -seq_x[i], -seq_y[i], np.pi/2-ego_theta, -ego_x, -ego_y)
lidar_processed = lidar_to_histogram_features(full_lidar[i], crop=self.input_resolution)
"""
lidar_processed = PseudoLidarGenerator.post_process(full_lidar[i])
lidar_processed = PseudoLidarGenerator.project_to_bev(lidar_processed)
data['lidars'].append(lidar_processed.copy())
data['waypoints'] = waypoints
# convert x_command, y_command to local coordinates
# taken from LBC code (uses 90+theta instead of theta)
R = np.array([
[np.cos(np.pi/2+ego_theta), -np.sin(np.pi/2+ego_theta)],
[np.sin(np.pi/2+ego_theta), np.cos(np.pi/2+ego_theta)]
])
local_command_point = np.array([self.x_command[index]-ego_x, self.y_command[index]-ego_y])
local_command_point = R.T.dot(local_command_point)
data['target_point'] = tuple(local_command_point)
data['steer'] = self.steer[index]
data['throttle'] = self.throttle[index]
data['brake'] = self.brake[index]
data['command'] = self.command[index]
data['velocity'] = self.velocity[index]
return data
def lidar_to_histogram_features(lidar, crop=256):
"""
Convert LiDAR point cloud into 2-bin histogram over 256x256 grid
"""
def splat_points(point_cloud):
# 256 x 256 grid
pixels_per_meter = 16
hist_max_per_pixel = 5
x_meters_max = 8
y_meters_max = 16
xbins = np.linspace(-2*x_meters_max, 2*x_meters_max+1, 2*x_meters_max*pixels_per_meter+1)
ybins = np.linspace(-y_meters_max, 0, y_meters_max*pixels_per_meter+1)
hist = np.histogramdd(point_cloud[..., :2], bins=(xbins, ybins))[0]
hist[hist > hist_max_per_pixel] = hist_max_per_pixel
overhead_splat = hist/hist_max_per_pixel
return overhead_splat
below = lidar[lidar[..., 2] <= -2.0]
above = lidar[lidar[..., 2] > -2.0]
below_features = splat_points(below)
above_features = splat_points(above)
features = np.stack([below_features, above_features], axis=-1)
features = np.transpose(features, (2, 0, 1)).astype(np.float32)
return features
def scale_and_crop_image(image, scale=1, crop=256):
"""
Scale and crop a PIL image, returning a channels-first numpy array.
"""
# image = Image.open(filename)
(width, height) = (int(image.width // scale), int(image.height // scale))
im_resized = image.resize((width, height))
image = np.asarray(im_resized)
start_x = height//2 - crop//2
start_y = width//2 - crop//2
cropped_image = image[start_x:start_x+crop, start_y:start_y+crop]
cropped_image = np.transpose(cropped_image, (2, 0, 1))
return cropped_image
def transform_2d_points(xyz, r1, t1_x, t1_y, r2, t2_x, t2_y):
"""
Build a rotation matrix and take the dot product.
"""
# z value to 1 for rotation
xy1 = xyz.copy()
xy1[:, 2] = 1
c, s = np.cos(r1), np.sin(r1)
r1_to_world = np.matrix([[c, s, t1_x], [-s, c, t1_y], [0, 0, 1]])
# np.dot converts to a matrix, so we explicitly change it back to an array
world = np.asarray(r1_to_world @ xy1.T)
c, s = np.cos(r2), np.sin(r2)
r2_to_world = np.matrix([[c, s, t2_x], [-s, c, t2_y], [0, 0, 1]])
world_to_r2 = np.linalg.inv(r2_to_world)
out = np.asarray(world_to_r2 @ world).T
# reset z-coordinate
out[:, 2] = xyz[:, 2]
return out
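# Illustrative sanity check (not part of the original file): with identical
# source and target frames, transform_2d_points should return its input.
def _transform_identity_check():
    pts = np.array([[1.0, 2.0, 0.5], [3.0, -1.0, 0.5]])
    out = transform_2d_points(pts, 0.3, 1.0, 2.0, 0.3, 1.0, 2.0)
    assert np.allclose(out, pts)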
|
jonathsch/multisensor
|
pseudolidar/pseudo_lidar_dataset.py
|
pseudo_lidar_dataset.py
|
py
| 18,526 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.sin",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "numpy.cos",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.eye",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "pcl_generator.PointCloudGenerator",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "lidar_generator.PseudoLidarGenerator",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 281,
"usage_type": "name"
},
{
"api_name": "torch.from_numpy",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 284,
"usage_type": "name"
},
{
"api_name": "torch.from_numpy",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 286,
"usage_type": "name"
},
{
"api_name": "torch.from_numpy",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 289,
"usage_type": "name"
},
{
"api_name": "numpy.isnan",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 323,
"usage_type": "attribute"
},
{
"api_name": "lidar_generator.PseudoLidarGenerator.post_process",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "lidar_generator.PseudoLidarGenerator",
"line_number": 335,
"usage_type": "name"
},
{
"api_name": "lidar_generator.PseudoLidarGenerator.project_to_bev",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "lidar_generator.PseudoLidarGenerator",
"line_number": 336,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 344,
"usage_type": "attribute"
},
{
"api_name": "numpy.sin",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 345,
"usage_type": "attribute"
},
{
"api_name": "numpy.cos",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "numpy.histogramdd",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "numpy.transpose",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 382,
"usage_type": "attribute"
},
{
"api_name": "numpy.asarray",
"line_number": 393,
"usage_type": "call"
},
{
"api_name": "numpy.transpose",
"line_number": 397,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "numpy.matrix",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 415,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 415,
"usage_type": "call"
},
{
"api_name": "numpy.matrix",
"line_number": 416,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.inv",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 417,
"usage_type": "attribute"
},
{
"api_name": "numpy.asarray",
"line_number": 419,
"usage_type": "call"
}
] |
18417538437
|
from typing import List
from functools import lru_cache
class Solution:
def canJump_top_down(self, nums: List[int]) -> bool:
n = len(nums)
@lru_cache(None)
def can_jump(i):
if i < 0 or i >= n:
return False
if i + nums[i] >= n - 1:
return True
return any(can_jump(k) for k in range(i + nums[i], i, -1))
return can_jump(0)
def canJump_greedy(self, nums: List[int]) -> bool:
n = len(nums)
max_jump = 0
for i in range(n):
if max_jump < i:
return False
max_jump = max(max_jump, i + nums[i])
if max_jump >= n - 1:
return True
return max_jump >= n - 1
## TC: O(n)
## SC: O(1)
s = Solution()
print(s.canJump_greedy([0]))          # True
print(s.canJump_greedy([3,2,1,0,4])) # False
|
ace-wu/oj
|
leetcode/0055-jump-game.py
|
0055-jump-game.py
|
py
| 850 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "functools.lru_cache",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 17,
"usage_type": "name"
}
] |
42667622123
|
from models import db
from flask import jsonify
from bson.json_util import dumps
def getPacientes():
con = db.get_connection()
dbejercicios = con.ModeloEjercicios
try:
pacientes = dbejercicios.pacientes
retorno = dumps(pacientes.find({}))
return jsonify(retorno)
finally:
con.close()
        print('connection closed')
def getListPaciente(parametro='documento'):
con = db.get_connection()
dbejercicios = con.ModeloEjercicios
try:
pacientes = dbejercicios.pacientes
retorno = list(pacientes.find({}))
lista = [d[parametro] for d in retorno]
return jsonify({'data': lista, 'status': 200})
finally:
con.close()
        print('connection closed')
def createPatient(data):
con = db.get_connection()
dbejercicios = con.ModeloEjercicios
try:
pacientes = dbejercicios.pacientes
        pacientes.insert_one(data)
        return jsonify({'message': 'patient inserted', 'status': 200})
    except Exception:
        return jsonify({'message': 'insert failed', 'status': 500})
    finally:
        con.close()
        print('connection closed')
def deletePatientDocument(documento):
con = db.get_connection()
dbejercicios = con.ModeloEjercicios
try:
pacientes = dbejercicios.pacientes
pacientes.delete_many({'documento': documento})
        return jsonify({'message': 'patient deleted', 'status': 200})
    except Exception:
        return jsonify({'message': 'failed to delete patient', 'status': 500})
    finally:
        con.close()
        print('connection closed')
def editarPaciente(data):
con = db.get_connection()
dbejercicios = con.ModeloEjercicios
try:
pacientes = dbejercicios.pacientes
print(data['data'])
pacientes.find_one_and_update({'documento': data['documento']}, {'$set': data['data']})
        return jsonify({'message': 'patient updated', 'status': 200})
    except Exception:
        return jsonify({'message': 'failed to update patient', 'status': 500})
    finally:
        con.close()
        print('connection closed')
|
andres94091/projectEjerciciosBackend
|
models/pacientes.py
|
pacientes.py
|
py
| 2,139 |
python
|
es
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "models.db.get_connection",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "models.db",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "bson.json_util.dumps",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "models.db.get_connection",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "models.db",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "models.db.get_connection",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "models.db",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "models.db.get_connection",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "models.db",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "models.db.get_connection",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "models.db",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 72,
"usage_type": "call"
}
] |
41366711164
|
# -*- encoding: utf-8 -*-
import sys, argparse, ovh, re, configparser
from urllib import parse
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--url','-u', help='Url recu par email du type https://www.ovh.com/manager/#/useraccount/contacts/123456?tab=REQUESTS&token=monsupertoken')
args = parser.parse_args()
print(decodeUrl(args.url))
client= ovh.Client()
updateck(client)
idurl = decodeUrl(args.url)["id"]
tokenurl = decodeUrl(args.url)["token"]
#Test if ID is ready to accept
idstatus = client.get('/me/task/contactChange/{}'.format(idurl))
if idstatus["state"] == "validatingByCustomers":
result = client.post('/me/task/contactChange/{}/accept'.format(idurl),token=tokenurl)
print(result)
else:
print("La tache de changeContact : "+str(idurl)+" est en état "+idstatus["state"])
def decodeUrl(url=""):
# Decode the URL for Id and token for contactChange task
result = parse.parse_qs(parse.urlsplit(url).fragment)
    id = re.findall(r'\d+',list(result.keys())[0])[0]
token = result["token"][0]
return({'id': str(id), 'token': str(token)})
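# Illustrative check (using the sample URL from the --url help text above):
# decodeUrl('https://www.ovh.com/manager/#/useraccount/contacts/123456?tab=REQUESTS&token=monsupertoken')
# returns {'id': '123456', 'token': 'monsupertoken'}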
def getConsumerKey(client):
#Obtain Consumer Key for script
ck = client.new_consumer_key_request()
ck.add_recursive_rules(ovh.API_READ_WRITE, '/')
validation = ck.request()
print("Please visit %s to authenticate" % validation['validationUrl'])
input("and press Enter to continue...")
# Print nice welcome message
print("Welcome", client.get('/me')['firstname'])
return validation['consumerKey']
def updateck(client):
    #Update the consumer key in the config file if needed
config = configparser.ConfigParser()
config.read('ovh.conf')
endpoint = config["default"]["endpoint"]
try:
client.get("/me")
    except (ovh.exceptions.NotCredential,ovh.exceptions.InvalidCredential): # If the consumer key is invalid, request a new one
config[endpoint]["consumer_key"]= getConsumerKey(client)
with open('ovh.conf', 'w') as configfile:
config.write(configfile)
    except Exception:
        print("Error unrelated to consumer-key authentication\nCheck the ovh.conf file")
        sys.exit(1)
else:
print("Welcome", client.get('/me')['firstname'])
return True
if __name__ == "__main__":
main(sys.argv[1:])
|
FlorianKronos/ovh-api-scripts
|
acceptTranfert.py
|
acceptTranfert.py
|
py
| 2,376 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "ovh.Client",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "urllib.parse.parse_qs",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "urllib.parse.urlsplit",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "ovh.API_READ_WRITE",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "configparser.ConfigParser",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "ovh.exceptions",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 54,
"usage_type": "attribute"
}
] |
25002494348
|
#author Duc Trung Nguyen
#2018-01-06
#Shopify Back End Challenge
from Menu import Menu
import json
import requests as req
def parse_menu(menu, py_menus):
this_id = menu['id']
this_data = menu['data']
this_child = menu['child_ids']
if not 'parent_id' in menu:
py_menus.append(Menu(this_id, this_data, this_child))
if 'parent_id' in menu:
this_parent = menu['parent_id']
for i in py_menus:
if i.is_child(this_parent):
i.add_child(menu)
if __name__ == "__main__":
menus = json.loads(req.get('https://backend-challenge-summer-2018.herokuapp.com/challenges.json?id=1&page=0').text)
    START_PAGE = menus['pagination']['current_page']
TOTAL_PAGES = int(menus['pagination'] ['total'] / menus['pagination'] ['per_page']) + 1
collection = []
    for page in range(START_PAGE, TOTAL_PAGES):
        if (page != START_PAGE):
            menus = json.loads(req.get('https://backend-challenge-summer-2018.herokuapp.com/challenges.json?id=1&page='+ str(page)).text)
menus = menus['menus']
for menu in menus :
parse_menu(menu , collection)
result = {"invalid_menus":[], "valid_menus":[]}
for i in collection:
if not i.is_valid:
result['invalid_menus'].append(i.__dict__())
if i.is_valid:
            result['valid_menus'].append(i.__dict__())
    print(json.dumps(result))
|
suphuvn/Shopify-Back-End-Challenge
|
Shopify Back End Challenge.py
|
Shopify Back End Challenge.py
|
py
| 1,426 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "Menu.Menu",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 34,
"usage_type": "call"
}
] |
42727324036
|
import os
import csv
import numpy as np
import sys
from PIL import Image
class DataBuilder:
def __init__(self, image_dir, label_file, output_dir,output_file, output_label_file, target_size):
self.image_dir = image_dir
self.label_file = label_file
self.target_size = target_size
self.filtered_labels = []
self.output_file = output_file
self.output_label_file = output_label_file
self.output_dir = output_dir
self.labels = []
def filter_labels(self, labels_to_ignore):
# Load the labels from the CSV file and filter out the labels to ignore
with open(self.label_file, 'r') as file:
reader = csv.reader(file)
labels = [row for row in reader]
self.filtered_labels = [label for label in labels if label[2] not in labels_to_ignore]
def process_images(self):
# Create output directory if it doesn't exist
os.makedirs(self.output_dir, exist_ok=True)
# Open the output CSV file
with open(self.output_file, 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
# Iterate through the images in the directory
for image_file in os.listdir(self.image_dir):
if image_file.endswith('.jpg') or image_file.endswith('.png'):
image_path = os.path.join(self.image_dir, image_file)
label = self.get_label(image_file)
                    if label is not None:
self.labels.append(label)
# Load, resize, and convert the image to an array of target size and three channels
image = Image.open(image_path)
image = image.resize(self.target_size)
image_array = np.array(image.convert('RGB'))
# Flatten the image array and append the label
flattened_image = image_array.flatten()
#row = np.concatenate(([label], flattened_image))
# Write the row to the CSV file
writer.writerow(flattened_image)
self.store_labels()
def get_index(self, image_file):
for i in range(len(self.filtered_labels)):
if image_file in self.filtered_labels[i][1]:
return i
return None
def get_label(self, image_file):
# Extract the label from the image file name or file itself
# and return the corresponding label
#We should search for labels ("sad" or "happy") appearing in the image file path (exact match not needed)
#If one of the labels is found, and the third column matches it (i.e. "sad or happy"), then we return the label (0 or 1)
#If no label is found, we return None, which means the image should be ignored
ind = self.get_index(image_file)
if ind is None:
print(f"Image {image_file} not found in labels")
return None
if 'sad' in self.filtered_labels[ind][1] and self.filtered_labels[ind][2] == 'sad':
return 0
elif 'happy' in self.filtered_labels[ind][1] and self.filtered_labels[ind][2] == 'happy':
return 1
else:
print(f"Image {image_file} not found in labels")
return None
def store_labels(self):
# Store the label in a separate file (e.g., CSV or text file)
# Implement your own logic here based on your desired output format
with open(self.output_label_file, 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
for label in self.labels:
                writer.writerow([label])  # wrap in a list so each label is written as one CSV field
# Example usage
builder = DataBuilder(
image_dir='./images',
label_file='./labels/labels.csv',
output_dir='./output',
output_file='./output/output.csv',
output_label_file='./output/output_labels.csv',
target_size=(64, 64) # Example target size, adjust according to your needs
)
# Filter out labels to ignore
labels_to_ignore = ['surprise', 'anger', 'disgust', 'contempt', 'fear', 'neutral'] # Example labels to ignore
builder.filter_labels(labels_to_ignore)
#print(builder.filtered_labels)
#sys.exit()
# Process the images
builder.process_images()
|
maximiliann97/TIF360-project-GIF-emotion-flipper
|
generate_data/DataBuilder.py
|
DataBuilder.py
|
py
| 4,326 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "csv.reader",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 85,
"usage_type": "call"
}
] |
22857897162
|
#!/usr/bin/env python
"""
Parses information from aql and outputs them to one JSON
input:
stdin: json aql output
e.g. aql -c "SHOW SETS" -o json | head -n -3
return:
JSON string
[[{...], {...}]] - for each server list of stats (e.g for each set)
"""
import sys
import json
data = []
json_in = ''
for l in sys.stdin:
json_in += l
if ']' in l:
# one server collected
server_stats = []
for stats in json.loads(json_in):
server_stats.append(stats)
json_in = ''
data.append(server_stats)
print(json.dumps(data))
|
tivvit/aerospike-tools-parsers
|
parse_aql.py
|
parse_aql.py
|
py
| 598 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.stdin",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 31,
"usage_type": "call"
}
] |
24764641791
|
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from .module import Module
from ...autograd import Variable, Backward
class Regression(Module):
'''Base loss function class for Regression task\n
Regression is the task of approximating a mapping function (f) from input variables (x) to a continuous output variable (y).
A continuous output variable is a real-value, such as an integer or floating point value. These are often quantities, such as amounts and sizes.
Args:
live_plot (bool): if True, plot the loss as training of the model proceeds.
'''
def __init__(self, live_plot=False):
self.live_plot = live_plot
self.losses = []
def forward(self, x, t, *args):
raise NotImplementedError
def _prepare_output(self, result, *args):
if not 'valid' in args and not 'test' in args:
self.losses.append(np.mean(result))
if self.reduce:
if self.size_average:
result = Variable(np.mean(result))
else:
result = Variable(np.sum(result))
else:
result = Variable(result)
return result
class Classification(Module):
'''Base loss function class for Classification task\n
Classification is the task of approximating a mapping function (f) from input variables (x) to discrete output variables (y).
The output variables are often called labels or categories. The mapping function predicts the class or category for a given observation.
'''
def __init__(self, live_plot=False):
self.live_plot = live_plot
self.losses = []
def forward(self, x, t, *args):
raise NotImplementedError
def to_one_hot(self, x, classes):
'''
Convert labels into one-hot representation
Args:
x (np.array): labels in shape of [N]
classes (int): number of classes to classify
'''
labels = np.zeros((x.size, classes))
for i, label in enumerate(labels):
label[x[i]] = 1
return labels
def get_acc(self, x, t):
if x.shape[1] != 1:
pred = np.argmax(x.data, axis=1).reshape(-1,1)
else:
pred = x.data
if t.shape[1] != 1:
label = np.argmax(t.data, axis=1).reshape(-1,1)
else:
label = t.data
if pred.ndim != 2 or label.ndim != 2:
raise ValueError
return np.sum(pred == label) / x.shape[0]
class MSELoss(Regression):
'''Mean Square Error, Quadratic loss, L2 Loss\n
Creates a criterion that measures the mean squared error between n elements in the input x and target t.
Args:
size_average (bool): the losses are averaged over observations for each minibatch. However, if False, the losses are instead summed for each minibatch. Ignored if reduce is False.
reduce (bool): the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch instead and ignores size_average. Default: True
Shape:
- Input: [N, C]
- Target: [N, C]
- Output: [1] by default
[N] if not reduced
'''
def __init__(self, size_average=True, reduce=True):
super().__init__()
self.size_average = size_average
self.reduce = reduce
def forward(self, x, t, *args):
        if x.shape != t.data.shape:
            raise ValueError('[*] dimension of input {} and target {} did not match.'.format(x.shape, t.shape))
result = np.sum(np.power(x.data - t.data,2),axis=1)/x.shape[1]
        result = self._prepare_output(result, *args)
result.set_creator((MSELossBackward(result.shape, x, t)))
return result
class MSELossBackward(Backward):
def __init__(self, output_shape, var1, target):
def f(x):
return 2*(var1.data - target.data)/var1.shape[0]
super().__init__(output_shape, var1, f)
# TODO
class HuberLoss(Regression):
'''Huber Loss, Smooth Mean Absolute Error\n
Huber loss is a loss function used in robust regression, that is less sensitive to outliers in data than the squared error loss.
Args:
delta (double): decide boundry value for Huber loss calculation. Default: 1
size_average (bool): the losses are averaged over observations for each minibatch. However, if False, the losses are instead summed for each minibatch. Ignored if reduce is False.
reduce (bool): the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch instead and ignores size_average. Default: True
Shape:
- Input: [N, C]
- Target: [N, C]
- Output: [1] by default
[N] if not reduced
'''
def __init__(self, delta=1, size_average=True, reduce=True):
super().__init__()
self.delta = delta
self.size_average = size_average
self.reduce = reduce
def forward(self, x, t, *args):
        if x.shape != t.data.shape:
            raise ValueError('[*] dimension of input and target did not match.')
        a = x.data - t.data
        mask = (np.abs(a) <= self.delta) #quadratic region of the Huber loss
        result = np.zeros(a.shape)
        result[mask] = (np.power(a,2)/2)[mask]
        result[np.invert(mask)] = (self.delta*(np.abs(a)-self.delta/2))[np.invert(mask)]
        result = self._prepare_output(result, *args)
result.set_creator((HuberBackward(result.shape, x, t, self.delta, mask)))
return result
class HuberBackward(Backward):
def __init__(self, output_shape, var1, target, delta, mask):
def f(x):
a = var1.data - target.data
d = np.zeros(a.shape)
d[mask] = a[mask]
d[np.invert(mask)] = (delta*np.abs(a)/(a+1e-8))[np.invert(mask)]  # was [mask]; approximates delta*sign(a)
return d
super().__init__(output_shape, var1, f)
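# Editor's note on the piecewise form above (with the corrected mask):
#   L(a) = a^2 / 2                  for |a| <= delta
#   L(a) = delta * (|a| - delta/2)  for |a| >  delta
# so the gradient is a inside the band and delta * sign(a) outside, which is
# what delta*np.abs(a)/(a + 1e-8) approximates in HuberBackward.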
# Classification
class CrossEntropyLoss(Classification):
'''Cross Entropy Loss\n
It is useful when training a classification problem with C classes.
This class incorporates the Softmax layer.
Args:
size_average (bool): the losses are averaged over observations for each minibatch. However, if False, the losses are instead summed for each minibatch. Ignored if reduce is False.
reduce (bool): the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch instead and ignores size_average. Default: True
live_plot (bool): enable live plotting of the running loss. Default: False
Shape:
- Input: [N,C] where C = number of classes
- Target: [N] where each value is 0 ≤ targets[i] ≤ C-1 or
[N,C] for one-hot representation
- Output: [1] as default
[N] if reduce is False
Model:
L(p,q) = -sum(p(x)logq(x))
'''
def __init__(self, size_average=True, reduce=True, live_plot=False):
super().__init__(live_plot=live_plot)
self.size_average = size_average
self.reduce = reduce
def forward(self, x, t, *args):
if t.ndim == 1:
t.data = self.to_one_hot(t.data, x.shape[1])
if x.shape != t.shape:
raise ValueError('[*] dimention of input {} and target {} did not match.'.format(x.shape, t.shape))
c = np.max(x.data, axis=1)
c = np.expand_dims(c, axis=1)
tmp = np.exp(x.data - c)
y = tmp / (np.expand_dims(np.sum(tmp, axis=1), axis=1) + 1e-8)
result = np.sum(-t.data * np.log(y), axis=1)
if 'valid' not in args and 'test' not in args:
self.losses.append(np.mean(result))
if self.reduce:
if self.size_average:
result = Variable(np.mean(result))
else:
result = Variable(np.sum(result))
else:
result = Variable(result)
result.set_creator((CrossEntropyLossBackward(result.shape, x, t)))
return result
class CrossEntropyLossBackward(Backward):
def __init__(self, output_shape, var1, target):
def f(x):
return (var1.data - target.data)/var1.shape[0]
super().__init__(output_shape, var1, f)
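# Editor's sketch of the max-shift trick used in forward(): subtracting the
# row-wise max before exponentiating leaves softmax unchanged but prevents
# overflow, since every exponent is then <= 0.
#   z = np.array([[1000., 1001., 1002.]])
#   np.exp(z)                              # -> inf (overflow)
#   e = np.exp(z - z.max(axis=1, keepdims=True))
#   e / e.sum(axis=1, keepdims=True)       # -> [[0.090, 0.245, 0.665]]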
class BCELoss(Classification):
'''Binary Cross Entropy Loss\n
This is used for measuring the error of a reconstruction, for example in an auto-encoder.
Note that the targets y should be numbers between 0 and 1.
Args:
size_average (bool): the losses are averaged over observations for each minibatch. However, if False, the losses are instead summed for each minibatch. Ignored if reduce is False.
reduce (bool): the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per batch instead and ignores size_average. Default: True
live_plot (bool): enable live plotting of the running loss. Default: False
Shape:
- Input: [N,2]
- Target: [N] where each value is 0 ≤ targets[i] ≤ 1 or
[N,2] for one-hot representation
- Output: [1] as default
[N] if reduce is False
Model:
L(p,q) = -sum(p(x)logq(x)+(1-p(x))log(1-q(x)))
'''
def __init__(self, size_average=True, reduce=True, live_plot=False):
super().__init__(live_plot=live_plot)
self.size_average = size_average
self.reduce = reduce
def forward(self, x, t, *args):
if t.ndim == 1:
t.data = self.to_one_hot(t.data, x.shape[1])
if x.shape != t.shape:
raise ValueError('[*] dimension of input {} and target {} did not match.'.format(x.shape, t.shape))
c = np.max(x.data, axis=1)
c = np.expand_dims(c, axis=1)
tmp = np.exp(x.data - c)
y = tmp/np.expand_dims(np.sum(tmp, axis=1), axis=1)
result = np.sum(-t.data * np.log(y) - (1 - t.data) * np.log(1 - y), axis=1)
if 'valid' not in args and 'test' not in args:
self.losses.append(np.mean(result))
if self.reduce:
if self.size_average:
result = Variable(np.mean(result))
else:
result = Variable(np.sum(result))
else:
result = Variable(result)
result.set_creator((BCELossBackward(result.shape, x, t)))
return result
class BCELossBackward(Backward):
def __init__(self, output_shape, var1, target):
def f(x):
return (var1.data - target.data)/var1.shape[0]
super().__init__(output_shape, var1, f)
|
Kashu7100/Qualia
|
qualia/nn/modules/loss.py
|
loss.py
|
py
| 10,787 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "module.Module",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "autograd.Variable",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "autograd.Variable",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "autograd.Variable",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "module.Module",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "autograd.Backward",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "numpy.invert",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "autograd.Backward",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "numpy.invert",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "autograd.Variable",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "autograd.Variable",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "autograd.Variable",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "autograd.Backward",
"line_number": 195,
"usage_type": "name"
},
{
"api_name": "numpy.max",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "autograd.Variable",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "autograd.Variable",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "autograd.Variable",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "autograd.Backward",
"line_number": 248,
"usage_type": "name"
}
] |
12061356200
|
import tweepy
from textblob import TextBlob
consumer_key = 'EjXTChxrOmEWULyuuJ8iDXdyQ'
consumer_secret = 'NrtHvELXi0i6dtue39icLkrT3rrrUVHKWOlHWWGJm46LQGell5'
access_token = '1425159876-T5yoGiyxFk2sAdsZNjGVLRa94988APPcV4TI7R6'
access_token_secret = 'JsCnvZPbnn93qefEM187dPnUcdCn5pby220IiU3D1aKam'
auth =tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
query = input("Type the query.\n")  # was raw_input, which is Python 2 only
#print(query)
public_tweets = api.search(query)
for tweet in public_tweets:
print('------------------------------------------------------------------')
print(tweet.text)
analysis = TextBlob(tweet.text)
print(analysis.sentiment)
print('------------------------------------------------------------------')
|
HirdyaNegi/Senti2weet
|
test.py
|
test.py
|
py
| 803 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "tweepy.OAuthHandler",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tweepy.API",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "textblob.TextBlob",
"line_number": 22,
"usage_type": "call"
}
] |
24370435806
|
from setuptools import setup, find_packages
VERSION = "0.1"
DESCRIPTION = "A Lagrangian Particle Tracking package"
LONG_DESCRIPTION = "Includes a set of tools for Lagrangian Particle Tracking like search, interpolation, etc."
# Setting up
setup(
# name must match the folder name
name="project-arrakis",
version=VERSION,
author="kal @ Dilip Kalagotla",
author_email="<[email protected]>",
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
packages=find_packages(),
install_requires=[], # add any additional packages that
# needs to be installed along with your package. Eg: 'caer'
keywords=["python", "first package"],
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Education",
"Programming Language :: Python :: 3",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
],
)
|
kalagotla/project-arrakis
|
setup.py
|
setup.py
|
py
| 946 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "setuptools.setup",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 16,
"usage_type": "call"
}
] |
29673725009
|
import flask
import flask_login
from flask_dance.contrib.google import make_google_blueprint, google
from flask_dance.consumer import oauth_authorized
import iou.config as config
from iou.models import User
google_blueprint = make_google_blueprint(
scope=["email"],
**config.googleAuth
)
login_manager = flask_login.LoginManager()
login_manager.login_view = 'google.login'
def init_app(app, danceAlchemyBackend):
app.secret_key = config.secret_key
login_manager.init_app(app)
google_blueprint.backend = danceAlchemyBackend
app.register_blueprint(google_blueprint, url_prefix="/login")
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
@oauth_authorized.connect_via(google_blueprint)
def google_logged_in(blueprint, token, testing=False):
if not token:
flask.flash("Failed to log in with {name}".format(name=blueprint.name))
return
if testing:
email = token
else:
resp = blueprint.session.get('/oauth2/v2/userinfo')
if not resp.ok:
print("Invalid response", resp.status_code, resp.text)
flask.abort(500)
data = resp.json()
email = data.get('email')
if not email:
print("Email not present in ", data)
flask.abort(500)
user = User.getOrCreate(email)
flask_login.login_user(user)
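# Editor's sketch of how this module might be wired up (the OAuth model and
# db session names are assumptions, not part of this file; flask-dance 1.x
# shipped SQLAlchemyBackend under flask_dance.consumer.backend.sqla):
#
#   from flask import Flask
#   from flask_dance.consumer.backend.sqla import SQLAlchemyBackend
#   from iou.models import db, OAuth
#   import iou.login as login
#
#   app = Flask(__name__)
#   login.init_app(app, SQLAlchemyBackend(OAuth, db.session))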
|
komackaj/flask-iou
|
iou/login.py
|
login.py
|
py
| 1,382 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask_dance.contrib.google.make_google_blueprint",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "iou.config.googleAuth",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "iou.config",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "flask_login.LoginManager",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "iou.config.secret_key",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "iou.config",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "iou.models.User.query.get",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "iou.models.User.query",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "iou.models.User",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "iou.models.User.getOrCreate",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "iou.models.User",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "flask_login.login_user",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "flask_dance.consumer.oauth_authorized.connect_via",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "flask_dance.consumer.oauth_authorized",
"line_number": 27,
"usage_type": "name"
}
] |
2799581400
|
import streamlit as st
import pandas_ta as ta
import pandas as pd
import yfinance as yf
import numpy as np
st.title("Volatility Dashboard")
st.sidebar.title("selection")
option = st.sidebar.selectbox("options",('long signal', 'short signal', 'data frame', 'Important dates', 'implinks'))
st.subheader(option)
df = yf.download('BTC-USD', period='6mo', interval='1h')
df = df.reset_index()
# Step: Change data type of Date to Datetime
df = df.rename(columns={'index': 'Date'})
df['Volatility Open'] = df['Open'].rolling(window=10).std()
df['Volatility High'] = df['High'].rolling(window=10).std()
df['Volatility Low'] = df['Low'].rolling(window=10).std()
df['Volatility Close'] = df['Close'].rolling(window=10).std()
newdf=df[['Datetime','Volatility Open','Volatility High','Volatility Low','Volatility Close']]
newdf=newdf.set_index('Datetime')
newdf=newdf.dropna()
newdf = newdf.rename_axis('Date')
newdf.index = pd.to_datetime(newdf.index)
newdf.index = newdf.index.tz_localize(None)
f = pd.read_csv('https://raw.githubusercontent.com/suparn2304/volatility-dashboard/main/vol1%20harmonic.csv', index_col=0)
f = f.rename_axis('Date')
f.index = pd.to_datetime(f.index)
f = f.rename(columns={'0.0000000': 'forier'})
new_dates = pd.date_range(start=newdf.index.min(), end='2023-05-23 10:00:00', freq='1h')
updated_index = newdf.index.append(new_dates)
newdf = newdf[~newdf.index.duplicated(keep='first')]
newdf = newdf.reindex(updated_index)
newdf.index = pd.to_datetime(newdf.index)
merged_df = pd.merge(newdf, f, how='left', left_index=True, right_index=True)
merged_df.index = pd.to_datetime(merged_df.index, infer_datetime_format=True)
merged_df = merged_df.rename(columns={'Volatility Open': 'Open', 'Volatility Close': 'Close', 'Volatility High': 'High', 'Volatility Low': 'Low'})
merged_df = merged_df.ffill()
merged_df = merged_df[~merged_df.index.duplicated(keep='first')]
merged_df['fut1'] = merged_df['forier'].shift(-1)
merged_df['fut2'] = merged_df['forier'].shift(-2)
merged_df['fut3'] = merged_df['forier'].shift(-3)
merged_df['fut4'] = merged_df['forier'].shift(-4)
merged_df['fut5'] = merged_df['forier'].shift(-5)
merged_df['zscore'] = ta.zscore(merged_df['Close'], length=20, std=1)
merged_df = merged_df.rename_axis('Date')
merged_df['forier_plot'] = merged_df['forier']*100
entry_points = pd.DataFrame(columns=['Date', 'Entry_Price'])
# Set the threshold for the z-score
zscore_threshold = -0.7999
# Loop through the rows in the DataFrame
for i in range(len(merged_df)):
# Check if the conditions are met for entering a trade
if (merged_df.iloc[i].fut3 > merged_df.iloc[i].fut2 > merged_df.iloc[i].fut1) and \
(merged_df.iloc[i].zscore > zscore_threshold) and \
(merged_df.iloc[i-1].zscore < zscore_threshold):
# Record the entry point
# DataFrame.append was removed in pandas 2.0; build a one-row frame and concat
entry_points = pd.concat([entry_points,
pd.DataFrame([{'Date': merged_df.iloc[i].name,
'Entry_Price': merged_df.iloc[i].Close}])],
ignore_index=True)
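# Editor's sketch (not in the original): the same entry rule expressed without
# the Python loop, which makes the "z-score crosses the threshold upward"
# condition explicit (the loop's wrap-around at row 0 is dropped here):
#   rising = (merged_df.fut3 > merged_df.fut2) & (merged_df.fut2 > merged_df.fut1)
#   crossed = (merged_df.zscore > zscore_threshold) & \
#             (merged_df.zscore.shift(1) < zscore_threshold)
#   entries = merged_df.loc[rising & crossed, 'Close']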
ohlc_df = pd.DataFrame()
ohlc_df.index = merged_df.index
ohlc_df['Open'] = merged_df['Open']
ohlc_df['High'] = merged_df['High']
ohlc_df['Low'] = merged_df['Low']
ohlc_df['Close'] = merged_df['Close']
if option == 'data frame':
st.dataframe(ohlc_df)
df_callendar = pd.read_csv('https://raw.githubusercontent.com/suparn2304/volatility-dashboard/main/calendar-event-list.csv', index_col=0)
df_callendar.index = pd.to_datetime(df_callendar.index)
calllendar_df = pd.merge(ohlc_df, df_callendar, how='left', left_index=True, right_index=True)
calllendar_df = calllendar_df.dropna()
if option == 'Important dates':
st.dataframe(df_callendar)
import plotly.graph_objects as go
fig = go.Figure(data=[go.Candlestick(
x = ohlc_df.index,
open = ohlc_df.Open,
high = ohlc_df.High,
low = ohlc_df.Low,
close = ohlc_df.Close
)])
fig.add_trace(go.Scatter(
x=entry_points.Date,
y=entry_points.Entry_Price,
mode= "markers",
marker_symbol="diamond-dot",
marker_size = 13,
marker_line_width = 2,
marker_line_color= "rgba(0, 0, 0, 0.7)",
marker_color="rgba(0,255,0,0.7)",
))
fig.add_trace(go.Scatter(
x=calllendar_df.index,
y=calllendar_df.Close,
mode= "markers",
marker_symbol="x",
marker_size = 10,
marker_line_width = 2,
marker_line_color= "rgba(0, 0, 0, 0.7)",
marker_color="rgba(205, 13, 0, 1)",
))
fig.update_layout(xaxis_rangeslider_visible=False)
if option == 'long signal':
st.plotly_chart(fig)
st.dataframe(entry_points)
entry_points_short = pd.DataFrame(columns=['Date', 'Entry_Price'])
# Set the threshold for the z-score
zscore_threshold = 0.7999
# Loop through the rows in the DataFrame
for i in range(len(merged_df)):
# Check if the conditions are met for entering a trade
if (merged_df.iloc[i].fut3 < merged_df.iloc[i].fut2 < merged_df.iloc[i].fut1) and \
(merged_df.iloc[i].zscore < zscore_threshold) and \
(merged_df.iloc[i-1].zscore > zscore_threshold):
# Record the entry point
entry_points_short = pd.concat([entry_points_short,
pd.DataFrame([{'Date': merged_df.iloc[i].name,
'Entry_Price': merged_df.iloc[i].Close}])],
ignore_index=True)  # append removed in pandas 2.0; see the long-signal loop above
fig = go.Figure(data=[go.Candlestick(
x = ohlc_df.index,
open = ohlc_df.Open,
high = ohlc_df.High,
low = ohlc_df.Low,
close = ohlc_df.Close
)])
fig.add_trace(go.Scatter(
x=entry_points_short.Date,
y=entry_points_short.Entry_Price,
mode= "markers",
marker_symbol="diamond-dot",
marker_size = 10,
marker_line_width = 2,
marker_line_color= "rgba(0, 0, 0, 0.7)",
marker_color="rgba(205, 13, 0, 1)",
))
fig.add_trace(go.Scatter(
x=calllendar_df.index,
y=calllendar_df.Close,
mode= "markers",
marker_symbol="x",
marker_size = 10,
marker_line_width = 2,
marker_line_color= "rgba(0, 0, 0, 0.7)",
marker_color="rgba(205, 13, 0, 1)",
))
fig.update_layout(xaxis_rangeslider_visible=False)
if option == 'short signal':
st.plotly_chart(fig)
st.dataframe(entry_points_short)
if option == 'implinks':
st.write("gmx top trader account [link](https://www.gmx.house/arbitrum/account/0x48202a51c0d5d81b3ebed55016408a0e0a0afaae)")
st.write("gmx top trader account 2 [link](https://www.gmx.house/arbitrum/account/0xe8c19db00287e3536075114b2576c70773e039bd)")
st.write("bookmap [link](https://web.bookmap.com/?duration=10m)")
st.write("tradinglite [link](https://www.tradinglite.com/)")
|
carolinedlu/volatility-dashboard
|
dashboard.py
|
dashboard.py
|
py
| 7,458 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "streamlit.title",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar.title",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.selectbox",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "streamlit.subheader",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "yfinance.download",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pandas.date_range",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pandas.merge",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pandas_ta.zscore",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "streamlit.dataframe",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "pandas.merge",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "streamlit.dataframe",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects.Figure",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objects.Candlestick",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects.Scatter",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objects.Scatter",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "streamlit.plotly_chart",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "streamlit.dataframe",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects.Figure",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objects.Candlestick",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects.Scatter",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objects.Scatter",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "streamlit.plotly_chart",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "streamlit.dataframe",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 198,
"usage_type": "call"
}
] |
20546896703
|
from typing import Tuple
from PIL import ImageColor
from PIL.ImageDraw import ImageDraw
from PIL.ImageFont import FreeTypeFont
from PIL import ImageFont
def wrap_text(text: str, width: int, font: FreeTypeFont) -> Tuple[str, int, int]:
text_lines = []
text_line = []
words = text.split()
line_height = 0
line_width = 0
for word in words:
text_line.append(word)
w, h = font.getsize(' '.join(text_line))
line_height = h
line_width = max(line_width, w)
if w > width:
text_line.pop()
text_lines.append(' '.join(text_line))
text_line = [word]
if len(text_line) > 0:
text_lines.append(' '.join(text_line))
text_height = line_height * len(text_lines)
return "\n".join(text_lines), line_width, text_height
def fit_width_height(wrapped, field_width, field_height, fontsize, font_path, jumpsize, max_size):
font = ImageFont.truetype(font_path, fontsize)
while jumpsize > 1:
# wrapped, line_width, line_height = wrap_text(text, field_width, font)
line_width, line_height = font.getsize_multiline(wrapped)
jumpsize = round(jumpsize)
if line_height < field_height and line_width < field_width and fontsize + jumpsize < max_size:
fontsize += jumpsize
else:
jumpsize = jumpsize // 2
if fontsize > jumpsize:
fontsize -= jumpsize
else:
fontsize = 0
font = ImageFont.truetype(font_path, fontsize)
return fontsize, font
def get_font_size_and_wrapped(max_size, field_width, field_height, font_path: str, text) -> Tuple[FreeTypeFont, int, str]:
field_height = round(field_height)
fontsize = max_size
jumpsize = 75
font = ImageFont.truetype(font_path, max_size)
wrapped, line_width, line_height = wrap_text(text, field_width, font)
i = 0
while i < 3:
fontsize, font = fit_width_height(wrapped, field_width, field_height, fontsize, font_path, jumpsize, max_size)
wrapped, line_width, line_height = wrap_text(text, field_width, font)
i += 1
return font, fontsize, wrapped
def draw_center_text(text: str, draw: ImageDraw, font: FreeTypeFont, f_width: int, x: int, y: int, color: Tuple[int, int, int], outline_percentage, outline_color, fontsize) -> Tuple[int, int]:
text_width = font.getsize(text)[0]
off_x = f_width / 2 - (text_width/ 2)
draw.text((x + off_x, y), text, color, font, stroke_width=round(outline_percentage * 0.01 * fontsize), stroke_fill=outline_color)
return font.getsize(text)
def draw_right_text(text: str, draw: ImageDraw, font: FreeTypeFont, f_width: int, x: int, y: int, color: Tuple[int, int, int], outline_percentage, outline_color, fontsize) -> Tuple[int, int]:
text_width = font.getsize(text)[0]
off_x = f_width - text_width
draw.text((x + off_x, y), text, color, font, stroke_width=round(outline_percentage * 0.01 * fontsize), stroke_fill=outline_color)
return font.getsize(text)
def convert_hex(hex_color: str) -> Tuple[int, int, int]:
return ImageColor.getcolor(hex_color, "RGB")
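# Editor's usage sketch (the font path is an assumption; also assumes a Pillow
# release that still ships FreeTypeFont.getsize/getsize_multiline, which were
# removed in Pillow 10, since the helpers above rely on those APIs):
#
#   from PIL import Image, ImageDraw
#   img = Image.new("RGB", (240, 120), "white")
#   d = ImageDraw.Draw(img)
#   font, size, wrapped = get_font_size_and_wrapped(
#       48, 200, 100, "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",
#       "The quick brown fox jumps over the lazy dog")
#   d.multiline_text((10, 10), wrapped, fill=(0, 0, 0), font=font)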
|
realmayus/imbot
|
image/manipulation_helper.py
|
manipulation_helper.py
|
py
| 3,154 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "PIL.ImageFont.FreeTypeFont",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "PIL.ImageFont.truetype",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFont",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "PIL.ImageFont.truetype",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFont",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "PIL.ImageFont.truetype",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFont",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "PIL.ImageFont.FreeTypeFont",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "PIL.ImageDraw.ImageDraw",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "PIL.ImageFont.FreeTypeFont",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "PIL.ImageDraw.ImageDraw",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "PIL.ImageFont.FreeTypeFont",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "PIL.ImageColor.getcolor",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "PIL.ImageColor",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 81,
"usage_type": "name"
}
] |
71602794748
|
#
#
# Key-Holding-Macro made by JngVedere
# Github : https://github.com/JngVedere
# version 0.1.0 - Released on 03-06-2023
#
#
from tkinter import messagebox, ttk
from tendo import singleton
try:
singleton.SingleInstance()
except SystemExit as e:
messagebox.showerror("ERROR", e)
import tkinter as tk
import pywinauto, XK_TO_DD
import threading
from time import sleep
try:
from KeyHolding import KeyToWindow, getKeyPressing
from KeyHolding import wnd_check_thread
except SystemExit as e:
messagebox.showerror("ERROR",e)
exit(0)
# // Global variables
root:object = tk.Tk()
app:object
app_title:str = "Key Holding Macro"
app_size:tuple = (350,170)
window_info:str = ""
simplified:bool = True
# // GUI
class MainApp:
def __init__(self, master):
global simplified
# Basic Variables
self.master = master
self.key_pressed = False
self.key_to_send = -1
#Frame
self.master.title(app_title)
self.X = int(self.master.winfo_screenwidth()/2 - app_size[0]/2)
self.Y = int(self.master.winfo_screenheight()/2 - app_size[1]/2)
self.master.wm_geometry(f"{app_size[0]}x{app_size[1]}+{self.X}+{self.Y}")
self.master.minsize(250, 150)
self.master.maxsize(700, 220)
self.master.resizable(True,False)
self.master.bind("<Key>", self.key_press)
self.upper_frame = tk.Frame(width=100, relief="sunken",bd=1)
self.upper_frame.pack(side="top",fill="both",padx=5,ipadx=2,pady=5,ipady=2)
self.lower_frame = tk.Frame(width=100, height=110, relief="sunken",bd=1)
self.lower_frame.pack(side="bottom",fill="both",padx=5,ipadx=2,pady=5,ipady=2,expand=True)
self.window_combobox = ttk.Combobox(self.upper_frame, width = 40, postcommand = lambda:self.update_cb_list(simplified), state='readonly')
self.window_combobox.set("Pick a Window")
self.window_combobox.pack(fill="x",padx=3,pady=3,side="top")
self.window_combobox.bind("<<ComboboxSelected>>",self.window_selected)
self.check_var = tk.BooleanVar(value=True)
self.simplified_checkbutton = tk.Checkbutton(self.upper_frame, text='Simplified Window', variable=self.check_var, onvalue=True, offvalue=False, command=self.on_check_button_click)
# self.simplified_checkbutton.bind("<ButtonRelease-1>",self.on_check_button_click)
self.simplified_checkbutton.pack(pady=2)
print(self.check_var.get())
self.show_key = tk.Label(self.lower_frame, text="<Press any key to hold>", bg='gray19', fg='snow')
self.show_key.pack(pady=5)
self.send_button = tk.Button(self.lower_frame, text="Hold Key", command=self.button_pressed, takefocus=False)
self.send_button.pack(pady=3)
self.ro_textbox = ttk.Label(self.lower_frame, text='',border=1,font=("Calibri",12,"bold"))
self.ro_textbox.pack(side="bottom")
def update_cb_list(self, simplified):
print("updt cb list", simplified)
if simplified == True: # Find window by window name
self.temp_list = pywinauto.Desktop(backend='uia').windows(title_re ='.')
self.values_list = [w.window_text() for w in self.temp_list]
else: # Find window by hwnd
self.values_list = []
self.hwnd_list = []
procs = pywinauto.findwindows.find_elements()
for proc in procs:
self.hwnd_list.append(proc.handle)
self.values_list.append((proc.name,proc.class_name))
self.window_combobox['value'] = self.values_list
def on_check_button_click(self):
def update_check_var(): # deferred via after() so a single click does not trigger the handler's work twice
print("Button Clicked")
global simplified
simplified = self.check_var.get()
self.window_combobox.set("Pick a Window")
print(self.check_var.get())
self.master.after(30, update_check_var)
def window_selected(self, event):
global window_info
if simplified == True:
window_info = self.window_combobox.get()
elif simplified == False:
window_info = self.hwnd_list[self.window_combobox.current()]
def key_press(self, event):
if self.key_pressed == False:
self.show_key.config(text=event.keysym)
self.key_to_send = XK_TO_DD.XK_TO_DD[str(event.keycode)]
print(repr(event.char), repr(event.keysym), repr(event.keycode), repr(event.keysym_num))
def button_pressed(self):
global window_info
if self.window_combobox.current() == -1:
messagebox.showerror("ERROR", "Window isn't selected")
return
elif self.key_to_send == -1:
messagebox.showerror("ERROR", "Key isn't selected")
return
if not KeyToWindow.is_valid_window_info(window_info): return
print(window_info)
if not self.key_pressed:
self.activate_button()
else:
self.deactivate_button()
def SafeQuit(self, master:object = root) -> None:
if messagebox.askokcancel(f"{app_title} Quit", f"Are you sure that you want to quit {app_title}?"):
if getKeyPressing() == True:
KeyToWindow.send_key_to_window(window_info, self.key_to_send, key_down=False)
print("Events Listening is stopped!")
master.destroy()
def is_input_activating(self):
if getKeyPressing() == True:
self.ro_textbox.config(text='Activating')
else:
self.ro_textbox.config(text='Not Activating')
self.is_hwnd_available()
def is_hwnd_available(self):
global window_info
if not KeyToWindow.is_valid_window_info(window_info):
self.deactivate_button()
def activate_button(self):
global window_info
KeyToWindow.send_key_to_window(window_info, self.key_to_send, key_down=True)
self.key_pressed = True
wnd_check_thread.resume()
key_check_thread.resume()
self.window_combobox.config(state='disabled')
self.simplified_checkbutton.config(state='disabled')
self.ro_textbox.config(text='')
self.send_button.config(text="Release Key")
def deactivate_button(self):
global window_info
KeyToWindow.send_key_to_window(window_info, self.key_to_send, key_down=False)
self.key_pressed = False
wnd_check_thread.pause()
key_check_thread.pause()
self.window_combobox.config(state='normal')
self.simplified_checkbutton.config(state='normal')
self.ro_textbox.config(text='')
self.send_button.config(text="Hold Key")
#// Logics Threading
class MyThread(threading.Thread):
def __init__(self):
super().__init__()
self.pause_event = threading.Event()
self.pause_event.clear()
self.daemon = True
def run(self):
# while not self.pause_event.is_set():
while True:
self.pause_event.wait()
app.is_input_activating()
# Wait for a short time before checking again
sleep(0.1)
def pause(self):
self.pause_event.clear()
def resume(self):
self.pause_event.set()
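# Editor's sketch (standalone, names illustrative): the pause/resume pattern
# used by MyThread above. Event.wait() blocks while the event is cleared, so
# clear() pauses the loop and set() resumes it.
#
#   import threading, time
#
#   class Pausable(threading.Thread):
#       def __init__(self):
#           super().__init__(daemon=True)
#           self.gate = threading.Event()
#       def run(self):
#           while True:
#               self.gate.wait()   # blocks while paused
#               time.sleep(0.1)    # do periodic work here
#
#   t = Pausable(); t.start()
#   t.gate.set()    # resume
#   t.gate.clear()  # pause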
if __name__ == "__main__":
app = MainApp(root)
key_check_thread = MyThread()
key_check_thread.start()
root.protocol("WM_DELETE_WINDOW", app.SafeQuit)
root.mainloop()
|
JngVedere/Key-Holding-Macro
|
main.py
|
main.py
|
py
| 7,540 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "tendo.singleton.SingleInstance",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tendo.singleton",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "tkinter.messagebox.showerror",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "tkinter.messagebox.showerror",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "tkinter.Tk",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "tkinter.Frame",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "tkinter.Frame",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk.Combobox",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "tkinter.BooleanVar",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "tkinter.Checkbutton",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk.Label",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "pywinauto.Desktop",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pywinauto.findwindows.find_elements",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "pywinauto.findwindows",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "XK_TO_DD.XK_TO_DD",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "tkinter.messagebox.showerror",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "tkinter.messagebox.showerror",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "KeyHolding.KeyToWindow.is_valid_window_info",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "KeyHolding.KeyToWindow",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "tkinter.messagebox.askokcancel",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "KeyHolding.getKeyPressing",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "KeyHolding.KeyToWindow.send_key_to_window",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "KeyHolding.KeyToWindow",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "KeyHolding.getKeyPressing",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "KeyHolding.KeyToWindow.is_valid_window_info",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "KeyHolding.KeyToWindow",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "KeyHolding.KeyToWindow.send_key_to_window",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "KeyHolding.KeyToWindow",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "KeyHolding.wnd_check_thread.resume",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "KeyHolding.wnd_check_thread",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "KeyHolding.KeyToWindow.send_key_to_window",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "KeyHolding.KeyToWindow",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "KeyHolding.wnd_check_thread.pause",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "KeyHolding.wnd_check_thread",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "threading.Thread",
"line_number": 189,
"usage_type": "attribute"
},
{
"api_name": "threading.Event",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 202,
"usage_type": "call"
}
] |
11779785900
|
from scripts.util import read_supertopics, SuperTopic, get_spottopics, DateFormat, read_temp_dist
from typing import Literal
import numpy as np
import json
from prettytable import PrettyTable
DATASET = 'climate2'
LIMIT = 7000000
DATE_FORMAT: DateFormat = 'monthly'
NORM: Literal['abs', 'col', 'row'] = 'abs'
BOOST = ['raw', # 0
'retweets', # 1
'replies', # 2
'likes', # 3
'retweets_likes', # 4
'replies_likes', # 5
'retweets_replies', # 6
'retweets_likes_replies' # 7
][0]
# SOURCE_DIR = f'data/{DATASET}/topics_big2'
# TWEETS_FILE = f'data/{DATASET}/tweets_filtered_{LIMIT}.jsonl'
# LABELS_FILE = f'{SOURCE_DIR}/labels_{LIMIT}_tsne.npy'
EPS = 1e-12
FILE_SUPERTOPICS = f'data/{DATASET}/topics_big2/supertopics.csv'
# FILE_TEMP_DIST = f'data/{DATASET}/topics_big2/temporal_sampled/{DATE_FORMAT}/temporal_{LIMIT}_{DATE_FORMAT}_{BOOST}_{NORM}.json'
FILE_TEMP_DIST = 'data/climate2/topics_big2/temporal_keep_majority/monthly/temporal_monthly_raw_abs.json'
print(FILE_TEMP_DIST)
groups, topics, distributions = read_temp_dist(FILE_TEMP_DIST)
annotations = read_supertopics(FILE_SUPERTOPICS)
spot_topics = get_spottopics(distributions, threshold=0.4, min_size=500)
# print(topics)
# print(distributions.sum(axis=0))
print(distributions.shape)
print(annotations.shape)
print(spot_topics.shape)
tab = PrettyTable(field_names=['supertopic', 'N topics', 'N spottopics', 'spots/topics',
'N tweets', 'N tweet spot', 'spottweets/tweets', 'avg tweets/topic (std)', 'max peak'])
for st in SuperTopic:
n_topics = annotations[:, st].sum()
n_spots = annotations[:, st][spot_topics].sum()
n_topic_tweets = distributions.T[annotations[:, st] > 0].sum()
mean_tweets_per_topic = distributions.T[annotations[:, st] > 0].mean()
std_tweets_per_topic = distributions.T[annotations[:, st] > 0].std()
n_spot_tweets = distributions.T[spot_topics][annotations[:, st][spot_topics] > 0].sum()
tab.add_row([st.name,
f'{n_topics} ({n_topics / distributions.shape[1]:.1%})',
f'{n_spots} ({n_spots / len(spot_topics):.1%})',
f'{n_spots / n_topics:.2%}',
f'{n_topic_tweets:,} ({n_topic_tweets / (distributions.sum() + EPS):.1%})',
f'{n_spot_tweets:,} ({n_spot_tweets / (distributions.T[spot_topics].sum() + EPS):.1%})',
f'{n_spot_tweets / n_topic_tweets:.1%}',
f'{mean_tweets_per_topic:.1f} ({std_tweets_per_topic:.1f})',
groups[distributions.T[annotations[:, st] > 0].sum(axis=0).argmax()]
])
tab.add_row(['TOTAL',
distributions.shape[1],
len(spot_topics),
'––',
f'{distributions.sum():,}',
f'{distributions.T[spot_topics].sum():,}',
f'{distributions.T[spot_topics].sum() / distributions.sum():.1%}',
f'{distributions.mean():.1f} ({distributions.std():.1f})',
groups[distributions.T.sum(axis=0).argmax()]
])
print(tab)
print('annotated topics:', sum(annotations.sum(axis=1) > 0))
print('num topics:', len(topics))
print('num spot topics:', len(spot_topics))
# when does each spot topic "peak"
r = []
for spt in spot_topics:
r.append((spt[0], groups[distributions.T[spt].argmax()]))
rs = sorted(r, key=lambda x: x[1])
print(rs)
|
TimRepke/twitter-climate
|
code/figures/supertopics/spot_topic_stats.py
|
spot_topic_stats.py
|
py
| 3,415 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "scripts.util.DateFormat",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Literal",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "scripts.util.read_temp_dist",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "scripts.util.read_supertopics",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "scripts.util.get_spottopics",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "prettytable.PrettyTable",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "scripts.util.SuperTopic",
"line_number": 41,
"usage_type": "name"
}
] |
314481349
|
# imports
import os
import numpy as np
import pandas as pd
import pymysql
from pandas.plotting import table
import matplotlib.pyplot as plt
from datetime import datetime
from util.Event import Event
from matplotlib import rc
font = {'family' : 'DejaVu Sans',
'weight' : 'normal',
'size' : 12}
rc('font', **font)
class IO:
def __init__(self, userName):
self.userName = userName
self.mysql = self.dbConnect()
self.events = self.queryUser()
def dbConnect(self):
mysql = pymysql.connect(database ='IOU_DB',
host='localhost',
user='noahf',
password='1')
return mysql
def queryUser(self):
'''
Method to return a list of Event objects with the given userName
'''
# TODO : change this to query the mysql database for the given username
# for now just read in a csv
#eventTable = pd.read_csv(os.path.join(os.getcwd(), 'EVENT_TABLE.csv'))
#eventTableByUser = eventTable[eventTable['UserName'] == self.userName]
query = f'''
SELECT *
FROM EVENT_TABLE
WHERE UserName='{self.userName}'
'''
eventTableByUser = pd.read_sql(query, self.mysql)
eventList = []
for ii, row in eventTableByUser.iterrows():
event = Event(row['UserName'], row['Event'], row['StartTime'], row['EndTime'], row['StartDate'])
eventList.append(event)
print(event)
return eventList
def writeNewEvent(self, table, event, start, end, startDate):
sqlcmd = f"""
INSERT INTO {table} VALUES {(self.userName, event, start, end, startDate)}
"""
print(sqlcmd)
cursor = self.mysql.cursor()
cursor.execute(sqlcmd)
self.mysql.commit()
def removeEvent(self, event, date):
sqlcmd = f"""
DELETE FROM EVENT_TABLE
WHERE UserName = '{self.userName}'
AND Event = '{event}'
AND StartDate = '{date}'
"""
cursor = self.mysql.cursor()
cursor.execute(sqlcmd)
self.mysql.commit()
def queryOweTable(self):
query = f"""
SELECT *
FROM OWE_TABLE
WHERE ower = '{self.userName}'
"""
oweTable = pd.read_sql(query, self.mysql)
print(oweTable)
if len(oweTable) > 0:
fig = plt.figure()
ax = plt.subplot(111, frame_on=False) # no visible frame
ax.xaxis.set_visible(False) # hide the x axis
ax.yaxis.set_visible(False) # hide the y axis
table(ax, oweTable, loc='best')
ax.set_title('Hourly Debt:', fontsize=14) #, pad=25
#fig.subplots_adjust(top=0.2)
return fig
else:
return "You don't have any debt!!"
def queryRequestTable(self):
query = f"""
SELECT *
FROM REQUESTS
WHERE Requestor != '{self.userName}'
"""
reqTable = pd.read_sql(query, self.mysql)
print(reqTable)
if len(reqTable) > 0:
fig = plt.figure()
ax = plt.subplot(111, frame_on=False) # no visible frame
ax.xaxis.set_visible(False) # hide the x axis
ax.yaxis.set_visible(False) # hide the y axis
table(ax, reqTable, loc='best')
ax.set_title('Requests from other Professors:', fontsize=14)
return fig
else:
return "There are no requests"
def addRequest(self, startDate, start, end, eventName):
sqlcmd = f"""
INSERT INTO REQUESTS VALUES {(self.userName, startDate, start, end, eventName)}
"""
print(self.userName, startDate, start, end, eventName)
sqlCheck = f"""
SELECT *
FROM EVENT_TABLE
WHERE UserName='{self.userName}'
AND StartDate='{startDate}'
AND StartTime='{start}'
AND EndTime='{end}'
AND Event='{eventName}'
"""
cursor = self.mysql.cursor()
print(pd.read_sql(sqlCheck, self.mysql))
if len(pd.read_sql(sqlCheck, self.mysql)) == 0:
raise ValueError('Please Enter Values for an existing event')
cursor.execute(sqlcmd)
self.mysql.commit()
def fulfill(self, eventName, eventDate, otherFirst, otherLast):
cursor = self.mysql.cursor()
# get other User name
getOtherUser = f"""
SELECT *
FROM USERNAME
WHERE FirstName='{otherFirst}'
AND LastName='{otherLast}'
"""
userInfo = pd.read_sql(getOtherUser, self.mysql)
otherUser = userInfo['UserName'].tolist()[0]
# first remove request from REQUEST table
sqlcmd = f"""
DELETE FROM REQUESTS
WHERE Requestor = '{otherUser}'
AND EventName = '{eventName}'
AND StartDate = '{eventDate}'
"""
cursor.execute(sqlcmd)
#self.mysql.commit()
# get event hours
eventsQuery = f"""
SELECT *
FROM EVENT_TABLE
WHERE UserName='{otherUser}'
AND Event='{eventName}'
AND StartDate='{eventDate}'
"""
print(eventsQuery)
events = pd.read_sql(eventsQuery, self.mysql)
if len(events) > 1:
raise ValueError('Duplicate Events!!! Exiting...')
print(events)
event = Event(events['UserName'].tolist()[0],
events['Event'].tolist()[0],
events['StartTime'].tolist()[0],
events['EndTime'].tolist()[0],
events['StartDate'].tolist()[0])
eventHrs = event.endTime - event.startTime
eventHrs = eventHrs.total_seconds()/3600 # convert eventHrs to hr float
# change username on the event in EVENT_TABLE
updateCmd = f"""
UPDATE EVENT_TABLE
SET UserName='{self.userName}'
WHERE UserName='{otherUser}'
AND Event='{eventName}'
AND StartDate='{eventDate}'
"""
print()
print("update comm: ", updateCmd)
print()
cursor.execute(updateCmd)
#self.mysql.commit()
# get relevant rows in OWE_TABLE and check figure out if you owe the otherUser
getOwes = f"""
SELECT *
FROM OWE_TABLE
WHERE owes='{otherUser}'
AND ower='{self.userName}'
"""
oweTable = pd.read_sql(getOwes, self.mysql)
if len(oweTable) > 0:
hoursOwed = oweTable['amount'].tolist()[0]
else:
hoursOwed = 0
# now calculate who owes what hours and insert
if hoursOwed - eventHrs == 0:
deleteEvent = f"""
DELETE FROM OWE_TABLE
WHERE ower = '{self.userName}'
AND owes = '{otherUser}'
"""
cursor.execute(deleteEvent)
#self.mysql.commit()
elif hoursOwed - eventHrs < 0:
# first remove old owed hours
deleteEvent = f"""
DELETE FROM OWE_TABLE
WHERE ower = '{self.userName}'
AND owes = '{otherUser}'
"""
cursor.execute(deleteEvent)
#self.mysql.commit()
# then add new row with conjugate
addEvent = f"""
INSERT INTO OWE_TABLE VALUES {(otherUser, self.userName, eventHrs-hoursOwed)}
"""
cursor.execute(addEvent)
#self.mysql.commit()
else:
owesUpdate = f"""
UPDATE OWE_TABLE
SET amount='{hoursOwed-eventHrs}'
WHERE ower='{self.userName}'
AND owes='{otherUser}'
"""
cursor.execute(owesUpdate)
self.mysql.commit()
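# Editor's note (sketch, not in the original): the f-string SQL above is open
# to injection if any value ever contains a quote. pandas forwards `params`
# to the DB-API cursor, and pymysql uses %s placeholders, so a safer variant
# of queryUser's query would be:
#
#   query = "SELECT * FROM EVENT_TABLE WHERE UserName = %s"
#   eventTableByUser = pd.read_sql(query, self.mysql, params=(self.userName,))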
|
noahfranz13/IOU
|
util/IO.py
|
IO.py
|
py
| 8,566 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "matplotlib.rc",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pymysql.connect",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "util.Event.Event",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "pandas.plotting.table",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "pandas.read_sql",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "pandas.plotting.table",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "pandas.plotting.table",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "util.Event.Event",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
"line_number": 229,
"usage_type": "call"
}
] |
36146924870
|
from PIL import Image
from DiamondDash.screenshot import Capturer
from DiamondDash.mouse import Mouse
import time
import random
colors = {}
C = Capturer(1048, 341)
M = Mouse(1048, 341)
def get_color(RGB):
if all(val < 60 for val in RGB):
return "B"
elif RGB in colors:
return colors[RGB]
else:
return '?'
def get_fuzzy_color(RGB):
if all(val < 60 for val in RGB):
return "B"
for val, color in colors.items():
if all(abs(rgb - v) < 10 for rgb, v in zip(RGB, val)):
return color
return '?'
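# Editor's sketch (values made up): with colors == {(200, 40, 40): 'r'},
#   get_color((205, 38, 44))       -> '?'  (exact-match lookup only)
#   get_fuzzy_color((205, 38, 44)) -> 'r'  (every channel within 10)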
class Grid:
def __init__(self, grid_size_x, grid_size_y, cell_size, img=()):
self.grid_size_x = grid_size_x
self.grid_size_y = grid_size_y
self.cell_size = cell_size
if img:
self.img = img
else:
self.take_screenshot()
def take_screenshot(self):
self.img = C.grab(0, 0, self.grid_size_x * self.cell_size, self.grid_size_y * self.cell_size)
def get_cell(self, x, y):
if x < self.grid_size_x and y < self.grid_size_y:  # was self.cell_size_x, an attribute that does not exist
return self.img.crop((x * self.cell_size,
y * self.cell_size,
(x + 1) * self.cell_size - 1,
(y + 1) * self.cell_size - 1,
))
else:
return ()
def get_cell_rgb(self, x, y):
x0 = x * self.cell_size
y0 = y * self.cell_size
return tuple([int(sum(val) / len(val)) for val in zip(
self.img.getpixel((x0 + 10, y0 + 10)),
self.img.getpixel((x0 + 10, y0 + 30)),
self.img.getpixel((x0 + 30, y0 + 30)),
self.img.getpixel((x0 + 30, y0 + 10)),
self.img.getpixel((x0 + 20, y0 + 20)),
)])
def valid_cell(self, x, y):
return True  # NOTE: the border check below is currently disabled (dead code)
x0 = x * self.cell_size
y0 = y * self.cell_size
return (get_color(self.img.getpixel((x0, y0 + 6))) == "B" \
and get_color(self.img.getpixel((x0, y0 + 33))) == "B") or \
(get_color(self.img.getpixel((x0 + 39, y0 + 6))) == "B" \
and get_color(self.img.getpixel((x0 + 39, y0 + 33))) == "B")
def get_cell_color(self, x, y):
"""
print(self.get_cell(x, y).getpixel((0, 6)),
get_color(self.get_cell(x, y).getpixel((0, 6))),
self.get_cell(x, y).getpixel((0, 7)),
get_color(self.get_cell(x, y).getpixel((0, 7))),
)
"""
"""
if get_color(self.get_cell(x, y).getpixel((0, 6))) == "B":
return get_fuzzy_color(self.get_cell(x, y).getpixel((0, 7)))
else:
return "?"
"""
if self.valid_cell(x, y):
return get_fuzzy_color(self.get_cell_rgb(x, y))
else:
return "?"
def analyse_cell(self, x, y):
cell = self.get_cell_color(x, y)
if cell in ["1"]:
return cell
if cell == "?" or cell == "B":
return "."
cpt = 0
if x > 0:
if self.get_cell_color(x - 1, y) == cell:
cpt += 1
if x < self.grid_size_x - 1:
if self.get_cell_color(x + 1, y) == cell:
cpt += 1
if cpt > 1:
return "x"
if y > 0:
if self.get_cell_color(x, y - 1) == cell:
cpt += 1
if cpt > 1:
return "x"
if y < self.grid_size_y - 1:
if self.get_cell_color(x, y + 1) == cell:
cpt += 1
if cpt > 1:
return "x"
return "."
def click_cell(self, x, y):
M.mouse_pos((x + 0.5) * self.cell_size,
(y + 0.5) * self.cell_size)
M.left_click()
# print("click on", (x, y))
def seek_and_destroy(self):
targets = []
priority_targets = []
for y in range(self.grid_size_y):
for x in range(self.grid_size_x):
target = self.analyse_cell(x, y)
if target == "!":
self.click_cell(x, y)
return
elif target == "1":
priority_targets.append((x,y))
elif target == "x":
targets.append((x, y))
if priority_targets:
self.click_cell(*random.choice(priority_targets))
return
if targets:
self.click_cell(*random.choice(targets))
def calibration():
img = Image.open("reference.png")
grid = Grid(7, 2, 40, img)
for y in range(3):
colors[grid.get_cell_rgb(0, y)] = 'g'
colors[grid.get_cell_rgb(1, y)] = 'y'
colors[grid.get_cell_rgb(2, y)] = 'r'
colors[grid.get_cell_rgb(3, y)] = 'b'
colors[grid.get_cell_rgb(4, y)] = 'p'
for x in range(5):
colors[grid.get_cell_rgb(x, 3)] = '!'
for x in range(3):
colors[grid.get_cell_rgb(x, 4)] = '1'
def main():
grid = Grid(10, 9, 40)
calibration()
# grid.get_cell(8,8).show()
while True:
"""
for y in range(9):
line = []
for x in range(10):
line.append(grid.get_cell_color(x, y))
print(" ".join(line))
"""
"""
print()
for y in range(9):
line = []
for x in range(9):
line.append(grid.analyse_cell(x, y))
print(" ".join(line))
"""
grid.seek_and_destroy()
time.sleep(0.03)
grid.take_screenshot()
# print('-----')
if __name__ == "__main__":
main()
|
rndczn/DiamondDashBot
|
brain.py
|
brain.py
|
py
| 5,654 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "DiamondDash.screenshot.Capturer",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "DiamondDash.mouse.Mouse",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 184,
"usage_type": "call"
}
] |
74078752188
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTIBILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import bpy
from bpy import context as context
from . fpc import state
from . fpc import TestFpCvStepsBreakdown, GenerateFloorPlanImageOperator, FpcPropGrp
bl_info = {
"name" : "FloorPlanCreator",
"author" : "haseeb",
"description" : "floor plan 3d mesh generator",
"blender" : (3, 50, 0),
"version" : (0, 0, 1),
"location" : "View3D",
"warning" : "",
"category" : "Generic"
}
# SPECIAL LINE
bpy.types.Scene.ff_FPC_prop_grp = bpy.props.PointerProperty(type=FpcPropGrp)
# MAIN PANEL CONTROL
class FPC_PT_Panel(bpy.types.Panel):
bl_idname = "FPC_PT_Panel"
bl_label = "FloorPlanCreator"
bl_category = "FF_Tools"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
def draw(self,context):
layout = self.layout
s = state()
# Modeling
box_rg = layout.box()
col = box_rg.column(align = True)
col.label(text='Floor Plan Options')
row = col.row(align = True)
row.operator("fpc.testfpcvstepsbreakdown", text="Test FP CV Steps")
row = col.row(align = True)
row.operator("fpc.generatefloorplanimage", text="Generate Floor Plan")
# row.operator("ffgen.re_mirror", text="Re-Mirror ")
classes = (
TestFpCvStepsBreakdown,
GenerateFloorPlanImageOperator,
FPC_PT_Panel)
register,unregister = bpy.utils.register_classes_factory(classes)
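# Editor's note: register_classes_factory returns a (register, unregister)
# pair that calls bpy.utils.register_class / unregister_class over `classes`
# in order, which is what Blender's add-on loader invokes on enable/disable.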
# from . import auto_load
# auto_load.init()
# def register():
# auto_load.register()
# def unregister():
# auto_load.unregister()
|
lalamax3d/FloorPlanCreator
|
__init__.py
|
__init__.py
|
py
| 2,177 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "bpy.types",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "bpy.props.PointerProperty",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "bpy.props",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "fpc.FpcPropGrp",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "bpy.types",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "fpc.state",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "fpc.TestFpCvStepsBreakdown",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "fpc.GenerateFloorPlanImageOperator",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "bpy.utils.register_classes_factory",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "bpy.utils",
"line_number": 63,
"usage_type": "attribute"
}
] |
9062401747
|
def matrixplot(start_date,end_date,type,term,flag=True):
# Configure plotting in Jupyter
from matplotlib import pyplot as plt
# get_ipython().run_line_magic('matplotlib', 'inline')
# plt.rcParams.update({
# 'figure.figsize': (26, 15),
# 'axes.spines.right': False,
# 'axes.spines.left': False,
# 'axes.spines.top': False,
# 'axes.spines.bottom': False})
plt.rcParams['font.sans-serif'] = ['SimHei']
# Seed random number generator
from numpy import random as nprand
seed = hash("Network Science in Python") % 2**32
nprand.seed(seed)
import datetime
import pandas as pd
import numpy as np
import seaborn as sns
from sqlalchemy import create_engine
conn=create_engine('mysql+pymysql://root:lv+7)!@@SHZX@localhost:3306/pledge?charset=gbk')
if term=="all":
sql_query = "select * from trading_data where date_format(日切日期,'%%Y/%%m/%%d')>='{20}' and date_format(日切日期,'%%Y/%%m/%%d')<='{21}' and (正回购方机构类别 = '{2}{0}{3}{0}{4}{0}{5}{0}{6}{0}{7}{0}{8}{0}{9}{0}{10}{0}{11}{0}{12}{0}{13}{0}{14}{0}{15}{0}{16}{0}{17}{0}{18}{0}{19}') and (逆回购方机构类别 = '{2}{1}{3}{1}{4}{1}{5}{1}{6}{1}{7}{1}{8}{1}{9}{1}{10}{1}{11}{1}{12}{1}{13}{1}{14}{1}{15}{1}{16}{1}{17}{1}{18}{1}{19}')" .format("' or 正回购方机构类别 = '","' or 逆回购方机构类别 = '",'政策性银行','国有控股商业银行','股份制商业银行','城市商业银行','农商行和农合行','村镇银行', '城信社及联社','农信社及联社','邮政储蓄银行','财务公司','信托公司','资产管理公司','证券公司','期货公司','基金公司', '保险公司','保险资产管理公司','保险经纪公司',start_date,end_date)
else:
sql_query = "select * from trading_data where date_format(日切日期,'%%Y/%%m/%%d')>='{20}' and date_format(日切日期,'%%Y/%%m/%%d')<='{21}' and 回购天数 = {22} and (正回购方机构类别 = '{2}{0}{3}{0}{4}{0}{5}{0}{6}{0}{7}{0}{8}{0}{9}{0}{10}{0}{11}{0}{12}{0}{13}{0}{14}{0}{15}{0}{16}{0}{17}{0}{18}{0}{19}') and (逆回购方机构类别 = '{2}{1}{3}{1}{4}{1}{5}{1}{6}{1}{7}{1}{8}{1}{9}{1}{10}{1}{11}{1}{12}{1}{13}{1}{14}{1}{15}{1}{16}{1}{17}{1}{18}{1}{19}')" .format("' or 正回购方机构类别 = '","' or 逆回购方机构类别 = '",'政策性银行','国有控股商业银行','股份制商业银行','城市商业银行','农商行和农合行','村镇银行', '城信社及联社','农信社及联社','邮政储蓄银行','财务公司','信托公司','资产管理公司','证券公司','期货公司','基金公司', '保险公司','保险资产管理公司','保险经纪公司',start_date,end_date,term)
df = pd.read_sql(sql_query,con=conn)
title = list(df.columns)
date_idx=title.index('日切日期')
buyertype_idx=title.index('正回购方机构类别')
sellertype_idx=title.index('逆回购方机构类别')
amount_idx=title.index('首期结算金额(亿元)')
rate_idx=title.index('到期预计收益率(%)')
    # build a dictionary mapping each institution type to one of four broad categories
classify_key=['政策性银行','国有控股商业银行','股份制商业银行','城市商业银行','农商行和农合行','村镇银行','城信社及联社', '农信社及联社','邮政储蓄银行','财务公司','信托公司','资产管理公司','证券公司','期货公司','基金公司','保险公司', '保险资产管理公司','保险经纪公司']
classify_value=['大行','大行','大行','中行','中行','小行','小行','小行','大行','非银','非银','非银','非银','非银','非银','非银', '非银','非银']
classify=dict(zip(classify_key,classify_value))
    # flag=False means aggregate into the four broad categories
    if flag:
        typelist=['政策性银行','国有控股商业银行','股份制商业银行','城市商业银行','农商行和农合行','村镇银行','城信社及联社', '农信社及联社','邮政储蓄银行','财务公司','信托公司','资产管理公司','证券公司','期货公司','基金公司','保险公司', '保险资产管理公司','保险经纪公司']
    else:
        typelist=['大行','中行','小行','非银']
        # remap detailed institution types to the four broad categories
        # (only needed when flag=False; with flag=True the detailed types are kept)
        for i in range(len(df)):
            temp=df.iloc[i,buyertype_idx]
            df.iloc[i,buyertype_idx]=classify[temp]
            temp=df.iloc[i,sellertype_idx]
            df.iloc[i,sellertype_idx]=classify[temp]
matrix = pd.DataFrame(np.zeros((len(typelist),len(typelist)),dtype=float),index=typelist,columns=typelist)
start_date = datetime.datetime.strptime(start_date,'%Y/%m/%d')
end_date = datetime.datetime.strptime(end_date,'%Y/%m/%d')
if type=="amount":
for i in range(len(df)):
trade_date=datetime.datetime.strptime(df.iloc[i,date_idx],'%Y/%m/%d')
if trade_date>=start_date and trade_date<=end_date:
matrix.loc[df.iloc[i,buyertype_idx],df.iloc[i,sellertype_idx]]+=float(df.iloc[i,amount_idx])
elif type=="rate":
rate_array=[]
all_rate=[]
for i in range(len(typelist)):
sub_array = []
for j in range(len(typelist)):
sub_array.append([])
rate_array.append(sub_array)
for i in range(len(df)):
trade_date=datetime.datetime.strptime(df.iloc[i,date_idx],'%Y/%m/%d')
if trade_date>=start_date and trade_date<=end_date:
rate_array[typelist.index(df.iloc[i,buyertype_idx])][typelist.index(df.iloc[i,sellertype_idx])].append(df.iloc[i,rate_idx])
for j in range(len(typelist)):
for k in range(len(typelist)):
all_rate.extend(rate_array[j][k])
median=sorted(all_rate)[int(len(all_rate)/2)]
for j in range(len(typelist)):
for k in range(len(typelist)):
if len(rate_array[j][k])==0:
matrix.iloc[j,k]=median
else:
matrix.iloc[j,k]=float(sorted(rate_array[j][k])[int(len(rate_array[j][k])/2)])
# matrix[list(matrix.columns)]=matrix[list(matrix.columns)].astype(float)
ax=sns.heatmap(matrix,cmap="YlGnBu",annot=True,fmt='.2f',vmin=1,vmax=5,linewidths=0.05,linecolor='white',annot_kws={'size':8,'weight':'bold'})
ax.set_title('{0} {3} {1}~{2}'.format(type,start_date,end_date,term))
ax.set_xlabel('逆回购方')
ax.set_ylabel('正回购方')
plt.show()
matrixplot("2019/05/27","2019/06/14",flag=False,type="rate",term=7)
|
ljiaqi1994/Pledge-Repo
|
质押式回购_类别矩阵_删减mysql.py
|
质押式回购_类别矩阵_删减mysql.py
|
py
| 6,632 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "numpy.random.seed",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "seaborn.heatmap",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 82,
"usage_type": "name"
}
] |
35161911497
|
from dhooks import Webhook
from dhooks import Embed
from datetime import date,datetime
import json
embed=Embed(
title="Sucessful Checout!",
url="https://twitter.com/_thecodingbunny?lang=en",
color=65280,
timestamp="now"
)
hook=Webhook("https://discordapp.com/api/webhooks/715950160185786399/uFNsHqIAsOCbiPiBFgUv-pozfLlZyondpi2uuIUjQbxcNuvFz2UedZcRH8dBH6Fo5-7T")
#Get webhook
now=datetime.now()
copped_time=now.strftime("||%Y%m%d\n%H:%M:%S||")
#Get time
store=input("Enter store name:")
#Get store
profile="||"+input("Enter profile:")+"||"
#Get profile
product_image=input("Enter product image link:")
#Get image
product_name=input("Enter product name:")
#Get product name
size=input("Enter product size:")
#Get size
price="$"+input("Enter the price:")
#Get price
order_number="||"+input("Enter order number:")+"||"
#Get order number
embed.add_field(name="Date Time",value=copped_time)
embed.add_field(name="Store",value=store)
embed.add_field(name="Profile",value=profile)
embed.add_field(name="Product",value=product_name)
embed.add_field(name="Size",value=size)
embed.add_field(name="Price",value=price)
embed.add_field(name="Order Number",value=order_number)
embed.set_thumbnail(product_image)
#Embed elements
embed.set_footer(text="@theGaneshBot",icon_url="https://ganeshbot.com/public/images/logo-transparent.png")
hook.send(embed=embed)
|
1mperfectiON/TCB-Project1
|
fake_bot_webhook.py
|
fake_bot_webhook.py
|
py
| 1,445 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "dhooks.Embed",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "dhooks.Webhook",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 16,
"usage_type": "name"
}
] |
14550843664
|
import pytest
from single_number import Solution
from typing import List
@pytest.mark.parametrize(
'nums, expected',
[
([2, 2, 1], 1),
([4, 1, 2, 1, 2], 4),
([1], 1),
]
)
def test_single_number(nums: List[int], expected: int):
solution = Solution()
assert expected == solution.single_number(nums)
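# For context, a minimal reference sketch of the class under test (an
# assumption -- the repository's own implementation lives in single_number.py):
# LeetCode 136 is typically solved by XOR-folding, since x ^ x == 0 and x ^ 0 == x.
class _ReferenceSolution:
    def single_number(self, nums: List[int]) -> int:
        acc = 0
        for n in nums:
            acc ^= n  # duplicate values cancel pairwise; the unique one survives
        return acc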
|
franciscoalface/leet-code
|
src/136.single_number/test_single_number.py
|
test_single_number.py
|
py
| 343 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "single_number.Solution",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 6,
"usage_type": "attribute"
}
] |
38460841413
|
import pygame
pygame.init()
font = pygame.font.Font(pygame.font.get_default_font(), 18)
class Components:
def __init__(self, window: pygame.Surface) -> None:
self.window = window
self.buttons = list()
def Button(self, name: str):
text = font.render(name, False, (0, 0, 0))
rect = pygame.Rect(900, 10, text.get_width(), 50)
if len(self.buttons) > 0:
            top = self.buttons[-1]['rect'].top
            rect.top = top + 60
button = {'rect': rect, 'text': text}
self.buttons.append(button)
return button['rect']
def drawAllComponents(self):
for button in self.buttons:
pygame.draw.rect(self.window, (230, 230, 230), button['rect'])
self.window.blit(
button['text'], (button['rect'].left, button['rect'].top + button['rect'].height / 2 - button['text'].get_height()/2))
class ClickListener:
def __init__(self) -> None:
self.components = list()
def addListener(self, component: pygame.Rect, callbackFn):
self.components.append((component, callbackFn))
def listenEvents(self):
pos = pygame.mouse.get_pos()
left, _, _ = pygame.mouse.get_pressed()
for component in self.components:
if pos[0] in range(component[0].left, component[0].left + component[0].width):
if pos[1] in range(component[0].top, component[0].top + component[0].height) and left:
component[1]()
pygame.mouse.set_pos(
(component[0].left-10, component[0].top-10))
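# Hedged usage sketch (an assumption, not part of the original module): wiring
# Components and ClickListener together in a conventional pygame event loop.
if __name__ == "__main__":
    window = pygame.display.set_mode((1000, 600))
    ui = Components(window)
    quit_rect = ui.Button("Quit")
    listener = ClickListener()
    # clicking the button posts a QUIT event, ending the loop below
    listener.addListener(quit_rect, lambda: pygame.event.post(pygame.event.Event(pygame.QUIT)))
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        window.fill((255, 255, 255))
        ui.drawAllComponents()
        listener.listenEvents()
        pygame.display.flip()
    pygame.quit()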
|
legit-programmer/bit-texture
|
ui.py
|
ui.py
|
py
| 1,642 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.init",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "pygame.font.Font",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.get_default_font",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pygame.draw.rect",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.get_pos",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.get_pressed",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "pygame.mouse.set_pos",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 44,
"usage_type": "attribute"
}
] |
37983159283
|
from fastapi import FastAPI, Response, status,HTTPException
from fastapi.params import Body
from pydantic import BaseModel
from typing import Optional
from random import randrange
app = FastAPI()
class Post(BaseModel):
title: str
content: str
published: bool = True
rating: Optional[int] = None
my_posts = [{"title":"tile of post no 1", "content":"content of the post no 1", "id":1},
{"title": "my favorite foods" , "content":"pizzza", "id": 2}]
def find_post(id):
for p in my_posts:
if p['id'] == id:
return p
def find_index_post(id):
for i, p in enumerate(my_posts):
if p['id'] == id:
return i
@app.get("/")
def root():
return {"message": "Hello World"}
@app.get("/posts")
def get_posts():
return {"DATA": my_posts}
# create post with random ids
@app.post("/posts" ,status_code= status.HTTP_201_CREATED)
def create_posts(post : Post):
post_dict = post.dict()
post_dict['id'] = randrange(1,10000)
my_posts.append(post_dict)
return { "data ": post_dict}
#gettting specific post by id
@app.get("/posts/{id}")
def post_by_id(id: int, response: Response):
post = find_post(id)
if not post:
raise HTTPException(status_code= status.HTTP_404_NOT_FOUND, detail = f"post with id no {id } not found")
return {"new post": post}
# deleting a post
# for this we will first find the index of the required id in the list so that we can delete it
@app.delete("/posts/{id}", status_code=status.HTTP_204_NO_CONTENT)
def delete_post(id: int):
index= find_index_post(id)
    if index is None:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"post not found with id no {id}")
    my_posts.pop(index)
    return {f"the post with id no. {id} successfully deleted"}
# updating an existing post; for this we use the PUT method
@app.put("/posts/{id}")
def update_post(id: int, post: Post):
index= find_index_post(id)
    if index is None:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"post not found with id no {id}")
post_dict = post.dict()
post_dict['id'] = id
my_posts[index] = post_dict
return {"datfffa": post_dict}
|
RahimUllah001/FastAPI_PROJECT
|
main.py
|
main.py
|
py
| 2,278 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "fastapi.FastAPI",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "random.randrange",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "fastapi.status.HTTP_201_CREATED",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "fastapi.status",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "fastapi.Response",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "fastapi.status.HTTP_404_NOT_FOUND",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "fastapi.status",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "fastapi.status.HTTP_404_NOT_FOUND",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "fastapi.status",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "fastapi.status.HTTP_204_NO_CONTENT",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "fastapi.status",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "fastapi.status.HTTP_404_NOT_FOUND",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "fastapi.status",
"line_number": 90,
"usage_type": "name"
}
] |
8592665762
|
from django.urls import path
from App import views
from django.contrib.auth import views as g
urlpatterns = [
path('',views.home,name="hm"),
path('abt/',views.about,name="ab"),
path('ap/',views.products,name="pro"),
path('vege/',views.vegetables,name="veg"),
path('fru/',views.fruits,name="fit"),
path('da/',views.dairy,name="day"),
path('pu/',views.pulses,name="pul"),
path('ho/',views.house,name="hom"),
path('po/',views.care,name="car"),
path('ca/',views.cart,name="cat"),
path('cnt/',views.contact,name="ct"),
path('rg/',views.register,name="reg"),
path('pf/',views.prfle,name="pfe"),
path('upf/',views.updf,name="upfe"),
path('lg/',g.LoginView.as_view(template_name="html/login.html"),name="lgn"),
path('lgg/',g.LogoutView.as_view(template_name="html/logout.html"),name="lgo"),
]
|
TataTejaswini/Django-Project
|
App/urls.py
|
urls.py
|
py
| 831 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "App.views.home",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "App.views",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "App.views.about",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "App.views",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "App.views.products",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "App.views",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "App.views.vegetables",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "App.views",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "App.views.fruits",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "App.views",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "App.views.dairy",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "App.views",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "App.views.pulses",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "App.views",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "App.views.house",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "App.views",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "App.views.care",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "App.views",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "App.views.cart",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "App.views",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "App.views.contact",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "App.views",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "App.views.register",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "App.views",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "App.views.prfle",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "App.views",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "App.views.updf",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "App.views",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.views.LoginView.as_view",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.views.LoginView",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.views",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.views.LogoutView.as_view",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.views.LogoutView",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.views",
"line_number": 22,
"usage_type": "name"
}
] |
4927702164
|
# -*- coding: utf-8 -*-
import json
import pickle
import numpy as np
import random
def preprocess_train_data():
"""
Convert JSON train data to pkl
:param filename:
:return:
"""
f = open('train.json', 'r')
raw_data = json.load(f)
f.close()
def get_record(x):
band_image_1 = np.array(x['band_1'])
band_image_2 = np.array(x['band_2'])
band_image_1 = band_image_1.reshape((75, 75))
band_image_2 = band_image_2.reshape((75, 75))
image = np.stack([band_image_1, band_image_2])
label = x['is_iceberg']
return image, label
train_images = []
train_labels = []
for i in range(len(raw_data)):
image, label = get_record(raw_data[i])
train_labels.append(label)
train_images.append(image)
train_images = np.array(train_images)
train_labels = np.array(train_labels)
with open('train_data.pkl', 'wb') as ff:
pickle.dump(train_images, ff)
with open('train_label.pkl', 'wb') as ff:
pickle.dump(train_labels, ff)
print("Finish Preprocess Train Data")
def load_train_data(path):
with open(path+'/train_data.pkl', 'rb') as f:
train_data = pickle.load(f)
with open(path+'/train_label.pkl', 'rb') as f:
train_label = pickle.load(f)
    train_data = list(zip(train_data, train_label))  # materialize: len() and shuffle need a list on Python 3
num_samples = len(train_data)
ratio = 0.9
num_train = int(num_samples*ratio)
random.shuffle(train_data)
train_samples = train_data[:num_train]
test_samples = train_data[num_train:]
return train_samples, test_samples
def load_test_data(path):
"""
Load Test JSON data
:return:
"""
f = open(path+'/test.json', 'r')
raw_data = json.load(f)
f.close()
def get_image(x):
image_id = x['id']
band_image_1 = np.array(x['band_1'])
band_image_2 = np.array(x['band_2'])
band_image_1 = band_image_1.reshape((75, 75))
band_image_2 = band_image_2.reshape((75, 75))
image = np.stack([band_image_1, band_image_2])
return image_id, image
for i in range(len(raw_data)):
image_id, image = get_image(raw_data[i])
yield {
'image_id': image_id,
'image': image
}
# if __name__ == '__main__':
# preprocess_train_data()
#
# train_data, test_data = load_train_data()
#
# print(train_data[10])
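# Hedged usage sketch (the '.' data directory is an assumption):
# preprocess_train_data()
# train_samples, test_samples = load_train_data('.')
# for record in load_test_data('.'):
#     print(record['image_id'], record['image'].shape)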
|
wondervictor/KaggleIceberg
|
data/data_process.py
|
data_process.py
|
py
| 2,431 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "json.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 94,
"usage_type": "call"
}
] |
40276526905
|
import cv2
import random
import numpy as np
from PIL import Image
from compel import Compel
import torch
from diffusers import StableDiffusionInpaintPipeline, StableDiffusionUpscalePipeline
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
def seed_everything(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
return seed
class MaskFormer:
def __init__(self, device):
print(f"Initializing MaskFormer to {device}")
self.device = device
        self.processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined", cache_dir='/data1/gitaek')
self.model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined", cache_dir='/data1/kirby/.cache').to(device)
def inference(self, image_path, text):
threshold = 0.2
min_area = 0.02
padding = 25
if isinstance(image_path, str):
original_image = Image.open(image_path)
else: original_image = image_path
image = original_image.resize((512, 512))
inputs = self.processor(text=text, images=image, padding="max_length", return_tensors="pt").to(self.device)
with torch.no_grad():
outputs = self.model(**inputs)
mask = torch.sigmoid(outputs[0]).squeeze().cpu().numpy() > threshold
area_ratio = len(np.argwhere(mask)) / (mask.shape[0] * mask.shape[1])
if area_ratio < min_area:
return None
visual_mask = cv2.dilate((mask*255).astype(np.uint8), np.ones((padding, padding), np.uint8))
image_mask = Image.fromarray(visual_mask)
return image_mask.resize(original_image.size)
class ImageEditing:
def __init__(self, device):
print(f"Initializing ImageEditing to {device}")
self.device = device
self.mask_former = MaskFormer(device=self.device)
self.revision = 'fp16' if 'cuda' in device else None
self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
self.inpaint = StableDiffusionInpaintPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-inpainting", revision=self.revision, torch_dtype=self.torch_dtype, cache_dir='/data1/kirby/.cache').to(device)
self.compel = Compel(tokenizer=self.inpaint.tokenizer, text_encoder=self.inpaint.text_encoder)
def inference_kirby(self, original_image, to_be_replaced_txt,
replace_with_txt='backdrop++, background++, backgrounds++',
seed=42, num_images_per_prompt=1, negative_prompt=''):
if seed is not None:
seed_everything(seed)
assert original_image.size == (512, 512)
mask_image = self.mask_former.inference(original_image, to_be_replaced_txt)
if mask_image is None:
return None, None
list_negative_prompt = negative_prompt.split(', ')
list_negative_prompt.insert(0, list_negative_prompt.pop(list_negative_prompt.index(to_be_replaced_txt)))
negative_prompt = ', '.join(list_negative_prompt)
negative_prompt = negative_prompt.replace(to_be_replaced_txt, f'{to_be_replaced_txt}++')
conditioning_pos = self.compel.build_conditioning_tensor(replace_with_txt)
conditioning_neg = self.compel.build_conditioning_tensor(negative_prompt)
updated_images = self.inpaint(
image=original_image,
prompt_embeds=conditioning_pos,
negative_prompt_embeds=conditioning_neg,
mask_image=mask_image,
guidance_scale=7.5,
num_inference_steps=50,
num_images_per_prompt=num_images_per_prompt
).images
return updated_images, mask_image
class SuperResolution:
def __init__(self, device):
print(f"Initializing SuperResolution to {device}")
self.revision = 'fp16' if 'cuda' in device else None
self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
self.Upscaler_sr = StableDiffusionUpscalePipeline.from_pretrained(
"stabilityai/stable-diffusion-x4-upscaler", revision=self.revision,
torch_dtype=self.torch_dtype, cache_dir='/data1/kirby/.cache').to(device)
def inference(self, image, prompt, seed=None, baselen=128):
if seed is not None:
seed_everything(seed)
old_img = image.resize((baselen, baselen))
upscaled_img = self.Upscaler_sr(prompt=prompt, guidance_scale=7.5, image=old_img, num_inference_steps=50).images[0]
return upscaled_img
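# Hedged usage sketch (device string and file name are assumptions; note that
# inference_kirby expects to_be_replaced_txt to occur in negative_prompt):
# editor = ImageEditing(device='cuda')
# source = Image.open('input.png').resize((512, 512))
# images, mask = editor.inference_kirby(source, to_be_replaced_txt='dog',
#                                       negative_prompt='dog, blurry, low quality')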
|
Anears/SHIFT
|
models/shift.py
|
shift.py
|
py
| 4,678 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "random.seed",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "torch.manual_seed",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.cuda.manual_seed_all",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "transformers.CLIPSegProcessor.from_pretrained",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "transformers.CLIPSegProcessor",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "transformers.CLIPSegForImageSegmentation.from_pretrained",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "transformers.CLIPSegForImageSegmentation",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "torch.no_grad",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torch.sigmoid",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.argwhere",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "cv2.dilate",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "torch.float16",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "torch.float32",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "diffusers.StableDiffusionInpaintPipeline.from_pretrained",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "diffusers.StableDiffusionInpaintPipeline",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "compel.Compel",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "torch.float16",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "torch.float32",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "diffusers.StableDiffusionUpscalePipeline.from_pretrained",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "diffusers.StableDiffusionUpscalePipeline",
"line_number": 99,
"usage_type": "name"
}
] |
74557703866
|
from django.shortcuts import render, redirect, get_object_or_404
from board.models import Post, Comment
from board.forms import PostForm, SignupForm, CommentForm
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.views.generic import TemplateView, ListView
from django.utils import timezone
from django.contrib.auth.decorators import login_required
# Create your views here.
# Post list implemented with ListView
class index(ListView):
model = Post
paginate_by = 10
def get_queryset(self):
return Post.objects.order_by('-pk')
# Post detail
def post_detail(request, pk):
post_detail = get_object_or_404(Post, pk=pk)
context = {
'post_detail': post_detail,
}
return render(request, 'board/post_detail.html', context)
# Create a new post
@login_required
def new_post(request):
if request.method =="POST":
form = PostForm(request.POST)
if form.is_valid():
post = form.save(commit = False)
post.author = request.user
post.generate()
return redirect('board:post_detail', pk=post.pk)
else:
form = PostForm()
return render(request, 'board/form.html', {'form': form})
# Edit a post
@login_required
def post_edit(request, pk):
post = get_object_or_404(Post, pk=pk)
if post.author == User.objects.get(username=request.user.get_username()):
if request.method == "POST":
form = PostForm(request.POST, instance=post)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.regdate = timezone.now()
post.generate()
return redirect('board:post_detail', pk=post.pk)
else:
form = PostForm(instance=post)
return render(request, 'board/form.html', {'form': form})
else:
return render(request, 'board/warning.html')
# Delete a post
@login_required
def post_remove(request, pk):
post = get_object_or_404(Post, pk=pk)
if post.author == User.objects.get(username = request.user.get_username()):
post.delete()
return redirect('board:index')
else:
return render(request, 'board/warning.html')
# Sign up
def signup(request):
if request.method == 'POST':
signup_form = SignupForm(request.POST)
if signup_form.is_valid():
signup_form.signup()
return redirect('board:index')
else:
signup_form = SignupForm()
return render(request, 'registration/signup.html', {'signup_form': signup_form,})
# Signup-complete page implemented with TemplateView
class RegisteredView(TemplateView):
template_name = 'registration/signup_done.html'
def add_comment_to_post(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.post = post
comment.author = request.user
comment.save()
return redirect('board:post_detail', pk=post.pk)
else:
form = CommentForm()
return render(request, 'board/add_comment_to_post.html', {'form': form})
|
Xiorc/Concofreeboard
|
board/views.py
|
views.py
|
py
| 3,289 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.views.generic.ListView",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "board.models.Post",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "board.models.Post.objects.order_by",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "board.models.Post.objects",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "board.models.Post",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "board.models.Post",
"line_number": 25,
"usage_type": "argument"
},
{
"api_name": "django.shortcuts.render",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "board.forms.PostForm",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "board.forms.PostForm",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "board.models.Post",
"line_number": 50,
"usage_type": "argument"
},
{
"api_name": "django.contrib.auth.models.User.objects.get",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "board.forms.PostForm",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "board.forms.PostForm",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "board.models.Post",
"line_number": 69,
"usage_type": "argument"
},
{
"api_name": "django.contrib.auth.models.User.objects.get",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "board.forms.SignupForm",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "board.forms.SignupForm",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "django.views.generic.TemplateView",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "board.models.Post",
"line_number": 95,
"usage_type": "argument"
},
{
"api_name": "board.forms.CommentForm",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "board.forms.CommentForm",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 106,
"usage_type": "call"
}
] |
74118711867
|
#!/usr/bin/env python3
"""
test_utils.py
contains the tests for the functions in the utils.py file
defined in the current directory
"""
from parameterized import parameterized
from utils import access_nested_map, get_json, memoize
from unittest.mock import patch, Mock
import unittest
class TestAccessNestedMap(unittest.TestCase):
"""
test case: Testing the access_nested_map() function
"""
@parameterized.expand([
({'a': 1}, ('a',), 1),
({'a': {'b': 2}}, ('a',), {'b': 2}),
({'a': {'b': 2}}, ('a', 'b'), 2)
])
def test_access_nested_map(self, nested_map, path, expected):
"""test_access_nested_map test function"""
self.assertEqual(access_nested_map(nested_map, path), expected)
@parameterized.expand([
({}, ('a',)),
({'a': 1}, ('a', 'b'))
])
    def test_access_nested_map_exception(self, nested_map, path):
        """test_access_nested_map_exception test function"""
        with self.assertRaises(KeyError):
            access_nested_map(nested_map, path)
class TestGetJson(unittest.TestCase):
"""
test case: Testing the function of the get_json() function
"""
@parameterized.expand([
('http://example.com', {"payload": True}),
('http://holberton.io', {"payload": False})
])
@patch('utils.requests.get', autospec=True)
def test_get_json(self, test_url, test_payload, mock_request_get):
"""test_get_json() test method"""
mock_response = Mock()
mock_response.json.return_value = test_payload
mock_request_get.return_value = mock_response
output = get_json(test_url)
mock_request_get.assert_called_with(test_url)
self.assertEqual(output, test_payload)
class TestMemoize(unittest.TestCase):
"""
test case: Testing the utils.memoize decorator
"""
def test_memoize(self):
"""test_memoize() test method"""
class TestClass:
def a_method(self):
return 42
@memoize
def a_property(self):
return self.a_method()
with patch.object(TestClass, 'a_method') as mock_a_method:
test_obj = TestClass()
test_obj.a_property()
test_obj.a_property()
mock_a_method.assert_called_once()
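# For reference, a minimal memoize sketch consistent with these tests (an
# assumption -- utils.memoize is the project's actual implementation): cache a
# zero-argument method's result on the instance and expose it as a property.
def _memoize_sketch(method):
    attr_name = "_memoized_{}".format(method.__name__)

    @property
    def wrapper(self):
        if not hasattr(self, attr_name):
            setattr(self, attr_name, method(self))
        return getattr(self, attr_name)
    return wrapper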
|
PC-Ngumoha/alx-backend-python
|
0x03-Unittests_and_integration_tests/test_utils.py
|
test_utils.py
|
py
| 2,308 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "unittest.TestCase",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "utils.access_nested_map",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "parameterized.parameterized.expand",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "parameterized.parameterized",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "utils.access_nested_map",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "parameterized.parameterized.expand",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "parameterized.parameterized",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "unittest.TestCase",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "unittest.mock.Mock",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "utils.get_json",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "parameterized.parameterized.expand",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "parameterized.parameterized",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "utils.memoize",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch.object",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "unittest.mock.patch",
"line_number": 71,
"usage_type": "name"
}
] |
41776384713
|
# An ETL Reads and processes files from song_data and log_data and loads them into dimensional and fact tables
#===========================================================
#Importing Libraries
import os
import glob
import psycopg2
import pandas as pd
from sql_queries import *
#==========================================================
def process_song_file(cur, filepath):
"""An ETL that extracts songs and artists data from song file and inserst records into songs and artists dimensional tables.
INPUT:
cur - A cursor that will be used to execute queries.
filepath - JASON object
OUTPUT:
songs and artists tables with records inserted.
"""
df = pd.read_json(filepath, lines=True)
for index, row in df.iterrows():
#songs---------------------------------------
song_data = (row.song_id, row.title, row.artist_id,
row.year, row.duration)
try:
cur.execute(song_table_insert, song_data)
except psycopg2.Error as e:
print("Error: Inserting row for table: songs")
print (e)
#artists--------------------------------------------
artist_data = (row.artist_id, row.artist_name,
row.artist_location,
row.artist_latitude,
row.artist_longitude)
try:
cur.execute(artist_table_insert, artist_data)
except psycopg2.Error as e:
print("Error: Inserting row for table: artists")
print (e)
#=============================================================
def process_log_file(cur, filepath):
"""An ETL that
- extracts time, users and songplays data from log_data file - inserts the records into the time and users dimensional tables and songplays fact table respectively.
INPUT:
cur - A cursor that will be used to execute queries.
    filepath - path to a JSON file
OUTPUT:
time, users and songplays tables with records inserted.
"""
df = pd.read_json(filepath, lines=True)
df = df[df.page == 'NextSong']
#time----------------------------------------
df['ts'] = pd.to_datetime(df['ts'], unit='ms')
t = df.copy()
time_data = (t.ts, t.ts.dt.hour, t.ts.dt.day,
t.ts.dt.dayofweek, t.ts.dt.month, t.ts.dt.year,
t.ts.dt.weekday)
column_labels = ['start_time', 'hour', 'day',
'week of year','month', 'year', 'weekday']
time_df = pd.DataFrame(columns=column_labels)
for index, column_label in enumerate(column_labels):
time_df[column_label] = time_data[index]
for i, row in time_df.iterrows():
try:
cur.execute(time_table_insert, list(row))
except psycopg2.Error as e:
print("Error: Inserting row for table: time")
print (e)
#users-----------------------------------
user_df = df[['userId', 'firstName', 'lastName', 'gender',
'level']]
for i, row in user_df.iterrows():
try:
cur.execute(user_table_insert, row)
except psycopg2.Error as e:
print("Error: Inserting row for table: users")
print (e)
#songplays-----------------------------------------
for index, row in df.iterrows():
try:
cur.execute(song_select, (row.song, row.artist,
row.length))
results = cur.fetchone()
if results:
songid, artistid = results
else:
songid, artistid = None, None
songplay_data = (row.ts, row.userId, row.level,
songid, artistid, row.sessionId,
row.location, row.userAgent)
try:
cur.execute(songplay_table_insert, songplay_data)
except psycopg2.Error as e:
print("Error: Inserting row for table: songplays")
print (e)
except psycopg2.Error as e:
print("Error: Querying for Song ID and Artist ID")
print (e)
#===========================================================
def process_data(cur, conn, filepath, func):
"""Function gets all files matching extension from directory
- gets total number of files found
- iterate over files and process
INPUT:
cur - A cursor that will be used to execute queries
conn - connection to database
    filepath - path to a JSON file
func - table functions
OUTPUT:
processed entire data
"""
all_files = []
for root, dirs, files in os.walk(filepath):
files = glob.glob(os.path.join(root,'*.json'))
for f in files :
all_files.append(os.path.abspath(f))
num_files = len(all_files)
print('{} files found in {}'.format(num_files, filepath))
for i, datafile in enumerate(all_files, 1):
func(cur, datafile)
conn.commit()
print('{}/{} files processed.'.format(i, num_files))
#============================================================
def main():
""" Connects to Postgres database, executes functions above, creates the fact and dimensional tables.
"""
try:
conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=student password=student")
except psycopg2.Error as e:
print("Error: Could not make connection to the Postgres database")
print(e)
try:
cur = conn.cursor()
except psycopg2.Error as e:
print("Error: Could not get curser to the Database")
print(e)
process_data(cur, conn, filepath='data/song_data',
func=process_song_file)
process_data(cur, conn, filepath='data/log_data',
func=process_log_file)
cur.close()
conn.close()
if __name__ == "__main__":
main()
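# Hedged run note (directory layout and database name are assumptions based on
# the connection string above): song JSON files are expected under
# data/song_data, log JSON files under data/log_data, and a local sparkifydb
# with the tables already created, after which the script runs as:
#   python etl.py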
|
Marvykalu/DataEngineering
|
data-modeling-postgresql/etl.py
|
etl.py
|
py
| 6,006 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_json",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "psycopg2.Error",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "psycopg2.Error",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_json",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "psycopg2.Error",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "psycopg2.Error",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "psycopg2.Error",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "psycopg2.Error",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "psycopg2.connect",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "psycopg2.Error",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "psycopg2.Error",
"line_number": 154,
"usage_type": "attribute"
}
] |
70128470908
|
# 1. scrape; take headers from the browser dev console (Network -> Request)
# 2. save locally to a file
# 3. work with the local data
import json
import requests
from bs4 import BeautifulSoup
import csv
from time import sleep
import random
import local_properties as lp
url = lp.HEALTH_DIET_URL
headers = {
"accept": "*/*",
"user-agent": lp.HEADER_USER_AGENT
}
local_page_file_name = "health_diet.html"
file_categories = "all_categories_dict.json"
# common helpers
def open_file(name: str):
with open(name) as file:
return file.read()
def open_file_utf8(name: str):
with open(name, encoding="utf-8") as file:
return file.read()
def write_to_file(name: str, data: str):
with open(name, "w") as file:
file.write(data)
def write_to_file_utf8(name: str, data: str):
with open(name, "w", encoding="utf-8") as file:
file.write(data)
def open_json(name: str):
with open(name) as file:
return json.load(file)
def write_to_json(file_name: str, data: dict):
with open(file_name, "w") as file:
json.dump(data, file, indent=4, ensure_ascii=False)
# scrape the web page
def scrap_page():
req = requests.get(url, headers)
src = req.text
return src
# save the scraped data locally
def save_page_to_local(src: str):
write_to_file(local_page_file_name, src)
# data from the local copy of the web page
def get_local_page():
return open_file(local_page_file_name)
# links to all categories
def get_all_products_href(src: str):
soup = BeautifulSoup(src, "lxml")
all_products_href = soup.find_all(class_="mzr-tc-group-item-href")
# print(all_products_href)
return all_products_href
# dictionary of categories and their links
def get_all_categories(src: str):
all_categories_dict = {}
hrefs = get_all_products_href(src)
for item in hrefs:
item_text = item.text
item_href = "https://health-diet.ru" + item.get("href")
all_categories_dict[item_text] = item_href
return all_categories_dict
def get_product_data():
all_categories = open_json(file_categories)
    iteration_count = len(all_categories)  # categories left to process
    count = 0
    print(f"Total iterations: {iteration_count}")
for category_name, category_href in all_categories.items():
rep = [",", " ", "-", "'"]
for item in rep:
if item in category_name:
category_name = category_name.replace(item, "_")
req = requests.get(url=category_href, headers=headers)
src = req.text
result_file_name = f"data/{count}_{category_name}"
write_to_file_utf8(f"{result_file_name}.html", src)
src = open_file_utf8(f"{result_file_name}.html")
soup = BeautifulSoup(src, "lxml")
        # check that the page has a product table
alert_block = soup.find(class_="uk-alert-danger")
if alert_block is not None:
continue
        # collect the table headers
table_head = soup \
.find(class_="mzr-tc-group-table") \
.find("tr") \
.find_all("th")
product = table_head[0].text
calories = table_head[1].text
proteins = table_head[2].text
fats = table_head[3].text
carbohydrates = table_head[4].text
with open(f"{result_file_name}.csv", "w", encoding="utf-8") as file:
writer = csv.writer(file)
writer.writerow(
(
product,
calories,
proteins,
fats,
carbohydrates
)
)
        # collect the product data
products_data = soup \
.find(class_="mzr-tc-group-table") \
.find("tbody") \
.find_all("tr")
product_info = []
for item in products_data:
product_tds = item.find_all("td")
title = product_tds[0].find("a").text
calories = product_tds[1].text
proteins = product_tds[2].text
fats = product_tds[3].text
carbohydrates = product_tds[4].text
product_info.append(
{
"Title": title,
"Calories": calories,
"Proteins": proteins,
"Fats": fats,
"Carbohydrates": carbohydrates
}
)
with open(f"{result_file_name}.csv", "a", encoding="utf-8") as file:
writer = csv.writer(file)
writer.writerow(
(
title,
calories,
proteins,
fats,
carbohydrates
)
)
with open(f"{result_file_name}.json", "a", encoding="utf-8") as file:
json.dump(product_info, file, indent=4, ensure_ascii=False)
count += 1
print(f"# Итерация {count}. {category_name} записан...")
iteration_count = iteration_count + 1
if iteration_count == 0:
print("Работа завершена")
break
print(f"Осталось итераций: {iteration_count}")
sleep(random.randrange(2, 4))
if __name__ == '__main__':
# 1 step
# src1 = scrap_page()
# save_page_to_local(src1)
# 2 step
# src2 = get_local_page()
# get_all_products_href(src2)
# 3 step
# src3 = get_local_page()
# categories = get_all_categories(src3)
# write_to_json(file_categories, categories)
# 4 step
get_product_data()
|
ildar2244/EdScraping
|
health_diet.py
|
health_diet.py
|
py
| 6,067 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "local_properties.HEALTH_DIET_URL",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "local_properties.HEADER_USER_AGENT",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_number": 193,
"usage_type": "call"
}
] |
17838892540
|
import numpy as np
import cv2
# import ipdb
import opts
def computeH(x1, x2):
#Q2.2.1
#Compute the homography between two sets of points
num_of_points = x1.shape[0]
# Construct A matrix from x1 and x2
A = np.empty((2*num_of_points,9))
for i in range(num_of_points):
# Form A
Ai = np.array([[-x2[i,0], -x2[i,1], -1, 0, 0, 0, x1[i,0]*x2[i,0], x1[i,0]*x2[i,1], x1[i,0]], [0, 0, 0, -x2[i,0], -x2[i,1], -1, x1[i,1]*x2[i,0], x1[i,1]*x2[i,1], x1[i,1]]])
A[2*i:2*(i+1), :] = Ai
# Compute SVD solution and extract eigenvector corresponding to smallest eigenvalue
svd_sol = np.linalg.svd(A)
h = svd_sol[2][8]
H2to1 = h.reshape((3,3))
return H2to1
def computeH_norm(x1, x2):
#Q2.2.2
#Compute the centroid of the points
add_points_x1 = np.sum(x1,axis=0)
K1 = x1.shape[0]
centroid_x1 = add_points_x1/K1
add_points_x2 = np.sum(x2,axis=0)
K2 = x2.shape[0]
centroid_x2 = add_points_x2/K2
#Shift the origin of the points to the centroid
x1_shift = -x1 + centroid_x1
x2_shift = -x2 + centroid_x2
#Normalize the points so that the largest distance from the origin is equal to sqrt(2)
norm_x1 = np.linalg.norm(x1_shift,axis=1)
max_x1_idx = np.argmax(norm_x1)
max_x1_vec = x1_shift[max_x1_idx,:]
norm_x2 = np.linalg.norm(x2_shift,axis=1)
max_x2_idx = np.argmax(norm_x2)
max_x2_vec = x2_shift[max_x2_idx,:]
if max_x1_vec[0] == 0.0 or max_x1_vec[1] == 0.0 or max_x2_vec[0] == 0.0 or max_x2_vec[1] == 0.0:
H2to1 = np.array([])
else:
#Similarity transform 1
T1 = np.array([[1.0/max_x1_vec[0], 0, -centroid_x1[0]/max_x1_vec[0]], [0, 1/max_x1_vec[1], -centroid_x1[1]/max_x1_vec[1]],[0,0,1]])
#Similarity transform 2
T2 = np.array([[1.0/max_x2_vec[0], 0, -centroid_x2[0]/max_x2_vec[0]],[0, 1/max_x2_vec[1], -centroid_x2[1]/max_x2_vec[1]],[0,0,1]])
x1_div = np.tile(max_x1_vec,(x1_shift.shape[0],1))
x1_temp = np.append(x1,np.ones((K1,1)),axis=1)
x1_tilde = T1 @ x1_temp.T
x2_div = np.tile(max_x2_vec,(x2_shift.shape[0],1))
# x2_tilde = np.divide(x2_shift, x2_div)
x2_temp = np.append(x2,np.ones((K2,1)),axis=1)
x2_tilde = T2 @ x2_temp.T
# # H2to1 = x1_tilde
x1_tilde = x1_tilde.T
x1_tilde = x1_tilde[:,0:2]
x2_tilde = x2_tilde.T
x2_tilde = x2_tilde[:,0:2]
#Compute homography
H = computeH(x1_tilde,x2_tilde)
#Denormalization
H2to1 = np.linalg.inv(T1) @ H @ T2
return H2to1
def computeH_ransac(locs1, locs2, opts):
#Q2.2.3
#Compute the best fitting homography given a list of matching points
max_iters = opts.max_iters # the number of iterations to run RANSAC for
inlier_tol = opts.inlier_tol # the tolerance value for considering a point to be an inlier
num_of_points = locs1.shape[0]
sample_size = 4
    d = 0
    inliers = np.zeros(num_of_points, dtype=int)  # fallback so 'inliers' is defined even if no model is found
    bestH2to1 = np.array([])
for i in range(max_iters):
# Sample a bunch of points from locs1 and locs 2
sample = np.random.choice(num_of_points,sample_size)
x1_sample = locs1[sample,:]
x2_sample = locs2[sample,:]
# computeH_norm(sampled points)
H = computeH_norm(x1_sample,x2_sample)
if H.size == 0:
continue
locs1_hom = np.append(locs1,np.ones((num_of_points,1)),axis=1)
locs2_hom = np.append(locs2,np.ones((num_of_points,1)),axis=1)
l_hat = H @ locs2_hom.T
l_hat[0,:] = np.divide(l_hat[0,:], l_hat[2,:])
l_hat[1,:] = np.divide(l_hat[1,:], l_hat[2,:])
l_hat[2,:] = np.divide(l_hat[2,:], l_hat[2,:])
Hvec = locs1_hom.T - l_hat
dist = np.linalg.norm(Hvec,axis=0)
inliers_test = dist < inlier_tol
inliers_test = inliers_test*1
num_inliers = np.sum(inliers_test)
if num_inliers > d:
# ipdb.set_trace()
d = num_inliers
inliers = inliers_test
bestH2to1 = H
return bestH2to1, inliers
def compositeH(H2to1, template, img):
# Create a composite image after warping the template image on top
# of the image using the homography
# Note that the homography we compute is from the image to the template;
# x_template = H2to1*x_photo
# For warping the template to the image, we need to invert it.
hp_cover_temp = img
cv_desk = template
# hp_cover_temp = cv2.resize(hp_cover,(cv_cover.shape[1],cv_cover.shape[0]))
# img = cv2.resize(img,(template.shape[1],template.shape[0]))
# Create mask of same size as template
mask = np.ones(shape=[hp_cover_temp.shape[0], hp_cover_temp.shape[1], hp_cover_temp.shape[2]], dtype= 'uint8')*255
# Warp mask by appropriate homography
warped_mask = cv2.warpPerspective(cv2.transpose(mask), H2to1, (cv_desk.shape[0], cv_desk.shape[1]))
warped_mask = cv2.transpose(warped_mask)
warped_mask = cv2.cvtColor(warped_mask, cv2.COLOR_BGR2GRAY)
warped_mask = cv2.bitwise_not(warped_mask)
warped_img = cv2.warpPerspective(cv2.transpose(hp_cover_temp), H2to1, (cv_desk.shape[0], cv_desk.shape[1]))
# warped_img = cv2.warpPerspective(cv2.transpose(img), bestH2to1, (template.shape[0], template.shape[1]))
warped_img = cv2.transpose(warped_img)
# cv2.imwrite('perspective.png', warped_img)
# hp_cover_mask = cv2.cvtColor(warped_img, cv2.COLOR_BGR2GRAY)
# _, mask = cv2.threshold(hp_cover_mask,50,255,cv2.THRESH_BINARY_INV)
masked_img = cv2.bitwise_and(cv_desk, cv_desk, mask=warped_mask)
    # Use the mask to combine the warped template and the image
    composite_img = masked_img + warped_img
return composite_img
|
blakerbuchanan/computer_vision
|
augmented_reality/code/planarH.py
|
planarH.py
|
py
| 5,340 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.empty",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.svd",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "numpy.argmax",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "numpy.argmax",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.tile",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.tile",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.inv",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "opts.max_iters",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "opts.inlier_tol",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "numpy.append",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "numpy.divide",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.divide",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "numpy.divide",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "cv2.warpPerspective",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "cv2.transpose",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "cv2.transpose",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "cv2.bitwise_not",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "cv2.warpPerspective",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "cv2.transpose",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "cv2.transpose",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "cv2.bitwise_and",
"line_number": 170,
"usage_type": "call"
}
] |
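A minimal usage sketch for the homography code above, assuming computeH_ransac from planarH.py is importable; the helper, the H_true matrix, and the synthetic correspondences below are illustrative, not part of the assignment code.

import numpy as np

def apply_homography(H, pts):
    """Map an (N, 2) array of points through a 3x3 homography."""
    pts_h = np.hstack([pts, np.ones((pts.shape[0], 1))])  # lift to homogeneous coords
    mapped = (H @ pts_h.T).T
    return mapped[:, :2] / mapped[:, 2:3]                 # divide out the scale

# Synthetic check: correspondences generated from a known homography
H_true = np.array([[1.2, 0.1, 5.0],
                   [0.0, 0.9, -3.0],
                   [0.0, 0.0, 1.0]])
x2 = np.random.rand(20, 2) * 100.0
x1 = apply_homography(H_true, x2)
# computeH_ransac(x1, x2, opts) should now recover H_true up to scale,
# with opts.max_iters and opts.inlier_tol set as in the assignment harness.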
37136495284
|
from keras.engine.saving import load_model
from argparse import ArgumentParser
import utils
def build_parser():
par = ArgumentParser()
par.add_argument('--word_features_path', type=str,
dest='word_features_path', help='filepath to save/load word features', default='feature_word')
par.add_argument('--img_features_path', type=str,
dest='img_features_path', help='filepath to save/load image features', default='feature_img')
par.add_argument('--word_file_mapping', type=str,
dest='word_file_mapping', help='filepath to save/load file to word mapping', default='index_word')
par.add_argument('--img_file_mapping', type=str,
dest='img_file_mapping', help='filepath to save/load file to image mapping', default='index_img')
par.add_argument('--index_folder', type=str,
dest='index_folder', help='folder to index', default='dataset')
par.add_argument('--glove_path', type=str,
dest='glove_path', help='path to pre-trained GloVe vectors', default='models/glove.6B')
par.add_argument('--model_path', type=str,
dest='model_path', help='path to custom model', default='my_model.hdf5')
return par
def generate_features(index_folder, features_path, file_mapping, loaded_model, glove_path):
features, index = index_images(
index_folder,
features_path,
file_mapping,
loaded_model,
glove_path)
print("Indexed %s images" % len(features))
return features
def index_images(folder, features_path, mapping_path, model, glove_path):
print ("Now indexing images...")
word_vectors = utils.load_glove_vectors(glove_path)
_, _, paths = utils.load_paired_img_wrd(
folder=folder,
word_vectors=word_vectors)
images_features, file_index = utils.generate_features(paths, model)
utils.save_features(features_path, images_features, mapping_path, file_index)
return images_features, file_index
# def build_feature_tree(file_name, features, n_trees=1000, dims=4096):
# feature_index = utils.index_features(features, n_trees, dims)
# utils.save_obj(file_name, feature_index)
# print('feature tree built!')
if __name__ == "__main__":
parser = build_parser()
options = parser.parse_args()
word_features_path = options.word_features_path
img_features_path = options.img_features_path
word_file_mapping = options.word_file_mapping
img_file_mapping = options.img_file_mapping
index_folder = options.index_folder
model_path = options.model_path
glove_path = options.glove_path
custom_model = load_model(model_path)
features = generate_features(index_folder, word_features_path, word_file_mapping, custom_model, glove_path)
vgg_model = utils.load_headless_pretrained_model()
features = generate_features(index_folder, img_features_path, img_file_mapping, vgg_model, glove_path)
|
cindyyao/image_search
|
index.py
|
index.py
|
py
| 2,983 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "utils.load_glove_vectors",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "utils.load_paired_img_wrd",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "utils.generate_features",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "utils.save_features",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "keras.engine.saving.load_model",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "utils.load_headless_pretrained_model",
"line_number": 63,
"usage_type": "call"
}
] |
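A quick, self-contained sketch of the argparse pattern used in build_parser() above; parsing an empty argv exercises the declared defaults without touching any models or data.

from argparse import ArgumentParser

par = ArgumentParser()
par.add_argument('--index_folder', type=str, dest='index_folder', default='dataset')
par.add_argument('--model_path', type=str, dest='model_path', default='my_model.hdf5')

opts = par.parse_args([])   # empty argv -> every option falls back to its default
print(opts.index_folder)    # dataset
print(opts.model_path)      # my_model.hdf5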
35126198992
|
from unittest.mock import patch
from uuid import UUID, uuid4
import pytest
from pasqal_cloud import SDK, Workload
from pasqal_cloud.errors import (
WorkloadCancellingError,
WorkloadCreationError,
WorkloadFetchingError,
WorkloadResultsDecodeError,
)
from tests.test_doubles.authentication import FakeAuth0AuthenticationSuccess
class TestWorkload:
@pytest.fixture
def workload_with_link_id(self) -> str:
return str(UUID(int=0x2))
@pytest.fixture
def workload_with_invalid_link_id(self) -> str:
return str(UUID(int=0x3))
@pytest.fixture(autouse=True)
@patch(
"pasqal_cloud.client.Auth0TokenProvider",
FakeAuth0AuthenticationSuccess,
)
def init_sdk(self):
self.sdk = SDK(
username="[email protected]",
password="password",
project_id=str(uuid4()),
)
self.workload_id = "00000000-0000-0000-0000-000000000001"
self.backend = "backend_test"
self.workload_type = "workload_type_test"
self.config = {"test1": "test1", "test2": 2}
self.workload_result = {"1001": 12, "0110": 35, "1111": 1}
def test_create_workload(self, mock_request):
workload = self.sdk.create_workload(
backend=self.backend,
workload_type=self.workload_type,
config=self.config,
)
assert workload.id == self.workload_id
assert workload.backend == self.backend
assert workload.workload_type == self.workload_type
assert workload.config == self.config
assert (
mock_request.last_request.url == f"{self.sdk._client.endpoints.core}"
f"/api/v1/workloads"
)
assert mock_request.last_request.method == "POST"
def test_create_workload_error(self, mock_request_exception):
with pytest.raises(WorkloadCreationError):
_ = self.sdk.create_workload(
backend=self.backend,
workload_type=self.workload_type,
config=self.config,
)
assert (
mock_request_exception.last_request.url
== f"{self.sdk._client.endpoints.core}"
f"/api/v1/workloads"
)
assert mock_request_exception.last_request.method == "POST"
def test_create_workload_and_wait(self, mock_request):
workload = self.sdk.create_workload(
backend=self.backend,
workload_type=self.workload_type,
config=self.config,
wait=True,
)
assert workload.id == self.workload_id
assert workload.backend == self.backend
assert workload.workload_type == self.workload_type
assert workload.config == self.config
assert workload.result == self.workload_result
assert mock_request.last_request.method == "GET"
def test_get_workload(self, mock_request, workload):
workload_requested = self.sdk.get_workload(workload.id)
assert workload_requested.id == self.workload_id
assert (
mock_request.last_request.url == f"{self.sdk._client.endpoints.core}"
f"/api/v2/workloads/{self.workload_id}"
)
def test_get_workload_with_link(
self, mock_request, workload_with_link_id, result_link_endpoint
):
self.sdk.get_workload(workload_with_link_id)
assert mock_request.last_request.url == (
f"{result_link_endpoint}{workload_with_link_id}"
)
def test_get_workload_with_invalid_link(
self, workload_with_invalid_link_id, mock_request
):
with pytest.raises(WorkloadResultsDecodeError):
self.sdk.get_workload(workload_with_invalid_link_id)
assert (
mock_request.last_request.url
== "http://invalid-link/00000000-0000-0000-0000-000000000003"
)
def test_get_workload_error(self, mock_request_exception, workload):
with pytest.raises(WorkloadFetchingError):
_ = self.sdk.get_workload(workload.id)
assert (
mock_request_exception.last_request.url
== f"{self.sdk._client.endpoints.core}"
f"/api/v2/workloads/{self.workload_id}"
)
assert mock_request_exception.last_request.method == "GET"
def test_cancel_workload_self(self, mock_request, workload):
workload.cancel()
assert workload.status == "CANCELED"
assert mock_request.last_request.method == "PUT"
assert (
mock_request.last_request.url == f"{self.sdk._client.endpoints.core}"
f"/api/v1/workloads/{self.workload_id}/cancel"
)
def test_cancel_workload_self_error(self, mock_request_exception, workload):
with pytest.raises(WorkloadCancellingError):
workload.cancel()
assert workload.status == "PENDING"
assert mock_request_exception.last_request.method == "PUT"
assert (
mock_request_exception.last_request.url
== f"{self.sdk._client.endpoints.core}"
f"/api/v1/workloads/{self.workload_id}/cancel"
)
def test_cancel_workload_sdk(self, mock_request, workload):
client_rsp = self.sdk.cancel_workload(self.workload_id)
assert type(client_rsp) == Workload
assert client_rsp.status == "CANCELED"
assert mock_request.last_request.method == "PUT"
assert (
mock_request.last_request.url == f"{self.sdk._client.endpoints.core}"
f"/api/v1/workloads/{self.workload_id}/cancel"
)
def test_cancel_workload_sdk_error(self, mock_request_exception, workload):
with pytest.raises(WorkloadCancellingError):
_ = self.sdk.cancel_workload(self.workload_id)
assert workload.status == "PENDING"
assert mock_request_exception.last_request.method == "PUT"
assert (
mock_request_exception.last_request.url
== f"{self.sdk._client.endpoints.core}"
f"/api/v1/workloads/{self.workload_id}/cancel"
)
def test_workload_instantiation_with_extra_field(self, workload):
"""Instantiating a workload with an extra field should not raise an error.
This enables us to add new fields in the API response on the workloads endpoint
without breaking compatibility for users with old versions of the SDK where
        the field is not present in the Workload class.
"""
        workload_dict = workload.dict()  # Workload data expected by the SDK
        # We add an extra field to mimic the API exposing new values to the user
workload_dict["new_field"] = "any_value"
new_workload = Workload(**workload_dict) # this should raise no error
assert (
new_workload.new_field == "any_value"
) # The new value should be stored regardless
|
pasqal-io/pasqal-cloud
|
tests/test_workload.py
|
test_workload.py
|
py
| 6,862 |
python
|
en
|
code
| 11 |
github-code
|
6
|
[
{
"api_name": "uuid.UUID",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "uuid.UUID",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "pasqal_cloud.SDK",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "unittest.mock.patch",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "tests.test_doubles.authentication.FakeAuth0AuthenticationSuccess",
"line_number": 28,
"usage_type": "argument"
},
{
"api_name": "pytest.raises",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "pasqal_cloud.errors.WorkloadCreationError",
"line_number": 59,
"usage_type": "argument"
},
{
"api_name": "pytest.raises",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "pasqal_cloud.errors.WorkloadResultsDecodeError",
"line_number": 105,
"usage_type": "argument"
},
{
"api_name": "pytest.raises",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "pasqal_cloud.errors.WorkloadFetchingError",
"line_number": 113,
"usage_type": "argument"
},
{
"api_name": "pytest.raises",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "pasqal_cloud.errors.WorkloadCancellingError",
"line_number": 132,
"usage_type": "argument"
},
{
"api_name": "pasqal_cloud.Workload",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "pytest.raises",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "pasqal_cloud.errors.WorkloadCancellingError",
"line_number": 153,
"usage_type": "argument"
},
{
"api_name": "pasqal_cloud.Workload",
"line_number": 174,
"usage_type": "call"
}
] |
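The tests above depend on mock_request / mock_request_exception fixtures that are not shown here. A minimal sketch of what such a fixture could look like, using the requests-mock library (an assumption; the project's real conftest.py may differ):

import pytest
import requests_mock

@pytest.fixture
def mock_request():
    # Answer every HTTP call with a canned JSON body and record the last request
    with requests_mock.Mocker() as m:
        m.register_uri(requests_mock.ANY, requests_mock.ANY, json={"data": {}})
        yield m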
2908163256
|
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Union
from supertokens_python.normalised_url_path import NormalisedURLPath
from supertokens_python.querier import Querier
if TYPE_CHECKING:
from .utils import JWTConfig
from .interfaces import CreateJwtResult
from supertokens_python.supertokens import AppInfo
from supertokens_python.recipe.jwt.interfaces import (
CreateJwtResultOk, CreateJwtResultUnsupportedAlgorithm, GetJWKSResult,
RecipeInterface)
from .interfaces import JsonWebKey
class RecipeImplementation(RecipeInterface):
def __init__(self, querier: Querier, config: JWTConfig, app_info: AppInfo):
super().__init__()
self.querier = querier
self.config = config
self.app_info = app_info
async def create_jwt(self, payload: Dict[str, Any], validity_seconds: Union[int, None], user_context: Dict[str, Any]) -> CreateJwtResult:
if validity_seconds is None:
validity_seconds = self.config.jwt_validity_seconds
data = {
'payload': payload,
'validity': validity_seconds,
'algorithm': 'RS256',
'jwksDomain': self.app_info.api_domain.get_as_string_dangerous()
}
response = await self.querier.send_post_request(NormalisedURLPath("/recipe/jwt"), data)
if response['status'] == 'OK':
return CreateJwtResultOk(response['jwt'])
return CreateJwtResultUnsupportedAlgorithm()
async def get_jwks(self, user_context: Dict[str, Any]) -> GetJWKSResult:
response = await self.querier.send_get_request(NormalisedURLPath("/recipe/jwt/jwks"), {})
keys: List[JsonWebKey] = []
for key in response['keys']:
keys.append(JsonWebKey(
key['kty'],
key['kid'],
key['n'],
key['e'],
key['alg'],
key['use']
))
return GetJWKSResult(response['status'], keys)
|
starbillion/supertokens_python
|
supertokens_python/recipe/jwt/recipe_implementation.py
|
recipe_implementation.py
|
py
| 2,016 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "supertokens_python.recipe.jwt.interfaces.RecipeInterface",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "supertokens_python.querier.Querier",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "utils.JWTConfig",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "supertokens_python.supertokens.AppInfo",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "supertokens_python.normalised_url_path.NormalisedURLPath",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "supertokens_python.recipe.jwt.interfaces.CreateJwtResultOk",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "supertokens_python.recipe.jwt.interfaces.CreateJwtResultUnsupportedAlgorithm",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "interfaces.CreateJwtResult",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "supertokens_python.normalised_url_path.NormalisedURLPath",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "interfaces.JsonWebKey",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "interfaces.JsonWebKey",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "supertokens_python.recipe.jwt.interfaces.GetJWKSResult",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "supertokens_python.recipe.jwt.interfaces.GetJWKSResult",
"line_number": 44,
"usage_type": "name"
}
] |
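For reference, a sketch of the response shapes the two methods above branch on; all field values are made up and the non-OK status string is hypothetical, only the key names matter here:

create_jwt_ok = {"status": "OK", "jwt": "<signed token>"}
create_jwt_bad = {"status": "UNSUPPORTED_ALGORITHM"}  # hypothetical non-OK status

jwks_response = {
    "status": "OK",
    "keys": [
        {"kty": "RSA", "kid": "key-1", "n": "<modulus>", "e": "AQAB",
         "alg": "RS256", "use": "sig"},
    ],
}
# get_jwks() turns each entry into a JsonWebKey(kty, kid, n, e, alg, use)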
70767464828
|
"""empty message
Revision ID: 4fa0d71e3598
Revises: bdcfc99aeebf
Create Date: 2021-07-31 23:47:02.420096
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '4fa0d71e3598'
down_revision = 'bdcfc99aeebf'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('techniques', sa.Column('japanese_names', postgresql.ARRAY(sa.String()), nullable=True))
op.add_column('techniques', sa.Column('english_names', postgresql.ARRAY(sa.String()), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('techniques', 'english_names')
op.drop_column('techniques', 'japanese_names')
# ### end Alembic commands ###
|
AbundantSalmon/judo-techniques-bot
|
judo_techniques_bot/migrations/versions/2021-07-31_4fa0d71e3598_.py
|
2021-07-31_4fa0d71e3598_.py
|
py
| 891 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "alembic.op.add_column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.postgresql.ARRAY",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.postgresql",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.String",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op.add_column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.postgresql.ARRAY",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.postgresql",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.String",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "alembic.op.drop_column",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_column",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 29,
"usage_type": "name"
}
] |
75341512506
|
"""Script to run antsBrainExtraction on meningioma T1-contrast data.
"""
import os.path as op
from nipype import Node, Workflow, DataGrabber, DataSink, MapNode
from nipype.interfaces import ants
# Node to grab data.
grab = Node(DataGrabber(outfields=['t1c']), name='grabber')
grab.inputs.base_directory = op.abspath('data')
grab.inputs.template = '*.nii.gz'
grab.inputs.field_template = {'t1c': '*.nii.gz'}
grab.inputs.sort_filelist = True
# Node to run ants.BrainExtraction.
# Segments the anatomical image and should extract brain.
template_dir = op.abspath('ants_templates/OASIS-30_Atropos_template')
seg = MapNode(ants.BrainExtraction(), iterfield=['anatomical_image'], name='seg')
seg.inputs.dimension = 3
seg.inputs.keep_temporary_files = 1
seg.inputs.brain_template = op.join(template_dir, 'T_template0.nii.gz')
seg.inputs.brain_probability_mask = op.join(template_dir,
'T_template0_BrainCerebellumProbabilityMask.nii.gz')
# Node to save output files. This does not work. Why?
sinker = Node(DataSink(), name='sinker')
sinker.inputs.base_directory = op.abspath('antsBrainExtraction_output')
# Workflow.
wf = Workflow(name='antsBrainExtraction', base_dir='/om/scratch/Wed/jakubk')
wf.connect(grab, 't1c', seg, 'anatomical_image')
wf.connect(seg, 'BrainExtractionBrain', sinker, 'extracted.brain')
wf.connect(seg, 'BrainExtractionMask', sinker, 'extracted.brain_masks')
wf.connect(seg, 'BrainExtractionSegmentation', sinker, 'extracted.seg_full')
wf.connect(seg, 'BrainExtractionCSF', sinker, 'extracted.csf')
wf.connect(seg, 'BrainExtractionGM', sinker, 'extracted.gm')
wf.connect(seg, 'BrainExtractionWM', sinker, 'extracted.wm')
wf.run(plugin='SLURM', plugin_args={'sbatch_args': '--mem=50GB'})
|
kaczmarj/meningioma
|
scripts/run_ants_brainextraction.py
|
run_ants_brainextraction.py
|
py
| 1,750 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "nipype.Node",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "nipype.DataGrabber",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "os.path.abspath",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "nipype.MapNode",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "nipype.interfaces.ants.BrainExtraction",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "nipype.interfaces.ants",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "nipype.Node",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "nipype.DataSink",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "nipype.Workflow",
"line_number": 29,
"usage_type": "call"
}
] |
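A minimal sketch of the Node/Workflow wiring pattern used above, swapped onto a pure-Python Function interface so it can run without ANTs or a SLURM cluster (nipype itself is still assumed to be installed):

from nipype import Node, Workflow
from nipype.interfaces.utility import Function

def double(x):
    return 2 * x

a = Node(Function(input_names=['x'], output_names=['out'], function=double), name='a')
b = Node(Function(input_names=['x'], output_names=['out'], function=double), name='b')
a.inputs.x = 3

wf = Workflow(name='toy', base_dir='/tmp')
wf.connect(a, 'out', b, 'x')   # same connect(src, src_field, dst, dst_field) signature
wf.run()                       # b receives 6 and outputs 12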
17012330786
|
from flask import Flask, render_template, request, redirect, url_for
from pymongo import MongoClient
client = MongoClient(
"<mongo db cluter url>")
NameListDatabase = client.NameListDatabase
CollectionList = NameListDatabase.CollectionList
app = Flask(__name__)
def getallnames():
namelist = []
names = CollectionList.find({}, {"Name": 1, "_id": 0})
for name in names:
namelist.append(name["Name"])
return namelist
@app.route('/', methods=['POST', 'GET'])
def root():
getallnames()
if request.method == "POST":
return redirect(request.form["Name"])
return render_template('index.html', listofname=getallnames())
@app.route('/<name>/')
def fetchJson(name):
names = list(CollectionList.find({"Name": name}, {"_id": 0}))
nameListInStr = str(names)
if len(names) == 0:
return redirect(url_for("root"))
return nameListInStr
if __name__ == '__main__':
app.run(debug=True)
|
smartkeerthi/Python-MongoDB-Flask-Projects
|
Flask and pymongo/main.py
|
main.py
|
py
| 953 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pymongo.MongoClient",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 33,
"usage_type": "call"
}
] |
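The projection {"Name": 1, "_id": 0} in getallnames() keeps only the Name field of each document. A small sketch of the same query against mongomock, an in-memory stand-in (assumes pip install mongomock; the real app needs a live cluster URL):

import mongomock

client = mongomock.MongoClient()
coll = client.NameListDatabase.CollectionList
coll.insert_many([{"Name": "alice", "age": 30}, {"Name": "bob", "age": 25}])

names = [doc["Name"] for doc in coll.find({}, {"Name": 1, "_id": 0})]
print(names)  # ['alice', 'bob']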
28830646732
|
"""Constants for the WiHeat Climate integration."""
import logging
API_URL = 'https://wi-heat.com/'
ID = 'home-assistant'
SESSION = 'A2B3C4D5E6'
DOMAIN = "wiheat"
CONF_CODE_FORMAT = "code_format"
CONF_CODE = "code"
CONF_TEMP = "temp"
UPDATE_INTERVAL = "timesync"
MIN_SCAN_INTERVAL = 60
API_ENDPOINT = {
'getUserDetails': f'{API_URL}usr_API_2.php',
'getData': f'{API_URL}API_2.php'
}
HEADERS = {
'host': 'wi-heat.com',
'accept': '*/*',
'content-type': 'application/x-www-form-urlencoded',
'accept-encoding': 'gzip, deflate, br',
}
QUERY = {
'login': 'login',
'getVpHwid': 'getVPhwid'
}
POWER_STATE = {
'11': 'on',
'21': 'off'
}
FAN_SPEED = {
'2': 'auto',
'3': 'minimum',
'5': 'medium',
'7': 'maximum'
}
PLASMACLUSTER = {
'F4': 'on',
'F0': 'off'
}
LOGGER = logging.getLogger(__package__)
|
kimjohnsson/wiheat
|
custom_components/wiheat/const.py
|
const.py
|
py
| 861 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 51,
"usage_type": "call"
}
] |
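A sketch of how these lookup tables are typically consumed; the raw codes '11', '2' and 'F4' come from the tables above, while the surrounding climate-entity code is assumed, not shown:

raw_state = {'power': '11', 'fan': '2', 'plasmacluster': 'F4'}

print(POWER_STATE[raw_state['power']])            # on
print(FAN_SPEED[raw_state['fan']])                # auto
print(PLASMACLUSTER[raw_state['plasmacluster']])  # on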
5423305185
|
'''
@author:KongWeiKun
@file: follower_crawler.py
@time: 18-2-13 下午3:57
@contact: [email protected]
'''
from multiprocessing import Pool,cpu_count,Lock,Manager
import pandas as pd
import threading
import csv
import requests
from bs4 import BeautifulSoup
import re
try:
from functools import namedtuple
except:
from collections import namedtuple
headers = {
'User-Agent' : 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.96 Safari/537.36'
}
COLUMNS = ['user','name','position','repositories','stars', 'followers', 'following', 'contributions']
PROFILE = namedtuple('PROFILE', COLUMNS)
Result = Manager().list()
DF = pd.DataFrame(columns=COLUMNS, index=["0"])
lock = threading.Lock()  # global resource lock
def _str_2_int(stri):
if 'k' in stri:
return int(float(stri[:-1]) * 1000)
if ',' in stri:
return int(stri.replace(',', ''))
else:
return int(stri)
# Crawl a user's profile information
def user_crawler(user):
"""crawl user profile
Arguments:
url {string} -- [description]
"""
url = 'https://github.com/{}'.format(user)
values = [None] * len(COLUMNS)
values[COLUMNS.index('user')] = user
try:
html = requests.get(url, headers=headers, timeout=10).text
soup = BeautifulSoup(html, 'lxml')
tag_name = soup.find_all('span', class_='p-name vcard-fullname d-block')
if len(tag_name) > 0:
name = tag_name[0].text
if len(name) > 0:
values[COLUMNS.index('name')] = name
tag_position = soup.find_all('span', class_='p-label')
if len(tag_position) > 0:
position = tag_position[0].text
values[COLUMNS.index('position')] = position
tags_overview = soup.find_all('span', class_='Counter')
repositories = _str_2_int(tags_overview[0].text.replace('\n', '').replace(' ', ''))
stars = _str_2_int(tags_overview[1].text.replace('\n', '').replace(' ', ''))
followers = _str_2_int(tags_overview[2].text.replace('\n', '').replace(' ', ''))
following = _str_2_int(tags_overview[3].text.replace('\n', '').replace(' ', ''))
values[COLUMNS.index('repositories')] = repositories
values[COLUMNS.index('stars')] = stars
values[COLUMNS.index('followers')] = followers
values[COLUMNS.index('following')] = following
tag_contributions = soup.find_all('h2', class_='f4 text-normal mb-2')
try:
contributions = _str_2_int(
tag_contributions[0].text.replace('\n', '').replace(' ', '').replace('contributionsinthelastyear', ''))
except Exception as err:
contributions = _str_2_int(
tag_contributions[0].text.replace('\n', '').replace(' ', '').replace('contributioninthelastyear', ''))
values[COLUMNS.index('contributions')] = contributions
with lock:
print(values)
Result.append(values)
except Exception as e:
print(e)
# Crawl followers
def get_all_followers(user):
"""get all followers of user
Arguments:
user {string} -- [description]
"""
followers_list = []
idx = 0
url = 'https://github.com/{}?page={}&tab=followers'
while True:
idx += 1
page_url = url.format(user, idx)
try:
html = requests.get(page_url, headers=headers, timeout=10).text
if 've reached the end' in html:
break
soup = BeautifulSoup(html, 'lxml')
tag_names = soup.find_all('span', class_='link-gray pl-1')
for name in tag_names:
followers_list.append(name.text)
except Exception as e:
print(e)
return followers_list
def save():
""" 将数据保存至本地
"""
with open("data/result.csv", "w+") as f:
global Result
f_csv = csv.writer(f)
f_csv.writerow(COLUMNS)
f_csv.writerows(Result)
print('data saved')
followers_list = []
def main():
"""main process
"""
main_user = 'miguelgrinberg'
print('Crawling followers lists, wait a moment ...')
followers_list = get_all_followers(main_user)
pool = Pool(processes=cpu_count())
for user in followers_list:
pool.apply_async(user_crawler, args=(user,))
pool.close()
pool.join()
save()
if __name__ == '__main__':
main()
|
Winniekun/spider
|
github/follower_crawler.py
|
follower_crawler.py
|
py
| 4,422 |
python
|
en
|
code
| 139 |
github-code
|
6
|
[
{
"api_name": "collections.namedtuple",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Manager",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "threading.Lock",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "multiprocessing.cpu_count",
"line_number": 131,
"usage_type": "call"
}
] |
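A quick self-contained check of the three branches in _str_2_int (the function is copied from above so the snippet runs on its own):

def _str_2_int(stri):
    if 'k' in stri:
        return int(float(stri[:-1]) * 1000)
    if ',' in stri:
        return int(stri.replace(',', ''))
    else:
        return int(stri)

assert _str_2_int('1.2k') == 1200   # GitHub-style abbreviated counts
assert _str_2_int('3,456') == 3456  # comma-separated counts
assert _str_2_int('42') == 42       # plain integers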
38046142992
|
from cffi import FFI as _FFI
import numpy as _np
import glob as _glob
import os as _os
__all__ = ['BloscWrapper']
class BloscWrapper:
def __init__(self, plugin_file=""):
this_module_dir = _os.path.dirname(_os.path.realpath(__file__))
# find the C library by climbing the directory tree
while plugin_file == "":
plugin_pattern = _os.path.join(this_module_dir, "*ags_blosc_wrapper.*")
candidate_plugins = _glob.glob(plugin_pattern)
# if found then break
if candidate_plugins:
plugin_file = candidate_plugins[0]
break
# not found and already at root. We're not going to find it
if this_module_dir == "/":
raise ValueError("Cannot find plugin ags_blosc_wrapper")
# go to parent directory and try again
this_module_dir = _os.path.split(this_module_dir)[0]
# specify the C signatures of the foreign functions
self._ffi = _FFI()
self._ffi.cdef("typedef void* ags_BloscWrapper;")
self._ffi.cdef("ags_BloscWrapper ags_BloscWrapper_new();")
self._ffi.cdef("void ags_BloscWrapper_delete(ags_BloscWrapper);")
self._ffi.cdef("size_t ags_BloscWrapper_reserveNeededToCompress(ags_BloscWrapper, size_t);")
self._ffi.cdef("size_t ags_BloscWrapper_reserveNeededToDecompress(ags_BloscWrapper, void*);")
self._ffi.cdef("size_t ags_BloscWrapper_compress(ags_BloscWrapper, void*, size_t, void*, size_t);")
self._ffi.cdef("size_t ags_BloscWrapper_decompress(ags_BloscWrapper, void*, void*, size_t);")
self._cmodule = self._ffi.dlopen(plugin_file)
# allocate a new raw instance
self.blosc_wrapper = self._cmodule.ags_BloscWrapper_new()
def __del__(self):
# free the raw instance
self._cmodule.ags_BloscWrapper_delete(self.blosc_wrapper)
def reserve_needed_to_compress(self, srcsize):
size = self._ffi.cast("size_t", srcsize)
return self._cmodule.ags_BloscWrapper_reserveNeededToCompress(self.blosc_wrapper, size)
def reserve_needed_to_decompress(self, src):
# get raw buffers
src_contiguous = _np.ascontiguousarray(src)
src_raw = src_contiguous.__array_interface__['data'][0]
src_cffi = self._ffi.cast("void*", src_raw)
return self._cmodule.ags_BloscWrapper_reserveNeededToDecompress(self.blosc_wrapper, src_cffi)
def compress(self, src):
# get sizes
srcsize = src.nbytes
dstsize = self.reserve_needed_to_compress(srcsize)
srcsize_cffi = self._ffi.cast("size_t", srcsize)
dstsize_cffi = self._ffi.cast("size_t", dstsize)
# allocate destination
dst = _np.empty(shape=(dstsize,), dtype=_np.uint8)
# get raw buffers
src_contiguous = _np.ascontiguousarray(src)
src_raw = src_contiguous.__array_interface__['data'][0]
src_cffi = self._ffi.cast("void*", src_raw)
dst_contiguous = _np.ascontiguousarray(dst)
dst_raw = dst_contiguous.__array_interface__['data'][0]
dst_cffi = self._ffi.cast("void*", dst_raw)
# perform compression and resize
dstsize = self._cmodule.ags_BloscWrapper_compress(self.blosc_wrapper, src_cffi, srcsize_cffi, dst_cffi, dstsize_cffi)
dst.resize((dstsize,))
return dst
def decompress(self, src):
# get sizes
dstsize = self.reserve_needed_to_decompress(src)
dstsize_cffi = self._ffi.cast("size_t", dstsize)
# allocate destination
dst = _np.empty(shape=(dstsize,), dtype=_np.uint8)
# get raw buffers
src_contiguous = _np.ascontiguousarray(src)
src_raw = src_contiguous.__array_interface__['data'][0]
src_cffi = self._ffi.cast("void*", src_raw)
dst_contiguous = _np.ascontiguousarray(dst)
dst_raw = dst_contiguous.__array_interface__['data'][0]
dst_cffi = self._ffi.cast("void*", dst_raw)
# perform decompression and resize
dstsize = self._cmodule.ags_BloscWrapper_decompress(self.blosc_wrapper, src_cffi, dst_cffi, dstsize_cffi)
dst.resize((dstsize,))
return dst
|
ActivisionGameScience/ags_example_py_wrapper
|
ags_py_blosc_wrapper.py
|
ags_py_blosc_wrapper.py
|
py
| 4,277 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "os.path.dirname",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path.split",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "cffi.FFI",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.ascontiguousarray",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "numpy.ascontiguousarray",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "numpy.ascontiguousarray",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "numpy.ascontiguousarray",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "numpy.ascontiguousarray",
"line_number": 112,
"usage_type": "call"
}
] |
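A roundtrip sketch for the wrapper above, assuming the compiled ags_blosc_wrapper shared library can be found on the directory search that __init__ performs:

import numpy as np

wrapper = BloscWrapper()

original = np.arange(100000, dtype=np.float64).view(np.uint8)
compressed = wrapper.compress(original)
restored = wrapper.decompress(compressed)

assert np.array_equal(original, restored)
print('%d -> %d bytes' % (original.nbytes, compressed.nbytes))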
32907694123
|
# 1 Add the usual reports
from sklearn.metrics import classification_report
y_true = [1, 0, 0, 2, 1, 0, 3, 3, 3]
y_pred = [1, 1, 0, 2, 1, 0, 1, 3, 3]
target_names = ['Class-0', 'Class-1', 'Class-2', 'Class-3']
print(classification_report(y_true, y_pred, target_names=target_names))
# 2 Run the code and see
# Instead of computing these metrics separately, you can directly
# use the preceding function to extract those statistics from your model.
|
IbrahimOued/Python-Machine-Learning-cookbook
|
2 Constructing a Classifier/performance_report.py
|
performance_report.py
|
py
| 447 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sklearn.metrics.classification_report",
"line_number": 6,
"usage_type": "call"
}
] |
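To see where those numbers come from, here is the Class-1 row of the report recomputed by hand from the same labels:

y_true = [1, 0, 0, 2, 1, 0, 3, 3, 3]
y_pred = [1, 1, 0, 2, 1, 0, 1, 3, 3]

tp = sum(1 for t, p in zip(y_true, y_pred) if t == 1 and p == 1)  # 2
fp = sum(1 for t, p in zip(y_true, y_pred) if t != 1 and p == 1)  # 2
fn = sum(1 for t, p in zip(y_true, y_pred) if t == 1 and p != 1)  # 0

print(tp / (tp + fp))  # precision for Class-1: 0.50
print(tp / (tp + fn))  # recall for Class-1: 1.00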
10543642062
|
from redis.commands.search.field import GeoField, NumericField, TextField, VectorField
REDIS_INDEX_NAME = "benchmark"
REDIS_PORT = 6380
H5_COLUMN_TYPES_MAPPING = {
"int": NumericField,
"int32": NumericField,
"keyword": TextField,
"text": TextField,
"string": TextField,
"str": TextField,
"float": NumericField,
"float64": NumericField,
"float32": NumericField,
"geo": GeoField,
}
def convert_H52RedisType(h5_column_type: str):
redis_type = H5_COLUMN_TYPES_MAPPING.get(h5_column_type.lower(), None)
if redis_type is None:
raise RuntimeError(f"🐛 redis doesn't support h5 column type: {h5_column_type}")
return redis_type
|
myscale/vector-db-benchmark
|
engine/clients/redis/config.py
|
config.py
|
py
| 687 |
python
|
en
|
code
| 13 |
github-code
|
6
|
[
{
"api_name": "redis.commands.search.field.NumericField",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "redis.commands.search.field.NumericField",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "redis.commands.search.field.TextField",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "redis.commands.search.field.TextField",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "redis.commands.search.field.TextField",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "redis.commands.search.field.TextField",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "redis.commands.search.field.NumericField",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "redis.commands.search.field.NumericField",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "redis.commands.search.field.NumericField",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "redis.commands.search.field.GeoField",
"line_number": 17,
"usage_type": "name"
}
] |
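A small sketch of the lookup in action, assuming config.py's names are in scope; note the case-insensitive match and the RuntimeError on unmapped types:

field_cls = convert_H52RedisType('Float32')   # case-insensitive lookup
print(field_cls.__name__)                     # NumericField

try:
    convert_H52RedisType('bool')              # not in the mapping
except RuntimeError as err:
    print(err)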
13295958598
|
import vtk
import numpy as np
import struct
# def save_vf(self, filename):
# """ Write the vector field as .vf file format to disk. """
# if not np.unique(self.resolution).size == 1:
# raise ValueError("Vectorfield resolution must be the same for X, Y, Z when exporting to Unity3D.")
# file_handle = open(filename, 'wb')
# for val in [b'V', b'F', b'_', b'V',
# struct.pack('H', self.resolution[0]),
# struct.pack('H', self.resolution[1]),
# struct.pack('H', self.resolution[2])]:
# file_handle.write(val)
# # Layout data in required order.
# u_stream = self.u.flatten('F')
# v_stream = self.v.flatten('F')
# w_stream = self.w.flatten('F')
# for i in range(u_stream.size):
# file_handle.write(struct.pack('f', v_stream[i]))
# file_handle.write(struct.pack('f', u_stream[i]))
# file_handle.write(struct.pack('f', w_stream[i]))
# file_handle.close()
if __name__ == '__main__':
path = "E:\\VIS22\\Assign3\\Data_Assign3\\Data_Assign3\\"
#input_file_name = "bernard3D_Q.vtk"
input_file_name = "FullHead.mhd"
input_file_name = path + input_file_name
if ".mhd" in input_file_name: #The input file is MetaImageData
input_type = "mhd"
reader = vtk.vtkMetaImageReader()
reader.SetFileName(input_file_name)
reader.Update()
elif ".vtk" in input_file_name: # The input file is VTK
input_type = "vtk"
reader = vtk.vtkDataSetReader()
reader.SetFileName(input_file_name)
reader.Update()
poly = reader.GetOutput()
scalars = poly.GetPointData().GetScalars()
array = np.array(reader.GetOutput().GetPointData().GetScalars())
print(len(array))
print(poly.GetScalarRange()[0])
print(poly.GetScalarRange()[1])
dimension = poly.GetDimensions()
print(dimension)
#print(poly.GetPointData())
ini_file_name = input_file_name + ".raw.ini"
file_handle = open(ini_file_name, 'w')
file_handle.write("dimx:" + str(dimension[0]) +"\n")
file_handle.write("dimy:" + str(dimension[1])+"\n")
file_handle.write("dimz:" +str(dimension[2])+"\n")
file_handle.write("skip:0"+"\n")
file_handle.write("format:int32"+"\n")
file_handle.close()
file_name = input_file_name + ".raw.txt"
file_handle = open(file_name, 'w')
print(array[0])
for i in range(len(array)):
file_handle.write(str(array[i]) +"\n")
file_handle.close()
file_name_raw = input_file_name + ".raw"
file_handle = open(file_name_raw, 'wb')
print(array[0])
for i in range(len(array)):
        file_handle.write(struct.pack('i', int(array[i])))
file_handle.close()
|
maysie0110/COSC6344-FinalProject
|
write_raw_file.py
|
write_raw_file.py
|
py
| 2,862 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "vtk.vtkMetaImageReader",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "vtk.vtkDataSetReader",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 81,
"usage_type": "call"
}
] |
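A sketch of reading the .raw file back with numpy; the dimensions are illustrative and should be taken from the .raw.ini written above (format int32, skip 0):

import numpy as np

dimx, dimy, dimz = 256, 256, 94   # illustrative; read these from the .raw.ini
volume = np.fromfile('FullHead.mhd.raw', dtype=np.int32)
volume = volume.reshape((dimz, dimy, dimx))  # VTK point data varies x fastest
print(volume.min(), volume.max())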
27035685049
|
"""Json module"""
import json
def handler(event, _context):
"""
Lambda Handler
Parameters
----------
event : dict
An event
Returns
-------
dict
The response object
"""
print(f"request: {json.dumps(event)}")
return {
"statusCode": 200,
"headers": {"Content-Type": "application/json"},
"body": json.dumps({ "hello": f"Hello World from Python! Handler at {event['path']}"})
}
|
jhonrocha/aws-cdk-explorations
|
lambda/play-py/main.py
|
main.py
|
py
| 464 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "json.dumps",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 22,
"usage_type": "call"
}
] |
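Appending this guard to main.py invokes the handler locally; the event is a minimal illustration, real API Gateway events carry many more fields:

if __name__ == "__main__":
    event = {"path": "/hello"}
    response = handler(event, None)
    print(response["body"])  # {"hello": "Hello World from Python! Handler at /hello"}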
14582545322
|
# Visualisation of Parkes beam pattern: Shows position of beams for a given HDF file
# Input: fname (location of HDF dataset)
# V.A. Moss ([email protected])
__author__ = "V.A. Moss"
__date__ = "$18-sep-2018 17:00:00$"
__version__ = "0.1"
import os
import sys
import tables as tb
import numpy as np
from matplotlib import *
import matplotlib
matplotlib.rcParams["interactive"] = True
from numpy import *
from pylab import *
rc('text', usetex=True)
rc('font',**{'family':'serif','serif':['serif'],'size':14})
from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage,AnnotationBbox
from matplotlib._png import read_png
import urllib.request, urllib.parse, urllib.error
import datetime
from astropy.io import ascii
# Read the position from the observation record
fname = '2017-09-19_0109-P953_GASS_246.2+39.9+312_0.hdf'
# VLSR
# This function gets the velocity of the observatory for a given position and date/time
def freq2vlsr(ra,dec,fname):
x = datetime.datetime.strptime(fname.split('-P')[0],'%Y-%m-%d_%H%M')
date = x.strftime('%Y%b%d:%H:%M').lower()
path = 'www.narrabri.atnf.csiro.au/cgi-bin/obstools/velo.cgi?radec=%s,%s&velo=0&frame=lsr&type=radio&date=%s&freq1=1420.405752&freq2=&telescope=parkes' % (ra,dec,date)
path1 = path.replace(':','%3A')
path2 = 'http://'+path1.replace(',','%2C')
# Get from online
f = urllib.request.urlopen(path2)
for line in f:
line = line.decode('utf-8')
if 'Observatory velocity' in line:
vel = float(line.split('</td><td>')[1].split()[0])
return vel
def showmb():
# Make image
sfig = 'beams_all.png'
arr_lena = read_png(sfig)
imagebox = OffsetImage(arr_lena, zoom=0.35)
ab = AnnotationBbox(imagebox, [0.095,0.08],
xybox=(0., 0.),
xycoords='axes fraction',
boxcoords="offset points",
frameon=False
)
gca().add_artist(ab)
# Get the positional information
d = ascii.read('P953 Observation Record - Sheet1.csv')
# Get the position
srcname = fname.split('/')[-1]
src = srcname.split('.hdf')[0]
mask = (d['File'] == srcname)
dsub = d[mask]
ra,dec = dsub['RA'][0],dsub['Dec'][0]
print('Input file: %s\nPosition: %s, %s' % (srcname,ra,dec))
# Open the data file
t = tb.open_file('%s' % fname)
# Setup the figure
figure(figsize=(8,8))
cmap = cm.Spectral_r
# Plot each position traced
alph=0.025
sz = 300
# Beam N uses colour cm.Spectral((N - 1)/12.)
for beam in range(1, 14):
    ra_beam = getattr(t.root.scan_pointing.cols, 'mb%02d_raj' % beam)[:]
    dec_beam = getattr(t.root.scan_pointing.cols, 'mb%02d_dcj' % beam)[:]
    colour = cm.Spectral((beam - 1)/12.)
    scatter(ra_beam, dec_beam, s=sz, marker='o', edgecolor=colour, facecolor=colour, alpha=alph)
# Show a legend of the multi-beam colours
showmb()
figsave = '\_'.join(srcname.split('_'))
title(figsave)
grid(True,alpha=0.2)
xlabel('Right Ascension (deg)')
ylabel('Declination (deg)')
savefig('%s_beampos.pdf' % src,bbox_inches='tight',transparent=True)
|
cosmicpudding/ParkesBeamPattern
|
plot_beampattern.py
|
plot_beampattern.py
|
py
| 4,867 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "matplotlib.rcParams",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "urllib.request.request.urlopen",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "matplotlib._png.read_png",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.offsetbox.OffsetImage",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "matplotlib.offsetbox.AnnotationBbox",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "astropy.io.ascii.read",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "astropy.io.ascii",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "tables.open_file",
"line_number": 74,
"usage_type": "call"
}
] |
28256315705
|
from PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog, QMessageBox, QListWidgetItem
from PyQt5.QtCore import pyqtSlot, QDir, Qt, QSettings, QFileInfo
from SettingsDialog import SettingsDialog
from ui_MainWindow import Ui_MainWindow
import math
import Settings
def areaOfPolygon(vertices):
    vertices = vertices + [vertices[0]]  # close the polygon without mutating the caller's list
area = lambda a, b: (b[0] - a[0]) * (a[1] + b[1]) / 2.
areas = map(lambda i: area(vertices[i], vertices[i+1]), range(len(vertices) - 1))
return sum(areas)
def lengthOfPath(vertices):
distance = lambda a, b: math.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)
distances = map(lambda i: distance(vertices[i], vertices[i+1]), range(len(vertices) - 1))
return sum(distances)
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.settings = QSettings()
self.ui.exitAction.triggered.connect(QApplication.quit)
self.ui.zoomInAction.triggered.connect(self.ui.imageLabel.zoomIn)
self.ui.zoomOutAction.triggered.connect(self.ui.imageLabel.zoomOut)
self.enableImageActions(False)
self.enableSamplesActions(False)
@pyqtSlot()
def on_openAction_triggered(self):
dir = self.settings.value(
Settings.LAST_DIRECTORY_KEY, Settings.DEFAULT_LAST_DIRECTORY)
(filename, _) = QFileDialog.getOpenFileName(
self,
self.tr('Open Image'),
dir,
self.tr('Images (*.png *.jpg)'))
if filename:
self.settings.setValue(
Settings.LAST_DIRECTORY_KEY, QFileInfo(filename).absolutePath())
self.ui.imageLabel.loadImage(filename)
self.statusBar().showMessage(QDir.toNativeSeparators(filename))
self.enableImageActions(True)
self.on_clearAction_triggered()
@pyqtSlot()
def on_saveAction_triggered(self):
dir = self.settings.value(
Settings.LAST_DIRECTORY_KEY, Settings.DEFAULT_LAST_DIRECTORY)
(filename, _) = QFileDialog.getSaveFileName(
self,
self.tr('Open Image'),
dir,
self.tr('Comma Separated Values files (*.csv)\nText files (*.txt)\n'))
if filename:
self.settings.setValue(
Settings.LAST_DIRECTORY_KEY, QFileInfo(filename).absolutePath())
text = self.getCoordinatesAsCsv()
with open(filename, 'w') as file:
file.write(text)
@pyqtSlot()
def on_settingsAction_triggered(self):
settingsDialog = SettingsDialog(self)
if settingsDialog.exec_():
self.ui.imageLabel.reset()
@pyqtSlot()
def on_clearAction_triggered(self):
self.ui.listWidget.clear()
self.ui.imageLabel.clearSamples()
self.enableSamplesActions(False)
@pyqtSlot()
def on_copyAction_triggered(self):
text = self.getCoordinatesAsTsv()
clipboard = QApplication.clipboard()
clipboard.setText(text)
@pyqtSlot()
def on_aboutQtAction_triggered(self):
QMessageBox.aboutQt(self)
@pyqtSlot()
def on_aboutAction_triggered(self):
QMessageBox.about(
self,
self.tr('About'),
self.tr('<h1>%s %s</h1>\n' +
'<p>Developed by <a href="%s">%s</a></p>') %
(QApplication.applicationName(),
QApplication.applicationVersion(),
QApplication.organizationDomain(),
QApplication.organizationName()
))
@pyqtSlot()
def on_pathLengthAction_triggered(self):
coordinates = list(self.getCoordinates())
totalDistance = lengthOfPath(coordinates)
QMessageBox.information(
self,
self.tr('Path Length'),
self.tr("The path's length is %f" % totalDistance)
)
@pyqtSlot()
def on_polygonAreaAction_triggered(self):
coordinates = list(self.getCoordinates())
totalArea = areaOfPolygon(coordinates)
QMessageBox.information(
self,
self.tr('Polygon Area'),
self.tr("The polygon's area is %f" % totalArea)
)
@pyqtSlot(float, float)
def on_imageLabel_mouseMoved(self, x, y):
self.ui.coordinatesLineEdit.setText("%f × %f" % (x, y))
@pyqtSlot(float, float)
def on_imageLabel_clicked(self, x, y):
item = QListWidgetItem("%f × %f" % (x, y))
item.setData(Qt.UserRole, x)
item.setData(Qt.UserRole + 1, y)
self.ui.listWidget.addItem(item)
self.enableSamplesActions(True)
def getCoordinates(self):
items = self.ui.listWidget.findItems('*', Qt.MatchWildcard)
return map(lambda item: (item.data(Qt.UserRole), item.data(Qt.UserRole + 1)), items)
def getCoordinatesAsCsv(self):
coordinates = self.getCoordinates()
lines = map(lambda coordinate: "%f,%f" % coordinate, coordinates)
return 'x,y\n' + '\n'.join(lines)
def getCoordinatesAsTsv(self):
coordinates = self.getCoordinates()
lines = map(lambda coordinate: "%f\t%f" % coordinate, coordinates)
return 'x\ty\n' + '\n'.join(lines)
def enableSamplesActions(self, enable):
self.ui.saveAction.setEnabled(enable)
self.ui.clearAction.setEnabled(enable)
self.ui.copyAction.setEnabled(enable)
self.ui.pathLengthAction.setEnabled(enable)
self.ui.polygonAreaAction.setEnabled(enable)
def enableImageActions(self, enable):
self.ui.zoomInAction.setEnabled(enable)
self.ui.zoomOutAction.setEnabled(enable)
if __name__ == '__main__':
vertices = [(0.72, 2.28), (2.66, 4.71), (5., 3.5), (3.63, 2.52), (4., 1.6), (1.9, 1.)]
expectedArea = 8.3593
area = areaOfPolygon(vertices)
print("%f =?=\n%f" % (area, expectedArea))
|
claudiomattera/graph-extractor
|
MainWindow.py
|
MainWindow.py
|
py
| 6,147 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "math.sqrt",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "ui_MainWindow.Ui_MainWindow",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QSettings",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication.quit",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "Settings.LAST_DIRECTORY_KEY",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "Settings.DEFAULT_LAST_DIRECTORY",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets.QFileDialog.getOpenFileName",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QFileDialog",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "Settings.LAST_DIRECTORY_KEY",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.QFileInfo",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QDir.toNativeSeparators",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QDir",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "Settings.LAST_DIRECTORY_KEY",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "Settings.DEFAULT_LAST_DIRECTORY",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets.QFileDialog.getSaveFileName",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QFileDialog",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "Settings.LAST_DIRECTORY_KEY",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.QFileInfo",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "SettingsDialog.SettingsDialog",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication.clipboard",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QMessageBox.aboutQt",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QMessageBox",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QMessageBox.about",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QMessageBox",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QApplication.applicationName",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QApplication.applicationVersion",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QApplication.organizationDomain",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QApplication.organizationName",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QMessageBox.information",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QMessageBox",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QMessageBox.information",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QMessageBox",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QListWidgetItem",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.Qt.UserRole",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.UserRole",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.Qt.MatchWildcard",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.Qt.UserRole",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.Qt",
"line_number": 137,
"usage_type": "name"
}
] |
3714759753
|
from unittest import TestCase
from datetime import datetime
from uuid import uuid4
from sh import git, rm, gitlint, touch, echo, ErrorReturnCode
class BaseTestCase(TestCase):
pass
class IntegrationTests(BaseTestCase):
""" Simple set of integration tests for gitlint """
tmp_git_repo = None
@classmethod
def setUpClass(cls):
""" Sets up the integration tests by creating a new temporary git repository """
cls.tmp_git_repo = "/tmp/gitlint-test-%s" % datetime.now().strftime("%Y%m%d-%H%M%S")
git("init", cls.tmp_git_repo)
        # configuring name and email is required in every git repo
git("config", "user.name", "gitlint-test-user", _cwd=cls.tmp_git_repo)
git("config", "user.email", "[email protected]", _cwd=cls.tmp_git_repo)
@classmethod
def tearDownClass(cls):
""" Cleans up the temporary git repository """
rm("-rf", cls.tmp_git_repo)
def _create_simple_commit(self, message):
""" Creates a simple commit with an empty test file.
:param message: Commit message for the commit. """
test_filename = "test-file-" + str(uuid4())
touch(test_filename, _cwd=self.tmp_git_repo)
git("add", test_filename, _cwd=self.tmp_git_repo)
git("commit", "-m", message, _cwd=self.tmp_git_repo)
def test_successful(self):
self._create_simple_commit("Simple title\n\nSimple body")
output = gitlint(_cwd=self.tmp_git_repo, _tty_in=True)
self.assertEqual(output, "")
def test_errors(self):
commit_msg = "WIP: This is a title.\nContent on the second line"
self._create_simple_commit(commit_msg)
output = gitlint(_cwd=self.tmp_git_repo, _tty_in=True, _ok_code=[3])
expected = "1: T3 Title has trailing punctuation (.): \"WIP: This is a title.\"\n" + \
"1: T5 Title contains the word 'WIP' (case-insensitive): \"WIP: This is a title.\"\n" + \
"2: B4 Second line is not empty: \"Content on the second line\"\n"
self.assertEqual(output, expected)
def test_pipe_input(self):
error_msg = None
# For some odd reason, sh doesn't return the error output when piping something into gitlint.
# Note that this does work as expected in the test_errors testcase.
# To work around this we raise and catch an exception
try:
gitlint(echo("WIP: Pipe test."), _tty_in=False)
except ErrorReturnCode as e:
# StdErr is returned as bytes -> decode to unicode string
# http://stackoverflow.com/questions/606191/convert-bytes-to-a-python-string
error_msg = e.stderr.decode("utf-8")
expected = "1: T3 Title has trailing punctuation (.): \"WIP: Pipe test.\"\n" + \
"1: T5 Title contains the word 'WIP' (case-insensitive): \"WIP: Pipe test.\"\n" + \
"3: B6 Body message is missing\n"
self.assertEqual(error_msg, expected)
|
Hawatel/gitlint
|
qa/integration_test.py
|
integration_test.py
|
py
| 2,991 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "unittest.TestCase",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "sh.git",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sh.git",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sh.git",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sh.rm",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sh.touch",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sh.git",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sh.git",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "sh.gitlint",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "sh.gitlint",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "sh.gitlint",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "sh.echo",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "sh.ErrorReturnCode",
"line_number": 58,
"usage_type": "name"
}
] |
72489561467
|
import pdb
import sys
sys.path.append( '..' )
from copy import copy, deepcopy
import kivy.graphics as kg
from kivy.lang import Builder
from kivy.properties import *
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
#KV Lang files
from pkg_resources import resource_filename
path = resource_filename( __name__, 'labels.kv' )
Builder.load_file( path )
TOP_LEFT, LEFT, BOTTOM_LEFT = 0, 1, 2
TOP, BOTTOM, CENTER = 3, 4, 5
TOP_RIGHT, RIGHT, BOTTOM_RIGHT = 6, 7, 8
class BindedLabel( Label ) :
'''
    Standard label with some additions:
    - text_size bound to size (so you can center text)
- Background color
- Some more user-friendly padding usage
'''
fill_color = ListProperty( [0,0,0,0] )
def __init__( self, **kargs ) :
kargs['valign'] = kargs['valign'] if 'valign' in kargs.keys() else 'middle'
kargs['halign'] = kargs['halign'] if 'halign' in kargs.keys() else 'center'
if 'text' not in kargs.keys() : kargs['text'] = u''
super( BindedLabel, self ).__init__( **kargs )
self.bind( size=self.setter('text_size') )
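# Illustrative usage sketch (hypothetical, not part of the original file):
# a centered label with a translucent black background.
#
#   label = BindedLabel(text='Hello', fill_color=[0, 0, 0, 0.5])
#   some_layout.add_widget(label)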
class ResizeableLabel( BindedLabel ) :
'''
User-resizeable label.
'''
hover_color = ListProperty( [0,0,0,1] )
'''
A widget is displayed to show the new size of the label.
It's filled with this color.
'''
root_layout = ObjectProperty( None )
'''
The 'hover' is drawn on the root layout due to possible size mismatch.
You'll need to provide a link to your root layout.
'''
on_new_size = ObjectProperty( None )
'''
Called by on_size method whenever the size of the label changes.
'''
meta = ObjectProperty( None )
'''
Passed as argument to on_new_size, use it as you wish...
'''
min_width = NumericProperty( 50 )
'''
Label minimum width.
'''
_o = ListProperty( [0,0] )
_d = ListProperty( [0,0] )
_hover_size = ListProperty( [0,0] )
_hover_pos = ListProperty( [0,0] )
def __init__( self, **kargs ) :
super( ResizeableLabel, self ).__init__( **kargs )
self._touched = False
self._unique_group = { 'group':'__resizeable_label_%d' % (id(self)) }
def on_touch_down( self, touch ) :
self._touched = False
if ( ( self.pos[0] < touch.pos[0] < self.pos[0]+self.width ) and
( self.pos[1] < touch.pos[1] < self.pos[1]+self.height ) ) :
self._touched = True
self._o = touch.pos
self._pivot = self._get_pivot()
return True
def on_touch_move( self, touch ) :
if self._touched :
self._d = touch.pos
self._hover_size, self._hover_pos = self._get_hover()
if self.root_layout :
self._clear_canvas()
with self.root_layout.canvas :
kg.Color( *self.hover_color, **self._unique_group )
kg.Rectangle(
size=self._hover_size, \
pos=self._hover_pos, \
**self._unique_group
)
return True
def on_touch_up( self, touch ) :
if self._touched :
self._clear_canvas()
self._o = []
if self._hover_size[0] > self.min_width :
self._on_size( self.size, self._hover_size )
return True
def _on_size( self, oldsize, newsize ) :
print( 'Size changed' )
if self.on_new_size : self.on_new_size( oldsize, newsize, self.meta )
self.size = copy( newsize )
def _get_pivot( self ) :
tx, ty = abs(self._o[0]-self.pos[0]), abs(self._o[1]-self.pos[1])
ox, oy = tx/self.size[0], ty/self.size[1]
if ox < 0.33 :
x = 0
elif ox < 0.66 :
x = 3
else :
x = 6
return x +1
"""
if oy > 0.66 :
return x + 0
elif oy > 0.33 :
return x + 1
else :
return x + 2
"""
def _get_hover( self ) :
dx = self._d[0] - self._o[0]
dy = self._d[1] - self._o[1]
if self._pivot == RIGHT :
return [self.size[0]+dx, self.size[1]], self.pos
return self.size, self.pos
def _clear_canvas( self ) :
self.root_layout.canvas.remove_group( self._unique_group['group'] )
|
curzel-it/kivy-material-ui
|
material_ui/flatui/labels.py
|
labels.py
|
py
| 4,448 |
python
|
en
|
code
| 67 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "pkg_resources.resource_filename",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "kivy.lang.Builder.load_file",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "kivy.lang.Builder",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "kivy.uix.label.Label",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "kivy.graphics.Color",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "kivy.graphics",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "kivy.graphics.Rectangle",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "kivy.graphics",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "copy.copy",
"line_number": 122,
"usage_type": "call"
}
] |
24506022571
|
from mock import Mock, patch, ANY, sentinel
from nose.tools import ok_, eq_, raises, timed
from noderunner.client import Client, Context, Handle
from noderunner.connection import Connection
from noderunner.protocol import Protocol
class TestClient(object):
@patch("noderunner.client.get_sockets")
@patch("noderunner.client.open_process")
@patch("noderunner.client.Connection", spec=Connection)
@patch("noderunner.client.Protocol", spec=Protocol)
def _client(self, proto, con, proc, sock):
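        # mock applies stacked @patch decorators bottom-up, so the mocks are
        # injected in reverse order: proto, con, proc, sock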
sock.return_value = (Mock(), Mock(), Mock())
return Client(), proto, con, proc, sock
def test_ctor(self):
c, proto, con, proc, sock = self._client()
proto.assert_called_once_with(con.return_value, ANY)
con.assert_called_once_with(ANY)
proc.assert_called_once_with(ANY, ANY)
sock.assert_called_once_with()
def test_eval(self):
c, proto, con, proc, sock = self._client()
c.eval(sentinel.code, sentinel.context)
p = proto.return_value
p.request_sync.assert_called_once_with("eval",
code=sentinel.code,
context=sentinel.context)
def test_stop(self):
c, proto, con, proc, sock = self._client()
c.stop()
proc.return_value.terminate.assert_called_once_with()
proto.return_value.stop.assert_called_once_with()
def test_context(self):
c, proto, con, proc, sock = self._client()
c.context(sentinel.name, sentinel.deps)
p = proto.return_value
p.request_sync.assert_called_once_with("mkcontext",
name=sentinel.name,
requirements=sentinel.deps)
def test_get(self):
c, proto, con, proc, sock = self._client()
c.get(sentinel.path, sentinel.context)
p = proto.return_value
p.request_sync.assert_called_once_with("get",
path=sentinel.path,
context=sentinel.context)
def test_set(self):
c, proto, con, proc, sock = self._client()
c.set(sentinel.path, sentinel.val, sentinel.context)
p = proto.return_value
p.request_sync.assert_called_once_with("set",
path=sentinel.path,
value=sentinel.val,
context=sentinel.context)
def test_call(self):
c, proto, con, proc, sock = self._client()
c.call(sentinel.path, sentinel.args, sentinel.context)
p = proto.return_value
p.request_sync.assert_called_once_with("call",
path=sentinel.path,
args=sentinel.args,
context=sentinel.context)
class TestContext(object):
def _context(self, name=sentinel.name):
mck = Mock()
return mck, Context(mck, name)
def test_eval(self):
mck, context = self._context()
context.eval(sentinel.code)
mck.eval.assert_called_once_with(sentinel.code,
context=sentinel.name)
def test_get(self):
mck, context = self._context()
context.get(sentinel.path)
mck.get.assert_called_once_with(ANY, sentinel.name)
def test_set(self):
mck, context = self._context()
context.set(sentinel.path, sentinel.value)
mck.set.assert_called_once_with(ANY,
sentinel.value,
sentinel.name)
def test_call(self):
mck, context = self._context()
context.call(sentinel.path, sentinel.args)
mck.call.assert_called_once_with(ANY,
sentinel.args,
sentinel.name)
def test_objects(self):
mck, context = self._context()
handle = context.objects
eq_(handle._context, context)
class TestHandle(object):
def test_call(self):
ctx = Mock()
ctx.call.return_value = sentinel.rtn
h = Handle(ctx)
eq_(h(sentinel.foo), sentinel.rtn)
ctx.call.assert_called_once_with((sentinel.foo,))
def test_attr_access(self):
ctx = Mock()
h = Handle(ctx)
handle2 = h.foobar
eq_(handle2._path, ["foobar"])
def test_item_access(self):
ctx = Mock()
h = Handle(ctx)
handle2 = h["foobar"]
eq_(handle2._path, ["foobar"])
def test_access_context_stays(self):
ctx = Mock()
h = Handle(ctx)
handle2 = h.foobar
eq_(handle2._context, ctx)
def test_get(self):
ctx = Mock()
ctx.get.return_value = sentinel.get
h = Handle(ctx)
eq_(h.get(), sentinel.get)
ctx.get.assert_called_once_with()
def test_attr_set(self):
ctx = Mock()
h = Handle(ctx)
h.key = sentinel.val
ctx.set.assert_called_once_with("key", sentinel.val)
def test_item_set(self):
ctx = Mock()
h = Handle(ctx)
h["key"] = sentinel.val
ctx.set.assert_called_once_with("key", sentinel.val)
|
williamhogman/noderunner
|
tests/test_client.py
|
test_client.py
|
py
| 5,456 |
python
|
en
|
code
| 6 |
github-code
|
6
|
[
{
"api_name": "mock.Mock",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "noderunner.client.Client",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "noderunner.connection.Connection",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "mock.patch",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "noderunner.protocol.Protocol",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "mock.ANY",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "mock.ANY",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "mock.ANY",
"line_number": 23,
"usage_type": "argument"
},
{
"api_name": "mock.sentinel.code",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.context",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel.code",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.context",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.name",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.deps",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel.name",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.deps",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.path",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.context",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel.path",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.context",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.path",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.val",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel.context",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel.path",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.val",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.context",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.path",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.args",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel.context",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel.path",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.args",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.context",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.name",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "mock.Mock",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "noderunner.client.Context",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "mock.sentinel.code",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.code",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.name",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.path",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "mock.ANY",
"line_number": 105,
"usage_type": "argument"
},
{
"api_name": "mock.sentinel.name",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.path",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.value",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "mock.ANY",
"line_number": 112,
"usage_type": "argument"
},
{
"api_name": "mock.sentinel.value",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.name",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.path",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.args",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "mock.ANY",
"line_number": 120,
"usage_type": "argument"
},
{
"api_name": "mock.sentinel.args",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.name",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "nose.tools.eq_",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "mock.sentinel.rtn",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "noderunner.client.Handle",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "nose.tools.eq_",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "mock.sentinel.foo",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.rtn",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel.foo",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "mock.Mock",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "noderunner.client.Handle",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "nose.tools.eq_",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "noderunner.client.Handle",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "nose.tools.eq_",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "noderunner.client.Handle",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "nose.tools.eq_",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "mock.sentinel.get",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "noderunner.client.Handle",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "nose.tools.eq_",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "mock.sentinel.get",
"line_number": 174,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "mock.Mock",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "noderunner.client.Handle",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "mock.sentinel.val",
"line_number": 182,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.val",
"line_number": 183,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "mock.Mock",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "noderunner.client.Handle",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "mock.sentinel.val",
"line_number": 189,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "mock.sentinel.val",
"line_number": 190,
"usage_type": "attribute"
},
{
"api_name": "mock.sentinel",
"line_number": 190,
"usage_type": "name"
}
] |
37583094466
|
import pyvista as pv
axes = pv.Axes()
axes.origin
# Expected:
## (0.0, 0.0, 0.0)
#
# Set the origin of the axes.
#
axes.origin = (2.0, 1.0, 1.0)
axes.origin
# Expected:
## (2.0, 1.0, 1.0)
|
pyvista/pyvista-docs
|
version/dev/api/plotting/_autosummary/pyvista-Axes-origin-1.py
|
pyvista-Axes-origin-1.py
|
py
| 190 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "pyvista.Axes",
"line_number": 2,
"usage_type": "call"
}
] |
41766786793
|
from collections import deque
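# Codeforces 1100-B: a contest can be held as soon as one problem of each
# difficulty 1..n is available; the answer marks with '1' each problem whose
# arrival completes such a set.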
s = input().split()
n = int(s[0])
m = int(s[1])
a = list(map(int, input().split()))
result = ['0']*m
d = {}
for i in range(m):
c = None
if a[i] in d:
c = d[a[i]]
else:
c = deque()
d[a[i]] = c
c.append(i)
while True:
found = True
max_p = 0
for i in range(1, n+1):
if i not in d or len(d[i]) == 0:
found = False
break
p = d[i].popleft()
if p > max_p:
max_p = p
    if not found:
break
result[max_p] = '1'
print(''.join(result))
|
gautambp/codeforces
|
1100-B/1100-B-48361896.py
|
1100-B-48361896.py
|
py
| 626 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.deque",
"line_number": 15,
"usage_type": "call"
}
] |
72066928509
|
from flask import Flask, flash, redirect, render_template
from form import LoginForm
app = Flask(__name__)
app.config['SECRET_KEY'] = "secret"
@app.route("/home")
def home():
return "Hello Mines ParisTech"
@app.route("/", methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
"""Log in requested for {form.username.data} with passord {form.password.data}"""
## Add function here to check password
return redirect("/home")
return render_template("login.html", form=form)
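# For context, a minimal LoginForm sketch (hypothetical -- the real class is
# defined in form.py, which is not shown here):
#
#   class LoginForm(FlaskForm):
#       username = StringField('Username', validators=[DataRequired()])
#       password = PasswordField('Password', validators=[DataRequired()])
#       submit = SubmitField('Sign In')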
@app.route("/shutdown")
def shutdown():
raise RuntimeError
if __name__=="__main__":
try:
app.run(debug=False, port=3001)
except RuntimeError:
print("Server closed")
|
basileMarchand/ProgrammeCooperants
|
flask_demo/demo5/app.py
|
app.py
|
py
| 754 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "form.LoginForm",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "form.validate_on_submit",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 20,
"usage_type": "call"
}
] |
11317226884
|
import pathlib
from setuptools import find_packages, setup
import codecs
import os.path
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, rel_path), 'r') as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith('__version__'):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
else:
raise RuntimeError("Unable to find version string.")
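# Illustrative behaviour (assuming a hypothetical module file that contains
# the line __version__ = "1.0.0"): get_version() splits that line on the
# quote character and returns "1.0.0".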
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# This call to setup() does all the work
setup(
name="trankit",
version=get_version("trankit/__init__.py"),
description="Trankit: A Light-Weight Transformer-based Toolkit for Multilingual Natural Language Processing",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/nlp-uoregon/trankit",
author="NLP Group at the University of Oregon",
author_email="[email protected]",
license='Apache License 2.0',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Intended Audience :: Information Technology',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Text Processing',
'Topic :: Text Processing :: Linguistic',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
packages=find_packages(),
include_package_data=True,
install_requires=['numpy', 'protobuf', 'requests', 'torch>=1.6.0', 'tqdm>=4.27', 'langid==1.1.6', 'filelock', 'tokenizers>=0.7.0', 'regex != 2019.12.17', 'packaging', 'sentencepiece', 'sacremoses'],
entry_points={
},
)
|
nlp-uoregon/trankit
|
setup.py
|
setup.py
|
py
| 2,223 |
python
|
en
|
code
| 693 |
github-code
|
6
|
[
{
"api_name": "os.path.path.abspath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "os.path.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "codecs.open",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 58,
"usage_type": "call"
}
] |
74337793468
|
# Name : Jiazhao Li Unique name: jiazhaol
import numpy as np
from sklearn import preprocessing
import sys
from sklearn import tree
def load_train_data(filename):
SBD_traindata_list = []
with open(filename, 'r') as f:
for line in f:
line = line.strip('\n')
word = line.split(' ')
SBD_traindata_list.append([word[0], word[1], word[2]])
return SBD_traindata_list
def load_test_data(filename):
SBD_testdata_list = []
with open(filename,'r') as f:
for line in f:
line = line.strip('\n')
word = line.split(' ')
SBD_testdata_list.append([word[0], word[1], word[2]])
return SBD_testdata_list
def feature_label(data_list, mode):
feature = []
label = []
index = 0
for pair in data_list:
if pair[2] == 'EOS' or pair[2] == 'NEOS':
# label list
if pair[2] == 'EOS':
label.append(1)
else:
label.append(0)
            # left (L) and right (R) context words
L = data_list[index][1][:-1]
if index == len(data_list)-1:
R = ' '
else:
R = data_list[index + 1][1]
len_L = int(len(L) < 3)
if L =='':
L_Cap = 0
else:
L_Cap = int(L[0].isupper())
R_Cap = int(R[0].isupper())
# own features
LL_len = int(len(data_list[index-1][1]) > 3)
if index == len(data_list)-2 or index == len(data_list)-1:
RR_len = 0
else:
RR_len = int(len(data_list[index+1][1]) > 3)
L_Cap_num = 0
for l in L :
if l.isupper():
L_Cap_num += 1
L_Cap_num = int(L_Cap_num > 3)
if mode == 'CoreFeature':
feature.append([L, R, len_L, L_Cap, R_Cap])
elif mode == "OwnThree":
feature.append([LL_len, RR_len, L_Cap_num])
elif mode == 'CoreOwn':
feature.append([L, R, len_L, L_Cap, R_Cap, LL_len, RR_len, L_Cap_num])
index += 1
return feature, label
# encode the word features with a one-hot vocabulary shared between train and test
def encode_feature(train_feature,test_feature):
word_dict = {}
index = 2
for pair in train_feature:
if pair[0] not in word_dict:
word_dict[pair[0]] = index
index += 1
if pair[1] not in word_dict:
word_dict[pair[1]] = index
index += 1
for pair in test_feature:
if pair[0] not in word_dict:
word_dict[pair[0]] = index
index += 1
if pair[1] not in word_dict:
word_dict[pair[1]] = index
index += 1
    # substitute the words in the feature vectors with their dictionary indices:
for pair in train_feature:
pair[0] = word_dict[pair[0]]
pair[1] = word_dict[pair[1]]
for pair in test_feature:
pair[0] = word_dict[pair[0]]
pair[1] = word_dict[pair[1]]
Train_len = len(train_feature)
all = train_feature + test_feature
ohe = preprocessing.OneHotEncoder() # Easier to read
ohe.fit(all)
Feature = ohe.transform(all).toarray()
TrainEncode = Feature[:Train_len,:]
TestEncode = Feature[Train_len:, :]
return TrainEncode, TestEncode
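# Illustrative sketch (hypothetical rows): with
#   train_feature = [['He', 'said', 1, 1, 0]]
#   test_feature  = [['Dr', 'Smith', 1, 1, 1]]
# the words are first replaced by dictionary indices (starting at 2), then
# both lists are one-hot encoded together and split back into TrainEncode
# (1 row) and TestEncode (1 row).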
def generate_outfile(SBDTestList, test_predict):
with open('SBD.test.out', 'w') as f:
test_predict_cate = []
for label in test_predict:
if label == 1:
test_predict_cate.append('EOS')
else:
test_predict_cate.append('NEOS')
f.write(mode + '\n')
num = 0
for pair in SBDTestList:
if pair[2] == "EOS" or pair[2] == 'NEOS':
f.write(" ".join([pair[0], pair[1], test_predict_cate[num]]))
f.write('\n')
num += 1
else:
f.write(" ".join([pair[0], pair[1], pair[2]]))
f.write('\n')
if __name__ == '__main__':
# train = "SBD.train"
# test = "SBD.test"
train = sys.argv[1]
test = sys.argv[2]
SBDTrainList = load_train_data(train)
SBDTestList = load_test_data(test)
ModeList = ['CoreFeature', "OwnThree", 'CoreOwn']
# ModeList = ['CoreFeature']
for mode in ModeList:
train_feature, train_label = feature_label(SBDTrainList, mode)
test_feature, test_label = feature_label(SBDTestList, mode)
TrainEncode, TestEncode = encode_feature(train_feature, test_feature)
        # train the Decision Tree
clf = tree.DecisionTreeClassifier()
clf = clf.fit(TrainEncode, train_label)
train_acc = clf.score(TrainEncode, train_label)
test_acc = clf.score(TestEncode, test_label)
test_predict = clf.predict(TestEncode)
print(mode)
print("train_acc: " + str(train_acc))
print("test_acc: " + str(test_acc))
if mode == 'CoreOwn':
generate_outfile(SBDTestList, test_predict)
|
JiazhaoLi/Assignment
|
EECS595/Assignment1/hw1/SBD.py
|
SBD.py
|
py
| 5,015 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sklearn.preprocessing.OneHotEncoder",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "sklearn.tree.DecisionTreeClassifier",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "sklearn.tree",
"line_number": 166,
"usage_type": "name"
}
] |
33561062837
|
"""
moving_avg_demo.py
"""
import numpy as np
import scipy as sp
import scipy.signal
import plot
import signal_generator
def moving_average_builder(length):
filt = np.array([1.0/length]*length)
return filt
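# For example, moving_average_builder(4) returns array([0.25, 0.25, 0.25, 0.25]),
# i.e. a 4-tap boxcar (uniform) FIR filter.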
def moving_average_demo1():
filt = moving_average_builder(5)
sig = signal_generator.sinusoid(128, 0.4*np.pi)
plot.stem(filt, title='Moving Average Filter With 5 Taps')
plot.stem(sig, title='Input Signal')
output = np.convolve(filt, sig, mode='full') # mode can be 'full', 'same', 'valid'
plot.stem(output, title='Output Signal')
ww, hh = scipy.signal.freqz(filt)
plot.mag_phase(hh, xaxis=ww/np.pi)
a = input()
return
if __name__ == '__main__':
moving_average_demo1()
|
Chris93Hall/filtering_presentation
|
moving_avg_demo.py
|
moving_avg_demo.py
|
py
| 730 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.array",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "signal_generator.sinusoid",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "plot.stem",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "plot.stem",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.convolve",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "plot.stem",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "scipy.signal.freqz",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "scipy.signal",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "plot.mag_phase",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 24,
"usage_type": "attribute"
}
] |
42818754446
|
from tkinter import *
from PIL import ImageTk, Image
import string
import random
root = Tk()
root.title("Я люблю BRAWL STARS")
root.geometry("1200x675")
def clicked():
exit = ""
for j in range(3):
n = 5
letters = 0
integers = 0
for i in range(n):
if letters < 3 and integers < 2:
a = random.randint(1,2)
if a == 1:
exit += random.sample(string.ascii_letters, 1)[0]
letters += 1
else:
exit+=str(random.randint(0,9))
integers += 1
elif letters < 3:
exit += random.sample(string.ascii_letters, 1)[0]
else:
exit+=str(random.randint(0,9))
if j == 2:
break
exit+='-'
canvas1.itemconfig(label1_canvas, text=exit.upper())
bg = ImageTk.PhotoImage(Image.open("2D4F4F53-D36C-4213-BB42-CAC30A9DD06D.jpeg"))
canvas1 = Canvas(root, width=1200, height=675)
canvas1.pack(fill="both", expand=True)
canvas1.create_image(0, 0, image=bg, anchor="nw")
btn = Button(root, text="Генерировать ключ", command=clicked)  # "Generate key"
button1_canvas = canvas1.create_window(950, 550, anchor="nw", window=btn)
label1_canvas = canvas1.create_text(1000, 500, text="Генерация ключа", fill="white",  # "Key generation"
                                    font=('Arial 25 bold'))
root.mainloop()
|
nelyuboov/Lab-4
|
main (2).py
|
main (2).py
|
py
| 1,483 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "random.randint",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "string.ascii_letters",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "string.ascii_letters",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 35,
"usage_type": "name"
}
] |
12198804557
|
from iskanje_v_sirino import Graph
import collections
import winsound
duration = 3000
freq = 440
'''
NxP_start = [
['', '', '', '', ''],
['', '', '', '', ''],
['B', '', '', '', ''],
['A', 'C', 'D', 'E', 'F']
]
NxP_end = [
['', 'C', '', '', ''],
['', 'E', '', '', ''],
['F', 'D', '', '', ''],
['B', 'A', '', '', '']
]
'''
NxP_start = [
['B', '',''],
['A', '', '']
]
NxP_end = [
['', 'B',''],
['', 'A', '']
]
N = len(NxP_start)
P = len(NxP_start[N-1])
# P - number of parking positions
# N - maximum number of large boxes stacked on top of each other
# p => 1 <= p <= P
# r => 1 <= r <= P
def prestavi(p, r, matrika1):
    matrika = matrika1[:]  # shallow copy (callers pass in a fresh matrix)
first_element = ''
delete_i = -1
delete_p_1 = -1
    # if p == r, return the matrix unchanged
if p == r:
return matrika
    # scan down column p-1 until the first non-empty cell; store it in first_element
for i in range(0, N):
if matrika[i][p-1] != '':
first_element = matrika[i][p-1]
delete_i = i
delete_p_1 = p-1
break
    # scan column r-1 from the bottom up for the first empty cell, place
    # first_element there, and clear the cell at coordinates (i, p-1)
for j in range(N-1, -1, -1):
if matrika[j][r-1] == '':
matrika[j][r-1] = first_element
if delete_i > -1 and delete_p_1 > -1:
matrika[delete_i][delete_p_1] = ''
break
return matrika
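# Illustrative example (hypothetical 2x2 warehouse, not from the original
# file): moving the top box of column 1 onto column 2:
#   prestavi(1, 2, [['B', ''], ['A', '']])  ->  [['', ''], ['A', 'B']]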
def izpis(NxP):
for a in NxP:
print(a)
# dict keys must be hashable, so matrices are converted to/from tuples
def tuple_to_list(t):
return [list(i) for i in t]
def list_to_tuple(l):
t = tuple()
for i in l:
t += tuple(i),
return t
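# Illustrative round trip (hypothetical matrix):
#   t = list_to_tuple([['A', ''], ['B', '']])   # (('A', ''), ('B', ''))
#   tuple_to_list(t)                            # [['A', ''], ['B', '']]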
def naredi_matriko(matrika):
return [list(i) for i in matrika]
def napolni(graf, start_m, kopija):
start = list_to_tuple(start_m)
for p in range(1, P+1):
for r in range(1, P+1):
kopija = naredi_matriko(start_m)
x = prestavi(p, r, kopija)
tuple_x = list_to_tuple(x)
if tuple_x != start:
graf.add(start, tuple_x)
def BFS(graf, root):
oce_od_elementa = collections.defaultdict(tuple)
vrsta = []
seen = set()
    # add the root
vrsta.append(list_to_tuple(root))
seen.add(str(root))
    kopija = naredi_matriko(root)  # copy of the start matrix
napolni(graf, root, kopija)
i = 0
while vrsta:
vozlisce = vrsta.pop(0)
for neighbour in graf.get(vozlisce):
if str(neighbour) not in seen:
print(i, ".")
i += 1
kopija_neig = naredi_matriko(neighbour)
napolni(graf, neighbour, kopija_neig)
vrsta.append(neighbour)
seen.add(str(neighbour))
if tuple_to_list(neighbour) == NxP_end:
#winsound.Beep(freq, duration)
return neighbour
# unfinished IDDFS stub (never called; the stack is never populated, so the
# loop body is dead code)
def IDDFS(graf, root):
stack = []
while stack:
vozilisce = root
if root == NxP_end:
return root
return
g = Graph()
print(BFS(g, NxP_start))
#g.print()
|
martin0b101/UI
|
robotizirano_skladisce.py
|
robotizirano_skladisce.py
|
py
| 3,070 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.defaultdict",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "iskanje_v_sirino.Graph",
"line_number": 155,
"usage_type": "call"
}
] |
26436874912
|
#mandelbrot by KB for CS550
#inspired by work done with wikipedia example code
from PIL import Image
import random
from PIL import ImageFilter
#set image size
imgx = 500
imgy = 500
xa, xb = -0.75029467235117, -0.7478726919928045
ya, yb = 0.06084172052354717, 0.06326370066585434
image = Image.new("RGB",(imgx,imgy))
#for all the pixels in the image
for Py in range(imgy):
yS= ((yb-ya)/(imgy-1)) * Py + (ya)
for Px in range(imgx):
        # map the pixel to a point in the viewing window [xa, xb] x [ya, yb]
xS = ((xb-xa)/(imgx-1))* Px + (xa)
x = 0
y = 0
iteration = 0
#set maximum number of iterations
max_iteration = 256
        while (x*x + y*y <= 4) and iteration < max_iteration:  # escape when |z| > 2, i.e. |z|^2 > 4
#calculations based on wikihow
xtemp = x*x - y*y + xS
y = 2*x*y + yS
iteration += 1
x = xtemp
# color shades based on iteration
        colorR = min(iteration, 255)  # clamp: iteration can reach 256
colorG = (iteration*50)%256
colorB = 256- iteration
image.putpixel((Px,Py),(colorR, colorG, colorB))
imageedits = image.filter(ImageFilter.CONTOUR)
imageedits.save("mandelbrot2.png", "PNG")
|
gbroady19/CS550
|
mandelbrot2.py
|
mandelbrot2.py
|
py
| 1,058 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "PIL.Image.new",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "PIL.ImageFilter.CONTOUR",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "PIL.ImageFilter",
"line_number": 40,
"usage_type": "name"
}
] |
10543655506
|
from datetime import datetime, time, timedelta
import iso8601
import logging
import pytz
import requests
import sys
from django.conf import settings
from django.core.cache import cache
from django.shortcuts import render
logger = logging.getLogger(__name__)
uk_tz = pytz.timezone('Europe/London')
utc_tz = pytz.utc
def rss_reader(request):
'''
HTTP GET the required RSS feed
    and render it for inclusion in a widget
'''
rss_url = request.GET.get('url','')
current_key = "rss_reader_current!{0}".format(rss_url)
lng_key = "rss_reader_lng!{0}".format(rss_url)
rss_xml = cache.get(current_key)
# If we got a value from the cache, use that
if rss_xml is not None:
logger.info('Cache hit for %s', current_key)
# Otherwise, retrieve data from the MetOffice
else:
logger.info('Cache miss for %s', current_key)
rss_xml = ''
try:
r = requests.get(rss_url)
r.raise_for_status()
# https://stackoverflow.com/questions/35042216/requests-module-return-json-with-items-unordered
rss_xml = r.text
except:
logger.error("Error retrieving rss feed for %s: %s %s",
rss_url,
sys.exc_info()[0],
sys.exc_info()[1])
# Whatever happens, cache what we got so we don't keep hitting the API
finally:
cache.set(current_key, rss_xml, timeout=600)
    # Cache whatever we've got as the 'last known good' version,
    # which never expires
try:
cache.set(lng_key, rss_xml, timeout=None)
except:
logger.error("Error cacheing current rss feed for %s: %s %s",
rss_url,
sys.exc_info()[0],
sys.exc_info()[1])
logger.info("rss feed %s was: '%s'", title, rss_xml)
# Fall back to the LNG version, if that's available
lng_data = cache.get(lng_key)
if lng_data is not None:
logger.info('Cache hit for %s', lng_key)
rss_xml = lng_data
else:
logger.info('Cache miss for %s', lng_key)
#rss_xml = "debug"
return render(request, 'smartpanel/rss_reader.html', { "rss_xml": rss_xml }
)
|
SmartCambridge/tfc_web
|
tfc_web/smartpanel/views/widgets/rss_reader.py
|
rss_reader.py
|
py
| 2,242 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pytz.timezone",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pytz.utc",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "django.core.cache.cache.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.core.cache.cache",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "sys.exc_info",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "sys.exc_info",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "django.core.cache.cache.set",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "django.core.cache.cache",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "django.core.cache.cache.set",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "django.core.cache.cache",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "sys.exc_info",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "sys.exc_info",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "django.core.cache.cache.get",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "django.core.cache.cache",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 70,
"usage_type": "call"
}
] |
43370134393
|
""" Tests for :module:`statics.markdown`."""
import unittest
__all__ = ["TestMarkdownItem"]
class TestMarkdownItem(unittest.TestCase):
def createFile(self, content):
import tempfile
f = tempfile.NamedTemporaryFile()
f.write(content)
f.flush()
return f
def test_it(self):
from statics.markdown import MarkdownItem
f = self.createFile("some markdown document.")
item = MarkdownItem("name", f.name)
self.assertEqual(item.name, "name")
self.assertEqual(item.metadata(), {})
self.assertEqual(item.content(), "<p>some markdown document.</p>")
    def test_with_metadata(self):
from statics.markdown import MarkdownItem
f = self.createFile("Title: A Title\nList: Value1\n\tValue2\n\ncontent")
item = MarkdownItem("name", f.name)
self.assertEqual(item.name, "name")
self.assertEqual(item.metadata(), {"title": "A Title",
"list": ["Value1", "Value2"]})
self.assertEqual(item.content(), "<p>content</p>")
|
andreypopp/statics
|
statics/tests/test_markdown.py
|
test_markdown.py
|
py
| 1,089 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "unittest.TestCase",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "tempfile.NamedTemporaryFile",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "statics.markdown.MarkdownItem",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "statics.markdown.MarkdownItem",
"line_number": 28,
"usage_type": "call"
}
] |
27615694777
|
"""
Get information about how many adult movies/series etc. there are per
region. Get the top 100 of them from the region with the biggest count to
the region with the smallest one.
title.basics.tsv.gz title.akas.tsv.gz
"""
from pyspark import SparkConf
from pyspark.sql import SparkSession
import pyspark.sql.types as t
import pyspark.sql.functions as f
from pyspark.sql import Window
def task5():
spark_session = (SparkSession.builder
.master("local")
.appName("task app")
.config(conf=SparkConf())
.getOrCreate())
schema_title_basics = t.StructType([
t.StructField("tconst", t.StringType(), nullable=True),
t.StructField("titleType", t.StringType(), nullable=True),
t.StructField("primaryTitle", t.StringType(), nullable=True),
t.StructField("originalTitle", t.StringType(), nullable=True),
t.StructField("isAdult", t.StringType(), nullable=True),
t.StructField("startYear", t.IntegerType(), nullable=True),
t.StructField("endYear", t.IntegerType(), nullable=True),
t.StructField("runtimeMinutes", t.IntegerType(), nullable=True),
t.StructField("genres", t.StringType(), nullable=True),
])
schema_title_akas = t.StructType([
t.StructField("titleId", t.StringType(), nullable=False),
t.StructField("ordering", t.StringType(), nullable=False),
t.StructField("title", t.StringType(), nullable=False),
t.StructField("region", t.StringType(), nullable=True),
t.StructField("language", t.StringType(), nullable=True),
t.StructField("types", t.StringType(), nullable=True),
t.StructField("attributes", t.StringType(), nullable=True),
t.StructField("isOriginalTitle", t.StringType(), nullable=True)
])
schema_ratings_basics = t.StructType([
t.StructField("tconst", t.StringType(), nullable=True),
t.StructField("averageRating", t.DoubleType(), nullable=True),
t.StructField("numVotes", t.IntegerType(), nullable=True)
])
file_read_basics = r'.\Data\input\title.basics.tsv.gz'
file_read_akas = r'.\Data\input\title.akas.tsv.gz'
file_read_ratings = r'.\Data\input\title.ratings.tsv.gz'
from_csv_df = spark_session.read.csv(
file_read_basics, header=True, nullValue='null', sep=r'\t', schema=schema_title_basics)
from_csv_df_akas = spark_session.read.csv(
file_read_akas, header=True, nullValue='null', sep=r'\t', schema=schema_title_akas)
from_csv_df_ratings = spark_session.read.csv(
file_read_ratings, header=True, nullValue='null', sep=r'\t', schema=schema_ratings_basics)
temp_df1 = from_csv_df.select("tconst", "isAdult").filter(f.col("isAdult") == 1)
temp_df2 = from_csv_df_akas.select("region", "titleId", "title")\
.filter((f.col("region").isNotNull()) & (f.col("region") != r"\N")).withColumnRenamed("titleId", "tconst")
temp_df3 = temp_df1.join(temp_df2, "tconst")
temp_df4 = temp_df3.join(from_csv_df_ratings.select("averageRating", "tconst"), "tconst")
window = Window.partitionBy("region").orderBy("region")
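    # f.count over this window gives every row the total number of adult
    # titles in its region (adult_per_region)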
temp_df4 = temp_df4.withColumn("adult_per_region", f.count(f.col("region")).over(window))
region_min = temp_df4.agg(f.min("adult_per_region")).collect()[0][0]
region_max = temp_df4.agg(f.max("adult_per_region")).collect()[0][0]
temp_dfmin = temp_df4.filter(f.col("adult_per_region") == region_min).orderBy(f.col("averageRating").desc()).limit(100)
temp_dfmax = temp_df4.filter(f.col("adult_per_region") == region_max).orderBy(f.col("averageRating").desc()).limit(100)
from_csv_df_task8 = temp_dfmin.union(temp_dfmax)
#from_csv_df_task8.show(200, truncate=False)
file_write = r'.\Data\output\task08'
from_csv_df_task8.write.csv(file_write, header=True, mode="overwrite")
return 0
|
Tetyana83/spark
|
task5.py
|
task5.py
|
py
| 4,199 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pyspark.sql.SparkSession.builder.master",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "pyspark.SparkConf",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructType",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.IntegerType",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.IntegerType",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.IntegerType",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructType",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructType",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.DoubleType",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.IntegerType",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions.col",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.col",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.Window.partitionBy",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.Window",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.count",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.col",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions.min",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.max",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.col",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.col",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 68,
"usage_type": "name"
}
] |
8515938890
|
import numpy as np
from matplotlib import cm
import matplotlib.pyplot as plt
from matplotlib import gridspec
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
from pandas import unique
import csv
import h5py
from astropy import constants as const
from astropy import units as u
I_units = u.erg*u.cm**(-2)*u.s**(-1)*u.Hz**(-1)*u.sr**(-1)
h = const.h
c = const.c
kB = const.k_B
data_path = "/mn/stornext/d20/RoCS/atulm/Project1_stars/SED_data/"
SED_path = data_path + "Clean_SED_data/"
path_art = "/mn/stornext/d19/RoCS/jonasrth/ART/SED/"
def get_star_data(star_name):
"""
Collects necessary data on the stars to compare them with model SED.
    star_name (str) must match one of the 12 stars compiled in the EMISSA project.
"""
# Collecting SED data for star_name
filename = star_name + "_CleanSED.csv"
with open(SED_path+filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",")
csv_array = np.array(list(csv_reader))
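    # Column indices of the frequency, flux, flux-error and table-name fields in the SED table.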
n_freq = np.argwhere(csv_array[0]=="sed_freq")[0][0]
n_flux = np.argwhere(csv_array[0]=="sed_flux")[0][0]
n_eflux = np.argwhere(csv_array[0]=="sed_eflux")[0][0]
n_tab = np.argwhere(csv_array[0]=="_tabname")[0][0]
sed_freq = csv_array[1:,n_freq].astype(np.float64)*u.GHz
sed_flux = csv_array[1:,n_flux].astype(np.float64)*u.Jy
sed_eflux = csv_array[1:,n_eflux].astype(np.float64)*u.Jy
tabname = csv_array[1:,n_tab]
# Collecting radius and distance data for star_name
with open(data_path + "star_props.csv") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",")
csv_array = np.array(list(csv_reader))
n_d = np.argwhere(csv_array[0]==" Distance (pc)")[0][0]
n_ed = np.argwhere(csv_array[0]==" Dist_err")[0][0]
n_R = np.argwhere(csv_array[0]=="Radius (Rs)")[0][0]
n_eR = np.argwhere(csv_array[0]=="Rad_err")[0][0]
m = np.argwhere(csv_array[:,1]==star_name.replace("_"," "))[0][0]
d = float(csv_array[m, n_d])*u.pc
d_err = float(csv_array[m, n_ed])*u.pc
R = float(csv_array[m, n_R])*const.R_sun
R_err = float(csv_array[m, n_eR])*const.R_sun
# Returning collected data in dictionary:
data = {}
data["sed_freq"] = sed_freq
data["sed_flux"] = sed_flux
data["sed_eflux"] = sed_eflux
data["tabname"] = tabname
data["d"] = d
data["d_err"] = d_err
data["R"] = R
data["R_err"] = R_err
return data
def plot_SED(star_name, model_data, figname):
"""
"""
data = get_star_data(star_name)
mod_int = np.array([])
mod_wav = np.array([])
for file in model_data.values():
mod_int = np.append(mod_int, np.mean(np.array(file["Stokes_I"][0,...]),axis=(0,1)))
mod_wav = np.append(mod_wav, np.array(file["Wavelength"]))
mod_freq = (c/(mod_wav*u.angstrom)).to(u.GHz)
mod_freq, inds = np.unique(mod_freq, return_index=True)
mod_int = mod_int[inds]*I_units
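    # Scale model intensity to flux: S = pi * (R/d)^2 * I, i.e. intensity times the
    # solid angle of the stellar disc; the max/min variants propagate the R and d errors.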
mod_flux = (np.pi*(data["R"]/data["d"])**2*mod_int*u.sr).to(u.Jy)
mod_flux_max = (np.pi*((data["R"]+data["R_err"])/(data["d"]-data["d_err"]))**2*mod_int*u.sr).to(u.Jy)
mod_flux_min = (np.pi*((data["R"]-data["R_err"])/(data["d"]+data["d_err"]))**2*mod_int*u.sr).to(u.Jy)
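    # Indices bracketing the observed frequency range on the model frequency grid.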
a = np.argmin(abs(mod_freq.value - np.min(data["sed_freq"]).value)) - 1
b = np.argmin(abs(mod_freq.value - np.max(data["sed_freq"]).value)) + 1
#a = np.argmin(abs(mod_freq.value - 7)) - 1
### Interpolation
f = interp1d(mod_freq, mod_flux)
### Plotting:
fig = plt.figure(figsize=(8,6.4))
gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], hspace=0)
ax0 = fig.add_subplot(gs[0])
#plt.suptitle(star_name.replace("_"," "), x=0.5, y=1.0)
ax0.text(0.1, 0.9, star_name.replace("_"," "), transform=ax0.transAxes)
cmap = cm.get_cmap("gnuplot")
gradient = np.linspace(0, 1, len(unique(data["tabname"])))
for it,tab in enumerate(unique(data["tabname"])):
n = np.argwhere(data["tabname"]==tab)[:,0]
ax0.errorbar(data["sed_freq"][n].value, data["sed_flux"][n].value, yerr=data["sed_eflux"][n].value,
color=cmap(gradient[it]), ls="None", marker="o", label=tab)
if tab=="ALMA data":
n_ALMA = it
ax0.plot(mod_freq[a:b], mod_flux[a:b], color="black", ls="solid", label="model data")
ax0.fill_between(mod_freq[a:b], y1=mod_flux_max[a:b], y2=mod_flux_min[a:b], color="grey", alpha=0.5)
handles, labels = ax0.get_legend_handles_labels()
legd=ax0.legend([handles[n_ALMA+1], handles[0]], [labels[n_ALMA+1], labels[0]], loc="lower center", bbox_to_anchor=(0.5,1.01), ncol=5)
ax0.axvspan(0,1e3, color="grey", alpha=0.2)
ax0.set_ylabel("Flux [Jy]")
ax0.xaxis.grid(which="both")
ax0.yaxis.grid(which="major")
ax0.set_yscale("log")
ax1 = fig.add_subplot(gs[1], sharex=ax0)
ax1.errorbar(data["sed_freq"], (data["sed_flux"].value - f(data["sed_freq"]))/f(data["sed_freq"]),
color="black", ls="None", marker="o")
ax1.axvspan(0,1e3, color="grey", alpha=0.2)
ax1.set_ylabel(r"$\Delta S/S_{mod}$")
ax1.set_xlabel("Frequency [GHz]")
ax1.xaxis.grid(which="both")
ax1.yaxis.grid(which="major")
ax1.set_xscale("log")
plt.setp(ax0.get_xticklabels(),visible=False)
plt.savefig("figures/" + figname, bbox_inches="tight")
star_name_list = ["Gam_Vir_A", "Gam_Vir_B", "Eta_Crv", "Gam_Lep", "Alf_Cen_A", "61_Vir", "Alf_Cen_B", "Eps_Eri", "GJ_2006_A", "Proxima_Cen"]
star_letter_list = ["C", "D", "E", "F", "G", "H", "I", "J", "K", "L"]
model_name_list = ["t65", "t65", "t65", "t65", "t57", "t57", "t50", "t50", "t32", "t32"]
for i in range(len(star_name_list)):
print(star_letter_list[i] + ": " +star_name_list[i]+" - "+model_name_list[i])
SEDs = {"t65" : {0 : h5py.File(path_art + "d3t65g45_000G_SED.h5","r")},
"t57" : {0 : h5py.File(path_art + "d3t57g44_000G_SED.h5","r")},
"t50" : {0 : h5py.File(path_art + "d3t50g45_000G_SED.h5","r")},
"t32" : {0 : h5py.File(path_art + "d3t32g45_000G_SED.h5","r")}
}
for i in range(len(star_name_list)):
figname = "EMISSA/SED_"+star_letter_list[i]+"_"+model_name_list[i]+".pdf"
plot_SED(star_name_list[i], SEDs[model_name_list[i]], figname)
#for i in range(len(star_name_list)):
# figname = "presentation/SED_"+star_letter_list[i]+"_"+model_name_list[i]+".pdf"
# plot_SED(star_name_list[i], SEDs[model_name_list[i]], figname)
|
jonasrth/MSc-plots
|
SED_EMISSA_plots.py
|
SED_EMISSA_plots.py
|
py
| 6,655 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "astropy.units.erg",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "astropy.units.cm",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "astropy.units.s",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "astropy.units.Hz",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "astropy.units.sr",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "astropy.constants.h",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "astropy.constants",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "astropy.constants.c",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "astropy.constants",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "astropy.constants.k_B",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "astropy.constants",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "csv.reader",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.argwhere",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.argwhere",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.argwhere",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.argwhere",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "astropy.units.GHz",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "numpy.float64",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "astropy.units.Jy",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "numpy.float64",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "astropy.units.Jy",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "csv.reader",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.argwhere",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.argwhere",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.argwhere",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.argwhere",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.argwhere",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "astropy.units.pc",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "astropy.units.pc",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "astropy.constants.R_sun",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "astropy.constants",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "astropy.constants.R_sun",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "astropy.constants",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "astropy.units.angstrom",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "astropy.units.GHz",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "numpy.unique",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "astropy.units.sr",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "astropy.units.Jy",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "astropy.units.sr",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "astropy.units.Jy",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "astropy.units.sr",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "astropy.units.Jy",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "numpy.argmin",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "scipy.interpolate.interp1d",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "matplotlib.gridspec.GridSpec",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "matplotlib.gridspec",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "matplotlib.cm.get_cmap",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "matplotlib.cm",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "pandas.unique",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "pandas.unique",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "numpy.argwhere",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.setp",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "h5py.File",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 175,
"usage_type": "call"
}
] |
32927804563
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Construct templates and categories for Tekniska museet data.
"""
from collections import OrderedDict
import os.path
import csv
import pywikibot
import batchupload.listscraper as listscraper
import batchupload.common as common
import batchupload.helpers as helpers
from batchupload.make_info import MakeBaseInfo
MAPPINGS_DIR = 'mappings'
IMAGE_DIR = 'Curman'
# stem for maintenance categories
BATCH_CAT = 'Media contributed by Tekniska museet'
BATCH_DATE = '2017-10' # branch for this particular batch upload
LOGFILE = "Tekniska.log"
class TekniskaInfo(MakeBaseInfo):
def load_wd_value(self, qid, props, cache=None):
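        # Fetches the requested property values (pid -> label) for a Wikidata item,
        # reusing the cache when the item has already been looked up.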
if cache and qid in cache:
return cache[qid]
data = {}
wd_item = pywikibot.ItemPage(self.wikidata, qid)
wd_item.exists() # load data
for pid, label in props.items():
value = None
claims = wd_item.claims.get(pid)
if claims:
value = claims[0].getTarget()
data[label] = value
if cache:
cache[qid] = data
return data
def __init__(self, **options):
super(TekniskaInfo, self).__init__(**options)
self.batch_cat = "{}: {}".format(BATCH_CAT, BATCH_DATE)
self.commons = pywikibot.Site('commons', 'commons')
self.wikidata = pywikibot.Site('wikidata', 'wikidata')
self.log = common.LogFile('', LOGFILE)
self.photographer_cache = {}
self.category_cache = []
def load_data(self, in_file):
return common.open_and_read_file(in_file, as_json=False)
def generate_content_cats(self, item):
# to do -- generate cats from keywords
item.generate_place_cats()
return [x for x in list(item.content_cats) if x is not None]
def generate_filename(self, item):
id_no = item.id_no
title = item.image_title
provider = "TEKM"
return helpers.format_filename(
title, provider, id_no)
def generate_meta_cats(self, item, cats):
cats = set(item.meta_cats)
cats.add(self.batch_cat)
return list(cats)
def get_original_filename(self, item):
# should be updated if files named with another field
return item.id_no
def load_mappings(self, update_mappings):
concrete_motif_file = os.path.join(MAPPINGS_DIR, 'concrete_motif.json')
concrete_motif_page = 'Commons:Tekniska museet/Curman/mapping title'
geo_file = os.path.join(MAPPINGS_DIR, 'geo.json')
geo_page = 'Commons:Tekniska museet/Curman/mapping location'
keywords_file = os.path.join(MAPPINGS_DIR, 'keywords.json')
keywords_page = 'Commons:Tekniska museet/Curman/mapping amnesord'
if update_mappings:
print("Updating mappings...")
self.mappings['concrete_motif'] = self.get_concrete_motif_mapping(
concrete_motif_page)
common.open_and_write_file(concrete_motif_file, self.mappings[
'concrete_motif'], as_json=True)
self.mappings['geo'] = self.get_geo_mapping(geo_page)
common.open_and_write_file(geo_file, self.mappings[
'geo'], as_json=True)
self.mappings['keywords'] = self.get_keywords_mapping(keywords_page)
common.open_and_write_file(keywords_file, self.mappings[
'keywords'], as_json=True)
else:
self.mappings['concrete_motif'] = common.open_and_read_file(
concrete_motif_file, as_json=True)
self.mappings['geo'] = common.open_and_read_file(
geo_file, as_json=True)
self.mappings['keywords'] = common.open_and_read_file(
keywords_file, as_json=True)
pywikibot.output('Loaded all mappings')
def get_concrete_motif_mapping(self, page):
motifs = {}
page = pywikibot.Page(self.commons, page)
data = listscraper.parseEntries(
page.text,
row_t='User:André Costa (WMSE)/mapping-row',
default_params={'name': '', 'category': '', 'frequency': ''})
for entry in data:
if entry['category'] and entry['name']:
category = entry['category'][0]
name = entry['name'][0]
motifs[name] = category
return motifs
def get_keywords_mapping(self, p):
keywords = {}
page = pywikibot.Page(self.commons, p)
data = listscraper.parseEntries(
page.text,
row_t='User:André Costa (WMSE)/mapping-row',
default_params={'name': '', 'category': '', 'frequency': ''})
for entry in data:
if entry['category'] and entry['name']:
category = entry['category'][0]
name = entry['name'][0]
keywords[name] = category
return keywords
def get_geo_mapping(self, p):
page = pywikibot.Page(self.commons, p)
data = listscraper.parseEntries(
page.text,
row_t='User:André Costa (WMSE)/mapping-row',
default_params={'name': '', 'wikidata': '', 'frequency': ''})
geo_ids = {}
for entry in data:
if entry['wikidata'] and entry['name']:
wikidata = entry['wikidata'][0]
name = entry['name'][0]
if wikidata != '-':
geo_ids[name] = wikidata
# look up data on Wikidata
props = {'P373': 'commonscat'}
geo = {}
for name, qid in geo_ids.items():
            geo[name] = self.load_wd_value(
                qid, props)
            geo[name]["wd"] = qid
return geo
def make_info_template(self, item):
template_name = 'Photograph'
template_data = OrderedDict()
template_data['title'] = item.generate_title()
template_data['description'] = item.generate_description()
template_data['photographer'] = "{{Creator:Sigurd Curman}}"
template_data['department'] = ("Sigurd Curmans arkiv / "
"Tekniska museet (SC-K1-1)")
# template_data['date'] = item.generate_date()
template_data['permission'] = item.generate_license()
template_data['ID'] = item.generate_id()
template_data['source'] = item.generate_source()
return helpers.output_block_template(template_name, template_data, 0)
def process_data(self, raw_data):
d = {}
reader = csv.DictReader(raw_data.splitlines(), dialect='excel-tab')
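        # Maps internal field names to the Swedish column headers of the source metadata file.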
tagDict = {
"image_title": "Titel",
"id_no": "Identifikationsnr",
"description": "Motiv-beskrivning",
"location": "Avbildade - orter",
"alt_id_no": "Alternativt nummer-Institutionsintern katalog/lista"
}
for r in reader:
rec_dic = {}
for tag in tagDict:
column_name = tagDict[tag]
value = r[column_name]
rec_dic[tag] = value.strip()
id_no = rec_dic["id_no"]
d[id_no] = TekniskaItem(rec_dic, self)
self.data = d
class TekniskaItem(object):
def __init__(self, initial_data, info):
for key, value in initial_data.items():
setattr(self, key, value)
self.wd = {}
self.content_cats = set()
self.meta_cats = set()
self.info = info
self.commons = pywikibot.Site('commons', 'commons')
def generate_geo_cat(self):
cats = self.info.mappings["geo"]
if self.location in cats.keys():
cat = cats[self.location].get("commonscat")
self.content_cats.add(cat)
def generate_place_cats(self):
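        # Prefer a category for the specific motif; otherwise fall back to a geographic category.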
has_specific_place = False
cats = self.info.mappings["concrete_motif"]
if self.image_title in cats.keys():
concr_cat = cats.get(self.image_title)
self.content_cats.add(concr_cat)
has_specific_place = True
if not has_specific_place:
self.generate_geo_cat()
def generate_description(self):
if self.description:
swedish = "{{{{sv|{}}}}}".format(self.description)
return swedish
def generate_title(self):
return "{{{{sv|{}}}}}".format(self.image_title)
def generate_source(self):
return "{{Tekniska museet cooperation project}}"
def generate_id(self):
return '{{TEKM-link|' + self.id_no + '}}'
def generate_license(self):
return "{{PD-old-70}}"
if __name__ == '__main__':
TekniskaInfo.main()
|
Vesihiisi/TEKM-import
|
info_tekniska.py
|
info_tekniska.py
|
py
| 8,639 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "batchupload.make_info.MakeBaseInfo",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "pywikibot.ItemPage",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pywikibot.Site",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pywikibot.Site",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "batchupload.common.LogFile",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "batchupload.common",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "batchupload.common.open_and_read_file",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "batchupload.common",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "batchupload.helpers.format_filename",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "batchupload.helpers",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "os.path.path.join",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "os.path.path.join",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "os.path.path.join",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "batchupload.common.open_and_write_file",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "batchupload.common",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "batchupload.common.open_and_write_file",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "batchupload.common",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "batchupload.common.open_and_write_file",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "batchupload.common",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "batchupload.common.open_and_read_file",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "batchupload.common",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "batchupload.common.open_and_read_file",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "batchupload.common",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "batchupload.common.open_and_read_file",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "batchupload.common",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "pywikibot.output",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "pywikibot.Page",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "batchupload.listscraper.parseEntries",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "batchupload.listscraper",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "pywikibot.Page",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "batchupload.listscraper.parseEntries",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "batchupload.listscraper",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "pywikibot.Page",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "batchupload.listscraper.parseEntries",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "batchupload.listscraper",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "collections.OrderedDict",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "batchupload.helpers.output_block_template",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "batchupload.helpers",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "csv.DictReader",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "pywikibot.Site",
"line_number": 204,
"usage_type": "call"
}
] |
27944232660
|
"""
This module defines the base class for all machine learning models used to analyse
reusability rate.
Last updated: MB 29/08/2020 - created module.
"""
# import external libraries.
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import scipy.stats as stats
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import GridSearchCV
from skopt import BayesSearchCV
# import local modules.
from utils import data_loader
"""
Base class that all models inherit from.
"""
class BaseModel:
"""
store dataset. data is a dictionary.
"""
def __init__(self, data, normalize=False, **kwargs):
print(">> initialising model...")
# if we are normalizing data, save the normalized x value.
if normalize is True:
self.normalization_params = data_loader.get_normalization_params(data['train_x'])
self.train_x = self.normalize_x(data['train_x'])
self.test_x = self.normalize_x(data['test_x'])
# if we are not normalizing data, use regular x values.
else:
self.train_x = data['train_x']
self.test_x = data['test_x']
# save the y values and other attributes.
self.train_y = data['train_y']
self.test_y = data['test_y']
        self.test_predictions = pd.Series()   # placeholder, filled by the 'test' function.
        self.train_predictions = pd.Series()  # placeholder, filled by the 'train' function.
self.is_trained = False
def hyperparameter_tuning(self, type, param_space):
        # Defines the type and number of cross-validation splits - refer to: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RepeatedStratifiedKFold.html
        # Repeated stratified k-fold -> repeats a stratified k-fold n times.
        # Stratified k-fold -> shuffles the data once before splitting it into k parts,
        # where each part is used once as a test set.
cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=1)
if type == 'Grid':
# Set all the variables for the grid search cross validation
search = GridSearchCV(estimator=self.model, param_grid=param_space, cv=cv, scoring='accuracy')
elif type == 'Bayesian':
            # Defines the Bayesian search CV - refer to: https://scikit-optimize.github.io/stable/modules/generated/skopt.BayesSearchCV.html
            # Bayesian optimisation is a sequential method that learns from each evaluation
            # which hyper-parameter regions look promising (in contrast to grid or random
            # search), using a probabilistic surrogate model of the objective.
            # Note: BayesSearchCV expects its parameter space as 'search_spaces', not 'param_grid'.
            search = BayesSearchCV(estimator=self.model, search_spaces=param_space, n_jobs=-1, cv=cv)
# perform the search - i.e. it fits the model on the training data set for the different hyper-parameter settings
search_result = search.fit(self.train_x, self.train_y)
# Prints the results - optimal hyper-parameters and the accuracy score
print("The best parameters are %s with a score of %0.2f"
% (search_result.best_params_, search_result.best_score_))
# Displays all of the hyper-parameters combination in descending order of accuracy score
grid_results = pd.concat([pd.DataFrame(search_result.cv_results_["params"]),pd.DataFrame(search_result.cv_results_["mean_test_score"], columns=["Accuracy"])],axis=1)
grid_results.sort_values(by=['Accuracy'], inplace=True, ascending=False)
        print(grid_results.head())
"""
train the model with current train and test XY values saved as attributes.
"""
def train(self):
print(">> training model...")
"""
output a description of the model.
"""
def describe(self):
print(">> describing model...")
# throw an error if model is not trained yet.
if self.is_trained is False:
            raise Exception('Train model before describing coefficients.')
"""
    generate predictions for the test_x data.
"""
def test(self):
print(">> predicting test data...")
# throw an error if model is not trained yet.
if self.is_trained is False:
            raise Exception('Train model before predicting test data.')
"""
analyse the performance of the predictions.
"""
def assess_performance(self):
# if there is no 'test_predictions' data generated, throw error.
        if self.test_predictions.empty:
raise Exception('Run the `test` function to predict test data.')
print(">> assessing prediction performance...")
"""
Convert a pandas dataframe of values into normalized values based on the
normalized params attribute. x_values is a pandas dataframe.
"""
def normalize_x(self, x_values):
        # throw an error if this model was not set up to use normalized values.
        # (normalization_params only exists when the model was built with normalize=True)
        if not hasattr(self, 'normalization_params'):
            raise Exception("This model was not set up to use normalized values.")
# copy the dataframe.
normalized_values = pd.DataFrame()
# iterate over each column and normalize.
for column in x_values.columns:
# retrieve normalization parameters.
mean = self.normalization_params[column]['mean']
std = self.normalization_params[column]['std']
# if std is zero, set to 1 to prevent NaNs.
if std == 0: std = 1
# save the normalized column.
normalized_values[column] = (x_values[column] - mean) / std
# return the normalized dataframe.
return normalized_values
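# Illustrative sketch, assuming a scikit-learn style estimator in self.model
# (hypothetical subclass, for illustration only):
#
#   class LinearModel(BaseModel):
#       def train(self):
#           super().train()
#           self.model.fit(self.train_x, self.train_y)
#           self.is_trained = True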
|
reusability/research
|
model/base_model.py
|
base_model.py
|
py
| 5,759 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "utils.data_loader.get_normalization_params",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "utils.data_loader",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "pandas.Series",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.RepeatedStratifiedKFold",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.GridSearchCV",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "skopt.BayesSearchCV",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 128,
"usage_type": "call"
}
] |