id | text | dataset_id
---|---|---|
3445404
|
from django.contrib import admin
from django.contrib import messages
from rest_framework_api_key.models import APIKey
class ApiKeyAdmin(admin.ModelAdmin):
    list_display = ('id', 'name', 'service', 'created', 'modified')
    fieldsets = (
        ('Required Information', {'fields': ('name', 'service')}),
        ('Additional Information', {'fields': ('key_message',)}),
    )
    search_fields = ('id', 'name',)

    def get_readonly_fields(self, request, obj=None):
        if not obj:
            return ['key_message']
        else:
            return ['key_message', 'service']

    def has_delete_permission(self, request, obj=None):
        return True

    def key_message(self, obj):
        if obj.key:
            return "Hidden"
        return "The API Key will be generated once you click save."

    def save_model(self, request, obj, form, change):
        if not change:
            messages.add_message(request, messages.WARNING, ('The API Key for %s is %s. Please note it since you will not be able to see it again.' % (obj.name, obj.key)))
        obj.save()
admin.site.register(APIKey, ApiKeyAdmin)
|
StarcoderdataPython
|
11327499
|
<filename>GUI/geo/__init__.py
"""
A module for geometry, geogebra is the past!
"""
from GUI.geo.bezier import Bezier
from GUI.geo.basics import Point, Rectangle
|
StarcoderdataPython
|
5193753
|
import cv2 as cv
img = cv.imread('Photos/park.jpg')
# cv.imshow("Original Image",img)
#Gray scaling (B/W)
#cv.imshow('Gray Scaled',cv.cvtColor(img,cv.COLOR_BGR2GRAY))
# BGR to HSV
#cv.imshow('HSV color space(FULL)',cv.cvtColor(img,cv.COLOR_BGR2HSV_FULL))
# cv.imshow('HSV color space',cv.cvtColor(img,cv.COLOR_BGR2HSV))
#BGR to LAB
cv.imshow('LAB color space',cv.cvtColor(img,cv.COLOR_BGR2LAB))
# BGR to RGB
#cv.imshow("RGB color space",cv.cvtColor(img,cv.COLOR_BGR2RGB))
# HSV to BGR
#cv.imshow("HSV to BGR",cv.cvtColor(cv.cvtColor(img,cv.COLOR_BGR2HSV_FULL),cv.COLOR_HSV2BGR_FULL))
#LAB to BGR
cv.imshow("LAB to BGR",cv.cvtColor(cv.cvtColor(img,cv.COLOR_BGR2LAB),cv.COLOR_LAB2BGR))
cv.waitKey(0)
cv.destroyAllWindows()
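
# A minimal additional sketch: the same kind of conversions with a guard for a failed read
# (cv.imread returns None when the path cannot be opened); the variable names below are illustrative.
img2 = cv.imread('Photos/park.jpg')
if img2 is None:
    raise FileNotFoundError('Photos/park.jpg could not be read')
gray = cv.cvtColor(img2, cv.COLOR_BGR2GRAY)      # single-channel grayscale
hsv = cv.cvtColor(img2, cv.COLOR_BGR2HSV)        # hue/saturation/value
lab_roundtrip = cv.cvtColor(cv.cvtColor(img2, cv.COLOR_BGR2LAB), cv.COLOR_LAB2BGR)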
|
StarcoderdataPython
|
6481538
|
from django import template
register = template.Library()
@register.simple_tag
def greet_user(message, username):
    return "{greeting_message}, {user}!!!".format(greeting_message=message, user=username)
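
# Usage sketch (assumes this module sits in a `templatetags` package of an installed app;
# with @register.simple_tag the tag name defaults to the function name):
#   {% load greet_tags %}          ('greet_tags' is a placeholder for this module's file name)
#   {% greet_user "Hello" request.user.username %}
# Called directly, the tag simply formats its two arguments:
#   greet_user("Hello", "Alice")   # -> "Hello, Alice!!!"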
|
StarcoderdataPython
|
3395982
|
<gh_stars>10-100
from typing import List, Any
import unittest
from qtt.instrument_drivers.virtualAwg.settings import SettingsInstrument
from qtt.instrument_drivers.virtualAwg.virtual_awg import VirtualAwg
from unittest.mock import Mock, call
class TestVirtualAwg(unittest.TestCase):
    def setUp(self) -> None:
        self.settings = SettingsInstrument('Fake')

    def tearDown(self) -> None:
        self.settings.close()

    @staticmethod
    def __create_awg_drivers() -> List[Any]:
        awg_driver1, awg_driver2 = Mock(), Mock()
        type(awg_driver2).__name__ = 'Tektronix_AWG5014'
        type(awg_driver1).__name__ = 'Tektronix_AWG5014'
        return [awg_driver1, awg_driver2]

    def test_init_HasNoErrors(self) -> None:
        awgs = TestVirtualAwg.__create_awg_drivers()
        awg_driver1, awg_driver2 = awgs
        virtual_awg = VirtualAwg(awgs, settings=self.settings)
        self.assertEqual(virtual_awg.settings, self.settings)
        self.assertEqual(awg_driver1, virtual_awg.awgs[0].fetch_awg)
        self.assertEqual(awg_driver2, virtual_awg.awgs[1].fetch_awg)
        virtual_awg.close()

    def test_snapshot_includes_settings(self) -> None:
        awgs = TestVirtualAwg.__create_awg_drivers()
        awg_driver1, awg_driver2 = awgs
        virtual_awg = VirtualAwg(awgs, settings=self.settings)
        instrument_snapshot = virtual_awg.snapshot()
        self.assertIn('settings_snapshot', instrument_snapshot['parameters'])
        self.assertDictEqual(instrument_snapshot['parameters']['settings_snapshot']['value'],
                             virtual_awg.settings_snapshot())
        virtual_awg.close()

    def test_init_HasNoInstruments(self) -> None:
        virtual_awg = VirtualAwg(settings=self.settings)
        self.assertEqual(virtual_awg.settings, self.settings)
        self.assertEqual(virtual_awg.instruments, [])
        virtual_awg.close()

    def test_add_instruments(self) -> None:
        awgs = TestVirtualAwg.__create_awg_drivers()
        awg_driver1, awg_driver2 = awgs
        virtual_awg = VirtualAwg(settings=self.settings)
        virtual_awg.add_instruments(awgs)
        self.assertEqual(2, len(virtual_awg.instruments))
        self.assertEqual(awgs, virtual_awg.instruments)
        virtual_awg.add_instruments(awgs)
        self.assertEqual(2, len(virtual_awg.instruments))
        self.assertEqual(awgs, virtual_awg.instruments)
        virtual_awg.close()

    def test_run(self):
        awgs = TestVirtualAwg.__create_awg_drivers()
        awg_driver1, awg_driver2 = awgs
        virtual_awg = VirtualAwg(awgs, settings=self.settings)
        awg_driver1.run.assert_not_called()
        awg_driver2.run.assert_not_called()
        virtual_awg.run()
        awg_driver1.run.assert_called_once()
        awg_driver2.run.assert_called_once()
        virtual_awg.close()

    def test_stop(self):
        awgs = TestVirtualAwg.__create_awg_drivers()
        awg_driver1, awg_driver2 = awgs
        virtual_awg = VirtualAwg(awgs, settings=self.settings)
        awg_driver1.stop.assert_not_called()
        awg_driver2.stop.assert_not_called()
        virtual_awg.stop()
        awg_driver1.stop.assert_called_once()
        awg_driver2.stop.assert_called_once()
        virtual_awg.close()

    def test_reset(self):
        awgs = TestVirtualAwg.__create_awg_drivers()
        awg_driver1, awg_driver2 = awgs
        virtual_awg = VirtualAwg(awgs, settings=self.settings)
        awg_driver1.reset.assert_not_called()
        awg_driver2.reset.assert_not_called()
        virtual_awg.reset()
        awg_driver1.reset.assert_called_once()
        awg_driver2.reset.assert_called_once()
        virtual_awg.close()

    def test_enable_outputs(self):
        self.settings.awg_map = {'P1': (0, 1), 'P2': (0, 2), 'P3': (1, 3), 'm4i_mk': (1, 4, 1)}
        virtual_awg = VirtualAwg(settings=self.settings)
        awgs = TestVirtualAwg.__create_awg_drivers()
        awg_driver1, awg_driver2 = awgs
        virtual_awg._awgs = awgs
        virtual_awg.enable_outputs(['P1', 'P2', 'P3'])
        awg_driver1.enable_outputs.assert_has_calls([call([1]), call([2])])
        awg_driver2.enable_outputs.assert_has_calls([call([3]), call([4])])
        virtual_awg.close()
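
# Standard entry point (an addition, assuming the tests may also be run directly as a script
# rather than only through a test runner):
if __name__ == '__main__':
    unittest.main()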
|
StarcoderdataPython
|
3242941
|
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
# matplotlib.rcParams['ps.useafm'] = True
# matplotlib.rcParams['pdf.use14corefonts'] = True
# matplotlib.rcParams['text.usetex'] = True
matplotlib.use('Agg')
import matplotlib.pyplot as plot
import matplotlib.cm as cm # cm.rainbow
from random import expovariate
import sys, pprint, math, numpy, simpy, getopt, itertools
from simplex_sim import *
from simplex_models import *
from mds_models import mds_exactbound_on_ar
# from mds_exp import sim_mds_nk
from scipy.interpolate import UnivariateSpline
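# This script simulates replicate-to-all / select-one simplex (FJ-FA) queues and plots
# steady-state probabilities, winning frequencies and average download times; names such as
# PG, AVQ, AVQMonitor, log, WARNING, dark_color and marker are assumed to be provided by the
# starred simplex_sim / simplex_models imports above.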
def plot_reptoall_steadystate_probhist():
t, r, k = 1, 2, 2
def get_state_prob_m(ar):
log(WARNING, "ar= {}, t= {}, r= {}, k= {}".format(ar, t, r, k) )
env = simpy.Environment()
pg = PG(env, "pg", ar)
avq = AVQ("avq", env, t, r, k, serv="Exp", servdist_m={'mu': 1} )
monitor = AVQMonitor(env, avq, poll_dist=lambda: 0.1)  # needed below for polled_state__counter_map
avq.join_q.out_m = monitor
pg.out = avq
env.run(until=50000)
# print("monitor.polled_state__counter_map= {}".format(pprint.pformat(monitor.polled_state__counter_map) ) )
total_counter = sum([c for rs, c in monitor.polled_state__counter_map.items() ] )
state_prob_m = {rs:float(c)/total_counter for rs, c in monitor.polled_state__counter_map.items() }
# print("polled_state__counter_map= {}".format(pprint.pformat(polled_state__counter_map) ) )
return state_prob_m # ['0,(0,0)']
# for ar in numpy.arange(0.05, 1.2, 0.1):
color = iter(cm.rainbow(numpy.linspace(0, 1, 20) ) )
plot.figure(figsize=(20,10) )
for ar in numpy.arange(0.05, 1.3, 0.1):
# for ar in numpy.arange(0.05, 0.1, 0.1):
state_prob_m = get_state_prob_m(ar)
def state(kp, i, j):
return "{},({},{})".format(kp, i, j)
i__tau_l_map = {}
for i in range(10):
if i not in i__tau_l_map:
i__tau_l_map[i] = []
for kp in range(i, 10):
s_u, s_l = state(kp, i, 0), state(kp+1, i, 0)
if s_u in state_prob_m and s_l in state_prob_m:
i__tau_l_map[i].append(state_prob_m[s_l]/state_prob_m[s_u] )
# if state(k+1, 0, i) in state_prob_m:
# i__tau_l_map[i].append(state_prob_m[state(k+1, 0, i) ] /state_prob_m[state(k, 0, i) ] )
log(WARNING, "i__tau_l_map=\n {}".format(pprint.pformat(i__tau_l_map) ) )
#
wing_cutoff_i = 2
wing_cutoff_sum = 0
for s, p in state_prob_m.items():
split_l = s.split(",")
if int(split_l[1].split("(")[1] ) > wing_cutoff_i or int(split_l[2].split(")")[0] ) > wing_cutoff_i:
wing_cutoff_sum += p
s_l, p_l = [], []
for s, p in state_prob_m.items():
if p > 0.01:
s_l.append(s)
p_l.append(p)
plot.bar(range(len(p_l) ), p_l, color=next(color) )
plot.xticks([i+0.5 for i in range(len(s_l) ) ], s_l, size='small')
plot.xlabel("State")
plot.ylabel("Steady-state probability")
plot.title(r't= {}, $\lambda$= {}, sum_on_plot= {}, wing_cutoff_sum= {}'. \
format(t, "{0:.2f}".format(ar), "{0:.2f}".format(sum(p_l)), "{0:.2f}".format(wing_cutoff_sum) ) )
plot.savefig("plot_reptoall_steadystate_probhist_ar_{0:.2f}.png".format(ar) )
plot.clf()
def test_avq(nf, ar, t, r, k, serv="Exp", servdist_m=None,
w_sys=True, mixed_traff=False, sching="rep-to-all", p_i_l= [] ):
E_T_f_sum = 0
for f in range(nf):
log(WARNING, "ar= {}, t= {}, r= {}, k= {}, servdist_m= {}, w_sys= {}, mixed_traff= {}, sching= {}". \
format(ar, t, r, k, servdist_m, w_sys, mixed_traff, sching) )
env = simpy.Environment()
if mixed_traff:
sym_l, sym__rgroup_l_m = simplex_sym_l__sym__rgroup_l_m(t)
log(WARNING, "sym__rgroup_l_m=\n {}".format(pprint.pformat(sym__rgroup_l_m) ) )
pg = MT_PG(env, "pg", ar, sym_l)
avq = MT_AVQ("mt_avq", env, t, sym__rgroup_l_m, serv, servdist_m)
# monitor = AVQMonitor(env, aq=avq, poll_dist=lambda: 0.1)
# avq.join_q.out_m = monitor
else:
psize = None
if serv == "Bern*Pareto":
psize = "Pareto"
serv = "Bern"
pg = PG(env, "pg", ar, psize=psize, psize_dist_m=servdist_m)
avq = AVQ("avq", env, t, r, k, servdist_m, sching, w_sys=w_sys)
# monitor = AVQMonitor(env, aq=avq, poll_dist=lambda: 0.1)
# avq.join_q.out_m = monitor
pg.out = avq
pg.init()
c = 3 if serv == "Pareto" or serv == "Bern" else 1
env.run(until=c*50000) # 20
if mixed_traff:
print("pg.sym__n_sent= {}".format(pprint.pformat(pg.sym__n_sent) ) )
st_l = avq.jsink.st_l
if len(st_l) > 0:
E_T_f_sum += float(sum(st_l) )/len(st_l)
# continue
# print("avq.jsink.qid__num_win_map= {}".format(pprint.pformat(avq.jsink.qid__num_win_map) ) )
total_n_wins = sum([n for i, n in avq.jsink.qid__num_win_map.items() ] )
print("pg.n_sent= {}, total_n_wins= {}".format(pg.n_sent, total_n_wins) )
qid_winfreq_map = {i:float(n)/total_n_wins for i, n in avq.jsink.qid__num_win_map.items() }
print("qid_winfreq_map= {}".format(pprint.pformat(qid_winfreq_map) ) )
# if not mixed_traff:
# total_n_types = sum(avq.servtype__num_m)
# p_i_l[:] = [n/total_n_types for t, n in enumerate(avq.servtype__num_m) ]
# print("p_i_l= {}".format(p_i_l) )
"""
print("\n")
# print("avq.join_q.state__num_found_map= {}".format(pprint.pformat(avq.join_q.state__num_found_map) ) )
# total_num_founds = sum([n for s, n in avq.join_q.state__num_found_map.items() ] )
# state__found_freq_map = {s:float(n)/total_num_founds for s, n in avq.join_q.state__num_found_map.items() }
# print("state__found_freq_map= {}".format(pprint.pformat(state__found_freq_map) ) )
print("\n")
# print("monitor.polled_state__counter_map= {}".format(pprint.pformat(monitor.polled_state__counter_map) ) )
total_counter = sum([c for rs, c in monitor.polled_state__counter_map.items() ] )
polled_state__counter_map = {rs:float(c)/total_counter for rs, c in monitor.polled_state__counter_map.items() }
print("polled_state__counter_map= {}".format(pprint.pformat(polled_state__counter_map) ) )
print("\n")
# print("monitor.state__num_found_by_job_departed_map= {}".format(pprint.pformat(monitor.state__num_found_by_job_departed_map) ) )
total_counter = sum([c for rs, c in monitor.state__num_found_by_job_departed_map.items() ] )
state__freq_found_by_job_departed_map = {rs:float(c)/total_counter for rs, c in monitor.state__num_found_by_job_departed_map.items() }
print("state__freq_found_by_job_departed_map= {}".format(pprint.pformat(state__freq_found_by_job_departed_map) ) )
print("\n")
# print("monitor.start_setup__num_found_by_job_departed_map= {}".format(pprint.pformat(monitor.start_setup__num_found_by_job_departed_map) ) )
total_counter = sum([c for rs, c in monitor.start_setup__num_found_by_job_departed_map.items() ] )
start_setup__freq_found_by_job_departed_map = {rs:float(c)/total_counter for rs, c in monitor.start_setup__num_found_by_job_departed_map.items() }
print("start_setup__freq_found_by_job_departed_map= {}".format(pprint.pformat(start_setup__freq_found_by_job_departed_map) ) )
"""
E_T = E_T_f_sum/nf
print(">> E_T= {}".format(E_T) )
if E_T > 100: return None
return E_T
def plot_winning_freqs():
t, r, k = 1, 2, 2
mu = 1
servdist_m = {'dist': 'Exp', 'mu': mu}
ar_ub = reptoall_innerbound_on_ar(t, servdist_m)
log(WARNING, "t= {}, servdist_m= {}, ar_ub={}".format(t, servdist_m, ar_ub) )
ar_l = []
qid__winfreq_l_map = {}
for ar in numpy.linspace(0.05, ar_ub*1.1, 20):
env = simpy.Environment()
pg = PG(env, "pg", ar)
avq = AVQ("avq", env, t, r, k, servdist_m, "rep-to-all")
pg.out = avq
pg.init()
# monitor = AVQMonitor(env, aq=avq, poll_dist=lambda: 1)
env.run(until=50000)
total_n_wins = sum([n for i, n in avq.jsink.qid__num_win_map.items() ] )
qid_winfreq_map = {i:float(n)/total_n_wins for i, n in avq.jsink.qid__num_win_map.items() }
print("ar= {}, qid_winfreq_map= {}".format(ar, pprint.pformat(qid_winfreq_map) ) )
ar_l.append(ar)
for qid, win_freq in qid_winfreq_map.items():
if qid not in qid__winfreq_l_map:
qid__winfreq_l_map[qid] = []
qid__winfreq_l_map[qid].append(win_freq)
plot.axhline(y=0.6, label=r'Lower-bound, $w_s$', c=next(dark_color), lw=2, ls='--')
plot.axhline(y=0.4, label=r'Upper-bound, $w_r$', c=next(dark_color), lw=2, ls='--')
counter = 0
for qid, win_freq_l in qid__winfreq_l_map.items():
if counter == 0:
plot.plot(ar_l, win_freq_l, label=r'Simulation, $w_s$', color=next(dark_color), marker=next(marker), ms=8, mew=2, ls=':')
else:
plot.plot(ar_l, win_freq_l, label=r'Simulation, $w_r$', color=next(dark_color), marker=next(marker), ms=8, mew=2, ls=':')
counter += 1
fontsize = 16
plot.legend(fontsize=13)
plot.xlabel(r'Arrival rate $\lambda$', fontsize=fontsize)
plot.ylabel("Fraction of request completions", fontsize=fontsize)
plot.title(r'Replicate-to-all $t=1$, $\gamma=\alpha=\beta= {}$'.format(mu), fontsize=fontsize)
fig = plot.gcf()
# def_size = fig.get_size_inches()
# fig.set_size_inches(def_size[0]/1.4, def_size[1]/1.4)
fig.set_size_inches(6, 4)
fig.tight_layout()
# plot.savefig("plot_winning_freqs.png", bbox_inches='tight')
plot.savefig("plot_winning_freqs.pdf", dpi=fig.dpi)
plot.gcf().clear()
log(WARNING, "done.")
def plot_simplex_vs_rep():
t, r, k = 3, 2, 2
serv = "Exp"
mu = 1
servdist_m = {'dist': serv, 'mu': mu}
if t == 1: ar_ub = 1.6
elif t == 3: ar_ub = 2.4
elif t == 7:
ar_ub = float(1.1*reptoall_innerbound_on_ar(mu, t, r, w_sys=True) )
mixed_traff = False
if mixed_traff: ar_ub = 1.1*ar_ub
log(WARNING, "t= {}, ar_ub= {}, serv= {}, servdist_m= {}, mixed_traff= {}".format(t, ar_ub, serv, servdist_m, mixed_traff) )
n = 2*t + 1
n_sym = int(numpy.log2(n+1) )
# # Same distance
# n_rep = t + 1
# n_total_rep = n_sym*n_rep
# mu_rep = n*mu/n_total_rep
# n_mds = n_sym + t
# k_mds = n_sym
# mu_mds = (2*t+1)*mu/n_mds
# ar_ub_mds = None
# if t == 3 and not mixed_traff: ar_ub_mds = ar_ub + 0.15 # mds_exactbound_on_ar(mu_mds, n_mds, k_mds)
# Preserving hot-cold data mix
# n_rep = t + 1
# n_total_rep = n_rep
# ar_ub_mds = None
# Same repair bandwidth
n_rep = t + 1
n_total_rep = int(n_sym*(t+1)/2)
mu_rep = n*mu/n_total_rep if not mixed_traff else n*mu/n_total_rep/n_sym
ar_ub_mds = None
ar_ub_rep = n_rep*mu_rep
sim_simplex_reqed = False
ET_sim_l = []
if not mixed_traff and t == 1:
ET_sim_l= [
0.6775872854372559,
0.7909557937247363,
0.9486987202221493,
1.166209238915134,
1.5685720588787688,
2.478342315521276,
2.6376081306859107,
2.906788473547391,
3.263700392764921,
3.5974807041868426,
4.289127887822366,
4.794525358984301,
5.896928018871929,
8.099664758903687,
12.74155958739236]
elif mixed_traff and t == 1:
ET_sim_mixedtraff_l= [
0.6795142458623882,
0.7748927520953908,
0.9120551663968248,
1.1017354073281063,
1.4008309793905753,
2.0319166972531395,
2.3461415096416802,
2.617752845887241,
2.931842457820586,
3.3957906721917803,
4.275140545352988,
5.384652265631004,
8.289396804081276,
None, # 21.85423973012918,
None]
elif not mixed_traff and t == 3:
ET_sim_l= [
0.4676519075931255,
0.5247256264186801,
0.6230081386991332,
0.775814486873029,
1.0207917160021767,
1.6244613243247372,
1.7481208563178903,
1.9667165686859327,
2.163968348080258,
2.5923594863306776,
3.0700378671376627,
3.796384731111067,
4.841880170965622,
6.610367379250164,
13.559429107437742]
elif mixed_traff and t == 3:
ET_sim_mixedtraff_l= [
0.46628732795742817,
0.5184094604634668,
0.5975473670434864,
0.7272615729604553,
0.9228862984361961,
1.3432430706439402,
1.5297012938889547,
1.7382202900329649,
2.006828591863818,
2.409746021676913,
2.9987862815607667,
4.1494167022302415,
6.7589082110731376,
None,
None]
elif not mixed_traff and t == 7:
ET_sim_l= [
0.31868938934489865,
0.3650196292881234,
0.4281058344507201,
0.5206469367259021,
0.6957249200007437,
1.1325417176453465,
1.2307386079673424,
1.3867025010207843,
1.5768489395874896,
1.865829597118924,
2.1844400783734677,
2.89287730113055,
4.276904798075734,
6.184072327220002,
None]
else:
sim_simplex_reqed = True
sim_mds_reqed = False
E_T_sim_mds_l = []
if t == 3:
E_T_sim_mds_l= [
0.4291382378049635,
0.4859752967032978,
0.5573834220518918,
0.6504572423217563,
0.7912534680581111,
1.0617796194912665,
1.1173955998468372,
1.1864819039768486,
1.3132561853089193,
1.4183354786680833,
1.5441924947724337,
1.6800188501504796,
1.97388257061194,
2.365205967704707,
2.552714259149294]
else:
sim_mds_reqed = True
sim_mds_split_to_one_reqed = False
E_T_sim_split_to_one_mds_l = []
if t == 3:
E_T_sim_split_to_one_mds_l= [
0.77365082603341717,
0.82440222647912942,
0.88499585518811741,
0.95059809100622572,
1.026735997953014,
1.1276811830357545,
1.2540326440649683,
1.4212608769595043,
1.6517287453133336,
1.9954850953566452,
2.5853499093220909,
3.8254183518878659,
8.5337611351281506,
None,
None]
else:
sim_mds_split_to_one_reqed = True
mew, ms = 3, 8
nf = 2
def plot_reptoall():
# Simplex
ar_simplex_l = []
for ar in [*numpy.linspace(0.05, 0.8*ar_ub, 5, endpoint=False), *numpy.linspace(0.8*ar_ub, ar_ub, 10) ]:
ar_simplex_l.append(ar)
if sim_simplex_reqed:
ET_sim_l.append(test_avq(nf, ar, t, r, k, serv, servdist_m, w_sys=True, mixed_traff=mixed_traff) )
c = next(dark_color)
label = 'Simplex' # if t != 1 else 'Simplex or MDS'
print("ET_sim_l= {}".format(pprint.pformat(ET_sim_l) ) )
plot.plot(ar_simplex_l, ET_sim_l, label=label, color=c, marker=next(marker), mew=mew, ms=ms, linestyle=':')
# stab_lim = ET_simplex_approx(t, ar, servdist_m, incremental=True, ar_ub=True)
# plot.axvline(stab_lim, label="Simplex stability", color=c, linestyle='--')
# Rep
ar_rep_l, E_T_rep_n_1_l = [], []
for ar in numpy.linspace(0.05, ar_ub_rep-0.05, 20):
ar_rep_l.append(ar)
E_T_rep_n_1_l.append(E_T_rep_n_1(ar, mu_rep, n_rep) )
# E_T_rep_n_1_l = [e*n_rep for e in E_T_rep_n_1_l]
c = next(dark_color)
plot.plot(ar_rep_l, E_T_rep_n_1_l, label=r'Replication', color=c, marker=next(marker), mew=mew, ms=ms, linestyle=':')
# plot.axvline(ar_ub_rep, label="Rep stability", color=c, linestyle='--')
# # MDS
# if ar_ub_mds is not None:
# ar_mds_l = []
# for ar in [*numpy.linspace(0.05, 0.7*ar_ub_mds, 5, endpoint=False), *numpy.linspace(0.7*ar_ub_mds, ar_ub, 10, endpoint=False) ]:
# # for ar in numpy.linspace(ar_ub_mds, ar_ub_mds, 1):
# ar_mds_l.append(ar)
# if sim_mds_reqed:
# E_T_sim_mds_l.append(test_avq(nf, ar, t=1, r, k, serv, {'mu': mu_mds}, w_sys=True) )
# print("E_T_sim_mds_l= {}".format(pprint.pformat(E_T_sim_mds_l) ) )
# plot.plot(ar_mds_l, E_T_sim_mds_l, label=r'MDS', color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':')
def plot_selectone():
# Simplex
ar_ub = arub_simplex_selectone(t, mu) + 0.1
log(WARNING, "ar_ub= {}".format(ar_ub) )
ar_l, ET_l = [], []
for ar in numpy.linspace(0.05, ar_ub, 20):
ar_l.append(ar)
ET_l.append(ET_selectone(t, ar, mu) )
label = 'Simplex' # if t != 1 else 'Simplex or MDS'
plot.plot(ar_l, ET_l, label=label, color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':')
# Rep
ar_ub_rep = n_rep*mu_rep
ar_l, E_T_rep_l = [], []
for ar in numpy.linspace(0.05, ar_ub_rep-0.2, 20):
ar_l.append(ar)
E_T_rep_l.append(E_T_rep_n_1_split_to_one(ar, mu_rep, n_rep) )
plot.plot(ar_l, E_T_rep_l, label=r'Replication', color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':')
plot_reptoall()
scheduling = "Replicate-to-all"
# plot_selectone()
# scheduling = "Split-to-one"
plot.legend(prop={'size':12})
plot.xlabel(r'Arrival rate $\lambda$ (Request/s)', fontsize=12)
plot.ylabel(r'Average download time (s)', fontsize=12)
# plot.title(r'$t={}, \mu={}$'.format(t, mu) )
plot.title(r'{} scheduling, $t= {}$'.format(scheduling, t) )
fig = plot.gcf()
def_size = fig.get_size_inches()
fig.set_size_inches(def_size[0]/1.4, def_size[1]/1.4)
fig.tight_layout()
plot.savefig("plot_simplex_vs_rep_t_{}_{}.pdf".format(t, scheduling) )
fig.clear()
# Energy
# ar_simplex_l, Energy_simplex_l = [], []
# for ar in numpy.linspace(0.1, ar_ub, 20):
# ar_simplex_l.append(ar)
# Energy_simplex_l.append(n/ar)
# ar_rep_l, Energy_rep_l = [], []
# for ar in numpy.linspace(0.1, ar_ub_rep, 20):
# ar_rep_l.append(ar)
# Energy_rep_l.append(n_total_rep/ar)
# plot.plot(ar_simplex_l, Energy_simplex_l, label='Simplex', color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':')
# plot.plot(ar_rep_l, Energy_rep_l, label='Rep', color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':')
# plot.legend()
# plot.xlabel(r'Arrival rate $\lambda$', fontsize=12)
# plot.ylabel(r'Unit of energy per request', fontsize=12)
# plot.title(r'$t={}, \mu={}$'.format(t, mu) )
# fig = plot.gcf()
# def_size = fig.get_size_inches()
# fig.set_size_inches(def_size[0]/1., def_size[1]/1.)
# fig.tight_layout()
# plot.savefig("plot_simplex_vs_rep_t_{}_energy.pdf".format(t) )
# fig.clear()
log(WARNING, "done; scheduling= {}, t= {}".format(scheduling, t) )
def plot_reptoall():
mixed_traff, w_sys = False, True
t, r, k = 1, 2, 2
serv = "Exp" # "Bern" # "Bern*Pareto" # "Pareto" # "Dolly"
mu = 1
# loc, a = 1, 2
# U, L, p, loc, a = 1, 8, 0.2, 0.1, 1.5 # 1, 8, 0.2, 1, 3
U, L, p, loc, a = 1, 10, 0.3, 0.1, 1.5 # 1, 8, 0.2, 1, 3
# For rep-to-all
if serv == "Exp":
servdist_m = {'dist': serv, 'mu': mu}
if t == 1: ar_ub = 1.6
elif t == 3: ar_ub = 2.4
elif t == 7: ar_ub = float(1.1*reptoall_innerbound_on_ar(t, servdist_m) )
else: ar_ub = reptoall_innerbound_on_ar(t, servdist_m)
elif serv == "Pareto":
servdist_m = {'dist': serv, 'loc': loc, 'a': a}
ar_ub = reptoall_innerbound_on_ar(t, servdist_m)
elif serv == "TPareto":
servdist_m = {'dist': serv, 'l': l, 'u': u, 'a': a}
ar_ub = reptoall_innerbound_on_ar(t, servdist_m)
elif serv == "Bern" or serv == "Bern*Pareto":
servdist_m = {'dist': serv, 'U': U, 'L': L, 'p': p, 'loc': loc, 'a': a}
ar_ub = reptoall_innerbound_on_ar(t, servdist_m)
elif serv == "Dolly":
servdist_m = None
if t == 1: ar_ub = 0.28
elif t == 3: ar_ub = 0.4
log(WARNING, "w_sys= {}, t= {}, r= {}, k= {}, servdist_m= {}, ar_ub= {}, mixed_traff= {}".format(w_sys, t, r, k, servdist_m, ar_ub, mixed_traff) )
ET_sm_l, ET_sim_l, ET_l, ET_lb_l = [], [], [], []
ET_alt_l, ET_matrixanalytic_l = [], []
ET_bestapprox_l, ET_betterapprox_l, ET_naiveapprox_l, ET_varkigauri_lb_l = [], [], [], []
ET_simbasedapprox_l = []
ET_sim_mixedtraff_l = []
# All below w_sys=True
nf = 3
sim_simplex = False
if serv == "Exp":
if t == 1:
ET_sim_l= [
0.6775872854372559,
0.7909557937247363,
0.9486987202221493,
1.166209238915134,
1.5685720588787688,
2.478342315521276,
2.6376081306859107,
2.906788473547391,
3.263700392764921,
3.5974807041868426,
4.289127887822366,
4.794525358984301,
5.896928018871929,
8.099664758903687,
12.74155958739236]
elif t == 3:
ET_sim_l= [
0.4676519075931255,
0.5247256264186801,
0.6230081386991332,
0.775814486873029,
1.0207917160021767,
1.6244613243247372,
1.7481208563178903,
1.9667165686859327,
2.163968348080258,
2.5923594863306776,
3.0700378671376627,
3.796384731111067,
4.841880170965622,
6.610367379250164,
13.559429107437742]
else: sim_simplex = True
elif serv == "Pareto":
if loc == 1 and a == 2:
if t == 1:
ET_sim_l= [
1.5299993522735693,
1.7233577876041122,
1.8952577131712123,
2.2418712080584897,
2.853623528849504,
4.2208097489868,
4.586420599121132,
5.191481636572133,
5.6340499086639815,
5.9712033727746,
7.94309766204549,
9.599736059102067,
13.280357368839619,
17.20104661693977,
25.449711725024084]
elif t == 3:
ET_sim_l= [
1.3221090353539466,
1.4459274633541828,
1.6229349092564267,
1.9043964678064051,
2.4154300633936936,
3.6666730405584844,
3.9217550909479577,
4.256167164955279,
4.717366068731679,
5.891743883842969,
6.04468767433355,
8.073514650754076,
9.880581947509592,
15.816118977624845,
28.433468299774272]
else: sim_simplex = True
elif loc == 1 and a == 5:
if t == 3:
ET_sim_l= [
1.1276007604818075,
1.240550592912947,
1.3862061325608057,
1.645653757532261,
2.0688083303883276,
3.2115831386711813,
3.2986018954384835,
3.8148027478966227,
4.033705086448495,
5.448028336643181,
5.697392211154507,
9.053323168666376,
10.17868048265699,
23.644561610837382,
None] # 93.02644300031747
else: sim_simplex = True
else: sim_simplex = True
elif serv == "Bern":
if U == 1 and L == 8 and p == 0.2:
if t == 1:
# nf = 3
ET_sim_l= [
1.6376474738985423,
1.9851446427827089,
2.4840795375267626,
3.1829054073054217,
4.39332366216294,
7.063110373762194,
7.4445330550351665,
8.208129233744382,
9.309321611480481,
10.747520637423975,
12.460023568734707,
15.038255521201348,
18.778687793661728,
23.582209372296532,
36.21619587757658]
elif t == 3:
# nf = 1
ET_sim_l= [
1.1072895175117927,
1.2582695204803385,
1.4572200912301614,
1.8340775367273732,
2.4430722742069184,
4.053853819806121,
4.4494192069988605,
5.061922101782603,
5.883304533639656,
6.705043861319703,
8.307668993372534,
11.041651319984396,
17.564101468045756,
33.184482866801716,
None]
else: sim_simplex = True
else: sim_simplex = True
elif serv == "Bern*Pareto":
if U == 1 and L == 8 and p == 0.2 and loc == 1 and a == 3:
if t == 11:
# nf = 3
ET_sim_l= [
2.142631836594827,
2.5302711620514966,
2.941315337537391,
3.8773353598252345,
4.550420407107853,
6.649089020276313,
7.000687768519389,
7.681497353358071,
8.058275694322152,
9.541434770613856,
10.136837383356713,
11.027889242435874,
14.072462480848941,
18.721889173565945,
29.85022801496356]
elif t == 33:
pass
else: sim_simplex = True
else: sim_simplex = True
else: sim_simplex = True
# Mixed traff
sim_simplex_mixed_traff = False
if mixed_traff:
if serv == "Exp":
if t == 1:
ET_sim_mixedtraff_l= [
0.678978501641253,
0.7748022818617738,
0.9072886738372506,
1.0928902616368403,
1.43754904360929,
2.0810587767368154,
2.266461910378062,
2.5977047234601125,
3.2441553951140985,
3.585616438620215,
4.415600179701042,
6.099149242270735,
9.786138444920114,
None, # 21.631079441147904
None]
elif t == 3:
ET_sim_mixedtraff_l= [
0.46217641274184773,
0.5249541076176077,
0.6065798815902482,
0.7193352388312126,
0.9238674360581351,
1.363955390788439,
1.4654931553890183,
1.733811055160431,
2.0493965738680795,
2.479767271681704,
3.065826086322138,
4.300842192226751,
8.05986376865404,
None, # 35.70730644518723,
None]
else:
sim_simplex_mixed_traff = True
ar_l = []
for ar in [*numpy.linspace(0.05, 0.8*ar_ub, 5, endpoint=False), *numpy.linspace(0.8*ar_ub, ar_ub, 10) ]:
# for ar in numpy.linspace(0.05, ar_ub, 2):
ar_l.append(ar)
p_i_l = []
if sim_simplex:
ET_sim = test_avq(nf, ar, t, r, k, serv, servdist_m, w_sys=w_sys, p_i_l=p_i_l)
print("*** ET_sim= {}".format(ET_sim) )
ET_sim_l.append(ET_sim)
# ET_sim_l.append(None)
# ET_simbasedapprox_l.append(ET_simplex_approx(t, ar, servdist_m, p_i_l=p_i_l)[0] )
# if sim_simplex_mixed_traff:
# ET_sim_mixedtraff_l.append(test_avq(nf, ar, t, r, k, serv, servdist_m, w_sys=w_sys, p_i_l=p_i_l, mixed_traff=True) )
ET_sm_l.append(ET_simplex_sm(t, ar, servdist_m) )
ET_lb_l.append(ET_simplex_lb(t, ar, servdist_m) )
if serv == "Exp":
if t == 1:
ET_l.append(ET_reptoall_t1(ar, mu) )
ET_matrixanalytic_l.append(ET_reptoall_t1_matrixanalytic(t, ar, mu) )
elif t == 2:
if w_sys:
ET_alt_l.append(simplex_w_two_repair__E_T(ar, mu, M=2) )
ET_l.append(simplex_w_two_repair__E_T(ar, mu, M=5) )
else:
ET_l.append(simplex_wo_sys_w_two_repair__E_T(ar, mu) )
ET_naiveapprox_l.append(ET_simplex_approx(t, ar, servdist_m, naive=True)[0] )
ET_betterapprox_l.append(ET_simplex_approx(t, ar, servdist_m)[0] )
ET_bestapprox_l.append(ET_simplex_approx(t, ar, servdist_m, incremental=True)[0] )
# ET_varkigauri_lb_l.append(E_T_simplex_varki_gauri_lb(t, ar, gamma, mu)[0] )
ar_mixed_traff_l = []
# for ar in numpy.linspace(0.2, 0.2, 1):
for ar in [*numpy.linspace(0.05, 0.8*ar_ub, 5, endpoint=False), *numpy.linspace(0.8*ar_ub, 1.1*ar_ub, 10) ]:
ar_mixed_traff_l.append(ar)
if sim_simplex_mixed_traff:
ET_sim_mixedtraff_l.append(test_avq(nf, ar, t, r, k, serv, servdist_m, w_sys=w_sys, mixed_traff=True) )
# mew, ms = 0.1, 10
mew, ms = 2, 5
def plot_poster():
# for better looking plot
ar_approx_l = list(ar_l)
ar = ar_ub + 0.03
ar_approx_l.append(ar)
ET_bestapprox_l.append(ET_simplex_approx(t, ar, servdist_m, incremental=True)[0] )
plot.plot(ar_l, ET_sim_l, label="FJ-FA, simulation", marker=next(marker), zorder=1, color=next(dark_color), linestyle=':', mew=mew, ms=ms)
plot.plot(ar_approx_l, ET_bestapprox_l, label="FJ-FA, M/G/1 approximation", zorder=2, marker=next(marker), color='black', linestyle=':', mew=mew, ms=ms)
def get_xs_l_ys_l(_x_l, _y_l):
x_l, y_l = [], []
for i, y in enumerate(_y_l):
if y is not None:
x_l.append(_x_l[i])
y_l.append(y)
s = UnivariateSpline(x_l, y_l, s=0.001)
xs_l = numpy.linspace(min(x_l), max(x_l), 20)
ys_l = s(xs_l)
return xs_l, ys_l
def plot_():
log(WARNING, "ET_sim_l= {}".format(pprint.pformat(ET_sim_l) ) )
# plot.plot(ar_l, ET_simbasedapprox_l, label=r'Sim-based approximation', marker=next(marker), zorder=1, color=next(dark_color), linestyle=':', mew=mew, ms=ms)
label = 'Simulation, fixed-arrivals' if mixed_traff else 'Simulation'
xs_l, ys_l = get_xs_l_ys_l(ar_l, ET_sim_l)
# plot.plot(ar_l, ET_sim_l, label=label, marker=next(marker), zorder=1, color=next(dark_color), linestyle=':', mew=mew, ms=ms)
plot.plot(xs_l, ys_l, label=label, marker=next(marker), zorder=1, color=next(dark_color), linestyle=':', mew=mew, ms=ms)
if mixed_traff:
log(WARNING, "ET_sim_mixedtraff_l= {}".format(pprint.pformat(ET_sim_mixedtraff_l) ) )
plot.plot(ar_mixed_traff_l, ET_sim_mixedtraff_l, label=r'Simulation, mixed-arrivals', color=next(dark_color), marker=next(marker), linestyle=':', mew=mew, ms=ms)
else:
xs_l, ys_l = get_xs_l_ys_l(ar_l, ET_sm_l)
# plot.plot(ar_l, ET_sm_l, label=r'Split-Merge upper bound', color=next(dark_color), marker=next(marker), linestyle=':', mew=mew, ms=ms)
plot.plot(xs_l, ys_l, label=r'Split-Merge upper bound', color=next(dark_color), marker=next(marker), linestyle=':', mew=mew, ms=ms)
# plot.plot(ar_l, ET_bestapprox_l, label=r'$M/G/1$ approximation', zorder=2, marker=next(marker), color='black', linestyle=':', mew=mew, ms=ms)
xs_l, ys_l = get_xs_l_ys_l(ar_l, ET_lb_l)
# plot.plot(ar_l, ET_lb_l, label=r'Fast-Split-Merge lower bound', color=next(dark_color), marker=next(marker), linestyle=':', mew=mew, ms=ms)
plot.plot(xs_l, ys_l, label=r'Fast-Split-Merge lower bound', color=next(dark_color), marker=next(marker), linestyle=':', mew=mew, ms=ms)
if t == 1:
xs_l, ys_l = get_xs_l_ys_l(ar_l, ET_matrixanalytic_l)
# plot.plot(ar_l, ET_matrixanalytic_l, label=r'Matrix-analytic upper-bound', color=next(dark_color), marker=next(marker), linestyle=':', mew=mew, ms=ms)
plot.plot(xs_l, ys_l, label=r'Matrix-analytic upper-bound', color=next(dark_color), marker=next(marker), linestyle=':', mew=mew, ms=ms)
xs_l, ys_l = get_xs_l_ys_l(ar_l, ET_l)
# plot.plot(ar_l, ET_l, label=r'High-traffic approximation', color=next(dark_color), marker=next(marker), linestyle=':', mew=mew, ms=ms)
plot.plot(xs_l, ys_l, label=r'High-traffic approximation', color=next(dark_color), marker=next(marker), linestyle=':', mew=mew, ms=ms)
# plot.plot(ar_l, ET_naiveapprox_l, label=r'Straightforward approximation', color=next(dark_color), marker=next(marker), linestyle=':', mew=mew, ms=ms)
# plot.plot(ar_l, ET_betterapprox_l, label=r'Better approximation', color=next(dark_color), marker=next(marker), linestyle=':', mew=mew, ms=ms)
# plot.plot(ar_l, ET_bestapprox_l, label=r'Fine-grained approximation', zorder=2, marker=next(marker), color='black', linestyle=':', mew=mew, ms=ms)
# plot.plot(ar_l, ET_varkigauri_lb_l, label=r'$E[\hat{T}_{fast-serial}]$', color=next(dark_color), marker=next(marker), linestyle=':', mew=mew)
# stab_lim = ET_simplex_approx(t, ar, servdist_m, incremental=True, ar_ub=True)
# plot.axvline(stab_lim, label="Stability limit", color='black', linestyle='--')
# plot.gca().set_xlim([0, stab_lim+0.1] )
def plot_selectone():
ar_ub = 0.9*arub_simplex_selectone(t, serv, servdist_m)
log(WARNING, "ar_ub={}".format(ar_ub) )
ar_l, ET_l = [], []
for ar in numpy.linspace(0.05, ar_ub, 50):
# for ar in numpy.linspace(0.05, ar_ub, 2):
ar_l.append(ar)
# if sim:
# ET_l.append(test_avq(nf, ar, t, r, k, serv, servdist_m, w_sys=w_sys, sching="select-one") )
ET_l.append(ET_selectone(t, ar, mu) )
# log(WARNING, "ET_l= {}".format(pprint.pformat(ET_l) ) )
plot.plot(ar_l, ET_l, 'b', label=r'Select-one', linestyle='--', lw=3, mew=mew, ms=ms)
# plot_poster()
plot_()
# plot.plot(ar_l, ET_sim_l, 'k', label=r'Replicate-to-all', linestyle='-', lw=3)
# plot_selectone()
fontsize = 16
plot.yscale('log')
plot.legend(loc='upper left', fontsize=13, framealpha=0.25)
plot.xlabel(r'Arrival rate $\lambda$', fontsize=fontsize)
plot.ylabel(r'Average download time', fontsize=fontsize)
serv_in_latex = None
if serv == "Exp":
serv_in_latex = r'\mathrm{Exp}' + r'(\mu={})'.format(mu)
elif serv == "Pareto":
serv_in_latex = r'Pareto(s={}, \alpha={})'.format(loc, a)
elif serv == "Bern":
serv_in_latex = r'Bernoulli(U={}, L={}, p={})'.format(U, L, p)
elif serv == "Dolly":
serv_in_latex = r'Dolly'
plot.title(r'FJ-FA with $r= {}$, $t= {}$, $\mu= {}$'.format(r, t, mu), fontsize=fontsize)
# plot.title(r'$t={}$, Servers $\sim {}$'.format(t, serv_in_latex) )
fig = plot.gcf()
fig.set_size_inches(6, 4)
fig.tight_layout()
plot.savefig("plot_FJFA_r{}_t{}.pdf".format(r, t) )
log(WARNING, "done; t= {}, r= {}, k= {}".format(t, r, k) )
def get_opts(argv):
opt_map = {}
try:
opts, args = getopt.getopt(argv, '', ['num_q='] )
except getopt.GetoptError:
log(ERROR, "Unexpected command line arg, expecting: exp.py --num_q=<>")
sys.exit(1)
for opt, arg in opts:
opt_map[opt] = arg
return opt_map
if __name__ == "__main__":
# opt_map = get_opts(sys.argv[1:] )
# log(WARNING, "opt_map= {}".format(pprint.pformat(opt_map) ) )
# num_q = int(opt_map["--num_q"] )
# plot_winning_freqs()
plot_reptoall()
# plot_simplex_vs_rep()
|
StarcoderdataPython
|
6542447
|
<reponame>revensky/psion
from starlette.requests import Request as StarletteRequest
from starlette.responses import Response as StarletteResponse
from psion.oauth2.models import Request, Response
from .base import BaseProvider
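# Adapter that maps Starlette's request/response objects onto psion's
# framework-agnostic Request/Response models.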
class StarletteProvider(BaseProvider):
    async def create_request(self, request: StarletteRequest) -> Request:
        return Request(
            method=request.method,
            url=str(request.url),
            headers=dict(request.headers),
            body=await request.body(),
            user=request.user,
        )

    async def create_response(self, response: Response) -> StarletteResponse:
        return StarletteResponse(response.body, response.status_code, response.headers)
|
StarcoderdataPython
|
1828091
|
<reponame>CornerCaseTechnologies/graphene-django-plus
import datetime
from django.contrib.auth.models import User
from graphene_django.utils.testing import GraphQLTestCase
from guardian.shortcuts import assign_perm
from .schema import schema
from .models import (
Project,
Milestone,
Issue,
)
class BaseTestCase(GraphQLTestCase):
    GRAPHQL_SCHEMA = schema

    def setUp(self):
        self.user = User(username='foobar')
        self.user.set_password('<PASSWORD>')
        self.user.save()
        self._client.login(username='foobar', password='<PASSWORD>')
        self.project = Project.objects.create(
            name="Test Project",
            due_date=datetime.date(2050, 1, 1),
        )
        self.milestone_1 = Milestone.objects.create(
            name="Milestone 1",
            due_date=datetime.date(2050, 1, 1),
            project=self.project,
        )
        self.milestone_2 = Milestone.objects.create(
            name="Milestone 2",
            project=self.project,
        )
        self.allowed_issues = []
        self.unallowed_issues = []
        self.issues = []
        for i, (priority, milestone) in enumerate([
                (1, self.milestone_1),
                (1, self.milestone_1),
                (0, self.milestone_2),
                (3, None)]):
            i = Issue.objects.create(
                name="Issue {}".format(i + 1),
                priority=priority,
                milestone=milestone,
            )
            if milestone == self.milestone_1:
                assign_perm('can_read', self.user, i)
                assign_perm('can_write', self.user, i)
                self.allowed_issues.append(i)
            else:
                self.unallowed_issues.append(i)
            self.issues.append(i)
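
    # A minimal sketch of a test built only on the fixtures above (it makes no assumption
    # about the GraphQL schema): the two issues on milestone_1 received object permissions,
    # the other two did not.
    def test_setup_splits_issues_by_permission(self):
        self.assertEqual(len(self.issues), 4)
        self.assertEqual(len(self.allowed_issues), 2)
        self.assertEqual(len(self.unallowed_issues), 2)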
|
StarcoderdataPython
|
11265093
|
import json
import os
import uuid
import Rhino # type: ignore
import rhinoscriptsyntax as rs
import Rhino.Geometry as rg
import scriptcontext as sc # type: ignore
from compas.geometry import Cylinder, Polyhedron
from compas.utilities import DataDecoder
from compas_rhino.geometry import RhinoMesh
from compas_rhino.ui import CommandMenu
from compas_rhino.utilities import clear_layer, delete_objects, draw_breps, draw_cylinders, draw_mesh
from compas_rhino.utilities.objects import get_object_name, get_object_names
from integral_timber_joints.assembly import Assembly
from integral_timber_joints.geometry import EnvironmentModel, JointHalfLap, JointNonPlanarLap, JointPolylineLap
from integral_timber_joints.process import Movement, RobotClampAssemblyProcess, RoboticMovement
from integral_timber_joints.rhino.artist import mesh_to_brep, vertices_and_faces_to_brep_struct
from integral_timber_joints.rhino.load import get_process, get_process_artist, process_is_none
from integral_timber_joints.rhino.utility import get_existing_beams_filter, recompute_dependent_solutions
from integral_timber_joints.tools import Clamp, Gripper, RobotWrist, ToolChanger
try:
from typing import Dict, List, Optional, Tuple, cast
except:
pass
add_brep = sc.doc.Objects.AddBrep
find_object = sc.doc.Objects.Find
guid = uuid.UUID
# ######################################################
# Functions to draw, redraw and delete selectable joints
# ######################################################
def draw_selectable_joint(process, joint_id, redraw=False, color=None):
# type: (RobotClampAssemblyProcess, tuple[str,str], bool, Tuple) -> None
"""Draw joint feature of a specific joint.
If color is specified, will use that colour.
Otherwise apply a colour scheme based on which side the joint is on.
A darker shade is used when joint_id[0] comes before joint_id[1] in the assembly sequence, a lighter shade otherwise.
"""
PURPLE = (64, 31, 62)
LIGHT_PURPLE = (151, 73, 146)
BLUE = (87, 115, 128)
LIGHT_BLUE = (170, 189, 197)
BROWN = (162, 97, 21)
LIGHT_BROWN = (239, 187, 129)
artist = get_process_artist()
if not hasattr(artist, '_joint_features'):
artist._joint_features = {}
# Collect all the feature shapes from the joint and Boolean Union into one object
rs.EnableRedraw(False)
joint = process.assembly.joint(joint_id)
beam = process.assembly.beam(joint_id[0])
shapes = joint.get_feature_shapes(beam)
guids_for_union = []
for shape in shapes:
if isinstance(shape, Polyhedron):
vertices_and_faces = shape.to_vertices_and_faces()
struct = vertices_and_faces_to_brep_struct(vertices_and_faces)
# print("Polyhedron :", struct)
guids_for_union.extend(draw_breps(struct, join=True, redraw=False))
elif isinstance(shape, Cylinder):
cylinder = shape
start = cylinder.center + cylinder.normal.scaled(cylinder.height / 2)
end = cylinder.center - cylinder.normal.scaled(cylinder.height / 2)
struct = {'start': list(start), 'end': list(end), 'radius': cylinder.circle.radius}
# print("Cylinder : ", struct)
guids_for_union.extend(draw_cylinders([struct], cap=True, redraw=False))
breps = [rs.coercebrep(guid) for guid in guids_for_union]
# ! First attempt at boolean all objects together
success = [brep.MergeCoplanarFaces(sc.doc.ModelAbsoluteTolerance) for brep in breps]
# print("MergeCoplanarFaces success : %s" % success)
if len(breps) > 1:
boolean_result = rg.Brep.CreateBooleanUnion(breps, sc.doc.ModelAbsoluteTolerance)
else:
boolean_result = breps
# print (boolean_result)
# ! Second attempt at boolean objects iteratively together
if boolean_result is None:
print("Warning: joints.py draw_joint_boolean_feature(%s-%s) Group Boolean Union Failure" % joint_id)
temp_result = [breps[0]]
for brep in breps[1:]:
temp_result.append(brep)
temp_result = rg.Brep.CreateBooleanUnion(temp_result, sc.doc.ModelAbsoluteTolerance)
if temp_result is None:
print("Warning: joints.py draw_joint_boolean_feature(%s-%s) Iterative Boolean Union Failure" % joint_id)
continue
print("Warning: Still OK")
temp_result = list(temp_result)
boolean_result = temp_result
if boolean_result is None:
print("ERROR: joints.py draw_joint_boolean_feature(%s-%s) Boolean Union Failure" % joint_id)
boolean_result = breps
else:
delete_objects(guids_for_union, purge=True, redraw=False)
# Add boolean result into Rhino Doc and save their guids
artist._joint_features[joint_id] = []
for brep in boolean_result:
# New guids from boolean results
guid = add_brep(brep)
if not guid:
continue
# Naming
rs.ObjectName(guid, "%s-%s" % (joint_id[0], joint_id[1]))
joint_is_forward = process.assembly.sequence.index(joint_id[0]) < process.assembly.sequence.index(joint_id[1])
# Apply Color to the geometry
if color is not None:
rs.ObjectColor(guid, color)
elif isinstance(joint, JointHalfLap):
rs.ObjectColor(guid, PURPLE) if joint_is_forward else rs.ObjectColor(guid, LIGHT_PURPLE)
elif isinstance(joint, JointNonPlanarLap):
rs.ObjectColor(guid, BLUE) if joint_is_forward else rs.ObjectColor(guid, LIGHT_BLUE)
else:
rs.ObjectColor(guid, BROWN) if joint_is_forward else rs.ObjectColor(guid, LIGHT_BROWN)
# Add to guid dict
artist._joint_features[joint_id].append(guid)
if redraw:
rs.EnableRedraw(True)
def draw_all_selectable_joints(process, redraw=True):
for joint_id in process.assembly.joint_ids():
draw_selectable_joint(process, joint_id, redraw=False)
if redraw:
rs.EnableRedraw(True)
def delete_selectable_joint(process, joint_id, redraw=False):
# type: (RobotClampAssemblyProcess, tuple[str,str], bool) -> None
artist = get_process_artist()
rs.EnableRedraw(False)
if joint_id in artist._joint_features:
delete_objects(artist._joint_features[joint_id], purge=True, redraw=False)
artist._joint_features[joint_id] = []
if redraw:
rs.EnableRedraw(True)
def delete_all_selectable_joints(process, redraw=True):
# type: (RobotClampAssemblyProcess, bool) -> None
artist = get_process_artist()
if not hasattr(artist, '_joint_features'):
return
rs.EnableRedraw(False)
for joint_id in artist._joint_features:
delete_selectable_joint(process, joint_id, redraw=False)
if redraw:
rs.EnableRedraw(True)
def _get_guids_of_selectable_joints(process, joint_types=[], forward_joint=True, backward_joint=True):
# type: (RobotClampAssemblyProcess, list[type], bool, bool) -> Tuple[list[guid], list[guid]]
artist = get_process_artist()
sequence = process.assembly.sequence
selectable_joint_guids = []
non_selectable_joint_guids = []
for joint_id in process.assembly.joint_ids():
joint = process.assembly.joint(joint_id)
# * Skip based on forward backwards
if not forward_joint and sequence.index(joint_id[0]) < sequence.index(joint_id[1]):
non_selectable_joint_guids.extend(artist._joint_features[joint_id])
continue
if not backward_joint and sequence.index(joint_id[0]) > sequence.index(joint_id[1]):
non_selectable_joint_guids.extend(artist._joint_features[joint_id])
continue
# * Add based on joint_types
if joint.__class__ in joint_types:
selectable_joint_guids.extend(artist._joint_features[joint_id])
else:
non_selectable_joint_guids.extend(artist._joint_features[joint_id])
return (selectable_joint_guids, non_selectable_joint_guids)
def _get_filter_of_selectable_joints(process, joint_types=[], forward_joint=True, backward_joint=True):
# type: (RobotClampAssemblyProcess, list[type], bool, bool) -> Tuple[list[guid], list[guid]]
selectable_joint_guids, _ = _get_guids_of_selectable_joints(process, joint_types=joint_types, forward_joint=forward_joint, backward_joint=backward_joint)
def joint_feature_filter(rhino_object, geometry, component_index):
return rhino_object.Attributes.ObjectId in selectable_joint_guids
return joint_feature_filter
def show_selectable_joints_by_id(process, joint_ids=[], redraw=False):
# type: (RobotClampAssemblyProcess, list[Tuple[str,str]], bool) -> None
artist = get_process_artist()
rs.EnableRedraw(False)
# * Hide joints that are not the right type:
for joint_id in process.assembly.joint_ids():
if joint_id in joint_ids:
rs.ShowObjects(artist._joint_features[joint_id])
else:
rs.HideObjects(artist._joint_features[joint_id])
if redraw:
rs.EnableRedraw(True)
def show_selectable_joints_by_types(process, joint_types=[], forward_joint=True, backward_joint=True, redraw=False):
# type: (RobotClampAssemblyProcess, list[type], bool, bool, bool) -> None
artist = get_process_artist()
rs.EnableRedraw(False)
# * Hide joints that are not the right type:
selectable_joint_guids, non_selectable_joint_guids = _get_guids_of_selectable_joints(process, joint_types=joint_types, forward_joint=forward_joint, backward_joint=backward_joint)
[rs.ShowObjects(guids) for guids in selectable_joint_guids]
[rs.HideObjects(guids) for guids in non_selectable_joint_guids]
if redraw:
rs.EnableRedraw(True)
def show_all_selectable_joints(process, redraw=False):
# type: (RobotClampAssemblyProcess, bool) -> None
artist = get_process_artist()
rs.EnableRedraw(False)
# * Show all joints
for joint_id in process.assembly.joint_ids():
guids = artist._joint_features[joint_id]
rs.ShowObjects(guids)
if redraw:
rs.EnableRedraw(True)
def _joint_id_from_rhino_guids(guids):
joint_ids = []
for guid in guids:
name = get_object_name(guid) # type: str
ids = name.split('-')
joint_ids.append((ids[0], ids[1]))
return joint_ids
def users_select_feature(process, joint_types=None, forward_joint=True, backward_joint=True, prompt='Select joints:'):
# type: (RobotClampAssemblyProcess, list[type], bool, bool, str) -> list[Tuple[str, str]]
"""Returns a list of selected joints.
Multi-select is available as an option, but all selected joints must belong to a single joint type.
If the user presses Cancel, returns None.
"""
artist = get_process_artist()
# * Menu for user
go = Rhino.Input.Custom.GetObject()
go.SetCommandPrompt(prompt)
go.EnablePreSelect(False, True)
# Set getObjects geometry filter
selectable_joint_guids, non_selectable_joint_guids = _get_guids_of_selectable_joints(process, joint_types=joint_types, forward_joint=forward_joint, backward_joint=backward_joint)
def joint_feature_filter(rhino_object, geometry, component_index):
return rhino_object.Attributes.ObjectId in selectable_joint_guids
go.SetCustomGeometryFilter(joint_feature_filter)
# * First Selection is single select and return
go.AddOption("MultiSelect")
result = go.Get()
# * If user opted for muiltiselect, another selection is prompted
if result == Rhino.Input.GetResult.Option and go.Option().EnglishName == "MultiSelect":
go.ClearCommandOptions()
first_result = go.Get()
if first_result is None or first_result == Rhino.Input.GetResult.Cancel:
return None
# Hide joints of different types from the first selection
first_result_joint_id = _joint_id_from_rhino_guids([obj.ObjectId for obj in go.Objects()])[0]
first_result_type = process.assembly.joint(first_result_joint_id).__class__
show_selectable_joints_by_types(process, [first_result_type], backward_joint=False, redraw=True)
# Prompt user for further selection, without removing the first selection.
go.SetCommandPrompt(prompt + " (MultiSelect)")
go.EnableClearObjectsOnEntry(False)
go.DeselectAllBeforePostSelect = False
result = go.GetMultiple(0, 0)
if result is None or result == Rhino.Input.GetResult.Cancel:
return None
if isinstance(result, str):
if result.startswith("Cancel"):
return None
# Retrieve joint_ids from object name
joint_ids = _joint_id_from_rhino_guids([obj.ObjectId for obj in go.Objects()])
return joint_ids
def cull_double_selected_joint_ids(process, joint_ids):
# type: (RobotClampAssemblyProcess, list[Tuple[str, str]]) -> list[Tuple[str, str]]
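# Keeps one orientation per beam pair: ids ordered (earlier, later) in the assembly sequence
# are kept as-is; a (later, earlier) id is kept only if its flipped counterpart is not already kept.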
filtered_joint_ids = []
sequence = process.assembly.sequence
# Add joint_id that are earlier
for joint_id in joint_ids:
beam_id_0, beam_id_1 = joint_id
if sequence.index(beam_id_0) < sequence.index(beam_id_1):
filtered_joint_ids.append(joint_id)
# Add joint_id that are later, only if the earlier id is not in the list
for joint_id in joint_ids:
beam_id_0, beam_id_1 = joint_id
if sequence.index(beam_id_0) > sequence.index(beam_id_1):
if (beam_id_1, beam_id_0) not in filtered_joint_ids:
filtered_joint_ids.append(joint_id)
return filtered_joint_ids
# ##############################
# Functions to operate on Joints
# ##############################
def change_joint_type(process, joint_ids):
# type: (RobotClampAssemblyProcess, list[Tuple[str, str]]) -> Tuple[list[str], list[str, str]]
artist = get_process_artist()
assembly = process.assembly
current_joint_type = process.assembly.joint(joint_ids[0]).__class__
if current_joint_type == JointPolylineLap:
options = ['JointHalfLap', 'Cancel']
elif current_joint_type == JointHalfLap:
options = ['JointPolylineLap', 'Cancel']
result = rs.GetString("Change to HalfLap or PolylineLap :", strings=options)
if result == 'Cancel':
return ([], [])
if result not in ['JointHalfLap', 'JointPolylineLap']:
return ([], [])
sequence = assembly.sequence
affected_beams = set()
affected_joints = set()
for joint_id in joint_ids:
beam_s_id, beam_m_id = joint_id
if sequence.index(beam_s_id) < sequence.index(beam_m_id):
beam_s_id, beam_m_id = beam_m_id, beam_s_id
beam_stay = assembly.beam(beam_s_id)
beam_move = assembly.beam(beam_m_id)
joint_id_s_m = (beam_s_id, beam_m_id)
joint_id_m_s = (beam_m_id, beam_s_id)
joint_face_id_move = assembly.joint(joint_id_m_s).face_id
joint_face_id_stay = assembly.joint(joint_id_s_m).face_id
if result == 'JointPolylineLap':
j_s, j_m, screw_line = JointPolylineLap.from_beam_beam_intersection(beam_stay, beam_move, joint_face_id_move=joint_face_id_move)
if result == 'JointHalfLap':
j_s, j_m, screw_line = JointHalfLap.from_beam_beam_intersection(beam_stay, beam_move, joint_face_id_move=joint_face_id_move)
if j_m is not None and j_s is not None:
print('- Joint (%s-%s) changed to %s' % (beam_s_id, beam_m_id, j_m.__class__.__name__))
assembly.add_joint_pair(j_s, j_m, beam_s_id, beam_m_id)
affected_beams.add(beam_s_id)
affected_beams.add(beam_m_id)
for joint_id in [joint_id_s_m, joint_id_m_s]:
affected_joints.add(joint_id)
# Redraw new selectable joint feature
delete_selectable_joint(process, joint_id)
draw_selectable_joint(process, joint_id)
for beam_id in affected_beams:
artist.redraw_interactive_beam(beam_id, force_update=True, redraw=False)
process.dependency.invalidate(beam_id, process.assign_tool_type_to_joints)
return (affected_beams, affected_joints)
def _joint_half_lap_change_thickness(process, joint_ids):
# type: (RobotClampAssemblyProcess, list[Tuple[str, str]]) -> Tuple[list[str], list[str, str]]
artist = get_process_artist()
# Flip selected joint_ids because it is more intuitive to select the positive
joint_ids = [(i, j) for (j, i) in joint_ids]
# * Print out current joint parameters
existing_thickness = set()
for joint_id in joint_ids:
joint = process.assembly.joint(joint_id)
current_thickness = joint.thickness
existing_thickness.add(current_thickness)
print("Joint (%s-%s) thickness = %s" % (joint_id[0], joint_id[1], current_thickness))
# * Ask user for new parameter value
new_thickness = rs.GetReal("New thickness of the lap joint (existing thicknesses are: %s)" % existing_thickness)
if new_thickness is None:
show_all_selectable_joints(process, redraw=True)
return None
# * Make changes to selected joints
affected_beams = set()
affected_joints = set()
for joint_id in joint_ids:
# Update this joint and its neighbour
beam_id1, beam_id2 = joint_id
joint_id_nbr = (beam_id2, beam_id1)
joint = process.assembly.joint(joint_id)
joint_nbr = process.assembly.joint(joint_id_nbr)
# Skip if there are no change
current_thickness = joint.thickness
if new_thickness == current_thickness:
continue
difference = new_thickness - current_thickness
# * Logic to update this joint and its neighbour
joint.set_parameter('thickness', new_thickness)
joint_nbr.set_parameter('thickness', joint_nbr.get_parameter('thickness')-difference)
print("Thickness of joint pair changed to %s and %s." % (joint.thickness, joint_nbr.thickness))
affected_beams.add(beam_id1)
affected_beams.add(beam_id2)
affected_joints.add(joint_id)
affected_joints.add(joint_id_nbr)
# Redraw new selectable joint feature
delete_selectable_joint(process, joint_id)
delete_selectable_joint(process, joint_id_nbr)
draw_selectable_joint(process, joint_id)
draw_selectable_joint(process, joint_id_nbr)
# Redraw affected beams with new joints
for beam_id in affected_beams:
artist.redraw_interactive_beam(beam_id, force_update=True, redraw=False)
return (affected_beams, affected_joints)
def _change_joint_non_planar_lap_thickness(process, joint_ids):
# type: (RobotClampAssemblyProcess, list[Tuple[str, str]]) -> Tuple[list[str], list[str, str]]
"""
Returns (affected_beams, affected_joints)
If user pressed cancel in the data entering process, return None
"""
# * Flip joint id to the joint on earlier beam.
for i in range(len(joint_ids)):
p, q = joint_ids[i]
if process.assembly.sequence.index(p) > process.assembly.sequence.index(q):
joint_ids[i] = q, p
# * Print out current joint parameters
existing_thickness = set()
for joint_id in joint_ids:
joint = process.assembly.joint(joint_id)
current_thickness = joint.thickness
existing_thickness.add(current_thickness)
print("Joint (%s-%s) thickness = %s" % (joint_id[0], joint_id[1], current_thickness))
# * Ask user for new parameter value
new_thickness = rs.GetReal("New thickness of the lap joint (existing thicknesses are: %s)" % existing_thickness)
if new_thickness is None:
return None
# * Make changes to selected joints
affected_beams = set()
affected_joints = set()
for joint_id in joint_ids:
# Update this joint and its neighbour
beam_id1, beam_id2 = joint_id
joint_id_nbr = (beam_id2, beam_id1)
joint = process.assembly.joint(joint_id) # type:(JointNonPlanarLap)
joint_nbr = process.assembly.joint(joint_id_nbr) # type:(JointNonPlanarLap)
# Skip if there are no change
current_thickness = joint.thickness
if new_thickness == current_thickness:
continue
# Warn and Skip if thickness is => beam thickness
beam = process.assembly.beam(beam_id1)
beam_depth = beam.get_face_height(joint.beam_move_face_id)
if new_thickness >= beam_depth:
print("Warning: Cannot set joint thickness >= beam depth (=%s)" % (beam_depth))
print("Thickness of joint pair (%s) unchanged (=%s)." % (joint_id, joint_nbr.thickness))
continue
# * Logic to update this joint and its neighbour
joint.set_parameter('thickness', new_thickness)
joint_nbr.set_parameter('thickness', new_thickness)
print("Thickness of joint pair (%s) changed to %s." % (joint_id, joint_nbr.thickness))
affected_beams.add(beam_id1)
affected_beams.add(beam_id2)
affected_joints.add(joint_id)
affected_joints.add(joint_id_nbr)
return (affected_beams, affected_joints)
def _change_joint_non_planar_lap_beam_stay_face_id(process, joint_ids):
"""
joint_ids are joint_on_stay_id = (beam_id_stay, beam_id_move)
Returns (affected_beams, affected_joints)
If user pressed cancel in the data entering process, return None
"""
# * Print out current joint parameters
existing_face_id = set()
for joint_on_stay_id in joint_ids:
joint_on_move = process.assembly.joint(joint_on_stay_id) # type: (JointNonPlanarLap)
current_face_id = joint_on_move.beam_stay_face_id
existing_face_id.add(current_face_id)
print("Joint (%s-%s) beam_stay_face_id = %s" % (joint_on_stay_id[0], joint_on_stay_id[1], current_face_id))
# * Ask user for new parameter value
new_face_id = rs.GetInteger("New face_id on Staying Beam (existing face_ids are: %s)" % existing_face_id)
if new_face_id is None:
return None
# * Make changes to selected joints
affected_beams = set()
affected_joints = set()
for joint_on_stay_id in joint_ids:
# Update this joint and its neighbour
beam_id_stay, beam_id_move = joint_on_stay_id
joint_on_move_id = (beam_id_move, beam_id_stay)
joint_on_move = process.assembly.joint(joint_on_move_id) # type:(JointNonPlanarLap)
joint_on_stay = process.assembly.joint(joint_on_stay_id) # type:(JointNonPlanarLap)
# Skip if there are no change
current_face_id = joint_on_move.beam_stay_face_id
if new_face_id == current_face_id:
continue
# Create a new joint with old parameters
beam_stay = process.assembly.beam(beam_id_stay)
beam_move = process.assembly.beam(beam_id_move)
new_joint_on_stay, new_joint_on_move, _ = JointNonPlanarLap.from_beam_beam_intersection(
beam_stay,
beam_move,
thickness=joint_on_move.thickness,
joint_face_id_move=joint_on_move.beam_move_face_id,
joint_face_id_stay=new_face_id,
)
if new_joint_on_stay is None or new_joint_on_move is None:
print("No joint returned from JointNonPlanarLap.from_beam_beam_intersection() ")
return ([], [])
process.assembly.add_joint_pair(new_joint_on_stay, new_joint_on_move, beam_id_stay, beam_id_move)
for key, value in joint_on_stay.get_parameters_dict().items():
new_joint_on_stay.set_parameter(key, value)
for key, value in joint_on_move.get_parameters_dict().items():
new_joint_on_move.set_parameter(key, value)
# * Logic to update this joint and its neighbour
        print("beam_stay_face_id of joint pair (%s) changed to %s." % (joint_on_stay_id, new_joint_on_move.beam_stay_face_id))
affected_beams.add(beam_id_move)
affected_beams.add(beam_id_stay)
affected_joints.add(joint_on_stay_id)
affected_joints.add(joint_on_move_id)
[process.dependency.invalidate(beam_id, process.assign_tool_type_to_joints) for beam_id in affected_beams]
return (affected_beams, affected_joints)
def _change_joint_non_planar_lap_beam_move_face_id(process, joint_ids):
"""
Returns (affected_beams, affected_joints)
If user pressed cancel in the data entering process, return None
"""
# * Print out current joint parameters
existing_face_id = set()
for joint_id in joint_ids:
joint_on_move = process.assembly.joint(joint_id) # type: (JointNonPlanarLap)
current_face_id = joint_on_move.beam_move_face_id
existing_face_id.add(current_face_id)
print("Joint (%s-%s) beam_move_face_id = %s" % (joint_id[0], joint_id[1], current_face_id))
    # * Ask user for new parameter value
    new_face_id = rs.GetInteger("New face_id on the moving beam (existing face_ids are: %s):" % existing_face_id)
if new_face_id is None:
return None
# * Make changes to selected joints
affected_beams = set()
affected_joints = set()
for joint_id in joint_ids:
# Update this joint and its neighbour
beam_id_move, beam_id_stay = joint_id
joint_id_nbr = (beam_id_stay, beam_id_move)
joint_on_move = process.assembly.joint(joint_id) # type:(JointNonPlanarLap)
joint_on_stay = process.assembly.joint(joint_id_nbr) # type:(JointNonPlanarLap)
# Skip if there are no change
current_face_id = joint_on_move.beam_move_face_id
if new_face_id == current_face_id:
continue
# Create a new joint with old parameters
beam_stay = process.assembly.beam(beam_id_stay)
beam_move = process.assembly.beam(beam_id_move)
        new_joint_on_stay, new_joint_on_move, _ = JointNonPlanarLap.from_beam_beam_intersection(
            beam_stay,
            beam_move,
            thickness=joint_on_move.thickness,
            joint_face_id_move=new_face_id,
            joint_face_id_stay=joint_on_move.beam_stay_face_id,
        )
        if new_joint_on_stay is None or new_joint_on_move is None:
            print("No joint returned from JointNonPlanarLap.from_beam_beam_intersection()")
            return ([], [])
        process.assembly.add_joint_pair(new_joint_on_stay, new_joint_on_move, beam_id_stay, beam_id_move)
for key, value in joint_on_stay.get_parameters_dict().items():
new_joint_on_stay.set_parameter(key, value)
for key, value in joint_on_move.get_parameters_dict().items():
new_joint_on_move.set_parameter(key, value)
# * Logic to update this joint and its neighbour
        print("beam_move_face_id of joint pair (%s) changed to %s." % (joint_id, new_joint_on_move.beam_move_face_id))
affected_beams.add(beam_id_move)
affected_beams.add(beam_id_stay)
affected_joints.add(joint_id)
affected_joints.add(joint_id_nbr)
[process.dependency.invalidate(beam_id, process.assign_tool_type_to_joints) for beam_id in affected_beams]
return (affected_beams, affected_joints)
def change_joint_non_planar_lap_parameters(process):
# type: (RobotClampAssemblyProcess) -> None
artist = get_process_artist()
selected_parameter = "thickness"
while True:
# * Hide joints that are not the right type:
show_selectable_joints_by_types(process, joint_types=[JointNonPlanarLap], backward_joint=False, redraw=True)
# * Option Menu for user
para_change_function = {
"thickness": _change_joint_non_planar_lap_thickness,
"beam_stay_face_id": _change_joint_non_planar_lap_beam_stay_face_id,
"beam_move_face_id": _change_joint_non_planar_lap_beam_move_face_id,
}
# * Ask user to pick joints
go = Rhino.Input.Custom.GetObject()
go.EnablePreSelect(False, True)
go.SetCommandPrompt("Select NPJoints to change (%s). ENTER when done:" % (selected_parameter))
[go.AddOption(key) for key in para_change_function.keys()]
joint_feature_filter = _get_filter_of_selectable_joints(process, joint_types=[JointNonPlanarLap], backward_joint=False)
go.SetCustomGeometryFilter(joint_feature_filter)
para_change_result = go.GetMultiple(0, 0)
        # * If user presses ESC, exit the function.
if para_change_result is None or para_change_result == Rhino.Input.GetResult.Cancel:
show_all_selectable_joints(process, redraw=True)
return None
# * If user pressed an Option, it changes the selected_parameter
joint_ids = _joint_id_from_rhino_guids([obj.ObjectId for obj in go.Objects()])
if para_change_result == Rhino.Input.GetResult.Option:
selected_parameter = go.Option().EnglishName
            # * If user pressed an Option while joints are selected, fall through to run the _change function.
            if len(joint_ids) > 0:
                pass
else:
# * If user pressed an Option but no objects are selected, restart the selection process
continue
elif para_change_result == Rhino.Input.GetResult.Object:
            if len(joint_ids) > 0:  # * If user pressed Enter with selected joints, run the _change function.
                pass
else: # * If user pressed Enter with no selected joints, exit function
show_all_selectable_joints(process, redraw=True)
return None
        # (joint_ids were already retrieved from the selected objects above)
# Flip selected joint_ids because it is more intuitive to select the positive
if joint_ids is None or len(joint_ids) == 0:
show_all_selectable_joints(process, redraw=True)
return
joint_ids = [(i, j) for (j, i) in joint_ids]
# * Activate sub function to deal with changing a specific type of joint and parameter
para_change_result = para_change_function[selected_parameter](process, joint_ids)
if para_change_result is None:
show_all_selectable_joints(process, redraw=True)
return
affected_beams, affected_joints = para_change_result
# * Redraw new selectable joint feature and affected beams with new joints
for joint_id in affected_joints:
delete_selectable_joint(process, joint_id, redraw=False)
draw_selectable_joint(process, joint_id, redraw=False)
for beam_id in affected_beams:
artist.redraw_interactive_beam(beam_id, force_update=True, redraw=False)
def _joint_polyine_lap_change_thickness(process, joint_ids):
# type: (RobotClampAssemblyProcess, list[Tuple[str, str]]) -> Tuple[list[str], list[str, str]]
"""
Returns (affected_beams, affected_joints)
If user pressed cancel in the data entering process, return None
"""
# * Print out current joint parameters
existing_thickness = set()
for beam_id1, beam_id2 in joint_ids:
joint_id = (beam_id2, beam_id1)
joint = process.assembly.joint(joint_id)
current_thickness = joint.get_parameter('thickness')
existing_thickness.add(current_thickness)
print("Joint (%s-%s) thickness = %s" % (joint_id[0], joint_id[1], current_thickness))
    # * Ask user for new parameter value
    new_thickness = rs.GetReal("New thickness of the lap joint (existing thicknesses are: %s):" % existing_thickness)
if new_thickness is None:
return None
# * Make changes to selected joints
affected_beams = set()
affected_joints = set()
for beam_id1, beam_id2 in joint_ids:
# Update this joint and its neighbour
joint_id = (beam_id2, beam_id1)
joint_id_nbr = (beam_id1, beam_id2)
        joint = process.assembly.joint(joint_id)  # type: (JointPolylineLap)
        joint_nbr = process.assembly.joint(joint_id_nbr)  # type: (JointPolylineLap)
# Skip if there are no change
current_thickness = joint.get_parameter('thickness')
if new_thickness == current_thickness:
continue
        # Warn and skip if thickness >= beam depth
beam = process.assembly.beam(beam_id1)
beam_depth = beam.get_face_height(joint.face_id)
if new_thickness >= beam_depth:
print("Warning: Cannot set joint thickness >= beam depth (=%s)" % (beam_depth))
print("Thickness of joint pair (%s) unchanged (=%s)." % (joint_id, joint_nbr.thickness))
continue
# * Logic to update this joint and its neighbour
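        # Note: the neighbouring joint's thickness is reduced by the same amount this
        # joint's thickness grows, so the sum of the pair's thicknesses stays constant.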
diff = new_thickness - joint.get_parameter('thickness')
joint.set_parameter('thickness', new_thickness)
joint_nbr.set_parameter('thickness', joint_nbr.get_parameter('thickness') - diff)
print("Thickness of joint pair (%s) changed to %s." % (joint_id, joint_nbr.thickness))
affected_beams.add(beam_id1)
affected_beams.add(beam_id2)
affected_joints.add(joint_id)
affected_joints.add(joint_id_nbr)
return (affected_beams, affected_joints)
def _joint_polyline_lap_change_polyline_string(process, joint_ids):
# type: (RobotClampAssemblyProcess, list[Tuple[str, str]]) -> Tuple[list[str], list[str, str]]
"""
Returns (affected_beams, affected_joints)
If user pressed cancel in the data entering process, return None
"""
# * Print out current joint parameters
# existing_thickness = set()
for joint_id in joint_ids:
joint = process.assembly.joint(joint_id)
print("Joint (%s-%s) string = %s" % (joint_id[0], joint_id[1], joint.param_string))
    # * Ask user for new parameter value
new_string = rs.StringBox("Enter new string for the joints.")
if new_string is None:
return None
# * Make changes to selected joints
affected_beams = set()
affected_joints = set()
angle_warning_messages = []
for joint_id in joint_ids:
# Update this joint and its neighbour
beam_id1, beam_id2 = joint_id
joint_id_nbr = (beam_id2, beam_id1)
        joint = process.assembly.joint(joint_id)  # type: (JointPolylineLap)
        joint_nbr = process.assembly.joint(joint_id_nbr)  # type: (JointPolylineLap)
# Warn if interior joint angles are < 90
if joint.check_polyline_interior_angle() == False:
polyline_interior_angles = joint.get_polyline_interior_angles()
polyline_interior_angles = [angle for angles in polyline_interior_angles for angle in angles]
print("WARNING WARNING WARNING")
message = "WARNING : Polyline joint (%s-%s) min interior angle < 90degs : %f" % (beam_id1, beam_id2, min(polyline_interior_angles))
print(message)
angle_warning_messages.append(message)
# Skip if there are no change
current_string = joint.param_string
if new_string == current_string:
continue
        # Try applying the new string; revert the change if it does not produce a valid joint
try:
# * Logic to update this joint and its neighbour
joint.set_parameter('param_string', new_string)
joint_nbr.set_parameter('param_string', new_string)
# Check if successful
assert isinstance(joint.thickness, float)
assert len(joint.polylines) == 4
print("String of Joint (%s-%s) changed." % (joint_id))
affected_beams.add(beam_id1)
affected_beams.add(beam_id2)
affected_joints.add(joint_id)
affected_joints.add(joint_id_nbr)
        except Exception:
joint.set_parameter('param_string', current_string)
joint_nbr.set_parameter('param_string', current_string)
print("Error changing string for Joint (%s-%s), change reverted." % (joint_id))
# Warning message box
if len(angle_warning_messages) > 0:
angle_warning_messages.append("- - - - -")
angle_warning_messages.append("You will need to fix these angles by giving a different polyline.")
rs.MessageBox("\n".join(angle_warning_messages))
return (affected_beams, affected_joints)
def _joint_polyline_lap_rotate(process, joint_ids, rotate_cw=True):
# type: (RobotClampAssemblyProcess, list[Tuple[str, str]], bool) -> Tuple[list[str], list[str, str]]
"""
Returns (affected_beams, affected_joints)
If user pressed cancel in the data entering process, return None
"""
# * Make changes to selected joints
affected_beams = set()
affected_joints = set()
for joint_id in joint_ids:
# Update this joint and its neighbour
beam_id1, beam_id2 = joint_id
joint_id_nbr = (beam_id2, beam_id1)
        joint = process.assembly.joint(joint_id)  # type: (JointPolylineLap)
        joint_nbr = process.assembly.joint(joint_id_nbr)  # type: (JointPolylineLap)
# Rotate Polyline
current_polylines = joint.polylines
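        # joint.polylines holds four polylines (see the assert below); shifting the list
        # by one position re-orders them clockwise or counter-clockwise.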
if rotate_cw:
new_polylines = current_polylines[-1:] + current_polylines[:-1]
else:
new_polylines = current_polylines[1:] + current_polylines[:1]
# Skip if there are no change
if new_polylines == current_polylines:
continue
try:
# * Logic to update this joint and its neighbour
joint.polylines = new_polylines
joint_nbr.polylines = new_polylines
# Check if successful
assert len(joint.polylines) == 4
print("Joint (%s-%s) rotated." % (joint_id))
affected_beams.add(beam_id1)
affected_beams.add(beam_id2)
affected_joints.add(joint_id)
affected_joints.add(joint_id_nbr)
        except Exception:
joint.polylines = current_polylines
joint_nbr.polylines = current_polylines
print("Error changing Joint (%s-%s), change reverted." % (joint_id))
return (affected_beams, affected_joints)
def _joint_polyline_lap_rotate_cw(process, joint_ids):
# type: (RobotClampAssemblyProcess, list[Tuple[str, str]]) -> Tuple[list[str], list[str, str]]
return _joint_polyline_lap_rotate(process, joint_ids, rotate_cw=True)
def _joint_polyline_lap_rotate_ccw(process, joint_ids):
# type: (RobotClampAssemblyProcess, list[Tuple[str, str]]) -> Tuple[list[str], list[str, str]]
return _joint_polyline_lap_rotate(process, joint_ids, rotate_cw=False)
def _joint_polyline_lap_mirror(process, joint_ids, direction_u=True):
# type: (RobotClampAssemblyProcess, list[Tuple[str, str]], bool) -> Tuple[list[str], list[str, str]]
"""
Returns (affected_beams, affected_joints)
If user pressed cancel in the data entering process, return None
"""
# * Make changes to selected joints
affected_beams = set()
affected_joints = set()
for joint_id in joint_ids:
# Update this joint and its neighbour
beam_id1, beam_id2 = joint_id
joint_id_nbr = (beam_id2, beam_id1)
        joint = process.assembly.joint(joint_id)  # type: (JointPolylineLap)
        joint_nbr = process.assembly.joint(joint_id_nbr)  # type: (JointPolylineLap)
# Mirror polyline
current_polylines = joint.polylines
if direction_u:
new_polylines = [current_polylines[i] for i in [2, 1, 0, 3]]
else:
new_polylines = [current_polylines[i] for i in [0, 3, 2, 1]]
# Reverse order of each line
for i in range(4):
new_polylines[i] = [[1-u, v] for u, v in new_polylines[i]][::-1]
# Skip if there are no change
if new_polylines == current_polylines:
continue
try:
# * Logic to update this joint and its neighbour
joint.polylines = new_polylines
joint_nbr.polylines = new_polylines
# Check if successful
assert len(joint.polylines) == 4
            print("Joint (%s-%s) mirrored." % joint_id)
affected_beams.add(beam_id1)
affected_beams.add(beam_id2)
affected_joints.add(joint_id)
affected_joints.add(joint_id_nbr)
        except Exception:
joint.polylines = current_polylines
joint_nbr.polylines = current_polylines
print("Error changing Joint (%s-%s), change reverted." % (joint_id))
return (affected_beams, affected_joints)
def _joint_polyline_lap_mirror_u(process, joint_ids):
# type: (RobotClampAssemblyProcess, list[Tuple[str, str]]) -> Tuple[list[str], list[str, str]]
return _joint_polyline_lap_mirror(process, joint_ids, direction_u=True)
def _joint_polyline_lap_mirror_v(process, joint_ids):
# type: (RobotClampAssemblyProcess, list[Tuple[str, str]]) -> Tuple[list[str], list[str, str]]
return _joint_polyline_lap_mirror(process, joint_ids, direction_u=False)
# ############################
# Main menu / Top level menu
# ############################
def select_joint_parameter_to_change(process, joint_ids):
# type: (RobotClampAssemblyProcess, list[Tuple[str, str]]) -> None
artist = get_process_artist()
assert len(set(process.assembly.joint(joint_id).__class__.__name__ for joint_id in joint_ids)) == 1
all_options = {
"JointHalfLap": {
"Thickness": _joint_half_lap_change_thickness,
"ChangeType": change_joint_type
},
"JointNonPlanarLap":
{
"Thickness": _change_joint_non_planar_lap_thickness,
"MoveFaceID": _change_joint_non_planar_lap_beam_move_face_id,
"StayFaceID": _change_joint_non_planar_lap_beam_stay_face_id,
"ChangeType": change_joint_type
},
"JointPolylineLap":
{
"Polyline": _joint_polyline_lap_change_polyline_string,
"MirrorU": _joint_polyline_lap_mirror_u,
"MirrorV": _joint_polyline_lap_mirror_v,
"RotateCW": _joint_polyline_lap_rotate_cw,
"RotateCCW": _joint_polyline_lap_rotate_ccw,
"Thickness": _joint_polyine_lap_change_thickness,
"ChangeType": change_joint_type
},
}
while True:
joint_type = process.assembly.joint(joint_ids[0]).__class__
options = list(all_options[joint_type.__name__].keys())
print("Selected %i joints: %s" % (len(joint_ids), str(joint_ids)))
result = rs.GetString("Options for %s(s) (ESC to return):" % (joint_type.__name__), strings=options)
if result is None:
return None
if result == "Cancel":
return None
if result in all_options[joint_type.__name__]:
function = all_options[joint_type.__name__][result]
result = function(process, joint_ids)
# Result is None if user pressed Escape or Cancel
if result is not None:
affected_beams, affected_joints = result
# * Redraw new selectable joint feature and affected beams with new joints
for joint_id in affected_joints:
delete_selectable_joint(process, joint_id, redraw=False)
draw_selectable_joint(process, joint_id, redraw=False)
for beam_id in affected_beams:
artist.redraw_interactive_beam(beam_id, force_update=True, redraw=False)
show_selectable_joints_by_id(process, joint_ids, True)
print("Function Complete, Anything else?")
def show_menu(process):
# type: (RobotClampAssemblyProcess) -> None
assembly = process.assembly # type: Assembly
artist = get_process_artist()
# Ensure interactive beams are shown initially
rs.EnableRedraw(False)
artist.hide_robot()
artist.hide_all_env_mesh()
artist.hide_all_tools_in_storage()
[artist.hide_beam_all_positions(beam_id) for beam_id in assembly.sequence]
[artist.hide_asstool_all_positions(beam_id) for beam_id in assembly.sequence]
[artist.hide_gripper_all_positions(beam_id) for beam_id in assembly.sequence]
[artist.show_interactive_beam(beam_id) for beam_id in assembly.sequence]
draw_all_selectable_joints(process, redraw=False)
rs.EnableRedraw(True)
sc.doc.Views.Redraw()
    # On exit function
def on_exit_ui():
delete_all_selectable_joints(process)
print('Exit Function')
return Rhino.Commands.Result.Cancel
while (True):
# Create Menu
# result = CommandMenu(construct_menu()).select_action()
show_all_selectable_joints(process, redraw=True)
selected_joint_ids = users_select_feature(process, [JointPolylineLap, JointHalfLap, JointNonPlanarLap], backward_joint=False, prompt="Select a Joint to Edit")
# User cancel command by Escape
if selected_joint_ids is None:
return on_exit_ui()
if len(selected_joint_ids) == 0:
return on_exit_ui()
show_selectable_joints_by_id(process, selected_joint_ids, redraw=True)
select_joint_parameter_to_change(process, selected_joint_ids)
######################
# Rhino Entry Point
######################
# Below are the functions that get invoked when the user presses the UI button
# Put this in the Rhino button ! _-RunPythonScript 'integral_timber_joints.rhino.sequence.py'
if __name__ == '__main__':
process = get_process()
if process_is_none(process):
print("Load json first")
else:
show_menu(process)
|
StarcoderdataPython
|
11306391
|
from utils.keypoints_selection import generate_model_kps
if __name__ == '__main__':
generate_model_kps(path=r'E:\1Downloaded\datasets\LINEMOD_from_yolo-6d\cat', model_name='cat')
|
StarcoderdataPython
|
184171
|
import importlib.util
import os
import shutil
import logging
import sys
from pathlib import Path
import numba.cuda
import psutil
import tensorflow as tf
from slurmpie import slurmpie
def create_directory(file_path, exist_ok=True):
if not os.path.exists(file_path):
os.makedirs(file_path, exist_ok=exist_ok)
def delete_directory(file_path):
if os.path.exists(file_path):
shutil.rmtree(file_path)
def copy_directory(original_directory, out_directory):
shutil.copytree(original_directory, out_directory, dirs_exist_ok=True)
def get_root_name(file_path):
return os.path.basename(os.path.normpath(file_path))
def get_file_name_from_full_path(file_path):
return os.path.basename(os.path.normpath(file_path))
def get_file_name(file_path, file_extension):
root_name = get_root_name(file_path)
    if file_extension[0] != ".":
        file_extension = "." + file_extension
return root_name.split(file_extension)[0]
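# Example (illustrative): get_file_name("/data/scan.nii.gz", "nii.gz") returns "scan".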
def find_files_with_extension(file_path, file_extension):
if file_extension[0] == ".":
file_extension = file_extension[1:]
return sorted(
f_path.path
for f_path in os.scandir(file_path)
if (f_path.is_file() and file_extension in "".join(Path(f_path.name).suffixes))
)
def get_parent_directory(file_path):
return os.path.dirname(os.path.normpath(os.path.abspath(file_path)))
def get_file_path(file_path):
file_name = get_root_name(file_path)
file_path = file_path.split(file_name)[0]
return normalize_path(file_path)
def normalize_path(path):
if path[-1] == os.sep:
path = path[:-1]
return path
def get_number_of_cpus():
return len(os.sched_getaffinity(0))
def get_subdirectories(root_dir: str) -> list:
return [f_path.path for f_path in os.scandir(root_dir) if f_path.is_dir()]
def get_available_ram(used_memory: int = 0) -> int:
    """
    Get the available RAM in bytes.

    Args:
        used_memory (int): memory already claimed; subtracted from the SLURM allocation.

    Returns:
        int: available RAM in bytes
    """
slurm_mem = slurmpie.System().get_job_memory()
if slurm_mem is None:
available_ram = psutil.virtual_memory().available
else:
# Convert from megabytes to bytes (*1024*1024)
slurm_mem *= 1048576
available_ram = slurm_mem - used_memory
return available_ram
def get_dir_size(root_dir):
"""Returns total size of all files in dir (and subdirs)"""
root_directory = Path(os.path.normpath(root_dir))
return sum(f.stat().st_size for f in root_directory.glob("**/*") if f.is_file())
def get_gpu_compute_capability(gpu: tf.config.PhysicalDevice) -> tuple:
try:
gpu_number = int(gpu.name.split(":")[-1])
cuda_device = numba.cuda.select_device(gpu_number)
cuda_capability = cuda_device.compute_capability
cuda_device.reset()
except numba.cuda.cudadrv.error.CudaSupportError:
# We do not actually have a cuda device
cuda_capability = (0, 0)
return cuda_capability
def gpu_supports_float16(gpu: tf.config.PhysicalDevice) -> bool:
gpu_compute_capability = get_gpu_compute_capability(gpu)
# Float16 support is supported with at least compute capability 5.3
supports_float16 = (gpu_compute_capability[0] == 5 and gpu_compute_capability[1] >= 3) or (
gpu_compute_capability[0] > 5
)
return supports_float16
def gpu_supports_mixed_precision(gpu: tf.config.PhysicalDevice) -> bool:
gpu_compute_capability = get_gpu_compute_capability(gpu)
    # Mixed precision has benefits on compute capability 7.5 and higher
    return gpu_compute_capability >= (7, 5)
def get_gpu_devices() -> list:
return tf.config.list_physical_devices("GPU")
def get_number_of_gpu_devices() -> int:
return len(tf.config.list_physical_devices("GPU"))
def get_cpu_devices() -> list:
return tf.config.list_physical_devices("CPU")
def get_number_of_slurm_nodes() -> int:
if "SLURM_JOB_NUM_NODES" in os.environ:
number_of_slurm_nodes = int(os.environ["SLURM_JOB_NUM_NODES"])
else:
number_of_slurm_nodes = 0
return number_of_slurm_nodes
def load_module_from_file(module_path):
if module_path is None:
return None
class_name = get_root_name(module_path).split(".")[0]
module_file_spec = importlib.util.spec_from_file_location(class_name, module_path,)
module = importlib.util.module_from_spec(module_file_spec)
module_file_spec.loader.exec_module(module)
return module
def setup_logger():
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s prognosais %(levelname)-1s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
stream=sys.stdout,
)
|
StarcoderdataPython
|
169895
|
from collections import namedtuple
from utils import lerp
class RGB(namedtuple('RGB', 'r g b')):
    """ stores color as an integer triple in the range [0, 255] """
class Color(namedtuple('Color', 'r g b')):
    """ stores color as a float triple in the range [0.0, 1.0] """
def rgb12(self):
r = int(self.r * 255.0)
g = int(self.g * 255.0)
b = int(self.b * 255.0)
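        # Quantize each 8-bit channel to 4 bits (12-bit colour overall) while staying in
        # 8-bit range: the low nibble is replaced by a copy of the high nibble, e.g. 0xAB -> 0xAA.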
r = (r & ~15) | (r >> 4)
g = (g & ~15) | (g >> 4)
b = (b & ~15) | (b >> 4)
return RGB(r, g, b)
def rgb24(self):
r = int(self.r * 255.0)
g = int(self.g * 255.0)
b = int(self.b * 255.0)
return RGB(r, g, b)
@staticmethod
def lerp(lo, hi, step):
r = lerp(lo.r, hi.r, step)
g = lerp(lo.g, hi.g, step)
b = lerp(lo.b, hi.b, step)
return Color(r, g, b)
|
StarcoderdataPython
|
4924531
|
# Repository: TianTcl/TheMatrix
# Matrix Loves You : push/cramer.py
# Imports
from mLib import find, get, identify, var
from packages import convert
from fractions import Fraction as fnd
# Functions
def cInput():
    print("\nPlease enter expressions below (Variables on the left, real numbers on the right. Only use + as separator)")
a_algebra_store = list(())
a_length_extend = "0123456789"
for a_algebra_require in range(len(a_length_extend)):
a_get_input = input("Expression ("+ str(len(a_algebra_store) + 1) +") : ")
if a_get_input == "" and len(a_algebra_store) >= 2:
break
elif a_get_input == "" and len(a_algebra_store) < 2:
print("Error : Please enter at least 2 expressions")
else:
a_set_pass = set(())
for a_each_require in var.Expsymbol:
if a_each_require in a_get_input:
a_set_pass.add(True)
else:
a_set_pass.add(False)
if False not in a_set_pass:
a_algebra_store.append(a_get_input)
else:
print("Error : Expression doesn't contains required character")
a_length_extend += "."
return a_algebra_store
def cConvert(c_data):
c_store = c_data
# Find all vars
c_set_vars = set(())
for c_each_expression in c_store:
c_esplit = "="
c_asplit = "+"
if c_each_expression[c_each_expression.index("=") - 1] == " ":
c_esplit = " = "
if c_each_expression[c_each_expression.index("+") - 1] == " ":
c_asplit = " + "
c_value = c_each_expression.split(c_esplit)[0].split(c_asplit)
c_value.append(c_each_expression.split(c_esplit)[1])
for c_each_section in c_value:
for c_each_char in c_each_section:
if c_each_char.lower() in var.Lowercase:
c_set_vars.add(c_each_char)
if len(c_set_vars) != len(c_store):
print("Error : Amount of expressions is inequal to amount of variables")
c_return = "error"
else:
c_splited = list(())
c_matrix_equal = list(())
for c_each_expression in c_store:
c_esplit = "="
c_asplit = "+"
if c_each_expression[c_each_expression.index("=") - 1] == " ":
c_esplit = " = "
if c_each_expression[c_each_expression.index("+") - 1] == " ":
c_asplit = " + "
c_splited.append(c_each_expression.split(c_esplit)[0].split(c_asplit))
c_matrix_equal.append([c_each_expression.split(c_esplit)[1]])
del c_each_expression
c_list_vars = list(c_set_vars)
c_list_vars.sort()
c_matrix_transposed = list(())
for c_each_var in c_list_vars:
c_each_row = list(())
for c_each_expression in c_splited:
c_find = True
for c_each_part in c_each_expression:
if c_each_var in c_each_part:
if len(c_each_part) == 1:
c_each_row.append("1")
c_find = False
break
elif len(c_each_part) == 2 and c_each_part[0] == "-":
c_each_row.append("-1")
c_find = False
break
else:
c_each_row.append(c_each_part[:-1])
c_find = False
break
if c_find:
c_each_row.append("0")
c_matrix_transposed.append(c_each_row)
c_matrix = convert.data({"A" : c_matrix_transposed, "B" : c_matrix_equal})
c_matrix[0]['value'] = find.transpose(get.matrix(c_matrix, "A"), True)
c_return = [get.matrix(c_matrix, "A"), get.matrix(c_matrix, "B"), c_list_vars]
return c_return
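# A brief sketch of Cramer's rule, which cFind below applies: for a system A.x = b with
# det(A) != 0, each unknown x_i equals det(A_i) / det(A), where A_i is A with its i-th
# column replaced by b. Example: 2x + y = 5 and x + 3y = 10 give det(A) = 5,
# det(A_x) = 5 and det(A_y) = 15, hence x = 1 and y = 3.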
def cFind(f_data):
if f_data != "error":
f_main = f_data[0]
f_replace = f_data[1]
f_variable = f_data[2]
f_det_main = find.determinant(f_main, True)
if f_det_main == 0:
print("Expression is not true!")
else:
f_store_new = dict(())
for f_each_var_index, f_each_var in enumerate(f_variable):
f_new_matrix = list(())
for f_each_row_index, f_each_row in enumerate(f_main.get('value')):
f_new_row = list(())
for f_each_col_index, f_each_col in enumerate(f_each_row):
if f_each_col_index == f_each_var_index:
f_new_row.append(f_replace.get('value')[f_each_row_index][0])
else:
f_new_row.append(f_each_col)
f_new_matrix.append(f_new_row)
f_store_new[f_each_var.upper()] = f_new_matrix
for f_get_var in f_variable:
f_get_new_matrix = dict(())
f_get_new_matrix['name'] = identify.name(f_get_var.upper())
f_get_new_matrix['value'] = f_store_new.get(f_get_var.upper())
f_get_new_matrix['row'] = len(f_store_new.get(f_get_var.upper()))
f_get_new_matrix['col'] = len(f_store_new.get(f_get_var.upper())[0])
f_get_new_matrix['count'] = len(f_store_new.get(f_get_var.upper()))*len(f_store_new.get(f_get_var.upper())[0])
f_get_new_matrix['call'] = convert.name(f_get_var.upper(), f_store_new.get(f_get_var.upper()))
f_get_new_matrix['type'] = identify.matrix(f_get_new_matrix)
f_det_me = find.determinant(f_get_new_matrix, True)
f_value = f_det_me / f_det_main
if type(f_value) == float and f_value.is_integer() or type(f_value) == int:
f_value = int(f_value)
else:
f_value = str(fnd(float(f_value)).limit_denominator())
print(f_get_var +" = "+ str(f_value))
|
StarcoderdataPython
|
4908970
|
def run(n, to):
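    # Recursively prints powers of two: a starting value of 0 is bumped to 1, then the
    # value is printed and doubled until it exceeds `to`.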
raised_to = n
if raised_to == 0:
raised_to += 1
run(raised_to, to)
elif raised_to <= to:
n = str(n)
print('' + n + '\n')
raised_to = raised_to * 2
run(raised_to, to)
if __name__ == '__main__':
    print("Printing powers of 2 up to the given limit")
    run(0, int(input("Type the upper limit for the powers of 2: ")))
|
StarcoderdataPython
|
5090230
|
# Architecture.py
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
import numpy as np
import tensorflow.contrib.layers as layers
import data_train
FLAGS = tf.app.flags.FLAGS
arg_scope = tf.contrib.framework.arg_scope
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 6,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/data/train_demo',
"""Path to the Anti-Spoofing data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
"""Train the model using fp16.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = data_train.IMAGE_SIZE
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = data_train.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = data_train.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 15.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.8 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.00003 # Initial learning rate.
R_FOR_LSE = 10
TOWER_NAME = 'tower'
def _activation_summary(x):
    """
    Creates a TensorBoard histogram summary for the given activation tensor
    and prints its shape.
    """
#
#
print(x.shape)
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
def _variable_on_cpu(name, shape, initializer):
    """
    Creates a variable placed in CPU memory.
    """
with tf.device('/cpu:0'):
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
    """
    Creates a truncated-normal-initialized variable, optionally with L2 weight decay
    added to the 'losses' collection.
    """
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def distorted_inputsB(a):
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = FLAGS.data_dir
if a==1:
images, dmaps, labels, sizes, slabels = data_train.distorted_inputs(data_dir=data_dir, batch_size=FLAGS.batch_size)
else:
images, dmaps, labels, sizes, slabels = data_train.distorted_inputsA(data_dir=data_dir, batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
        dmaps = tf.cast(dmaps, tf.float16)
return images, dmaps, labels, sizes, slabels
def inputs(testset):
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = FLAGS.data_dir
images, dmaps, labels, sizes, slabels = data_train.inputs(testset = testset,
data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
        dmaps = tf.cast(dmaps, tf.float16)
return images, dmaps, labels, sizes, slabels
def inference(images, size,labels, training_nn, training_class, _reuse):
#
#
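    # Rough structure of the graph (by variable scope): SecondAMIN is a depth-map CNN,
    # FirstAMIN estimates a spoof-noise / zero-one map that is subtracted from the input
    # to form Live_est, and ThirdAMIN scores Live_est against the original images
    # (used as the GAN discriminator in the loss functions below).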
batch_norm_decay = 0.9
batch_norm_epsilon = 1e-5
batch_norm_scale = True
batch_norm_params = {
'is_training': training_nn,
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'updates_collections': None, #
}
with arg_scope( [layers.conv2d],
kernel_size = 3,
weights_initializer = tf.random_normal_initializer(stddev=0.02),
biases_initializer = tf.constant_initializer(0.0),
activation_fn=tf.nn.elu,
normalizer_fn=layers.batch_norm,
normalizer_params=batch_norm_params,
trainable = training_nn,
reuse=_reuse,
padding='SAME',
stride=1):
conv0 = layers.conv2d(images,num_outputs = 64, scope='SecondAMIN/conv0')
with tf.name_scope('convBlock-1') as scope:
conv1 = layers.conv2d(conv0,num_outputs = 128, scope='SecondAMIN/conv1')
bconv1 = layers.conv2d(conv1,num_outputs = 196, scope='SecondAMIN/bconv1')
conv2 = layers.conv2d(bconv1, num_outputs = 128, scope='SecondAMIN/conv2')
pool1 = layers.max_pool2d(conv2, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='SecondAMIN/pool1')
_activation_summary(conv1)
_activation_summary(bconv1)
_activation_summary(conv2)
with tf.name_scope('convBlock-2') as scope:
conv3 = layers.conv2d(pool1, num_outputs = 128, scope='SecondAMIN/conv3')
bconv2 = layers.conv2d(conv3, num_outputs = 196, scope='SecondAMIN/bconv2')
conv4 = layers.conv2d(bconv2, num_outputs = 128, scope='SecondAMIN/conv4')
pool2 = layers.max_pool2d(conv4, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='SecondAMIN/pool2')
_activation_summary(conv3)
_activation_summary(bconv2)
_activation_summary(conv4)
with tf.name_scope('convBlock-3') as scope:
conv5 = layers.conv2d(pool2, num_outputs = 128, scope='SecondAMIN/conv5')
bconv3 = layers.conv2d(conv5, num_outputs = 196, scope='SecondAMIN/bconv3')
conv6 = layers.conv2d(bconv3, num_outputs = 128, scope='SecondAMIN/conv6')
pool3 = layers.avg_pool2d(conv6, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='SecondAMIN/pool3')
_activation_summary(conv5)
_activation_summary(bconv3)
_activation_summary(conv6)
map1 = tf.image.resize_images(pool1,[32,32])
map2 = tf.image.resize_images(pool2,[32,32])
map3 = tf.image.resize_images(pool3,[32,32])
summap = tf.concat([map1, map2, map3],3)
#
with tf.name_scope('Depth-Map-Block') as scope:
conv7 = layers.conv2d(summap, num_outputs = 128, scope='SecondAMIN/conv7')
dp1 = tf.layers.dropout(conv7,rate = 0.2, training = training_nn, name = 'SecondAMIN/dropout1')
conv8 = layers.conv2d(dp1, num_outputs = 64, scope='SecondAMIN/conv8')
_activation_summary(conv7)
_activation_summary(conv8)
with arg_scope( [layers.conv2d],
kernel_size = 3,
weights_initializer = tf.random_normal_initializer(stddev=0.02),
biases_initializer = tf.constant_initializer(0.0),
activation_fn= None,
normalizer_fn= None,
padding='SAME',
trainable = training_nn,
reuse=_reuse,
stride=1):
#
conv11 = layers.conv2d(conv8, num_outputs = 1, scope='SecondAMIN/conv11')
_activation_summary(conv11)
tf.summary.image('depthMap_Second', conv11, max_outputs=FLAGS.batch_size)
with arg_scope( [layers.conv2d],
kernel_size = 3,
weights_initializer = tf.random_normal_initializer(stddev=0.02),
biases_initializer = tf.constant_initializer(0.0),
activation_fn=tf.nn.elu,
normalizer_fn=layers.batch_norm,
normalizer_params=batch_norm_params,
trainable = training_nn,
reuse=_reuse,
padding='SAME',
stride=1):
conv0_fir = layers.conv2d(images,num_outputs = 24, scope='FirstAMIN/conv0') #
pool1_fir = layers.max_pool2d(conv0_fir, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='FirstAMIN/pool1')
with tf.name_scope('convBlock-1_fir') as scope:
conv1_fir = layers.conv2d(pool1_fir,num_outputs = 20, scope='FirstAMIN/conv1')#
bconv1_fir = layers.conv2d(conv1_fir,num_outputs = 25, scope='FirstAMIN/bconv1')#
conv2_fir = layers.conv2d(bconv1_fir, num_outputs = 20, scope='FirstAMIN/conv2')#
with tf.name_scope('convBlock-2_fir') as scope:
pool2_fir = layers.max_pool2d(conv2_fir, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='FirstAMIN/pool2')
conv3_fir = layers.conv2d(pool2_fir, num_outputs = 20, scope='FirstAMIN/conv3')
bconv2_fir = layers.conv2d(conv3_fir, num_outputs = 25, scope='FirstAMIN/bconv2')
conv4_fir = layers.conv2d(bconv2_fir, num_outputs = 20, scope='FirstAMIN/conv4')
with tf.name_scope('convBlock-3_fir') as scope:
pool3_fir = layers.avg_pool2d(conv4_fir, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='FirstAMIN/pool3')
conv5_fir = layers.conv2d(pool3_fir, num_outputs = 20, scope='FirstAMIN/conv5')
bconv3_fir = layers.conv2d(conv5_fir, num_outputs = 25, scope='FirstAMIN/bconv3')
conv6_fir = layers.conv2d(bconv3_fir, num_outputs = 20, scope='FirstAMIN/conv6')
map1_fir = tf.image.resize_images(conv2_fir,[32,32])
map2_fir = tf.image.resize_images(conv4_fir,[32,32])
map3_fir = conv6_fir
summap_fir = tf.concat([map1_fir, map2_fir, map3_fir],3)
#
with tf.name_scope('Depth-Map-Block_fir') as scope:
conv7_fir = layers.conv2d(summap_fir, num_outputs = 28, scope='FirstAMIN/conv7')
dp1_fir = tf.layers.dropout(conv7_fir,rate = 0, training = training_nn, name = 'FirstAMIN/dropout2')
conv8_fir = layers.conv2d(dp1_fir, num_outputs =16 , scope='FirstAMIN/conv8')
with arg_scope( [layers.conv2d],
kernel_size = 3,
weights_initializer = tf.random_normal_initializer(stddev=0.02),
biases_initializer = None, #
activation_fn= None,
normalizer_fn= None,
padding='SAME',
reuse=_reuse,
stride=1):
#
conv11_fir = layers.conv2d(conv8_fir, num_outputs = 1, scope='FirstAMIN/conv11')
tf.summary.image('ZeroOneMap', tf.cast(256*conv11_fir,tf.uint8), max_outputs=FLAGS.batch_size)
with arg_scope( [layers.conv2d],
kernel_size = 3,
weights_initializer = tf.random_normal_initializer(stddev=0.02),
biases_initializer = tf.constant_initializer(0.0),
activation_fn=tf.nn.elu,
normalizer_fn=layers.batch_norm,
normalizer_params=batch_norm_params,
trainable = training_nn,
padding='SAME',
reuse=_reuse,
stride=1):
#
with tf.name_scope('Score-Map-Block09') as scope:
summap_fir = tf.image.resize_images(summap_fir,[256,256])
conv9_fir = layers.conv2d(summap_fir, num_outputs = 28, scope='FirstAMIN/conv9')
conv10_fir = layers.conv2d(conv9_fir, num_outputs = 24, scope='FirstAMIN/conv10')
#
conv12_fir = layers.conv2d(conv10_fir, num_outputs = 20, scope='FirstAMIN/conv12')
conv13_fir = layers.conv2d(conv12_fir, num_outputs = 20, scope='FirstAMIN/conv13')
#
conv14_fir = layers.conv2d(conv13_fir, num_outputs = 20, scope='FirstAMIN/conv14')
conv15_fir = layers.conv2d(conv14_fir, num_outputs = 16, scope='FirstAMIN/conv15')
#
conv16_fir = layers.conv2d(conv15_fir, num_outputs = 16, scope='FirstAMIN/conv16')
with arg_scope( [layers.conv2d],
kernel_size = 3,
weights_initializer = tf.random_normal_initializer(stddev=0.002),
biases_initializer = None, #tf.constant_initializer(0.0),
activation_fn= None,
normalizer_fn= None,
padding='SAME',
reuse=_reuse,
stride=1):
conv17 = layers.conv2d(conv16_fir, num_outputs = 6, scope='FirstAMIN/conv17')
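        # Frequency-domain view of the estimated spoof noise: 2-D FFT per channel,
        # log-magnitude, with the spectrum border cropped; returned as thirdPart_fft_1
        # (apparently consumed by the repetitive-pattern losses).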
thirdPart_comp_1 = tf.complex(conv17, tf.zeros_like(conv17))
thirdPart_comp_1=tf.transpose(thirdPart_comp_1, perm=[0,3,1,2])
thirdPart_fft_1=tf.abs(tf.fft2d(thirdPart_comp_1, name='summap_fft_real_1'))
thirdPart_fft_1=tf.transpose(thirdPart_fft_1, perm=[0,2,3,1])
thirdPart_fft_1=tf.log1p(thirdPart_fft_1[:,32:256-32,32:256-32,:])
#
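        # Estimate the live image by subtracting the scaled noise estimate from the input
        # and clamping negative values to zero.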
Live_est1= images-conv17/45
Live_est_mask = tf.cast(tf.greater(Live_est1,0),tf.float32)
Live_est=Live_est1*Live_est_mask
#
#################################################################################################################################
with arg_scope( [layers.conv2d],
kernel_size = 3,
weights_initializer = tf.random_normal_initializer(stddev=0.02),
biases_initializer = tf.constant_initializer(0.0),
activation_fn=tf.nn.elu,
normalizer_fn=layers.batch_norm,
normalizer_params=batch_norm_params,
trainable = training_nn,
padding='SAME',
reuse=_reuse,
stride=1):
# Score Map Branch
with tf.name_scope('Score-Map-Block1_dis') as scope:
conv9_dis = layers.conv2d(Live_est, num_outputs = 24, scope='ThirdAMIN/conv9')
conv10_dis = layers.conv2d(conv9_dis, num_outputs = 20, scope='ThirdAMIN/conv10')
pool1_dis = layers.max_pool2d(conv10_dis, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='ThirdPool1')
conv12_dis = layers.conv2d(pool1_dis, num_outputs = 20, scope='ThirdAMIN/conv12')
conv13_dis = layers.conv2d(conv12_dis, num_outputs = 16, scope='ThirdAMIN/conv13')
pool2_dis = layers.max_pool2d(conv13_dis, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='ThirdPool2')
conv14_dis = layers.conv2d(pool2_dis, num_outputs = 12, scope='ThirdAMIN/conv14')
conv15_dis = layers.conv2d(conv14_dis, num_outputs = 6, scope='ThirdAMIN/conv15')
pool3_dis = layers.max_pool2d(conv15_dis, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='ThirdPool3')
conv16_dis = layers.conv2d(pool3_dis, num_outputs = 1, scope='ThirdAMIN/conv16')
conv20_dis=tf.reshape(conv16_dis, [6,32*32])
sc333_dis = layers.fully_connected(conv20_dis, num_outputs = 100, reuse=_reuse, scope='ThirdAMIN/bconv15_sc333_dis')
dp1_dis = tf.layers.dropout(sc333_dis,rate = 0.2, training = training_nn, name = 'dropout3')
sc = layers.fully_connected(dp1_dis, num_outputs = 2, reuse=_reuse,
weights_initializer = tf.random_normal_initializer(stddev=0.02),
biases_initializer = None, #tf.constant_initializer(0.0),
activation_fn= None,
normalizer_fn= None,scope='ThirdAMIN/bconv10_sc')
conv9_dis2 = layers.conv2d(images, num_outputs = 24, reuse= True, scope='ThirdAMIN/conv9')
conv10_dis2 = layers.conv2d(conv9_dis2, num_outputs = 20, reuse= True, scope='ThirdAMIN/conv10')
pool1_dis2 = layers.max_pool2d(conv10_dis2, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='ThirdPool1')
conv12_dis2 = layers.conv2d(pool1_dis2, num_outputs = 20,reuse= True, scope='ThirdAMIN/conv12')
conv13_dis2 = layers.conv2d(conv12_dis2, num_outputs = 16, reuse= True, scope='ThirdAMIN/conv13')
pool2_dis2 = layers.max_pool2d(conv13_dis2, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='ThirdPool2')
conv14_dis2 = layers.conv2d(pool2_dis2, num_outputs = 12, reuse= True, scope='ThirdAMIN/conv14')
conv15_dis2 = layers.conv2d(conv14_dis2, num_outputs = 6, reuse= True, scope='ThirdAMIN/conv15')
pool3_dis2 = layers.max_pool2d(conv15_dis2, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='ThirdPool3')
conv16_dis2 = layers.conv2d(pool3_dis2, num_outputs = 1, reuse= True, scope='ThirdAMIN/conv16')
conv20_dis2=tf.reshape(conv16_dis2, [6,32*32])
sc333_dis2 = layers.fully_connected(conv20_dis2, reuse= True, num_outputs = 100,scope='ThirdAMIN/bconv15_sc333_dis')
dp1_dis2 = tf.layers.dropout(sc333_dis2,rate = 0.2, training = training_nn, name = 'dropout4')
sc2 = layers.fully_connected(dp1_dis2, num_outputs = 2, reuse= True,
weights_initializer = tf.random_normal_initializer(stddev=0.02),
biases_initializer = None, #tf.constant_initializer(0.0),
activation_fn= None,
normalizer_fn= None,scope='ThirdAMIN/bconv10_sc')
##################################################################################################################################
batch_norm_decay = 0.9
batch_norm_epsilon = 1e-5
batch_norm_scale = True
batch_norm_params = {
'is_training': False,
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'updates_collections': None, #
'trainable':False,
#'reuse':True
}
with arg_scope( [layers.conv2d],
kernel_size = 3,
weights_initializer = tf.random_normal_initializer(stddev=0.02),
biases_initializer = tf.constant_initializer(0.0),
activation_fn=tf.nn.elu,
normalizer_fn=layers.batch_norm,
normalizer_params=batch_norm_params,
trainable = False,
padding='SAME',
reuse=True,
stride=1):
#################################################################################################################################
conv0_new = layers.conv2d(Live_est,num_outputs = 64, scope='SecondAMIN/conv0')
with tf.name_scope('convBlock-1_new') as scope:
conv1_new = layers.conv2d(conv0_new,num_outputs = 128, scope='SecondAMIN/conv1')
bconv1_new = layers.conv2d(conv1_new,num_outputs = 196, scope='SecondAMIN/bconv1')
conv2_new = layers.conv2d(bconv1_new, num_outputs = 128, scope='SecondAMIN/conv2')
pool1_new = layers.max_pool2d(conv2_new, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='SecondAMIN/pool1')
with tf.name_scope('convBlock-2_new') as scope:
conv3_new = layers.conv2d(pool1_new, num_outputs = 128, scope='SecondAMIN/conv3')
bconv2_new = layers.conv2d(conv3_new, num_outputs = 196, scope='SecondAMIN/bconv2')
conv4_new = layers.conv2d(bconv2_new, num_outputs = 128, scope='SecondAMIN/conv4')
pool2_new = layers.max_pool2d(conv4_new, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='SecondAMIN/pool2')
with tf.name_scope('convBlock-3_new') as scope:
conv5_new = layers.conv2d(pool2_new, num_outputs = 128, scope='SecondAMIN/conv5')
bconv3_new = layers.conv2d(conv5_new, num_outputs = 196, scope='SecondAMIN/bconv3')
conv6_new = layers.conv2d(bconv3_new, num_outputs = 128, scope='SecondAMIN/conv6')
pool3_new = layers.avg_pool2d(conv6_new, kernel_size=[3, 3], stride=[2, 2], padding='SAME', scope='SecondAMIN/pool3')
map1_new = tf.image.resize_images(pool1_new,[32,32])
map2_new = tf.image.resize_images(pool2_new,[32,32])
map3_new = tf.image.resize_images(pool3_new,[32,32])
summap_new = tf.concat([map1_new, map2_new, map3_new],3)
# Depth Map Branch
with tf.name_scope('Depth-Map-Block_new') as scope:
conv7_new = layers.conv2d(summap_new, num_outputs = 128, scope='SecondAMIN/conv7')
dp1_new = tf.layers.dropout(conv7_new,rate = 0.2, training = training_nn, name = 'SecondAMIN/dropout1')
conv8_new = layers.conv2d(dp1_new, num_outputs = 64, scope='SecondAMIN/conv8')
with arg_scope( [layers.conv2d],
kernel_size = 3,
weights_initializer = tf.random_normal_initializer(stddev=0.02),
biases_initializer = tf.constant_initializer(0.0),
activation_fn= None,
normalizer_fn= None,
padding='SAME',
trainable = False,
reuse=True,
stride=1):
# Depth Map Branch
conv11_new = layers.conv2d(conv8_new, num_outputs = 1, scope='SecondAMIN/conv11')
label_Amin1=size
LabelsWholeImage=tf.cast(np.ones([6,32,32,1]), tf.float32)
LabelsWholeImage2=LabelsWholeImage*tf.reshape(tf.cast(1-label_Amin1,tf.float32),[6,1,1,1])
LabelsWholeImage=labels*tf.reshape(tf.cast(label_Amin1,tf.float32),[6,1,1,1])
Z_GT2=np.zeros([6,3,3,1])
Z_GT2[:,1,1,:]=1
GT2=tf.cast(Z_GT2, tf.float32)
tf.summary.image('GT2', LabelsWholeImage[:,:,:,0:1], max_outputs=FLAGS.batch_size)
tf.summary.image('SC', tf.cast(256*conv11[:,:,:,0:1],tf.uint8), max_outputs=FLAGS.batch_size)
tf.summary.image('Live_SC', tf.cast(256*conv11_new[:,:,:,0:1],tf.uint8), max_outputs=FLAGS.batch_size)
tf.summary.image('Live', tf.cast(256*Live_est[:,:,:,3:6],tf.uint8), max_outputs=FLAGS.batch_size)
tf.summary.image('inputImage', tf.cast(256*images[:,:,:,3:6],tf.uint8), max_outputs=FLAGS.batch_size)
tf.summary.image('GT3_Artifact', LabelsWholeImage2[:,:,:,0:1], max_outputs=FLAGS.batch_size)
tf.summary.image('Artifact', conv17[:,:,:,3:6], max_outputs=FLAGS.batch_size)
return Live_est, conv17, conv11, GT2,conv17,images,thirdPart_fft_1,LabelsWholeImage, conv11_new,conv11_new , LabelsWholeImage2, sc, sc2, conv11_fir
#
def lossSecond(dmaps, smaps, labels, slabels, sc,GT2, fftmapA, A, B,bin_labels, bin_labels2,Nsc,Lsc,sc_fake, sc_real):
#
with tf.name_scope('DR_Net_Training') as scope:
mean_squared_loss = tf.reduce_mean(
tf.reduce_mean(tf.abs(tf.subtract(sc,bin_labels2)),
reduction_indices = 2),
reduction_indices = 1)
loss2 = tf.reduce_mean(mean_squared_loss, name='pixel_loss1')*1
tf.summary.scalar('Loss',loss2)
tf.add_to_collection('losses', loss2)
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def lossThird(dmaps, smaps, labels, slabels, sc,GT2, fftmapA, A, B,bin_labels, bin_labels2,Nsc,Lsc,Allsc,sc_fake, sc_real):
with tf.name_scope('GAN_Training') as scope:
bin_labels3=tf.ones([6,1])
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.reshape(tf.cast(bin_labels3,tf.int32),[-1]), logits= tf.cast(sc_fake, tf.float32),
name='cross_entropy_per_example') # logits = (N,2) label = (N,) tf.reshape(label,[-1])
loss22 = tf.reduce_mean(cross_entropy, name='classification_loss2')*1
tf.add_to_collection('losses', loss22)
bin_labels3=tf.zeros([6,1])
bin_labels_1=tf.cast(sc_real, tf.float32)*tf.cast(bin_labels,tf.float32)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.reshape(tf.cast(bin_labels3,tf.int32),[-1]), logits= bin_labels_1,
name='cross_entropy_per_example2') # logits = (N,2) label = (N,) tf.reshape(label,[-1])
loss23 = tf.reduce_mean(cross_entropy, name='classification_loss3')*1
tf.summary.scalar('Loss',loss23+loss22)
tf.add_to_collection('losses', loss23)
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def lossFirst(dmaps, smaps, labels, slabels, sc,GT2, fftmapA, A, B,bin_labels, bin_labels2,Nsc,Lsc,Allsc,sc_fake, sc_real, conv11_fir):
with tf.name_scope('Zero_One_Map_loss') as scope:
mean_squared_loss = tf.reduce_mean(
tf.reduce_mean(((tf.abs(tf.subtract(Allsc,conv11_fir)))),
reduction_indices = 2),
reduction_indices = 1)
loss823 = tf.reduce_mean(mean_squared_loss, name='pixel_loss823')*6000
tf.summary.scalar('Loss',loss823)
tf.add_to_collection('losses', loss823)
with tf.name_scope('Dr_Net_Backpropagate') as scope:
bin_labels23=labels #tf.zeros_like(bin_labels2)
mean_squared_loss = tf.reduce_mean(
tf.reduce_mean(tf.abs(tf.subtract(Lsc,bin_labels23)),
reduction_indices = 2),
reduction_indices = 1)
loss32 = tf.reduce_mean(mean_squared_loss, name='pixel_loss32')*600
tf.summary.scalar('Loss',loss32)
tf.add_to_collection('losses', loss32)
with tf.name_scope('GAN_Backpropagate') as scope:
bin_labelsE=tf.zeros([6,1])
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.reshape(tf.cast(bin_labelsE,tf.int32),[-1]), logits= tf.cast(sc_fake, tf.float32),
name='cross_entropy_per_example')
loss22 = tf.reduce_mean(cross_entropy, name='classification_loss2')*1*100
tf.summary.scalar('Loss',loss22)
tf.add_to_collection('losses', loss22)
with tf.name_scope('Live_Repetitive_Pattern') as scope:
mean_squared_loss = tf.reduce_max(
tf.reduce_max(B,
reduction_indices = 2),
reduction_indices = 1)
#
bin_labels_1=tf.cast(bin_labels,tf.float32)
bin_labels9= tf.concat([bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1],1)
mean_squared_loss=mean_squared_loss*(bin_labels9)
loss81= tf.reduce_mean(mean_squared_loss, name='pixel_loss81')*1
tf.summary.scalar('Loss',loss81)
tf.add_to_collection('losses', loss81)
with tf.name_scope('Spoof_Repetitive_Pattern') as scope:
mean_squared_loss = tf.reduce_max(
tf.reduce_max(B,
reduction_indices = 2),
reduction_indices = 1)
bin_labels_1=tf.cast(1-bin_labels,tf.float32)
bin_labels9= tf.concat([bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1],1)
mean_squared_loss2=mean_squared_loss*(bin_labels9)
mean_squared_loss=-mean_squared_loss2#
loss812= tf.reduce_mean(mean_squared_loss, name='pixel_loss812')*1*2
tf.summary.scalar('Loss',loss812)
tf.add_to_collection('losses', loss812)
with tf.name_scope('Live_Images_Estimation') as scope:
mean_squared_loss = tf.reduce_mean(
tf.reduce_mean(((tf.abs(tf.subtract(A,dmaps)))),
reduction_indices = 2),
reduction_indices = 1)
bin_labels_1=tf.cast(bin_labels,tf.float32)
bin_labels8= tf.concat([bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1],1)
mean_squared_loss=mean_squared_loss*(bin_labels8)
loss8 = tf.reduce_mean(mean_squared_loss, name='pixel_loss8')*150*300
tf.summary.scalar('Loss',loss8)
tf.add_to_collection('losses', loss8)
with tf.name_scope('Live_Noise') as scope:
AllscZero = tf.cast(np.zeros([6,256,256,6]), tf.float32)
mean_squared_loss = tf.reduce_mean(
tf.reduce_mean(((tf.abs(tf.subtract(AllscZero,smaps)))),#
reduction_indices = 2),
reduction_indices = 1)
bin_labels_1=tf.cast(bin_labels,tf.float32)
bin_labels9= tf.concat([bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1],1)
mean_squared_loss=mean_squared_loss*(bin_labels9)#
loss9 = tf.reduce_mean(mean_squared_loss, name='pixel_loss9')*100*5
tf.summary.scalar('Loss',loss9)
tf.add_to_collection('losses', loss9)
with tf.name_scope('Spoof_Noise') as scope:
AllscOnes = tf.cast(tf.less(tf.abs(smaps),0.04),tf.float32) #
mean_squared_loss = tf.reduce_mean(
tf.reduce_mean(((tf.abs(smaps))),#
reduction_indices = 2),
reduction_indices = 1)
bin_labels_1=tf.cast(1-bin_labels,tf.float32)
bin_labels9= tf.concat([bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1,bin_labels_1],1)
mean_squared_loss2=mean_squared_loss*(bin_labels9)#
mean_squared_loss=tf.abs(mean_squared_loss2 -0.2) #
loss10 = tf.reduce_mean(mean_squared_loss, name='pixel_loss19')*10*3
tf.summary.scalar('Loss',loss10)
tf.add_to_collection('losses', loss10)
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
    """
    Adds scalar summaries and exponential moving averages for all collected losses.
    """
#
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
for l in losses + [total_loss]:
#
#
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step, varName1):
    """
    Builds the training op: Adam with exponentially decaying learning rate, applied to
    the trainable variables whose scope matches varName1, plus moving averages.
    """
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
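    # With staircase=True the decayed rate is
    # INITIAL_LEARNING_RATE * LEARNING_RATE_DECAY_FACTOR ** (global_step // decay_steps).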
tf.summary.scalar('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
#opt = tf.train.GradientDescentOptimizer(lr)
opt = tf.train.AdamOptimizer(lr)
first_train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,varName1)
#
grads = opt.compute_gradients(total_loss,first_train_vars)
#####################################################################################################################
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# Track the moving averages of all trainable variables.
with tf.name_scope('TRAIN') as scope:
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(first_train_vars)#tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
|
StarcoderdataPython
|
8969
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 18 08:40:11 2020
@author: krishan
"""
def funny_division2(anumber):
try:
if anumber == 13:
raise ValueError("13 is an unlucky number")
return 100 / anumber
except (ZeroDivisionError, TypeError):
return "Enter a number other than zero"
def funny_division3(anumber):
try:
if anumber == 13:
raise ValueError("13 is an unlucky number")
return 100 / anumber
except ZeroDivisionError:
return "Enter a number other than zero"
except TypeError:
return "Enter a numerical value"
except ValueError as e:
print("The exception arguments were",e.args)
#raise
for val in (0, "hello", 50.0, 13):
print(f"Testing {val}:", funny_division3(val))
|
StarcoderdataPython
|
6625869
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from absl.testing import parameterized
import tensorflow as tf
import tensorflow_privacy
from tensorflow_federated.python.common_libs import anonymous_tuple
from tensorflow_federated.python.common_libs import test
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.api import intrinsics
from tensorflow_federated.python.core.api import placements
from tensorflow_federated.python.core.impl.executors import default_executor
from tensorflow_federated.python.core.impl.types import placement_literals
from tensorflow_federated.python.core.impl.types import type_conversions
from tensorflow_federated.python.core.utils import differential_privacy
def wrap_aggregate_fn(dp_aggregate_fn, sample_value):
tff_types = type_conversions.type_from_tensors(sample_value)
@computations.federated_computation
def run_initialize():
return intrinsics.federated_value(dp_aggregate_fn.initialize(),
placement_literals.SERVER)
@computations.federated_computation(run_initialize.type_signature.result,
computation_types.FederatedType(
tff_types,
placement_literals.CLIENTS))
def run_aggregate(global_state, client_values):
return dp_aggregate_fn(global_state, client_values)
return run_initialize, run_aggregate
class BuildDpQueryTest(test.TestCase):
def test_build_dp_query_basic(self):
query = differential_privacy.build_dp_query(1.0, 2.0, 3.0)
self.assertIsInstance(query, tensorflow_privacy.GaussianAverageQuery)
self.assertEqual(query._numerator._l2_norm_clip, 1.0)
self.assertEqual(query._numerator._stddev, 2.0)
self.assertEqual(query._denominator, 3.0)
def test_build_dp_query_adaptive(self):
ccba = 0.1
query = differential_privacy.build_dp_query(
1.0,
2.0,
3.0,
adaptive_clip_learning_rate=0.05,
target_unclipped_quantile=0.5,
clipped_count_budget_allocation=ccba,
expected_num_clients=10)
self.assertIsInstance(query,
tensorflow_privacy.QuantileAdaptiveClipAverageQuery)
self.assertIsInstance(query._numerator,
tensorflow_privacy.QuantileAdaptiveClipSumQuery)
expected_sum_query_noise_multiplier = 2.0 * (1.0 - ccba)**(-0.5)
self.assertAlmostEqual(query._numerator._noise_multiplier,
expected_sum_query_noise_multiplier)
self.assertEqual(query._denominator, 3.0)
def test_build_dp_query_per_vector(self):
class MockTensor():
def __init__(self, shape):
self.shape = shape
mock_shape = collections.namedtuple('MockShape', ['dims'])
mock_dim = collections.namedtuple('MockDim', ['value'])
mock_model = collections.namedtuple('MockModel', ['weights'])
mock_weights = collections.namedtuple('MockWeights', ['trainable'])
def make_mock_tensor(*dims):
return MockTensor(mock_shape([mock_dim(dim) for dim in dims]))
vectors = collections.OrderedDict([('a', make_mock_tensor(2)),
('b', make_mock_tensor(2, 3)),
('c', make_mock_tensor(1, 3, 4))])
model = mock_model(mock_weights(vectors))
query = differential_privacy.build_dp_query(
1.0, 2.0, 3.0, per_vector_clipping=True, model=model)
self.assertIsInstance(query, tensorflow_privacy.NestedQuery)
def check(subquery):
self.assertIsInstance(subquery, tensorflow_privacy.GaussianAverageQuery)
self.assertEqual(subquery._denominator, 3.0)
tf.nest.map_structure(check, query._queries)
noise_multipliers = tf.nest.flatten(
tf.nest.map_structure(
lambda query: query._numerator._stddev / query._numerator.
_l2_norm_clip, query._queries))
effective_noise_multiplier = sum([x**-2.0 for x in noise_multipliers])**-0.5
self.assertAlmostEqual(effective_noise_multiplier, 2.0)
class BuildDpAggregateTest(test.TestCase):
def test_dp_sum(self):
query = tensorflow_privacy.GaussianSumQuery(4.0, 0.0)
dp_aggregate_fn, _ = differential_privacy.build_dp_aggregate(query)
initialize, aggregate = wrap_aggregate_fn(dp_aggregate_fn, 0.0)
global_state = initialize()
global_state, result = aggregate(global_state, [1.0, 3.0, 5.0])
self.assertEqual(global_state['l2_norm_clip'], 4.0)
self.assertEqual(global_state['stddev'], 0.0)
self.assertEqual(result, 8.0)
def test_dp_sum_structure_odict(self):
query = tensorflow_privacy.GaussianSumQuery(5.0, 0.0)
dp_aggregate_fn, _ = differential_privacy.build_dp_aggregate(query)
def datapoint(a, b):
return collections.OrderedDict([('a', (a,)), ('b', [b])])
data = [
datapoint(1.0, 2.0),
datapoint(2.0, 3.0),
datapoint(6.0, 8.0), # Clipped to 3.0, 4.0
]
initialize, aggregate = wrap_aggregate_fn(dp_aggregate_fn, data[0])
global_state = initialize()
global_state, result = aggregate(global_state, data)
self.assertEqual(global_state['l2_norm_clip'], 5.0)
self.assertEqual(global_state['stddev'], 0.0)
self.assertEqual(result['a'][0], 6.0)
self.assertEqual(result['b'][0], 9.0)
def test_dp_sum_structure_list(self):
query = tensorflow_privacy.GaussianSumQuery(5.0, 0.0)
def _value_type_fn(value):
del value
return [
computation_types.TensorType(tf.float32),
computation_types.TensorType(tf.float32),
]
dp_aggregate_fn, _ = differential_privacy.build_dp_aggregate(
query, value_type_fn=_value_type_fn)
def datapoint(a, b):
return [tf.Variable(a, name='a'), tf.Variable(b, name='b')]
data = [
datapoint(1.0, 2.0),
datapoint(2.0, 3.0),
datapoint(6.0, 8.0), # Clipped to 3.0, 4.0
]
initialize, aggregate = wrap_aggregate_fn(dp_aggregate_fn, data[0])
global_state = initialize()
global_state, result = aggregate(global_state, data)
self.assertEqual(global_state['l2_norm_clip'], 5.0)
self.assertEqual(global_state['stddev'], 0.0)
result = list(result)
self.assertEqual(result[0], 6.0)
self.assertEqual(result[1], 9.0)
def test_dp_stateful_mean(self):
class ShrinkingSumQuery(tensorflow_privacy.GaussianSumQuery):
def get_noised_result(self, sample_state, global_state):
global_state = self._GlobalState(
tf.maximum(global_state.l2_norm_clip - 1, 0.0), global_state.stddev)
return sample_state, global_state
query = ShrinkingSumQuery(4.0, 0.0)
dp_aggregate_fn, _ = differential_privacy.build_dp_aggregate(query)
initialize, aggregate = wrap_aggregate_fn(dp_aggregate_fn, 0.0)
global_state = initialize()
records = [1.0, 3.0, 5.0]
def run_and_check(global_state, expected_l2_norm_clip, expected_result):
global_state, result = aggregate(global_state, records)
self.assertEqual(global_state['l2_norm_clip'], expected_l2_norm_clip)
self.assertEqual(result, expected_result)
return global_state
self.assertEqual(global_state['l2_norm_clip'], 4.0)
global_state = run_and_check(global_state, 3.0, 8.0)
global_state = run_and_check(global_state, 2.0, 7.0)
global_state = run_and_check(global_state, 1.0, 5.0)
global_state = run_and_check(global_state, 0.0, 3.0)
global_state = run_and_check(global_state, 0.0, 0.0)
def test_dp_global_state_type(self):
query = tensorflow_privacy.GaussianSumQuery(5.0, 0.0)
_, dp_global_state_type = differential_privacy.build_dp_aggregate(query)
self.assertEqual(dp_global_state_type.__class__.__name__,
'NamedTupleTypeWithPyContainerType')
def test_default_from_tff_result_fn(self):
def check(elements, expected):
record = anonymous_tuple.AnonymousTuple(elements)
result = differential_privacy._default_from_tff_result_fn(record)
self.assertEqual(result, expected)
check([('a', 1), ('b', 2)], collections.OrderedDict([('a', 1), ('b', 2)]))
check([(None, 1), (None, 2)], [1, 2])
with self.assertRaisesRegex(ValueError, 'partially named fields'):
check([('a', 1), (None, 2)], None)
class BuildDpAggregateProcessTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('float', 0.0), ('list', [0.0, 0.0]),
('odict', collections.OrderedDict([('a', 0.0), ('b', 0.0)])))
def test_process_type_signature(self, value_template):
query = tensorflow_privacy.GaussianSumQuery(4.0, 0.0)
value_type = type_conversions.type_from_tensors(value_template)
dp_aggregate_process = differential_privacy.build_dp_aggregate_process(
value_type, query)
server_state_type = computation_types.FederatedType(
computation_types.NamedTupleType([('l2_norm_clip', tf.float32),
('stddev', tf.float32)]),
placements.SERVER)
self.assertEqual(
dp_aggregate_process.initialize.type_signature,
computation_types.FunctionType(
parameter=None, result=server_state_type))
client_value_type = computation_types.FederatedType(value_type,
placements.CLIENTS)
client_value_weight_type = computation_types.FederatedType(
tf.float32, placements.CLIENTS)
server_result_type = computation_types.FederatedType(
value_type, placements.SERVER)
server_metrics_type = computation_types.FederatedType((), placements.SERVER)
self.assertEqual(
dp_aggregate_process.next.type_signature,
computation_types.FunctionType(
parameter=computation_types.NamedTupleType([
(None, server_state_type), (None, client_value_type),
(None, client_value_weight_type)
]),
result=computation_types.NamedTupleType([
('state', server_state_type), ('result', server_result_type),
('measurements', server_metrics_type)
])))
def test_dp_sum(self):
query = tensorflow_privacy.GaussianSumQuery(4.0, 0.0)
value_type = type_conversions.type_from_tensors(0.0)
dp_aggregate_process = differential_privacy.build_dp_aggregate_process(
value_type, query)
global_state = dp_aggregate_process.initialize()
output = dp_aggregate_process.next(global_state, [1.0, 3.0, 5.0],
[1.0, 1.0, 1.0])
self.assertEqual(output['state']['l2_norm_clip'], 4.0)
self.assertEqual(output['state']['stddev'], 0.0)
self.assertEqual(output['result'], 8.0)
def test_dp_sum_structure_odict(self):
query = tensorflow_privacy.GaussianSumQuery(5.0, 0.0)
def datapoint(a, b):
return collections.OrderedDict([('a', (a,)), ('b', [b])])
data = [
datapoint(1.0, 2.0),
datapoint(2.0, 3.0),
datapoint(6.0, 8.0), # Clipped to 3.0, 4.0
]
value_type = type_conversions.type_from_tensors(data[0])
dp_aggregate_process = differential_privacy.build_dp_aggregate_process(
value_type, query)
global_state = dp_aggregate_process.initialize()
output = dp_aggregate_process.next(global_state, data, [1.0, 1.0, 1.0])
self.assertEqual(output['state']['l2_norm_clip'], 5.0)
self.assertEqual(output['state']['stddev'], 0.0)
self.assertEqual(output['result']['a'][0], 6.0)
self.assertEqual(output['result']['b'][0], 9.0)
def test_dp_sum_structure_nested_odict(self):
query = tensorflow_privacy.GaussianSumQuery(5.0, 0.0)
def datapoint(a, b, c):
return collections.OrderedDict([('a', (a,)),
('bc',
collections.OrderedDict([('b', [b]),
('c', (c,))]))])
data = [
datapoint(1.0, 2.0, 1.0),
datapoint(2.0, 3.0, 1.0),
datapoint(6.0, 8.0, 0.0), # Clipped to 3.0, 4.0, 0.0
]
value_type = type_conversions.type_from_tensors(data[0])
dp_aggregate_process = differential_privacy.build_dp_aggregate_process(
value_type, query)
global_state = dp_aggregate_process.initialize()
output = dp_aggregate_process.next(global_state, data, [1.0, 1.0, 1.0])
self.assertEqual(output['state']['l2_norm_clip'], 5.0)
self.assertEqual(output['state']['stddev'], 0.0)
self.assertEqual(output['result']['a'][0], 6.0)
self.assertEqual(output['result']['bc']['b'][0], 9.0)
self.assertEqual(output['result']['bc']['c'][0], 2.0)
def test_dp_sum_structure_complex(self):
query = tensorflow_privacy.GaussianSumQuery(5.0, 0.0)
def datapoint(a, b, c):
return collections.OrderedDict([('a', (a,)), ('bc', ([b], (c,)))])
data = [
datapoint(1.0, 2.0, 1.0),
datapoint(2.0, 3.0, 1.0),
datapoint(6.0, 8.0, 0.0), # Clipped to 3.0, 4.0, 0.0
]
value_type = type_conversions.type_from_tensors(data[0])
dp_aggregate_process = differential_privacy.build_dp_aggregate_process(
value_type, query)
global_state = dp_aggregate_process.initialize()
output = dp_aggregate_process.next(global_state, data, [1.0, 1.0, 1.0])
self.assertEqual(output['state']['l2_norm_clip'], 5.0)
self.assertEqual(output['state']['stddev'], 0.0)
self.assertEqual(output['result']['a'][0], 6.0)
self.assertEqual(output['result']['bc'][0][0], 9.0)
self.assertEqual(output['result']['bc'][1][0], 2.0)
def test_dp_sum_structure_list(self):
query = tensorflow_privacy.GaussianSumQuery(5.0, 0.0)
def datapoint(a, b):
return [tf.Variable(a, name='a'), tf.Variable(b, name='b')]
data = [
datapoint(1.0, 2.0),
datapoint(2.0, 3.0),
datapoint(6.0, 8.0), # Clipped to 3.0, 4.0
]
value_type = type_conversions.type_from_tensors(data[0])
dp_aggregate_process = differential_privacy.build_dp_aggregate_process(
value_type, query)
global_state = dp_aggregate_process.initialize()
output = dp_aggregate_process.next(global_state, data, [1.0, 1.0, 1.0])
self.assertEqual(output['state']['l2_norm_clip'], 5.0)
self.assertEqual(output['state']['stddev'], 0.0)
result = list(output['result'])
self.assertEqual(result[0], 6.0)
self.assertEqual(result[1], 9.0)
def test_dp_stateful_mean(self):
class ShrinkingSumQuery(tensorflow_privacy.GaussianSumQuery):
def get_noised_result(self, sample_state, global_state):
global_state = self._GlobalState(
tf.maximum(global_state.l2_norm_clip - 1, 0.0), global_state.stddev)
return sample_state, global_state
query = ShrinkingSumQuery(4.0, 0.0)
value_type = type_conversions.type_from_tensors(0.0)
dp_aggregate_process = differential_privacy.build_dp_aggregate_process(
value_type, query)
global_state = dp_aggregate_process.initialize()
records = [1.0, 3.0, 5.0]
def run_and_check(global_state, expected_l2_norm_clip, expected_result):
output = dp_aggregate_process.next(global_state, records, [1.0, 1.0, 1.0])
self.assertEqual(output['state']['l2_norm_clip'], expected_l2_norm_clip)
self.assertEqual(output['result'], expected_result)
return output['state']
self.assertEqual(global_state['l2_norm_clip'], 4.0)
global_state = run_and_check(global_state, 3.0, 8.0)
global_state = run_and_check(global_state, 2.0, 7.0)
global_state = run_and_check(global_state, 1.0, 5.0)
global_state = run_and_check(global_state, 0.0, 3.0)
global_state = run_and_check(global_state, 0.0, 0.0)
if __name__ == '__main__':
default_executor.initialize_default_executor()
test.main()
|
StarcoderdataPython
|
82959
|
# -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.binding.datatypes as xs
import pyxb.binding.basis
import pyxb.utils.domutils
import os.path
xsd='''<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:complexType name="tDescription" mixed="true">
<xs:sequence>
<xs:element ref="sub-description" minOccurs="0"/>
</xs:sequence>
</xs:complexType>
<xs:element name="sub-description" type="xs:string"/>
<xs:element name="description" type="tDescription"/>
</xs:schema>'''
code = pyxb.binding.generate.GeneratePython(schema_text=xsd)
#open('code.py', 'w').write(code)
#print code
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
class TestTrac_200907231924 (unittest.TestCase):
# This verifies that we do not improperly interpret non-element
# content as being the content of a nested element.
def testSub (self):
xml = '<sub-description>Floor</sub-description>'
instance = CreateFromDocument(xml)
self.assertEqual(instance, 'Floor')
def testMain (self):
xml = '<description>Main Office</description>'
instance = CreateFromDocument(xml)
self.assertEqual(1, len(instance.orderedContent()))
self.assertTrue(instance.sub_description is None)
self.assertEqual(instance.orderedContent()[0].value, 'Main Office')
def testMainSub (self):
xml = '<description>Main Office<sub-description>Floor</sub-description>State</description>'
instance = CreateFromDocument(xml)
self.assertTrue(instance.sub_description is not None)
self.assertEqual(instance.sub_description, 'Floor')
self.assertEqual(3, len(instance.orderedContent()))
self.assertEqual(instance.orderedContent()[0].value, 'Main Office')
self.assertEqual(instance.orderedContent()[2].value, 'State')
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
4862426
|
<gh_stars>1-10
def direction(from_cell, to_cell):
"""
Determine which direction to move\n
@param from_cell -> Current cell occupied\n
@param to_cell -> Destination cell\n
@returns -> Direction to move\n
"""
dx = to_cell[0] - from_cell[0]
dy = to_cell[1] - from_cell[1]
if dx == 1:
return 'right'
elif dx == -1:
return 'left'
elif dy == -1:
return 'up'
elif dy == 1:
return 'down'
def distance(p, q):
"""
Manhattan distance between two points\n
    @param p -> Coordinates of the first point\n
    @param q -> Coordinates of the second point\n
@returns -> Distance between cells
"""
dx = abs(p[0] - q[0])
dy = abs(p[1] - q[1])
return dx + dy
def reconstruct_path(came_from, current):
"""
Helper method for astar to determine path\n
@param came_from -> Cells searched while finding path\n
@param current -> Current node where snake head is\n
@returns total_path -> Reversed list of coordinates to goal
"""
total_path = [current]
while current in came_from.keys():
current = came_from[current]
total_path.append(current)
return list(reversed(total_path))
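# Worked example (hypothetical values): with came_from = {(1, 0): (0, 0), (1, 1): (1, 0)},
# reconstruct_path(came_from, (1, 1)) walks back through the dict and returns
# [(0, 0), (1, 0), (1, 1)], i.e. the path from start to goal in forward order.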
def neighbours(node, grid, score, tail, ignore_list):
"""
Retrieve a list of cells around a specific node\n
Originally from https://github.com/noahspriggs/battlesnake-python\n
@param node -> Current cell\n
@param grid -> Current state of the game board\n
@param score -> Score of the cell\n
@param tail -> Coords of snake body used for filter\n
@param ignore_list -> Danger cells on grid\n
@returns result -> Array of available connected cells\n
"""
width = grid.width
height = grid.height
subtail = []
if score >= len(tail):
subtail = [tuple(x) for x in tail]
else:
subtail = [tuple(x) for x in tail[len(tail)-score:]]
result = []
if (node[0] > 0):
result.append((node[0]-1,node[1]))
if (node[0] < width-1):
result.append((node[0]+1,node[1]))
if (node[1] > 0):
result.append((node[0],node[1]-1))
if (node[1] < height-1):
result.append((node[0],node[1]+1))
result = filter(lambda p: (grid.get_cell(p) not in ignore_list) or (p in subtail), result)
return result
def check_ahead(tentative_path, charlie, next_move, grid, check_grid):
"""
Ensure we have a path to our tail from next move\n
Originally from https://github.com/noahspriggs/battlesnake-python\n
@param tentative_path -> Potential path to take\n
    @param charlie -> Own snake information\n
    @param next_move -> Own snake's next move\n
    @param grid -> Updated grid\n
    @param check_grid -> Grid used to check ahead\n
@return path -> The path if safe to take
"""
from .a_star import a_star
SAFE = 0
FOOD = 3
SNAKE = 1
DANGER = 5
path_length = len(tentative_path)
my_length = charlie.length+1
if path_length < my_length:
remainder = my_length - path_length
my_new_coords = list(reversed(tentative_path)) + charlie.coords[:remainder]
else:
my_new_coords = list(reversed(tentative_path))[:my_length]
if grid.get_cell(my_new_coords[0]) == FOOD:
my_new_coords.append(my_new_coords[-1])
for coord in charlie.coords:
check_grid.set_cell(coord, SAFE)
for coord in my_new_coords:
check_grid.set_cell(coord, SNAKE)
path_to_tail = a_star(next_move, my_new_coords[-1], check_grid, my_new_coords, [SNAKE, DANGER])
if path_to_tail:
return tentative_path
|
StarcoderdataPython
|
11322232
|
<reponame>preranaandure/wildlifecompliance<gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-03-26 02:29
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wildlifecompliance', '0031_wildlifelicenceactivity'),
]
operations = [
migrations.CreateModel(
name='DefaultActivity',
fields=[
('id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID')),
('activity',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='wildlifecompliance.WildlifeLicenceActivity')),
('activity_type',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='wildlifecompliance.WildlifeLicenceActivityType')),
],
),
migrations.AlterUniqueTogether(
name='defaultactivity',
unique_together=set(
[
('activity_type',
'activity')]),
),
]
|
StarcoderdataPython
|
1775473
|
import pytest
from utils.helpers import assert_equals
from starkware.starknet.compiler.compile import get_selector_from_name
@pytest.mark.asyncio
async def test_mint_NFTs(owner_factory, erc721_factory, minter_factory, game_factory):
#starknet = get_starknet
erc721 = erc721_factory
admin = owner_factory
minter = minter_factory
ogame = game_factory
# Submit NFT contract address to minter.
await admin.execute(minter.contract_address,
get_selector_from_name('setNFTaddress'),
[erc721.contract_address], 0).invoke()
# Mint 200 NFTs and assign them to minter.
await admin.execute(minter.contract_address,
get_selector_from_name('mintAll'),
[200, 1, 0], 1).invoke()
# Assert minte contract NFT balance is equal 200.
data = await admin.execute(erc721.contract_address,
get_selector_from_name('balanceOf'),
[minter.contract_address], 2).invoke()
assert_equals(data.result.response[0], 200)
# Assert admin can give game contract approval on NFT transfer.
await admin.execute(minter.contract_address,
get_selector_from_name('setNFTapproval'),
[ogame.contract_address, 1], 3).invoke()
data = await admin.execute(erc721.contract_address,
get_selector_from_name('isApprovedForAll'),
[minter.contract_address, ogame.contract_address], 4).invoke()
assert_equals(data.result.response[0], 1)
|
StarcoderdataPython
|
383271
|
# use part function of problem 290
class Solution:
def isIsomorphic(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
def get_p(string):
p = []
keys = []
for word in string:
if len(keys) == 0:
keys.append(word)
p.append('a')
else:
add_flag = True
for i in range(len(keys)):
if word == keys[i]:
p.append(p[i])
add_flag = False
break
if add_flag:
keys.append(word)
                        # assign the next unused pattern letter based on the number of
                        # distinct keys seen so far (chr(ord(p[-1]) + 1) breaks once a
                        # repeated key appears before a new one, e.g. "abac" vs "abab")
                        p.append(chr(ord('a') + len(keys) - 1))
return ''.join(p)
return get_p(s) == get_p(t)
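

# Quick sanity check (hedged example, not part of the original solution): get_p maps each
# string to a canonical pattern such as 'abb', so two strings compare equal exactly when
# their character patterns match.
if __name__ == '__main__':
    s = Solution()
    print(s.isIsomorphic("egg", "add"))    # expected: True  ('abb' == 'abb')
    print(s.isIsomorphic("foo", "bar"))    # expected: False ('abb' != 'abc')
    print(s.isIsomorphic("abab", "abac"))  # expected: False ('abab' != 'abac')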
|
StarcoderdataPython
|
11369786
|
#write a program to draw a pentagram
import turtle
wn = turtle.Screen()
gardner = turtle.Turtle()
gardner.shape("turtle")
for i in range(5):
gardner.forward(100)
gardner.right(144)
gardner.hideturtle()
wn.mainloop()
|
StarcoderdataPython
|
6573230
|
#!/usr/bin/python
import sys
maximum = 0
total = 0
Key1 = None
path1 = None
name_path1 = "http://www.the-associates.co.uk"
name_file = "favicon.ico"
for line in sys.stdin:
    mapping_data = line.strip().split("\t")
    if len(mapping_data) != 2:
        continue
    Key, fullPath = mapping_data
    if Key1 and Key1 != Key:
        # the previous key's group just ended: compare its count before resetting
        if total > maximum:
            maximum = total
            name_file = Key1
            name_path1 = path1
        total = 0
    Key1 = Key
    path1 = fullPath
    total += 1
# the final key group never triggers a key change, so check it after the loop
if Key1 and total > maximum:
    name_file = Key1
    name_path1 = path1
print name_file + '\t' + name_path1
|
StarcoderdataPython
|
5074383
|
<gh_stars>1-10
"""Creates and simulates a simple circuit.
"""
import cirq
import numpy as np
def main():
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.H(q0), cirq.CNOT(q0, q1), cirq.measure(q0, q1))
simulator = cirq.KnowledgeCompilationSimulator(circuit, dtype=np.complex64)
for _ in range(10):
result = simulator.run(circuit)
bits = result.measurements['0,1'][0]
assert bits[0] == bits[1]
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
11301125
|
from sqlalchemy.orm import Session
from api import schemas,models
from fastapi import HTTPException,status
from api.hashing import Hash
def create_admin(request:schemas.Admin,db:Session):
new_admin = models.Admin(name=request.name,email=request.email,password=<PASSWORD>(request.password))
db.add(new_admin)
db.commit()
db.refresh(new_admin)
return new_admin
def show_admin(id:int,db:Session):
admin = db.query(models.Admin).filter(models.Admin.id==id).first()
if not admin:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"User with the id {id} is not available")
return admin
|
StarcoderdataPython
|
5180235
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : Jul-12-19 16:37
# @Author : <NAME> (<EMAIL>)
# @Link : http://example.org
import os
def subset_binary(nums):
"""
    Enumerate subsets via binary masks.
    The all-zero mask (len(nums) zeros) selects no elements: the empty set.
    The all-one mask (len(nums) ones) selects every element: the set itself.
    Iterate b over 0 ~ 2^n - 1 and, for each mask, pick the elements whose bit is set.
"""
n = len(nums)
ans = []
for b in range(1 << n):
subset = []
i = 0
while b != 0:
print("b: %d" % b)
if b & 0x01:
# print("i: %d" % i)
# print("b: %d" % b)
subset.append(nums[i])
i += 1
b >>= 1
ans.append(subset)
return ans
class Solution:
def subsets(self, nums: list) -> list:
return subset_binary(nums)
def main():
nums = [1, 2, 3, 4]
nums = [1, 2, 3]
ans = subset_binary(nums)
print(ans)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3479263
|
import numpy as np
from scipy.special import i0, k0, i1, k1
import time
import pybie2d
from pybie2d.kernels.high_level.modified_helmholtz import Modified_Helmholtz_Kernel_Apply, Modified_Helmholtz_Kernel_Form
from pybie2d.misc.numba_special_functions import numba_k0, numba_k1
print('\n-- Testing numba special function implementation --\n')
# test the underlying numba implementations of i0, k0
x = np.linspace(0,100,10000)
y1 = k0(x)
y2 = numba_k0(x)
print('Timing scipy k0')
%timeit k0(x)
print('Timing numba k0')
%timeit numba_k0(x)
print('Max relative difference in k0: {:0.2e}'.format(np.abs((y1[1:]-y2[1:])/y1[1:]).max()))
y1 = k1(x)
y2 = numba_k1(x)
print('\nTiming scipy k1')
%timeit k1(x)
print('Timing numba k1')
%timeit numba_k1(x)
print('Max relative difference in k1: {:0.2e}'.format(np.abs((y1[1:]-y2[1:])/y1[1:]).max()))
"""
Demonstrate usage of the basic Modified Helmholtz kernels
Also timing/consistency checks
"""
def get_random(sh, dtype):
r = np.random.rand(*sh).astype(dtype)
if dtype is complex:
r += 1j*np.random.rand(*sh)
return r
dtype=float
ns = 2000
nt = 2000
test_self = False
helmk = 10.0
source = get_random([2, ns], float)
target = source if test_self else get_random([2, nt], float)
dipvec = get_random([2, ns], float)
charge = get_random([ns,], dtype)
dipstr = get_random([ns,], dtype)
print('\n-- Modified Helmholtz 2D Kernel Tests, Charge Only, No Derivatives --\n')
# using numba
print('Testing Numba (Apply)')
pot_numba = Modified_Helmholtz_Kernel_Apply(source, target, helmk, charge=charge, backend='numba')
time_numba = %timeit -o Modified_Helmholtz_Kernel_Apply(source, target, helmk, charge=charge, backend='numba')
# using FMM
print('Testing FMM (Apply)')
pot_fmm = Modified_Helmholtz_Kernel_Apply(source, target, helmk, charge=charge, backend='FMM')
time_fmm = %timeit -o Modified_Helmholtz_Kernel_Apply(source, target, helmk, charge=charge, backend='FMM')
# using numexpr
print('Testing Numexpr (Form)')
MAT = Modified_Helmholtz_Kernel_Form(source, target, helmk, ifcharge=True)
st = time.time()
MAT = Modified_Helmholtz_Kernel_Form(source, target, helmk, ifcharge=True)
time_numexpr_form = time.time() - st
pot_numexpr = MAT.dot(charge)
time_apply = %timeit -o MAT.dot(charge)
# print comparison
print('')
print('Maximum difference, potential, numba vs. FMM: {:0.1e}'.format(np.abs(pot_numba-pot_fmm).max()))
print('Maximum difference, potential, numba vs. Form: {:0.1e}'.format(np.abs(pot_numba-pot_numexpr).max()))
print('Maximum difference, potential, FMM vs. Form: {:0.1e}'.format(np.abs(pot_fmm-pot_numexpr).max()))
print('')
print('Time for numba apply (ms): {:0.2f}'.format(time_numba.average*1000))
print('Time for FMM apply (ms): {:0.2f}'.format(time_fmm.average*1000))
print('Time for numexpr form (ms): {:0.2f}'.format(time_numexpr_form*1000))
print('Time for preformed apply (ms): {:0.2f}'.format(time_apply.average*1000))
print('\n-- Modified Helmholtz 2D Kernel Tests, Dipole Only, No Derivatives --\n')
# using numba
print('Testing Numba (Apply)')
pot_numba = Modified_Helmholtz_Kernel_Apply(source, target, helmk, dipstr=dipstr, dipvec=dipvec, backend='numba')
time_numba = %timeit -o Modified_Helmholtz_Kernel_Apply(source, target, helmk, dipstr=dipstr, dipvec=dipvec, backend='numba')
# using FMM
print('Testing FMM (Apply)')
pot_fmm = Modified_Helmholtz_Kernel_Apply(source, target, helmk, dipstr=dipstr, dipvec=dipvec, backend='FMM')
time_fmm = %timeit -o Modified_Helmholtz_Kernel_Apply(source, target, helmk, dipstr=dipstr, dipvec=dipvec, backend='FMM')
# using numexpr
print('Testing Numexpr (Form)')
MAT = Modified_Helmholtz_Kernel_Form(source, target, helmk, ifdipole=True, dipvec=dipvec)
st = time.time()
MAT = Modified_Helmholtz_Kernel_Form(source, target, helmk, ifdipole=True, dipvec=dipvec)
time_numexpr_form = time.time() - st
pot_numexpr = MAT.dot(dipstr)
time_apply = %timeit -o MAT.dot(dipstr)
# print comparison
print('')
print('Maximum difference, potential, numba vs. FMM: {:0.1e}'.format(np.abs(pot_numba-pot_fmm).max()))
print('Maximum difference, potential, numba vs. Form: {:0.1e}'.format(np.abs(pot_numba-pot_numexpr).max()))
print('Maximum difference, potential, FMM vs. Form: {:0.1e}'.format(np.abs(pot_fmm-pot_numexpr).max()))
print('')
print('Time for numba apply (ms): {:0.2f}'.format(time_numba.average*1000))
print('Time for FMM apply (ms): {:0.2f}'.format(time_fmm.average*1000))
print('Time for numexpr form (ms): {:0.2f}'.format(time_numexpr_form*1000))
print('Time for preformed apply (ms): {:0.2f}'.format(time_apply.average*1000))
print('\n-- Modified Helmholtz 2D Kernel Tests, Charge and Dipole, No Derivatives --\n')
# using numba
print('Testing Numba (Apply)')
pot_numba = Modified_Helmholtz_Kernel_Apply(source, target, helmk, charge=charge, dipstr=dipstr, dipvec=dipvec, backend='numba')
time_numba = %timeit -o Modified_Helmholtz_Kernel_Apply(source, target, helmk, charge=charge, dipstr=dipstr, dipvec=dipvec, backend='numba')
# using FMM
print('Testing FMM (Apply)')
pot_fmm = Modified_Helmholtz_Kernel_Apply(source, target, helmk, charge=charge, dipstr=dipstr, dipvec=dipvec, backend='FMM')
time_fmm = %timeit -o Modified_Helmholtz_Kernel_Apply(source, target, helmk, charge=charge, dipstr=dipstr, dipvec=dipvec, backend='FMM')
# using numexpr
print('Testing Numexpr (Form)')
st = time.time()
MATc = Modified_Helmholtz_Kernel_Form(source, target, helmk, ifcharge=True)
MATd = Modified_Helmholtz_Kernel_Form(source, target, helmk, ifdipole=True, dipvec=dipvec)
time_numexpr_form = time.time() - st
pot_numexpr = MATc.dot(charge) + MATd.dot(dipstr)
time_apply = %timeit -o MATc.dot(charge) + MATd.dot(dipstr)
# print comparison
print('')
print('Maximum difference, potential, numba vs. FMM: {:0.1e}'.format(np.abs(pot_numba-pot_fmm).max()))
print('Maximum difference, potential, numba vs. Form: {:0.1e}'.format(np.abs(pot_numba-pot_numexpr).max()))
print('Maximum difference, potential, FMM vs. Form: {:0.1e}'.format(np.abs(pot_fmm-pot_numexpr).max()))
print('')
print('Time for numba apply (ms): {:0.2f}'.format(time_numba.average*1000))
print('Time for FMM apply (ms): {:0.2f}'.format(time_fmm.average*1000))
print('Time for numexpr form (ms): {:0.2f}'.format(time_numexpr_form*1000))
print('Time for preformed apply (ms): {:0.2f}'.format(time_apply.average*1000))
|
StarcoderdataPython
|
8009923
|
import re
import json
import praw
import requests
import configparser
class accountLookup:
def __init__(self, username):
self.username = username
self.playerInfoApi = "https://playerdb.co/api/player/minecraft/"
self.header = {'User-agent': f'This code is associated with the reddit bot /u/{self.username}'}
def lookup(self, inputID):
# If the username/UUID is valid look it up
if self.validateInput(inputID):
playerFound, data = self.sendApiRequest(inputID)
if playerFound:
return self.genFoundReply(data)
else:
return self.genNotFoundReply(inputID)
else:
return self.genInvalidReply(inputID)
# Test to see if the input provided is a valid MC account name or UUID
def validateInput(self, inputID):
# TODO: Minecraft usernames cannot have a - but UUIDs do. Regex passes - regardless of input.
# TODO: Condition ignores that MC names can't be longer than 16 chars
if len(inputID) > 36 or len(inputID) < 4 or bool(re.search("[^A-Za-z0-9_-]+", inputID)):
return False
else:
return True
def genFoundReply(self, data):
reply = f"{data['data']['player']['username']} has been found! \n" \
f"Their account UUID is {data['data']['player']['id']} \n" \
f"There player head can be found [here]({data['data']['player']['avatar']}). \n" \
"This comment was sent by a bot!"
return reply
def genInvalidReply(self, inputID):
reply = f"{inputID} is not a valid UUID/username. \n" \
"Please check the spelling of the username and try again. \n"\
f"Reply with '\\u\\{self.username} !help' for a quick help guide. \n" \
"This comment was sent by a bot!"
return reply
def genNotFoundReply(self, inputID):
reply = f"{inputID} could not be found. \n" \
"Please check the spelling of the username and try again. \n" \
f"Reply with '\\u\\{self.username} !help' for a quick help guide. \n"\
"This comment was sent by a bot!"
return reply
def sendApiRequest(self, inputID):
url = self.playerInfoApi + inputID
# TODO: Encase request in try except block in case the api is down
r = requests.get(url, headers=self.header)
data = json.loads(r.text)
        playerFound = data['code'] == 'player.found'
return (playerFound, data)
def sendHelp(self):
reply = "To see this comment use the command '!help' \n" \
"To use this bot simply: \n" \
f"u/{self.username} PlayerName \n" \
f"u/{self.username} UUID \n" \
"Examples: \n" \
f"u/{self.username} Notch \n" \
f"u/{self.username} 069a79f4-44e9-4726-a5be-fca90e38aaf5 \n" \
"This comment was sent by a bot!"
return reply
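
# Hedged sketch (not part of the original bot): one possible way to address the two TODOs
# in validateInput, assuming the commonly cited Mojang rules that usernames are 3-16
# characters of [A-Za-z0-9_] and that UUIDs are 32 hex digits with optional dashes.
def validate_minecraft_id_strict(input_id):
    if re.fullmatch(r"[A-Za-z0-9_]{3,16}", input_id):
        return True  # plausible username
    uuid_pattern = r"[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}"
    if re.fullmatch(uuid_pattern, input_id):
        return True  # plausible UUID, with or without dashes
    return False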
if __name__ == '__main__':
# Open up the config file with the reddit bot login info
config = configparser.ConfigParser()
config.read('config.ini')
try:
r = praw.Reddit(username=config['LOGIN']['username'],
password=config['LOGIN']['password'],
client_id=config['LOGIN']['client_id'],
client_secret=config['LOGIN']['client_secret'],
user_agent=config['LOGIN']['user_agent'])
except KeyError:
print("The configuration file is not set up correctly")
quit()
username = config['LOGIN']['username']
# initialize that account lookup class
a = accountLookup(username)
# Get the reddit inbox stream
messages = r.inbox.stream()
# Iterate through the items in the stream
for message in messages:
try:
# split up each word in the message body
splitmsg = message.body.split()
# Look for messages that are both unread and reddit mentions
if message in r.inbox.mentions() and message in r.inbox.unread():
# If the mention is from the bots account mark it as read
if message.author.name == username:
message.mark_read()
# If the message is a help request send it
elif len(splitmsg) == 1 or len(splitmsg) > 2 or splitmsg[1] == '!help':
message.reply(a.sendHelp())
print("Help sent")
# Lookup the requested MC account
else:
message.reply(a.lookup(splitmsg[1]))
print("Main message Sent")
message.mark_read()
# If praw throws an exception just catch it
except praw.exceptions.APIException:
print("Probably hit a rate limit")
|
StarcoderdataPython
|
5035820
|
<gh_stars>1-10
"""
Face related processing class:
1. Face alignment
2. Face landmarks
3. ...
"""
import numpy as np
import dlib
from utils.proc_vid import parse_vid
from utils.face_utils import shape_to_np
from tqdm import tqdm
class FaceProc(object):
def __init__(self):
# Set up dlib face detector and landmark estimator
self.landmark_estimatior= dlib.shape_predictor("models/shape_predictor_68_face_landmarks.dat")
self.face_detector = dlib.get_frontal_face_detector()
def get_landmarks(self, img):
# return 68X2 landmarks
img_rgb = img[:, :, (2 ,1, 0)]
rects = self.face_detector(np.uint8(img_rgb))
if(len(rects)==0): return None
marks = self.landmark_estimatior(img_rgb, rects[0])
marks = shape_to_np(marks)
return marks
def get_all_face_rects(self, img):
img_rgb = img[:, :, (2 ,1, 0)]
rects = self.face_detector(np.uint8(img_rgb))
if(len(rects)==0):
return None
return rects
def get_landmarks_all_faces(self, img, rects):
all_landmarks = []
for rect in rects:
img_rgb = img[:, :, (2, 1, 0)]
marks = self.landmark_estimatior(img_rgb, rect)
marks = shape_to_np(marks)
all_landmarks.append(marks)
return all_landmarks
def get_landmarks_vid(self, video_path):
print('vid_path: ' + video_path)
imgs, frame_num, fps, width, height = parse_vid(video_path)
mark_list = []
for i, img in enumerate(tqdm(imgs)):
mark = self.get_landmarks(img)
mark_list.append(mark)
return mark_list
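
# Hedged usage sketch (assumes OpenCV is installed and that the dlib landmark model file
# referenced above exists on disk; the image path below is hypothetical):
#
#   import cv2
#   proc = FaceProc()
#   img = cv2.imread('sample_face.jpg')   # BGR image, as expected by get_landmarks
#   marks = proc.get_landmarks(img)       # 68x2 array of landmarks, or None if no face is found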
|
StarcoderdataPython
|
6442933
|
"""
Functions and decorators supporting the {{app_name}} command line interface.
"""
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
_ARGS = None
PARSER = ArgumentParser()
PARSER.add_argument("--log-level", type=str,
help="level of detail during logging",
choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
default="INFO")
PARSER.add_argument("--build-dir",
help="output directory for build targets.",
default="./build")
PARSER.add_argument("-c", "--config-file", type=str, default=None,
help="configuration file to load. May be repeated.",)
PARSER.add_argument("--config-string", type=str, default=None,
help="configuration string to load. May be repeated. "
+ "Loaded after configuration files.",)
PARSER.add_argument("--out-file", type=str, default=None,
help="Output file (default: stdout)")
COMMANDS_PARSER = PARSER.add_subparsers(title="commands", dest="command")
def init_global_args(args):
"""Initialize global arguments"""
global _ARGS # pylint: disable=global-statement
_ARGS = args
def get_args():
"""Retrieve global arguments"""
global _ARGS # pylint: disable=global-statement
return _ARGS
def command(args=[], parent=COMMANDS_PARSER):
# pylint: disable=dangerous-default-value
"""
Decorator for functions that act as CLI commands.
Functions using this decorator will be added to a global parser that is used
when invoking the module via __main__.
"""
def decorator(func):
parser = parent.add_parser(
func.__name__,
description=func.__doc__,
formatter_class=ArgumentDefaultsHelpFormatter,
)
for _arg in args:
parser.add_argument(*_arg[0], **_arg[1])
parser.set_defaults(func=func)
return func
return decorator
def task(args=[]):
# pylint: disable=dangerous-default-value
"""
Tasks are standalone functions that have a corresponding argument parser.
Unlike commands, tasks are not integrated into the application's
    command parsing tree. They are intended to be invoked programmatically.
This is especially useful for training functions, where multiple
`train(...)` functions would inevitably lead to naming collisions, but each
defines its own set of hyperparameters.
A task function's parser may be accessed via its `parser` attribute.
"""
def decorator(func):
parser = ArgumentParser(
func.__name__,
description=func.__doc__,
formatter_class=ArgumentDefaultsHelpFormatter,
)
for _arg in args:
parser.add_argument(*_arg[0], **_arg[1])
func.parser = parser
return func
return decorator
def arg(*args, **kwargs):
"""Utility function used in defining command args."""
return ([*args], kwargs)
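
# Hedged usage sketch (hypothetical command; assumes this module is imported as `cli`):
#
#   @cli.command(args=[cli.arg("--name", type=str, default="world", help="who to greet")])
#   def greet(args):
#       print(f"hello {args.name}")
#
# A __main__ entry point would then call PARSER.parse_args(), pass the result to
# init_global_args(), and dispatch to args.func(args); that wiring lives outside this file.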
|
StarcoderdataPython
|
6409582
|
"""
Author: <NAME>
Date: 09-18-2020
Description: Script for visualizing magnetic bottles. An assumption is made that bottles are driven by locations where Br changes sign. Current use case is to check coil-shifted DS map for GA requested changes.
"""
import os
import numpy as np
import pandas as pd
import lmfit as lm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# user configs file
from user_configs import *
# set plot configs
plt.rcParams['figure.figsize'] = [12, 8] # larger figures
plt.rcParams['axes.grid'] = True # turn grid lines on
plt.rcParams['axes.axisbelow'] = True # put grid below points
plt.rcParams['grid.linestyle'] = '--' # dashed grid
plt.rcParams.update({'font.size': 12.0}) # increase plot font size
# check if proper directories exist
def check_dirs(outdir=outdir):
print('Checking directories')
to_check = ['plots/', 'plots/bottle_viz/', 'pickles/']
for tc in to_check:
if not os.path.exists(outdir+tc):
os.mkdir(outdir+tc)
# read raw map information
# units = [mm, mm, mm, tesla, tesla, tesla]
def read_Bmap_txt(filename=mapdir+mapfile):
print('Reading raw data')
df = pd.read_csv(filename, header=None, names=['X', 'Y', 'Z', 'Bx', 'By', 'Bz'],
delim_whitespace=True, skiprows=4)
print(df.head())
return df
# shift and calculate
def calculate_Bmap_extras(df, length_scale=1., field_scale=1.):
print('Calculating extras for dataframe')
df.eval('X = X + 3896', inplace=True) # want x=y=0 along magnet axis
df.eval('R = (X**2 + Y**2)**(1/2)', inplace=True) # radius
df.eval('Phi = arctan2(Y,X)', inplace=True) # phi
df.eval('Br = Bx*cos(Phi)+By*sin(Phi)', inplace=True) # calculate Br for fitting
df.eval('Bphi = -Bx*sin(Phi)+By*cos(Phi)', inplace=True) # Bphi calculated for completion...not needed
# rescale positions
df.eval(f'X = {length_scale} * X', inplace=True)
df.eval(f'Y = {length_scale} * Y', inplace=True)
df.eval(f'Z = {length_scale} * Z', inplace=True)
df.eval(f'R = {length_scale} * R', inplace=True)
# rescalse fields
df.eval(f'Bx = {field_scale} * Bx', inplace=True)
df.eval(f'By = {field_scale} * By', inplace=True)
df.eval(f'Bz = {field_scale} * Bz', inplace=True)
df.eval(f'Br = {field_scale} * Br', inplace=True)
df.eval(f'Bphi = {field_scale} * Bphi', inplace=True)
print(df.head())
return df
'''
# query proper region
def query_tracker(df):
print('Query for tracker region')
# region requested by <NAME> -- Tracker region
rmax = 650 # mm
zcent = 10175 # mm
zpm = 1500 # mm
# query
df = df.query(f'R <= {rmax} & Z >= {zcent-zpm} & Z <= {zcent+zpm}')
df.reset_index(drop=True, inplace=True)
return df
'''
# pickle/unpickle data
def write_pickle_df(df, filename=outdir+'pickles/'+mapfile_pkl):
print('Saving pickle')
df.to_pickle(filename)
def read_pickle_df(filename=outdir+'pickles/'+mapfile_pkl):
print('Loading pickle')
return pd.read_pickle(filename)
'''
# model function
def maxwell_gradient(r, z, **params):
Bz = params['dBzdz'] * z + params['B0']
Br = - r / 2 * params['dBzdz']
return np.concatenate([Bz, Br])
def Bz_gradient(r, z, **params):
Bz = params['dBzdz'] * z + params['B0']
return Bz
# fit data
def run_fit_maxwell(df, model_func=maxwell_gradient):
print('Running fit')
model = lm.Model(model_func, independent_vars=['r','z'])
params = lm.Parameters()
params.add('dBzdz', value=0)
params.add('B0', value=0)
samples = np.concatenate([df.Bz.values, df.Br.values])
result = model.fit(samples, r=df.R.values, z=df.Z.values, params=params,
method='least_squares', fit_kws={'loss': 'linear'})
result_array = result.eval().reshape((2,-1))
df.loc[:, 'Bz_fit'] = result_array[0]
df.loc[:, 'Br_fit'] = result_array[1]
df.eval('Bz_res = Bz - Bz_fit', inplace=True)
df.eval('Br_res = Br - Br_fit', inplace=True)
df.eval('Bz_res_rel = (Bz - Bz_fit)/Bz', inplace=True)
df.eval('Br_res_rel = (Br - Br_fit)/Br', inplace=True)
df.to_pickle(outdir+'pickles/df_results.pkl')
print(result.fit_report())
return result, df
def run_fit_Bz(df, model_func=Bz_gradient):
print('Running fit')
model = lm.Model(model_func, independent_vars=['r','z'])
params = lm.Parameters()
params.add('dBzdz', value=0)
params.add('B0', value=0)
result = model.fit(df.Bz.values, r=df.R.values, z=df.Z.values, params=params,
method='least_squares', fit_kws={'loss': 'linear'})
result_array = result.eval()
df.loc[:, 'Bz_fit'] = result_array
df.loc[:, 'Br_fit'] = -result.params['dBzdz'].value / 2 * df['R']
df.eval('Bz_res = Bz - Bz_fit', inplace=True)
df.eval('Br_res = Br - Br_fit', inplace=True)
df.eval('Bz_res_rel = (Bz - Bz_fit)/Bz', inplace=True)
df.eval('Br_res_rel = (Br - Br_fit)/Br', inplace=True)
df.to_pickle(outdir+'pickles/df_results.pkl')
print(result.fit_report())
return result, df
'''
# def write_result(result, filename=outdir+'fit_result.txt'):
# with open(filename, 'w+') as f:
# f.write(result.fit_report())
# plotting
'''
def make_plots(df, result):
# wireframes
df0 = df.query('Y==0')
df0.sort_values(by=['X', 'Z'])
xs = df0.X.unique()
zs = df0.Z.unique()
elevs = [27, 40]
azims = [21, 73]
for B, el, az in zip(['Bz', 'Br'], elevs, azims):
X = df0['X'].values.reshape((len(xs), len(zs)))
Z = df0['Z'].values.reshape((len(xs), len(zs)))
B_fit = df0[f'{B}_fit'].values.reshape((len(xs), len(zs)))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(df0.X, df0.Z, df0[B], c='black', s=1, label='Data (Mau13)')
ax.plot_wireframe(X, Z, B_fit, color='green', label='Fit (Linear Gradient)')
ax.view_init(elev=el, azim=az)
ax.set_xlabel('X [mm]', labelpad=30)
ax.set_ylabel('Z [mm]', labelpad=30)
ax.set_zlabel(f'{B} [Tesla]', labelpad=30)
ax.xaxis.set_rotate_label(False)
ax.yaxis.set_rotate_label(False)
ax.zaxis.set_rotate_label(False)
plt.legend()
fig.suptitle(f'{B} vs. X, Z for Y==0')
fig.tight_layout(rect=(0,0.04,1,1))
fig.savefig(outdir+f'plots/{B}_vs_X_Z_Y=0.pdf')
fig.savefig(outdir+f'plots/{B}_vs_X_Z_Y=0.png')
# residual histograms
plt.rcParams['figure.figsize'] = [16, 8] # larger figures
plt.rcParams.update({'font.size': 18.0}) # increase plot font size
label_temp = r'$\mu = {0:.3E}$'+ '\n' + 'std' + r'$= {1:.3E}$' + '\n' + 'Integral: {2}\n' + 'Underflow: {3}\nOverflow: {4}'
N_bins = 200
lsize = 16
for res in ['res', 'res_rel']:
if res == 'res':
xlabel_z = r'$\Delta B_z$'+' [Tesla]'
xlabel_r = r'$\Delta B_r$'+' [Tesla]'
title_ = ''
fname_ = ''
scale = 'linear'
xmin_z = df[[f'Bz_{res}',f'Br_{res}']].min().min()
xmax_z = df[[f'Bz_{res}',f'Br_{res}']].max().max()+1e-5
xmin_r = xmin_z
xmax_r = xmax_z
else:
xlabel_z = r'$\Delta B_z / B_z$'
xlabel_r = r'$\Delta B_r / B_r$'
title_ = 'Relative '
fname_ = '_relative'
scale = 'log'
xmin_z = -1e-2
xmax_z = 1e-2
xmin_r = -100
xmax_r = 100
under_z = (df[f'Bz_{res}'] < xmin_z).sum()
over_z = (df[f'Bz_{res}'] >= xmax_z).sum()
under_r = (df[f'Br_{res}'] < xmin_r).sum()
over_r = (df[f'Br_{res}'] >= xmax_r).sum()
bins_z = np.linspace(xmin_z, xmax_z, N_bins+1)
bins_r = np.linspace(xmin_r, xmax_r, N_bins+1)
fig, axs = plt.subplots(1, 2)
axs[0].hist(df[f'Bz_{res}'], bins=bins_z, label=label_temp.format(df[f'Bz_{res}'].mean(), df[f'Bz_{res}'].std(), len(df)-under_z-over_z, under_z, over_z))
axs[0].set(xlabel=xlabel_z, ylabel="Count", yscale=scale)
axs[0].legend(prop={'size': lsize})
axs[1].hist(df[f'Br_{res}'], bins=bins_r, label=label_temp.format(df[f'Br_{res}'].mean(), df[f'Br_{res}'].std(), len(df)-under_r-over_r, under_r, over_r))
axs[1].set(xlabel=xlabel_r, ylabel="Count", yscale=scale)
axs[1].legend(prop={'size': lsize})
title_main=f'Linear Gradient Tracker Region {title_}Residuals'
fig.suptitle(title_main)
fig.tight_layout(rect=[0,0,1,1])
plot_file = outdir+f'plots/B{fname_}_residuals_hist'
fig.savefig(plot_file+'.pdf')
fig.savefig(plot_file+'.png')
'''
def make_plots(df, query, clips, names, x='Z', y='X', mapname='Mau13 (coil shift)', fname='coilshift'):
df_ = df.query(query).copy()
df_.sort_values(by=[x, y], inplace=True)
Lx = len(df_[x].unique())
Ly = len(df_[y].unique())
X = df_[x].values.reshape((Lx, Ly))
Y = df_[y].values.reshape((Lx, Ly))
# Lz = len(df_.Z.unique())
# Lx = len(df_.X.unique())
# X = df_.Z.values.reshape((Lz, Lx))
# Y = df_.X.values.reshape((Lz, Lx))
for clip, name in zip(clips, names):
if clip is None:
clip = np.max(np.abs(df_['Br']))
if clip == -1:
# C = (df_['Br'] > 0).values.reshape((Lz, Lx))
C = (df_['Br'] > 0).values.reshape((Lx, Ly))
else:
# C = np.clip(df_['Br'].values, -clip, clip).reshape((Lz, Lx))
C = np.clip(df_['Br'].values, -clip, clip).reshape((Lx, Ly))
fig = plt.figure()
p = plt.pcolormesh(X, Y, C, shading='auto')
cb = plt.colorbar(p)
cb.ax.set_ylabel('Br [Gauss]')
plt.xlabel(f'{x} [m]')
plt.ylabel(f'{y} [m]')
plt.title(r'$B_r$'+ f' in {mapname} DS: ({name})\n{query}')
fig.tight_layout(rect=[0,0,1,1])
plot_file = outdir+f'plots/bottle_viz/{fname}_Br_vs_X_vs_Z_clip-{clip}_query-{query}'
fig.savefig(plot_file+'.pdf')
fig.savefig(plot_file+'.png')
if __name__ == '__main__':
# check if proper directories exist
check_dirs()
# calculate data from raw file or pickle from previous calculation
pickle_exists = os.path.exists(outdir+'pickles/'+mapfile_pkl)
if pickle_exists and usepickle:
df = read_pickle_df()
else:
df = read_Bmap_txt()
df = calculate_Bmap_extras(df, length_scale=1e-3, field_scale=1e4)
write_pickle_df(df)
# making plots
# make_plots(df, '(Y==0.) & (R <= 1.)', clips=[None, 1e3, 1e2, 1e1, -1],
# make_plots(df, '(X==0.) & (R <= 1.)', clips=[None, 1e3, 1e2, 1e1, -1],
make_plots(df, '(Z==9.946) & (X <= 1.) & (X >= -1.) & (Y <= 1.) & (Y >= -1.)', clips=[None, 1e3, 1e2, 1e1, -1],
names=['Full Scale', r'$|B_r| \leq 1000$ Gauss', r'$|B_r| \leq 100$ Gauss', r'$|B_r| \leq 10$ Gauss', r'$B_r$ positive/negative'], x='X', y='Y',
# names=['Full Scale', r'$|B_r| \leq 1000$ Gauss', r'$|B_r| \leq 100$ Gauss', r'$|B_r| \leq 10$ Gauss', r'$B_r$ positive/negative'], x='Z', y='Y',
mapname='Mau13 (coil shift, no bus)', fname='coilshift_nobus')
# mapname='Mau13', fname='mau13')
# mapname='Mau13 (coil shift)', fname='coilshift')
|
StarcoderdataPython
|
11386283
|
<reponame>w0rp/w0rpzone
import json
from io import BytesIO
from unittest import mock
from django.core.files.base import File
from django.core.files.storage import FileSystemStorage
from django.test import TestCase
from django.urls import reverse as url_reverse
from .util import create_author
class UploadTestCase(TestCase):
def test_get_response_as_anonymous_user(self):
response = self.client.get(url_reverse('upload-file'))
self.assertEqual(response.status_code, 403)
self.assertEqual(response.content, b'{}')
def test_post_response_as_anonymous_user(self):
response = self.client.post(url_reverse('upload-file'))
self.assertEqual(response.status_code, 403)
self.assertEqual(response.content, b'{}')
def test_get_response_as_authenticated_user(self):
author = create_author()
self.client.login(username=author.username, password="<PASSWORD>")
response = self.client.get(url_reverse('upload-file'))
self.assertEqual(response.status_code, 405)
self.assertEqual(response.content, b'{}')
def test_post_without_file(self):
author = create_author()
self.client.login(username=author.username, password="<PASSWORD>")
response = self.client.post(url_reverse('upload-file'))
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content, b'{}')
@mock.patch.object(FileSystemStorage, 'url')
@mock.patch.object(FileSystemStorage, 'save')
def test_post_valid_file(self, save_mock, url_mock):
author = create_author()
self.client.login(username=author.username, password="<PASSWORD>")
upload = {'filename': None, 'data': None}
def save_side_effect(filename, file_obj):
upload['filename'] = filename
upload['data'] = file_obj.read()
return 'New Filename'
save_mock.side_effect = save_side_effect
url_mock.return_value = '/media/foobar'
response = self.client.post(url_reverse('upload-file'), {
'file': File(BytesIO(b'123'), 'My Filename'),
})
self.assertEqual(response.status_code, 201)
self.assertEqual(save_mock.call_count, 1)
self.assertEqual(upload['filename'], 'My Filename')
self.assertEqual(upload['data'], b'123')
self.assertEqual(url_mock.call_count, 1)
self.assertEqual(url_mock.call_args, mock.call('New Filename'))
self.assertEqual(
response.content,
json.dumps({"url": "/media/foobar"}).encode('utf-8'),
)
|
StarcoderdataPython
|
9743047
|
<reponame>scil/sqlalchemy-mixins-for-starlette
# noinspection PyPep8Naming
class classproperty(object):
"""
@property for @classmethod
taken from http://stackoverflow.com/a/13624858
"""
def __init__(self, fget):
self.fget = fget
def __get__(self, owner_self, owner_cls):
return self.fget(owner_cls)
# class classproperty_with_cache(object):
# """
# @property for @classmethod
# taken from http://stackoverflow.com/a/13624858
# """
#
# def __init__(self, fget):
# self.fget = fget
# self.cache = None
#
# def __get__(self, owner_self, owner_cls):
# if self.cache is None:
# self.cache = self.fget(owner_cls)
# return self.cache
|
StarcoderdataPython
|
12802224
|
<filename>oschool/teachings/urls.py
from django.urls import path
from .views import (
SubjectCreateView,
SubjectDetailView,
SubjectDeleteView,
SubjectListView,
LessonListView,
LessonCreateView,
LessonDetailView,
LessonDeleteView,
)
app_name = 'teachings'
urlpatterns = [
path("subjects/", view=SubjectListView.as_view(), name='subject-list'),
path("subjects/create/", view=SubjectCreateView.as_view(), name='subject-create'),
path("subjects/detail/<slug:subject_slug>/", view=SubjectDetailView.as_view(), name='subject-detail'),
path("subjects/delete/<slug:subject_slug>/", view=SubjectDeleteView.as_view(), name='subject-delete'),
path("lessons/", view=LessonListView.as_view(), name='lesson-list'),
path("lessons/create/", view=LessonCreateView.as_view(), name='lesson-create'),
path("lessons/detail/<slug:lesson_slug>/", view=LessonDetailView.as_view(), name='lesson-detail'),
path("lessons/delete/<slug:lesson_slug>/", view=LessonDeleteView.as_view(), name='lesson-delete'),
]
|
StarcoderdataPython
|
162529
|
<filename>nlabel/importers/csv.py<gh_stars>1-10
import csv
import mmap
import codecs
from nlabel import NLP, Slice
from nlabel.nlp.core import Text
from .base import Importer as AbstractImporter, Selection
from cached_property import cached_property
from pathlib import Path
from tqdm import tqdm
from typing import List, Union, Any
def _count_data_rows(f):
lines = 0
with mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ) as mm:
with codecs.getreader("utf-8")(mm) as text:
last_pos = 0
with tqdm(desc="counting lines", total=mm.size(), unit_scale=True) as pbar:
for _ in csv.reader(iter(text.readline, '')):
lines += 1
pos = mm.tell()
pbar.update(pos - last_pos)
last_pos = pos
if lines > 0:
lines -= 1 # exclude header
return lines
class ByKeySelection:
def __init__(self, external_keys):
if not external_keys:
self._keys = []
self._select = {}
else:
keys = sorted(external_keys[0].keys())
self._keys = keys
if not all(sorted(x.keys()) == keys for x in external_keys):
raise RuntimeError("inhomogenous keys not supported")
self._select = set(
tuple([xk[k] for k in keys])
for xk in external_keys)
def __call__(self, i, row):
return tuple([row[k] for k in self._keys]) in self._select
class ByIndexSelection:
def __init__(self, indices):
self._indices = set(indices)
def __call__(self, i, row):
return i in self._indices
class BySliceSelection:
def __init__(self, arg):
self._slice = arg if isinstance(arg, Slice) else Slice(arg)
def __call__(self, i, row):
return self._slice(i)
class Filter:
def __init__(self, fs):
self._i = [0] * len(fs)
self._fs = fs
def __call__(self, row):
for j, f in enumerate(self._fs):
ok = f(self._i[j], row)
self._i[j] += 1
if not ok:
return False
return True
def _make_filter(selection: Selection, keys):
if selection is None:
return lambda row: True
else:
constructor = {
'by_key': ByKeySelection,
'by_index': ByIndexSelection,
'by_slice': BySliceSelection
}
fs = []
for f_name, f_arg in selection.filters:
fs.append(constructor[f_name](f_arg))
return Filter(fs)
class Importer(AbstractImporter):
def __init__(self, csv_instance, nlp: Union[NLP, Any], keys, text, selection: Selection = None):
self._csv = csv_instance
self._keys = sorted(keys)
self._text_key = text
self._csv_path = csv_instance.path
self._filter = _make_filter(selection, keys)
super().__init__(nlp, self._csv.path)
def _items(self):
with open(self._csv_path, "r") as f:
n_rows = self._csv.num_rows
reader = csv.DictReader(f)
for i, row in enumerate(tqdm(reader, total=n_rows, desc=f"processing {self._csv_path}")):
if not self._filter(row):
continue
if len(self._keys) == 1:
external_key = row[self._keys[0]]
else:
external_key = dict((k, row[k]) for k in self._keys)
text = row[self._text_key]
meta = dict((k, v) for k, v in row.items() if k != self._text_key)
yield Text(
text=text,
external_key=external_key,
meta=meta)
class CSV:
def __init__(self, csv_path: Union[str, Path], keys: List[str], text: str = 'text'):
self._csv_path = Path(csv_path)
self._keys = keys
self._text = text
@property
def path(self):
return self._csv_path
@cached_property
def num_rows(self):
with open(self._csv_path, "r") as f:
return _count_data_rows(f)
def importer(self, nlp: Union[NLP, Any], selection: Selection = None):
return Importer(self, nlp, self._keys, self._text, selection=selection)
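
# Hedged usage sketch (hypothetical file and column names; assumes an NLP instance that is
# compatible with the Importer above):
#
#   csv_source = CSV("documents.csv", keys=["doc_id"], text="body")
#   importer = csv_source.importer(nlp)  # optionally pass a Selection to restrict rows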
|
StarcoderdataPython
|
8060056
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExpressRouteServiceProviderBandwidthsOffered(Model):
"""Contains bandwidths offered in ExpressRouteServiceProvider resources.
:param offer_name: The OfferName.
:type offer_name: str
:param value_in_mbps: The ValueInMbps.
:type value_in_mbps: int
"""
_attribute_map = {
'offer_name': {'key': 'offerName', 'type': 'str'},
'value_in_mbps': {'key': 'valueInMbps', 'type': 'int'},
}
def __init__(self, **kwargs):
super(ExpressRouteServiceProviderBandwidthsOffered, self).__init__(**kwargs)
self.offer_name = kwargs.get('offer_name', None)
self.value_in_mbps = kwargs.get('value_in_mbps', None)
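
# Hedged usage example (illustrative values only):
#
#   offered = ExpressRouteServiceProviderBandwidthsOffered(offer_name="50Mbps", value_in_mbps=50)
#   offered.value_in_mbps  # -> 50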
|
StarcoderdataPython
|
157827
|
<filename>crud-flask-demo/service/test/exception.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Exceptions needed by the unit tests
# wencan
# 2019-04-23
from ..abcs import NoRowsAbstractException
__all__ = ("NoRowsForTest")
class NoRowsForTest(NoRowsAbstractException):
'''not found'''
pass
|
StarcoderdataPython
|
1722553
|
import unittest
from deployment.research_field_classifier import ResearchFieldClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
class TestResearchFieldClassifier(unittest.TestCase):
def test_load_embedding(self):
classifier = ResearchFieldClassifier()
self.assertTrue(hasattr(classifier.abstract_path, 'embedder'))
self.assertTrue(hasattr(classifier.title_path, 'embedder'))
tfidf = classifier._load_embedding(classifier.title_path['embedder'])
self.assertTrue(type(tfidf) == TfidfVectorizer)
tfidf = classifier._load_embedding(classifier.abstract_path['embedder'])
self.assertTrue(type(tfidf) == TfidfVectorizer)
def test_load_model(self):
classifier = ResearchFieldClassifier()
self.assertTrue(hasattr(classifier.abstract_path, 'model'))
model = classifier._load_model(classifier.title_path['model'])
self.assertTrue(type(model) == SGDClassifier)
model = classifier._load_model(classifier.abstract_path['model'])
self.assertTrue(type(model) == SGDClassifier)
def test_load_labels(self):
classifier = ResearchFieldClassifier()
self.assertTrue(hasattr(classifier.title_path, 'label'))
labels = classifier._load_labels(classifier.title_path['label'])
self.assertTrue(type(labels) == dict)
def test_predict_field(self):
title = 'A Neural Conversation Generation Model via Equivalent Shared Memory Investigation'
abstract = 'Conversation generation as a challenging task in Natural Language Generation (NLG)'
classifier = ResearchFieldClassifier()
self.assertTrue(type(classifier.predict_research_field(title)) == str)
self.assertTrue(type(classifier.predict_research_field(title, abstract)) == str)
|
StarcoderdataPython
|
6488361
|
# flake8: noqa
from .common import (
is_array_like, is_bool, is_bool_dtype, is_categorical,
is_categorical_dtype, is_complex, is_complex_dtype,
is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64_ns_dtype,
is_datetime64tz_dtype, is_datetimetz, is_dict_like, is_dtype_equal,
is_extension_array_dtype, is_extension_type, is_file_like, is_float,
is_float_dtype, is_hashable, is_int64_dtype, is_integer, is_integer_dtype,
is_interval, is_interval_dtype, is_iterator, is_list_like, is_named_tuple,
is_number, is_numeric_dtype, is_object_dtype, is_period, is_period_dtype,
is_re, is_re_compilable, is_scalar, is_signed_integer_dtype, is_sparse,
is_string_dtype, is_timedelta64_dtype, is_timedelta64_ns_dtype,
is_unsigned_integer_dtype, pandas_dtype)
|
StarcoderdataPython
|
5099577
|
# -*- coding: utf-8 -*-
"""
@date: 2020/11/21 3:15 PM
@file: bottleneck.py
@author: zj
@description:
"""
from abc import ABC
import torch.nn as nn
from zcls.model.attention_helper import make_attention_block
from zcls.model.layers.split_attention_conv2d import SplitAttentionConv2d
"""
from 《ResNeSt: Split-Attention Networks》 Appendix
1. depth-wise convolution is not optimal for training and inference efficiency on GPU;
2. model accuracy get saturated on ImageNet with a fixed input image size;
3. increasing input image size can get better accuracy and FLOPS trade-off;
4. bicubic upsampling strategy is needed for large crop-size (≥ 320).
"""
class ResNeStBlock(nn.Module, ABC):
expansion = 4
def __init__(self,
in_channels,
out_channels,
stride=1,
downsample=None,
groups=1,
base_width=64,
with_attention=False,
reduction=4,
attention_type='SqueezeAndExcitationBlock2D',
conv_layer=None,
norm_layer=None,
act_layer=None,
radix=1,
fast_avg=False,
**kwargs
):
"""
Applies 1x1, 3x3, and 1x1 convolutions in sequence. When downsampling is used,
the second convolution layer halves the spatial size of the input.
Follows the Torchvision implementation.
There are two ways to embed an attention module:
1. Squeeze-and-Excitation or Global Context blocks are embedded in the residual branch (after the 1x1 conv);
2. NonLocal or SimplifiedNonLocal blocks are embedded after the block finishes its computation (after the add).
The Selective Kernel Conv2d replaces the 3x3 convolution layer.
For the downsampling operation, see the paper: in the ResNeSt-fast setting, the
effective average downsampling is applied prior to the 3x3 convolution to avoid
introducing extra computational costs; with the downsampling operation moved
after the convolutional layer, ResNeSt-50 achieves 81.13% accuracy.
AvgPool2d is applied either before the 3x3 convolution (fast setting) or after it.
:param in_channels: number of input channels
:param out_channels: number of output channels
:param stride: stride
:param downsample: downsampling module
:param groups: cardinality
:param base_width: base width
:param with_attention: whether to use an attention module
:param reduction: reduction rate
:param attention_type: attention module type
:param conv_layer: convolution layer type
:param norm_layer: normalization layer type
:param act_layer: activation layer type
:param radix: number of splits within each group
:param fast_avg: apply the average-pool downsampling before the 3x3 convolution
:param kwargs: other arguments
"""
super(ResNeStBlock, self).__init__()
assert radix > 0
assert with_attention in (0, 1)
assert attention_type in ['GlobalContextBlock2D',
'SimplifiedNonLocal2DEmbeddedGaussian',
'NonLocal2DEmbeddedGaussian',
'SqueezeAndExcitationBlock2D']
if conv_layer is None:
conv_layer = nn.Conv2d
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if act_layer is None:
act_layer = nn.ReLU
self.radix = radix
self.down_sample = downsample
width = int(out_channels * (base_width / 64.)) * groups
self.conv1 = conv_layer(in_channels, width, kernel_size=1, stride=1, bias=False)
self.bn1 = norm_layer(width)
self.conv2 = SplitAttentionConv2d(width, width, groups, radix, reduction_rate=reduction)
if self.radix == 0:
self.bn2 = norm_layer(width)
self.conv3 = conv_layer(width, out_channels * self.expansion, kernel_size=1, stride=1, bias=False)
self.bn3 = norm_layer(out_channels * self.expansion)
self.relu = act_layer(inplace=True)
self.attention_after_1x1 = None
self.attention_after_add = None
if with_attention and attention_type in ['SqueezeAndExcitationBlock2D', 'GlobalContextBlock2D']:
self.attention_after_1x1 = make_attention_block(out_channels * self.expansion, reduction, attention_type)
self.attention_after_add = None
if with_attention and attention_type in ['NonLocal2DEmbeddedGaussian', 'SimplifiedNonLocal2DEmbeddedGaussian']:
self.attention_after_1x1 = None
self.attention_after_add = make_attention_block(out_channels * self.expansion, reduction, attention_type)
self.fast_avg = fast_avg
self.avg = None
if stride > 1:
self.avg = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
if self.fast_avg and self.avg is not None:
out = self.avg(out)
out = self.conv2(out)
if self.radix == 0:
out = self.bn2(out)
out = self.relu(out)
if not self.fast_avg and self.avg is not None:
out = self.avg(out)
out = self.conv3(out)
out = self.bn3(out)
if self.attention_after_1x1 is not None:
out = self.attention_after_1x1(out)
if self.down_sample is not None:
identity = self.down_sample(x)
out += identity
out = self.relu(out)
if self.attention_after_add is not None:
out = self.attention_after_add(out)
return out
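# Hedged usage sketch (not part of the original module). The channel sizes below
# are assumptions chosen so the residual add works without a downsample module,
# i.e. in_channels == out_channels * expansion.
if __name__ == "__main__":
    import torch

    block = ResNeStBlock(in_channels=64, out_channels=16, radix=2,
                         with_attention=True, reduction=4)
    out = block(torch.randn(1, 64, 56, 56))
    # Expected shape (1, 64, 56, 56), assuming SplitAttentionConv2d keeps the
    # spatial size at stride 1.
    print(out.shape)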
|
StarcoderdataPython
|
3300858
|
"""Tests for the venstar integration."""
import requests_mock
from homeassistant.components.climate.const import DOMAIN
from homeassistant.const import CONF_HOST, CONF_PLATFORM
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from tests.common import load_fixture
TEST_MODELS = ["t2k", "colortouch"]
def mock_venstar_devices(f):
"""Decorate function to mock a Venstar Colortouch and T2000 thermostat API."""
async def wrapper(hass):
# Mock thermostats are:
# Venstar T2000, FW 4.38
# Venstar "colortouch" T7850, FW 5.1
with requests_mock.mock() as m:
for model in TEST_MODELS:
m.get(
f"http://venstar-{model}.localdomain/",
text=load_fixture(f"venstar/{model}_root.json"),
)
m.get(
f"http://venstar-{model}.localdomain/query/info",
text=load_fixture(f"venstar/{model}_info.json"),
)
m.get(
f"http://venstar-{model}.localdomain/query/sensors",
text=load_fixture(f"venstar/{model}_sensors.json"),
)
return await f(hass)
return wrapper
async def async_init_integration(
hass: HomeAssistant,
skip_setup: bool = False,
):
"""Set up the venstar integration in Home Assistant."""
platform_config = []
for model in TEST_MODELS:
platform_config.append(
{
CONF_PLATFORM: "venstar",
CONF_HOST: f"venstar-{model}.localdomain",
}
)
config = {DOMAIN: platform_config}
await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
|
StarcoderdataPython
|
3215763
|
<reponame>simkovicha/virtool<gh_stars>0
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import GenericRepr, Snapshot
snapshots = Snapshot()
snapshots['TestCreate.test[uvloop-none] 1'] = {
'all_read': True,
'all_write': True,
'created_at': '2015-10-06T20:00:00Z',
'files': [
{
'id': 'test.fq'
}
],
'format': 'fastq',
'group': 'none',
'group_read': True,
'group_write': True,
'hold': True,
'id': '9pfsom1b',
'is_legacy': False,
'library_type': 'normal',
'name': 'Foobar',
'nuvs': False,
'paired': False,
'pathoscope': False,
'quality': None,
'ready': False,
'subtraction': {
'id': 'apple'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[uvloop-users_primary_group] 1'] = {
'all_read': True,
'all_write': True,
'created_at': '2015-10-06T20:00:00Z',
'files': [
{
'id': 'test.fq'
}
],
'format': 'fastq',
'group': 'technician',
'group_read': True,
'group_write': True,
'hold': True,
'id': '9pfsom1b',
'is_legacy': False,
'library_type': 'normal',
'name': 'Foobar',
'nuvs': False,
'paired': False,
'pathoscope': False,
'quality': None,
'ready': False,
'subtraction': {
'id': 'apple'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[uvloop-force_choice] 1'] = {
'all_read': True,
'all_write': True,
'created_at': '2015-10-06T20:00:00Z',
'files': [
{
'id': 'test.fq'
}
],
'format': 'fastq',
'group': 'diagnostics',
'group_read': True,
'group_write': True,
'hold': True,
'id': '9pfsom1b',
'is_legacy': False,
'library_type': 'normal',
'name': 'Foobar',
'nuvs': False,
'paired': False,
'pathoscope': False,
'quality': None,
'ready': False,
'subtraction': {
'id': 'apple'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[uvloop-none] 2'] = {
'_id': '9pfsom1b',
'all_read': True,
'all_write': True,
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'files': [
{
'id': 'test.fq'
}
],
'format': 'fastq',
'group': 'none',
'group_read': True,
'group_write': True,
'hold': True,
'is_legacy': False,
'library_type': 'normal',
'name': 'Foobar',
'nuvs': False,
'paired': False,
'pathoscope': False,
'quality': None,
'ready': False,
'subtraction': {
'id': 'apple'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[uvloop-users_primary_group] 2'] = {
'_id': '9pfsom1b',
'all_read': True,
'all_write': True,
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'files': [
{
'id': 'test.fq'
}
],
'format': 'fastq',
'group': 'technician',
'group_read': True,
'group_write': True,
'hold': True,
'is_legacy': False,
'library_type': 'normal',
'name': 'Foobar',
'nuvs': False,
'paired': False,
'pathoscope': False,
'quality': None,
'ready': False,
'subtraction': {
'id': 'apple'
},
'user': {
'id': 'test'
}
}
snapshots['TestCreate.test[uvloop-force_choice] 2'] = {
'_id': '9pfsom1b',
'all_read': True,
'all_write': True,
'created_at': GenericRepr('datetime.datetime(2015, 10, 6, 20, 0)'),
'files': [
{
'id': 'test.fq'
}
],
'format': 'fastq',
'group': 'diagnostics',
'group_read': True,
'group_write': True,
'hold': True,
'is_legacy': False,
'library_type': 'normal',
'name': 'Foobar',
'nuvs': False,
'paired': False,
'pathoscope': False,
'quality': None,
'ready': False,
'subtraction': {
'id': 'apple'
},
'user': {
'id': 'test'
}
}
snapshots['test_find[uvloop-fred-None-None-d_range5-meta5] 1'] = {
'documents': [
{
'created_at': '2015-10-06T22:00:00Z',
'host': '',
'id': 'cb400e6d',
'isolate': '',
'name': '16SPP044',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'fred'
}
},
{
'created_at': '2015-10-06T20:00:00Z',
'host': '',
'id': '72bb8b31',
'isolate': 'Test',
'name': '16GVP043',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'fred'
}
}
],
'found_count': 2,
'page': 1,
'page_count': 1,
'per_page': 25,
'total_count': 3
}
snapshots['test_get[uvloop-True-None] 1'] = {
'caches': [
],
'created_at': '2015-10-06T20:00:00Z',
'files': [
{
'download_url': '/download/samples/files/file_1.fq.gz',
'id': 'foo',
'name': 'Bar.fq.gz',
'replace_url': '/upload/samples/test/files/1'
}
],
'id': 'test',
'name': 'Test',
'ready': True
}
snapshots['test_find[uvloop-None-None-None-d_range0-meta0] 1'] = {
'documents': [
{
'created_at': '2015-10-06T22:00:00Z',
'host': '',
'id': 'cb400e6d',
'isolate': '',
'name': '16SPP044',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'fred'
}
},
{
'created_at': '2015-10-06T21:00:00Z',
'host': '',
'id': 'beb1eb10',
'isolate': 'Thing',
'name': '16GVP042',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'bob'
}
},
{
'created_at': '2015-10-06T20:00:00Z',
'host': '',
'id': '72bb8b31',
'isolate': 'Test',
'name': '16GVP043',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'fred'
}
}
],
'found_count': 3,
'page': 1,
'page_count': 1,
'per_page': 25,
'total_count': 3
}
snapshots['test_find[uvloop-None-2-1-d_range1-meta1] 1'] = {
'documents': [
{
'created_at': '2015-10-06T22:00:00Z',
'host': '',
'id': 'cb400e6d',
'isolate': '',
'name': '16SPP044',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'fred'
}
},
{
'created_at': '2015-10-06T21:00:00Z',
'host': '',
'id': 'beb1eb10',
'isolate': 'Thing',
'name': '16GVP042',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'bob'
}
}
],
'found_count': 3,
'page': 1,
'page_count': 2,
'per_page': 2,
'total_count': 3
}
snapshots['test_find[uvloop-None-2-2-d_range2-meta2] 1'] = {
'documents': [
{
'created_at': '2015-10-06T20:00:00Z',
'host': '',
'id': '72bb8b31',
'isolate': 'Test',
'name': '16GVP043',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'fred'
}
}
],
'found_count': 3,
'page': 2,
'page_count': 2,
'per_page': 2,
'total_count': 3
}
snapshots['test_find[uvloop-gv-None-None-d_range3-meta3] 1'] = {
'documents': [
{
'created_at': '2015-10-06T21:00:00Z',
'host': '',
'id': 'beb1eb10',
'isolate': 'Thing',
'name': '16GVP042',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'bob'
}
},
{
'created_at': '2015-10-06T20:00:00Z',
'host': '',
'id': '72bb8b31',
'isolate': 'Test',
'name': '16GVP043',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'fred'
}
}
],
'found_count': 2,
'page': 1,
'page_count': 1,
'per_page': 25,
'total_count': 3
}
snapshots['test_find[uvloop-sp-None-None-d_range4-meta4] 1'] = {
'documents': [
{
'created_at': '2015-10-06T22:00:00Z',
'host': '',
'id': 'cb400e6d',
'isolate': '',
'name': '16SPP044',
'nuvs': False,
'pathoscope': False,
'ready': True,
'user': {
'id': 'fred'
}
}
],
'found_count': 1,
'page': 1,
'page_count': 1,
'per_page': 25,
'total_count': 3
}
snapshots['test_get[uvloop-False-None] 1'] = {
'caches': [
],
'created_at': '2015-10-06T20:00:00Z',
'files': [
{
'download_url': '/download/samples/files/file_1.fq.gz',
'id': 'foo',
'name': 'Bar.fq.gz'
}
],
'id': 'test',
'name': 'Test',
'ready': False
}
snapshots['test_find_analyses[uvloop-None-None] 1'] = {
'documents': [
{
'created_at': '2015-10-06T20:00:00Z',
'id': 'test_1',
'index': {
'id': 'foo',
'version': 2
},
'job': {
'id': 'test'
},
'ready': True,
'reference': {
'id': 'baz',
'name': 'Baz'
},
'sample': {
'id': 'test'
},
'user': {
'id': 'bob'
},
'workflow': 'pathoscope_bowtie'
},
{
'created_at': '2015-10-06T20:00:00Z',
'id': 'test_2',
'index': {
'id': 'foo',
'version': 2
},
'job': {
'id': 'test'
},
'ready': True,
'reference': {
'id': 'baz',
'name': 'Baz'
},
'sample': {
'id': 'test'
},
'user': {
'id': 'fred'
},
'workflow': 'pathoscope_bowtie'
},
{
'created_at': '2015-10-06T20:00:00Z',
'id': 'test_3',
'index': {
'id': 'foo',
'version': 2
},
'job': {
'id': 'test'
},
'ready': True,
'reference': {
'id': 'foo',
'name': 'Foo'
},
'sample': {
'id': 'test'
},
'user': {
'id': 'fred'
},
'workflow': 'pathoscope_bowtie'
}
],
'found_count': 3,
'page': 1,
'page_count': 1,
'per_page': 25,
'total_count': 3
}
snapshots['test_find_analyses[uvloop-bob-None] 1'] = {
'documents': [
{
'created_at': '2015-10-06T20:00:00Z',
'id': 'test_1',
'index': {
'id': 'foo',
'version': 2
},
'job': {
'id': 'test'
},
'ready': True,
'reference': {
'id': 'baz',
'name': 'Baz'
},
'sample': {
'id': 'test'
},
'user': {
'id': 'bob'
},
'workflow': 'pathoscope_bowtie'
}
],
'found_count': 1,
'page': 1,
'page_count': 1,
'per_page': 25,
'total_count': 3
}
snapshots['test_find_analyses[uvloop-Baz-None] 1'] = {
'documents': [
{
'created_at': '2015-10-06T20:00:00Z',
'id': 'test_1',
'index': {
'id': 'foo',
'version': 2
},
'job': {
'id': 'test'
},
'ready': True,
'reference': {
'id': 'baz',
'name': 'Baz'
},
'sample': {
'id': 'test'
},
'user': {
'id': 'bob'
},
'workflow': 'pathoscope_bowtie'
},
{
'created_at': '2015-10-06T20:00:00Z',
'id': 'test_2',
'index': {
'id': 'foo',
'version': 2
},
'job': {
'id': 'test'
},
'ready': True,
'reference': {
'id': 'baz',
'name': 'Baz'
},
'sample': {
'id': 'test'
},
'user': {
'id': 'fred'
},
'workflow': 'pathoscope_bowtie'
}
],
'found_count': 2,
'page': 1,
'page_count': 1,
'per_page': 25,
'total_count': 3
}
|
StarcoderdataPython
|
4882929
|
<filename>S6/CS334-NPL/002b_thread (004b).py
# Python code to create threads
import threading
import time
def dummy():
pass
def main():
for i in range(5):
tid = threading.Thread(target=dummy, args=[])
tid.start()
print(f'Created and started thread {tid.name} (ident: {tid.ident})')
time.sleep(1)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
8173357
|
from payroll import Payroll
payroll_log = {1: Payroll(40, 10), 2:Payroll(30, 10), \
3: Payroll(20, 10)}
print('Hours', 'Rate', 'Pay\n')
for p in payroll_log.values():
print(p.hours_worked, p.hourly_rate, p.calculate())
|
StarcoderdataPython
|
3342286
|
# Builtins
import os
import pathlib
import unittest
import datetime as dt
from harvest.api.paper import PaperBroker
from harvest.definitions import *
from harvest.utils import *
from _util import *
class TestPaperBroker(unittest.TestCase):
@delete_save_files(".")
def test_account(self):
"""
By default, the account is created with a balance of 1 million dollars.
"""
paper = PaperBroker()
d = paper.fetch_account()
self.assertEqual(d["equity"], 1000000.0)
self.assertEqual(d["cash"], 1000000.0)
self.assertEqual(d["buying_power"], 1000000.0)
self.assertEqual(d["multiplier"], 1)
@delete_save_files(".")
def test_dummy_account(self):
"""
Test if positions can be saved correctly.
"""
paper = PaperBroker()
paper.stocks.append({"symbol": "A", "avg_price": 1.0, "quantity": 5})
paper.stocks.append({"symbol": "B", "avg_price": 10.0, "quantity": 5})
paper.cryptos.append({"symbol": "@C", "avg_price": 289.21, "quantity": 2})
stocks = paper.fetch_stock_positions()
self.assertEqual(len(stocks), 2)
self.assertEqual(stocks[0]["symbol"], "A")
self.assertEqual(stocks[0]["avg_price"], 1.0)
self.assertEqual(stocks[0]["quantity"], 5)
cryptos = paper.fetch_crypto_positions()
self.assertEqual(len(cryptos), 1)
self.assertEqual(cryptos[0]["symbol"], "@C")
self.assertEqual(cryptos[0]["avg_price"], 289.21)
self.assertEqual(cryptos[0]["quantity"], 2)
@delete_save_files(".")
def test_buy_order_limit(self):
"""
Test if buy orders can be placed correctly.
"""
_, dummy, paper = create_trader_and_api("dummy", "paper", "5MIN", ["A"])
account = paper.fetch_account()
# First, check that there is $1m in the account
self.assertEqual(account["equity"], 1000000.0)
self.assertEqual(account["cash"], 1000000.0)
self.assertEqual(account["buying_power"], 1000000.0)
# Get the current price of A
A_price = dummy.fetch_latest_price("A")
# Place an order to buy A
order = paper.order_stock_limit("buy", "A", 5, A_price * 1.05)
self.assertEqual(order["order_id"], 0)
self.assertEqual(order["symbol"], "A")
# Since this is a simulation, orders are immediately filled
status = paper.fetch_stock_order_status(order["order_id"])
self.assertEqual(status["order_id"], 0)
self.assertEqual(status["symbol"], "A")
self.assertEqual(status["quantity"], 5)
self.assertEqual(status["filled_qty"], 0)
# self.assertEqual(status["filled_price"], 0)
self.assertEqual(status["side"], "buy")
self.assertEqual(status["time_in_force"], "gtc")
self.assertEqual(status["status"], "filled")
# Assume order will be filled at the price of A
filled_price = A_price
filled_qty = status["quantity"]
cost_1 = filled_price * filled_qty
# Advance time so broker status gets updated
dummy.main()
account_after = paper.fetch_account()
self.assertEqual(account_after["equity"], account["equity"])
self.assertEqual(account_after["cash"], account["cash"] - cost_1)
self.assertEqual(
account_after["buying_power"], account["buying_power"] - cost_1
)
# def test_buy(self):
# trader, dummy, paper = create_trader_and_api("dummy", "paper", "1MIN", ["A"])
# order = paper.buy("A", 5, 1e5)
# self.assertEqual(order["order_id"], 0)
# self.assertEqual(order["symbol"], "A")
# status = paper.fetch_stock_order_status(order["order_id"])
# self.assertEqual(status["order_id"], 0)
# self.assertEqual(status["symbol"], "A")
# self.assertEqual(status["quantity"], 5)
# self.assertEqual(status["filled_qty"], 5)
# self.assertEqual(status["side"], "buy")
# self.assertEqual(status["time_in_force"], "gtc")
# self.assertEqual(status["status"], "filled")
# paper._delete_account()
@delete_save_files(".")
def test_sell_order_limit(self):
"""
Test if Paper Broker can sell orders correctly.
Assumes the buy feature works.
"""
_, dummy, paper = create_trader_and_api("dummy", "paper", "5MIN", ["A"])
account = paper.fetch_account()
A_price = dummy.fetch_latest_price("A")
order = paper.order_stock_limit("buy", "A", 2, A_price * 1.05)
status = paper.fetch_stock_order_status(order["order_id"])
filled_price = A_price
filled_qty = status["quantity"]
cost = filled_price * filled_qty
dummy.main()
A_price = dummy.fetch_latest_price("A")
account_1 = paper.fetch_account()
print(account_1)
order = paper.order_stock_limit("sell", "A", 2, A_price * 0.95)
status = paper.fetch_stock_order_status(order["order_id"])
self.assertEqual(status["order_id"], 1)
self.assertEqual(status["symbol"], "A")
self.assertEqual(status["quantity"], 2)
self.assertEqual(status["filled_qty"], 0)
# self.assertEqual(status["filled_price"], 0)
self.assertEqual(status["side"], "sell")
self.assertEqual(status["time_in_force"], "gtc")
self.assertEqual(status["status"], "filled")
filled_price_s = A_price
filled_qty_s = status["quantity"]
cost_s = filled_price_s * filled_qty_s
profit = cost_s - cost
exp_equity = 1000000.0 + profit
dummy.main()
account_2 = paper.fetch_account()
self.assertAlmostEqual(account_2["equity"], exp_equity, 2)
self.assertEqual(account_2["cash"], account_1["cash"] + cost_s)
self.assertEqual(account_2["buying_power"], account_1["buying_power"] + cost_s)
@delete_save_files(".")
def test_sell(self):
_, _, paper = create_trader_and_api("dummy", "paper", "1MIN", ["A"])
paper.stocks = [{"symbol": "A", "avg_price": 10.0, "quantity": 5}]
order = paper.sell("A", 2)
self.assertEqual(order["order_id"], 0)
self.assertEqual(order["symbol"], "A")
status = paper.fetch_stock_order_status(order["order_id"])
self.assertEqual(status["order_id"], 0)
self.assertEqual(status["symbol"], "A")
self.assertEqual(status["quantity"], 2)
self.assertEqual(status["filled_qty"], 0)
# self.assertEqual(status["filled_price"], 0)
self.assertEqual(status["side"], "sell")
self.assertEqual(status["time_in_force"], "gtc")
self.assertEqual(status["status"], "filled")
paper._delete_account()
@delete_save_files(".")
def test_order_option_limit(self):
paper = PaperBroker()
_, _, paper = create_trader_and_api("dummy", "paper", "1MIN", ["A"])
exp_date = dt.datetime(2021, 11, 14) + dt.timedelta(hours=5)
order = paper.order_option_limit(
"buy", "A", 5, 50000, "OPTION", exp_date, 50001
)
self.assertEqual(order["order_id"], 0)
self.assertEqual(order["symbol"], "A211114P50001000")
status = paper.fetch_option_order_status(order["order_id"])
self.assertEqual(status["symbol"], "A211114P50001000")
self.assertEqual(status["quantity"], 5)
paper._delete_account()
@delete_save_files(".")
def test_commission(self):
commission_fee = {"buy": 5.76, "sell": "2%"}
_, _, paper = create_trader_and_api("dummy", "paper", "1MIN", ["A"])
paper.commission_fee = commission_fee
total_cost = paper.apply_commission(50, paper.commission_fee, "buy")
self.assertEqual(total_cost, 55.76)
total_cost = paper.apply_commission(50, paper.commission_fee, "sell")
self.assertEqual(total_cost, 49)
paper._delete_account()
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
3599739
|
<reponame>savyasachi16/CVE-data-model
from docopt import docopt
from database import Database
from download import Download
from parser import Parser
from config import GENERIC_FILEPATHS, RECENT_FILEPATH, MODIFIED_FILEPATH
class Driver:
def __init__(self):
self.downloader = Download()
self.parser = Parser()
self.db = Database()
def rebuildDB(self):
self.downloader.downloadAll()
self.db.dropAllTables()
self.db.createAllTables()
for file in GENERIC_FILEPATHS:
cves = self.parser.getCVEs(file)
self.db.insertRows(cves)
def updateDB(self):
self.downloader.downloadModifiedFeed()
self.downloader.downloadRecentFeed()
recent_cves = self.parser.getCVEs(RECENT_FILEPATH)
modified_cves = self.parser.getCVEs(MODIFIED_FILEPATH)
self.db.updateRows(recent_cves)
self.db.updateRows(modified_cves)
def getTopVulnerableProducts(self):
pass
def getRoundedScoreCVECount(self):
pass
if __name__ == "__main__":
exe = Driver()
exe.updateDB()
|
StarcoderdataPython
|
12828804
|
<filename>news_collector/news_collector/spiders/minutouno.py
import datetime
import newspaper
import scrapy
import locale
locale.setlocale(locale.LC_ALL, "es_AR.utf8")
BASE_URL = 'http://www.minutouno.com'
class MinutoUnoSpider(scrapy.Spider):
name = "m1"
def start_requests(self):
urls = [
'https://www.minutouno.com/politica',
'https://www.minutouno.com/economia'
]
for url in urls:
yield scrapy.Request(url=url, callback=self.parse_seccion)
def parse_seccion(self, response):
noticias = set(response.xpath('//div[@class="note"]/article//a[contains(@href,"notas")]/@href').extract())
for noticia_url in noticias:
yield scrapy.Request(url=noticia_url, callback=self.parse_noticia)
def parse_noticia(self, response):
ff = newspaper.Article(response.url)
ff.download()
ff.parse()
noticia_fecha = ff.publish_date
if not noticia_fecha:
try:
fecha_texto = response.xpath('//span[@class="date"]/text()').extract()[0].split('-')[0].lower().strip()
noticia_fecha = datetime.datetime.strptime(fecha_texto, '%d de %B de %Y')
except:
noticia_fecha = datetime.datetime.now()
noticia_cuerpo = ff.text
data = {
'titulo': ff.title,
'fecha': noticia_fecha,
'noticia_texto': noticia_cuerpo,
'noticia_url': response.url,
'source': 'minuto1',
'formato': 'web'
}
yield data
|
StarcoderdataPython
|
4962088
|
#!/usr/bin/env python3
from bs4 import BeautifulSoup
import requests, csv
url = 'https://www.seek.co.nz/jobs/in-All-Auckland/full-time?daterange=14&keywords=%22devops%20engineer%22&salaryrange=150000-999999&salarytype=annual&sortmode=ListedDate'
request = requests.get(url).text
soup = BeautifulSoup(request, 'lxml')
jobs = soup.find_all('article')
with open('jobs.csv', 'w') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(['JOB TITLE', 'COMPANY', 'LOCATION', 'SALARY', 'LINK'])
for job in jobs:
job_title = job.find('a', class_='_2S5REPk').text
company = job.find('a', class_='_17sHMz8').text.upper()
location = job.find('strong', class_='_7ZnNccT').span.a.text
raw_job_link = job.find('a', class_='_2S5REPk')['href']
split_raw_link = raw_job_link.split('?')
job_link = 'https://www.seek.co.nz' + split_raw_link[0]
try:
salary = job.find('span', class_='_7ZnNccT').text
except Exception as e:
salary = 'No salary info provided'
csv_writer.writerow([job_title, company, location, salary, job_link])
print('CSV file has been created successfully!')
|
StarcoderdataPython
|
1988719
|
<gh_stars>1-10
import sys
import os
import time
import pandas as pd
import numpy as np
import coltools as ct
import re
def inflect(key):
root = key[:-1]
if key.endswith("o"):
return [root + x for x in ["on", "oj", "ojn"]]
elif key.endswith("a"):
return [root + x for x in ["aj", "an", "ajn"]]
elif key.endswith("e"):
return [root + "en"]
elif key.endswith("i"):
return [root + x for x in ["as", "os", "is", "us", "u",
"ita", "ata", "ota",
"inta", "anta", "onta",
"intan", "antan", "ontan",
"intaj", "antaj", "ontaj",
"intajn", "antajn", "ontajn"]]
else:
return []
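# Illustrative examples derived from the rules above (not in the original file):
#   inflect("hundo")  -> ["hundon", "hundoj", "hundojn"]
#   inflect("bela")   -> ["belaj", "belan", "belajn"]
#   inflect("rapide") -> ["rapiden"]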
def main(argv):
filen = argv[0]
lines = open(filen, "r").readlines()
x = 0
for line in lines:
if "idx:orth" in line:
x+=1
l = line.strip().replace("</idx:orth>", "")
key = l.split(">")[1]
inflections = inflect(key)
# print(f">>>> {key} , {l}, {line}<<<")
# print(inflect(key))
if len(inflections) == 0:
print(line)
else:
print(l)
print("<idx:infl>")
for infl in inflections:
print('<idx:iform value="' + infl + '" />')
print("</idx:infl>")
print("</idx:orth>")
else:
print(line)
if __name__ == "__main__":
main(sys.argv[1:])
|
StarcoderdataPython
|
11277095
|
"""Test that a Gene Ontology Enrichement Analysis can be run quietly"""
import os
from goatools.goea.go_enrichment_ns import GOEnrichmentStudyNS
from goatools.anno.idtogos_reader import IdToGosReader
from goatools.base import get_godag
__copyright__ = "Copyright (C) 2010-present, <NAME> al., All rights reserved."
REPO = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
def test_goea_quiet():
"""Test that a Gene Ontology Enrichement Analysis can be run quietly"""
goeaobj = _get_goeaobj()
study_fin = "{REPO}/tests/data/small_study".format(REPO=REPO)
study_ids = [line.rstrip() for line in open(study_fin)]
print('\nTEST 1: GOEA run_study(study_ids)')
goea_results1 = goeaobj.run_study(study_ids)
print('{N} GOEA results for verbose GOEA'.format(N=len(goea_results1)))
print('\nTEST 2: GOEA run_study(study_ids, prt=None)')
goea_results2 = goeaobj.run_study(study_ids, prt=None)
print('{N} GOEA results for quiet GOEA'.format(N=len(goea_results2)))
# Original keyword is 'log'
print('\nTEST 3: GOEA run_study(study_ids, log=None)')
goea_results3 = goeaobj.run_study(study_ids, log=None)
print('{N} GOEA results for quiet GOEA'.format(N=len(goea_results3)))
_chk_results(goea_results1, goea_results2)
_chk_results(goea_results1, goea_results3)
def _get_goeaobj(methods=None):
"""Test GOEA with method, fdr."""
# Read the GODag
obo_fin = os.path.join(REPO, "go-basic.obo")
obo_dag = get_godag(obo_fin, loading_bar=None)
# Read association
fin_assc = "{REPO}/tests/data/small_association".format(REPO=REPO)
objanno = IdToGosReader(fin_assc, godag=obo_dag)
ns2assc = objanno.get_ns2assc()
popul_fin = "{REPO}/tests/data/small_population".format(REPO=REPO)
popul_ids = [line.rstrip() for line in open(popul_fin)]
goeaobj = GOEnrichmentStudyNS(popul_ids, ns2assc, obo_dag, methods=methods)
return goeaobj
def _chk_results(results1, results2):
"""Check that results match"""
# pylint: disable=line-too-long
for res1, res2 in zip(results1, results2):
assert res1.GO == res2.GO, '\nRES1: {R1}\nRES2: {R2}\n\n'.format(R1=res1, R2=res2)
assert res1.p_bonferroni == res2.p_bonferroni, '\nRES1: {R1}\nRES2: {R2}\n\n'.format(R1=res1, R2=res2)
assert res1.p_sidak == res2.p_sidak, '\nRES1: {R1}\nRES2: {R2}\n\n'.format(R1=res1, R2=res2)
assert res1.p_holm == res2.p_holm, '\nRES1: {R1}\nRES2: {R2}\n\n'.format(R1=res1, R2=res2)
if __name__ == '__main__':
test_goea_quiet()
# Copyright (C) 2010-present, <NAME>., All rights reserved.
|
StarcoderdataPython
|
8172709
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==================================================
# @Time : 2019-06-20 22:59
# @Author : ryuchen
# @File : critical.py
# @Desc :
# ==================================================
class CuckooCriticalError(Exception):
"""Cuckoo struggle in a critical error."""
class CuckooStartupError(CuckooCriticalError):
"""Error starting up Cuckoo."""
class CuckooDatabaseError(CuckooCriticalError):
"""Cuckoo database error."""
class CuckooDependencyError(CuckooCriticalError):
"""Missing dependency error."""
class CuckooConfigurationError(CuckooCriticalError):
"""Invalid configuration error."""
|
StarcoderdataPython
|
8001169
|
<gh_stars>1-10
# R_mouth_up_right_geo
# R_mouth_down_left_geo
# R_mouth_up_left_geo
# R_mouth_down_right_geo
# L_mouth_down_right_geo
# L_mouth_up_right_geo
# L_mouth_down_left_geo
# L_mouth_up_left_geo
# 1, rename current mouth corrective shapes and unparent from mouth_shapes group
# 2, dial ctrl to max value(e.g.tx=1,ty=-1)
# 3, hit the button named "Create Corrective Target from Current Pose"
# 4, select the org shape then the cur shape, create a blendshape, then delete history
# 5, hit button named "Generate Corrective Shapes and Connect to Rig"
# 6, test by loading the pose to check it worked. Then hide the corrective shape and move on to the next one.
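# Hedged sketch of steps 4-5 with maya.cmds (illustrative only; the node names
# "R_mouth_up_right_geo_org" / "_cur" are assumptions, not taken from the rig):
#
#   from maya import cmds
#   org, cur = "R_mouth_up_right_geo_org", "R_mouth_up_right_geo_cur"
#   bs = cmds.blendShape(org, cur, name="tmp_corrective_bs")[0]
#   cmds.setAttr(bs + ".weight[0]", 1)
#   cmds.delete(cur, constructionHistory=True)  # bake the corrective target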
|
StarcoderdataPython
|
92196
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""Tests for aiida-openmx."""
import os
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
|
StarcoderdataPython
|
209682
|
<filename>sloth_toolkit/utilities/extend.py
__doc__ = """ A module with the functionality to extend or modify an object."""
import re

from slugify import slugify
from . import verify, prope
def validate_attribute_name(name):
""" Modify a string to become a valid/legal lower case attribute """
name = slugify(name).replace('-', '_')
return name
def add_attribute(cls, attr_name, attr_value):
""" Add a valid attribute name to a class and set the attribute value """
attr_name = validate_attribute_name(attr_name)
setattr(cls, attr_name, attr_value)
# Code solution from: http://stackoverflow.com/questions/20078816/replace-non-ascii-characters-with-a-single-space
def validate_ascii_only(text, replacement=' '):
"""
When mining text for data, you may come across special characters that
raise a 'UnicodeEncodeError' when printing, storing in a database, or in
similar situations.
Call this function with the affected string to replace those characters,
by default with a space. The optional second parameter lets you specify a
different replacement character.
"""
return str(re.sub( r'[^\x00-\x7F]+', replacement, text ))
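# Illustrative examples (assumed inputs, not from the original module):
#   validate_ascii_only("naïve")       -> "na ve"
#   validate_ascii_only("naïve", "_")  -> "na_ve"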
def extend_get_has_response_header_methods(obj):
"""
The passed object must have the attribute `response`, and that response
must be an object that has an attribute named `headers` that is a dict.
Dynamically create methods for the headers in the response object,
and append those methods to the class of the object (not the instance).
"""
verify.has_response(obj)
verify.has_headers(obj.response)
#### Methods Wrapper ##########################################
def _make_method(header_name, has=False, get=False):
""" Create a method for a header """
# Create a valid header name
valid_header_name = verify.validate_attribute_name(header_name)
#### Methods to make #######################
def _has_header(self):
""" Return Flase if header value is None """
if header_name not in self.response.headers:
self.response.headers[header_name] = None
return self.response.headers[header_name] != None
def _get_header(self):
""" Returns the header value """
if header_name not in self.response.headers:
self.response.headers[header_name] = None
return self.response.headers[header_name]
############################################
if has:
_method = _has_header
_method.__name__ = 'has_%s_header' % valid_header_name
elif get:
_method = _get_header
_method.__name__ = 'get_%s_header' % valid_header_name
return _method
###############################################################
objclass = prope.get_class(obj)
for header_name in obj.response.headers:
_has_method = _make_method(header_name, has=True)
method_name = _has_method.__name__
setattr(objclass, method_name, _has_method)
_get_method = _make_method(header_name, get=True)
method_name = _get_method.__name__
setattr(objclass, method_name, _get_method)
|
StarcoderdataPython
|
11346707
|
<gh_stars>1-10
import googleapiclient.discovery
compute = googleapiclient.discovery.build('compute', 'v1')
compute.instances().start(project='noqcks', instance='us-east-1-ping', zone='us-east1-c').execute()
compute.instances().start(project='noqcks', instance='asia-east1-ping', zone='asia-east1-a').execute()
compute.instances().start(project='noqcks', instance='asia-northeast1-ping', zone='asia-northeast1-a').execute()
compute.instances().start(project='noqcks', instance='asia-southeast1-ping', zone='asia-southeast1-a').execute()
compute.instances().start(project='noqcks', instance='europe-west1-ping', zone='europe-west1-d').execute()
compute.instances().start(project='noqcks', instance='europe-west2-ping', zone='europe-west2-b').execute()
compute.instances().start(project='noqcks', instance='us-central1-ping', zone='us-central1-c').execute()
compute.instances().start(project='noqcks', instance='us-east4-a', zone='us-east4-a').execute()
compute.instances().start(project='noqcks', instance='us-west-1', zone='us-west1-b').execute()
print "started instances"
|
StarcoderdataPython
|
12866047
|
#!/usr/bin/env python
"""
cc_plugin_eustace.eustace_global_attrs
Compliance Test Suite: Check core global attributes in EUSTACE files
"""
import os
from netCDF4 import Dataset
# Import base objects from compliance checker
from compliance_checker.base import Result, BaseNCCheck, GenericFile
# Restrict which vocabs will load (for efficiency)
os.environ["ESSV_VOCABS_ACTIVE"] = "eustace-team"
# Import checklib
import checklib.register.nc_file_checks_register as check_package
class EUSTACEGlobalAttrsCheck(BaseNCCheck):
register_checker = True
name = 'eustace-global-attrs'
_cc_spec = 'eustace-global-attrs'
_cc_spec_version = '0.2'
supported_ds = [GenericFile, Dataset]
_cc_display_headers = {
3: 'Required',
2: 'Recommended',
1: 'Suggested'
}
def setup(self, ds):
pass
def check_cr01(self, ds):
return check_package.ValidGlobalAttrsMatchFileNameCheck(kwargs={'delimiter': '_', 'order': 'institution_id,realm,frequency', 'extension': '.nc'},
level="HIGH",
vocabulary_ref="eustace-team:eustace")(ds)
def check_cr02(self, ds):
return check_package.GlobalAttrRegexCheck(kwargs={'regex': 'CF-1\\.6', 'attribute': 'Conventions'},
level="HIGH",
vocabulary_ref="")(ds)
def check_cr03(self, ds):
return check_package.GlobalAttrRegexCheck(kwargs={'regex': '.{4,}', 'attribute': 'source'},
level="HIGH",
vocabulary_ref="")(ds)
def check_cr04(self, ds):
return check_package.GlobalAttrRegexCheck(kwargs={'regex': 'EUSTACE', 'attribute': 'project_id'},
level="HIGH",
vocabulary_ref="")(ds)
def check_cr05(self, ds):
return check_package.GlobalAttrRegexCheck(kwargs={'regex': '.{4,}', 'attribute': 'contact'},
level="HIGH",
vocabulary_ref="")(ds)
def check_cr06(self, ds):
return check_package.GlobalAttrRegexCheck(kwargs={'regex': '.{4,}', 'attribute': 'history'},
level="MEDIUM",
vocabulary_ref="")(ds)
def check_cr07(self, ds):
return check_package.GlobalAttrRegexCheck(kwargs={'regex': '.{4,}', 'attribute': 'references'},
level="MEDIUM",
vocabulary_ref="")(ds)
def check_cr08(self, ds):
return check_package.GlobalAttrRegexCheck(kwargs={'regex': '.{1,}', 'attribute': 'product_version'},
level="HIGH",
vocabulary_ref="")(ds)
def check_cr09(self, ds):
return check_package.GlobalAttrRegexCheck(kwargs={'regex': '.{4,}', 'attribute': 'title'},
level="HIGH",
vocabulary_ref="")(ds)
def check_cr10(self, ds):
return check_package.GlobalAttrRegexCheck(kwargs={'regex': '.{20,}', 'attribute': 'summary'},
level="HIGH",
vocabulary_ref="")(ds)
def check_cr11(self, ds):
return check_package.GlobalAttrRegexCheck(kwargs={'regex': '.{4,}', 'attribute': 'creator_name'},
level="HIGH",
vocabulary_ref="")(ds)
def check_cr12(self, ds):
return check_package.GlobalAttrRegexCheck(kwargs={'regex': '.+@.+\\..+', 'attribute': 'creator_email'},
level="HIGH",
vocabulary_ref="")(ds)
def check_cr13(self, ds):
return check_package.GlobalAttrVocabCheck(kwargs={'attribute': 'frequency', 'vocab_lookup': 'canonical_name'},
level="LOW",
vocabulary_ref="eustace-team:eustace")(ds)
def check_cr14(self, ds):
return check_package.GlobalAttrVocabCheck(kwargs={'attribute': 'institution_id', 'vocab_lookup': 'canonical_name'},
level="HIGH",
vocabulary_ref="eustace-team:eustace")(ds)
def check_cr15(self, ds):
return check_package.GlobalAttrRegexCheck(kwargs={'regex': '\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.*', 'attribute': 'creation_date'},
level="MEDIUM",
vocabulary_ref="")(ds)
|
StarcoderdataPython
|
5179244
|
<reponame>renovate-bot/sphinx-docfx-yaml
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""
Sphinx DocFX YAML Top-level Extension.
This extension allows you to automagically generate DocFX YAML from your Python AutoAPI docs.
"""
import os
import inspect
import re
import copy
import shutil
import black
import logging
from pathlib import Path
from functools import partial
from itertools import zip_longest
from typing import List
from black import InvalidInput
try:
from subprocess import getoutput
except ImportError:
from commands import getoutput
from yaml import safe_dump as dump
from sphinx.util.console import darkgreen, bold
from sphinx.util import ensuredir
from sphinx.errors import ExtensionError
from sphinx.util.nodes import make_refnode
from sphinxcontrib.napoleon.docstring import GoogleDocstring
from sphinxcontrib.napoleon import Config, _process_docstring
from .utils import transform_node, transform_string
from .settings import API_ROOT
from .monkeypatch import patch_docfields
from .directives import RemarksDirective, TodoDirective
from .nodes import remarks
import subprocess
import ast
from docuploader import shell
class Bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
try:
from conf import *
except ImportError:
print(Bcolors.FAIL + 'cannot import conf.py! '
      'you should have a conf.py in the working project folder' + Bcolors.ENDC)
METHOD = 'method'
FUNCTION = 'function'
MODULE = 'module'
CLASS = 'class'
EXCEPTION = 'exception'
ATTRIBUTE = 'attribute'
REFMETHOD = 'meth'
REFFUNCTION = 'func'
INITPY = '__init__.py'
# Regex expression for checking references of pattern like ":class:`~package_v1.module`"
REF_PATTERN = ':(py:)?(func|class|meth|mod|ref|attr|exc):`~?[a-zA-Z0-9_\.<> ]*(\(\))?`'
# Regex expression for checking references of pattern like "~package_v1.subpackage.module"
REF_PATTERN_LAST = '~([a-zA-Z0-9_<>]*\.)*[a-zA-Z0-9_<>]*(\(\))?'
# Regex expression for checking references of pattern like
# "[module][google.cloud.cloudkms_v1.module]"
REF_PATTERN_BRACKETS = '\[[a-zA-Z0-9\_\<\>\-\. ]+\]\[[a-zA-Z0-9\_\<\>\-\. ]+\]'
REF_PATTERNS = [
REF_PATTERN,
REF_PATTERN_LAST,
REF_PATTERN_BRACKETS,
]
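# Illustrative strings each pattern is meant to match (examples only):
#   REF_PATTERN          ->  ":class:`~google.cloud.storage.Bucket`"
#   REF_PATTERN_LAST     ->  "~google.cloud.storage.Bucket"
#   REF_PATTERN_BRACKETS ->  "[Bucket][google.cloud.storage.Bucket]"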
PROPERTY = 'property'
CODEBLOCK = "code-block"
CODE = "code"
PACKAGE = "package"
# Disable blib2to3 output that clutters debugging log.
logging.getLogger("blib2to3").setLevel(logging.ERROR)
# Run sphinx-build with Markdown builder in the plugin.
def run_sphinx_markdown():
cwd = os.getcwd()
# Skip running sphinx-build for Markdown for some unit tests.
# Not required other than to output DocFX YAML.
if "docs" in cwd:
return
return shell.run(
[
"sphinx-build",
"-M",
"markdown",
"docs/",
"docs/_build",
],
hide_output=False
)
def build_init(app):
print("Running sphinx-build with Markdown first...")
run_sphinx_markdown()
print("Completed running sphinx-build with Markdown files.")
"""
Set up environment data
"""
if not app.config.docfx_yaml_output:
raise ExtensionError('You must configure a docfx_yaml_output setting')
# This stores YAML object for modules
app.env.docfx_yaml_modules = {}
# This stores YAML object for classes
app.env.docfx_yaml_classes = {}
# This stores YAML object for functions
app.env.docfx_yaml_functions = {}
# This stores the data extracted from the info fields
app.env.docfx_info_field_data = {}
# This stores signature for functions and methods
app.env.docfx_signature_funcs_methods = {}
# This stores the uid-type mapping info
app.env.docfx_info_uid_types = {}
# This stores uidnames of docstrings already parsed
app.env.docfx_uid_names = {}
# This stores file path for class when inspect cannot retrieve file path
app.env.docfx_class_paths = {}
# This stores the name and href of the markdown pages.
app.env.markdown_pages = []
app.env.docfx_xrefs = {}
remote = getoutput('git remote -v')
try:
app.env.docfx_remote = remote.split('\t')[1].split(' ')[0]
except Exception:
app.env.docfx_remote = None
try:
app.env.docfx_branch = getoutput('git rev-parse --abbrev-ref HEAD').strip()
except Exception:
app.env.docfx_branch = None
try:
app.env.docfx_root = getoutput('git rev-parse --show-toplevel').strip()
except Exception:
app.env.docfx_root = None
patch_docfields(app)
app.docfx_transform_node = partial(transform_node, app)
app.docfx_transform_string = partial(transform_string, app)
def _get_cls_module(_type, name):
"""
Get the class and module name for an object
"""
cls = None
if _type in [FUNCTION, EXCEPTION]:
module = '.'.join(name.split('.')[:-1])
elif _type in [METHOD, ATTRIBUTE, PROPERTY]:
cls = '.'.join(name.split('.')[:-1])
module = '.'.join(name.split('.')[:-2])
elif _type in [CLASS]:
cls = name
module = '.'.join(name.split('.')[:-1])
elif _type in [MODULE]:
module = name
else:
return (None, None)
return (cls, module)
def _create_reference(datam, parent, is_external=False):
return {
'uid': datam['uid'],
'parent': parent,
'isExternal': is_external,
'name': datam['source']['id'],
'fullName': datam['fullName'],
}
def _refact_example_in_module_summary(lines):
new_lines = []
block_lines = []
example_block_flag = False
for line in lines:
if line.startswith('.. admonition:: Example'):
example_block_flag = True
line = '### Example\n\n'
new_lines.append(line)
elif example_block_flag and len(line) != 0 and not line.startswith(' '):
example_block_flag = False
new_lines.append(''.join(block_lines))
new_lines.append(line)
block_lines[:] = []
elif example_block_flag:
if line == ' ':  # original line is a blank line ('\n').
line = '\n' # after outer ['\n'.join] operation,
# this '\n' will be appended to previous line then. BINGO!
elif line.startswith(' '):
# will be indented by 4 spaces according to yml block syntax.
# https://learnxinyminutes.com/docs/yaml/
line = ' ' + line + '\n'
block_lines.append(line)
else:
new_lines.append(line)
return new_lines
def _resolve_reference_in_module_summary(pattern, lines):
new_lines, xrefs = [], []
for line in lines:
matched_objs = list(re.finditer(pattern, line))
new_line = line
for matched_obj in matched_objs:
start = matched_obj.start()
end = matched_obj.end()
matched_str = line[start:end]
# TODO: separate this portion into a function per pattern.
if pattern == REF_PATTERN:
if '<' in matched_str and '>' in matched_str:
# match string like ':func:`***<***>`'
index = matched_str.index('<')
ref_name = matched_str[index+1:-2]
else:
# match string like ':func:`~***`' or ':func:`***`'
index = matched_str.index('~') if '~' in matched_str else matched_str.index('`')
ref_name = matched_str[index+1:-1]
index = ref_name.rfind('.') + 1
# Find the last component of the target. "~Queue.get" only returns <xref:get>
ref_name = ref_name[index:]
elif pattern == REF_PATTERN_LAST:
index = matched_str.rfind('.') + 1
if index == 0:
# If there is no dot, push index to not include tilde
index = 1
ref_name = matched_str[index:]
elif pattern == REF_PATTERN_BRACKETS:
lbracket = matched_str.find('[')+1
rbracket = matched_str.find(']')
ref_name = matched_str[lbracket:rbracket]
else:
raise ValueError(f'Encountered wrong ref pattern: \n{pattern}')
# Find the uid to add for xref
index = matched_str.find("google.cloud")
if index > -1:
xref = matched_str[index:]
while not xref[-1].isalnum():
xref = xref[:-1]
xrefs.append(xref)
# Check to see if we should create an xref for it.
if 'google.cloud' in matched_str:
new_line = new_line.replace(matched_str, '<xref uid=\"{}\">{}</xref>'.format(xref, ref_name))
# If it not a Cloud library, don't create xref for it.
else:
# Carefully extract the original uid
if pattern == REF_PATTERN:
index = matched_str.index('~') if '~' in matched_str else matched_str.index('`')
ref_name = matched_str[index+1:-1]
else:
ref_name = matched_str[1:]
new_line = new_line.replace(matched_str, '`{}`'.format(ref_name))
new_lines.append(new_line)
return new_lines, xrefs
def enumerate_extract_signature(doc, max_args=20):
el = "((?P<p%d>[*a-zA-Z_]+) *(?P<a%d>: *[a-zA-Z_.]+)? *(?P<d%d>= *[^ ]+?)?)"
els = [el % (i, i, i) for i in range(0, max_args)]
par = els[0] + "?" + "".join(["( *, *" + e + ")?" for e in els[1:]])
exp = "(?P<name>[a-zA-Z_]+) *[(] *(?P<sig>{0}) *[)]".format(par)
reg = re.compile(exp)
for func in reg.finditer(doc.replace("\n", " ")):
yield func
def enumerate_cleaned_signature(doc, max_args=20):
for sig in enumerate_extract_signature(doc, max_args=max_args):
dic = sig.groupdict()
name = sig["name"]
args = []
for i in range(0, max_args):
p = dic.get('p%d' % i, None)
if p is None:
break
d = dic.get('d%d' % i, None)
if d is None:
args.append(p)
else:
args.append("%s%s" % (p, d))
yield "{0}({1})".format(name, ", ".join(args))
def _extract_signature(obj_sig):
try:
signature = inspect.signature(obj_sig)
parameters = signature.parameters
except TypeError as e:
mes = "[docfx] unable to get signature of '{0}' - {1}.".format(
obj_sig, str(e).replace("\n", "\\n"))
signature = None
parameters = None
except ValueError as e:
# Backup plan, no __text_signature__, this happen
# when a function was created with pybind11.
doc = obj_sig.__doc__
sigs = set(enumerate_cleaned_signature(doc))
if len(sigs) == 0:
mes = "[docfx] unable to get signature of '{0}' - {1}.".format(
obj_sig, str(e).replace("\n", "\\n"))
signature = None
parameters = None
elif len(sigs) > 1:
mes = "[docfx] too many signatures for '{0}' - {1} - {2}.".format(
object_name, str(e).replace("\n", "\\n"), " *** ".join(sigs))
signature = None
parameters = None
else:
try:
signature = inspect._signature_fromstr(
inspect.Signature, obj_sig, list(sigs)[0])
parameters = signature.parameters
except TypeError as e:
mes = "[docfx] unable to get signature of '{0}' - {1}.".format(
obj_sig, str(e).replace("\n", "\\n"))
signature = None
parameters = None
return signature, parameters
# Given a line containing restructured keyword, returns which keyword it is.
def extract_keyword(line):
# Must be in the form of:
# .. keyword::
# where it begins with a two-dot prefix, followed by a space, then the keyword,
# followed by a two-colon suffix.
try:
return line[ 3 : line.index("::") ]
except ValueError:
# TODO: handle reST template.
if line[3] != "_":
raise ValueError(f"Wrong formatting enoucntered for \n{line}")
return line
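# Illustrative example (assumed input): extract_keyword(".. code-block:: python")
# returns "code-block"; a reST target such as ".. _anchor-name:" has no "::" and
# is returned unchanged by the ValueError branch above.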
# Given lines of code, indent to the left by 1 block, based on the
# amount of leading whitespace of the first line as 1 block.
def indent_code_left(lines):
parts = lines.split("\n")
# Count how many leading whitespace characters there are, based on the first line.
# lstrip(" ") removes all leading whitespace from the string.
tab_space = len(parts[0]) - len(parts[0].lstrip(" "))
parts = [part[tab_space:] for part in parts]
return "\n".join(parts)
def _parse_docstring_summary(summary):
summary_parts = []
attributes = []
attribute_type_token = ":type:"
keyword = name = description = var_type = ""
# We need to separate in chunks, which is defined by 3 newline breaks.
# Otherwise when parsing for code and blocks of stuff, we will not be able
# to have the entire context when just splitting by single newlines.
# We should fix this from the library side for consistent docstring style,
# rather than monkey-patching it in the plugin.
for part in summary.split("\n\n\n"):
# Don't process empty string
if part == "":
continue
# Continue adding parts for code-block.
if keyword and keyword in [CODE, CODEBLOCK]:
# If we reach the end of keyword, close up the code block.
if not part.startswith(" "*tab_space) or part.startswith(".."):
summary_parts.append("```\n")
keyword = ""
else:
if tab_space == -1:
parts = [split_part for split_part in part.split("\n") if split_part]
tab_space = len(parts[0]) - len(parts[0].lstrip(" "))
if tab_space == 0:
raise ValueError(f"Code in the code block should be indented. Please check the docstring: \n{summary}")
if not part.startswith(" "*tab_space):
# No longer looking at code-block, reset keyword.
keyword = ""
summary_parts.append("```\n")
summary_parts.append(indent_code_left(part))
continue
# Attributes come in 3 parts, parse the latter two here.
elif keyword and keyword == ATTRIBUTE:
# Second part, extract the description.
if not found_name:
description = part.strip()
found_name = True
continue
# Third part, extract the attribute type then add the completed one
# set to a list to be returned. Close up as needed.
else:
if attribute_type_token in part:
var_type = part.split(":type:")[1].strip()
keyword = ""
if name and description and var_type:
attributes.append({
"name": name,
"description": description,
"var_type": var_type
})
else:
print(f"Could not process the attribute. Please check the docstring: \n{summary}")
continue
# Parse keywords if found.
if part.startswith(".."):
try:
keyword = extract_keyword(part)
except ValueError:
raise ValueError(f"Please check the docstring: \n{summary}")
# Works for both code-block and code
if keyword and keyword in [CODE, CODEBLOCK]:
# Retrieve the language found in the format of
# .. code-block:: lang
# {lang} is optional however.
language = part.split("::")[1].strip()
summary_parts.append(f"```{language}")
tab_space = -1
# Extract the name for attribute first.
elif keyword and keyword == ATTRIBUTE:
found_name = False
name = part.split("::")[1].strip()
# Reserve for additional parts
# elif keyword == keyword:
else:
summary_parts.append(part + "\n")
else:
summary_parts.append(part + "\n")
# Close up from the keyword if needed.
if keyword and keyword in [CODE, CODEBLOCK]:
# Check if it's already closed.
if summary_parts[-1] != "```\n":
summary_parts.append("```\n")
# Requires 2 newline chars to properly show on cloud site.
return "\n".join(summary_parts), attributes
# Given documentation docstring, parse them into summary_info.
def _extract_docstring_info(summary_info, summary, name):
top_summary = ""
# Return clean summary if returning early.
parsed_text = summary
# Initialize known types needing further processing.
var_types = {
':rtype:': 'returns',
':returns:': 'returns',
':type': 'variables',
':param': 'variables',
':raises': 'exceptions',
':raises:': 'exceptions'
}
initial_index = -1
front_tag = '<xref'
end_tag = '/xref>'
end_len = len(end_tag)
# Prevent GoogleDocstring crashing on custom types and parse all xrefs to normal
if front_tag in parsed_text:
type_pairs = []
# Constant length for end of xref tag
initial_index = max(0, parsed_text.find(front_tag))
summary_part = parsed_text[initial_index:]
# Remove all occurrences of "<xref uid="uid">text</xref>"
while front_tag in summary_part:
# Expecting format of "<xref uid="uid">text</xref>"
if front_tag in summary_part:
# Retrieve the index for starting position of xref tag
initial_index += summary_part.find(front_tag)
# Find the index of the end of xref tag, relative to the start of xref tag
end_tag_index = initial_index + parsed_text[initial_index:].find(end_tag) + end_len
# Retrieve the entire xref tag
original_type = parsed_text[initial_index:end_tag_index]
initial_index += len(original_type)
original_type = " ".join(filter(None, re.split(r'\n| |\|\s|\t', original_type)))
# Extract text from "<xref uid="uid">text</xref>"
index = original_type.find(">")
safe_type = 'xref_' + original_type[index+1:index+(original_type[index:].find("<"))]
else:
raise ValueError("Encountered unexpected type in Exception docstring.")
type_pairs.append([original_type, safe_type])
summary_part = parsed_text[initial_index:]
# Replace all the found occurrences
for pairs in type_pairs:
original_type, safe_type = pairs[0], pairs[1]
parsed_text = parsed_text.replace(original_type, safe_type)
# Clean the string by cleaning newlines and backlashes, then split by white space.
config = Config(napoleon_use_param=True, napoleon_use_rtype=True)
# Convert Google style to reStructuredText
parsed_text = str(GoogleDocstring(parsed_text, config))
# Trim the top summary but maintain its formatting.
indexes = []
for types in var_types:
# Ensure that we look for exactly the string we want.
# Adding the extra space for non-colon ending types
# helps determine if we simply ran into desired occurrence
# or if we ran into a similar looking syntax but shouldn't
# parse upon it.
types += ' ' if types[-1] != ':' else ''
if types in parsed_text:
index = parsed_text.find(types)
if index > -1:
# For now, skip on parsing custom fields like attribute
if types == ':type ' and 'attribute::' in parsed_text:
continue
indexes.append(index)
# If we found types needing further processing, locate its index,
# if we found empty array for indexes, stop processing further.
index = min(indexes) if indexes else 0
# Store the top summary separately.
if index == 0:
return summary
top_summary = parsed_text[:index]
parsed_text = parsed_text[index:]
# Revert back to original type
if initial_index > -1:
for pairs in type_pairs:
original_type, safe_type = pairs[0], pairs[1]
parsed_text = parsed_text.replace(safe_type, original_type)
# Clean up whitespace and other characters
parsed_text = " ".join(filter(None, re.split(r'\|\s', parsed_text))).split()
cur_type = ''
words = []
arg_name = ''
index = 0
# Used to track return type and description
r_type, r_descr = '', ''
# Using counter iteration to easily extract names rather than
# coming up with more complicated stopping logic for each tag.
while index <= len(parsed_text):
word = parsed_text[index] if index < len(parsed_text) else ""
# Check if we encountered specific words.
if word in var_types or index == len(parsed_text):
# Finish processing previous section.
if cur_type:
if cur_type == ':type':
summary_info[var_types[cur_type]][arg_name]['var_type'] = " ".join(words)
elif cur_type == ':param':
summary_info[var_types[cur_type]][arg_name]['description'] = " ".join(words)
elif ":raises" in cur_type:
summary_info[var_types[cur_type]].append({
'var_type': arg_name,
'description': " ".join(words)
})
else:
if cur_type == ':rtype:':
r_type = " ".join(words)
else:
r_descr = " ".join(words)
if r_type and r_descr:
summary_info[var_types[cur_type]].append({
'var_type': r_type,
'description': r_descr
})
r_type, r_descr = '', ''
else:
# If we get into this state after processing the top summary, we likely
# encountered a type that's not covered above or the docstring was badly
# formatted. This would likely break the docfx job later on, so do not
# process further.
if word not in var_types:
raise ValueError(f"Encountered wrong formatting, please check docstring for {name}")
# Reached end of string, break after finishing processing
if index == len(parsed_text):
break
# Start processing for new section
cur_type = word
if cur_type in [':type', ':param', ':raises', ':raises:']:
index += 1
# Exceptions that aren't xrefs should be treated the same as other names
if ':raises' not in cur_type or 'xref' not in parsed_text[index]:
arg_name = parsed_text[index][:-1]
# xrefs are handled by taking their second half and combining the two parts
elif ':raises' in cur_type and 'xref' in parsed_text[index]:
arg_name = f'{parsed_text[index]} {parsed_text[index+1][:-1]}'
index += 1
try:
# Initialize empty dictionary if it doesn't exist already
if arg_name not in summary_info[var_types[cur_type]] and ':raises' not in cur_type:
summary_info[var_types[cur_type]][arg_name] = {}
except KeyError:
raise KeyError(f"Encountered wrong formatting, please check docstring for {name}")
# Empty target string
words = []
else:
words.append(word)
index += 1
return top_summary
# Returns appropriate product name to display for given full name of entry.
def extract_product_name(name):
if 'google.cloud' in name:
product_name = '.'.join(name.split('.')[2:])
elif 'google' in name:
product_name = '.'.join(name.split('.')[1:])
else:
# Use the short name for other formats.
product_name = name.split('.')[-1]
return product_name
def _create_datam(app, cls, module, name, _type, obj, lines=None):
"""
Build the data structure for an autodoc class
"""
def _update_friendly_package_name(path):
package_name_index = path.find(os.sep)
package_name = path[:package_name_index]
if len(package_name) > 0:
try:
for name in namespace_package_dict:
if re.match(name, package_name) is not None:
package_name = namespace_package_dict[name]
path = os.path.join(package_name, path[package_name_index + 1:])
return path
except NameError:
pass
return path
if lines is None:
lines = []
short_name = name.split(".")[-1]
args = []
# Check how many arguments are present in the function.
arg_count = 0
try:
if _type in [METHOD, FUNCTION]:
argspec = inspect.getfullargspec(obj) # noqa
type_map = {}
if argspec.annotations:
for annotation in argspec.annotations:
if annotation == "return":
continue
# Extract names for simple types.
try:
type_map[annotation] = (argspec.annotations[annotation]).__name__
# Try to extract names for more complicated types.
except AttributeError:
vartype = argspec.annotations[annotation]
try:
type_map[annotation] = str(vartype._name)
if vartype.__args__:
type_map[annotation] += str(vartype.__args__)[:-2] + ")"
except AttributeError:
print(f"Could not parse argument information for {annotation}.")
continue
# Add up the number of arguments. `argspec.args` contains a list of
# all the arguments from the function.
arg_count += len(argspec.args)
for arg in argspec.args:
arg_map = {}
# Ignore adding an entry for "cls"
if arg != 'cls':
arg_map['id'] = arg
if arg in type_map:
arg_map['var_type'] = type_map[arg]
args.append(arg_map)
if argspec.varargs:
args.append({'id': argspec.varargs})
if argspec.varkw:
args.append({'id': argspec.varkw})
if argspec.defaults:
# Attempt to add default values to arguments.
try:
for count, default in enumerate(argspec.defaults):
# Find the offset at which default arguments start.
# Every argument from this offset onward has a default value.
offset_count = len(argspec.defaults)
# Find the index of the current default value argument
index = len(args) + count - offset_count
# Only add defaultValue when str(default) doesn't contain an object address string ("object at 0x"),
# since the inspected argspec returns defaults containing object addresses for some values, like sys.stdout.
if 'object at 0x' not in str(default):
args[index]['defaultValue'] = str(default)
# If we cannot find the argument, it is missing a type and was taken out intentionally.
except IndexError:
pass
try:
lines = inspect.getdoc(obj)
lines = lines.split("\n") if lines else []
except TypeError as e:
print("couldn't getdoc from method, function: {}".format(e))
elif _type in [PROPERTY]:
lines = inspect.getdoc(obj)
lines = lines.split("\n") if lines else []
except TypeError as e:
print("Can't get argspec for {}: {}. {}".format(type(obj), name, e))
if name in app.env.docfx_signature_funcs_methods:
sig = app.env.docfx_signature_funcs_methods[name]
else:
sig = None
try:
full_path = inspect.getsourcefile(obj)
if full_path is None: # Meet a .pyd file
raise TypeError()
# Sub git repo path
path = full_path.replace(app.env.docfx_root, '')
# Support global file imports, if it's installed already
import_path = os.path.dirname(inspect.getfile(os))
path = path.replace(os.path.join(import_path, 'site-packages'), '')
path = path.replace(import_path, '')
# Make relative
path = path.replace(os.sep, '', 1)
start_line = inspect.getsourcelines(obj)[1]
path = _update_friendly_package_name(path)
# Get folder name from conf.py
path = os.path.join(app.config.folder, path)
app.env.docfx_class_paths[cls] = path
# append relative path defined in conf.py (in case of "binding python" project)
try:
source_prefix # does source_prefix exist in the current namespace
path = source_prefix + path
app.env.docfx_class_paths[cls] = path
except NameError:
pass
except (TypeError, OSError):
# TODO: remove this once there is full handler for property
if _type in [PROPERTY]:
print("Skip inspecting for property: {}".format(name))
else:
print("Can't inspect type {}: {}".format(type(obj), name))
path = None
start_line = None
datam = {
'module': module,
'uid': name,
'type': _type,
'name': short_name,
'fullName': name,
'source': {
'remote': {
'path': path,
'branch': app.env.docfx_branch,
'repo': app.env.docfx_remote,
},
'id': short_name,
'path': path,
'startLine': start_line,
},
'langs': ['python'],
}
summary_info = {
'variables': {}, # Stores mapping of variables and its description & types
'returns': [], # Stores the return info
'exceptions': [] # Stores the exception info
}
# Add extracted summary
if lines != []:
for ref_pattern in REF_PATTERNS:
lines, xrefs = _resolve_reference_in_module_summary(ref_pattern, lines)
for xref in xrefs:
if xref not in app.env.docfx_xrefs:
app.env.docfx_xrefs[xref] = ''
summary = app.docfx_transform_string('\n'.join(_refact_example_in_module_summary(lines)))
# Extract summary info into respective sections.
if summary:
top_summary = _extract_docstring_info(summary_info, summary, name)
try:
datam['summary'], datam['attributes'] = _parse_docstring_summary(top_summary)
except ValueError:
debug_line = []
if path:
debug_line.append(f"In file {path}\n")
debug_line.append(f"For module {module}, type {_type}:\n")
debug_line.append(f"Failed to parse docstring on {name}.")
raise ValueError("".join(debug_line))
# If there is no summary, add a short snippet.
else:
product_name = extract_product_name(name)
datam['summary'] = f"API documentation for `{product_name}` {_type}."
if args or sig or summary_info:
datam['syntax'] = {}
# If there are well-formatted arguments or a lot of arguments we should look
# into, loop through what we got from the docstring.
if args or arg_count > 0:
variables = summary_info['variables']
arg_id = []
for arg in args:
arg_id.append(arg['id'])
if arg['id'] in variables:
# Retrieve argument info from extracted map of variable info
arg_var = variables[arg['id']]
arg['var_type'] = arg_var.get('var_type') if arg_var.get('var_type') else ''
arg['description'] = arg_var.get('description') if arg_var.get('description') else ''
# Add any variables we might have missed from extraction.
for variable in variables:
if variable not in arg_id:
new_arg = {
"id": variable,
"var_type": variables[variable].get('var_type'),
"description": variables[variable].get('description')
}
args.append(new_arg)
datam['syntax']['parameters'] = args
if sig:
datam['syntax']['content'] = sig
if summary_info['returns']:
datam['syntax']['returns'] = summary_info['returns']
if summary_info['exceptions']:
datam['syntax']['exceptions'] = summary_info['exceptions']
if cls:
datam[CLASS] = cls
if _type in [CLASS, MODULE]:
datam['children'] = []
datam['references'] = []
return datam
def _fullname(obj):
"""
Get the fullname from a Python object
"""
return obj.__module__ + "." + obj.__name__
def process_docstring(app, _type, name, obj, options, lines):
"""
This function takes the docstring and indexes it into memory.
"""
# Check if we already processed this docstring.
if name in app.env.docfx_uid_names:
return
# Register current docstring to a set.
app.env.docfx_uid_names[name] = ''
# Use exception as class
if _type == EXCEPTION:
_type = CLASS
cls, module = _get_cls_module(_type, name)
if not module and _type != PROPERTY:
print('Unknown Type: %s' % _type)
return None
datam = _create_datam(app, cls, module, name, _type, obj, lines)
if _type == MODULE:
if module not in app.env.docfx_yaml_modules:
app.env.docfx_yaml_modules[module] = [datam]
else:
app.env.docfx_yaml_modules[module].append(datam)
if _type == CLASS or _type == PROPERTY:
if cls not in app.env.docfx_yaml_classes:
app.env.docfx_yaml_classes[cls] = [datam]
else:
app.env.docfx_yaml_classes[cls].append(datam)
if _type == FUNCTION and app.config.autodoc_functions:
if datam['uid'] is None:
raise ValueError("Issue with {0} (name={1})".format(datam, name))
if cls is None:
cls = name
if cls is None:
raise ValueError("cls is None for name='{1}' {0}".format(datam, name))
if cls not in app.env.docfx_yaml_functions:
app.env.docfx_yaml_functions[cls] = [datam]
else:
app.env.docfx_yaml_functions[cls].append(datam)
insert_inheritance(app, _type, obj, datam)
insert_children_on_module(app, _type, datam)
insert_children_on_class(app, _type, datam)
insert_children_on_function(app, _type, datam)
app.env.docfx_info_uid_types[datam['uid']] = _type
# Uses black.format_str() to reformat code as if running black/linter
# for better presentation.
def format_code(code):
# Signature code comes in raw text without formatting, to run black it
# requires the code to look like actual function declaration in code.
# Returns the original formatted code without the added bits.
return black.format_str("def " + code + ": pass", mode=black.FileMode())[4:-11]
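# Illustrative example (assuming black's default settings): format_code("foo(a,b=1)")
# would come back as "foo(a, b=1)", i.e. the signature with black's normalized spacing.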
def process_signature(app, _type, name, obj, options, signature, return_annotation):
if signature:
short_name = name.split('.')[-1]
signature = short_name + signature
try:
signature = format_code(signature)
except InvalidInput as e:
print(f"Could not format the given code: \n{e})")
app.env.docfx_signature_funcs_methods[name] = signature
def insert_inheritance(app, _type, obj, datam):
def collect_inheritance(base, to_add):
for new_base in base.__bases__:
new_add = {'type': _fullname(new_base)}
collect_inheritance(new_base, new_add)
if 'inheritance' not in to_add:
to_add['inheritance'] = []
to_add['inheritance'].append(new_add)
if hasattr(obj, '__bases__'):
if 'inheritance' not in datam:
datam['inheritance'] = []
for base in obj.__bases__:
to_add = {'type': _fullname(base)}
collect_inheritance(base, to_add)
datam['inheritance'].append(to_add)
def insert_children_on_module(app, _type, datam):
"""
Insert children of a specific module
"""
if MODULE not in datam or datam[MODULE] not in app.env.docfx_yaml_modules:
return
insert_module = app.env.docfx_yaml_modules[datam[MODULE]]
# Find the module which the datam belongs to
for obj in insert_module:
# Add standalone function to its module
if _type in [FUNCTION] and \
obj['type'] == MODULE and \
obj[MODULE] == datam[MODULE]:
obj['children'].append(datam['uid'])
# If it is a function, add this to its module. No need for class and module since this is
# done before calling this function.
insert_module.append(datam)
obj['references'].append(_create_reference(datam, parent=obj['uid']))
break
# Add classes & exceptions to module
if _type in [CLASS, EXCEPTION] and \
obj['type'] == MODULE and \
obj[MODULE] == datam[MODULE]:
obj['children'].append(datam['uid'])
obj['references'].append(_create_reference(datam, parent=obj['uid']))
break
if _type in [MODULE]: # Make sure datam is a module.
# Add this module(datam) to parent module node
if datam[MODULE].count('.') >= 1:
parent_module_name = '.'.join(datam[MODULE].split('.')[:-1])
if parent_module_name not in app.env.docfx_yaml_modules:
return
insert_module = app.env.docfx_yaml_modules[parent_module_name]
for obj in insert_module:
if obj['type'] == MODULE and obj[MODULE] == parent_module_name:
obj['children'].append(datam['uid'])
obj['references'].append(_create_reference(datam, parent=obj['uid']))
break
# Add datam's child modules to it. This relies on Python passing dicts by reference;
# revisit if that behavior ever changes in a future Python release.
# Time complexity: O(N^2)
for module, module_contents in app.env.docfx_yaml_modules.items():
if module != datam['uid'] and \
module[:module.rfind('.')] == datam['uid']: # Current module is submodule/subpackage of datam
for obj in module_contents: # Traverse module's contents to find the module itself.
if obj['type'] == MODULE and obj['uid'] == module:
datam['children'].append(module)
datam['references'].append(_create_reference(obj, parent=module))
break
def insert_children_on_class(app, _type, datam):
"""
Insert children of a specific class
"""
if CLASS not in datam:
return
insert_class = app.env.docfx_yaml_classes[datam[CLASS]]
# Find the parent class using the module for subclasses of a class.
parent_class = app.env.docfx_yaml_classes.get(datam[MODULE])
# Find the class which the datam belongs to
for obj in insert_class:
if obj['type'] != CLASS:
continue
# Add subclass & methods & attributes & properties to class
if _type in [METHOD, ATTRIBUTE, PROPERTY, CLASS] and \
(obj[CLASS] == datam[CLASS] and obj != datam):
obj['children'].append(datam['uid'])
obj['references'].append(_create_reference(datam, parent=obj['uid']))
insert_class.append(datam)
# If there is a parent class, determine if current class is a subclass.
if not parent_class:
return
for obj in parent_class:
if obj['type'] != CLASS:
continue
if _type == CLASS and obj['class'] == datam['module']:
# No need to add datam to the parent class.
obj['children'].append(datam['uid'])
obj['references'].append(_create_reference(datam, parent=obj['uid']))
def insert_children_on_function(app, _type, datam):
"""
Insert children of a specific class
"""
if FUNCTION not in datam:
return
insert_functions = app.env.docfx_yaml_functions[datam[FUNCTION]]
insert_functions.append(datam)
# Parses the package name and returns unique identifer and name.
def find_unique_name(package_name, entries):
for name in package_name:
# Only find unique identifiers besides "google" and "cloud"
# For example, if given
# "google.cloud.spanner.v1.params_v1.types"
# "google.cloud.spanner.v1.instance_v1.types"
# it will return "instace_v1" or "params_v1" and "types".
# Also ensure that if name == package_name[-1], we only return one of
# the duplicate and not both.
if name != "google" and name != "cloud" and entries[name] == 1 and name != package_name[-1]:
return [name, package_name[-1]]
# If there is no way to disambiguate or we found duplicates, return the identifier name.
return [package_name[-1]]
# Used to disambiguate names that have the same entries.
# Returns a dictionary of names that are disambiguated in the form of:
# {uidname: disambiguated_name}
def disambiguate_toc_name(pkg_toc_yaml):
name_entries = {}
disambiguated_names = {}
for module in pkg_toc_yaml:
module_name = module['name']
if module_name not in name_entries:
name_entries[module_name] = {}
# Split the name and mark all duplicates.
# There will be at least 1 unique identifier for each name.
for part in module['uidname'].split("."):
if part not in name_entries[module_name]:
name_entries[module_name][part] = 1
else:
name_entries[module_name][part] += 1
# Some entries don't contain `name` in `uidname`, add these into the map as well.
if module_name not in name_entries[module_name]:
name_entries[module_name][module_name] = 1
if 'items' in module:
# Update the dictionary of disambiguated names
disambiguated_names.update(disambiguate_toc_name(module['items']))
for module in pkg_toc_yaml:
module_name = module['name']
# Check if there are multiple entries of module['name'], disambiguate if needed.
if name_entries[module_name][module_name] > 1:
module['name'] = ".".join(find_unique_name(module['uidname'].split("."), name_entries[module_name]))
disambiguated_names[module['uidname']] = module['name']
return disambiguated_names
# Combines pkg_toc_yaml entries with similar version headers.
def group_by_package(pkg_toc_yaml):
new_pkg_toc_yaml = []
package_groups = {}
for module in pkg_toc_yaml:
package_group = find_package_group(module['uidname'])
if package_group not in package_groups:
package_name = pretty_package_name(package_group)
package_groups[package_group] = {
"name": package_name,
"uidname": package_group,
"items": []
}
package_groups[package_group]['items'].append(module)
for package_group in package_groups:
new_pkg_toc_yaml.append(package_groups[package_group])
return new_pkg_toc_yaml
# Given the full uid, return the package group including its prefix.
def find_package_group(uid):
return ".".join(uid.split(".")[:3])
# Given the package group, make its name presentable.
def pretty_package_name(package_group):
name = ""
# Retrieve only the package name
split_name = package_group.split(".")[-1]
# Capitalize the first letter of each package name part
capitalized_name = [part.capitalize() for part in split_name.split("_")]
return " ".join(capitalized_name)
# Check if the current line conforms to markdown header format.
def parse_markdown_header(header_line, prev_line):
# A markdown h1 prefix should be a single '#' character followed by exactly one space.
h1_header_prefix = "# "
if h1_header_prefix in header_line and header_line.count("#") == 1:
# Check for proper h1 header formatting: ensure there's more than just
# the hash character and exactly one space after it.
if len(header_line) > 2 and \
not header_line[header_line.index(h1_header_prefix)+2].isspace():
return header_line.strip("#").strip()
elif "=" in header_line:
# Check if we're inspecting an empty or undefined line.
if not prev_line:
return ""
# Check if the current line only has equal sign divider.
if header_line.count("=") == len(header_line.strip()):
# Update header to the previous line.
return prev_line.strip()
return ""
# For a given markdown file, extract its header line.
def extract_header_from_markdown(mdfile_iterator):
mdfile_name = mdfile_iterator.name.split("/")[-1].split(".")[0].capitalize()
prev_line = ""
for header_line in mdfile_iterator:
# Ignore licenses and other non-headers prior to the header.
header = parse_markdown_header(header_line, prev_line)
# If we've found the header, return the header.
if header != "":
return header
prev_line = header_line
print(f"Could not find a title for {mdfile_iterator.name}. Using {mdfile_name} as the title instead.")
return mdfile_name
# Given generated markdown files, incorporate them into the docfx_yaml output.
# The markdown file metadata will be added to the top level of the TOC.
def find_markdown_pages(app, outdir):
# Use this to ignore markdown files that are unnecessary.
files_to_ignore = [
"index.md", # merge index.md and README.md and index.yaml later.
# See https://github.com/googleapis/sphinx-docfx-yaml/issues/105.
"reference.md", # Reference docs overlap with Overview. Will try and incorporate this in later.
# See https://github.com/googleapis/sphinx-docfx-yaml/issues/106.
"readme.md", # README does not seem to work in cloud site
# See https://github.com/googleapis/sphinx-docfx-yaml/issues/107.
"upgrading.md", # Currently the formatting breaks, will need to come back to it.
# See https://github.com/googleapis/sphinx-docfx-yaml/issues/108.
]
markdown_dir = Path(app.builder.outdir).parent / "markdown"
if not markdown_dir.exists():
print("There's no markdown file to move.")
return
# For each file, if it is a markdown file move to the top level pages.
for mdfile in markdown_dir.iterdir():
if mdfile.is_file() and mdfile.name.lower() not in files_to_ignore:
shutil.copy(mdfile, f"{outdir}/{mdfile.name.lower()}")
# Extract the header name for TOC.
with open(mdfile) as mdfile_iterator:
name = extract_header_from_markdown(mdfile_iterator)
# Add the file to the TOC later.
app.env.markdown_pages.append({
'name': name,
'href': mdfile.name.lower(),
})
# Finds and replaces occurrences which should be a cross reference in the given
# content, except for the current name.
def convert_cross_references(content: str, current_name: str, entry_names: List[str]):
words = content.split(" ")
new_words = []
# Using counter to check if the entry is already a cross reference.
for index, word in enumerate(words):
cross_reference = ""
for keyword in entry_names:
if keyword != current_name and keyword not in current_name and keyword in word:
# If it is already processed as cross reference, skip over it.
if "<xref" in words[index-1] or (new_words and f"<xref uid=\"{keyword}" in new_words[-1]):
continue
cross_reference = f"<xref uid=\"{keyword}\">{keyword}</xref>"
new_words.append(word.replace(keyword, cross_reference))
print(f"Converted {keyword} into cross reference in: \n{content}")
# If cross reference has not been found, add current unchanged content.
if not cross_reference:
new_words.append(word)
return " ".join(new_words)
# Used to look for cross references in the obj's data where applicable.
# For now, we inspect summary, syntax and attributes.
def search_cross_references(obj, current_name: str, entry_names: List[str]):
if obj.get("summary"):
obj["summary"] = convert_cross_references(obj["summary"], current_name, entry_names)
if obj.get("syntax"):
if obj["syntax"].get("parameters"):
for param in obj["syntax"]["parameters"]:
if param.get("description"):
param["description"] = convert_cross_references(
param["description"],
current_name,
entry_names
)
if param.get("id"):
param["id"] = convert_cross_references(
param["id"],
current_name,
entry_names
)
if param.get("var_type"):
param["var_type"] = convert_cross_references(
param["var_type"],
current_name,
entry_names
)
if obj["syntax"].get("exceptions"):
for exception in obj["syntax"]["exceptions"]:
if exception.get("description"):
exception["description"] = convert_cross_references(
exception["description"],
current_name,
entry_names
)
if exception.get("var_type"):
exception["var_type"] = convert_cross_references(
exception["var_type"],
current_name,
entry_names
)
if obj["syntax"].get("returns"):
for ret in obj["syntax"]["returns"]:
if ret.get("description"):
ret["description"] = convert_cross_references(
ret["description"],
current_name,
entry_names
)
if ret.get("var_type"):
ret["var_type"] = convert_cross_references(
ret["var_type"],
current_name,
entry_names
)
if obj.get("attributes"):
for attribute in obj["attributes"]:
if attribute.get("description"):
attribute["description"] = convert_cross_references(
attribute["description"],
current_name,
entry_names
)
if attribute.get("name"):
attribute["name"] = convert_cross_references(
attribute["name"],
current_name,
entry_names
)
if attribute.get("var_type"):
attribute["var_type"] = convert_cross_references(
attribute["var_type"],
current_name,
entry_names
)
def build_finished(app, exception):
"""
Output YAML on the file system.
"""
# Used to get rid of the uidname field for cleaner toc file.
def sanitize_uidname_field(pkg_toc_yaml):
for module in pkg_toc_yaml:
if 'items' in module:
sanitize_uidname_field(module['items'])
module.pop('uidname')
def find_node_in_toc_tree(pkg_toc_yaml, to_add_node):
for module in pkg_toc_yaml:
if module['uidname'] == to_add_node:
return module
if 'items' in module:
items = module['items']
found_module = find_node_in_toc_tree(items, to_add_node)
if found_module != None:
return found_module
return None
def convert_module_to_package_if_needed(obj):
if 'source' in obj and 'path' in obj['source'] and obj['source']['path']:
if obj['source']['path'].endswith(INITPY):
obj['type'] = 'subPackage'
product_name = extract_product_name(obj['fullName'])
obj['summary'] = f"API documentation for `{product_name}` package."
return
for child_uid in obj['children']:
if child_uid in app.env.docfx_info_uid_types:
child_uid_type = app.env.docfx_info_uid_types[child_uid]
if child_uid_type == MODULE:
obj['type'] = 'package'
return
normalized_outdir = os.path.normpath(os.path.join(
app.builder.outdir, # Output Directory for Builder
API_ROOT,
))
ensuredir(normalized_outdir)
# Add markdown pages to the configured output directory.
find_markdown_pages(app, normalized_outdir)
pkg_toc_yaml = []
# Used to record dumped filenames to avoid conflicts
# caused by Windows' case-insensitive file system
file_name_set = set()
# Used to disambiguate entry names
yaml_map = {}
# Order matters here: we need modules before lower-level classes
# so that we can make sure to inject the TOC properly
for data_set in (app.env.docfx_yaml_modules,
app.env.docfx_yaml_classes,
app.env.docfx_yaml_functions): # noqa
for uid, yaml_data in iter(sorted(data_set.items())):
if not uid:
# Skip objects without a module
continue
references = []
# Merge module data with class data
for obj in yaml_data:
arg_params = obj.get('syntax', {}).get('parameters', [])
if(len(arg_params) > 0 and 'id' in arg_params[0] and arg_params[0]['id'] == 'self'):
# Support having `self` as an arg param, but not documented
arg_params = arg_params[1:]
obj['syntax']['parameters'] = arg_params
if obj['uid'] in app.env.docfx_info_field_data and \
obj['type'] == app.env.docfx_info_field_data[obj['uid']]['type']:
# Avoid entities with the same uid but a different type.
del(app.env.docfx_info_field_data[obj['uid']]['type']) # Delete `type` temporarily
if 'syntax' not in obj:
obj['syntax'] = {}
merged_params = []
if 'parameters' in app.env.docfx_info_field_data[obj['uid']]:
doc_params = app.env.docfx_info_field_data[obj['uid']].get('parameters', [])
if arg_params and doc_params:
if len(arg_params) - len(doc_params) > 0:
app.warn(
"Documented params don't match size of params:"
" {}".format(obj['uid']))
# Zip the two param lists until the longer one is exhausted
for args, docs in zip_longest(arg_params, doc_params, fillvalue={}):
if len(args) == 0:
merged_params.append(docs)
else:
args.update(docs)
merged_params.append(args)
obj['syntax'].update(app.env.docfx_info_field_data[obj['uid']])
if merged_params:
obj['syntax']['parameters'] = merged_params
if 'parameters' in obj['syntax'] and obj['type'] == 'method':
for args in obj['syntax']['parameters']:
if 'isRequired' not in args and 'defaultValue' not in args:
args['isRequired'] = True
# Raise up summary
if 'summary' in obj['syntax'] and obj['syntax']['summary']:
obj['summary'] = obj['syntax'].pop('summary').strip(" \n\r\r")
# Raise up remarks
if 'remarks' in obj['syntax'] and obj['syntax']['remarks']:
obj['remarks'] = obj['syntax'].pop('remarks')
# Raise up seealso
if 'seealso' in obj['syntax'] and obj['syntax']['seealso']:
obj['seealsoContent'] = obj['syntax'].pop('seealso')
# Raise up example
if 'example' in obj['syntax'] and obj['syntax']['example']:
obj.setdefault('example', []).append(obj['syntax'].pop('example'))
# Raise up exceptions
if 'exceptions' in obj['syntax'] and obj['syntax']['exceptions']:
obj['exceptions'] = obj['syntax'].pop('exceptions')
# Raise up references
if 'references' in obj['syntax'] and obj['syntax']['references']:
obj.setdefault('references', []).extend(obj['syntax'].pop('references'))
# add content of temp list 'added_attribute' to children and yaml_data
if 'added_attribute' in obj['syntax'] and obj['syntax']['added_attribute']:
added_attribute = obj['syntax'].pop('added_attribute')
for attrData in added_attribute:
existed_Data = next((n for n in yaml_data if n['uid'] == attrData['uid']), None)
if existed_Data:
# Update data for already existed one which has attribute comment in source file
existed_Data.update(attrData)
else:
obj.get('children', []).append(attrData['uid'])
yaml_data.append(attrData)
if 'class' in attrData:
# Get parent for attrData of Non enum class
parent = attrData['class']
else:
# Get parent for attrData of enum class
parent = attrData['parent']
obj['references'].append(_create_reference(attrData, parent))
app.env.docfx_info_field_data[obj['uid']]['type'] = obj['type'] # Revert `type` for other objects to use
if 'references' in obj:
# Ensure that references have no duplicate ref
ref_uids = [r['uid'] for r in references]
for ref_obj in obj['references']:
if ref_obj['uid'] not in ref_uids:
references.append(ref_obj)
obj.pop('references')
if obj['type'] == 'module':
convert_module_to_package_if_needed(obj)
if obj['type'] == 'method':
# Update the name to use shorter name to show
obj['name'] = obj['source']['id']
# To distinguish between distribution packages and import packages
if obj.get('type', '') == 'package' and obj.get('kind', '') != 'distribution':
obj['kind'] = 'import'
try:
if remove_inheritance_for_notfound_class:
if 'inheritance' in obj:
python_sdk_name = obj['uid'].split('.')[0]
obj['inheritance'] = [n for n in obj['inheritance'] if not n['type'].startswith(python_sdk_name) or
n['type'] in app.env.docfx_info_uid_types]
if not obj['inheritance']:
obj.pop('inheritance')
except NameError:
pass
if 'source' in obj and (not obj['source']['remote']['repo'] or \
obj['source']['remote']['repo'] == 'https://apidrop.visualstudio.com/Content%20CI/_git/ReferenceAutomation'):
del(obj['source'])
# Extract any missing cross references where applicable.
# Potential targets are instances of the full uid shown, or
# a short form of the uid of one of the current
# package's items. For example:
# cross reference candidates:
# google.cloud.bigquery_storage_v1.types.storage.SplitReadStreamResponse
# SplitReadStreamResponse
# non-candidates:
# (not from the same library)
# google.cloud.aiplatform.AutoMLForecastingTrainingJob
current_name = obj["fullName"]
entry_names = sorted(app.env.docfx_uid_names.keys(), reverse=True)
# Currently we only need to look in summary, syntax and
# attributes for cross references.
search_cross_references(obj, current_name, entry_names)
yaml_map[uid] = [yaml_data, references]
# Parse the name of the object.
# Some types will need additional parsing to de-duplicate their names and contain
# a portion of their parent name for better disambiguation. This is done in
# disambiguate_toc_name
node_name = uid.split(".")[-1]
# Build nested TOC
if uid.count('.') >= 1:
parent_level = '.'.join(uid.split('.')[:-1])
found_node = find_node_in_toc_tree(pkg_toc_yaml, parent_level)
if found_node:
found_node.setdefault(
'items',
[{'name': 'Overview', 'uidname': parent_level, 'uid': parent_level}]
).append({
'name': node_name,
'uidname': uid,
'uid': uid
})
else:
pkg_toc_yaml.append({
'name': node_name,
'uidname': uid,
'uid': uid
})
else:
pkg_toc_yaml.append({
'name': node_name,
'uidname': uid,
'uid': uid
})
# Exit if there are no generated YAML pages or Markdown pages.
if len(pkg_toc_yaml) == 0 and len(app.env.markdown_pages) == 0:
raise RuntimeError("No documentation for this module.")
pkg_toc_yaml = group_by_package(pkg_toc_yaml)
# Perform additional disambiguation of the name
disambiguated_names = disambiguate_toc_name(pkg_toc_yaml)
# The uidname field carries over onto the toc.yaml files; we need to keep
# using it internally but don't need it in the actual file
pkg_toc_yaml_with_uid = copy.deepcopy(pkg_toc_yaml)
sanitize_uidname_field(pkg_toc_yaml)
toc_file = os.path.join(normalized_outdir, 'toc.yml')
with open(toc_file, 'w') as writable:
writable.write(
dump(
[{
'name': app.config.project,
'items': [{'name': 'Overview', 'uid': 'project-' + app.config.project}] + app.env.markdown_pages + pkg_toc_yaml
}],
default_flow_style=False,
)
)
# Output files
for uid, data in iter(yaml_map.items()):
for yaml_item in data:
for obj in yaml_item:
# If the entry was disambiguated, update here:
obj_full_name = obj['fullName']
if disambiguated_names.get(obj_full_name):
obj['name'] = disambiguated_names[obj_full_name]
if obj['type'] == 'subPackage':
obj['summary'] = "API documentation for `{}` package.".format(obj['name'])
# data is formatted as [yaml_data, references]
yaml_data, references = data
if uid.lower() in file_name_set:
filename = uid + "(%s)" % app.env.docfx_info_uid_types[uid]
else:
filename = uid
out_file = os.path.join(normalized_outdir, '%s.yml' % filename)
ensuredir(os.path.dirname(out_file))
if app.verbosity >= 1:
app.info(bold('[docfx_yaml] ') + darkgreen('Outputting %s' % filename))
with open(out_file, 'w') as out_file_obj:
out_file_obj.write('### YamlMime:UniversalReference\n')
try:
dump(
{
'items': yaml_data,
'references': references,
'api_name': [], # Hack around docfx YAML
},
out_file_obj,
default_flow_style=False
)
except Exception as e:
raise ValueError("Unable to dump object\n{0}".format(yaml_data)) from e
file_name_set.add(filename)
index_file = os.path.join(normalized_outdir, 'index.yml')
index_children = []
index_references = []
for package in pkg_toc_yaml_with_uid:
for item in package.get("items"):
index_children.append(item.get('uidname', ''))
index_references.append({
'uid': item.get('uidname', ''),
'name': item.get('name', ''),
'fullname': item.get('uidname', ''),
'isExternal': False
})
with open(index_file, 'w') as index_file_obj:
index_file_obj.write('### YamlMime:UniversalReference\n')
dump(
{
'items': [{
'uid': 'project-' + app.config.project,
'name': app.config.project,
'fullName': app.config.project,
'langs': ['python'],
'type': 'package',
'kind': 'distribution',
'summary': 'Reference API documentation for `{}`.'.format(app.config.project),
'children': index_children
}],
'references': index_references
},
index_file_obj,
default_flow_style=False
)
'''
# TODO: handle xref for other products.
xref_file = os.path.join(normalized_outdir, 'xrefs.yml')
with open(xref_file, 'w') as xref_file_obj:
for xref in app.env.docfx_xrefs:
xref_file_obj.write(f'{xref}\n')
'''
def missing_reference(app, env, node, contnode):
reftarget = ''
refdoc = ''
reftype = ''
module = ''
if 'refdomain' in node.attributes and node.attributes['refdomain'] == 'py':
reftarget = node['reftarget']
reftype = node['reftype']
if 'refdoc' in node:
refdoc = node['refdoc']
if 'py:module' in node:
module = node['py:module']
# Refactor reftarget to its full name if it is a short name
if reftype in [CLASS, REFFUNCTION, REFMETHOD] and module and '.' not in reftarget:
if reftype in [CLASS, REFFUNCTION]:
fields = (module, reftarget)
else:
fields = (module, node['py:class'], reftarget)
reftarget = '.'.join(field for field in fields if field is not None)
return make_refnode(app.builder, refdoc, reftarget, '', contnode)
def setup(app):
"""
Plugin init for our Sphinx extension.
Args:
app (Application): The Sphinx application instance
"""
app.setup_extension('sphinx.ext.autodoc')
app.connect('autodoc-process-docstring', _process_docstring)
app.add_node(remarks, html = (remarks.visit_remarks, remarks.depart_remarks))
app.add_directive('remarks', RemarksDirective)
app.add_directive('todo', TodoDirective)
app.connect('builder-inited', build_init)
app.connect('autodoc-process-docstring', process_docstring)
app.connect('autodoc-process-signature', process_signature)
app.connect('build-finished', build_finished)
app.connect('missing-reference', missing_reference)
app.add_config_value('docfx_yaml_output', API_ROOT, 'html')
app.add_config_value('folder', '', 'html')
app.add_config_value('autodoc_functions', False, 'env')
|
StarcoderdataPython
|
9646863
|
from setuptools import setup, find_packages
setup(
name = "Milkman",
version = "0.1",
packages = find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
# Project uses reStructuredText, so ensure that the docutils get
# installed or upgraded on the target machine
install_requires = ['docutils>=0.3'],
package_data = {
'': ['*.txt', '*.rst'],
},
# metadata for upload to PyPI
author = "<NAME>",
author_email = "<EMAIL>",
description = "A Django model generator to replace fixtures for testing",
license = "BSD",
keywords = "django testing mock stub",
url = "http://github.com/wilkes/milkman",
)
|
StarcoderdataPython
|
3489291
|
<reponame>xmdy/h9eNi8F5Ut
from __future__ import absolute_import
import os
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'application.settings')
app = Celery('application')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
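# Illustrative only: with this configuration Celery discovers any `tasks.py` module
# inside the installed Django apps, so a task can be declared as, e.g.:
#
# @app.task
# def add(x, y):
#     return x + y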
|
StarcoderdataPython
|
3220882
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class GmapGsnap(AutotoolsPackage):
"""GMAP: A Genomic Mapping and Alignment Program for
mRNA and EST Sequences, and GSNAP: Genomic Short-read
Nucleotide Alignment Program"""
homepage = "http://research-pub.gene.com/gmap/"
url = "http://research-pub.gene.com/gmap/src/gmap-gsnap-2017-06-16.tar.gz"
version('2020-06-01', sha256='7917f9f78570943f419445e371f2cc948c6741e73c3cbb063391756f4479d365')
version('2019-05-12', sha256='3dc1b6ee4f6c049c07bcf4a5aba30eb2d732997241cdcad818dab571719f8008')
version('2019-02-15', sha256='7e82b9867a1e561b4816fb2f2fb916294077c384c6a88bb94cce39bfe71ab3ac')
version('2018-07-04', sha256='a9f8c1f0810df65b2a089dc10be79611026f4c95e4681dba98fea3d55d598d24')
version('2018-03-25', sha256='a65bae6115fc50916ad7425d0b5873b611c002690bf35026bfcfc41ee0c0265a')
version('2018-02-12', sha256='5dedddab7f08f9924a995332ebc7bdbe2621fcd67021690707c876d865091fcc')
version('2017-06-16', sha256='2a277a6d45cade849be3bfb7b0f69f61ab79744af821a88eb1d599b32f358f8d')
version('2014-12-28', sha256='108433f3e3ea89b8117c8bb36d396913225caf1261d46ce6d89709ff1b44025d')
depends_on('zlib')
depends_on('bzip2')
variant(
'simd',
description='CPU support.',
values=('avx2', 'sse42', 'avx512', 'sse2'),
multi=True,
default='sse2'
)
def configure(self, spec, prefix):
configure = Executable('../configure')
for simd in spec.variants['simd'].value:
with working_dir(simd, create=True):
configure('--with-simd-level={0}'.format(simd),
'--prefix={0}'.format(prefix))
def build(self, spec, prefix):
for simd in spec.variants['simd'].value:
with working_dir(simd):
make()
def check(self):
for simd in self.spec.variants['simd'].value:
with working_dir(simd):
make('check')
def install(self, spec, prefix):
for simd in spec.variants['simd'].value:
with working_dir(simd):
make('install')
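# Illustrative spec (hypothetical request): `spack install gmap-gsnap simd=avx2,sse42`
# configures, builds and installs the package once per selected SIMD level.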
|
StarcoderdataPython
|
11249254
|
"""ppm cache get命令的处理."""
from pmfp.utils.remote_cache_utils import SourcePack
from pmfp.utils.tools_info_utils import get_cache_dir
from .core import cache_get
@cache_get.as_main
def get_sourcepack(source_pack_string: str) -> None:
"""从远程指定位置获取资源包.
Args:
source_pack_string (str): 描述资源包的字符串,格式为"[{host}::]{repo_namespace}::{repo_name}[@{tag}]".
"""
sourcepack = SourcePack.from_sourcepack_string(source_pack_string)
cache_dir = get_cache_dir()
sourcepack.cache(cache_dir)
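# Illustrative call (hypothetical repo coordinates), following the documented format:
# get_sourcepack("github.com::some-namespace::some-repo@v0.1.0")
# would resolve the pack and cache it under the directory returned by get_cache_dir().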
|
StarcoderdataPython
|
12856137
|
# Write an algorithm that reads a product's price and shows the new price after a discount.
preco = float(input('Digite o preço atual do produto: R$ '))
desconto = float(input('Digite o valor do desconto (0.X): '))
novopreco = preco - (preco * desconto)
print('O novo preço é R$ {}.'.format(novopreco))
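# Illustrative run (assumed inputs): preco=100.0 and desconto=0.10 give novopreco=90.0.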
|
StarcoderdataPython
|
6584769
|
<reponame>ws2516/sportsbookProjects
'''
Redditor: u/NInjas101
Ask: I want to be able to track a players points rebounds assists over time and come up
with last 3 game average, last 5 game average etc
'''
import requests
import datetime
DaysBack = 21
NumGameAverage = 5
tod = datetime.datetime.now()
d = datetime.timedelta(days = DaysBack) # 3 weeks should be enough
a = tod - d
date = str(a).split(' ')[0]
playerName = input('Player Name? ')
idUrl = 'https://www.balldontlie.io/api/v1/players?search=' + playerName
respID = requests.get(idUrl)
dataID = respID.json()['data'][0]['id']
statsUrl = 'https://www.balldontlie.io/api/v1/stats?start_date='+ date +'&player_ids[]=' + str(dataID)
respStat = requests.get(statsUrl)
data = respStat.json()['data']
assists, mins = [], []
for i in range(0,NumGameAverage):
assists += [data[i]['ast']]
mins += [data[i]['min']]
print(playerName, ' scored ', assists, ' assists in the past ', NumGameAverage, 'games.')
print(playerName, ' played ', mins, ' minutes in the past ', NumGameAverage, 'games.')
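# A minimal sketch of the rolling average the docstring asks for, assuming the
# 'ast' values returned by balldontlie are plain numbers:
avg_assists = sum(assists) / NumGameAverage
print(playerName, ' averaged ', round(avg_assists, 2), ' assists over the last ', NumGameAverage, 'games.')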
|
StarcoderdataPython
|
5172597
|
<gh_stars>0
class SpaceAge:
# Earth year in seconds; other planets expressed as ratios of an Earth year
EARTH_YEAR = 31557600.0
MERCURY_YEAR_RATIO = 0.2408467
VENUS_YEAR_RATIO = 0.61519726
MARS_YEAR_RATIO = 1.8808158
JUPITER_YEAR_RATIO = 11.862615
SATURN_YEAR_RATIO = 29.447498
URANUS_YEAR_RATIO = 84.016846
NEPTUNE_YEAR_RATIO = 164.79132
def __init__(self, seconds):
self.seconds = seconds
def calculate_planet_year(self, secondsPerPlanetYear):
return round(self.seconds / secondsPerPlanetYear, 2)
def on_earth(self):
return self.calculate_planet_year(SpaceAge.EARTH_YEAR)
def on_mercury(self):
return self.calculate_planet_year(SpaceAge.EARTH_YEAR * SpaceAge.MERCURY_YEAR_RATIO)
def on_venus(self):
return self.calculate_planet_year(SpaceAge.EARTH_YEAR * SpaceAge.VENUS_YEAR_RATIO)
def on_mars(self):
return self.calculate_planet_year(SpaceAge.EARTH_YEAR * SpaceAge.MARS_YEAR_RATIO)
def on_jupiter(self):
return self.calculate_planet_year(SpaceAge.EARTH_YEAR * SpaceAge.JUPITER_YEAR_RATIO)
def on_saturn(self):
return self.calculate_planet_year(SpaceAge.EARTH_YEAR * SpaceAge.SATURN_YEAR_RATIO)
def on_uranus(self):
return self.calculate_planet_year(SpaceAge.EARTH_YEAR * SpaceAge.URANUS_YEAR_RATIO)
def on_neptune(self):
return self.calculate_planet_year(SpaceAge.EARTH_YEAR * SpaceAge.NEPTUNE_YEAR_RATIO)
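# Illustrative usage (values taken from the usual exercise cases, assumed here):
# SpaceAge(1000000000).on_earth() -> 31.69
# SpaceAge(2134835688).on_mercury() -> 280.88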
|
StarcoderdataPython
|
8084144
|
from kgbase import Query
import datetime
import json
# TODO
# csv upload
# raw import
if __name__ == "__main__":
# import requests
# response = requests.post(
# 'https://kgbase.com/kgbase-query',
# headers={
# "Accept": "application/json",
# "Content-Type": "application/json",
# "User-Agent": "Python API 0.23"
# },
# json={
# "operationName": "LoginUser",
# "query": "mutation LoginUser($data: LoginInput!) {\n loginUser(data: $data) {\n ok\n user {\n ...UserAll\n __typename\n }\n error\n __typename\n }\n}\nfragment UserAll on UserType {\n uuid\n graphqlId\n username\n name\n avatarUrl\n nickname\n apiKey\n isStaff\n lastActivityAt\n hasExtendedTrial\n __typename\n}\n",
# "variables": {"data": {"username": "<EMAIL>", "password": "<PASSWORD>!"}}
# }
# #{'query': 'fragment UserAll on UserType {\n uuid\n graphqlId\n username\n name\n avatarUrl\n nickname\n apiKey\n isStaff\n lastActivityAt\n hasExtendedTrial\n}\n\nmutation LoginUser($data: LoginInput!) {\n loginUser(data: $data) {\n ok\n user {\n ...UserAll\n }\n error\n }\n}', 'variables': {'data': {'username': '<EMAIL>', 'password': '<PASSWORD>!'}}, 'operationName': 'LoginUser'}
# )
# print (response.status_code)
# print (response.text)
q = Query()
result = q.login(
username='<EMAIL>',
password='<PASSWORD>'
)
print (result)
'''
{
"data": {
"loginUser": {
"ok": true,
"user": {
"uuid": null,
"graphqlId": "User/1",
"username": "<EMAIL>",
"name": "<NAME>",
"avatarUrl": "https://kgbase.s3.amazonaws.com/profile_pictures/2019/12/KakaoTalk_Photo_2019-12-23-08-51-51.jpeg",
"nickname": "sangwonseo",
"apiKey": "<KEY>",
"isStaff": true,
"lastActivityAt": "2020-04-16T20: 41: 12.897637+00: 00"
},
"error": null
}
}
}
'''
# result = q.get_graph(
# 'ctx-MI1FrWruBh42XpBxA1C',
# 'tab-MI1FrWxd9rkK4DIgz1l'
# )
# print (result)
# result = q.get_graph(
# 'ctx-MI1FrWruBh42XpBxA1C',
# )
# print (result)
# result = q.get_user_state()
# print (result)
'''
{
"data": {
"currentUser": {
"uuid": null,
"graphqlId": "User/1",
"username": "<EMAIL>",
"name": "<NAME>",
"avatarUrl": "https://kgbase.s3.amazonaws.com/profile_pictures/2019/12/KakaoTalk_Photo_2019-12-23-08-51-51.jpeg",
"nickname": "sangwonseo",
"apiKey": "w2gJRyzNwB",
"isStaff": true,
"lastActivityAt": "2020-04-16T20: 41: 12.897637+00: 00"
}
}
}
'''
# result = q.logout()
# print (result)
'''
{
"data": {
"logoutUser": {
"ok": true
}
}
}
'''
# result = q.get_public_projects()
# print (result)
# result = q.get_team_projects()
# print (result)
# result = q.get_favorite_projects()
# print (result)
# result = q.get_user_projects()
# print (result)
'''
{
"data": {
"publicProjects": [
{
"uuid": "f08d0007-0908-4edb-beeb-87ee5e41344b",
"graphqlId": "Project/266",
"projectId": "cz05qj1k5zo4puif",
"name": "clinicaltrials",
"description": "",
"updatedAt": "2019-12-13T06:27:42.614425+00:00",
"dataChangedAt": null,
"color": "gray",
"slug": "clinicaltrials",
"owner": {
"ownerType": "USER",
"name": "Health Knowledge",
"slug": "healthknow",
"avatarUrl": null
},
"isPublic": true,
"collaborators": [
{
"uuid": "1349191d-4cf2-4db7-8f1b-30d4535fd915",
"graphqlId": "User/36",
"username": "<EMAIL>",
"name": "StellaWeng",
"avatarUrl": "https: //kgbase.s3.amazonaws.com/profile_pictures/2019/12/WX20191107-100351.png",
"nickname": "stellaweng",
"apiKey": null,
"isStaff": true,
"lastActivityAt": "2020-04-16T19:26:04.589058+00:00"
}
],
"apiUsers": [],
"favoritesCount": 1,
"canManage": false
}
]
}
}
'''
# result = q.create_project(
# name='test-api',
# is_public=True,
# )
# print (result)
'''
{
"data": {
"createProject": {
"ok": true,
"project": {
"uuid": "a0406c6d-122a-4ff2-af7f-976ba5cbca01",
"graphqlId": "Project/1509",
"projectId": "ctx-M53lgnjpCkc_plt0lqo",
"name": "test-api",
"description": null,
"updatedAt": "2020-04-16T20:55:53.329530+00:00",
"dataChangedAt": "2020-04-16T20:55:53.329296",
"color": "gray",
"slug": "test-api-lw",
"owner": {
"ownerType": "USER",
"name": "<NAME>",
"slug": "sangwonseo",
"avatarUrl": "https: //kgbase.s3.amazonaws.com/profile_pictures/2019/12/KakaoTalk_Photo_2019-12-23-08-51-51.jpeg"
},
"isPublic": true,
"collaborators": [],
"apiUsers": [],
"favoritesCount": 0,
"canManage": true
}
}
}
}
'''
# result = q.get_project_state(
# project_id='ctx-MCxWvNH-NPIaFmE6OQH'
# )
# print (result)
'''
{
"data": {
"project": {
"uuid": "a0406c6d-122a-4ff2-af7f-976ba5cbca01",
"graphqlId": "Project/1509",
"projectId": "ctx-M53lgnjpCkc_plt0lqo",
"name": "test-api",
"description": null,
"updatedAt": "2020-04-16T20:55:53.329530+00:00",
"dataChangedAt": "2020-04-16T20:55:53.329296+00:00",
"color": "gray",
"slug": "test-api-lw",
"owner": {
"ownerType": "USER",
"name": "<NAME>",
"slug": "sangwonseo",
"avatarUrl": "https: //kgbase.s3.amazonaws.com/profile_pictures/2019/12/KakaoTalk_Photo_2019-12-23-08-51-51.jpeg"
},
"isPublic": true,
"collaborators": [],
"apiUsers": [],
"favoritesCount": 0,
"canManage": true
},
"isOwnerOrMember": true,
"canManageProject": true,
"canRead": true,
"canWrite": true,
"hasRequestedAccess": false,
"isProjectFavorited": false,
"wantsNotifications": null
}
}
'''
# result = q.update_project(
# project_id='ctx-MCxWvNH-NPIaFmE6OQH',
# name='test-api-v2',
# is_public=False
# )
# print (result)
'''
{
"data": {
"updateProject": {
"ok": true,
"project": {
"uuid": "a0406c6d-122a-4ff2-af7f-976ba5cbca01",
"graphqlId": "Project/1509",
"projectId": "ctx-M53lgnjpCkc_plt0lqo",
"name": "test-api-v2",
"description": null,
"updatedAt": "2020-04-16T20:58:12.252444+00:00",
"dataChangedAt": "2020-04-16T20:58:12.252324",
"color": "gray",
"slug": "test-api-v2",
"owner": {
"ownerType": "USER",
"name": "<NAME>",
"slug": "sangwonseo",
"avatarUrl": "https://kgbase.s3.amazonaws.com/profile_pictures/2019/12/KakaoTalk_Photo_2019-12-23-08-51-51.jpeg"
},
"isPublic": false,
"collaborators": [],
"apiUsers": [],
"favoritesCount": 0,
"canManage": true
}
}
}
}
'''
# result = q.destroy_project(
# project_id='ctx-MCxWvNH-NPIaFmE6OQH'
# )
# print (result)
'''
{
"data": {
"destroyProject": {
"ok": true
}
}
}
'''
# result = q.create_table(
# project_id='ctx-MCxXEVZjyndWiHH7VPY',
# display_name='api-test',
# description='Api test'
# )
# print (result)
'''
{
"data": {
"createTable": {
"ok": true,
"tableId": "tab-M53nDu1y1SXMVA_ny97"
}
}
}
'''
# result = q.update_table(
# project_id='ctx-MCxXEVZjyndWiHH7VPY',
# table_id='tab-MCxXOmTENzVaAZUdDGR',
# display_name='api-table2',
# description='asdfsdfs',
# )
# print (result)
'''
{
"data": {
"updateTable": {
"ok": true
}
}
}
'''
# result = q.delete_table(
# project_id='ctx-MCxXEVZjyndWiHH7VPY',
# table_id='tab-MCxWDQtyYHrv12o3-id',
# )
# print (result)
'''
{
"data": {
"deleteTable": {
"ok": true,
"taskId": "1931"
}
}
}
'''
# result = q.create_column(
# project_id='ctx-MCx_qF9v1ixsSmwIUjD',
# table_id='tab-MCx_qFFfmpIxugPFD63',
# display_name='api_column',
# data_type='text'
# )
# print (result)
# result = q.update_column(
# project_id='ctx-MCxXEVZjyndWiHH7VPY',
# table_id='tab-MCxXOmTENzVaAZUdDGR',
# column_id='col-0',
# display_name='api-column0',
# data_type='text'
# # data_type='link_one',
# # linked_table='tab-dfj23eijSFdfewf'
# )
# print (result)
'''
{
"data": {
"createColumn": {
"ok": true,
"columnId": "col-1"
}
}
}
'''
# text, number, boolean, url, date, date_added, link_one, link_many
# result = q.update_column(
# project_id='ctx-MCxXEVZjyndWiHH7VPY',
# table_id='tab-MCxXOmTENzVaAZUdDGR',
# column_id='col-0',
# display_name='api-column10',
# data_type='number'
# )
# result = q.update_column(
# project_id='ctx-MCxWvNH-NPIaFmE6OQH',
# table_id='tab-M5NlqLNJdGIrk5uFI4m',
# column_id='col-0',
# display_name='api-column10',
# data_type='link_one',
# linked_table='tab-dfj23eijSFdfewf'
# )
# print (result)
'''
{
"data": {
"updateColumn": {
"ok": true,
"taskId": null
}
}
}
'''
# result = q.delete_column(
# project_id='ctx-M5Na8A_zgK3m455pp-r',
# table_id='tab-M5Nb1-JqUtsb-SwpnH3',
# column_id='col-0',
# )
# print (result)
# '''
# {
# "data": {
# "deleteColumn": {
# "ok": true
# }
# }
# }
# '''
# result = q.create_vertex(
# project_id='ctx-MCxXEVZjyndWiHH7VPY',
# table_id='tab-MCxXOmTENzVaAZUdDGR',
# values={
# 'col-0': 'Palantir', # text
# # 'col-1': 1, # number
# # 'col-2': False, # boolean
# # 'col-3': 'https://google.com', # url
# # 'col-4': datetime.datetime.today() # date
# # 'col-5' # date_added
# },
# edges=[]
# )
# print (result)
# result = q.create_vertex(
# project_id='ctx-MCxWvNH-NPIaFmE6OQH',
# table_id='tab-M5Nm23_krbcQHdBqWh-',
# values={
# # 'col-0': 'Google', # text
# 'col-0': 'Shopify', # number
# # 'col-2': False, # boolean
# # 'col-3': 'https://google.com', # url
# # 'col-4': datetime.datetime.today() # date
# # 'col-5' # date_added
# },
# edges=[]
# )
# print (result)
# result = q.create_vertex(
# project_id='ctx-M5Na8A_zgK3m455pp-r',
# table_id='tab-M5Nb1-JqUtsb-SwpnH3',
# values={
# # 'col-0': 'Google', # text
# 'col-1': 'Tesla', # number
# # 'col-2': False, # boolean
# # 'col-3': 'https://google.com', # url
# # 'col-4': datetime.datetime.today() # date
# # 'col-5' # date_added
# },
# edges=[]
# )
# print (result)
# result = q.get_graph(
# project_id='ctx-MCxXEVZjyndWiHH7VPY',
# table_id='tab-MCxXOmTENzVaAZUdDGR',
# filters=[],
# offset=1,
# limit=50
# )
# print (result)
# result = q.create_vertex(
# project_id='ctx-M57S8onUVXwdNMRgHPf',
# table_id='tab-M57wxxBqH0D7aKgYhhH',
# values={
# 'col-0': 'Apple7',
# 'col-1': True,
# 'col-2': ''
# },
# edges=[
# ("column3", "row-M587jZETRpuCBIXUfw6"),
# ("column3", "row-M58Ac6n8CjPhqp8-u7M")
# ]
# )
# print (result)
# result = q.update_vertex(
# project_id='ctx-MCxXEVZjyndWiHH7VPY',
# table_id='tab-MCxXOmTENzVaAZUdDGR',
# vertex_id='row-MCxYO25MsG10qTdrSPQ',
# values={
# 'col-0': 'SW',
# },
# edges=[
# ]
# )
# print (result)
# result = q.get_graph(
# project_id='ctx-MCxWvNH-NPIaFmE6OQH',
# table_id='tab-M5Nm23_krbcQHdBqWh-',
# filters=[],
# offset=1,
# limit=50
# )
# print (result)
'''
{
"data": {
"createVertex": {
"ok": true,
"vertex": {
"id": "row-M53tT_jTviJ50qyzgsL",
"label": "tab-M53pRIARajeAYTwPIAN",
"values": [
{
"key": "col-1",
"value": "test"
},
{
"key": "col-3",
"value": "1.0"
}
],
"contextId": null
}
}
}
}
'''
# result = q.update_vertex(
# project_id='ctx-MCxWvNH-NPIaFmE6OQH',
# table_id='tab-M5Nm23_krbcQHdBqWh-',
# vertex_id='row-M5NnwDXIN6tZVOWOCuR',
# values={
# 'col-0': 'Samsung',
# },
# edges=[]
# )
# print (result)
'''
{
"data": {
"updateVertex": {
"ok": true
}
}
}
'''
# result = q.delete_vertex(
# project_id='ctx-MCxXEVZjyndWiHH7VPY',
# table_id='tab-MCxXOmTENzVaAZUdDGR',
# vertex_id='row-MCxYO25MsG10qTdrSPQ',
# )
# print (result)
'''
{
"data": {
"deleteVertex": {
"ok": true
}
}
}
'''
# result = q.bulk_delete_vertices(
# project_id='ctx-MCxXEVZjyndWiHH7VPY',
# table_id='tab-MCxXOmTENzVaAZUdDGR',
# vertex_ids=[
# "row-MCxYexMyxDjAJ6oTmlh",
# "row-MCxYhXtaOKSnyADwYXa"
# ]
# )
# print (result)
'''
{
"data": {
"bulkDeleteVertices": {
"ok": true
}
}
}
'''
# result = q.get_schema(
# project_id='ctx-M5Na8A_zgK3m455pp-r'
# )
# print (result)
'''
{
"data": {
"getSchema": {
"tables": [
{
"tableId": "tab-M53lgnpTXtQhMqMBXHH",
"displayName": "Table 1",
"columns": [],
"config": null
}
],
"links": []
}
}
}
'''
# result = q.get_graph(
# project_id='ctx-MCxWvNH-NPIaFmE6OQH',
# table_id='tab-M5Nm23_krbcQHdBqWh-',
# offset=1,
# limit=50
# )
# print (result)
# result = q.get_graph(
# project_id='ctx-M5Na8A_zgK3m455pp-r',
# table_id='tab-M5Nb1-JqUtsb-SwpnH3',
# filters=[],
# offset=1,
# limit=50
# )
# print (result)
'''
{
"data": {
"getGraph": {
"vertices": [
{
"id": "row-M54-YHKSk3i_9rXO7cl",
"label": "tab-M53zumOvT3xKmqkIB_X",
"values": [
{
"key": "col-0",
"value": "ipod"
},
{
"key": "col-1",
"value": "10.0"
},
{
"key": "col-2",
"value": "apple"
}
],
"contextId": null
},
{
"id": "row-M54-akM_ctW_QMTAmd5",
"label": "tab-M53zumOvT3xKmqkIB_X",
"values": [
{
"key": "col-0",
"value": "galaxy s"
},
{
"key": "col-1",
"value": "20.0"
},
{
"key": "col-2",
"value": "samsung"
}
],
"contextId": null
},
{
"id": "row-M54-e4d-BIwZywV9Qoq",
"label": "tab-M53zumOvT3xKmqkIB_X",
"values": [
{
"key": "col-0",
"value": "galaxy tab"
},
{
"key": "col-1",
"value": "20.0"
},
{
"key": "col-2",
"value": "samusng"
}
],
"contextId": null
},
{
"id": "row-M54-fbtdB4FLEO_R2xy",
"label": "tab-M53zumOvT3xKmqkIB_X",
"values": [
{
"key": "col-0",
"value": "mac"
},
{
"key": "col-1",
"value": "20.0"
},
{
"key": "col-2",
"value": "apple"
}
],
"contextId": null
}
],
"edges": [],
"total": 5
}
}
}
'''
# result = q.get_graph(
# project_id='ctx-MCxWvNH-NPIaFmE6OQH',
# table_id='tab-M5Nm23_krbcQHdBqWh-',
# filters=[
# {
# "property": "col-0",
# "predicate": "=",
# "value": "Samsung"
# }
# ],
# offset=1,
# limit=50
# )
# print (result)
# # Summarize Graph
# # count, sum, mean, max, min
# result = q.summarize_graph(
# project_id='ctx-MCxWvNH-NPIaFmE6OQH',
# table_id='tab-M5Nm23_krbcQHdBqWh-',
# filters=[
# {
# "property": "col-0",
# "predicate": "=",
# "value": "Samsung"
# }
# ],
# groups=[{"property": "col-0"}],
# aggregations=[{"property": "col-0", "function": "count"}],
# offset=1,
# limit=50
# )
# print (result)
'''
{
"data": {
"getGraph": {
"vertices": [],
"edges": [],
"total": 0
}
}
}
'''
result = q.bulk_upload(
project_id='ctx-MK09gSHJwa3WtvUhInI',
table_id='tab-MK09gSNWBtZ-g2iLW8-',
filepath='/Users/sangwonseo/Downloads/test1.csv',
column_ids=['col-0', 'col-1', 'col-2'],
configs={
'countSkipRows': 0,
'hasHeader': False,
'dropEmpty': False
}
)
print (result)
'''
{
"data": {
"getBulkBundle": {
"bundleId": "1976ac21-45b6-4e02-8f6c-36456cf70b1d",
"status": "finished"
}
}
}
'''
|
StarcoderdataPython
|
287944
|
<reponame>entelecheia/eKorpKit<gh_stars>1-10
from .tokenizer.trainer import train_tokenizer
|
StarcoderdataPython
|
3322494
|
<reponame>odoochain/addons_oca
from . import project_task
from . import project_project
|
StarcoderdataPython
|
6532337
|
<gh_stars>0
"""AccountReports API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
from .base import BaseModel
class AccountReportsAPI(BaseCanvasAPI):
"""AccountReports API Version 1.0."""
def __init__(self, *args, **kwargs):
"""Init method for AccountReportsAPI."""
super(AccountReportsAPI, self).__init__(*args, **kwargs)
self.logger = logging.getLogger("py3canvas.AccountReportsAPI")
def list_available_reports(self, account_id):
"""
List Available Reports.
Returns a paginated list of reports for the current context.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""
ID
"""
path["account_id"] = account_id
self.logger.debug(
"GET /api/v1/accounts/{account_id}/reports with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/accounts/{account_id}/reports".format(**path),
data=data,
params=params,
no_data=True,
)
def start_report(
self,
account_id,
report,
parameters=None,
parameters_course_id=None,
parameters_users=None,
):
"""
Start a Report.
Generates a report instance for the account. Note that "report" in the
request must match one of the available report names. To fetch a list of
available report names and parameters for each report (including whether or
not those parameters are required), see
{api:AccountReportsController#available_reports List Available Reports}.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""
ID
"""
path["account_id"] = account_id
# REQUIRED - PATH - report
"""
ID
"""
path["report"] = report
# OPTIONAL - parameters
"""
The parameters will vary for each report. To fetch a list
of available parameters for each report, see {api:AccountReportsController#available_reports List Available Reports}.
A few example parameters have been provided below. Note that the example
parameters provided below may not be valid for every report.
"""
if parameters is not None:
data["parameters"] = parameters
# OPTIONAL - parameters[course_id]
"""
The id of the course to report on.
Note: this parameter has been listed to serve as an example and may not be
valid for every report.
"""
if parameters_course_id is not None:
data["parameters[course_id]"] = parameters_course_id
# OPTIONAL - parameters[users]
"""
If true, user data will be included. If
false, user data will be omitted. Note: this parameter has been listed to
serve as an example and may not be valid for every report.
"""
if parameters_users is not None:
data["parameters[users]"] = parameters_users
self.logger.debug(
"POST /api/v1/accounts/{account_id}/reports/{report} with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"POST",
"/api/v1/accounts/{account_id}/reports/{report}".format(**path),
data=data,
params=params,
single_item=True,
)
def index_of_reports(self, account_id, report):
"""
Index of Reports.
Shows all reports that have been run for the account of a specific type.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""
ID
"""
path["account_id"] = account_id
# REQUIRED - PATH - report
"""
ID
"""
path["report"] = report
self.logger.debug(
"GET /api/v1/accounts/{account_id}/reports/{report} with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/accounts/{account_id}/reports/{report}".format(**path),
data=data,
params=params,
all_pages=True,
)
def status_of_report(self, account_id, id, report):
"""
Status of a Report.
Returns the status of a report.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""
ID
"""
path["account_id"] = account_id
# REQUIRED - PATH - report
"""
ID
"""
path["report"] = report
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
self.logger.debug(
"GET /api/v1/accounts/{account_id}/reports/{report}/{id} with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/v1/accounts/{account_id}/reports/{report}/{id}".format(**path),
data=data,
params=params,
single_item=True,
)
def delete_report(self, account_id, id, report):
"""
Delete a Report.
Deletes a generated report instance.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""
ID
"""
path["account_id"] = account_id
# REQUIRED - PATH - report
"""
ID
"""
path["report"] = report
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
self.logger.debug(
"DELETE /api/v1/accounts/{account_id}/reports/{report}/{id} with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"DELETE",
"/api/v1/accounts/{account_id}/reports/{report}/{id}".format(**path),
data=data,
params=params,
single_item=True,
)
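# Illustrative usage sketch. Assumptions: an already-constructed AccountReportsAPI
# instance `api` (construction is handled by BaseCanvasAPI, which is not shown in
# this file) and placeholder account ids / report names:
#   api.list_available_reports(account_id=1)
#   api.start_report(account_id=1, report="provisioning_csv", parameters_users=True)
#   api.status_of_report(account_id=1, id=42, report="provisioning_csv")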
class Report(BaseModel):
"""Report Model."""
def __init__(
self,
id=None,
report=None,
file_url=None,
attachment=None,
status=None,
created_at=None,
started_at=None,
ended_at=None,
parameters=None,
progress=None,
current_line=None,
):
"""Init method for Report class."""
self._id = id
self._report = report
self._file_url = file_url
self._attachment = attachment
self._status = status
self._created_at = created_at
self._started_at = started_at
self._ended_at = ended_at
self._parameters = parameters
self._progress = progress
self._current_line = current_line
self.logger = logging.getLogger("py3canvas.Report")
@property
def id(self):
"""The unique identifier for the report."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn(
"Setting values on id will NOT update the remote Canvas instance."
)
self._id = value
@property
def report(self):
"""The type of report."""
return self._report
@report.setter
def report(self, value):
"""Setter for report property."""
self.logger.warn(
"Setting values on report will NOT update the remote Canvas instance."
)
self._report = value
@property
def file_url(self):
"""The url to the report download."""
return self._file_url
@file_url.setter
def file_url(self, value):
"""Setter for file_url property."""
self.logger.warn(
"Setting values on file_url will NOT update the remote Canvas instance."
)
self._file_url = value
@property
def attachment(self):
"""The attachment api object of the report. Only available after the report has completed."""
return self._attachment
@attachment.setter
def attachment(self, value):
"""Setter for attachment property."""
self.logger.warn(
"Setting values on attachment will NOT update the remote Canvas instance."
)
self._attachment = value
@property
def status(self):
"""The status of the report."""
return self._status
@status.setter
def status(self, value):
"""Setter for status property."""
self.logger.warn(
"Setting values on status will NOT update the remote Canvas instance."
)
self._status = value
@property
def created_at(self):
"""The date and time the report was created."""
return self._created_at
@created_at.setter
def created_at(self, value):
"""Setter for created_at property."""
self.logger.warn(
"Setting values on created_at will NOT update the remote Canvas instance."
)
self._created_at = value
@property
def started_at(self):
"""The date and time the report started processing."""
return self._started_at
@started_at.setter
def started_at(self, value):
"""Setter for started_at property."""
self.logger.warn(
"Setting values on started_at will NOT update the remote Canvas instance."
)
self._started_at = value
@property
def ended_at(self):
"""The date and time the report finished processing."""
return self._ended_at
@ended_at.setter
def ended_at(self, value):
"""Setter for ended_at property."""
self.logger.warn(
"Setting values on ended_at will NOT update the remote Canvas instance."
)
self._ended_at = value
@property
def parameters(self):
"""The report parameters."""
return self._parameters
@parameters.setter
def parameters(self, value):
"""Setter for parameters property."""
self.logger.warn(
"Setting values on parameters will NOT update the remote Canvas instance."
)
self._parameters = value
@property
def progress(self):
"""The progress of the report."""
return self._progress
@progress.setter
def progress(self, value):
"""Setter for progress property."""
self.logger.warn(
"Setting values on progress will NOT update the remote Canvas instance."
)
self._progress = value
@property
def current_line(self):
"""This is the current line count being written to the report. It updates every 1000 records."""
return self._current_line
@current_line.setter
def current_line(self, value):
"""Setter for current_line property."""
self.logger.warn(
"Setting values on current_line will NOT update the remote Canvas instance."
)
self._current_line = value
class Reportparameters(BaseModel):
"""Reportparameters Model.
The parameters returned will vary for each report."""
def __init__(
self,
enrollment_term_id=None,
include_deleted=None,
course_id=None,
order=None,
users=None,
accounts=None,
terms=None,
courses=None,
sections=None,
enrollments=None,
groups=None,
xlist=None,
sis_terms_csv=None,
sis_accounts_csv=None,
include_enrollment_state=None,
enrollment_state=None,
start_at=None,
end_at=None,
):
"""Init method for Reportparameters class."""
self._enrollment_term_id = enrollment_term_id
self._include_deleted = include_deleted
self._course_id = course_id
self._order = order
self._users = users
self._accounts = accounts
self._terms = terms
self._courses = courses
self._sections = sections
self._enrollments = enrollments
self._groups = groups
self._xlist = xlist
self._sis_terms_csv = sis_terms_csv
self._sis_accounts_csv = sis_accounts_csv
self._include_enrollment_state = include_enrollment_state
self._enrollment_state = enrollment_state
self._start_at = start_at
self._end_at = end_at
self.logger = logging.getLogger("py3canvas.Reportparameters")
@property
def enrollment_term_id(self):
"""The canvas id of the term to get grades from."""
return self._enrollment_term_id
@enrollment_term_id.setter
def enrollment_term_id(self, value):
"""Setter for enrollment_term_id property."""
self.logger.warn(
"Setting values on enrollment_term_id will NOT update the remote Canvas instance."
)
self._enrollment_term_id = value
@property
def include_deleted(self):
"""If true, deleted objects will be included. If false, deleted objects will be omitted."""
return self._include_deleted
@include_deleted.setter
def include_deleted(self, value):
"""Setter for include_deleted property."""
self.logger.warn(
"Setting values on include_deleted will NOT update the remote Canvas instance."
)
self._include_deleted = value
@property
def course_id(self):
"""The id of the course to report on."""
return self._course_id
@course_id.setter
def course_id(self, value):
"""Setter for course_id property."""
self.logger.warn(
"Setting values on course_id will NOT update the remote Canvas instance."
)
self._course_id = value
@property
def order(self):
"""The sort order for the csv, Options: 'users', 'courses', 'outcomes'."""
return self._order
@order.setter
def order(self, value):
"""Setter for order property."""
self.logger.warn(
"Setting values on order will NOT update the remote Canvas instance."
)
self._order = value
@property
def users(self):
"""If true, user data will be included. If false, user data will be omitted."""
return self._users
@users.setter
def users(self, value):
"""Setter for users property."""
self.logger.warn(
"Setting values on users will NOT update the remote Canvas instance."
)
self._users = value
@property
def accounts(self):
"""If true, account data will be included. If false, account data will be omitted."""
return self._accounts
@accounts.setter
def accounts(self, value):
"""Setter for accounts property."""
self.logger.warn(
"Setting values on accounts will NOT update the remote Canvas instance."
)
self._accounts = value
@property
def terms(self):
"""If true, term data will be included. If false, term data will be omitted."""
return self._terms
@terms.setter
def terms(self, value):
"""Setter for terms property."""
self.logger.warn(
"Setting values on terms will NOT update the remote Canvas instance."
)
self._terms = value
@property
def courses(self):
"""If true, course data will be included. If false, course data will be omitted."""
return self._courses
@courses.setter
def courses(self, value):
"""Setter for courses property."""
self.logger.warn(
"Setting values on courses will NOT update the remote Canvas instance."
)
self._courses = value
@property
def sections(self):
"""If true, section data will be included. If false, section data will be omitted."""
return self._sections
@sections.setter
def sections(self, value):
"""Setter for sections property."""
self.logger.warn(
"Setting values on sections will NOT update the remote Canvas instance."
)
self._sections = value
@property
def enrollments(self):
"""If true, enrollment data will be included. If false, enrollment data will be omitted."""
return self._enrollments
@enrollments.setter
def enrollments(self, value):
"""Setter for enrollments property."""
self.logger.warn(
"Setting values on enrollments will NOT update the remote Canvas instance."
)
self._enrollments = value
@property
def groups(self):
"""If true, group data will be included. If false, group data will be omitted."""
return self._groups
@groups.setter
def groups(self, value):
"""Setter for groups property."""
self.logger.warn(
"Setting values on groups will NOT update the remote Canvas instance."
)
self._groups = value
@property
def xlist(self):
"""If true, data for crosslisted courses will be included. If false, data for crosslisted courses will be omitted."""
return self._xlist
@xlist.setter
def xlist(self, value):
"""Setter for xlist property."""
self.logger.warn(
"Setting values on xlist will NOT update the remote Canvas instance."
)
self._xlist = value
@property
def sis_terms_csv(self):
"""sis_terms_csv."""
return self._sis_terms_csv
@sis_terms_csv.setter
def sis_terms_csv(self, value):
"""Setter for sis_terms_csv property."""
self.logger.warn(
"Setting values on sis_terms_csv will NOT update the remote Canvas instance."
)
self._sis_terms_csv = value
@property
def sis_accounts_csv(self):
"""sis_accounts_csv."""
return self._sis_accounts_csv
@sis_accounts_csv.setter
def sis_accounts_csv(self, value):
"""Setter for sis_accounts_csv property."""
self.logger.warn(
"Setting values on sis_accounts_csv will NOT update the remote Canvas instance."
)
self._sis_accounts_csv = value
@property
def include_enrollment_state(self):
"""If true, enrollment state will be included. If false, enrollment state will be omitted. Defaults to false."""
return self._include_enrollment_state
@include_enrollment_state.setter
def include_enrollment_state(self, value):
"""Setter for include_enrollment_state property."""
self.logger.warn(
"Setting values on include_enrollment_state will NOT update the remote Canvas instance."
)
self._include_enrollment_state = value
@property
def enrollment_state(self):
"""Include enrollment state. Defaults to 'all' Options: ['active'| 'invited'| 'creation_pending'| 'deleted'| 'rejected'| 'completed'| 'inactive'| 'all']."""
return self._enrollment_state
@enrollment_state.setter
def enrollment_state(self, value):
"""Setter for enrollment_state property."""
self.logger.warn(
"Setting values on enrollment_state will NOT update the remote Canvas instance."
)
self._enrollment_state = value
@property
def start_at(self):
"""The beginning date for submissions. Max time range is 2 weeks."""
return self._start_at
@start_at.setter
def start_at(self, value):
"""Setter for start_at property."""
self.logger.warn(
"Setting values on start_at will NOT update the remote Canvas instance."
)
self._start_at = value
@property
def end_at(self):
"""The end date for submissions. Max time range is 2 weeks."""
return self._end_at
@end_at.setter
def end_at(self, value):
"""Setter for end_at property."""
self.logger.warn(
"Setting values on end_at will NOT update the remote Canvas instance."
)
self._end_at = value
|
StarcoderdataPython
|
340675
|
<filename>preDeal/utils.py
import datetime
import scipy as sp
from keras import backend as K
def my_logloss(act, pred):
epsilon = 1e-15
pred = K.maximum(epsilon, pred)
pred = K.minimum(1 - epsilon, pred)
ll = K.sum(act * K.log(pred) + (1 - act) * K.log(1 - pred))
ll = ll * -1.0 / K.shape(act)[0]
return ll
def logloss(act, pred):
'''
The official (competition-provided) log-loss function.
:param act: ground-truth labels
:param pred: predicted probabilities
:return: log loss averaged over the number of samples
'''
epsilon = 1e-15
pred = sp.maximum(epsilon, pred)
pred = sp.minimum(1 - epsilon, pred)
ll = sum(act * sp.log(pred) + sp.subtract(1, act) * sp.log(sp.subtract(1, pred)))
ll = ll * -1.0 / len(act)
return ll
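# Worked example (sketch, values are hypothetical): for act = [1, 0] and
# pred = [0.9, 0.2], logloss = -(log(0.9) + log(0.8)) / 2, roughly 0.164.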
def get_how_much_time(time_str, year_month='2017-01', start_date_time='2017-01-010000'):
"""
Given a time string in six-digit ddHHMM format, return the time difference in seconds (relative to the 1st of the month).
"""
t_str = year_month + "-" + time_str
t1 = datetime.datetime.strptime(t_str, '%Y-%m-%d%H%M')
t2 = datetime.datetime.strptime(start_date_time, '%Y-%m-%d%H%M')
how_long = t1.timestamp() - t2.timestamp()
return how_long
def get_how_much_time_of_days(time_str, year_month='2017-01', start_date_time='2017-01-010000'):
"""
Given a time string in six-digit ddHHMM format, return the time difference in days (relative to the 1st of the month).
"""
t_str = year_month + "-" + time_str
t1 = datetime.datetime.strptime(t_str, '%Y-%m-%d%H%M')
t2 = datetime.datetime.strptime(start_date_time, '%Y-%m-%d%H%M')
x = t1 - t2
# print(type(x.days-15))
return x.days
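# Worked example (sketch): time_str '021030' parses as day 02, 10:30, so with the
# default start of 2017-01-01 00:00 the delta is 86400 + 37800 = 124200 seconds
# from get_how_much_time, and 1 day from get_how_much_time_of_days.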
def get_app_categories(app_categories_dict, appID):  # get the ad category listing
app_categories = app_categories_dict[appID]
if app_categories == '0':
return [0, 00]
elif len(app_categories) == 1:
return [int(app_categories), 00]
elif len(app_categories) == 3:
# print("app类别:", app_categories)
# print("len", len(app_categories))
return [int(app_categories[0]), int(str(app_categories[1]) + str(app_categories[2]))]
else:
raise Exception('Category parsing bug: please review and rewrite this branch')
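# Example (sketch): an app_categories string of '0' maps to [0, 0], '2' to [2, 0],
# and '203' to [2, 3] (first digit, plus the remaining two digits parsed as one number).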
def get_ad_info(ad_dict, app_categories_dict, creativeID):
'''
Get ad-related information for a creative.
:param ad_dict: mapping from creativeID to the raw ad CSV line
:param creativeID: creative identifier to look up
:return: ad category fields plus the app platform
'''
line = ad_dict[creativeID]
words = line.split(',')
adID = int(words[1])
camgaignID = int(words[2])  # a campaign is a collection of ads, similar to a folder on a computer
advertiserID = int(words[3])  # account id
appID = int(words[4])
app_categories = get_app_categories(app_categories_dict, appID)  # ad category
appPlatform = int(words[5])  # app platform, e.g. iOS or Android
return app_categories + [appPlatform]
def get_label_1(ad_xx_dict, target_id):
label_1 = ad_xx_dict[1].get(target_id, 0)
return label_1
def get_percent(ad_xx_dict, target_id):
label_1 = ad_xx_dict[1].get(target_id, 0)
label_0 = ad_xx_dict[0].get(target_id, 0)
if label_1 + label_0 == 0:
baifenbi = 0
else:
baifenbi = label_1 / (label_1 + label_0)
baifenbi = round(baifenbi, 7)
return baifenbi
def get_positionID_num_label(num):
if num < 10:
return 1
elif 10 <= num < 50:
return 2
elif 50 <= num < 200:
return 3
elif 200 <= num < 500:
return 4
elif 500 <= num < 1000:
return 5
elif 1000 <= num < 2000:
return 6
elif 2000 <= num < 5000:
return 7
elif num >= 5000:
return 8
def get_app_categories_count_dict_big_label(num):
if num < 1000:
return 1
elif 1000 <= num < 10000:
return 2
elif 10000 <= num < 50000:
return 3
elif 50000 <= num:
return 4
def get_app_categories_count_dict_small_label(num):
if num < 1000:
return 1
elif 1000 <= num < 5000:
return 2
elif 5000 <= num < 10000:
return 3
elif 10000 <= num < 30000:
return 4
elif 30000 <= num:
return 5
def get_creativeID_num_label(num):
if num < 2:
return 0
elif 2 <= num < 10:
return 1
elif 10 <= num < 50:
return 2
elif 50 <= num < 100:
return 3
elif 100 <= num < 300:
return 4
elif 300 <= num < 500:
return 5
elif 500 <= num < 1000:
return 6
elif 1000 <= num < 2000:
return 7
elif 2000 <= num < 6000:
return 8
elif num >= 6000:
return 9
def get_advertiserID_num_label(num):
if num < 30:
return 0
elif 30 <= num < 100:
return 1
elif 100 <= num < 200:
return 2
elif 200 <= num < 500:
return 3
elif 500 <= num < 1000:
return 4
elif 1000 <= num < 2000:
return 5
elif 2000 <= num < 5000:
return 6
elif 5000 <= num < 10000:
return 7
elif num >= 10000:
return 8
def get_camgaignID_num_label(num):
if num < 30:
return 0
elif 30 <= num < 100:
return 1
elif 100 <= num < 200:
return 2
elif 200 <= num < 500:
return 3
elif 500 <= num < 1000:
return 4
elif 1000 <= num < 2000:
return 5
elif 2000 <= num < 6000:
return 6
elif num >= 6000:
return 7
def get_adID_num_label(num):
if num < 30:
return 0
elif 30 <= num < 100:
return 1
elif 100 <= num < 200:
return 2
elif 200 <= num < 500:
return 3
elif 500 <= num < 1000:
return 4
elif 1000 <= num < 2000:
return 5
elif 2000 <= num < 6000:
return 6
elif num >= 6000:
return 7
def get_position_info(position_dict, positionID):
'''
Get the ad-slot (position) information for a positionID.
:param position_dict: mapping from positionID to the raw position CSV line
:param positionID: position identifier to look up
:return: [sitesetID, positionType]
'''
position = position_dict[positionID]
words = position.split(',')
# ad-slot type
positionType = int(words[2])
# siteset ID
sitesetID = int(words[1])
return [sitesetID, positionType]
def get_user_info(user_dict, userID):
'''
Get user-related information for the given userID.
:param user_dict: mapping from userID to the raw user CSV line
:param userID: user identifier to look up
:return: [age, gender, education, marriageStatus, haveBaby, hometown, residence]
'''
user = user_dict[userID]
words = user.split(',')
# print(words[0], userID)
# bucket the age into coarse bands
if int(words[1]) == 0:
age = 0
elif int(words[1]) < 10:
age = 1
elif 10 <= int(words[1]) < 15:
age = 2
elif 15 <= int(words[1]) < 18:
age = 3
elif 18 <= int(words[1]) < 24:
age = 4
elif 24 <= int(words[1]) < 30:
age = 5
elif 30 <= int(words[1]) < 35:
age = 6
elif 35 <= int(words[1]) < 40:
age = 7
elif 40 <= int(words[1]) < 50:
age = 8
elif int(words[1]) >= 50:
age = 9
gender = int(words[2])  # gender
education = int(words[3])  # education
marriageStatus = int(words[4])  # marital status
haveBaby = int(words[5])
hometown = int(words[6])
residence = int(words[7])  # place of residence
# print([age, gender, education, marriageStatus, haveBaby, hometown, residence])
return [age, gender, education, marriageStatus, haveBaby, hometown, residence]
|
StarcoderdataPython
|
158541
|
<gh_stars>1-10
from coralillo.utils import parse_embed
def test_parse_embed():
array = ['object']
output = [['object', None]]
assert parse_embed(array) == output
array = ['object.field']
output = [['object', ['field']]]
assert parse_embed(array) == output
array = ['object.field', 'foo', 'object.var']
output = [['foo', None], ['object', ['field', 'var']]]
assert parse_embed(array) == output
|
StarcoderdataPython
|
11234931
|
<reponame>winkste/python_scripts
#import from standard library
import pprint
#generate dictionaries or list
d = {'Tim' : 1, 'Struppi' : 2, 'Any' : 3}
l = [1,2,3,4,5,6]
#prints the data object to the console
pprint.pprint(l)
pprint.pprint(d)
# here the formatted print with a variable name to console; this could also be
# written to a python data file for later reuse
print('d1 = ' + pprint.pformat(d))
print('l2 = ' + pprint.pformat(l))
|
StarcoderdataPython
|
9673401
|
<filename>1247_min_swap_to_string_equal.py
# You are given two strings s1 and s2 of equal length consisting of letters "x" and "y" only. Your task is to make these two strings equal to each other.
# You can swap any two characters that belong to different strings, which means: swap s1[i] and s2[j].
# Return the minimum number of swaps required to make s1 and s2 equal, or return -1 if it is impossible to do so.
# Example 1:
# Input: s1 = "xx", s2 = "yy"
# Output: 1
# Explanation:
# Swap s1[0] and s2[1], s1 = "yx", s2 = "yx".
# Example 2:
# Input: s1 = "xy", s2 = "yx"
# Output: 2
# Explanation:
# Swap s1[0] and s2[0], s1 = "yy", s2 = "xx".
# Swap s1[0] and s2[1], s1 = "xy", s2 = "xy".
# Note that you can't swap s1[0] and s1[1] to make s1 equal to "yx", cause we can only swap chars in different strings.
# Example 3:
# Input: s1 = "xx", s2 = "xy"
# Output: -1
# Example 4:
# Input: s1 = "xxyyxyxyxx", s2 = "xyyxyxxxyx"
# Output: 4
class Solution(object):
def minimumSwap(self, s1, s2):
xy, yx, result = 0, 0, 0
for a, b in zip(s1, s2):
if a == 'x' and b == 'y':
xy += 1
elif a == 'y' and b == 'x':
yx += 1
# The divmod() method in python takes two numbers and returns a pair of numbers consisting of their quotient and remainder.
# Input : x = 9, y = 3 OUTPUT (3, 0)
xy_swaps, xy_rem = divmod(xy, 2)
yx_swaps, yx_rem = divmod(yx, 2)
if xy_rem == 1 or yx_rem == 1:
return xy_swaps + yx_swaps + 2 if xy_rem == yx_rem else -1
else:
return xy_swaps + yx_swaps
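# Quick sanity checks against the examples above (sketch, not executed here):
#   Solution().minimumSwap("xx", "yy") -> 1   (two "xy" mismatches pair into one swap)
#   Solution().minimumSwap("xy", "yx") -> 2   (one leftover of each kind costs two swaps)
#   Solution().minimumSwap("xx", "xy") -> -1  (an odd total number of mismatches is impossible)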
|
StarcoderdataPython
|
4956175
|
<reponame>dauden1184/home-assistant
"""
Support for Tellstick switches using Tellstick Net.
This platform uses the Telldus Live online service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.tellduslive/
"""
import logging
from homeassistant.components.light import (
ATTR_BRIGHTNESS, SUPPORT_BRIGHTNESS, Light)
from homeassistant.components.tellduslive import TelldusLiveEntity
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Tellstick Net lights."""
if discovery_info is None:
return
add_entities(TelldusLiveLight(hass, light) for light in discovery_info)
class TelldusLiveLight(TelldusLiveEntity, Light):
"""Representation of a Tellstick Net light."""
def __init__(self, hass, device_id):
"""Initialize the Tellstick Net light."""
super().__init__(hass, device_id)
self._last_brightness = self.brightness
def changed(self):
"""Define a property of the device that might have changed."""
self._last_brightness = self.brightness
super().changed()
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self.device.dim_level
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
@property
def is_on(self):
"""Return true if light is on."""
return self.device.is_on
def turn_on(self, **kwargs):
"""Turn the light on."""
brightness = kwargs.get(ATTR_BRIGHTNESS, self._last_brightness)
self.device.dim(level=brightness)
self.changed()
def turn_off(self, **kwargs):
"""Turn the light off."""
self.device.turn_off()
self.changed()
|
StarcoderdataPython
|
9621867
|
from __future__ import unicode_literals
class DocumentException(Exception):
"""
Base documents warning
"""
pass
|
StarcoderdataPython
|
8076883
|
#Section 1: Import and declare variables
##Note, did not import os because I will just be reading the file locally.
import re
filename = ("raw_data/paragraph_1.txt")
#self-explanatory lists
wordList = []
#counter is equivalent to word count here
counter = 0
########################################
#Section 2: Read and iterate through the desired text file and add all words into a list.
with open(filename,'r') as f:
for line in f: #for each sentence
for word in line.split(): #in the sentence above, for each word.
wordList.append(word)
counter+=1 #count total words
########################################
#Section 3: Creating strings of letters and words in two lists, then create a list of sentences from the original text.
#Please read if you want to understand usage:
'''Here, I use the map function on the list wordList I just created to create two strings.
These strings will be important since they allow me to print the results of the analysis easily by counting things like string length.
(1) letters: a string with all the letters that excludes spaces, due to how wordList had stored words from the text.
(2) words: a string as well. The difference is that I use a ' ' delimiter to generate the string rather than the empty delimiter ('').
The way I conceptualize map is as a method that applies something (like a str typecast in this case) to all of the elements in a list, then
returns them to be joined into a single string.'''
#good examples to run through here: https://www.w3schools.com/python/ref_func_map.asp in official documentation it seems to be implied.
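# Tiny illustration of the pattern described above (the sample words are hypothetical):
#   ''.join(map(str, ['Hi', 'there']))  -> 'Hithere'   (no delimiter: letters only, no spaces)
#   ' '.join(map(str, ['Hi', 'there'])) -> 'Hi there'  (space delimiter: words)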
letters = ''.join(map(str, wordList))
words = ' '.join(map(str,wordList))
#print(len(letters)), used to check the number of letters and confirm the map function did what I wanted it to do.
#Create a list of sentences.
listSentences = words.split(".")
#Get rid of empty indices in list, if any.
listSentences= list(filter(None, listSentences))
#print(listSentences) to check that the array actually has distinct sentences.
########################################
#Section 4: Output results to terminal
print(f'''Paragraph Analysis
-----------------
Approximate Word Count: {counter}
Approximate Sentence Count: {len(listSentences)}
Average Letter Count: {round((len(letters)/counter),1)}
Average Sentence Length: {round(len(wordList)/len(listSentences),1)}
Note: Checked for accuracy via Google Docs!''')
########################################
#Section 5: Analogous to Section 3, simply write a new file instead of printing.
#used w+ because I was too lazy to make a new file for output.
out = open("output.txt","w+")
out.write(f'''Paragraph Analysis
-----------------
Approximate Word Count: {counter}
Approximate Sentence Count: {len(listSentences)}
Average Letter Count: {round((len(letters)/counter),1)}
Average Sentence Length: {round(len(wordList)/len(listSentences),1)}''')
out.close()
#close file stream.
|
StarcoderdataPython
|
5187051
|
<filename>Operators/ExampleFaceLivenessDetectOperator/__init__.py
from Operators.ExampleFaceLivenessDetectOperator.FaceLivenessDetectOperator import GeneralMiniFASNetV1SE, \
GeneralMiniFASNetV2
|
StarcoderdataPython
|
3491837
|
# -*- coding: utf-8 -*-
#
# Copyright 2020 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Renku graph export controller."""
from requests import RequestException
from sentry_sdk import capture_exception
from renku.core.commands.graph import export_graph_command
from renku.core.commands.migrate import migrations_check
from renku.core.errors import RenkuException
from renku.service.config import PROJECT_CLONE_NO_DEPTH
from renku.service.controllers.api.abstract import ServiceCtrl
from renku.service.controllers.api.mixins import RenkuOperationMixin
from renku.service.serializers.graph import (
GraphExportCallbackError,
GraphExportCallbackSuccess,
GraphExportRequest,
GraphExportResponseRPC,
)
from renku.service.views import result_response
class GraphExportCtrl(ServiceCtrl, RenkuOperationMixin):
"""Controller for export graph endpoint."""
REQUEST_SERIALIZER = GraphExportRequest()
RESPONSE_SERIALIZER = GraphExportResponseRPC()
def __init__(self, cache, user_data, request_data):
"""Construct a datasets list controller."""
self.ctx = GraphExportCtrl.REQUEST_SERIALIZER.load(request_data)
super(GraphExportCtrl, self).__init__(cache, user_data, request_data, clone_depth=PROJECT_CLONE_NO_DEPTH)
@property
def context(self):
"""Controller operation context."""
return self.ctx
def renku_op(self):
"""Renku operation for the controller."""
result = migrations_check().build().execute().output
if not result["project_supported"]:
raise RenkuException("project not supported")
callback_payload = {
"project_url": self.context["git_url"],
"commit_id": self.context["revision"] or "master",
}
try:
result = (
export_graph_command()
.build()
.execute(revision_or_range=self.context["revision"], format=self.context["format"])
)
if self.context.get("callback_url"):
self.report_success(callback_payload, {"payload": result.output}, self.context["callback_url"])
return result.output
except (RequestException, RenkuException, MemoryError) as e:
if self.context.get("callback_url"):
self.report_recoverable(callback_payload, e, self.context["callback_url"])
raise
except BaseException as e:
if self.context.get("callback_url"):
self.report_unrecoverable(callback_payload, e, self.context["callback_url"])
raise
def to_response(self):
"""Execute controller flow and serialize to service response."""
self.ctx["graph"] = self.execute_op()
return result_response(GraphExportCtrl.RESPONSE_SERIALIZER, self.ctx)
def report_recoverable(self, payload, exception, callback_url):
"""Report to callback URL recoverable state."""
from renku.core.utils import requests
capture_exception(exception)
if not callback_url:
return
payload["failure"] = {"type": "RECOVERABLE_FAILURE", "message": str(exception)}
data = GraphExportCallbackError().load(payload)
requests.post(callback_url, data=data)
def report_unrecoverable(self, payload, exception, callback_url):
"""Report to callback URL unrecoverable state."""
from renku.core.utils import requests
capture_exception(exception)
if not callback_url:
return
payload["failure"] = {"type": "UNRECOVERABLE_FAILURE", "message": str(exception)}
data = GraphExportCallbackError().load(payload)
requests.post(callback_url, data=data)
def report_success(self, request_payload, graph_payload, callback_url):
"""Report to callback URL success state."""
from renku.core.utils import requests
data = GraphExportCallbackSuccess().load({**request_payload, **graph_payload})
if not callback_url:
return data
requests.post(callback_url, data=data)
return data
|
StarcoderdataPython
|
63824
|
#!/usr/bin/env python
# PostgreSQL doesn't allow ADDing columns to a table in a particular position -
# because it doesn't really make sense in SQL -
# but COPY from CSV **requires** the columns in a specific order
# as the fields aren't specified in the source CSV file.
# so specify /ALL/ of the fields to import.
# This code assumes a database with exclusive access to EBatch / Ppatient /
# PpatientRawdata tables, where the latest values from each sequence have been
# committed as entries in the database. It works by trying to precompute the
# next values that will come off each sequence, then doing a direct load of
# the data as a CSV file.
# If the database state doesn't support this, you could workaround with:
# ./fix_prescr_sequences.sh
# Old workaround:
# irb> eb=EBatch.new; eb.save(validate: false)
# irb> pprd=Pseudo::PpatientRawdata.new; pprd.save(validate:false)
# irb> pp=Pseudo::Ppatient.new; pp.save(validate:false)
# $ ./create_prescr.py 2015 04 a
# $ ./load_tables.sh 2015 04 a
# irb> eb.destroy; pprd.destroy; pp.destroy
# We could make this work slightly more expensively but more reliably, by actually
# pulling a single value off each sequence below.
# use Python 3 print
from __future__ import print_function
import sys
import calendar
import psycopg2
import csv
import base64
import hashlib
import getpass
import os.path
import os
# ----------------------------------------------------------------------------------
def to_asciihex(b):
"""
Convert raw binary data to a sequence of ASCII-encoded hex bytes,
suitable for import via COPY .. CSV into a PostgreSQL bytea field.
"""
return '\\x'+''.join('%.2x' % ord(x) for x in b)
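# Example (sketch): to_asciihex('\x01\xff') returns the text \x01ff (a literal
# backslash, an 'x', then the hex digits 01ff), which COPY decodes back into bytea.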
# ----------------------------------------------------------------------------------
# Get year and month parameters from command line
if len(sys.argv)!=4:
print('Usage: %s <year> <month> <part>' % sys.argv[0])
print(""" where <part> is a or b - meaning choose rows with
pseudo_id1 starting with 0-7 (a) or 8-f (b).
This is to split the CSV file into two equal (manageable) chunks
due to limited memory on the db1 server""")
exit(1)
try:
year = int(sys.argv[1])
month = int(sys.argv[2])
month2s = '%.2d' % month # string version with leading 0 if needed
part = sys.argv[3]
if part=='a':
partmatch = '01234567'
elif part=='b':
partmatch = '89abcdef'
else:
raise # part must be a or b
except:
print('Parameter error')
sys.exit(1)
DB=os.environ['DB']
DBA=os.environ['DBA']
csvpath = '/home/pgsql_recovery/source_data/static'
# Initialise empty cache for rawdata records - refreshed on per-month basis.
# key = (rawdata,decrypt_key) [i.e. (encrypted_demog,key_bundle)]
# value = ppatient_rawdataid
rawdata_cache = {}
rawdata_cache_size = 0
max_rawdata_cache_size = 30E6
password = os.environ.get('PGPASSWORD') or getpass.getpass('(create_prescr.py) DB password: ')
conn = psycopg2.connect('dbname=%s user=%s password=%s' % (DB,DBA,password))
cur = conn.cursor()
# get last of: ppatients(id), ppatient_rawdata(ppatient_rawdataid), e_batch(e_batchid)
cur.execute('SELECT MAX(id) FROM ppatients')
last_ppatients_id = cur.fetchone()[0] or 0 # return 0 if None (no rows)
cur.execute('SELECT MAX(ppatient_rawdataid) FROM ppatient_rawdata')
last_ppatient_rawdataid = cur.fetchone()[0] or 0
cur.execute('SELECT MAX(e_batchid) FROM e_batch')
last_e_batchid = cur.fetchone()[0] or 0
print('Last: ppatients(id) = %d, rawdataid = %d, e_batchid = %d' % (last_ppatients_id,last_ppatient_rawdataid,last_e_batchid))
# ----------------------------------------------------------------------------------
# Use the last e_batchid value from the e_batch table - this is the value for this month's load.
# Increment in part a only.
e_batchid = last_e_batchid
if part=='a':
e_batchid += 1
ppatients_f = open('ppatients_%d%s%s.csv' % (year,month2s,part), 'a')
ppatients_f.truncate(0)
ppatient_rawdata_f = open('ppatient_rawdata_%d%s%s.csv' % (year,month2s,part), 'a')
ppatient_rawdata_f.truncate(0)
prescription_data_f = open('prescription_data_%d%s%s.csv' % (year,month2s,part), 'a')
prescription_data_f.truncate(0)
csv_filename = os.path.join(csvpath, 'PHE_%d%s_pseudonymised.csv' % (year,month2s))
with open(csv_filename, 'r') as csvfile:
preader = csv.reader(csvfile, delimiter=',', quotechar='"')
# prescription_data_writer = csv.writer(prescription_data_f)
pseudonymisation_keyid = 1 # Hard-coded for PSPRESCRIPTION data
# first N data rows, skipping 2 header rows
rown = 0
for row in preader:
rown += 1
if rown<=2: continue
# if rown>=1000003: break # For testing: only load first 1,000,000 rows
data = row[0].split()
pseudo_id1 = data[0]
if pseudo_id1[0] not in partmatch:
# first character must match corresponding part
continue
key_bundle = to_asciihex(base64.b64decode(data[1][1:-1])) # strip () before decoding
encrypted_demog = to_asciihex(base64.b64decode(data[2]))
# Binary digest = 20 bytes.
# [Python] 20-byte string takes 52 bytes
# 10-byte string takes 47 bytes.
rawdata_key = hashlib.sha1(encrypted_demog+key_bundle).digest()
if rawdata_key in rawdata_cache:
rawdataid = rawdata_cache[rawdata_key]
# print('row %d: using rawdata_cache: %d' % (rown,rawdataid))
else:
last_ppatient_rawdataid += 1
rawdataid = last_ppatient_rawdataid
#print('row %d: not cached, using: %d' % (rown,rawdataid))
# rawdata bytea,decrypt_key bytea
# COPY ppatient_rawdata (rawdata,decrypt_key)
# FROM 'input.csv' CSV;
print('"%s","%s"' % (encrypted_demog,key_bundle), file=ppatient_rawdata_f)
# Update cache, or reset if limit reached.
# Each SHA1'ed key entry uses 160 bits = 20 bytes, but the python object size is 52 bytes.
# int takes 24 bytes, so total for hash entry is 79 bytes.
# So 10 million entries ~= 790Mb.
rawdata_cache_size += 1
if rawdata_cache_size > max_rawdata_cache_size:
print('Cache size limit (%d) reached - resetting cache.' % rawdata_cache_size)
rawdata_cache = {}
rawdata_cache_size = 0
rawdata_cache[rawdata_key] = rawdataid
# -- don't COPY id field and don't return it - use a counter here.
# COPY ppatients (e_batchid,ppatient_rawdata_id,type,pseudo_id1,pseudo_id2,pseudonymisation_keyid)
# FROM 'input.csv' CSV;
print('%d,%d,"Pseudo::Prescription","%s",,%d' % (e_batchid,rawdataid,pseudo_id1,pseudonymisation_keyid), file=ppatients_f)
last_ppatients_id += 1
# Fill in 5 deleted columns, removed in 2018-07 and later extracts:
# PCO_NAME PRACTICE_NAME PRESC_QUANTITY CHEMICAL_SUBSTANCE_BNF CHEMICAL_SUBSTANCE_BNF_DESCR
# Change row to row[0:5] + ['pco_name'] + row[5:6] + ['practice_name'] + row[6:7] + ['presc_quantity'] + row[7:21] + ['chemical_substance_bnf', 'chemical_substance_bnf_descr'] + row[21:]
if len(row) == 24:
row = row[0:5] + [''] + row[5:6] + [''] + row[6:7] + [''] + row[7:21] + ['', ''] + row[21:]
# prescription data -
# basic data cleaning based on errors from PostgreSQL's COPY importer
# - note that "" fields are already implicitly converted to <blank> from csv.reader
# i.e. acceptable for COPY (e.g. for pat_age: integer field)
if '.' in row[12]:
# must be integer pay_quantity - round down
row[12] = int(float(row[12]))
# Add additional dummy columns for PF_ID,AMPP_ID,VMPP_ID (not included in first 4 months' data)
if len(row) == 19: row += ['', '', '']
# add additional dummy columns for SEX,FORM_TYPE,CHEMICAL_SUBSTANCE_BNF,
# CHEMICAL_SUBSTANCE_BNF_DESCR,VMP_ID,VMP_NAME,VTM_NAME (not included in first 11 months' data,
# but included in 2018-07 refresh)
if len(row) == 22: row += ['', '', '', '', '', '', '']
# quote text fields, i.e. not integer
# TODO: Move to using a proper CSV library instead of manual quoting
for f in range(29):
if f not in (10,15,19,20,21,26): # ITEM_NUMBER,PAT_AGE,PF_ID,AMPP_ID,VMPP_ID,VMP_ID
row[f] = '"%s"' % row[f]
# remove DEMOG field - leave till last to avoid index confusion
del row[0]
# remove quotes from PRESC_DATE field (DATE type) - a blank field will be stored as NULL.
row[0] = row[0].replace('"','')
# COPY prescription_data
# (ppatient_id,presc_date,part_month,presc_postcode,pco_code,pco_name,practice_code,practice_name,
# nic,presc_quantity,item_number,unit_of_measure,pay_quantity,drug_paid,bnf_code,
# pat_age,pf_exempt_cat,etp_exempt_cat,etp_indicator,pf_id,ampp_id,vmpp_id,
# sex,form_type,chemical_substance_bnf,chemical_substance_bnf_descr,vmp_id,vmp_name,vtm_name)
# FROM 'input.csv' CSV;
print(','.join(['%d' % last_ppatients_id] + row), file=prescription_data_f)
# prescription_data_writer.writerow(['%d' % last_ppatients_id] + row)
if (rown%1000)==0:
sys.stdout.write('%d: %d, %d\r' % (rown,last_ppatients_id,last_ppatient_rawdataid))
sys.stdout.flush()
# end of row loop
ppatients_f.close()
ppatient_rawdata_f.close()
prescription_data_f.close()
# Part a only - create an e_batch record for this month
if part=='a':
e_batch_f = open('e_batch_%d%s.csv' % (year,month2s), 'w')
# COPY e_batch
# (e_type,provider,media,original_filename,cleaned_filename,numberofrecords,
# date_reference1,date_reference2,e_batchid_traced,comments,digest,
# lock_version,inprogress,registryid,on_hold)
month = int(month)
monthend = calendar.monthrange(year,month)[1]
dateref1 = '%d-%.2d-01' % (year,month)
dateref2 = '%d-%.2d-%.2d' % (year,month,monthend)
num_rows = rown-3 # 2 header rows from 0
filename = os.path.basename(csv_filename)
print(\
""""PSPRESCRIPTION","T145Z","Hard Disk","%s","%s",%d,%s,%s,0,"Month %d batch","Not computed",0,"","X25",0""" \
% (filename,filename,num_rows,dateref1,dateref2,month), file=e_batch_f)
e_batch_f.close()
print('\nFinal cache size = %d' % (len(rawdata_cache)))
# ----------------------------------------------------------------------------------
"""
DEMOG,PRESC_DATE,PART_MONTH,PRESC_POSTCODE,PCO_CODE,PCO_NAME,PRACTICE_CODE,PRACTICE_NAME,NIC,PRESC_QUANTITY,ITEM_NUMBER,UNIT_OF_MEASURE,PAY_QUANTITY,DRUG_PAID,BNF_CODE,PAT_AGE,PF_EXEMPT_CAT,ETP_EXEMPT_CAT,ETP_INDICATOR
0 pseudoid text,
1 presc_date text,
2 part_month text,
3 presc_postcode text,
4 pco_code text,
5 pco_name text,
6 practice_code text,
7 practice_name text,
8 nic text,
9 presc_quantity text,
10 item_number integer,
11 unit_of_measure text,
12 pay_quantity integer,
13 drug_paid text,
14 bnf_code text,
15 pat_age integer,
16 pf_exempt_cat text,
17 etp_exempt_cat text,
18 etp_indicator text
# e_batchid | 1 -- autoincrement primary key
# e_type | PSPRESCRIPTION
# provider | T145Z
# media | Hard Disk -- options in era are: 'Email', 'Floppy Disk', 'CD/DVD', 'Others'
# original_filename | PHE_201504_pseudonymised_first10000.csv
# cleaned_filename | PHE_201504_pseudonymised_first10000.csv
# numberofrecords | 10000
# date_reference1 | 2015-04-01 00:00:00 -- beginning of month
# date_reference2 | 2015-04-30 00:00:00 -- end of month
# e_batchid_traced |
# comments | month 4 batch
# digest | not computed
# lock_version | 0
# inprogress |
# registryid | X25
# on_hold | 0
"""
|
StarcoderdataPython
|
1691177
|
<reponame>DanielMabadeje/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials<gh_stars>1000+
import argparse
from robosat.osm.parking import ParkingHandler
from robosat.osm.building import BuildingHandler
# Register your osmium handlers here; in addition to the osmium handler interface
# they need to support a `save(path)` function for GeoJSON serialization to a file.
handlers = { 'parking': ParkingHandler,
'building': BuildingHandler }
def add_parser(subparser):
parser = subparser.add_parser('extract', help='extracts GeoJSON features from OpenStreetMap',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--type', type=str, required=True, choices=handlers.keys(), help='type of feature to extract')
parser.add_argument('map', type=str, help='path to .osm.pbf base map')
parser.add_argument('out', type=str, help='path to GeoJSON file to store features in')
parser.set_defaults(func=main)
def main(args):
handler = handlers[args.type]()
handler.apply_file(filename=args.map, locations=True)
handler.save(args.out)
|
StarcoderdataPython
|
11312441
|
<filename>tests/tests.py
"""tests
Provides functions to compile c++ files and for unit testing.
"""
import site
import os.path as pth
from nose.tools import eq_
from excentury.command import exec_cmd
def build_cpp(name, debug=None):
"""Compile a file and place it in bin. """
root = site.getuserbase()
if debug is None:
out = '-o %s/lib/excentury/bin/%s.run' % (root, name)
else:
out = '-DDEBUG=%s -o %s/lib/excentury/bin/%s.run%s' % (
debug, root, name, debug
)
root = pth.abspath(pth.dirname(__file__)+'/cpp')
cmd = 'g++ -O3 %s %s/%s.cpp' % (out, root, name)
out, err, _ = exec_cmd(cmd)
eq_(err, "", "Build Error -->\n%s\n%s" % (cmd, err))
def run_cmd(cmd, exp_err, exp_out):
"""Run a command and compare the expected output and error."""
out, err, _ = exec_cmd(cmd)
hline = '_'*60
msg = "%s stderr -->\n%s\n%s\n%s\n\
%s expected stderr -->\n%s\n%s\n%s\n" % (cmd, hline, err, hline,
cmd, hline, exp_err, hline)
eq_(err, exp_err, msg)
msg = "%s stdout -->\n%s\n%s\n%s\n\
%s expected stdout -->\n%s\n%s\n%s\n" % (cmd, hline, out, hline,
cmd, hline, exp_out, hline)
eq_(out, exp_out, msg)
def build_run(prog, exp_err, exp_out, debug=None):
"""Build and run. """
build_cpp(prog, debug)
if debug is None:
cmd = '%s.run' % prog
else:
cmd = '%s.run%d' % (prog, debug)
run_cmd(cmd, exp_err, exp_out)
def str_eq(str1, str2):
"""Compare two strings. """
hline = '_'*60
msg = "str1 -->\n%s\n%s\n%s\n\
str2 -->\n%s\n%s\n%s\n" % (hline, str1, hline,
hline, str2, hline)
eq_(str1, str2, msg)
|
StarcoderdataPython
|
78744
|
<gh_stars>0
import json
import tornado.httpclient
class PushServerError(Exception):
pass
class PushServerClient:
def __init__(self, *, url, username=None, password=<PASSWORD>):
self.client = tornado.httpclient.AsyncHTTPClient()
self.username = username
self.password = password
while url.endswith("/"):
url = url[:-1]
self.base_url = url
async def send(self, toshi_id, service, device_token, data):
# TODO: intricacies of the PushServer format
# https://github.com/tokenbrowser/PushServer/blob/master/src/main/java/org/whispersystems/pushserver/entities/GcmMessage.java
if len(data) > 1 or 'message' not in data:
raise NotImplementedError("Only data key allowed is 'message'")
payload = {
"number": toshi_id,
"deviceId": 1,
"receipt": False,
"notification": False,
"redphone": False,
"call": False
}
if service == 'gcm' or service == 'fcm':
payload["gcmId"] = device_token
payload["message"] = data['message']
url = "{}/api/v1/push/gcm".format(self.base_url)
elif service == 'apn':
payload["apnId"] = device_token
aps_payload = {
"aps": {
"content-available": 1
},
"sofa": data['message']
}
payload["message"] = json.dumps(aps_payload)
url = "{}/api/v1/push/apn".format(self.base_url)
else:
raise PushServerError("Unsupported network: '{}'".format(service))
resp = await self.client.fetch(url, method="PUT",
headers={
'Content-Type': 'application/json'
},
body=json.dumps(payload).encode('utf-8'),
auth_username=self.username,
auth_password=<PASSWORD>,
raise_error=False)
if resp.code < 400:
return True
raise PushServerError(resp.body)
class GCMHttpPushClient:
def __init__(self, server_key):
self.server_key = server_key
self.client = tornado.httpclient.AsyncHTTPClient()
def send_impl(self, payload, service):
if service == 'fcm':
url = 'https://fcm.googleapis.com/fcm/send'
else:
url = "https://gcm-http.googleapis.com/gcm/send"
return self.client.fetch(url, method="POST",
headers={
'Authorization': "key={}".format(self.server_key),
'Content-Type': 'application/json'
},
body=json.dumps(payload).encode('utf-8'),
raise_error=False)
async def send(self, toshi_id, service, device_token, data):
if not isinstance(data, dict):
raise TypeError("data must be a dict")
if not (service == 'gcm' or service == 'fcm'):
raise PushServerError("Unsupported network: '{}'".format(service))
payload = {
"data": data,
"to": device_token
}
resp = await self.send_impl(payload, service)
if resp.code == 200:
return True
raise PushServerError(resp.body)
|
StarcoderdataPython
|
5033991
|
from estimator import Estimator
from utils import *
class rgbEstimator(Estimator):
def __init__(self, ):
super().__init__()
def fit(self, X, y=None):
assert len(X.shape) == 4, RuntimeError("Expected RGB images")
if y is not None:
y = simplify_labels(y)
self.mu = np.array([np.nanmean(X[:, :, :, ch] * y, axis=0) for ch in range(3)])
self.var = np.array([np.nanvar(X[:, :, :, ch] * y, axis=0) for ch in range(3)])
else:
self.mu = np.array([np.nanmean(X[:, :, :, ch], axis=0) for ch in range(3)])
self.var = np.array([np.nanvar(X[:, :, :, ch], axis=0) for ch in range(3)])
return self
def predict(self, X):
assert len(X.shape) == 4, RuntimeError("Expected RGB images")
prediction = []
for ch in range(3):
ch_prediction = np.zeros(X[:,:,:,ch].shape)
ch_prediction[np.absolute(X[:,:,:,ch] - self.mu[ch]) >= self.alpha * (self.var[ch] + 2)] = 1
prediction.append(ch_prediction)
return np.prod(prediction, axis=0)
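# Note (sketch): predict() marks a pixel as foreground only when every RGB channel
# deviates from its fitted per-pixel mean by at least alpha * (variance + 2); the
# alpha attribute is assumed to come from the Estimator base class, which is not
# defined in this file.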
|
StarcoderdataPython
|
3358491
|
######################################################################################################################
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/ #
# #
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import os
import random
import types
from datetime import datetime
from time import time
import botocore.config
import services
from .aws_service_retry import AwsApiServiceRetry
from .dynamodb_service_retry import DynamoDbServiceRetry
from .ec2_service_retry import Ec2ServiceRetry
from .logs_service_retry import CloudWatchLogsServiceRetry
DEFAULT_SUFFIX = "_with_retries"
DEFAULT_WAIT_SECONDS = 10
DEFAULT_MAX_WAIT = 60
DEFAULT_RANDOM_FACTOR = 0.25
MAX_WAIT = 24 * 3600
EXPECTED_EXCEPTIONS = "_expected_boto3_exceptions_"
STATS_FORMAT = "{}: , calls: {}, failed: {}, retries: {}, timed-out {}"
LOG_FORMAT = "{:0>4d}-{:0>2d}-{:0>2d} - {:0>2d}:{:0>2d}:{:0>2d}.{:0>3s} {}, retry: {}"
ENV_BOTO_RETRY_STATS = "BOTO_RETRY_STATS"
ENV_BOTO_STATS_OUTPUT = "BOTO_RETRY_OUTPUT"
ENV_USER_AGENT = "USER_AGENT"
stats_enabled = False
boto_retry_stats = str(os.getenv(ENV_BOTO_RETRY_STATS, "false")).lower() == "true" or stats_enabled
boto_stats_output = str(os.getenv(ENV_BOTO_STATS_OUTPUT, "false")).lower() == "true"
statistics = {}
def make_method_with_retries(boto_client_or_resource, name, service_retry_strategy=None, method_suffix=DEFAULT_SUFFIX):
"""
Creates a wrapper for a boto3 method call that handles retries in case of an exception from which
it can recover. The situations in which this is possible are defined in the service-specific
service_retry_strategy class.
:param boto_client_or_resource: boto client or resource to add method to
:param name: Name of the boto call
:param service_retry_strategy: Strategy that implements the logic that determines if boto_retry are possible
in case of an exception
:param method_suffix: suffix for wrapped boto method
:return:
"""
# default strategy
retry_strategy = service_retry_strategy if service_retry_strategy is not None else AwsApiServiceRetry()
# new method name
method_name = name + method_suffix
# closure function
def wrapped_api_method(client_or_resource, **args):
return retry_strategy.call(client_or_resource, name, args)
# add closure function to the client or resource
# noinspection PyArgumentList
setattr(boto_client_or_resource, method_name, types.MethodType(wrapped_api_method, boto_client_or_resource))
# return the method, but it can also be called directly as method of the boto client
return wrapped_api_method
def get_default_wait_strategy(service):
"""
Returns the default wait strategy for a service
:param service: service name
:return: Default wait strategy
"""
if service == "logs":
return MultiplyWaitStrategy(start=2, factor=2, max_wait=15, random_factor=DEFAULT_RANDOM_FACTOR)
return MultiplyWaitStrategy(start=DEFAULT_WAIT_SECONDS, max_wait=DEFAULT_MAX_WAIT, random_factor=DEFAULT_RANDOM_FACTOR)
def get_default_retry_strategy(service, wait_strategy=None, context=None, logger=None):
if wait_strategy is None:
wait_strategy = get_default_wait_strategy(service)
service_retry_strategy_class = _get_service_retry_strategy_class(service)
strategy = service_retry_strategy_class(wait_strategy=wait_strategy, context=context, logger=logger)
return strategy
def _get_service_retry_strategy_class(service):
"""
Returns the default wait strategy class for a service
:param service: Name of the service
:return: Class that implements the default strategy for a service
"""
if service == "ec2":
retry_class = Ec2ServiceRetry
elif service == "dynamodb":
retry_class = DynamoDbServiceRetry
elif service == "logs":
retry_class = CloudWatchLogsServiceRetry
else:
retry_class = AwsApiServiceRetry
return retry_class
def get_client_with_retries(service_name, methods, context=None, region=None, session=None, wait_strategy=None,
method_suffix=DEFAULT_SUFFIX, logger=None):
args = {
"service_name": service_name,
}
if region is not None:
args["region_name"] = region
user_agent = os.getenv(ENV_USER_AGENT, None)
if user_agent is not None:
session_config = botocore.config.Config(user_agent=user_agent)
args["config"] = session_config
if session is not None:
aws_session = session
else:
aws_session = services.get_session()
result = aws_session.client(**args)
# get strategy for the service
service_retry_strategy = get_default_retry_strategy(context=context, service=service_name,
wait_strategy=wait_strategy, logger=logger)
# add a new method to the client instance that wraps the original method with service specific retry logic
for method in methods:
make_method_with_retries(boto_client_or_resource=result,
name=method,
service_retry_strategy=service_retry_strategy,
method_suffix=method_suffix)
return result
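# Illustrative usage sketch (the service and method names below are examples only):
#   ec2 = get_client_with_retries("ec2", ["describe_instances"], context=None)
#   resp = ec2.describe_instances_with_retries(MaxResults=5)
# Each listed method gains a "<name>_with_retries" twin (see DEFAULT_SUFFIX) that
# routes the original call through the service-specific retry strategy.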
def add_retry_methods_to_resource(resource, methods, context=None, method_suffix=DEFAULT_SUFFIX):
"""
Adds new methods to a boto3 resource that wrap the original methods with retry logic.
:param resource: Boto3 resource
:param methods: List of methods for which a new method will be added to the client wrapped in retry logic
:param context: Lambda execution context
:param method_suffix: Suffix to add to the methods with retry logic that are added to the resource, use None for DEFAULT_SUFFIX
:return: The resource with the wrapped retry methods added
"""
# get name of the service and get the default strategy for that service
service_name = type(resource).__name__.split(".")[0]
service_retry_strategy_class = _get_service_retry_strategy_class(service_name)
retry_wait_strategy = get_default_wait_strategy(service_name)
# add a new method to the resource instance that wraps the original method with service specific retry logic
for method in methods:
make_method_with_retries(boto_client_or_resource=resource,
name=method,
method_suffix=method_suffix,
service_retry_strategy=service_retry_strategy_class(
wait_strategy=retry_wait_strategy,
context=context)
)
return resource
def _apply_randomness(value, random_factor):
"""
Applies a random factor to the value
:param value: Input value
:param random_factor: Random factor, must be between 0 (no random) and 1 (output is between 0 and 2* value)
:return: Value with random factor applied
"""
if random_factor < 0 or random_factor > 1:
raise ValueError("Random factor must be in range 0 to 1")
return value + (random.uniform(random_factor * -1, random_factor) * value) if random_factor != 0 else value
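# Example (sketch): _apply_randomness(10, 0.25) returns a value in the range
# [7.5, 12.5]; with random_factor=0 the input is returned unchanged, and factors
# outside [0, 1] raise ValueError.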
class WaitStrategy(object):
"""
    Implements a wait strategy with a predefined list of wait periods
"""
def __init__(self, waits, random_factor=0):
"""
        Initializes the wait strategy
        :param waits: list of wait periods
"""
self.waits = waits
self.random_factor = random_factor
self._index = 0
def __iter__(self):
return self
def __next__(self):
"""
Returns next wait period
:return: Next wait period
"""
if self._index < len(self.waits):
val = self.waits[self._index]
self._index += 1
return _apply_randomness(val, self.random_factor)
raise StopIteration
def reset(self):
"""
        Resets wait strategy
:return:
"""
self._index = 0
class ConstantWaitStrategy(object):
"""
    Implements a wait strategy with constant wait periods [step, step, step, ...]
"""
def __init__(self, step=DEFAULT_WAIT_SECONDS, random_factor=0):
"""
Initializes constant wait strategy
:param step: wait interval
"""
self.step = step
self.random_factor = random_factor
def __iter__(self):
return self
def __next__(self):
"""
Returns next wait period
:return: Next wait period
"""
return _apply_randomness(self.step, self.random_factor)
@classmethod
def reset(cls):
"""
Resets wait strategy (No action for this strategy)
:return:
"""
pass
class LinearWaitStrategy(object):
"""
    Implements a wait strategy with incrementing wait periods [start, start+incr, start+incr+incr, ..., max_wait]
"""
def __init__(self, start=DEFAULT_WAIT_SECONDS, incr=DEFAULT_WAIT_SECONDS, max_wait=MAX_WAIT, random_factor=0.0):
"""
Initializes Linear wait strategy implementation
:param start: First wait period
:param incr: Wait period increment
:param max_wait: Max wait period
"""
self.start = start
self.incr = incr
self.max_wait = max_wait
self.random_factor = random_factor
self._val = start
def __iter__(self):
return self
def __next__(self):
"""
Returns next wait period
:return: Next wait period
"""
val = self._val
self._val = min(self._val + self.incr, self.max_wait)
return _apply_randomness(val, self.random_factor)
def reset(self):
"""
Reset wait period to start wait period
:return:
"""
self._val = self.start
class MultiplyWaitStrategy(object):
"""
    Implements a wait strategy with multiplied wait periods [start, start*factor, start*factor*factor, ..., max_wait]
"""
def __init__(self, start=DEFAULT_WAIT_SECONDS, factor=2, max_wait=MAX_WAIT, random_factor=0.0):
"""
Initializes Multiply wait strategy
:param start: Start wait period
:param factor: Wait period multiply factor
:param max_wait: Max wait period
"""
self.start = start
self.factor = factor
self.max_wait = max_wait
self.random_factor = random_factor
self._val = start
def __iter__(self):
return self
def __next__(self):
"""
Returns next wait period
:return: Next wait period
"""
val = self._val
self._val = min(self._val * self.factor, self.max_wait)
return _apply_randomness(val, self.random_factor)
def reset(self):
self._val = self.start
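# --- Added illustrative sketch (not part of the original module) ---
# How the strategies above iterate when random_factor is 0:
def _demo_wait_strategies():
    linear = LinearWaitStrategy(start=1, incr=2, max_wait=10, random_factor=0)
    linear_waits = [next(linear) for _ in range(6)]      # [1, 3, 5, 7, 9, 10]
    multiply = MultiplyWaitStrategy(start=1, factor=2, max_wait=15, random_factor=0)
    multiply_waits = [next(multiply) for _ in range(6)]  # [1, 2, 4, 8, 15, 15]
    return linear_waits, multiply_waits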
def update_calls(client_or_resource, method_name, retry):
if boto_retry_stats:
dt = datetime.fromtimestamp(time())
full_name = "{}.{}".format(type(client_or_resource).__name__, method_name)
if boto_stats_output:
print((LOG_FORMAT.format(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, str(dt.microsecond)[0:3], full_name,
retry)))
        if full_name in statistics:
statistics[full_name]["calls"] += 1
else:
statistics[full_name] = {"calls": 1, "retries": 0, "failed": 0, "timed-out": 0}
def update_retries(client_or_resource, method_name, failed, retries, timed_out):
if boto_retry_stats:
full_name = "{}.{}".format(type(client_or_resource).__name__, method_name)
statistics[full_name]["retries"] += retries
statistics[full_name]["failed"] += failed
        statistics[full_name]["timed-out"] += 1 if timed_out else 0
def print_statistics():
if boto_retry_stats and boto_stats_output:
for name in sorted(statistics):
print((STATS_FORMAT.format(name, statistics[name]["calls"], statistics[name]["failed"], statistics[name]["retries"],
                                       statistics[name]["timed-out"])))
def clear_statistics():
global statistics
statistics = {}
|
StarcoderdataPython
|
8183043
|
<filename>Python/7 - kyu/7 kyu - Binary Addition.py
# https://www.codewars.com/kata/binary-addition/train/python
# My solution
def add_binary(a,b):
return str(bin(a+b))[2:]
# ...
def add_binary(a,b):
return '{0:b}'.format(a + b)
# ...
def add_binary(a, b):
return format(a + b, 'b')
# ...
def find_highest_power_2(num):
n=0
while 2**n <= num:
n += 1
return n-1
def add_binary(a,b):
sum = a + b
number = 0
while sum != 0:
place_holder = find_highest_power_2(sum)
number += 10**place_holder
sum = sum - 2**place_holder
return str(number)
# ...
def add_binary(a,b):
n = a + b
binList = []
while (n > 0):
binList.append(n % 2)
n = n // 2
return ''.join(map(str, reversed(binList)))
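# Added check (not part of the original kata file). Only the last add_binary definition
# above is the one bound at this point; the solutions agree on results such as:
#   add_binary(1, 1) -> '10'
#   add_binary(5, 9) -> '1110'
#   add_binary(0, 1) -> '1'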
|
StarcoderdataPython
|
9731115
|
import sys
import traceback
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models import get_model
class AppNotFoundError(Exception):
pass
class ClassNotFoundError(Exception):
pass
def get_class(module_label, classname):
"""
Dynamically import a single class from the given module.
This is a simple wrapper around `get_classes` for the case of loading a
single class.
Args:
module_label (str): Module label comprising the app label and the
module name, separated by a dot. For example, 'catalogue.forms'.
classname (str): Name of the class to be imported.
Returns:
The requested class object or `None` if it can't be found
"""
return get_classes(module_label, [classname])[0]
def get_classes(module_label, classnames):
"""
Dynamically import a list of classes from the given module.
This works by looping over ``INSTALLED_APPS`` and looking for a match
against the passed module label. If the requested class can't be found in
the matching module, then we attempt to import it from the corresponding
core Oscar app (assuming the matched module isn't in Oscar).
This is very similar to ``django.db.models.get_model`` function for
dynamically loading models. This function is more general though as it can
load any class from the matching app, not just a model.
Args:
module_label (str): Module label comprising the app label and the
module name, separated by a dot. For example, 'catalogue.forms'.
classname (str): Name of the class to be imported.
Returns:
The requested class object or ``None`` if it can't be found
Examples:
Load a single class:
>>> get_class('basket.forms', 'BasketLineForm')
oscar.apps.basket.forms.BasketLineForm
Load a list of classes:
>>> get_classes('basket.forms', ['BasketLineForm', 'AddToBasketForm'])
[oscar.apps.basket.forms.BasketLineForm,
oscar.apps.basket.forms.AddToBasketForm]
Raises:
AppNotFoundError: If no app is found in ``INSTALLED_APPS`` that matches
the passed module label.
ImportError: If the attempted import of a class raises an
``ImportError``, it is re-raised
"""
app_module_path = _get_app_module_path(module_label)
if not app_module_path:
raise AppNotFoundError("No app found matching '%s'" % module_label)
# Check if app is in oscar
if app_module_path.split('.')[0] == 'oscar':
# Using core oscar class
module_path = 'oscar.apps.%s' % module_label
imported_module = __import__(module_path, fromlist=classnames)
return _pluck_classes([imported_module], classnames)
# App must be local - check if module is in local app (it could be in
# oscar's)
app_label = module_label.split('.')[0]
if '.' in app_module_path:
base_package = app_module_path.rsplit('.' + app_label, 1)[0]
local_app = "%s.%s" % (base_package, module_label)
else:
local_app = module_label
try:
imported_local_module = __import__(local_app, fromlist=classnames)
except ImportError:
# There are 2 reasons why there is ImportError:
# 1. local_app does not exist
# 2. local_app exists but is corrupted (ImportError inside of the app)
#
# Obviously, for the reason #1 we want to fall back to use Oscar app.
# For the reason #2 we want to propagate error (the dev obviously wants
# to override app and not use Oscar app)
#
# ImportError does not provide easy way to distinguish those two cases.
# Fortunately, the traceback of the ImportError starts at __import__
# statement. If the traceback has more than one frame, it means that
# application was found and ImportError originates within the local app
__, __, exc_traceback = sys.exc_info()
frames = traceback.extract_tb(exc_traceback)
if len(frames) > 1:
raise
# Module not in local app
imported_local_module = {}
oscar_app = "oscar.apps.%s" % module_label
try:
imported_oscar_module = __import__(oscar_app, fromlist=classnames)
except ImportError:
# Oscar does not have this application, can't fallback to it
imported_oscar_module = None
return _pluck_classes([imported_local_module, imported_oscar_module],
classnames)
def _pluck_classes(modules, classnames):
klasses = []
for classname in classnames:
klass = None
for module in modules:
if hasattr(module, classname):
klass = getattr(module, classname)
break
if not klass:
packages = [m.__name__ for m in modules]
raise ClassNotFoundError("No class '%s' found in %s" % (
classname, ", ".join(packages)))
klasses.append(klass)
return klasses
def _get_app_module_path(module_label):
app_name = module_label.rsplit(".", 1)[0]
for installed_app in settings.INSTALLED_APPS:
if installed_app.endswith(app_name):
return installed_app
return None
def get_profile_class():
"""
Return the profile model class
"""
setting = getattr(settings, 'AUTH_PROFILE_MODULE', None)
if setting is None:
return None
app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
profile_class = get_model(app_label, model_name)
if not profile_class:
raise ImproperlyConfigured("Can't import profile model")
return profile_class
def feature_hidden(feature_name):
"""
Test if a certain Oscar feature is disabled.
"""
return (feature_name is not None and
feature_name in settings.OSCAR_HIDDEN_FEATURES)
|
StarcoderdataPython
|
6463421
|
# -*- coding: utf-8 -*-
"""
Module defines modifier that compresses a stream with lbzip2
"""
from psutil import cpu_count
from twindb_backup.modifiers.parallel_compressor import ParallelCompressor
DEFAULT_THREADS = cpu_count() - 1
class Lbzip2(ParallelCompressor):
"""
Modifier that compresses the input_stream with lbzip2.
"""
def __init__(self, input_stream, threads=DEFAULT_THREADS, level=9):
"""
Modifier that uses lbzip2 compression
:param input_stream: Input stream. Must be file object
:param threads: number of threads to use (defaults to total-1)
:type threads: int
:param level: compression level from 1 to 9 (fastest to best)
:type level: int
"""
super(Lbzip2, self).__init__(
input_stream,
program="lbzip2",
threads=threads,
level=level,
suffix=".bz",
)
@property
def _modifier_cmd(self):
"""get compression program cmd"""
return [
self._program,
"-{0}".format(self._level),
"-n",
str(self._threads),
"-c",
"-",
]
@property
def _unmodifier_cmd(self):
"""get decompression program cmd"""
return [self._program, "-n", str(self._threads), "-d", "-c"]
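# Added note (not part of the original module): with the defaults above the modifier
# shells out to commands shaped like
#   compression:   lbzip2 -9 -n <threads> -c -
#   decompression: lbzip2 -n <threads> -d -c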
|
StarcoderdataPython
|
3355982
|
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
from scipy.spatial import KDTree
from std_msgs.msg import Int32, Bool
import numpy as np
import math
import tf
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
It is taken into account the position of the traffic light in order to decelerate waypoints.
This node is subscribed to the following topics:
- current_pose
- base_waypoints: publishes a list of all waypoints for the track, so this list includes waypoints
both before and after the vehicle
- traffic_waypoint: it is the index of the waypoint for nearest upcoming red light's stop line
And it publishes final_waypoints, which are the list of waypoints to be followed.
There are two parameters that can be tuned:
- LOOKAHEAD_WPS: which defines the number of waypoints that will be published,
- MAX_DECEL: it is the maximum deceleration to be commanded.
'''
LOOKAHEAD_WPS = 200 # Number of waypoints we will publish. You can change this number
MAX_DECEL = .5
MID_POINT = 60 # Point where velocity is decreased from approaching velocity to zero
class WaypointUpdater(object):
def __init__(self):
rospy.loginfo('Initializing my waypoint_updater.')
rospy.init_node('waypoint_updater')
self.pose = None
self.base_waypoints = None
self.waypoints_2d = None
self.waypoint_tree = None
self.base_lane = None
self.stopline_wp_idx = -1
self.alpha = self.calc_coef_c2(MID_POINT)/self.calc_coef_c1(MID_POINT)
self.close_to_tl = False
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
rospy.Subscriber('/close_to_tl', Bool, self.close_to_tl_cb)
self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
# TODO: Add a subscriber for /obstacle_waypoint below
self.loop()
def loop(self):
rate = rospy.Rate(10)
while not rospy.is_shutdown():
if self.pose and self.base_waypoints and self.waypoint_tree and self.waypoints_2d:
# Get closest waypoint
self.publish_waypoints(self.get_closest_waypoint_idx())
self.check_stopline()
rate.sleep()
# Compute the closest waypoint index
def get_closest_waypoint_idx(self):
# Get the car's current position
x = self.pose.pose.position.x
y = self.pose.pose.position.y
# Note: .query returns (distance, index)
closest_idx = self.waypoint_tree.query([x, y], 1)[1]
# Check if closest is ahead or behind vehicle
prev_idx = closest_idx - 1
closest_coord = self.waypoints_2d[closest_idx]
prev_coord = self.waypoints_2d[prev_idx]
# Equation for hyperplane through closest_coords
closest_vect = np.array(closest_coord)
prev_vect = np.array(prev_coord)
pos_vect = np.array([x, y])
val = np.dot(closest_vect - prev_vect, pos_vect - closest_vect)
if val > 0:
closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
return closest_idx
# Publish the main output final_waypoints
def publish_waypoints(self, closest_id):
final_lane = self.generate_lane()
self.final_waypoints_pub.publish(final_lane)
# This function generates the lane which will be sent
def generate_lane(self):
lane = Lane()
# Compute the closest index to our position
closest_idx = self.get_closest_waypoint_idx()
farthest_idx = closest_idx + LOOKAHEAD_WPS
# Slice base_waypoints with our closest and farthest indexes
base_waypoints = self.base_lane.waypoints[closest_idx:farthest_idx]
if self.stopline_wp_idx == -1 or (self.stopline_wp_idx>=farthest_idx):
lane.waypoints = base_waypoints
else:
lane.waypoints = self.decelerate_waypoints(base_waypoints, closest_idx)
return lane
#pass
def check_stopline(self):
# Get stopline coordinates
sl_x = self.base_waypoints.waypoints[self.stopline_wp_idx].pose.pose.position.x
sl_y = self.base_waypoints.waypoints[self.stopline_wp_idx].pose.pose.position.y
# Check if stopline is ahead or behind vehicle
yaw_rad = self.get_yaw()
yaw_deg = yaw_rad*180/np.pi
yaw_vect = (np.cos(yaw_rad), np.sin(yaw_rad))
sl_vect = (sl_x,sl_y)
pos_vect = np.array(self.get_car_xy())
val = np.dot(yaw_vect, sl_vect - pos_vect)
close_wp = self.get_closest_waypoint_idx()
sl_wp = self.stopline_wp_idx
#print('val: ', val, 'close_wp: ', close_wp, 'sl_wp: ', sl_wp)
def get_yaw(self):
        q = self.pose.pose.orientation
q = (q.x, q.y, q.z, q.w)
#print(self.pose.pose.orientation)
#rotation = (pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w)
attitude = tf.transformations.euler_from_quaternion(q)
return attitude[2]
def get_car_xy(self):
x = self.pose.pose.position.x
y = self.pose.pose.position.y
return x,y
# Function to decelerate the waypoints between our position and the traffic light
def decelerate_waypoints(self, waypoints, closest_idx):
temp = []
stop_idx = max(self.stopline_wp_idx - closest_idx -2, 0)
print('stop_idx', stop_idx)
for i, wp in enumerate(waypoints):
p = Waypoint()
p.pose = wp.pose
dist = self.distance(waypoints, i, stop_idx)
#vel = math.sqrt(2*MAX_DECEL*dist)
#vel_coef = (dist/20)**2
#vel_coef = 0.0
#vel_coef = 1-(2/(1+math.exp(dist/15)))
if self.close_to_tl:
vel_coef = self.alpha*self.calc_coef_c1(dist)
else:
vel_coef = self.calc_coef_c2(dist)
if vel_coef >1:
vel_coef = 1
vel = vel_coef* wp.twist.twist.linear.x
if vel<1.:
vel = 0.
p.twist.twist.linear.x = vel
temp.append(p)
return temp
def calc_coef_c1 (self,dist):
return (-(1/(1+math.exp(dist/10)))+0.5)
def calc_coef_c2 (self,dist):
return (-(0.5/(1+math.exp((dist-(MID_POINT+50))/10)))+1)
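    # Added note: calc_coef_c1 rises from 0 at the stop line towards 0.5 with distance,
    # while calc_coef_c2 rises from ~0.5 towards 1; self.alpha rescales calc_coef_c1 so
    # that the two velocity profiles match at dist == MID_POINT.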
# Callback function when receiving current_pose
def pose_cb(self, msg):
self.pose = msg
# Callback function when receiving base_waypoints
def waypoints_cb(self, waypoints):
self.base_waypoints = waypoints
self.base_lane = waypoints
if not self.waypoints_2d:
self.waypoints_2d = [
[w.pose.pose.position.x, w.pose.pose.position.y]
for w in waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
# Callback function when receiving close_to_tl
def close_to_tl_cb(self, msg):
# TODO: Callback for /traffic_waypoint message. Implement
self.close_to_tl = msg.data
# Callback function when receiving traffic_waypoint
def traffic_cb(self, msg):
# TODO: Callback for /traffic_waypoint message. Implement
self.stopline_wp_idx = msg.data
# Callback for /obstacle_waypoint message
def obstacle_cb(self, msg):
pass
def get_waypoint_velocity(self, waypoint):
return waypoint.twist.twist.linear.x
def set_waypoint_velocity(self, waypoints, waypoint, velocity):
waypoints[waypoint].twist.twist.linear.x = velocity
# Compute distance between two waypoints
def distance(self, waypoints, wp1, wp2):
dist = 0
dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
for i in range(wp1, wp2+1):
dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
wp1 = i
return dist
if __name__ == '__main__':
try:
WaypointUpdater()
except rospy.ROSInterruptException:
rospy.logerr('Could not start waypoint updater node.')
|
StarcoderdataPython
|
11364354
|
<reponame>LegitStack/lazydata<filename>lazydata/config/config.py
"""
Class to read and manipulate the project's config file
"""
from pathlib import Path
import yaml
import os
from lazydata.storage.hash import calculate_file_sha256
class Config:
def __init__(self, init_dir=Path.cwd()):
"""
Object initialisation
:param init_dir: The initialisation dir to use for the search for the project file.
Usually this is the path to the python file calling lazydata.use()
"""
# look for the location of the config file by looking at parents directories
self.config_path = None
init_parents = [init_dir]
init_parents.extend([p for p in init_dir.parents])
for p in init_parents:
proposed_path = Path(p.resolve(), "lazydata.yml")
if proposed_path.exists():
self.config_path = proposed_path
if self.config_path is None:
raise RuntimeError("Cannot find the lazydata.yml file in any of the parent directories. "
"Did you run `lazydata init`?")
try:
with open(str(self.config_path)) as fp:
self.config = yaml.safe_load(fp)
except Exception as e:
raise RuntimeError("Error parsing `lazydata.yml`. Please revert to the last working version.\n%s" % str(e))
if "files" not in self.config:
self.config["files"] = []
def path_relative_to_config(self, path:str) -> Path:
"""
Return the Path relative to the config file
:param path: file path
:return: Path object relative to the config file
"""
return Path(os.path.abspath(path)).relative_to(self.config_path.parent)
def abs_path(self, path_relative_to_config:str) -> Path:
"""
Return the absolute path of a file that is defined as being relative to config
:param path_relative_to_config:
:return:
"""
return Path(self.config_path.parent.resolve(), path_relative_to_config)
def get_latest_and_all_file_entries(self, path:str):
"""
Get the latest and all other versions of the file entry for a path
:param path:
:return: tuple: None, None if nothing found, otherwise <latest>,<older>
"""
# path relative to the config file
path_rel = str(self.path_relative_to_config(path))
all_entries = [f for f in self.config["files"] if f["path"] == path_rel]
if len(all_entries) == 0:
return None, None
else:
return all_entries[-1], all_entries[:-1]
def add_file_entry(self, path:str, script_path:str):
"""
Add a file entry to the config file
:param path: The path to the data file
:param script_path: The path to the script that used it
:return:
"""
# path relative to the config file
path_rel = str(self.path_relative_to_config(path))
script_path_rel = str(self.path_relative_to_config(script_path))
sha256 = calculate_file_sha256(path)
self.config["files"].append({
"path": path_rel,
"hash": sha256,
"usage": script_path_rel,
})
self.save_config()
def add_usage(self, entry:dict, script_path:str):
"""
Make sure the usage string is present in the usage.
This function modifies the `entry` input parameter and only has side-effects.
:param entry: The dict with the config file entry that needs to be modified
:param script_path: The location where the file was used
:return:
"""
script_path_rel = str(self.path_relative_to_config(script_path))
if isinstance(entry["usage"], list):
usage_set = set(entry["usage"])
if script_path_rel not in usage_set:
entry["usage"].append(script_path_rel)
elif entry["usage"] != script_path_rel:
entry["usage"] = [entry["usage"], script_path_rel]
self.save_config()
def add_remote(self, remote_url:str, endpoint_url=''):
"""
Add a remote to the config file
:param remote_url:
:return:
"""
if "remote" in self.config:
print("ERROR: Remote storage backend in `lazydata.yml` already exists. Aborting...")
else:
# Setting the remote config automatically sets the endpoint parameter, even if it is None
self.config["remote"] = remote_url
self.config["endpoint"] = endpoint_url
self.save_config()
def check_file_tracked(self, path:str):
"""
Checks if the file is tracked in the config file
:return:
"""
latest, _ = self.get_latest_and_all_file_entries(path)
return latest is not None
def tracked_files_used_in(self, script_path:str):
"""
See if there are any tracked files used by this script
:param script_path:
:return:
"""
script_path_rel = str(self.path_relative_to_config(script_path))
entries = [e for e in self.config["files"] if usage_filter(e["usage"], script_path_rel)]
return entries
def abs_path_matches_prefix(self, abspath_prefix:str):
"""
Select those tracked files that match an absolute path prefix
:param abspath_prefix:
:return:
"""
entries = [e for e in self.config["files"] if str(self.abs_path(e["path"])).startswith(abspath_prefix)]
return entries
def save_config(self):
"""
Save the config file
:return:
"""
with open(str(self.config_path), "w") as fp:
yaml.dump({"version": self.config["version"]}, fp, default_flow_style=False)
if "remote" in self.config:
yaml.dump({"remote": self.config["remote"]}, fp, default_flow_style=False)
if "endpoint" in self.config:
yaml.dump({"endpoint": self.config["endpoint"]}, fp, default_flow_style=False)
if "files" in self.config:
yaml.dump({"files": self.config["files"]}, fp, default_flow_style=False)
def usage_filter(usage, script_path):
if isinstance(usage, list):
return script_path in usage
else:
return script_path == usage
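# --- Added illustrative check (not part of the original module) ---
def _demo_usage_filter():
    # usage may be recorded as a single script path or as a list of script paths
    assert usage_filter("train.py", "train.py")
    assert usage_filter(["train.py", "eval.py"], "eval.py")
    assert not usage_filter(["train.py"], "predict.py")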
|
StarcoderdataPython
|
5069216
|
import re
from gwv.dump import Dump
from gwv.kagedata import KageData
from gwv.validators import Validator
from gwv.validators import ErrorCodes
error_codes = ErrorCodes(
    BLANK_LINE="0",  # blank line
    INVALID_CHAR="1",  # invalid character
    NOT_AN_INT="2",  # cannot be parsed as an integer
    NONNORMALIZED_NUMBER_EXPRESSION="3",  # non-normalized number expression
)
_re_valid_chars = re.compile(r"^[\da-z_\:@-]+$")
class NumexpValidator(Validator):
name = "numexp"
def is_invalid(self, name: str, related: str, kage: KageData, gdata: str,
dump: Dump):
for i, line in enumerate(gdata.split("$")):
if line == "":
                return [error_codes.BLANK_LINE, [i, line]]  # blank line
if not _re_valid_chars.match(line):
                return [error_codes.INVALID_CHAR, [i, line]]  # invalid character
data = line.split(":")
for j, col in enumerate(data):
if j == 7 and data[0] == "99":
continue
try:
numdata = int(col)
except ValueError:
                    return [error_codes.NOT_AN_INT, [i, line]]  # cannot be parsed as an integer
if str(numdata) != col:
                    # non-normalized number expression
return [
error_codes.NONNORMALIZED_NUMBER_EXPRESSION, [i, line]]
return False
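# Added note (not part of the original validator): the normalization check flags values
# such as "01" or "-0", because str(int("01")) == "1" != "01"; canonical forms like "0"
# and "-12" round-trip through int() unchanged and therefore pass.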
|
StarcoderdataPython
|
3579221
|
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 22 18:14:30 2022
@author: victor
"""
name = input("What's your name? ")
print("Hello, " + name + "!")
|
StarcoderdataPython
|
5043983
|
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from oslo_log import log as logging
from sysinv.common import exception
from sysinv.common import utils
from sysinv.helm import common
from sysinv.helm import elastic
LOG = logging.getLogger(__name__)
class ElasticsearchHelm(elastic.ElasticBaseHelm):
"""Class to encapsulate helm operations for elasticsearch"""
CHART = common.HELM_CHART_ELASTICSEARCH
def get_overrides(self, namespace=None):
overrides = {
common.HELM_NS_MONITOR: {
'cluster': self._get_cluster_overrides(),
'master': self._get_master_overrides(),
'data': self._get_data_overrides(),
'client': self._get_client_overrides(),
}
}
if namespace in self.SUPPORTED_NAMESPACES:
return overrides[namespace]
elif namespace:
raise exception.InvalidHelmNamespace(chart=self.CHART,
namespace=namespace)
else:
return overrides
def _get_cluster_overrides(self):
env_vars = {'MINIMUM_MASTER_NODES': "1",
'EXPECTED_MASTER_NODES': "1",
'RECOVER_AFTER_MASTER_NODES': "1"}
if utils.is_aio_simplex_system(self.dbapi):
cluster_initial_master_nodes = ['stx-elasticsearch-master-0']
else:
cluster_initial_master_nodes = ['stx-elasticsearch-master-0',
'stx-elasticsearch-master-1']
conf = {
'env': env_vars,
'config': {
'cluster.initial_master_nodes': cluster_initial_master_nodes},
}
return conf
def _get_master_overrides(self):
if utils.is_aio_system(self.dbapi):
heap_size = "256m"
else:
heap_size = "512m"
conf = {
'replicas':
self._count_hosts_by_label(common.LABEL_MONITOR_CONTROLLER),
'heapSize': heap_size,
'nodeSelector': {common.LABEL_MONITOR_CONTROLLER: "enabled"},
}
return conf
def _get_data_overrides(self):
# Note memory values are to be system engineered.
if utils.is_aio_system(self.dbapi):
heap_size = "512m"
memory_size = "512Mi"
else:
heap_size = "1536m"
memory_size = "1536Mi"
conf = {
'replicas':
self._count_hosts_by_label(common.LABEL_MONITOR_DATA),
'heapSize': heap_size,
'resources': {
'limits': {
'cpu': "1"
},
'requests': {
'cpu': "25m",
'memory': memory_size,
}, },
'persistence': {'storageClass': 'general',
'size': "100Gi"},
'nodeSelector': {common.LABEL_MONITOR_DATA: "enabled"},
}
return conf
def _get_client_overrides(self):
if utils.is_aio_system(self.dbapi):
heap_size = "256m"
else:
heap_size = "512m"
conf = {
'replicas':
self._count_hosts_by_label(common.LABEL_MONITOR_CLIENT),
'heapSize': heap_size,
'nodeSelector': {common.LABEL_MONITOR_CLIENT: "enabled"},
}
return conf
|
StarcoderdataPython
|
6412096
|
"""Instrument sqlite3 to report SQLite queries.
``patch_all`` will automatically patch your sqlite3 connection to make it work.
::
from ddtrace import Pin, patch
import sqlite3
# If not patched yet, you can patch sqlite3 specifically
patch(sqlite3=True)
# This will report a span with the default settings
db = sqlite3.connect(":memory:")
cursor = db.cursor()
cursor.execute("select * from users where id = 1")
# Use a pin to specify metadata related to this connection
Pin.override(db, service='sqlite-users')
"""
from .connection import connection_factory
from .patch import patch
__all__ = ['connection_factory', 'patch']
|
StarcoderdataPython
|
132871
|
# libraries and data
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import numpy as np
import pandas as pd
import pprint
from tabulate import tabulate
def plot_line(X,Y,x_label,y_label,title,legend):
# style
plt.style.use('seaborn-darkgrid')
# line plot
# first is x axis, 2nd is y axis
plt.plot(X, Y, marker='', color='red', linewidth=1, alpha=1)
# Add legend
red_line = mlines.Line2D([], [], color='red', alpha=1, linewidth=2, label=legend)
plt.legend(loc=1, ncol=2, handles=[red_line])
#red_patch = mpatches.Patch(color='red', label=header[1])
#plt.legend(loc=1, ncol=2, handles=[red_patch])
# Add titles
plt.title(title, loc='left', fontsize=12, fontweight=0, color='orange')
plt.xlabel(x_label)
plt.ylabel(y_label)
#plt.xticks(df[str(header[0])] , rotation=45 )
plt.show(block=True)
|
StarcoderdataPython
|
243357
|
__author__ = "JJ.sven"
import sys
import os
import day_2.mod2
# from day_1 import var
print(sys.path)  # environment variables (module search path)
#
print(sys.argv)  # command-line arguments
print(sys.argv[0])
cmd_res = os.system("ls")  # run a shell command
print(cmd_res)
# cmd_res is the command's return code
cmd_res = os.popen("ls").read()
print(cmd_res)
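# Added note: os.system returns the command's exit status, while os.popen(...).read()
# captures the command's standard output as a string.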
|
StarcoderdataPython
|
5150340
|
"""
Copyright 2020 Nvidia Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import torch
from torch import nn
from config import cfg
from network.mynn import initialize_weights, Norm2d, Upsample, Upsample2
from network.mynn import ResizeX, scale_as
from network.utils import get_aspp, get_trunk, ConvBnRelu
from network.utils import make_seg_head, make_attn_head
from utils.misc import fmt_scale
from network.apnb import APNB
from network.afnb import AFNB
from network.asnb import ASNB
from network.adnb import ADNB
class MscaleBase(nn.Module):
"""
Multi-scale attention segmentation model base class
"""
def __init__(self):
super(MscaleBase, self).__init__()
self.criterion = None
self.fuse_aspp = False
def _fwd(self, x, aspp_in=None):
pass
def recurse_fuse_fwd(self, x, scales, aspp_lo=None, attn_lo=None):
"""
recursive eval for n-scales
target resolution is fixed at 1.0
[0.5, 1.0]:
p_0.5, aspp_0.5, attn_0.5 = fwd(attn,aspp=None)
p_1.0 = recurse([1.0], aspp_0.5, attn_0.5)
p_1.0 = fwd(attn_0.5, aspp_0.5)
output = attn_0.5 * p_0.5 + (1 - attn_0.5) * p_1.0
"""
this_scale = scales.pop()
if this_scale == 1.0:
x_resize = x
else:
x_resize = ResizeX(x, this_scale)
p, attn, aspp = self._fwd(x_resize, attn_lo=attn_lo, aspp_lo=aspp_lo)
if this_scale == 1.0:
p_1x = p
attn_1x = attn
else:
p_1x = scale_as(p, x)
attn_1x = scale_as(attn, x)
if len(scales) == 0:
output = p_1x
else:
output = attn_1x * p_1x
p_next, _ = self.recurse_fuse_fwd(x, scales,
attn_lo=attn, aspp_lo=aspp)
output += (1 - attn_1x) * p_next
return output, attn_1x
def nscale_fused_forward(self, inputs, scales):
"""
multi-scale evaluation for model with fused_aspp feature
Evaluation must happen in two directions: from low to high to feed
aspp features forward, then back down high to low to apply attention
such that the lower scale gets higher priority
"""
x_1x = inputs['images']
assert 1.0 in scales, 'expected 1.0 to be the target scale'
# Evaluation must happen low to high so that we can feed the ASPP
# features forward to higher scales
scales = sorted(scales, reverse=True)
# Recursively evaluate from low to high scales
pred, attn = self.recurse_fuse_fwd(x_1x, scales)
if self.training:
assert 'gts' in inputs
gts = inputs['gts']
loss = self.criterion(pred, gts)
return loss
else:
return {'pred': pred, 'attn_10x': attn}
def nscale_forward(self, inputs, scales):
"""
Hierarchical attention, primarily used for getting best inference
results.
We use attention at multiple scales, giving priority to the lower
resolutions. For example, if we have 4 scales {0.5, 1.0, 1.5, 2.0},
then evaluation is done as follows:
p_joint = attn_1.5 * p_1.5 + (1 - attn_1.5) * down(p_2.0)
p_joint = attn_1.0 * p_1.0 + (1 - attn_1.0) * down(p_joint)
        p_joint = up(attn_0.5 * p_0.5) + (1 - up(attn_0.5)) * p_joint
The target scale is always 1.0, and 1.0 is expected to be part of the
list of scales. When predictions are done at greater than 1.0 scale,
the predictions are downsampled before combining with the next lower
scale.
Inputs:
scales - a list of scales to evaluate
inputs - dict containing 'images', the input, and 'gts', the ground
truth mask
Output:
If training, return loss, else return prediction + attention
"""
x_1x = inputs['images']
assert 1.0 in scales, 'expected 1.0 to be the target scale'
# Lower resolution provides attention for higher rez predictions,
# so we evaluate in order: high to low
scales = sorted(scales, reverse=True)
pred = None
output_dict = {}
for s in scales:
x = ResizeX(x_1x, s)
bs = x.shape[0]
scale_float = torch.Tensor(bs).fill_(s)
p, attn, _aspp_attn, _aspp = self._fwd(x, scale_float=scale_float)
output_dict[fmt_scale('pred', s)] = p
if s != 2.0:
output_dict[fmt_scale('attn', s)] = attn
if pred is None:
pred = p
elif s >= 1.0:
# downscale previous
pred = scale_as(pred, p)
pred = attn * p + (1 - attn) * pred
else:
# upscale current
p = attn * p
p = scale_as(p, pred)
attn = scale_as(attn, pred)
pred = p + (1 - attn) * pred
if self.training:
assert 'gts' in inputs
gts = inputs['gts']
loss = self.criterion(pred, gts)
return loss
else:
output_dict['pred'] = pred
return output_dict
def two_scale_forward(self, inputs):
assert 'images' in inputs
x_1x = inputs['images']
x_lo = ResizeX(x_1x, cfg.MODEL.MSCALE_LO_SCALE)
pred_05x, attn_05x, aspp_attn, aspp_lo = \
self._fwd(x_lo)
p_1x, _, _, _ = self._fwd(x_1x, aspp_lo=aspp_lo,
aspp_attn=aspp_attn)
p_lo = attn_05x * pred_05x
p_lo = scale_as(p_lo, p_1x)
logit_attn = scale_as(attn_05x, p_1x)
joint_pred = p_lo + (1 - logit_attn) * p_1x
if self.training:
assert 'gts' in inputs
gts = inputs['gts']
loss = self.criterion(joint_pred, gts)
# Optionally, apply supervision to the multi-scale predictions
# directly. Turn off RMI to keep things lightweight
if cfg.LOSS.SUPERVISED_MSCALE_WT:
scaled_pred_05x = scale_as(pred_05x, p_1x)
loss_lo = self.criterion(scaled_pred_05x, gts, do_rmi=False)
loss_hi = self.criterion(p_1x, gts, do_rmi=False)
loss += cfg.LOSS.SUPERVISED_MSCALE_WT * loss_lo
loss += cfg.LOSS.SUPERVISED_MSCALE_WT * loss_hi
return loss
else:
output_dict = {
'pred': joint_pred,
'pred_05x': pred_05x,
'pred_10x': p_1x,
'attn_05x': attn_05x,
}
return output_dict
def forward(self, inputs):
if cfg.MODEL.N_SCALES and not self.training:
if self.fuse_aspp:
return self.nscale_fused_forward(inputs, cfg.MODEL.N_SCALES)
else:
return self.nscale_forward(inputs, cfg.MODEL.N_SCALES)
return self.two_scale_forward(inputs)
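# --- Added illustrative sketch (not part of the original model) ---
# The scale-fusion rule used throughout this file is a per-pixel convex combination of
# a low-scale and a high-scale prediction, weighted by the attention map:
def _demo_scale_fusion():
    p_lo = torch.zeros(1, 3, 4, 4)
    p_hi = torch.ones(1, 3, 4, 4)
    attn = torch.full((1, 1, 4, 4), 0.25)
    joint = attn * p_lo + (1 - attn) * p_hi  # every element equals 0.75 here
    return joint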
class MscaleV3Plus(MscaleBase):
"""
DeepLabV3Plus-based mscale segmentation model
"""
def __init__(self, num_classes, trunk='wrn38', criterion=None,
use_dpc=False, fuse_aspp=False, attn_2b=False):
super(MscaleV3Plus, self).__init__()
self.criterion = criterion
self.fuse_aspp = fuse_aspp
self.attn_2b = attn_2b
self.backbone, s2_ch, _s4_ch, high_level_ch = get_trunk(trunk)
self.aspp, aspp_out_ch = get_aspp(high_level_ch,
bottleneck_ch=256,
output_stride=8,
dpc=use_dpc,
img_norm = False)
self.bot_fine = nn.Conv2d(s2_ch, 48, kernel_size=1, bias=False)
self.bot_aspp = nn.Conv2d(aspp_out_ch, 256, kernel_size=1, bias=False)
#self.asnb = ASNB(low_in_channels = 48, high_in_channels=256, out_channels=256, key_channels=64, value_channels=256, dropout=0., sizes=([1]), norm_type='batchnorm',attn_scale=0.25)
self.adnb = ADNB(d_model=256, nhead=8, num_encoder_layers=2, dim_feedforward=256, dropout=0.5, activation="relu", num_feature_levels=1, enc_n_points=4)
# Semantic segmentation prediction head
bot_ch = cfg.MODEL.SEGATTN_BOT_CH
self.final = nn.Sequential(
nn.Conv2d(256 + 48, bot_ch, kernel_size=3, padding=1, bias=False),
Norm2d(bot_ch),
nn.ReLU(inplace=True),
nn.Conv2d(bot_ch, bot_ch, kernel_size=3, padding=1, bias=False),
Norm2d(bot_ch),
nn.ReLU(inplace=True),
nn.Conv2d(bot_ch, num_classes, kernel_size=1, bias=False))
# Scale-attention prediction head
if self.attn_2b:
attn_ch = 2
else:
attn_ch = 1
scale_in_ch = 256 + 48
self.scale_attn = make_attn_head(in_ch=scale_in_ch,
out_ch=attn_ch)
if cfg.OPTIONS.INIT_DECODER:
initialize_weights(self.bot_fine)
initialize_weights(self.bot_aspp)
initialize_weights(self.scale_attn)
initialize_weights(self.final)
else:
initialize_weights(self.final)
def _build_scale_tensor(self, scale_float, shape):
"""
Fill a 2D tensor with a constant scale value
"""
bs = scale_float.shape[0]
scale_tensor = None
for b in range(bs):
a_tensor = torch.Tensor(1, 1, *shape)
a_tensor.fill_(scale_float[b])
if scale_tensor is None:
scale_tensor = a_tensor
else:
scale_tensor = torch.cat([scale_tensor, a_tensor])
scale_tensor = scale_tensor.cuda()
return scale_tensor
def _fwd(self, x, aspp_lo=None, aspp_attn=None, scale_float=None):
x_size = x.size()
s2_features, _, final_features = self.backbone(x)
aspp = self.aspp(final_features)
if self.fuse_aspp and \
aspp_lo is not None and aspp_attn is not None:
aspp_attn = scale_as(aspp_attn, aspp)
aspp_lo = scale_as(aspp_lo, aspp)
aspp = aspp_attn * aspp_lo + (1 - aspp_attn) * aspp
conv_aspp_ = self.bot_aspp(aspp)
conv_s2 = self.bot_fine(s2_features)
# spatial attention here.
#conv_aspp_ = self.asnb(conv_s2, conv_aspp_)
conv_aspp_ = Upsample(conv_aspp_, conv_aspp_.size()[2:])
conv_aspp_shape = conv_aspp_.shape
conv_aspp_ = self.adnb([conv_aspp_],
masks=[conv_aspp_.new_zeros((conv_aspp_.shape[0], conv_aspp_.shape[2], conv_aspp_.shape[3]), dtype=torch.bool)],
pos_embeds=[None])
conv_aspp_ = conv_aspp_.transpose(-1, -2).view(conv_aspp_shape)
conv_aspp = Upsample(conv_aspp_, s2_features.size()[2:])
cat_s4 = [conv_s2, conv_aspp]
cat_s4_attn = [conv_s2, conv_aspp]
cat_s4 = torch.cat(cat_s4, 1)
cat_s4_attn = torch.cat(cat_s4_attn, 1)
final = self.final(cat_s4)
scale_attn = self.scale_attn(cat_s4_attn)
out = Upsample(final, x_size[2:])
scale_attn = Upsample(scale_attn, x_size[2:])
if self.attn_2b:
logit_attn = scale_attn[:, 0:1, :, :]
aspp_attn = scale_attn[:, 1:, :, :]
else:
logit_attn = scale_attn
aspp_attn = scale_attn
return out, logit_attn, aspp_attn, aspp
def DeepV3R50(num_classes, criterion):
return MscaleV3Plus(num_classes, trunk='resnet-50', criterion=criterion)
def DeepV3W38(num_classes, criterion):
return MscaleV3Plus(num_classes, trunk='wrn38', criterion=criterion)
def DeepV3W38Fuse(num_classes, criterion):
return MscaleV3Plus(num_classes, trunk='wrn38', criterion=criterion,
fuse_aspp=True)
def DeepV3W38Fuse2(num_classes, criterion):
return MscaleV3Plus(num_classes, trunk='wrn38', criterion=criterion,
fuse_aspp=True, attn_2b=True)
def DeepV3EffB4(num_classes, criterion):
return MscaleV3Plus(num_classes, trunk='efficientnet_b4',
criterion=criterion)
def DeepV3EffB4Fuse(num_classes, criterion):
return MscaleV3Plus(num_classes, trunk='efficientnet_b4',
criterion=criterion, fuse_aspp=True)
def DeepV3X71(num_classes, criterion):
return MscaleV3Plus(num_classes, trunk='xception71', criterion=criterion)
class MscaleDeeper(MscaleBase):
"""
Panoptic DeepLab-style semantic segmentation network
stride8 only
"""
def __init__(self, num_classes, trunk='wrn38', criterion=None,
fuse_aspp=False, attn_2b=False):
super(MscaleDeeper, self).__init__()
self.criterion = criterion
self.fuse_aspp = fuse_aspp
self.attn_2b = attn_2b
self.backbone, s2_ch, s4_ch, high_level_ch = get_trunk(
trunk_name=trunk, output_stride=8)
self.aspp, aspp_out_ch = get_aspp(high_level_ch, bottleneck_ch=256,
output_stride=8)
self.convs2 = nn.Conv2d(s2_ch, 32, kernel_size=1, bias=False)
self.convs4 = nn.Conv2d(s4_ch, 64, kernel_size=1, bias=False)
self.conv_up1 = nn.Conv2d(aspp_out_ch, 256, kernel_size=1, bias=False)
self.conv_up2 = ConvBnRelu(256 + 64, 256, kernel_size=5, padding=2)
self.conv_up3 = ConvBnRelu(256 + 32, 256, kernel_size=5, padding=2)
self.conv_up5 = nn.Conv2d(256, num_classes, kernel_size=1, bias=False)
# Scale-attention prediction head
if self.attn_2b:
attn_ch = 2
else:
attn_ch = 1
self.scale_attn = make_attn_head(in_ch=256,
out_ch=attn_ch)
if cfg.OPTIONS.INIT_DECODER:
initialize_weights(self.convs2, self.convs4, self.conv_up1,
self.conv_up2, self.conv_up3, self.conv_up5,
self.scale_attn)
def _fwd(self, x, aspp_lo=None, aspp_attn=None):
s2_features, s4_features, final_features = self.backbone(x)
s2_features = self.convs2(s2_features)
s4_features = self.convs4(s4_features)
aspp = self.aspp(final_features)
if self.fuse_aspp and \
aspp_lo is not None and aspp_attn is not None:
aspp_attn = scale_as(aspp_attn, aspp)
aspp_lo = scale_as(aspp_lo, aspp)
aspp = aspp_attn * aspp_lo + (1 - aspp_attn) * aspp
x = self.conv_up1(aspp)
x = Upsample2(x)
x = torch.cat([x, s4_features], 1)
x = self.conv_up2(x)
x = Upsample2(x)
x = torch.cat([x, s2_features], 1)
up3 = self.conv_up3(x)
out = self.conv_up5(up3)
out = Upsample2(out)
scale_attn = self.scale_attn(up3)
scale_attn = Upsample2(scale_attn)
if self.attn_2b:
logit_attn = scale_attn[:, 0:1, :, :]
aspp_attn = scale_attn[:, 1:, :, :]
else:
logit_attn = scale_attn
aspp_attn = scale_attn
return out, logit_attn, aspp_attn, aspp
def DeeperW38(num_classes, criterion, s2s4=True):
return MscaleDeeper(num_classes=num_classes, criterion=criterion,
trunk='wrn38')
def DeeperX71(num_classes, criterion, s2s4=True):
return MscaleDeeper(num_classes=num_classes, criterion=criterion,
trunk='xception71')
def DeeperEffB4(num_classes, criterion, s2s4=True):
return MscaleDeeper(num_classes=num_classes, criterion=criterion,
trunk='efficientnet_b4')
class MscaleBasic(MscaleBase):
"""
"""
def __init__(self, num_classes, trunk='hrnetv2', criterion=None):
super(MscaleBasic, self).__init__()
self.criterion = criterion
self.backbone, _, _, high_level_ch = get_trunk(
trunk_name=trunk, output_stride=8)
self.cls_head = make_seg_head(in_ch=high_level_ch,
out_ch=num_classes)
self.scale_attn = make_attn_head(in_ch=high_level_ch,
out_ch=1)
def _fwd(self, x, aspp_lo=None, aspp_attn=None, scale_float=None):
_, _, final_features = self.backbone(x)
attn = self.scale_attn(final_features)
pred = self.cls_head(final_features)
attn = scale_as(attn, x)
pred = scale_as(pred, x)
return pred, attn, None, None
def HRNet(num_classes, criterion, s2s4=None):
return MscaleBasic(num_classes=num_classes, criterion=criterion,
trunk='hrnetv2')
class ASPP(MscaleBase):
"""
ASPP-based Mscale
"""
def __init__(self, num_classes, trunk='hrnetv2', criterion=None):
super(ASPP, self).__init__()
self.criterion = criterion
self.backbone, s2_ch, _s4_ch, high_level_ch = get_trunk(trunk)
self.aspp, aspp_out_ch = get_aspp(high_level_ch,
bottleneck_ch=cfg.MODEL.ASPP_BOT_CH,
output_stride=8)
self.bot_aspp = nn.Conv2d(aspp_out_ch, 256, kernel_size=1, bias=False)
self.final = make_seg_head(in_ch=256, out_ch=num_classes)
self.scale_attn = make_attn_head(in_ch=256, out_ch=1)
initialize_weights(self.final)
def _fwd(self, x, aspp_lo=None, aspp_attn=None, scale_float=None):
x_size = x.size()
_, _, final_features = self.backbone(x)
aspp = self.aspp(final_features)
aspp = self.bot_aspp(aspp)
final = self.final(aspp)
scale_attn = self.scale_attn(aspp)
out = Upsample(final, x_size[2:])
scale_attn = Upsample(scale_attn, x_size[2:])
logit_attn = scale_attn
aspp_attn = scale_attn
return out, logit_attn, aspp_attn, aspp
def HRNet_ASP(num_classes, criterion, s2s4=None):
return ASPP(num_classes=num_classes, criterion=criterion, trunk='hrnetv2')
|
StarcoderdataPython
|
137451
|
from qqai.classes import *
class DetectFace(QQAIFaceClass):
    """Face detection and analysis"""
api = 'https://api.ai.qq.com/fcgi-bin/face/face_detectface'
class DetectMultiFace(QQAIPicClass):
    """Multi-face detection"""
api = 'https://api.ai.qq.com/fcgi-bin/face/face_detectmultiface'
class FaceCompare(QQAIClass):
    """Face comparison"""
    api = 'https://api.ai.qq.com/fcgi-bin/face/face_facecompare'
    def make_params(self, image_a_param, image_b_param):
        """Build the parameters for the API call"""
params = {'app_id': self.app_id,
'time_stamp': int(time.time()),
'nonce_str': int(time.time()),
'image_a': self.get_base64(image_a_param),
'image_b': self.get_base64(image_b_param),
}
params['sign'] = self.get_sign(params)
return params
def run(self, image_a_param, image_b_param):
params = self.make_params(image_a_param, image_b_param)
response = self.call_api(params)
result = json.loads(response.text)
return result
class DetectCrossAgeFace(QQAIClass):
    """Cross-age face recognition"""
    api = 'https://api.ai.qq.com/fcgi-bin/face/face_detectcrossageface'
    def make_params(self, source_image_param, target_image_param):
        """Build the parameters for the API call"""
params = {'app_id': self.app_id,
'time_stamp': int(time.time()),
'nonce_str': int(time.time()),
'source_image': self.get_base64(source_image_param),
'target_image': self.get_base64(target_image_param),
}
params['sign'] = self.get_sign(params)
return params
def run(self, source_image_param, target_image_param):
params = self.make_params(source_image_param, target_image_param)
response = self.call_api(params)
result = json.loads(response.text)
return result
class FaceShape(QQAIFaceClass):
    """Facial feature localization"""
api = 'https://api.ai.qq.com/fcgi-bin/face/face_faceshape'
class FaceIdentify(QQAIClass):
    """Face identification"""
    api = 'https://api.ai.qq.com/fcgi-bin/face/face_faceidentify'
    def make_params(self, image, group_id, topn):
        """Build the parameters for the API call"""
params = {'app_id': self.app_id,
'time_stamp': int(time.time()),
'nonce_str': int(time.time()),
'image': self.get_base64(image),
'group_id': group_id,
'topn': topn,
}
params['sign'] = self.get_sign(params)
return params
def run(self, image, group_id, topn=9):
params = self.make_params(image, group_id, topn)
response = self.call_api(params)
result = json.loads(response.text)
return result
class FaceVerify(QQAIClass):
    """Face verification"""
    api = 'https://api.ai.qq.com/fcgi-bin/face/face_faceverify'
    def make_params(self, image, person_id):
        """Build the parameters for the API call"""
params = {'app_id': self.app_id,
'time_stamp': int(time.time()),
'nonce_str': int(time.time()),
'image': self.get_base64(image),
'person_id': person_id,
}
params['sign'] = self.get_sign(params)
return params
def run(self, image, person_id):
params = self.make_params(image, person_id)
response = self.call_api(params)
result = json.loads(response.text)
return result
class NewPerson(QQAIClass):
    """Create an individual (person)"""
    api = 'https://api.ai.qq.com/fcgi-bin/face/face_newperson'
    def make_params(self, group_ids, person_id, image, person_name, tag=None):
        """Build the parameters for the API call"""
if type(group_ids) == str:
group_ids_param = group_ids
else:
group_ids_param = '|'.join(group_ids)
        # This is a guess: the API docs appear to have an escaping error, leaving a backslash with the following characters missing
params = {'app_id': self.app_id,
'time_stamp': int(time.time()),
'nonce_str': int(time.time()),
'group_ids': group_ids_param,
'person_id': person_id,
'image': self.get_base64(image),
'person_name': person_name,
}
if tag is not None:
params['tag'] = tag
params['sign'] = self.get_sign(params)
return params
def run(self, group_ids, person_id, image, person_name, tag=None):
params = self.make_params(group_ids, person_id, image, person_name, tag)
response = self.call_api(params)
result = json.loads(response.text)
return result
class DelPerson(QQAIFacePersonClass):
    """Delete an individual"""
    api = 'https://api.ai.qq.com/fcgi-bin/face/face_delperson'
class AddFace(QQAIClass):
    """Add faces to an individual"""
    api = 'https://api.ai.qq.com/fcgi-bin/face/face_addface'
    def make_params(self, person_id, images, tag):
        """Build the parameters for the API call"""
if type(images) == str or hasattr(images, 'read'):
images_param = self.get_base64(images)
else:
if len(images) > 5:
raise ValueError('No more than 5 images input in one request')
else:
images_param = '|'.join(map(self.get_base64, images))
params = {'app_id': self.app_id,
'time_stamp': int(time.time()),
'nonce_str': int(time.time()),
'person_id': person_id,
'images': images_param,
'tag': tag,
}
params['sign'] = self.get_sign(params)
return params
def run(self, person_id, images, tag):
params = self.make_params(person_id, images, tag)
response = self.call_api(params)
result = json.loads(response.text)
return result
class DelFace(QQAIClass):
    """Delete faces"""
    api = 'https://api.ai.qq.com/fcgi-bin/face/face_delface'
    def make_params(self, person_id, face_ids):
        """Build the parameters for the API call"""
if type(face_ids) == str:
face_ids_param = face_ids
else:
face_ids_param = '|'.join(face_ids)
        # This is a guess: the API docs appear to have an escaping error, leaving a backslash with the following characters missing
params = {'app_id': self.app_id,
'time_stamp': int(time.time()),
'nonce_str': int(time.time()),
'person_id': person_id,
'face_ids': face_ids_param,
}
params['sign'] = self.get_sign(params)
return params
def run(self, person_id, face_ids):
params = self.make_params(person_id, face_ids)
response = self.call_api(params)
result = json.loads(response.text)
return result
class SetInfo(QQAIClass):
    """Set individual information"""
    api = 'https://api.ai.qq.com/fcgi-bin/face/face_setinfo'
    def make_params(self, person_id, person_name=None, tag=None):
        """Build the parameters for the API call"""
params = {'app_id': self.app_id,
'time_stamp': int(time.time()),
'nonce_str': int(time.time()),
'person_id': person_id,
}
if person_name is not None:
params['person_name'] = person_name
if tag is not None:
params['tag'] = tag
params['sign'] = self.get_sign(params)
return params
def run(self, person_id, person_name=None, tag=None):
params = self.make_params(person_id, person_name, tag)
response = self.call_api(params)
result = json.loads(response.text)
return result
class GetInfo(QQAIFacePersonClass):
    """Get individual information"""
    api = 'https://api.ai.qq.com/fcgi-bin/face/face_getinfo'
class GetGroupIds(QQAIClass):
    """Get the list of groups"""
    api = 'https://api.ai.qq.com/fcgi-bin/face/face_getgroupids'
    def make_params(self):
        """Build the parameters for the API call"""
params = {'app_id': self.app_id,
'time_stamp': int(time.time()),
'nonce_str': int(time.time()),
}
params['sign'] = self.get_sign(params)
return params
def run(self):
params = self.make_params()
response = self.call_api(params)
result = json.loads(response.text)
return result
class GetPersonIds(QQAIClass):
    """Get the list of individuals in a group"""
    api = 'https://api.ai.qq.com/fcgi-bin/face/face_getpersonids'
    def make_params(self, group_id):
        """Build the parameters for the API call"""
params = {'app_id': self.app_id,
'time_stamp': int(time.time()),
'nonce_str': int(time.time()),
'group_id': group_id
}
params['sign'] = self.get_sign(params)
return params
def run(self, group_id):
params = self.make_params(group_id)
response = self.call_api(params)
result = json.loads(response.text)
return result
class GetFaceIds(QQAIClass):
    """Get the list of faces for an individual"""
    api = 'https://api.ai.qq.com/fcgi-bin/face/face_getfaceids'
    def make_params(self, person_id):
        """Build the parameters for the API call"""
params = {'app_id': self.app_id,
'time_stamp': int(time.time()),
'nonce_str': int(time.time()),
'person_id': person_id
}
params['sign'] = self.get_sign(params)
return params
def run(self, person_id):
params = self.make_params(person_id)
response = self.call_api(params)
result = json.loads(response.text)
return result
class GetFaceInfo(QQAIClass):
    """Get face information"""
    api = 'https://api.ai.qq.com/fcgi-bin/face/face_getfaceinfo'
    def make_params(self, face_id):
        """Build the parameters for the API call"""
        params = {'app_id': self.app_id,
                  'time_stamp': int(time.time()),
                  'nonce_str': int(time.time()),
                  'face_id': face_id
}
params['sign'] = self.get_sign(params)
return params
def run(self, face_id):
params = self.make_params(face_id)
response = self.call_api(params)
result = json.loads(response.text)
return result
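# Added usage sketch (not part of the original module). The constructor arguments are an
# assumption; QQAIClass in qqai.classes appears to handle the app credentials and signing:
#
#   compare = FaceCompare(app_id, app_key)
#   result = compare.run('face_a.jpg', 'face_b.jpg')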
|
StarcoderdataPython
|
150953
|
<filename>lowest grade.py
marksheet = []
scores = []
n = int(input())
for i in range(n):
name = input()
score = float(input())
marksheet += [[name, score]]
scores += [score]
li = sorted(set(scores))[1]
for n, s in marksheet:
if s == li:
print(n)
|
StarcoderdataPython
|
4901946
|
import subprocess
import os
import pandas as pd
import numpy as np
from datetime import datetime
import requests
import json
def get_john_hopkins():
git_pull = subprocess.Popen( "git pull",
cwd = os.path.dirname('C:/Users/hosha/applied_data_science_covid19/data/raw/COVID-19/' ),
shell = True,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
(out, error) = git_pull.communicate()
print("Error : " + str(error))
print("Out : " + str(out))
def get_current_data_germany():
#16 states
#data = requests.get('https://services7.arcgis.com/mOBPykOjAyBO2ZKk/arcgis/rest/services/Coronaf%C3%A4lle_in_den_Bundesl%C3%A4ndern/FeatureServer/0/query?where=1%3D1&outFields=*&outSR=4326&f=json')
#400 regions/Landkreis
data = requests.get('https://services7.arcgis.com/mOBPykOjAyBO2ZKk/ArcGIS/rest/services/RKI_Landkreisdaten/FeatureServer/0/query?where=1%3D1&outFields=*&outSR=4326&f=json')
json_object = json.loads(data.content)
full_list = []
for pos,each_dict in enumerate (json_object['features'][:]):
full_list.append(each_dict['attributes'])
pd_full_list = pd.DataFrame(full_list)
pd_full_list.to_csv('C:/Users/hosha/applied_data_science_covid19/data/raw/NPGEO/GER_state_data.csv',sep=';')
print('Number of regions rows: '+str(pd_full_list.shape[0]))
if __name__ == '__main__':
get_john_hopkins()
get_current_data_germany()
|
StarcoderdataPython
|
8136100
|
<gh_stars>0
"""
File: boggle.py
Name: 陳筱涵
----------------------------------------
TODO:
"""
# This is the file name of the dictionary txt file
# we will be checking if a word exists by searching through it
import time
FILE = 'dictionary.txt'
# Global
dictionary = []
word_dic = {}
def main():
"""
    TODO: the user keys in a row of 4 letters four times (a 4x4 grid); the program then
    links neighboring letters and reports which of the formed words are in the dictionary.
"""
read_dictionary()
count = 0
row1 = str(input('1 row of letters: '))
row1 = row1.lower()
count = 0
column = 0
if check_the_letter(row1, count, column):
row2 = str(input('2 row of letters: '))
row2 = row2.lower()
column += 1
if check_the_letter(row2, count, column):
row3 = str(input('3 row of letters: '))
row3 = row3.lower()
column += 1
if check_the_letter(row3, count, column):
row4 = str(input('4 row of letters: '))
row4 = row4.lower()
column += 1
if check_the_letter(row4, count, column):
find_list = []
# x = time.time()
for i in range(4):
for j in range(4):
game_start(i, j, "", find_list, {})
# y = time.time()
# print(y-x)
for word in find_list:
count += 1
# print(f'Found "{word}"')
                    print(f'There are {count} words in total')
else:
print('Illegal input')
else:
print('Illegal input')
else:
print('Illegal input')
else:
print('Illegal input')
def game_start(x, y, word, find_list, take_out):
"""
    :param x: x position coordinate
    :param y: y position coordinate
    :param word: the word built so far, to be compared against the dictionary
    :param find_list: words found during the game that are also in the dictionary
    :param take_out: letters temporarily removed from the grid so a cell is not reused
    :return: nothing needs to be returned
"""
if word in dictionary and len(word) >= 4 and word not in find_list:
find_list.append(word)
print(f'Found "{word}"')
game_start(x, y, word, find_list, take_out)
else:
for i in range(-1, 2):
for j in range(-1, 2):
if 4 > i + x >= 0:
if 4 > j + y >= 0:
x1 = x + i
y1 = y + j
if (x1, y1) in word_dic:
word += word_dic[(x1, y1)]
if has_prefix2(word, find_list) is False:
take_out[(x1, y1)] = word_dic[(x1, y1)]
word_dic.pop((x1, y1))
if has_prefix(word):
game_start(x1, y1, word, find_list, take_out)
word = word[0:-1]
word_dic[(x1, y1)] = take_out[(x1, y1)]
take_out.pop((x1, y1))
def check_the_letter(word, count, column):
"""
    :param word: the row of letters the user keyed in
    :param count: x position coordinate
    :param column: y position coordinate
    :return: True if the row meets the expected format (letters separated by single spaces)
"""
last = ""
global word_dic
for letter in word:
if letter.isalpha():
word_dic[(count, column)] = letter
count += 1
if last == "":
last = letter
else:
if last.isalpha() is True and letter == " ":
last = letter
elif last == " " and letter.isalpha() is True:
last = letter
else:
return False
return True
def read_dictionary():
"""
This function reads file "dictionary.txt" stored in FILE
and appends words in each line into a Python list
"""
with open(FILE, 'r') as f:
for line in f:
word = line.strip()
dictionary.append(word)
def has_prefix(sub_s):
"""
:param sub_s: (str) A substring that is constructed by neighboring letters on a 4x4 square grid
    :return: (bool) Whether there is any word with the prefix stored in sub_s
"""
for w in dictionary:
if w.startswith(sub_s):
return True
return False
def has_prefix2(word, find):
"""
    :param word: (str) A substring constructed by neighboring letters on a 4x4 square grid
    :param find: (list) Words found so far
    :return: (bool) Whether any already-found word starts with word
"""
for w in find:
if w.startswith(word):
return True
return False
if __name__ == '__main__':
main()
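# Added usage note (not part of the original file): each prompted row is expected as
# space-separated letters, e.g. "f y c l"; words of length >= 4 that can be built from
# neighboring grid cells and appear in dictionary.txt are printed.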
|
StarcoderdataPython
|