hexsha
stringlengths 40
40
| size
int64 4
1.02M
| ext
stringclasses 8
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
209
| max_stars_repo_name
stringlengths 5
121
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
209
| max_issues_repo_name
stringlengths 5
121
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
209
| max_forks_repo_name
stringlengths 5
121
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 4
1.02M
| avg_line_length
float64 1.07
66.1k
| max_line_length
int64 4
266k
| alphanum_fraction
float64 0.01
1
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b0680313f08baec10f4a8b5ea84ee4ba0c56d7a7
| 382 |
py
|
Python
|
workpy/base.py
|
cofear/workpy
|
74df24f1b9de519cf33897a648e6ae563cdd0368
|
[
"MIT"
] | null | null | null |
workpy/base.py
|
cofear/workpy
|
74df24f1b9de519cf33897a648e6ae563cdd0368
|
[
"MIT"
] | null | null | null |
workpy/base.py
|
cofear/workpy
|
74df24f1b9de519cf33897a648e6ae563cdd0368
|
[
"MIT"
] | null | null | null |
def llist(s):
    """Split *s* into lines, then each non-empty line into whitespace tokens.

    Returns a list of token lists, one per non-blank line.
    """
    rows = []
    for line in s.split('\n'):
        if line:
            rows.append(line.split())
    return rows
def hump(s):
    """Convert *s* to camelCase via its snake_case form (see uline()).

    Fix: the original indexed a[0] unconditionally, raising IndexError for
    inputs with no word parts (e.g. '' or '_'); those now return ''.
    """
    parts = [p for p in uline(s).split('_') if len(p) > 0]
    if not parts:
        # No alphanumeric segments at all — nothing to camel-case.
        return ''
    return parts[0].lower() + ''.join(p.capitalize() for p in parts[1:])
def uline(s):
    """Convert *s* to snake_case.

    An underscore is inserted before every uppercase letter that follows a
    lowercase one; the result is lowercased and empty segments are dropped.
    """
    text = str(s)
    pieces = []
    for idx, ch in enumerate(text):
        # NOTE: text[idx - 1] wraps to the LAST character when idx == 0,
        # mirroring the original's cyclic lookbehind.  A spurious leading
        # '_' is harmless because empty segments are filtered out below.
        if ch.isupper() and text[idx - 1].islower():
            pieces.append('_')
        pieces.append(ch)
    marked = ''.join(pieces)
    return '_'.join(seg.lower() for seg in marked.split('_') if seg)
| 25.466667 | 65 | 0.554974 |
1dad4a43dbfcf604df04f16ee888f8058f63b543
| 2,005 |
py
|
Python
|
Array/Merge_Intervals.py
|
shua2018ti/Google
|
3a9847e0c60d887d15eb4b0d4d8ebf51e464df1b
|
[
"MIT"
] | 87 |
2015-07-15T20:41:09.000Z
|
2022-03-08T13:55:38.000Z
|
Array/Merge_Intervals.py
|
shua2018ti/Google
|
3a9847e0c60d887d15eb4b0d4d8ebf51e464df1b
|
[
"MIT"
] | 59 |
2015-03-19T22:26:41.000Z
|
2015-07-25T17:58:08.000Z
|
Array/Merge_Intervals.py
|
shua2018ti/Google
|
3a9847e0c60d887d15eb4b0d4d8ebf51e464df1b
|
[
"MIT"
] | 45 |
2015-07-15T20:41:12.000Z
|
2022-02-01T20:18:07.000Z
|
'''
Given a collection of intervals, merge all overlapping intervals.
For example,
Given [1,3],[2,6],[8,10],[15,18],
return [1,6],[8,10],[15,18].
'''
class Solution:
    # @param intervals, a list of Interval
    # @return a list of Interval
    def merge(self, intervals):
        """Merge overlapping intervals, returning a new list of Interval.

        Sorts in place by start, then sweeps once keeping the current
        merged span in (cur_start, cur_end).
        """
        if len(intervals) < 2:
            return intervals
        intervals.sort(key=lambda iv: iv.start)
        merged = []
        cur_start = None
        cur_end = None
        for iv in intervals:
            if cur_start is None and cur_end is None:
                # First interval seeds the running span.
                cur_start = iv.start
                cur_end = iv.end
            else:
                # Overlap: extend the running span's right edge if needed.
                if iv.start <= cur_end and iv.end > cur_end:
                    cur_end = iv.end
                # Gap: flush the finished span and start a new one.
                if iv.start > cur_end:
                    merged.append(Interval(cur_start, cur_end))
                    cur_start = iv.start
                    cur_end = iv.end
        merged.append(Interval(cur_start, cur_end))
        return merged
# Wrong Answer !!!
# Kept as a reference for why pairwise merging is insufficient: each
# iteration only compares intervals[i] against its immediate predecessor,
# so a chain of overlaps is emitted as several partially-merged intervals.
class Solution:
    # @param intervals, a list of Interval
    # @return a list of Interval
    def merge(self, intervals):
        # NOTE: Python 2 code (xrange); also relies on Interval objects
        # being sortable by default, which is not guaranteed.
        length = len(intervals)
        if length == 0 or length == 1: return intervals
        intervals.sort()
        result = []
        pre = intervals[0]
        for i in xrange(1,length):
            flag = False
            if intervals[i].start > pre.end:
                result.append(pre)
            else:
                newnode = Interval(pre.start, max(intervals[i].end,pre.end))
                # Wrong!! Here we did not handle the case: [1,8],[2,5],[3,6],[4,7],[5,9]!!
                result.append(newnode)
                flag = True
            pre = intervals[i]
        if not flag:
            result.append(pre)
        return result
| 30.378788 | 90 | 0.494763 |
f19177c2ae6044f5fb814eec4f2f1ce180dabd83
| 6,313 |
py
|
Python
|
test/dialect/mysql/test_query.py
|
EvaSDK/sqlalchemy
|
0a60865d527331a4df9db0fc8a15038108075bca
|
[
"MIT"
] | 1 |
2016-07-26T14:47:04.000Z
|
2016-07-26T14:47:04.000Z
|
test/dialect/mysql/test_query.py
|
EvaSDK/sqlalchemy
|
0a60865d527331a4df9db0fc8a15038108075bca
|
[
"MIT"
] | null | null | null |
test/dialect/mysql/test_query.py
|
EvaSDK/sqlalchemy
|
0a60865d527331a4df9db0fc8a15038108075bca
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from sqlalchemy.testing import eq_, is_
from sqlalchemy import *
from sqlalchemy.testing import fixtures, AssertsCompiledSQL
from sqlalchemy import testing
class IdiosyncrasyTest(fixtures.TestBase, AssertsCompiledSQL):
    # MySQL-only, backend-level test.  MySQL has no native BOOLEAN type;
    # this pins down that IS / IS NOT comparisons of TRUE still evaluate
    # to the expected truth values when cast back to Boolean.
    __only_on__ = 'mysql'
    __backend__ = True

    @testing.emits_warning()
    def test_is_boolean_symbols_despite_no_native(self):
        # TRUE IS TRUE -> true
        is_(
            testing.db.scalar(select([cast(true().is_(true()), Boolean)])),
            True
        )
        # TRUE IS NOT TRUE -> false
        is_(
            testing.db.scalar(select([cast(true().isnot(true()), Boolean)])),
            False
        )
        # FALSE IS FALSE -> true
        is_(
            testing.db.scalar(select([cast(false().is_(false()), Boolean)])),
            True
        )
class MatchTest(fixtures.TestBase, AssertsCompiledSQL):
    """MySQL full-text MATCH ... AGAINST tests.

    Tables use the MyISAM engine (full-text support was not assumed
    available on InnoDB here).  Fixture data is created once per class.
    """
    __only_on__ = 'mysql'
    __backend__ = True

    @classmethod
    def setup_class(cls):
        # Module-level globals keep this suite's original pre-fixture style.
        global metadata, cattable, matchtable
        metadata = MetaData(testing.db)
        cattable = Table('cattable', metadata,
            Column('id', Integer, primary_key=True),
            Column('description', String(50)),
            mysql_engine='MyISAM'
        )
        matchtable = Table('matchtable', metadata,
            Column('id', Integer, primary_key=True),
            Column('title', String(200)),
            Column('category_id', Integer, ForeignKey('cattable.id')),
            mysql_engine='MyISAM'
        )
        metadata.create_all()
        cattable.insert().execute([
            {'id': 1, 'description': 'Python'},
            {'id': 2, 'description': 'Ruby'},
        ])
        matchtable.insert().execute([
            {'id': 1,
             'title': 'Agile Web Development with Ruby On Rails',
             'category_id': 2},
            {'id': 2,
             'title': 'Dive Into Python',
             'category_id': 1},
            {'id': 3,
             'title': "Programming Matz's Ruby",
             'category_id': 2},
            {'id': 4,
             'title': 'The Definitive Guide to Django',
             'category_id': 1},
            {'id': 5,
             'title': 'Python in a Nutshell',
             'category_id': 1}
        ])

    @classmethod
    def teardown_class(cls):
        metadata.drop_all()

    @testing.fails_on('mysql+mysqlconnector', 'uses pyformat')
    def test_expression_format(self):
        # Expected placeholder depends on the DBAPI's paramstyle.
        format = testing.db.dialect.paramstyle == 'format' and '%s' or '?'
        self.assert_compile(
            matchtable.c.title.match('somstr'),
            "MATCH (matchtable.title) AGAINST (%s IN BOOLEAN MODE)" % format)

    @testing.fails_on('mysql+mysqldb', 'uses format')
    @testing.fails_on('mysql+pymysql', 'uses format')
    @testing.fails_on('mysql+cymysql', 'uses format')
    @testing.fails_on('mysql+oursql', 'uses format')
    @testing.fails_on('mysql+pyodbc', 'uses format')
    @testing.fails_on('mysql+zxjdbc', 'uses format')
    def test_expression_pyformat(self):
        # Only runs on drivers whose paramstyle is pyformat.
        format = '%(title_1)s'
        self.assert_compile(
            matchtable.c.title.match('somstr'),
            "MATCH (matchtable.title) AGAINST (%s IN BOOLEAN MODE)" % format)

    def test_simple_match(self):
        results = (matchtable.select().
                   where(matchtable.c.title.match('python')).
                   order_by(matchtable.c.id).
                   execute().
                   fetchall())
        eq_([2, 5], [r.id for r in results])

    def test_not_match(self):
        results = (matchtable.select().
                   where(~matchtable.c.title.match('python')).
                   order_by(matchtable.c.id).
                   execute().
                   fetchall())
        eq_([1, 3, 4], [r.id for r in results])

    def test_simple_match_with_apostrophe(self):
        results = (matchtable.select().
                   where(matchtable.c.title.match("Matz's")).
                   execute().
                   fetchall())
        eq_([3], [r.id for r in results])

    def test_return_value(self):
        # test [ticket:3263]
        # MATCH in the projection returns a relevance score, not a boolean.
        result = testing.db.execute(
            select([
                matchtable.c.title.match('Agile Ruby Programming').label('ruby'),
                matchtable.c.title.match('Dive Python').label('python'),
                matchtable.c.title
            ]).order_by(matchtable.c.id)
        ).fetchall()
        eq_(
            result,
            [
                (2.0, 0.0, 'Agile Web Development with Ruby On Rails'),
                (0.0, 2.0, 'Dive Into Python'),
                (2.0, 0.0, "Programming Matz's Ruby"),
                (0.0, 0.0, 'The Definitive Guide to Django'),
                (0.0, 1.0, 'Python in a Nutshell')
            ]
        )

    def test_or_match(self):
        # Two MATCH clauses OR-ed vs. a single multi-word MATCH — same rows.
        results1 = (matchtable.select().
                    where(or_(matchtable.c.title.match('nutshell'),
                              matchtable.c.title.match('ruby'))).
                    order_by(matchtable.c.id).
                    execute().
                    fetchall())
        eq_([1, 3, 5], [r.id for r in results1])
        results2 = (matchtable.select().
                    where(matchtable.c.title.match('nutshell ruby')).
                    order_by(matchtable.c.id).
                    execute().
                    fetchall())
        eq_([1, 3, 5], [r.id for r in results2])

    def test_and_match(self):
        # AND of two MATCH clauses vs. boolean-mode '+word +word' syntax.
        results1 = (matchtable.select().
                    where(and_(matchtable.c.title.match('python'),
                               matchtable.c.title.match('nutshell'))).
                    execute().
                    fetchall())
        eq_([5], [r.id for r in results1])
        results2 = (matchtable.select().
                    where(matchtable.c.title.match('+python +nutshell')).
                    execute().
                    fetchall())
        eq_([5], [r.id for r in results2])

    def test_match_across_joins(self):
        # MATCH against columns of two joined tables in one predicate.
        results = (matchtable.select().
                   where(and_(cattable.c.id==matchtable.c.category_id,
                              or_(cattable.c.description.match('Ruby'),
                                  matchtable.c.title.match('nutshell')))).
                   order_by(matchtable.c.id).
                   execute().
                   fetchall())
        eq_([1, 3, 5], [r.id for r in results])
| 35.072222 | 81 | 0.518929 |
a329ed6b5b857302bfa1532e16aefa50a0d0e4a2
| 363 |
py
|
Python
|
examples/djangoproject/djangoproject/urls.py
|
seanbrant/pykss
|
c01b0f97c3ebfff32b4bdb3e6e2e1616c5c4ea3e
|
[
"BSD-3-Clause"
] | 12 |
2015-01-31T19:24:56.000Z
|
2018-09-11T10:02:28.000Z
|
examples/djangoproject/djangoproject/urls.py
|
seanbrant/pykss
|
c01b0f97c3ebfff32b4bdb3e6e2e1616c5c4ea3e
|
[
"BSD-3-Clause"
] | 7 |
2016-08-02T15:13:15.000Z
|
2021-06-10T18:51:13.000Z
|
examples/djangoproject/djangoproject/urls.py
|
kundo/pykss
|
5ea14041b7cb0bb3f8596213126b119df918b5ea
|
[
"BSD-3-Clause"
] | 6 |
2015-01-23T17:19:27.000Z
|
2019-03-20T09:04:46.000Z
|
from django.conf.urls import patterns, url
from django.views.generic.base import TemplateView
from pykss.contrib.django.views import StyleguideView
# URL table for the example project: the landing page plus the pykss
# styleguide view rendered from 'styleguide.html'.
urlpatterns = patterns(
    '',
    url(r'^$',
        TemplateView.as_view(template_name='index.html'),
        name='index'),
    url(r'^styleguide/$',
        StyleguideView.as_view(template_name='styleguide.html'),
        name='styleguide'),
)
| 33 | 102 | 0.757576 |
94be0b90c1659da749ccf150b99d348dd535560b
| 1,992 |
py
|
Python
|
orodja.py
|
nomad2001/dvojke-ali-trojke
|
9c7a0460d2012a285dcda122418995796d213cab
|
[
"MIT"
] | null | null | null |
orodja.py
|
nomad2001/dvojke-ali-trojke
|
9c7a0460d2012a285dcda122418995796d213cab
|
[
"MIT"
] | null | null | null |
orodja.py
|
nomad2001/dvojke-ali-trojke
|
9c7a0460d2012a285dcda122418995796d213cab
|
[
"MIT"
] | null | null | null |
import csv
import json
import os
import requests
import sys
# Running tally of pages actually saved by shrani_spletno_stran().
# Fix: a `global` statement at module scope is a no-op in Python, so it was
# removed; functions that rebind this still declare `global count` locally.
count = 0
def pripravi_imenik(ime_datoteke):
    """Ensure the parent directory of the given file path exists."""
    mapa = os.path.dirname(ime_datoteke)
    if not mapa:
        # Bare filename with no directory component — nothing to create.
        return
    os.makedirs(mapa, exist_ok=True)
def shrani_spletno_stran(url, ime_datoteke, vsili_prenos=False):
    '''Save the page at *url* into the named file.

    Skips the download when the file already exists, unless *vsili_prenos*
    (force download) is true.  Increments the module-level `count` for each
    non-empty page actually written.
    '''
    try:
        print(f'Shranjujem {url} ...', end='')
        sys.stdout.flush()
        # Already-present files are skipped (message: "saved from before").
        if os.path.isfile(ime_datoteke) and not vsili_prenos:
            print('shranjeno že od prej!')
            return
        r = requests.get(url, allow_redirects=False)
    except requests.exceptions.ConnectionError:
        # Host unreachable ("page does not exist").
        print('stran ne obstaja!')
    else:
        global count
        if r.text != "":
            count = count + 1
            pripravi_imenik(ime_datoteke)
            # NOTE: mode 'a' appends, so repeated forced downloads
            # accumulate in one file; each dump is preceded by a
            # "tekma\n" marker line — presumably a record separator for
            # the downstream parser (confirm against the consumer).
            with open(ime_datoteke, 'a', encoding='utf-8') as datoteka:
                datoteka.write("tekma\n")
                datoteka.write(r.text)
            print('shranjeno!')
            print(count)
        else:
            # Empty response body: skipped ("preskočeno").
            print('preskočeno!')
            print(count)
def vsebina_datoteke(ime_datoteke):
    """Return the entire contents of the named file as a UTF-8 string."""
    with open(ime_datoteke, encoding='utf-8') as vir:
        return vir.read()
def zapisi_csv(slovarji, imena_polj, ime_datoteke):
    """Write *slovarji* (a list of dicts) to a CSV file with header *imena_polj*.

    Fix: files handed to the csv module must be opened with ``newline=''``;
    without it every data row is followed by a blank line on Windows
    (csv module documentation).
    """
    pripravi_imenik(ime_datoteke)
    with open(ime_datoteke, 'w', newline='', encoding='utf-8') as csv_datoteka:
        writer = csv.DictWriter(csv_datoteka, fieldnames=imena_polj)
        writer.writeheader()
        writer.writerows(slovarji)
def zapisi_json(objekt, ime_datoteke):
    """Serialize *objekt* to the named file as indented UTF-8 JSON."""
    pripravi_imenik(ime_datoteke)
    with open(ime_datoteke, 'w', encoding='utf-8') as izhod:
        json.dump(objekt, izhod, indent=4, ensure_ascii=False)
| 31.619048 | 75 | 0.64508 |
176a5af9bed2ffe4a27e09b8c2972ea7fe5b4059
| 14,676 |
py
|
Python
|
Paired Recordings/Preprint Code/5_Figure 6 and supplement - Spike Reliability.py
|
kampff-lab/sc.io
|
0d110856ebbb2d20bf13c49c3b5d77bf622c9df4
|
[
"MIT"
] | 17 |
2018-05-10T09:37:15.000Z
|
2021-07-11T17:51:43.000Z
|
Paired Recordings/Preprint Code/5_Figure 6 and supplement - Spike Reliability.py
|
kampff-lab/sc.io
|
0d110856ebbb2d20bf13c49c3b5d77bf622c9df4
|
[
"MIT"
] | 1 |
2019-09-10T09:00:26.000Z
|
2020-05-23T13:47:42.000Z
|
Paired Recordings/Preprint Code/5_Figure 6 and supplement - Spike Reliability.py
|
kampff-lab/sc.io
|
0d110856ebbb2d20bf13c49c3b5d77bf622c9df4
|
[
"MIT"
] | 16 |
2018-04-06T18:27:34.000Z
|
2020-05-30T18:44:59.000Z
|
#%%Imports
import pandas
import numpy as np
import matplotlib.pyplot as plt
import os
from collections import defaultdict
import random
import math
import scipy.stats as stats
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
#%%
def cm2inch(*tupl):
    """Convert centimetre values to inches.

    Accepts either separate positional values or a single tuple of values;
    returns a tuple of inches either way.
    """
    inch = 2.54
    values = tupl[0] if isinstance(tupl[0], tuple) else tupl
    return tuple(v / inch for v in values)
def rolling_mean(a, w):
    """Trailing moving average of window *w* over 1-D array *a*.

    Entries before the first complete window are NaN.
    """
    out = np.full(a.shape, np.nan)
    for end in range(w - 1, a.shape[0]):
        out[end] = np.mean(a[end - w + 1:end + 1])
    return out
def rolling_median(a, w):
    """Trailing moving median of window *w* over 1-D array *a*.

    Entries before the first complete window are NaN.
    """
    out = np.full(a.shape, np.nan)
    for end in range(w - 1, a.shape[0]):
        out[end] = np.median(a[end - w + 1:end + 1])
    return out
def rolling_std(a, w):
    """Trailing moving (population) standard deviation of window *w*.

    Entries before the first complete window are NaN.
    """
    out = np.full(a.shape, np.nan)
    for end in range(w - 1, a.shape[0]):
        out[end] = np.std(a[end - w + 1:end + 1])
    return out
#%%Data frames and structures
# Script-level configuration: hard-coded local paths to the aligned
# recordings and the patch-clamp summary spreadsheet.
listcells = os.listdir('E:/code/for analysis')
suffix = '_aligned_analysis'
data_summary = pandas.read_excel('C:/Users/Andre Marques-Smith/Dropbox/Patch-Clamp Summary 2.0.xlsx')
# Row indices of cells whose juxta-triggered average exceeds amplitude cutoffs.
cells_above_10uV = data_summary.index[data_summary['JTA Peak-Peak Amplitude'] >= 10].tolist()
#excluded_cells = [2, 6, 9, 10, 12, 21, 23]
excluded_cells = [2, 5, 9, 12, 21, 23]
cells_above_50uV = data_summary.index[data_summary['JTA Peak-Peak Amplitude'] >= 50].tolist()
cells_to_analyse = cells_above_50uV
#Excluding c27 - too many spikes confounding GT spike
cells_to_analyse.pop(5)
#%%Data structures
# Per-cell accumulators filled by the main loop below:
# parsed_spikes[row] = (good-spike waveforms, bad-spike waveforms);
# summary_stats[row, feature, :] = mean, std, CV, regression slope.
parsed_spikes = np.empty((len(cells_to_analyse), 2), dtype = object)
summary_stats = np.empty((len(cells_to_analyse), 8, 4), dtype = float) #8 features, mean, std, cv
pcent_spikes = np.empty((len(cells_to_analyse),1), dtype = float)
#%%
output_dir = 'C:/Users/Andre Marques-Smith/Dropbox/Paired Recordings biorxiv/repro/fig6/'
# Per-cell pipeline (NOTE: Python 2 script — bare `print` below): locate the
# aligned recording files for each cell, extract per-spike waveform features,
# reject outlier spikes by feature z-score, plot rolling feature stability
# and accumulate summary statistics.
for cell in cells_to_analyse:
    paths = defaultdict(list)
    cell_idx = listcells.index(data_summary.loc[cell]['Cell']+suffix)
    aligned_directory = 'E:/code/for analysis/'+listcells[cell_idx]
    row = cells_to_analyse.index(cell)
    #paths = defaultdict(list)
    #use cell id or type in string ('c45') below
    #cell = listcells.index('c14'+suffix)
    #aligned_directory = 'E:/code/for analysis/'+listcells[cell]
    #
    # Register each known file type of this cell's directory by suffix.
    for file in os.listdir(aligned_directory):
        print file
        if file.endswith('meta.npy'):
            paths['meta'].append(aligned_directory + '/' + file)
        elif file.endswith('patch_preprocessed.npy'):
            paths['patch_v'].append(aligned_directory + '/' + file)
        elif file.endswith('sta_wc.npy'):
            paths['patch_sta'].append(aligned_directory + '/' + file)
        elif file.endswith('wc_spike_samples.npy'):
            paths['patch_spikes'].append(aligned_directory + '/' + file)
        elif file.endswith('npx_preprocessed.bin'):
            paths['npx_v'].append(aligned_directory + '/' + file)
        elif file.endswith('sta_windows_chan_array.npy'):
            paths['npx_sta_array'].append(aligned_directory + '/' + file)
        elif file.endswith('sta_np_by_channel.npy'):
            paths['npx_sta_mean'].append(aligned_directory + '/' + file)
        elif file.endswith('channels_by_pk.npy'):
            paths['npx_chan_peaks'].append(aligned_directory + '/' + file)
        else:
            pass
    #%
    expt_meta = np.load(paths['meta'][0]).item()
    patch_v = np.load(paths['patch_v'][0])#, dtype = 'float64')
    patch_sta = np.load(paths['patch_sta'][0])#, dtype = 'float64')
    patch_spikes = np.load(paths['patch_spikes'][0])#, dtype = 'int16')
    #
    # Memory-map the raw Neuropixels binary (copy-on-write) and reshape to
    # (channels?, samples?) per the stored metadata — axes per expt_meta.
    npx_voltage = np.memmap( paths['npx_v'][0], mode = 'c', dtype=np.int16 )
    npx_voltage = npx_voltage.reshape((expt_meta['npx'][0][0], expt_meta['npx'][0][1]))
    #
    npx_sta_array = np.load(paths['npx_sta_array'][0])#, dtype = 'int16')
    npx_sta_mean = np.load(paths['npx_sta_mean'][0])#, dtype = 'float64')
    npx_chan_peaks = np.load(paths['npx_chan_peaks'][0])#, dtype = 'float64')
    central_chan = int(npx_chan_peaks[0,0])
    #
    # Sample-rate ratio between the patch and probe recordings.
    m = len(patch_v)/float(len(npx_voltage[0,:]))
    #%
    npx_gt_spikes = [int(i/m) for i in patch_spikes]
    gt_pk_lag = np.argmin(npx_sta_mean[central_chan])
    samprate = 30000.0
    #Extract spike features of ground truth spikes
    # Columns 0-7: pk2pk, half_width, duration, symmetry, neg/pos ratio,
    # latency, |neg peak|, pos peak; column 8: mean z-score (filled later).
    gt_spike_features = np.empty((len(npx_gt_spikes), 9), dtype=float)
    for spike in range(len(npx_gt_spikes)):
        t_ia = gt_pk_lag
        t_ib = t_ia+1
        baseline = np.median(npx_sta_array[central_chan,:,spike])
        #t_ia is the first point of threshold crossing for a spike (when it goes 'down')
        #t_ib is the last point (when it goes 'up')
        #To determine t_ia, I use npx_sta_array, which has the extracellular snippets for every patch spike. I go to the time point where, on average, the patch spike peak
        #was, and start treading back one sample at a time, until I cross the threshold (baseline). That procedure is repeated in the opposite direction to get t_ib.
        while npx_sta_array[central_chan, t_ia, spike] <= baseline:
            t_ia -= 1
            # NOTE(review): these bound checks are no-ops (`pass`) — the
            # loops can in principle walk out of the window; confirm intended.
            if t_ia < 30:
                pass
        while npx_sta_array[central_chan, t_ib, spike] <= baseline:
            t_ib += 1
            if t_ib > 80:
                pass
        neg_pk_i = t_ia + np.argmin( npx_sta_array[central_chan,t_ia:t_ib,spike] )
        # 2.34375 — presumably the ADC-count-to-microvolt conversion factor;
        # confirm against the acquisition settings.
        neg_pk_v = 2.34375 * npx_sta_array[central_chan, neg_pk_i, spike] - baseline
        pos_pk_i = t_ib + np.argmax( npx_sta_array[central_chan,t_ib:t_ib+20,spike] )
        pos_pk_v = 2.34375 * npx_sta_array[central_chan, pos_pk_i, spike] - baseline
        pk2pk = pos_pk_v - neg_pk_v
        duration = (pos_pk_i - neg_pk_i) * 1000/samprate
        # Negative values are measurement artefacts; reuse the previous
        # spike's value as a fallback.
        if duration <0:
            if spike >=1:
                duration = gt_spike_features[spike-1, 2]
        half_amp = (neg_pk_v/2.34375 + npx_sta_array[central_chan,t_ia,spike])/2
        half_amp_ia = t_ia + np.where(npx_sta_array[central_chan,t_ia:,spike] <=half_amp)[0][0]
        half_amp_ib = neg_pk_i + np.where(npx_sta_array[central_chan,neg_pk_i:,spike] >=half_amp)[0][0]
        half_width = (half_amp_ib - half_amp_ia) * 1000/samprate
        if half_width <0:
            if spike >=1:
                half_width = gt_spike_features[spike-1, 1]
        latency = (neg_pk_i - 60) * 1000/samprate
        symmetry = (neg_pk_i - t_ia) / float(0.001+(pos_pk_i - t_ib))
        if symmetry > 10:
            if spike >=1:
                symmetry = gt_spike_features[spike-1, 3]
        # Guard against division by zero for flat positive peaks.
        if pos_pk_v != 0:
            neg_pos_r = abs(neg_pk_v/pos_pk_v)
        else:
            neg_pos_r = abs(neg_pk_v/(pos_pk_v+0.1))
        gt_spike_features[spike, 0] = pk2pk
        gt_spike_features[spike, 1] = half_width
        gt_spike_features[spike, 2] = duration
        gt_spike_features[spike, 3] = symmetry
        gt_spike_features[spike, 4] = neg_pos_r
        gt_spike_features[spike, 5] = latency
        gt_spike_features[spike, 6] = abs(neg_pk_v)
        gt_spike_features[spike, 7] = pos_pk_v
    #GT spike vector is the average of the extracted spike features of all ground-truth spikes. Here to exclude background activity that may have summated with a GT spike,
    #I compute a z vector for each spike, which is how much its features differed from the average spike's features, for that cell.
    # Good spikes are below a mean difference threshold, bad spikes are above and get excluded from further reliability analysis.
    gt_spike_vector = np.mean(gt_spike_features[:,0:8], axis = 0)
    gt_spike_vector_std = np.std(gt_spike_features[:,0:8], axis = 0)
    z_vector = np.empty((1,8), dtype = float)
    #Get mean z score for each spike
    for spike in range(len(patch_spikes)):
        for feature in range(len(gt_spike_features[0,:])-1):
            z_vector[0,feature] = abs((gt_spike_features[spike, feature] - gt_spike_vector[feature])/gt_spike_vector_std[feature])
        gt_spike_features[spike, 8] = np.mean(z_vector)
    #
    # Mean-z threshold of 1.0 separates retained ("good") from excluded spikes.
    good_spikes = np.where(gt_spike_features[:,8]<=1.0)[0]
    bad_spikes = np.where(gt_spike_features[:,8]>1.0)[0]
    #
    pcent_spikes[ cells_to_analyse.index(cell) ] = round(100*len(good_spikes)/float(len(patch_spikes)),1)
    #
    rolling_plot = np.empty((8, len(good_spikes),2), dtype=float)
    #
    feature_labels = ['Peak-peak\namplitude (uV)', 'Half-width (ms)', 'Duration (ms)', 'Duration\nSymmetry', 'Negative-positive\nratio', 'Latency\nto patch (ms)', 'Negative\nPeak (uV)', 'Positive\nPeak (uV)' ]
    #
    fig, axs = plt.subplots(4,2, sharex = 'col', sharey=False, figsize=(15,10))
    axs = axs.ravel()
    #Rolling plots show the rolling mean over 100 spike events +- the rolling STD of 100 events. Here I calculate rolling mean and stdev for each feature as well.
    for feature in range(8):
        rolling_plot[feature,:,0] = rolling_mean(gt_spike_features[good_spikes, feature], 100 )
        rolling_plot[feature,:,1] = rolling_std(gt_spike_features[good_spikes, feature], 100 )
        axs[feature].scatter(range(len(good_spikes)), gt_spike_features[good_spikes, feature ], s = 10, c = '#011993', alpha = 0.3)
        axs[feature].plot( rolling_plot[feature,:,0], c = 'r' )
        axs[feature].fill_between(range(len(good_spikes)), rolling_plot[feature,:,0] - rolling_plot[feature,:,1], rolling_plot[feature,:,0] + rolling_plot[feature,:,1], color = 'orange' )
        axs[feature].set_ylabel(feature_labels[feature], fontweight = 'bold', fontsize = 12)
        axs[feature].tick_params(axis='both',labelsize = 10)
    axs[1].set_ylim([0, 1])
    axs[2].set_ylim([0, 2])
    axs[5].set_ylim([-1, 1])
    plt.subplots_adjust(top = 0.95, bottom = 0.05, left = 0.15, right = 0.85)
    fig.suptitle(data_summary.loc[cell]['Cell'], fontweight = 'bold')
    #
    plt.savefig( output_dir + data_summary.loc[cell]['Cell'] + '.png', dpi = 300)
    plt.close()
    # Spike index as the regression design matrix (one column).
    X = np.empty(len(good_spikes), dtype = int)
    for i in range(len(good_spikes)):
        X[i] = i
    X = X.reshape(-1, 1)
    linreg = LinearRegression()
    #Here I calculate summary statistics for the running mean and standard deviation. I obtain a grand mean of every feature per cell, which is the mean of the running average,
    #and a grand standard deviation which is the mean of the running standard deviation. This is done in order to attempt to control for the slow timescale change in features
    #that we often observed, which we cannot exclude may be due to simultaneously recording the neuron with a patch electrode. I also calculate the coefficient of variation.
    #Moreover, I also calculate the linear regression slope coefficient for every cell and feature, checking if there is an overall tendency for a particular feature
    #to move in a particular direction over time. Features are taken from here and plotted in graphpad.
    for feature in range(8):
        summary_stats[row, feature, 0] = np.nanmean(rolling_plot[feature, :, 0])
        summary_stats[row, feature, 1] = np.nanmean(rolling_plot[feature, :, 1])
        summary_stats[row, feature, 2] = abs(summary_stats[row, feature, 1] / summary_stats[row, feature, 0])
        Y = gt_spike_features[good_spikes, feature]
        linreg.fit(X, Y)
        summary_stats[row, feature, 3] = linreg.coef_[0]
    #
    # Keep the microvolt-scaled waveform windows (samples 30:90) of good and
    # bad spikes for the figure panels below.
    parsed_spikes[row, 0] = 2.34375*npx_sta_array[central_chan,30:90, good_spikes]
    parsed_spikes[row, 1] = 2.34375*npx_sta_array[central_chan,30:90, bad_spikes]
#%%subplot for "good" spikes - Fig 6A
# One panel per cell: every retained spike waveform (median-subtracted) in
# translucent blue, the mean waveform in cyan, and the percentage of spikes
# retained annotated inside the panel.
spike_splits, axspikes = plt.subplots(3, 3, sharex = False, sharey = False, figsize=(7.5,15))
axspikes = axspikes.ravel()
#
for cell in cells_to_analyse:
#    for spike in range(len(parsed_spikes[cells_to_analyse.index(cell), 1])):
#        axspikes[cells_to_analyse.index(cell)].plot( parsed_spikes[cells_to_analyse.index(cell), 1][spike] - np.median(parsed_spikes[cells_to_analyse.index(cell), 1][spike] ), alpha = 0.1, color = 'orange')
    #
    for spike in range(len(parsed_spikes[cells_to_analyse.index(cell), 0])):
        axspikes[cells_to_analyse.index(cell)].plot(parsed_spikes[cells_to_analyse.index(cell), 0][spike] - np.median( parsed_spikes[cells_to_analyse.index(cell), 0][spike] ), alpha = 0.05, color = 'blue', lw= 0.7)
    axspikes[cells_to_analyse.index(cell)].set_title('%s'%(pcent_spikes[cells_to_analyse.index(cell)][0])+'%', fontweight = 'bold', loc='right', y = 0.05, color = 'blue' )
    axspikes[cells_to_analyse.index(cell)].plot(np.mean( parsed_spikes[cells_to_analyse.index(cell), 0], axis = 0 ), color = 'c' )
    # 60-sample window at 30 kHz -> ticks relabelled in milliseconds.
    axspikes[cells_to_analyse.index(cell)].set_xticks([0,15, 30, 45, 60])
    axspikes[cells_to_analyse.index(cell)].set_xticklabels([-1, -0.5, 0, 0.5, 1])
    axspikes[cells_to_analyse.index(cell)].set_title(data_summary.loc[cell]['Cell'], fontweight = 'bold', color = 'k' )
plt.subplots_adjust(hspace = 0.3, wspace = 0.3)
#%%
plt.savefig( 'C:/Users/Andre Marques-Smith/Dropbox/Paired Recordings biorxiv/Figure 5_materials/good_spikes.png', dpi = 300)
plt.close()
#%%subplot for "bad" spikes - Supplement to Fig 6-1A
# Same layout as the good-spike figure, but plotting the rejected
# (z-score > 1.0) waveforms in orange.
spike_splits, axspikes = plt.subplots(3, 3, sharex = False, sharey = False, figsize=(7.5,15))
axspikes = axspikes.ravel()
for cell in cells_to_analyse:
#    for spike in range(len(parsed_spikes[cells_to_analyse.index(cell), 1])):
#        axspikes[cells_to_analyse.index(cell)].plot( parsed_spikes[cells_to_analyse.index(cell), 1][spike] - np.median(parsed_spikes[cells_to_analyse.index(cell), 1][spike] ), alpha = 0.1, color = 'orange')
    #
    for spike in range(len(parsed_spikes[cells_to_analyse.index(cell), 1])):
        axspikes[cells_to_analyse.index(cell)].plot(parsed_spikes[cells_to_analyse.index(cell), 1][spike] - np.median( parsed_spikes[cells_to_analyse.index(cell), 1][spike] ), alpha = 0.05, color = 'orange', lw= 0.7)
    axspikes[cells_to_analyse.index(cell)].set_xticks([0,15, 30, 45, 60])
    axspikes[cells_to_analyse.index(cell)].set_xticklabels([-1, -0.5, 0, 0.5, 1])
    axspikes[cells_to_analyse.index(cell)].set_title(data_summary.loc[cell]['Cell'], fontweight = 'bold', color = 'k' )
plt.subplots_adjust(hspace = 0.3, wspace = 0.3)
#
plt.savefig( 'C:/Users/Andre Marques-Smith/Dropbox/Paired Recordings biorxiv/Figure 5_materials/bad_spikes.png', dpi = 300)
plt.close()
| 51.135889 | 217 | 0.65631 |
9e1c26cc130b635bef7b127f1b8c5adb440121c3
| 871 |
py
|
Python
|
448.Find-All-Numbers-Disappeared-in-an-Array.py
|
mickey0524/leetcode
|
6bedeb6ff29b02a97178cca464c5fd639951801f
|
[
"MIT"
] | 18 |
2018-07-14T12:45:37.000Z
|
2022-03-26T14:51:04.000Z
|
448.Find-All-Numbers-Disappeared-in-an-Array.py
|
mickey0524/leetcode
|
6bedeb6ff29b02a97178cca464c5fd639951801f
|
[
"MIT"
] | null | null | null |
448.Find-All-Numbers-Disappeared-in-an-Array.py
|
mickey0524/leetcode
|
6bedeb6ff29b02a97178cca464c5fd639951801f
|
[
"MIT"
] | 3 |
2019-05-29T04:09:22.000Z
|
2021-06-07T23:37:46.000Z
|
# https://leetcode.com/problems/find-all-numbers-disappeared-in-an-array/
#
# algorithms
# Easy (53.99%)
# Total Accepted: 170,160
# Total Submissions: 315,198
class Solution(object):
    def findDisappearedNumbers(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]

        Cyclic sort: swap each value v into slot v-1 until every present
        value sits at its own index; any index i with nums[i] != i+1 then
        marks a missing number i+1.  O(n) time, O(1) extra space.

        Fix: the original used Python-2-only ``xrange`` (NameError on
        Python 3) and the obscure ``res += (i + 1),`` idiom; ``range`` and a
        list comprehension work identically on both Python 2 and 3.
        """
        n = len(nums)
        for i in range(n):
            # Keep swapping until slot i holds i+1 or its target slot
            # already holds the right value (duplicate).
            while nums[i] != i + 1 and nums[nums[i] - 1] != nums[i]:
                j = nums[i] - 1
                nums[i], nums[j] = nums[j], nums[i]
        return [i + 1 for i in range(n) if nums[i] != i + 1]
| 24.194444 | 73 | 0.405281 |
c6ac0fc8513192375c44cf7e9c47490c32b955c8
| 7,642 |
py
|
Python
|
configs/_base_/models/htc_without_semantic_swin_fpn.py
|
ace19-dev/CBNetV2
|
b08cfa8bbe438cd72651da3049ec4829f168ba81
|
[
"Apache-2.0"
] | null | null | null |
configs/_base_/models/htc_without_semantic_swin_fpn.py
|
ace19-dev/CBNetV2
|
b08cfa8bbe438cd72651da3049ec4829f168ba81
|
[
"Apache-2.0"
] | null | null | null |
configs/_base_/models/htc_without_semantic_swin_fpn.py
|
ace19-dev/CBNetV2
|
b08cfa8bbe438cd72651da3049ec4829f168ba81
|
[
"Apache-2.0"
] | null | null | null |
# model settings
# Hybrid Task Cascade (without the semantic-segmentation branch) on a
# Swin-Transformer backbone with FPN.  All bbox/mask heads use
# num_classes=4 (dataset-specific).
model = dict(
    type='HybridTaskCascade',
    pretrained=None,
    backbone=dict(
        type='SwinTransformer',
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.2,
        ape=False,
        patch_norm=True,
        out_indices=(0, 1, 2, 3),
        use_checkpoint=False),
    neck=dict(
        type='FPN',
        in_channels=[96, 192, 384, 768],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    roi_head=dict(
        type='HybridTaskCascadeRoIHead',
        interleaved=True,
        mask_info_flow=True,
        num_stages=3,
        stage_loss_weights=[1, 0.5, 0.25],
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        # Three cascade stages; regression target stds tighten per stage.
        bbox_head=[
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=4,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=4,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.05, 0.05, 0.1, 0.1]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=4,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.033, 0.033, 0.067, 0.067]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
        ],
        mask_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        # One mask head per cascade stage; only the first disables the
        # conv residual connection (with_conv_res=False).
        mask_head=[
            dict(
                type='HTCMaskHead',
                with_conv_res=False,
                num_convs=4,
                in_channels=256,
                conv_out_channels=256,
                num_classes=4,
                loss_mask=dict(
                    type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
            dict(
                type='HTCMaskHead',
                num_convs=4,
                in_channels=256,
                conv_out_channels=256,
                num_classes=4,
                loss_mask=dict(
                    type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
            dict(
                type='HTCMaskHead',
                num_convs=4,
                in_channels=256,
                conv_out_channels=256,
                num_classes=4,
                loss_mask=dict(
                    type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))
        ],
    ),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        # Cascade R-CNN stages: IoU thresholds rise 0.5 -> 0.6 -> 0.7.
        rcnn=[
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.5,
                    neg_iou_thr=0.5,
                    min_pos_iou=0.5,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.6,
                    neg_iou_thr=0.6,
                    min_pos_iou=0.6,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.7,
                    neg_iou_thr=0.7,
                    min_pos_iou=0.7,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False)
        ]),
    test_cfg=dict(
        rpn=dict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100,
            mask_thr_binary=0.5)))
| 34.423423 | 79 | 0.448966 |
775721980050bcea76134cb90c3f2e0b9d2acb44
| 20,641 |
py
|
Python
|
tests/python/unittest/test_image.py
|
sandutsar/incubator-mxnet
|
3ae7ddfd9c99f40a33c4cb716b3810e5463b810a
|
[
"Apache-2.0",
"MIT"
] | 1 |
2021-05-11T18:04:50.000Z
|
2021-05-11T18:04:50.000Z
|
tests/python/unittest/test_image.py
|
sandutsar/incubator-mxnet
|
3ae7ddfd9c99f40a33c4cb716b3810e5463b810a
|
[
"Apache-2.0",
"MIT"
] | 2 |
2022-01-13T03:57:21.000Z
|
2022-03-12T01:01:35.000Z
|
tests/python/unittest/test_image.py
|
sandutsar/incubator-mxnet
|
3ae7ddfd9c99f40a33c4cb716b3810e5463b810a
|
[
"Apache-2.0",
"MIT"
] | 1 |
2019-12-11T19:22:13.000Z
|
2019-12-11T19:22:13.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import mxnet as mx
import numpy as np
import scipy.ndimage
from mxnet.test_utils import *
from common import xfail_when_nonstandard_decimal_separator
import shutil
import tempfile
import unittest
import pytest
mx.npx.reset_np()
def _get_data(url, dirname):
    """Download ``url`` into ``dirname`` and extract the tarball there.

    Returns the list of absolute paths of the regular files listed in the
    archive. Extraction is skipped when the first listed file already exists
    on disk (previous run left the data in place).
    """
    import os, tarfile
    # `download` comes from mxnet.test_utils via the star import at file top.
    download(url, dirname=dirname, overwrite=False)
    fname = os.path.join(dirname, url.split('/')[-1])
    tar = tarfile.open(fname)
    source_images = [os.path.join(dirname, x.name) for x in tar.getmembers() if x.isfile()]
    if len(source_images) < 1 or not os.path.isfile(source_images[0]):
        # skip extracting if exists
        tar.extractall(path=dirname)
    tar.close()
    return source_images
def _generate_objects():
num = np.random.randint(1, 10)
xy = np.random.rand(num, 2)
wh = np.random.rand(num, 2) / 2
left = (xy[:, 0] - wh[:, 0])[:, np.newaxis]
right = (xy[:, 0] + wh[:, 0])[:, np.newaxis]
top = (xy[:, 1] - wh[:, 1])[:, np.newaxis]
bot = (xy[:, 1] + wh[:, 1])[:, np.newaxis]
boxes = np.maximum(0., np.minimum(1., np.hstack((left, top, right, bot))))
cid = np.random.randint(0, 20, size=num)
label = np.hstack((cid[:, np.newaxis], boxes)).ravel().tolist()
return [2, 5] + label
def _test_imageiter_last_batch(imageiter_list, assert_data_shape):
    """Verify last-batch handling for a list of image iterators built over the
    same image set.

    Expected order of ``imageiter_list``: [default, 'discard', 'pad',
    'roll_over', shuffled 'pad'].  The hard-coded batch counts (5 and 6)
    assume the downloaded test image set has the size the pad/discard math
    expects -- TODO confirm against the dataset tarball if it changes.
    """
    test_iter = imageiter_list[0]
    # test batch data shape
    for _ in range(3):
        for batch in test_iter:
            assert batch.data[0].shape == assert_data_shape
        test_iter.reset()
    # test last batch handle(discard): the incomplete final batch is dropped
    test_iter = imageiter_list[1]
    i = 0
    for batch in test_iter:
        i += 1
    assert i == 5
    # test last_batch_handle(pad): the final batch is padded by wrapping to
    # the first images, so first and last batches share data
    test_iter = imageiter_list[2]
    i = 0
    for batch in test_iter:
        if i == 0:
            first_three_data = batch.data[0][:2]
        if i == 5:
            last_three_data = batch.data[0][1:]
        i += 1
    assert i == 6
    assert np.array_equal(first_three_data.asnumpy(), last_three_data.asnumpy())
    # test last_batch_handle(roll_over): leftover images roll into the first
    # batch of the next epoch, reported via the batch's `pad` attribute
    test_iter = imageiter_list[3]
    i = 0
    for batch in test_iter:
        if i == 0:
            first_image = batch.data[0][0]
        i += 1
    assert i == 5
    test_iter.reset()
    first_batch_roll_over = test_iter.next()
    assert np.array_equal(
        first_batch_roll_over.data[0][1].asnumpy(), first_image.asnumpy())
    assert first_batch_roll_over.pad == 2
    # test iterator works properly after calling reset several times when last_batch_handle is roll_over
    for _ in test_iter:
        pass
    test_iter.reset()
    first_batch_roll_over_twice = test_iter.next()
    assert np.array_equal(
        first_batch_roll_over_twice.data[0][2].asnumpy(), first_image.asnumpy())
    assert first_batch_roll_over_twice.pad == 1
    # we've called next once
    i = 1
    for _ in test_iter:
        i += 1
    # test the third epoch with size 6
    assert i == 6
    # test shuffle option for sanity test
    test_iter = imageiter_list[4]
    for _ in test_iter:
        pass
class TestImage(unittest.TestCase):
    """Integration tests for ``mx.image``: decoding, resizing, augmentation and
    the ImageIter / ImageDetIter data iterators, run against a small tarball of
    real images downloaded in :meth:`setUp`.  Several tests compare against
    OpenCV and are skipped when ``cv2`` is unavailable."""

    IMAGES_URL = "https://repo.mxnet.io/gluon/dataset/test/test_images-9cebe48a.tar.gz"

    def setUp(self):
        # Fresh temporary directory per test; removed again in tearDown.
        self.IMAGES_DIR = tempfile.mkdtemp()
        self.IMAGES = _get_data(self.IMAGES_URL, self.IMAGES_DIR)
        print("Loaded {} images".format(len(self.IMAGES)))

    def tearDown(self):
        if self.IMAGES_DIR:
            print("cleanup {}".format(self.IMAGES_DIR))
            shutil.rmtree(self.IMAGES_DIR)

    def test_imread_not_found(self):
        # Reading a non-existent path must raise MXNetError.
        with pytest.raises(mx.base.MXNetError):
            x = mx.img.image.imread("/139810923jadjsajlskd.___adskj/blah.jpg")

    def test_imread_vs_imdecode(self):
        # imread(path) and imdecode(bytes) should produce the same pixels.
        for img in self.IMAGES:
            with open(img, 'rb') as fp:
                str_image = fp.read()
                image = mx.image.imdecode(str_image, to_rgb=0)
                image_read = mx.img.image.imread(img)
                # NOTE(review): the return value of `same(...)` is discarded,
                # so a pixel mismatch would not fail this test -- presumably
                # it should be wrapped in an assert; confirm intent.
                same(image.asnumpy(), image_read.asnumpy())

    def test_imdecode(self):
        try:
            import cv2
        except ImportError:
            raise unittest.SkipTest("Unable to import cv2.")
        for img in self.IMAGES:
            with open(img, 'rb') as fp:
                str_image = fp.read()
                image = mx.image.imdecode(str_image, to_rgb=0)
            # to_rgb=0 keeps BGR channel order, matching cv2.imread output.
            cv_image = cv2.imread(img)
            assert_almost_equal(image.asnumpy(), cv_image)

    def test_imdecode_bytearray(self):
        # Same as test_imdecode but feeding a bytearray buffer.
        try:
            import cv2
        except ImportError:
            # NOTE(review): silently returns instead of SkipTest like the
            # sibling tests -- the test reports as passed without cv2.
            return
        for img in self.IMAGES:
            with open(img, 'rb') as fp:
                str_image = bytearray(fp.read())
                image = mx.image.imdecode(str_image, to_rgb=0)
            cv_image = cv2.imread(img)
            assert_almost_equal(image.asnumpy(), cv_image)

    def test_imdecode_empty_buffer(self):
        with pytest.raises(mx.base.MXNetError):
            mx.image.imdecode(b'', to_rgb=0)

    def test_imdecode_invalid_image(self):
        with pytest.raises(mx.base.MXNetError):
            image = mx.image.imdecode(b'clearly not image content')

    def test_scale_down(self):
        # scale_down shrinks the requested crop size to fit the source image
        # while keeping the requested aspect ratio.
        assert mx.image.scale_down((640, 480), (720, 120)) == (640, 106)
        assert mx.image.scale_down((360, 1000), (480, 500)) == (360, 375)
        assert mx.image.scale_down((300, 400), (0, 0)) == (0, 0)

    def test_resize_short(self):
        try:
            import cv2
        except ImportError:
            raise unittest.SkipTest("Unable to import cv2")
        for img in self.IMAGES:
            cv_img = cv2.imread(img)
            # Channel reversal converts cv2's BGR to RGB for the mxnet side.
            mx_img = mx.nd.array(cv_img[:, :, (2, 1, 0)])
            h, w, _ = cv_img.shape
            for _ in range(3):
                new_size = np.random.randint(1, 1000)
                if h > w:
                    new_h, new_w = new_size * h // w, new_size
                else:
                    new_h, new_w = new_size, new_size * w // h
                for interp in range(0, 2):
                    # area-based/lanczos don't match with cv2?
                    cv_resized = cv2.resize(cv_img, (new_w, new_h), interpolation=interp)
                    mx_resized = mx.image.resize_short(mx_img, new_size, interp)
                    assert_almost_equal(mx_resized.asnumpy()[:, :, (2, 1, 0)], cv_resized, atol=3)

    def test_imresize(self):
        try:
            import cv2
        except ImportError:
            raise unittest.SkipTest("Unable to import cv2")
        for img in self.IMAGES:
            cv_img = cv2.imread(img)
            mx_img = mx.nd.array(cv_img[:, :, (2, 1, 0)])
            new_h = np.random.randint(1, 1000)
            new_w = np.random.randint(1, 1000)
            for interp_val in range(0, 2):
                cv_resized = cv2.resize(cv_img, (new_w, new_h), interpolation=interp_val)
                mx_resized = mx.image.imresize(mx_img, new_w, new_h, interp=interp_val)
                assert_almost_equal(mx_resized.asnumpy()[:, :, (2, 1, 0)], cv_resized, atol=3)
                # Also exercise the pre-allocated `out=` variant.
                out_img = mx.nd.zeros((new_h, new_w, 3), dtype=mx_img.dtype)
                mx.image.imresize(mx_img, new_w, new_h, interp=interp_val, out=out_img)
                assert_almost_equal(out_img.asnumpy()[:, :, (2, 1, 0)], cv_resized, atol=3)

    def test_color_normalize(self):
        # color_normalize must equal (src - mean) / std elementwise.
        for _ in range(10):
            mean = np.random.rand(3) * 255
            std = np.random.rand(3) + 1
            width = np.random.randint(100, 500)
            height = np.random.randint(100, 500)
            src = np.random.rand(height, width, 3) * 255.
            mx_result = mx.image.color_normalize(mx.nd.array(src),
                                                 mx.nd.array(mean), mx.nd.array(std))
            assert_almost_equal(mx_result.asnumpy(), (src - mean) / std, atol=1e-3)

    def test_imageiter(self):
        print(self.IMAGES)
        im_list = [[np.random.randint(0, 5), x] for x in self.IMAGES]
        fname = os.path.join(self.IMAGES_DIR, 'test_imageiter.lst')
        # .lst format: index \t label \t path
        file_list = ['\t'.join([str(k), str(np.random.randint(0, 5)), x])
                     for k, x in enumerate(self.IMAGES)]
        with open(fname, 'w') as f:
            for line in file_list:
                f.write(line + '\n')

        # Exercise both input styles (in-memory imglist and on-disk .lst file)
        # across every supported dtype and last_batch_handle mode.
        test_list = ['imglist', 'path_imglist']
        for dtype in ['int32', 'float32', 'int64', 'float64']:
            for test in test_list:
                imglist = im_list if test == 'imglist' else None
                path_imglist = fname if test == 'path_imglist' else None
                imageiter_list = [
                    mx.image.ImageIter(2, (3, 224, 224), label_width=1, imglist=imglist,
                        path_imglist=path_imglist, path_root=self.IMAGES_DIR, dtype=dtype),
                    mx.image.ImageIter(3, (3, 224, 224), label_width=1, imglist=imglist,
                        path_imglist=path_imglist, path_root=self.IMAGES_DIR, dtype=dtype, last_batch_handle='discard'),
                    mx.image.ImageIter(3, (3, 224, 224), label_width=1, imglist=imglist,
                        path_imglist=path_imglist, path_root=self.IMAGES_DIR, dtype=dtype, last_batch_handle='pad'),
                    mx.image.ImageIter(3, (3, 224, 224), label_width=1, imglist=imglist,
                        path_imglist=path_imglist, path_root=self.IMAGES_DIR, dtype=dtype, last_batch_handle='roll_over'),
                    mx.image.ImageIter(3, (3, 224, 224), label_width=1, imglist=imglist, shuffle=True,
                        path_imglist=path_imglist, path_root=self.IMAGES_DIR, dtype=dtype, last_batch_handle='pad')
                ]
                _test_imageiter_last_batch(imageiter_list, (2, 3, 224, 224))

    def test_copyMakeBorder(self):
        try:
            import cv2
        except ImportError:
            raise unittest.SkipTest("Unable to import cv2")
        for img in self.IMAGES:
            cv_img = cv2.imread(img)
            mx_img = mx.nd.array(cv_img)
            top = np.random.randint(1, 10)
            bot = np.random.randint(1, 10)
            left = np.random.randint(1, 10)
            right = np.random.randint(1, 10)
            new_h, new_w, _ = mx_img.shape
            new_h += top + bot
            new_w += left + right
            val = [np.random.randint(1, 255)] * 3
            # type_val ranges over the cv2 border type constants 0..4.
            for type_val in range(0, 5):
                cv_border = cv2.copyMakeBorder(cv_img, top, bot, left, right, borderType=type_val, value=val)
                mx_border = mx.image.copyMakeBorder(mx_img, top, bot, left, right, type=type_val, values=val)
                assert_almost_equal(mx_border.asnumpy(), cv_border)
                # Pre-allocated `out=` variant must match too.
                out_img = mx.nd.zeros((new_h, new_w, 3), dtype=mx_img.dtype)
                mx.image.copyMakeBorder(mx_img, top, bot, left, right, type=type_val, values=val, out=out_img)
                assert_almost_equal(out_img.asnumpy(), cv_border)

    def test_augmenters(self):
        # ColorNormalizeAug
        mean = np.random.rand(3) * 255
        std = np.random.rand(3) + 1
        width = np.random.randint(100, 500)
        height = np.random.randint(100, 500)
        src = np.random.rand(height, width, 3) * 255.
        # We test numpy and mxnet NDArray inputs
        color_norm_aug = mx.image.ColorNormalizeAug(mean=mx.nd.array(mean), std=std)
        out_image = color_norm_aug(mx.nd.array(src))
        assert_almost_equal(out_image.asnumpy(), (src - mean) / std, atol=1e-3)

        # only test if all augmenters will work
        # TODO(Joshua Zhang): verify the augmenter outputs
        im_list = [[0, x] for x in self.IMAGES]
        test_iter = mx.image.ImageIter(2, (3, 224, 224), label_width=1, imglist=im_list,
            resize=640, rand_crop=True, rand_resize=True, rand_mirror=True, mean=True,
            std=np.array([1.1, 1.03, 1.05]), brightness=0.1, contrast=0.1, saturation=0.1,
            hue=0.1, pca_noise=0.1, rand_gray=0.2, inter_method=10, path_root=self.IMAGES_DIR, shuffle=True)
        for batch in test_iter:
            pass

    def test_image_detiter(self):
        im_list = [_generate_objects() + [x] for x in self.IMAGES]
        det_iter = mx.image.ImageDetIter(2, (3, 300, 300), imglist=im_list, path_root=self.IMAGES_DIR)
        for _ in range(3):
            for _ in det_iter:
                pass
            det_iter.reset()

        # sync_label_shape must make both iterators agree on shapes.
        val_iter = mx.image.ImageDetIter(2, (3, 300, 300), imglist=im_list, path_root=self.IMAGES_DIR)
        det_iter = val_iter.sync_label_shape(det_iter)
        assert det_iter.data_shape == val_iter.data_shape
        assert det_iter.label_shape == val_iter.label_shape

        # test batch_size is not divisible by number of images
        det_iter = mx.image.ImageDetIter(4, (3, 300, 300), imglist=im_list, path_root=self.IMAGES_DIR)
        for _ in det_iter:
            pass

        # test file list with last batch handle
        fname = os.path.join(self.IMAGES_DIR, 'test_imagedetiter.lst')
        im_list = [[k] + _generate_objects() + [x] for k, x in enumerate(self.IMAGES)]
        with open(fname, 'w') as f:
            for line in im_list:
                line = '\t'.join([str(k) for k in line])
                f.write(line + '\n')
        imageiter_list = [
            mx.image.ImageDetIter(2, (3, 400, 400),
                path_imglist=fname, path_root=self.IMAGES_DIR),
            mx.image.ImageDetIter(3, (3, 400, 400),
                path_imglist=fname, path_root=self.IMAGES_DIR, last_batch_handle='discard'),
            mx.image.ImageDetIter(3, (3, 400, 400),
                path_imglist=fname, path_root=self.IMAGES_DIR, last_batch_handle='pad'),
            mx.image.ImageDetIter(3, (3, 400, 400),
                path_imglist=fname, path_root=self.IMAGES_DIR, last_batch_handle='roll_over'),
            mx.image.ImageDetIter(3, (3, 400, 400), shuffle=True,
                path_imglist=fname, path_root=self.IMAGES_DIR, last_batch_handle='pad')
        ]
        _test_imageiter_last_batch(imageiter_list, (2, 3, 400, 400))

    def test_det_augmenters(self):
        # only test if all augmenters will work
        # TODO(Joshua Zhang): verify the augmenter outputs
        im_list = [_generate_objects() + [x] for x in self.IMAGES]
        det_iter = mx.image.ImageDetIter(2, (3, 300, 300), imglist=im_list, path_root=self.IMAGES_DIR,
            resize=640, rand_crop=1, rand_pad=1, rand_gray=0.1, rand_mirror=True, mean=True,
            std=np.array([1.1, 1.03, 1.05]), brightness=0.1, contrast=0.1, saturation=0.1,
            pca_noise=0.1, hue=0.1, inter_method=10, min_object_covered=0.5,
            aspect_ratio_range=(0.2, 5), area_range=(0.1, 4.0), min_eject_coverage=0.5,
            max_attempts=50)
        for batch in det_iter:
            pass

    def test_random_size_crop(self):
        # test aspect ratio within bounds
        width = np.random.randint(100, 500)
        height = np.random.randint(100, 500)
        src = np.random.rand(height, width, 3) * 255.
        ratio = (0.75, 1)
        epsilon = 0.05
        out, (x0, y0, new_w, new_h) = mx.image.random_size_crop(mx.nd.array(src), size=(width, height), area=0.08, ratio=ratio)
        _, pts = mx.image.center_crop(mx.nd.array(src), size=(width, height))
        # When random_size_crop falls back to a center crop the ratio bound
        # does not apply, hence the inequality guard.
        if (x0, y0, new_w, new_h) != pts:
            # NOTE(review): message typo "ration" kept -- runtime string.
            assert ratio[0] - epsilon <= float(new_w)/new_h <= ratio[1] + epsilon, \
                'ration of new width and height out of the bound{}/{}={}'.format(new_w, new_h, float(new_w)/new_h)

    @xfail_when_nonstandard_decimal_separator
    def test_imrotate(self):
        # test correctness against scipy.ndimage.rotate on a smooth gradient
        xlin = np.expand_dims(np.linspace(0, 0.5, 30), axis=1)
        ylin = np.expand_dims(np.linspace(0, 0.5, 60), axis=0)
        np_img = np.expand_dims(xlin + ylin, axis=2)
        # rotate with imrotate
        nd_img = mx.nd.array(np_img.transpose((2, 0, 1)))  # convert to CHW
        rot_angle = 6
        args = {'src': nd_img, 'rotation_degrees': rot_angle, 'zoom_in': False, 'zoom_out': False}
        nd_rot = mx.image.imrotate(**args)
        npnd_rot = nd_rot.asnumpy().transpose((1, 2, 0))
        # rotate with scipy
        scipy_rot = scipy.ndimage.rotate(np_img, rot_angle, axes=(1, 0), reshape=False,
                                         order=1, mode='constant', prefilter=False)
        # cannot compare the edges (where image ends) because of different behavior
        assert_almost_equal(scipy_rot[10:20, 20:40, :], npnd_rot[10:20, 20:40, :])

        # test if execution raises exceptions in any allowed mode
        # batch mode
        img_in = mx.nd.random.uniform(0, 1, (5, 3, 30, 60), dtype=np.float32)
        nd_rots = mx.nd.array([1, 2, 3, 4, 5], dtype=np.float32)
        args = {'src': img_in, 'rotation_degrees': nd_rots, 'zoom_in': False, 'zoom_out': False}
        _ = mx.image.imrotate(**args)
        args = {'src': img_in, 'rotation_degrees': nd_rots, 'zoom_in': False, 'zoom_out': True}
        _ = mx.image.imrotate(**args)
        args = {'src': img_in, 'rotation_degrees': nd_rots, 'zoom_in': True, 'zoom_out': False}
        _ = mx.image.imrotate(**args)
        # single image mode
        nd_rots = 11
        img_in = mx.nd.random.uniform(0, 1, (3, 30, 60), dtype=np.float32)
        args = {'src': img_in, 'rotation_degrees': nd_rots, 'zoom_in': False, 'zoom_out': False}
        _ = mx.image.imrotate(**args)
        args = {'src': img_in, 'rotation_degrees': nd_rots, 'zoom_in': False, 'zoom_out': True}
        _ = mx.image.imrotate(**args)
        args = {'src': img_in, 'rotation_degrees': nd_rots, 'zoom_in': True, 'zoom_out': False}
        _ = mx.image.imrotate(**args)

        # test if exceptions are correctly raised
        # batch exception - zoom_in=zoom_out=True
        img_in = mx.nd.random.uniform(0, 1, (5, 3, 30, 60), dtype=np.float32)
        nd_rots = mx.nd.array([1, 2, 3, 4, 5], dtype=np.float32)
        args = {'src': img_in, 'rotation_degrees': nd_rots, 'zoom_in': True, 'zoom_out': True}
        with pytest.raises(ValueError):
            mx.image.imrotate(**args)
        # single image exception - zoom_in=zoom_out=True
        img_in = mx.nd.random.uniform(0, 1, (3, 30, 60), dtype=np.float32)
        nd_rots = 11
        args = {'src': img_in, 'rotation_degrees': nd_rots, 'zoom_in': True, 'zoom_out': True}
        with pytest.raises(ValueError):
            mx.image.imrotate(**args)

        # batch of images with scalar rotation: every output matches the
        # single-image rotation computed above
        img_in = mx.nd.stack(nd_img, nd_img, nd_img)
        nd_rots = 6
        args = {'src': img_in, 'rotation_degrees': nd_rots, 'zoom_in': False, 'zoom_out': False}
        out = mx.image.imrotate(**args)
        for img in out:
            img = img.asnumpy().transpose((1, 2, 0))
            assert_almost_equal(scipy_rot[10:20, 20:40, :], img[10:20, 20:40, :])
        # single image exception - single image with vector rotation
        img_in = mx.nd.random.uniform(0, 1, (3, 30, 60), dtype=np.float32)
        nd_rots = mx.nd.array([1, 2, 3, 4, 5], dtype=np.float32)
        args = {'src': img_in, 'rotation_degrees': nd_rots, 'zoom_in': False, 'zoom_out': False}
        with pytest.raises(TypeError):
            mx.image.imrotate(**args)

    def test_random_rotate(self):
        # random_rotate must preserve the input shape for both single images
        # (CHW) and batches (NCHW).
        angle_limits = [-5., 5.]
        src_single_image = mx.nd.random.uniform(0, 1, (3, 30, 60),
                                                dtype=np.float32)
        out_single_image = mx.image.random_rotate(src_single_image,
                                                  angle_limits)
        self.assertEqual(out_single_image.shape, (3, 30, 60))
        src_batch_image = mx.nd.stack(src_single_image,
                                      src_single_image,
                                      src_single_image)
        out_batch_image = mx.image.random_rotate(src_batch_image,
                                                 angle_limits)
        self.assertEqual(out_batch_image.shape, (3, 3, 30, 60))
| 46.073661 | 127 | 0.59939 |
eae54590b0a874499e3cc264c7a64e4b3546945a
| 3,417 |
py
|
Python
|
ooobuild/lo/awt/uno_control_group_box_model.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/lo/awt/uno_control_group_box_model.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/lo/awt/uno_control_group_box_model.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.awt
import typing
from abc import abstractproperty
from .uno_control_model import UnoControlModel as UnoControlModel_c8ce0c58
if typing.TYPE_CHECKING:
from .font_descriptor import FontDescriptor as FontDescriptor_bc110c0a
from ..util.color import Color as Color_68e908c5
class UnoControlGroupBoxModel(UnoControlModel_c8ce0c58):
    """
    Service Class

    specifies the standard model of a UnoControlGroupBox.

    **since**

        OOo 3.1

    See Also:
        `API UnoControlGroupBoxModel <https://api.libreoffice.org/docs/idl/ref/servicecom_1_1sun_1_1star_1_1awt_1_1UnoControlGroupBoxModel.html>`_
    """
    # Auto-generated UNO metadata used by the ooobuild tooling.
    __ooo_ns__: str = 'com.sun.star.awt'
    __ooo_full_ns__: str = 'com.sun.star.awt.UnoControlGroupBoxModel'
    __ooo_type_name__: str = 'service'

    # NOTE(review): abc.abstractproperty has been deprecated since Python 3.3
    # in favour of @property + @abstractmethod; kept here because the file is
    # generated and consistency with sibling generated modules matters.
    @abstractproperty
    def Enabled(self) -> bool:
        """
        determines whether the control is enabled or disabled.
        """

    @abstractproperty
    def FontDescriptor(self) -> 'FontDescriptor_bc110c0a':
        """
        specifies the font attributes of the text in the control.
        """

    @abstractproperty
    def FontEmphasisMark(self) -> int:
        """
        specifies the com.sun.star.text.FontEmphasis value of the text in the control.
        """

    @abstractproperty
    def FontRelief(self) -> int:
        """
        specifies the com.sun.star.text.FontRelief value of the text in the control.
        """

    @abstractproperty
    def HelpText(self) -> str:
        """
        specifies the help text of the control.
        """

    @abstractproperty
    def HelpURL(self) -> str:
        """
        specifies the help URL of the control.
        """

    @abstractproperty
    def Label(self) -> str:
        """
        specifies the label of the control.
        """

    @abstractproperty
    def Printable(self) -> bool:
        """
        specifies that the control will be printed with the document.
        """

    @abstractproperty
    def TextColor(self) -> 'Color_68e908c5':
        """
        specifies the text color (RGB) of the control.
        """

    @abstractproperty
    def TextLineColor(self) -> 'Color_68e908c5':
        """
        specifies the text line color (RGB) of the control.
        """

    @abstractproperty
    def WritingMode(self) -> int:
        """
        denotes the writing mode used in the control, as specified in the com.sun.star.text.WritingMode2 constants group.

        Only com.sun.star.text.WritingMode2.LR_TB and com.sun.star.text.WritingMode2.RL_TB are supported at the moment.

        **since**

            OOo 3.1
        """
# Public API of this generated module.
__all__ = ['UnoControlGroupBoxModel']
| 28.239669 | 146 | 0.657302 |
fd425e02c260465ad9788485f3e5014d8c9ca4d1
| 2,725 |
py
|
Python
|
data/cirq_new/cirq_program/startCirq_pragma493.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/cirq_new/cirq_program/startCirq_pragma493.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/cirq_new/cirq_program/startCirq_pragma493.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=17
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
    """Point optimizer that rewrites every CZPowGate operation.

    Each CZ**t is replaced by a plain CZ followed by two layers of X gates on
    the same qubits; the two X layers cancel each other, so the net rewrite is
    CZ**t -> CZ.  NOTE(review): for t != 1 this does NOT preserve the circuit
    unitary -- presumably intentional in this auto-generated benchmark code,
    but confirm before reusing elsewhere.
    """
    def optimization_at(
            self,
            circuit: 'cirq.Circuit',
            index: int,
            op: 'cirq.Operation'
    ) -> Optional[cirq.PointOptimizationSummary]:
        # Only CZPowGate operations are rewritten; any other op falls through
        # and returns None (no optimization at this point).
        if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
            return cirq.PointOptimizationSummary(
                clear_span=1,
                clear_qubits=op.qubits,
                new_operations=[
                    cirq.CZ(*op.qubits),
                    cirq.X.on_each(*op.qubits),
                    cirq.X.on_each(*op.qubits),
                ]
            )
#thatsNoCode
def make_circuit(n: int, input_qubit):
    """Build the fixed 4-qubit benchmark circuit and measure all qubits.

    NOTE(review): parameter ``n`` is unused -- the gate sequence is hard-coded
    by the generator.  The trailing ``# number=k`` comments are generator
    bookkeeping and carry no semantic meaning.
    """
    c = cirq.Circuit()  # circuit begin

    c.append(cirq.H.on(input_qubit[0]))  # number=1
    c.append(cirq.H.on(input_qubit[1]))  # number=2
    c.append(cirq.H.on(input_qubit[1]))  # number=7
    c.append(cirq.H.on(input_qubit[2]))  # number=14
    c.append(cirq.X.on(input_qubit[1]))  # number=10
    c.append(cirq.H.on(input_qubit[2]))  # number=3
    c.append(cirq.H.on(input_qubit[3]))  # number=4
    c.append(cirq.H.on(input_qubit[0]))  # number=11
    c.append(cirq.CZ.on(input_qubit[3],input_qubit[0]))  # number=12
    c.append(cirq.H.on(input_qubit[0]))  # number=13
    c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0]))  # number=6
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0]))  # number=8
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0]))  # number=9
    c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0]))  # number=15
    c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0]))  # number=16
    # circuit end

    # All qubits measured together under the key read back in __main__.
    c.append(cirq.measure(*input_qubit, key='result'))
    return c
def bitstring(bits):
    """Render an iterable of bit-like values as a string of '0'/'1' chars."""
    digits = [str(int(bit)) for bit in bits]
    return ''.join(digits)
if __name__ == '__main__':
    qubit_count = 4

    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count, input_qubits)
    # NOTE(review): cirq.google / optimized_for_sycamore only exist in older
    # cirq releases -- confirm the pinned cirq version before running.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    # NOTE(review): zero repetitions yields an empty histogram; presumably
    # emitted by the code generator -- confirm whether a positive sample
    # count was intended.
    circuit_sample_count = 0

    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)

    frequencies = result.histogram(key='result', fold_func=bitstring)
    # Results are appended to a CSV next to the other generated benchmarks.
    writefile = open("../data/startCirq_pragma493.csv", "w+")
    print(format(frequencies), file=writefile)
    print("results end", file=writefile)

    print(circuit.__len__(), file=writefile)
    print(circuit, file=writefile)

    writefile.close()
| 32.831325 | 92 | 0.645505 |
b34fdf0a49caff104394b8985a7504d096b7396b
| 61,106 |
py
|
Python
|
osm_ro/vimconn_openvim.py
|
igordcard/RO
|
d9f2b88a86bb04c44f40008c719f6ff6cd7aea2f
|
[
"Apache-2.0"
] | null | null | null |
osm_ro/vimconn_openvim.py
|
igordcard/RO
|
d9f2b88a86bb04c44f40008c719f6ff6cd7aea2f
|
[
"Apache-2.0"
] | null | null | null |
osm_ro/vimconn_openvim.py
|
igordcard/RO
|
d9f2b88a86bb04c44f40008c719f6ff6cd7aea2f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
##
# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact with: [email protected]
##
'''
vimconnector implements all the methods to interact with openvim using the openvim API.
'''
__author__="Alfonso Tierno, Gerardo Garcia"
__date__ ="$26-aug-2014 11:09:29$"
import vimconn
import requests
import json
import yaml
import logging
import math
from openmano_schemas import id_schema, name_schema, nameshort_schema, description_schema, \
vlan1000_schema, integer0_schema
from jsonschema import validate as js_v, exceptions as js_e
from urllib import quote
'''contain the openvim virtual machine status to openmano status'''
# Translation table: openvim VM state -> openmano (RO) status vocabulary.
vmStatus2manoFormat={'ACTIVE':'ACTIVE',
                     'PAUSED':'PAUSED',
                     'SUSPENDED': 'SUSPENDED',
                     'INACTIVE':'INACTIVE',
                     'CREATING':'BUILD',
                     'ERROR':'ERROR','DELETED':'DELETED'
                     }
# Translation table: openvim network state -> openmano status vocabulary.
netStatus2manoFormat={'ACTIVE':'ACTIVE','INACTIVE':'INACTIVE','BUILD':'BUILD','ERROR':'ERROR','DELETED':'DELETED', 'DOWN':'DOWN'
                      }
# Minimal JSON schemas (draft-04) for the openvim objects embedded in REST
# responses: a host only requires "id"; images and servers require both
# "id" and "name".  id_schema / name_schema come from openmano_schemas.
host_schema = {
    "type":"object",
    "properties":{
        "id": id_schema,
        "name": name_schema,
    },
    "required": ["id"]
}
image_schema = {
    "type":"object",
    "properties":{
        "id": id_schema,
        "name": name_schema,
    },
    "required": ["id","name"]
}
server_schema = {
    "type":"object",
    "properties":{
        "id":id_schema,
        "name": name_schema,
    },
    "required": ["id","name"]
}
# Top-level schemas validating the bodies returned by the openvim REST API
# for host/image/server queries.
new_host_response_schema = {
    "title":"host response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type":"object",
    "properties":{
        "host": host_schema
    },
    "required": ["host"],
    "additionalProperties": False
}

get_images_response_schema = {
    "title":"openvim images response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type":"object",
    "properties":{
        "images":{
            "type":"array",
            "items": image_schema,
        }
    },
    "required": ["images"],
    "additionalProperties": False
}

get_hosts_response_schema = {
    "title":"openvim hosts response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type":"object",
    "properties":{
        "hosts":{
            "type":"array",
            "items": host_schema,
        }
    },
    "required": ["hosts"],
    "additionalProperties": False
}

get_host_detail_response_schema = new_host_response_schema # TODO: Content is not parsed yet

get_server_response_schema = {
    "title":"openvim server response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type":"object",
    "properties":{
        "servers":{
            "type":"array",
            "items": server_schema,
        }
    },
    "required": ["servers"],
    "additionalProperties": False
}
# Schemas for tenant- and network-creation responses.
new_tenant_response_schema = {
    "title":"tenant response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type":"object",
    "properties":{
        "tenant":{
            "type":"object",
            "properties":{
                "id": id_schema,
                "name": nameshort_schema,
                "description":description_schema,
                "enabled":{"type" : "boolean"}
            },
            "required": ["id"]
        }
    },
    "required": ["tenant"],
    "additionalProperties": False
}

new_network_response_schema = {
    "title":"network response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type":"object",
    "properties":{
        "network":{
            "type":"object",
            "properties":{
                "id":id_schema,
                "name":name_schema,
                "type":{"type":"string", "enum":["bridge_man","bridge_data","data", "ptp"]},
                "shared":{"type":"boolean"},
                "tenant_id":id_schema,
                "admin_state_up":{"type":"boolean"},
                "vlan":vlan1000_schema
            },
            "required": ["id"]
        }
    },
    "required": ["network"],
    "additionalProperties": False
}

# Kept for reference: identical to new_network_response_schema, currently unused.
# get_network_response_schema = {
#     "title":"get network response information schema",
#     "$schema": "http://json-schema.org/draft-04/schema#",
#     "type":"object",
#     "properties":{
#         "network":{
#             "type":"object",
#             "properties":{
#                 "id":id_schema,
#                 "name":name_schema,
#                 "type":{"type":"string", "enum":["bridge_man","bridge_data","data", "ptp"]},
#                 "shared":{"type":"boolean"},
#                 "tenant_id":id_schema,
#                 "admin_state_up":{"type":"boolean"},
#                 "vlan":vlan1000_schema
#             },
#             "required": ["id"]
#         }
#     },
#     "required": ["network"],
#     "additionalProperties": False
# }
# Schemas for port and flavor responses.
new_port_response_schema = {
    "title":"port response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type":"object",
    "properties":{
        "port":{
            "type":"object",
            "properties":{
                "id":id_schema,
            },
            "required": ["id"]
        }
    },
    "required": ["port"],
    "additionalProperties": False
}

get_flavor_response_schema = {
    "title":"openvim flavors response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type":"object",
    "properties":{
        "flavor":{
            "type":"object",
            "properties":{
                "id": id_schema,
                "name": name_schema,
                # "extended" carries the openvim EPA details (numas, etc.).
                "extended": {"type":"object"},
            },
            "required": ["id", "name"],
        }
    },
    "required": ["flavor"],
    "additionalProperties": False
}

new_flavor_response_schema = {
    "title":"flavor response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type":"object",
    "properties":{
        "flavor":{
            "type":"object",
            "properties":{
                "id":id_schema,
            },
            "required": ["id"]
        }
    },
    "required": ["flavor"],
    "additionalProperties": False
}
# Schema for GET /images/<id> responses.
get_image_response_schema = {
    "title":"openvim images response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type":"object",
    "properties":{
        "image":{
            "type":"object",
            "properties":{
                "id": id_schema,
                "name": name_schema,
            },
            "required": ["id", "name"],
        }
    },
    # BUG FIX: this previously read `"required": ["flavor"]` (copy-paste from
    # get_flavor_response_schema).  Combined with "additionalProperties":
    # False and the single "image" property above, the schema was
    # unsatisfiable: any body with "image" failed the required check and any
    # body with "flavor" failed additionalProperties.
    "required": ["image"],
    "additionalProperties": False
}
# Schema for POST /images responses: only the new image's id is required.
new_image_response_schema = {
    "title":"image response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type":"object",
    "properties":{
        "image":{
            "type":"object",
            "properties":{
                "id":id_schema,
            },
            "required": ["id"]
        }
    },
    "required": ["image"],
    "additionalProperties": False
}
# Schemas for VM-instance creation and processor-ranking responses.
new_vminstance_response_schema = {
    "title":"server response information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type":"object",
    "properties":{
        "server":{
            "type":"object",
            "properties":{
                "id":id_schema,
            },
            "required": ["id"]
        }
    },
    "required": ["server"],
    "additionalProperties": False
}

get_processor_rankings_response_schema = {
    "title":"processor rankings information schema",
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type":"object",
    "properties":{
        "rankings":{
            "type":"array",
            "items":{
                "type":"object",
                "properties":{
                    "model": description_schema,
                    "value": integer0_schema
                },
                "additionalProperties": False,
                "required": ["model","value"]
            }
        },
        "additionalProperties": False,
        "required": ["rankings"]
    }
}
class vimconnector(vimconn.vimconnector):
    def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,
                 log_level="DEBUG", config={}, persistent_info={}):
        """Create an openvim connector.

        ``tenant_id`` (when given) is cached directly; otherwise the tenant
        uuid is resolved lazily from ``tenant_name`` by _get_my_tenant().
        NOTE(review): the mutable defaults (config={}, persistent_info={})
        are shared across instances if ever mutated -- confirm callers always
        pass their own dicts.
        """
        vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, config)
        self.tenant = None
        # All requests use JSON bodies.
        self.headers_req = {'content-type': 'application/json'}
        self.logger = logging.getLogger('openmano.vim.openvim')
        self.persistent_info = persistent_info
        if tenant_id:
            self.tenant = tenant_id
    def __setitem__(self,index, value):
        '''Set an individual connector parameter by name.
        Throws TypeError, KeyError (from the parent implementation).
        '''
        if index=='tenant_id':
            self.tenant = value
        elif index=='tenant_name':
            # The tenant uuid is cached lazily; drop the cache so the next
            # _get_my_tenant() call resolves the new name.
            self.tenant = None
        vimconn.vimconnector.__setitem__(self,index, value)
    def _get_my_tenant(self):
        '''Obtain and cache the uuid of my tenant, resolving it from
        self.tenant_name on first use.

        Raises vimconnNotFoundException when no tenant matches the name,
        vimconnConflictException when the name is ambiguous, and
        vimconnUnexpectedResponse on malformed replies.
        '''
        if self.tenant:
            return self.tenant

        url = self.url+'/tenants?name='+ quote(self.tenant_name)
        self.logger.info("Getting VIM tenant_id GET %s", url)
        vim_response = requests.get(url, headers = self.headers_req)
        self._check_http_request_response(vim_response)
        try:
            tenant_list = vim_response.json()["tenants"]
            if len(tenant_list) == 0:
                raise vimconn.vimconnNotFoundException("No tenant found for name '%s'" % str(self.tenant_name))
            elif len(tenant_list) > 1:
                raise vimconn.vimconnConflictException ("More that one tenant found for name '%s'" % str(self.tenant_name))
            self.tenant = tenant_list[0]["id"]
            return self.tenant
        except Exception as e:
            raise vimconn.vimconnUnexpectedResponse("Get VIM tenant {} '{}'".format(type(e).__name__, str(e)))
    def _format_jsonerror(self,http_response):
        #DEPRECATED, to delete in the future
        # Best-effort extraction of openvim's error description; falls back to
        # the raw body when the response is not the expected JSON shape.
        # NOTE(review): bare `except` also swallows KeyboardInterrupt -- kept
        # as-is because the helper is slated for removal.
        try:
            data = http_response.json()
            return data["error"]["description"]
        except:
            return http_response.text
    def _format_in(self, http_response, schema):
        #DEPRECATED, to delete in the future
        # Parse the response body as JSON and validate it against `schema`.
        # Returns (True, parsed_data) on success, (False, error_tuple) on
        # schema violation.  Python 2 module: note the print statements.
        try:
            client_data = http_response.json()
            js_v(client_data, schema)
            #print "Input data: ", str(client_data)
            return True, client_data
        except js_e.ValidationError as exc:
            print "validate_in error, jsonschema exception ", exc.message, "at", exc.path
            return False, ("validate_in error, jsonschema exception ", exc.message, "at", exc.path)
def _remove_extra_items(self, data, schema):
deleted=[]
if type(data) is tuple or type(data) is list:
for d in data:
a= self._remove_extra_items(d, schema['items'])
if a is not None: deleted.append(a)
elif type(data) is dict:
for k in data.keys():
if 'properties' not in schema or k not in schema['properties'].keys():
del data[k]
deleted.append(k)
else:
a = self._remove_extra_items(data[k], schema['properties'][k])
if a is not None: deleted.append({k:a})
if len(deleted) == 0: return None
elif len(deleted) == 1: return deleted[0]
else: return deleted
def _format_request_exception(self, request_exception):
'''Transform a request exception into a vimconn exception'''
if isinstance(request_exception, js_e.ValidationError):
raise vimconn.vimconnUnexpectedResponse("jsonschema exception '{}' at '{}'".format(request_exception.message, request_exception.path))
elif isinstance(request_exception, requests.exceptions.HTTPError):
raise vimconn.vimconnUnexpectedResponse(type(request_exception).__name__ + ": " + str(request_exception))
else:
raise vimconn.vimconnConnectionException(type(request_exception).__name__ + ": " + str(request_exception))
def _check_http_request_response(self, request_response):
'''Raise a vimconn exception if the response is not Ok'''
if request_response.status_code >= 200 and request_response.status_code < 300:
return
if request_response.status_code == vimconn.HTTP_Unauthorized:
raise vimconn.vimconnAuthException(request_response.text)
elif request_response.status_code == vimconn.HTTP_Not_Found:
raise vimconn.vimconnNotFoundException(request_response.text)
elif request_response.status_code == vimconn.HTTP_Conflict:
raise vimconn.vimconnConflictException(request_response.text)
else:
raise vimconn.vimconnUnexpectedResponse("VIM HTTP_response {}, {}".format(request_response.status_code, str(request_response.text)))
def new_tenant(self,tenant_name,tenant_description):
'''Adds a new tenant to VIM with this name and description, returns the tenant identifier'''
#print "VIMConnector: Adding a new tenant to VIM"
payload_dict = {"tenant": {"name":tenant_name,"description": tenant_description, "enabled": True}}
payload_req = json.dumps(payload_dict)
try:
url = self.url_admin+'/tenants'
self.logger.info("Adding a new tenant %s", url)
vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
#print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
js_v(response, new_tenant_response_schema)
#r = self._remove_extra_items(response, new_tenant_response_schema)
#if r is not None:
# self.logger.warn("Warning: remove extra items %s", str(r))
tenant_id = response['tenant']['id']
return tenant_id
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
def delete_tenant(self,tenant_id):
'''Delete a tenant from VIM. Returns the old tenant identifier'''
try:
url = self.url_admin+'/tenants/'+tenant_id
self.logger.info("Delete a tenant DELETE %s", url)
vim_response = requests.delete(url, headers = self.headers_req)
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
#print json.dumps(vim_response.json(), indent=4)
return tenant_id
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
def get_tenant_list(self, filter_dict={}):
'''Obtain tenants of VIM
filter_dict can contain the following keys:
name: filter by tenant name
id: filter by tenant uuid/id
<other VIM specific>
Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
'''
filterquery=[]
filterquery_text=''
for k,v in filter_dict.iteritems():
filterquery.append(str(k)+'='+str(v))
if len(filterquery)>0:
filterquery_text='?'+ '&'.join(filterquery)
try:
url = self.url+'/tenants'+filterquery_text
self.logger.info("get_tenant_list GET %s", url)
vim_response = requests.get(url, headers = self.headers_req)
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
#print json.dumps(vim_response.json(), indent=4)
return vim_response.json()["tenants"]
except requests.exceptions.RequestException as e:
self._format_request_exception(e)
def new_network(self,net_name, net_type, ip_profile=None, shared=False, vlan=None): #, **vim_specific):
'''Adds a tenant network to VIM'''
'''Returns the network identifier'''
try:
self._get_my_tenant()
if net_type=="bridge":
net_type="bridge_data"
payload_req = {"name": net_name, "type": net_type, "tenant_id": self.tenant, "shared": shared}
if vlan:
payload_req["provider:vlan"] = vlan
# payload_req.update(vim_specific)
url = self.url+'/networks'
self.logger.info("Adding a new network POST: %s DATA: %s", url, str(payload_req))
vim_response = requests.post(url, headers = self.headers_req, data=json.dumps({"network": payload_req}) )
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
#print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
js_v(response, new_network_response_schema)
#r = self._remove_extra_items(response, new_network_response_schema)
#if r is not None:
# self.logger.warn("Warning: remove extra items %s", str(r))
network_id = response['network']['id']
return network_id
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
def get_network_list(self, filter_dict={}):
'''Obtain tenant networks of VIM
Filter_dict can be:
name: network name
id: network uuid
public: boolean
tenant_id: tenant
admin_state_up: boolean
status: 'ACTIVE'
Returns the network list of dictionaries
'''
try:
if 'tenant_id' not in filter_dict:
filter_dict["tenant_id"] = self._get_my_tenant()
elif not filter_dict["tenant_id"]:
del filter_dict["tenant_id"]
filterquery=[]
filterquery_text=''
for k,v in filter_dict.iteritems():
filterquery.append(str(k)+'='+str(v))
if len(filterquery)>0:
filterquery_text='?'+ '&'.join(filterquery)
url = self.url+'/networks'+filterquery_text
self.logger.info("Getting network list GET %s", url)
vim_response = requests.get(url, headers = self.headers_req)
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
#print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
return response['networks']
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
def get_network(self, net_id):
'''Obtain network details of network id'''
try:
url = self.url+'/networks/'+net_id
self.logger.info("Getting network GET %s", url)
vim_response = requests.get(url, headers = self.headers_req)
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
#print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
return response['network']
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
def delete_network(self, net_id):
'''Deletes a tenant network from VIM'''
'''Returns the network identifier'''
try:
self._get_my_tenant()
url = self.url+'/networks/'+net_id
self.logger.info("Deleting VIM network DELETE %s", url)
vim_response = requests.delete(url, headers=self.headers_req)
self._check_http_request_response(vim_response)
#self.logger.debug(vim_response.text)
#print json.dumps(vim_response.json(), indent=4)
return net_id
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
def get_flavor(self, flavor_id):
'''Obtain flavor details from the VIM'''
try:
self._get_my_tenant()
url = self.url+'/'+self.tenant+'/flavors/'+flavor_id
self.logger.info("Getting flavor GET %s", url)
vim_response = requests.get(url, headers = self.headers_req)
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
#print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
js_v(response, get_flavor_response_schema)
r = self._remove_extra_items(response, get_flavor_response_schema)
if r is not None:
self.logger.warn("Warning: remove extra items %s", str(r))
return response['flavor']
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
def new_flavor(self, flavor_data):
'''Adds a tenant flavor to VIM'''
'''Returns the flavor identifier'''
try:
new_flavor_dict = flavor_data.copy()
for device in new_flavor_dict.get('extended', {}).get('devices', ()):
if 'image name' in device:
del device['image name']
numas = new_flavor_dict.get('extended', {}).get('numas')
if numas:
numa = numas[0]
# translate memory, cpus to EPA
if "cores" not in numa and "threads" not in numa and "paired-threads" not in numa:
numa["paired-threads"] = new_flavor_dict["vcpus"]
if "memory" not in numa:
numa["memory"] = int(math.ceil(new_flavor_dict["ram"]/1024.0))
for iface in numa.get("interfaces", ()):
if not iface.get("bandwidth"):
iface["bandwidth"] = "1 Mbps"
new_flavor_dict["name"] = flavor_data["name"][:64]
self._get_my_tenant()
payload_req = json.dumps({'flavor': new_flavor_dict})
url = self.url+'/'+self.tenant+'/flavors'
self.logger.info("Adding a new VIM flavor POST %s", url)
vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
#print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
js_v(response, new_flavor_response_schema)
r = self._remove_extra_items(response, new_flavor_response_schema)
if r is not None:
self.logger.warn("Warning: remove extra items %s", str(r))
flavor_id = response['flavor']['id']
return flavor_id
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
def delete_flavor(self,flavor_id):
'''Deletes a tenant flavor from VIM'''
'''Returns the old flavor_id'''
try:
self._get_my_tenant()
url = self.url+'/'+self.tenant+'/flavors/'+flavor_id
self.logger.info("Deleting VIM flavor DELETE %s", url)
vim_response = requests.delete(url, headers=self.headers_req)
self._check_http_request_response(vim_response)
#self.logger.debug(vim_response.text)
#print json.dumps(vim_response.json(), indent=4)
return flavor_id
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
def get_image(self, image_id):
'''Obtain image details from the VIM'''
try:
self._get_my_tenant()
url = self.url+'/'+self.tenant+'/images/'+image_id
self.logger.info("Getting image GET %s", url)
vim_response = requests.get(url, headers = self.headers_req)
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
#print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
js_v(response, get_image_response_schema)
r = self._remove_extra_items(response, get_image_response_schema)
if r is not None:
self.logger.warn("Warning: remove extra items %s", str(r))
return response['image']
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
def new_image(self,image_dict):
''' Adds a tenant image to VIM, returns image_id'''
try:
self._get_my_tenant()
new_image_dict={'name': image_dict['name'][:64]}
if image_dict.get('description'):
new_image_dict['description'] = image_dict['description']
if image_dict.get('metadata'):
new_image_dict['metadata'] = yaml.load(image_dict['metadata'])
if image_dict.get('location'):
new_image_dict['path'] = image_dict['location']
payload_req = json.dumps({"image":new_image_dict})
url=self.url + '/' + self.tenant + '/images'
self.logger.info("Adding a new VIM image POST %s", url)
vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
#print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
js_v(response, new_image_response_schema)
r = self._remove_extra_items(response, new_image_response_schema)
if r is not None:
self.logger.warn("Warning: remove extra items %s", str(r))
image_id = response['image']['id']
return image_id
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
def delete_image(self, image_id):
'''Deletes a tenant image from VIM'''
'''Returns the deleted image_id'''
try:
self._get_my_tenant()
url = self.url + '/'+ self.tenant +'/images/'+image_id
self.logger.info("Deleting VIM image DELETE %s", url)
vim_response = requests.delete(url, headers=self.headers_req)
self._check_http_request_response(vim_response)
#self.logger.debug(vim_response.text)
#print json.dumps(vim_response.json(), indent=4)
return image_id
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
def get_image_id_from_path(self, path):
'''Get the image id from image path in the VIM database. Returns the image_id'''
try:
self._get_my_tenant()
url=self.url + '/' + self.tenant + '/images?path='+quote(path)
self.logger.info("Getting images GET %s", url)
vim_response = requests.get(url)
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
#print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
js_v(response, get_images_response_schema)
#r = self._remove_extra_items(response, get_images_response_schema)
#if r is not None:
# self.logger.warn("Warning: remove extra items %s", str(r))
if len(response['images'])==0:
raise vimconn.vimconnNotFoundException("Image not found at VIM with path '%s'", path)
elif len(response['images'])>1:
raise vimconn.vimconnConflictException("More than one image found at VIM with path '%s'", path)
return response['images'][0]['id']
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
def get_image_list(self, filter_dict={}):
'''Obtain tenant images from VIM
Filter_dict can be:
name: image name
id: image uuid
checksum: image checksum
location: image path
Returns the image list of dictionaries:
[{<the fields at Filter_dict plus some VIM specific>}, ...]
List can be empty
'''
try:
self._get_my_tenant()
filterquery=[]
filterquery_text=''
for k,v in filter_dict.iteritems():
filterquery.append(str(k)+'='+str(v))
if len(filterquery)>0:
filterquery_text='?'+ '&'.join(filterquery)
url = self.url+'/'+self.tenant+'/images'+filterquery_text
self.logger.info("Getting image list GET %s", url)
vim_response = requests.get(url, headers = self.headers_req)
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
#print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
return response['images']
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
    def new_vminstancefromJSON(self, vm_data):
        """Adds a VM instance to VIM from a raw JSON payload.

        Legacy status-tuple API: returns (http_status, instance_id) on success
        or (-error_code, error_text) on failure, unlike the exception-based
        methods above.
        """
        try:
            self._get_my_tenant()
        except Exception as e:
            return -vimconn.HTTP_Not_Found, str(e)
        print "VIMConnector: Adding a new VM instance from JSON to VIM"
        payload_req = vm_data
        try:
            vim_response = requests.post(self.url+'/'+self.tenant+'/servers', headers = self.headers_req, data=payload_req)
        except requests.exceptions.RequestException as e:
            print "new_vminstancefromJSON Exception: ", e.args
            return -vimconn.HTTP_Not_Found, str(e.args[0])
        print vim_response
        #print vim_response.status_code
        if vim_response.status_code == 200:
            #print vim_response.json()
            #print json.dumps(vim_response.json(), indent=4)
            # NOTE(review): validates against new_image_response_schema but then reads
            # ['server'] — looks like it should use the vm-instance schema; confirm
            # before changing (method appears deprecated)
            res,http_content = self._format_in(vim_response, new_image_response_schema)
            #print http_content
            if res:
                r = self._remove_extra_items(http_content, new_image_response_schema)
                if r is not None: print "Warning: remove extra items ", r
                #print http_content
                vminstance_id = http_content['server']['id']
                print "Tenant image id: ",vminstance_id
                return vim_response.status_code,vminstance_id
            else: return -vimconn.HTTP_Bad_Request,http_content
        else:
            #print vim_response.text
            jsonerror = self._format_jsonerror(vim_response)
            text = 'Error in VIM "%s": not possible to add new vm instance. HTTP Response: %d. Error: %s' % (self.url, vim_response.status_code, jsonerror)
            #print text
            return -vim_response.status_code,text
    def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
                       availability_zone_index=None, availability_zone_list=None):
        """Adds a VM instance to VIM
        Params:
            start: indicates if VM must start or boot in pause mode. Ignored
            image_id,flavor_id: image and flavor uuid
            net_list: list of interfaces, each one is a dictionary with:
                name:
                net_id: network uuid to connect
                vpci: virtual vcpi to assign
                model: interface model, virtio, e2000, ...
                mac_address:
                use: 'data', 'bridge',  'mgmt'
                type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
                vim_id: filled/added by this function
                #TODO ip, security groups
        Returns a tuple with the instance identifier and created_items or raises an exception on error
            created_items can be None or a dictionary where this method can include key-values that will be passed to
            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
            as not present.
        """
        self.logger.debug("new_vminstance input: image='%s' flavor='%s' nics='%s'", image_id, flavor_id, str(net_list))
        try:
            self._get_my_tenant()
#            net_list = []
#            for k,v in net_dict.items():
#                print k,v
#                net_list.append('{"name":"' + k + '", "uuid":"' + v + '"}')
#            net_list_string = ', '.join(net_list)
            # translate each RO interface descriptor into an openvim "networks" entry;
            # interfaces without a net_id are skipped (not attached at creation)
            virtio_net_list=[]
            for net in net_list:
                if not net.get("net_id"):
                    continue
                net_dict = {'uuid': net["net_id"]}
                if net.get("type"):
                    # map RO type names to openvim's VF/PF shorthand
                    if net["type"] == "SR-IOV":
                        net_dict["type"] = "VF"
                    elif net["type"] == "PCI-PASSTHROUGH":
                        net_dict["type"] = "PF"
                    else:
                        net_dict["type"] = net["type"]
                if net.get("name"):
                    net_dict["name"] = net["name"]
                if net.get("vpci"):
                    net_dict["vpci"] = net["vpci"]
                if net.get("model"):
                    if net["model"] == "VIRTIO":
                        net_dict["model"] = "virtio"
                    else:
                        net_dict["model"] = net["model"]
                if net.get("mac_address"):
                    net_dict["mac_address"] = net["mac_address"]
                virtio_net_list.append(net_dict)
            payload_dict={  "name":        name[:64],
                            "description": description,
                            "imageRef":    image_id,
                            "flavorRef":   flavor_id,
                            "networks": virtio_net_list
                        }
            if start != None:
                payload_dict["start"] = start
            payload_req = json.dumps({"server": payload_dict})
            url = self.url+'/'+self.tenant+'/servers'
            self.logger.info("Adding a new vm POST %s DATA %s", url, payload_req)
            vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
            self._check_http_request_response(vim_response)
            self.logger.debug(vim_response.text)
            #print json.dumps(vim_response.json(), indent=4)
            response = vim_response.json()
            js_v(response, new_vminstance_response_schema)
            #r = self._remove_extra_items(response, new_vminstance_response_schema)
            #if r is not None:
            #    self.logger.warn("Warning: remove extra items %s", str(r))
            vminstance_id = response['server']['id']

            # fill net_list[i]['vim_id'] with the interface ids the VIM assigned,
            # matching by name when available, otherwise by net_id
            #connect data plane interfaces to network
            for net in net_list:
                if net["type"]=="virtual":
                    if not net.get("net_id"):
                        continue
                    for iface in response['server']['networks']:
                        if "name" in net:
                            if net["name"]==iface["name"]:
                                net["vim_id"] = iface['iface_id']
                                break
                        elif "net_id" in net:
                            if net["net_id"]==iface["net_id"]:
                                net["vim_id"] = iface['iface_id']
                                break
                else: #dataplane
                    # NOTE(review): assumes every dataplane entry carries a 'name'
                    # key — a missing key raises KeyError here; confirm callers
                    for numa in response['server'].get('extended',{}).get('numas',() ):
                        for iface in numa.get('interfaces',() ):
                            if net['name'] == iface['name']:
                                net['vim_id'] = iface['iface_id']
                                #Code bellow is not needed, current openvim connect dataplane interfaces
                                #if net.get("net_id"):
                                ##connect dataplane interface
                                #    result, port_id = self.connect_port_network(iface['iface_id'], net["net_id"])
                                #    if result < 0:
                                #        error_text = "Error attaching port %s to network %s: %s." % (iface['iface_id'], net["net_id"], port_id)
                                #        print "new_vminstance: " + error_text
                                #        self.delete_vminstance(vminstance_id)
                                #        return result, error_text
                                break
            return vminstance_id, None
        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
            self._format_request_exception(e)
def get_vminstance(self, vm_id):
'''Returns the VM instance information from VIM'''
try:
self._get_my_tenant()
url = self.url+'/'+self.tenant+'/servers/'+vm_id
self.logger.info("Getting vm GET %s", url)
vim_response = requests.get(url, headers = self.headers_req)
vim_response = requests.get(url, headers = self.headers_req)
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
#print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
js_v(response, new_vminstance_response_schema)
#r = self._remove_extra_items(response, new_vminstance_response_schema)
#if r is not None:
# self.logger.warn("Warning: remove extra items %s", str(r))
return response['server']
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
def delete_vminstance(self, vm_id, created_items=None):
'''Removes a VM instance from VIM, returns the deleted vm_id'''
try:
self._get_my_tenant()
url = self.url+'/'+self.tenant+'/servers/'+vm_id
self.logger.info("Deleting VIM vm DELETE %s", url)
vim_response = requests.delete(url, headers=self.headers_req)
self._check_http_request_response(vim_response)
#self.logger.debug(vim_response.text)
#print json.dumps(vim_response.json(), indent=4)
return vm_id
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
    def refresh_vms_status(self, vm_list):
        """Refreshes the status of the virtual machines.

        Params: vm_list, the list of VM uuids to query.
        Returns {vm_id: {'status': ..., 'error_msg': ..., 'vim_info': ...,
        'interfaces': [...]}} where status is a MANO-format state, or
        DELETED / VIM_ERROR when the VM cannot be queried.
        """
        try:
            self._get_my_tenant()
        except requests.exceptions.RequestException as e:
            self._format_request_exception(e)
        vm_dict={}
        for vm_id in vm_list:
            vm={}
            #print "VIMConnector refresh_tenant_vms and nets: Getting tenant VM instance information from VIM"
            try:
                url = self.url+'/'+self.tenant+'/servers/'+ vm_id
                self.logger.info("Getting vm GET %s", url)
                vim_response = requests.get(url, headers = self.headers_req)
                self._check_http_request_response(vim_response)
                response = vim_response.json()
                js_v(response, new_vminstance_response_schema)
                # map the VIM status to MANO format; unknown states become OTHER
                if response['server']['status'] in vmStatus2manoFormat:
                    vm['status'] = vmStatus2manoFormat[ response['server']['status'] ]
                else:
                    vm['status'] = "OTHER"
                    vm['error_msg'] = "VIM status reported " + response['server']['status']
                if response['server'].get('last_error'):
                    vm['error_msg'] = response['server']['last_error']
                vm["vim_info"] = yaml.safe_dump(response['server'])
                #get interfaces info
                try:
                    management_ip = False
                    url2 = self.url+'/ports?device_id='+ quote(vm_id)
                    self.logger.info("Getting PORTS GET %s", url2)
                    vim_response2 = requests.get(url2, headers = self.headers_req)
                    self._check_http_request_response(vim_response2)
                    client_data = vim_response2.json()
                    if isinstance(client_data.get("ports"), list):
                        vm["interfaces"]=[]
                        for port in client_data.get("ports"):
                            interface={}
                            interface['vim_info'] = yaml.safe_dump(port)
                            interface["mac_address"] = port.get("mac_address")
                            interface["vim_net_id"] = port.get("network_id")
                            interface["vim_interface_id"] = port["id"]
                            interface["ip_address"] = port.get("ip_address")
                            # NOTE(review): "0.0.0.0" is normalized to None but still
                            # counts as a management IP (flag set first) — confirm intended
                            if interface["ip_address"]:
                                management_ip = True
                            if interface["ip_address"] == "0.0.0.0":
                                interface["ip_address"] = None
                            vm["interfaces"].append(interface)
                except Exception as e:
                    # port query failure is non-fatal: keep the VM status, log and continue
                    self.logger.error("refresh_vms_and_nets. Port get %s: %s", type(e).__name__, str(e))
                if vm['status'] == "ACTIVE" and not management_ip:
                    vm['status'] = "ACTIVE:NoMgmtIP"
            except vimconn.vimconnNotFoundException as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm['status'] = "DELETED"
                vm['error_msg'] = str(e)
            except (requests.exceptions.RequestException, js_e.ValidationError, vimconn.vimconnException) as e:
                self.logger.error("Exception getting vm status: %s", str(e))
                vm['status'] = "VIM_ERROR"
                vm['error_msg'] = str(e)
            vm_dict[vm_id] = vm
        return vm_dict
    def refresh_nets_status(self, net_list):
        '''Get the status of the networks
           Params: the list of network identifiers
           Returns a dictionary with:
                net_id:         #VIM id of this network
                    status:     #Mandatory. Text with one of:
                                #  DELETED (not found at vim)
                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                                #  OTHER (Vim reported other status not understood)
                                #  ERROR (VIM indicates an ERROR status)
                                #  ACTIVE, INACTIVE, DOWN (admin down),
                                #  BUILD (on building process)
                                #
                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
        '''
        try:
            self._get_my_tenant()
        except requests.exceptions.RequestException as e:
            self._format_request_exception(e)
        net_dict={}
        for net_id in net_list:
            net = {}
            #print "VIMConnector refresh_tenant_vms_and_nets: Getting tenant network from VIM (tenant: " + str(self.tenant) + "): "
            try:
                net_vim = self.get_network(net_id)
                # map the VIM status to MANO format; unknown states become OTHER
                if net_vim['status'] in netStatus2manoFormat:
                    net["status"] = netStatus2manoFormat[ net_vim['status'] ]
                else:
                    net["status"] = "OTHER"
                    net["error_msg"] = "VIM status reported " + net_vim['status']
                # an administratively disabled network is reported as DOWN
                if net["status"] == "ACTIVE" and not net_vim['admin_state_up']:
                    net["status"] = "DOWN"
                if net_vim.get('last_error'):
                    net['error_msg'] = net_vim['last_error']
                net["vim_info"] = yaml.safe_dump(net_vim)
            except vimconn.vimconnNotFoundException as e:
                self.logger.error("Exception getting net status: %s", str(e))
                net['status'] = "DELETED"
                net['error_msg'] = str(e)
            except (requests.exceptions.RequestException, js_e.ValidationError, vimconn.vimconnException) as e:
                self.logger.error("Exception getting net status: %s", str(e))
                net['status'] = "VIM_ERROR"
                net['error_msg'] = str(e)
            net_dict[net_id] = net
        return net_dict
def action_vminstance(self, vm_id, action_dict, created_items={}):
'''Send and action over a VM instance from VIM'''
'''Returns the status'''
try:
self._get_my_tenant()
if "console" in action_dict:
raise vimconn.vimconnException("getting console is not available at openvim", http_code=vimconn.HTTP_Service_Unavailable)
url = self.url+'/'+self.tenant+'/servers/'+vm_id+"/action"
self.logger.info("Action over VM instance POST %s", url)
vim_response = requests.post(url, headers = self.headers_req, data=json.dumps(action_dict) )
self._check_http_request_response(vim_response)
return None
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
#NOT USED METHODS in current version
    def host_vim2gui(self, host, server_dict):
        '''Transform host dictionary from VIM format to GUI format,
        and append to the server_dict
        '''
        if type(server_dict) is not dict:
            print 'vimconnector.host_vim2gui() ERROR, param server_dict must be a dictionary'
            return
        # RAD: per-numa resources (memory, cpus, ports); occupation: what is consumed
        RAD={}
        occupation={}
        for numa in host['host']['numas']:
            RAD_item={}
            occupation_item={}
            #memory
            RAD_item['memory']={'size': str(numa['memory'])+'GB', 'eligible': str(numa['hugepages'])+'GB'}
            occupation_item['memory']= str(numa['hugepages_consumed'])+'GB'
            #cpus
            RAD_item['cpus']={}
            RAD_item['cpus']['cores'] = []
            RAD_item['cpus']['eligible_cores'] = []
            occupation_item['cores']=[]
            # Python-2 integer division: assumes two hw threads per core — TODO confirm
            for _ in range(0, len(numa['cores']) / 2):
                RAD_item['cpus']['cores'].append( [] )
            for core in numa['cores']:
                # group thread ids by core; a core with no 'status' is eligible,
                # one with an 'instance_id' is occupied
                RAD_item['cpus']['cores'][core['core_id']].append(core['thread_id'])
                if not 'status' in core: RAD_item['cpus']['eligible_cores'].append(core['thread_id'])
                if 'instance_id' in core: occupation_item['cores'].append(core['thread_id'])
            #ports
            RAD_item['ports']={}
            occupation_item['ports']={}
            for iface in numa['interfaces']:
                RAD_item['ports'][ iface['pci'] ] = 'speed:'+str(iface['Mbps'])+'M'
                occupation_item['ports'][ iface['pci'] ] = { 'occupied': str(100*iface['Mbps_consumed'] / iface['Mbps']) + "%" }
            RAD[ numa['numa_socket'] ] = RAD_item
            occupation[ numa['numa_socket'] ] = occupation_item
        server_dict[ host['host']['name'] ] = {'RAD':RAD, 'occupation':occupation}
    def get_hosts_info(self):
        '''Get the information of deployed hosts
        Returns the hosts content

        Legacy status-tuple API: (200, hosts_dict) on success, or
        (-error_code, error_text) / (HTTP_Internal_Server_Error, detail)
        on failure. Hosts whose detail query fails are skipped.
        '''
        #obtain hosts list
        url=self.url+'/hosts'
        try:
            vim_response = requests.get(url)
        except requests.exceptions.RequestException as e:
            print "get_hosts_info Exception: ", e.args
            return -vimconn.HTTP_Not_Found, str(e.args[0])
        print "vim get", url, "response:",  vim_response.status_code, vim_response.json()
        #print vim_response.status_code
        #print json.dumps(vim_response.json(), indent=4)
        if vim_response.status_code != 200:
            #TODO: get error
            print 'vimconnector.get_hosts_info error getting host list %d %s' %(vim_response.status_code, vim_response.json())
            return -vim_response.status_code, "Error getting host list"
        res,hosts = self._format_in(vim_response, get_hosts_response_schema)
        if res==False:
            print "vimconnector.get_hosts_info error parsing GET HOSTS vim response", hosts
            return vimconn.HTTP_Internal_Server_Error, hosts
        #obtain hosts details
        hosts_dict={}
        for host in hosts['hosts']:
            url=self.url+'/hosts/'+host['id']
            try:
                vim_response = requests.get(url)
            except requests.exceptions.RequestException as e:
                print "get_hosts_info Exception: ", e.args
                return -vimconn.HTTP_Not_Found, str(e.args[0])
            print "vim get", url, "response:",  vim_response.status_code, vim_response.json()
            if vim_response.status_code != 200:
                print 'vimconnector.get_hosts_info error getting detailed host %d %s' %(vim_response.status_code, vim_response.json())
                continue
            res,host_detail = self._format_in(vim_response, get_host_detail_response_schema)
            if res==False:
                print "vimconnector.get_hosts_info error parsing GET HOSTS/%s vim response" % host['id'], host_detail
                continue
            #print 'host id '+host['id'], json.dumps(host_detail, indent=4)
            self.host_vim2gui(host_detail, hosts_dict)
        return 200, hosts_dict
    def get_hosts(self, vim_tenant):
        '''Get the hosts and deployed instances
        Returns the hosts content

        Legacy status-tuple API: (200, hosts_list) on success, each host
        augmented with an 'instances' list; (-error_code, error_text) on
        failure. Hosts whose server query fails are left without 'instances'.
        '''
        #obtain hosts list
        url=self.url+'/hosts'
        try:
            vim_response = requests.get(url)
        except requests.exceptions.RequestException as e:
            print "get_hosts Exception: ", e.args
            return -vimconn.HTTP_Not_Found, str(e.args[0])
        print "vim get", url, "response:",  vim_response.status_code, vim_response.json()
        #print vim_response.status_code
        #print json.dumps(vim_response.json(), indent=4)
        if vim_response.status_code != 200:
            #TODO: get error
            print 'vimconnector.get_hosts error getting host list %d %s' %(vim_response.status_code, vim_response.json())
            return -vim_response.status_code, "Error getting host list"
        res,hosts = self._format_in(vim_response, get_hosts_response_schema)
        if res==False:
            print "vimconnector.get_host error parsing GET HOSTS vim response", hosts
            return vimconn.HTTP_Internal_Server_Error, hosts
        #obtain instances from hosts
        for host in hosts['hosts']:
            url=self.url+'/' + vim_tenant + '/servers?hostId='+host['id']
            try:
                vim_response = requests.get(url)
            except requests.exceptions.RequestException as e:
                print "get_hosts Exception: ", e.args
                return -vimconn.HTTP_Not_Found, str(e.args[0])
            print "vim get", url, "response:",  vim_response.status_code, vim_response.json()
            if vim_response.status_code != 200:
                print 'vimconnector.get_hosts error getting instances at host %d %s' %(vim_response.status_code, vim_response.json())
                continue
            res,servers = self._format_in(vim_response, get_server_response_schema)
            if res==False:
                print "vimconnector.get_host error parsing GET SERVERS/%s vim response" % host['id'], servers
                continue
            #print 'host id '+host['id'], json.dumps(host_detail, indent=4)
            host['instances'] = servers['servers']
        return 200, hosts['hosts']
    def get_processor_rankings(self):
        '''Get the processor rankings in the VIM database

        Legacy status-tuple API: returns (status, rankings_list); status is
        the boolean from _format_in on success or -error_code on failure.
        '''
        url=self.url+'/processor_ranking'
        try:
            vim_response = requests.get(url)
        except requests.exceptions.RequestException as e:
            print "get_processor_rankings Exception: ", e.args
            return -vimconn.HTTP_Not_Found, str(e.args[0])
        print "vim get", url, "response:", vim_response.status_code, vim_response.json()
        #print vim_response.status_code
        #print json.dumps(vim_response.json(), indent=4)
        if vim_response.status_code != 200:
            #TODO: get error
            print 'vimconnector.get_processor_rankings error getting processor rankings %d %s' %(vim_response.status_code, vim_response.json())
            return -vim_response.status_code, "Error getting processor rankings"
        res,rankings = self._format_in(vim_response, get_processor_rankings_response_schema)
        return res, rankings['rankings']
def new_host(self, host_data):
'''Adds a new host to VIM'''
'''Returns status code of the VIM response'''
payload_req = host_data
try:
url = self.url_admin+'/hosts'
self.logger.info("Adding a new host POST %s", url)
vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
self._check_http_request_response(vim_response)
self.logger.debug(vim_response.text)
#print json.dumps(vim_response.json(), indent=4)
response = vim_response.json()
js_v(response, new_host_response_schema)
r = self._remove_extra_items(response, new_host_response_schema)
if r is not None:
self.logger.warn("Warning: remove extra items %s", str(r))
host_id = response['host']['id']
return host_id
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
    def new_external_port(self, port_data):
        '''Adds a external port to VIM'''
        '''Returns the port identifier'''
        #TODO change to logging exception code policies
        print "VIMConnector: Adding a new external port"
        payload_req = port_data
        try:
            # External ports are created through the admin endpoint.
            vim_response = requests.post(self.url_admin+'/ports', headers = self.headers_req, data=payload_req)
        except requests.exceptions.RequestException as e:
            # NOTE(review): the format string "new_external_port Exception: "
            # has no placeholder, so str(e) is passed as an unused lazy-format
            # argument and the exception text is likely dropped -- confirm.
            self.logger.error("new_external_port Exception: ", str(e))
            return -vimconn.HTTP_Not_Found, str(e.args[0])
        print vim_response
        #print vim_response.status_code
        if vim_response.status_code == 200:
        #print vim_response.json()
        #print json.dumps(vim_response.json(), indent=4)
            # Validate response body, drop unexpected keys, then return the id.
            res, http_content = self._format_in(vim_response, new_port_response_schema)
            #print http_content
            if res:
                r = self._remove_extra_items(http_content, new_port_response_schema)
                if r is not None: print "Warning: remove extra items ", r
                #print http_content
                port_id = http_content['port']['id']
                print "Port id: ",port_id
                return vim_response.status_code,port_id
            else: return -vimconn.HTTP_Bad_Request,http_content
        else:
            #print vim_response.text
            # Non-200: extract the VIM error text and report it with a negated
            # status code (legacy error convention).
            jsonerror = self._format_jsonerror(vim_response)
            text = 'Error in VIM "%s": not possible to add new external port. HTTP Response: %d. Error: %s' % (self.url_admin, vim_response.status_code, jsonerror)
            #print text
            return -vim_response.status_code,text
    def new_external_network(self,net_name,net_type):
        '''Adds a external network to VIM (shared)'''
        '''Returns the network identifier'''
        #TODO change to logging exception code policies
        print "VIMConnector: Adding external shared network to VIM (type " + net_type + "): "+ net_name
        # JSON body is built by string concatenation; net_name/net_type are
        # interpolated verbatim, so they must not contain quotes.
        payload_req = '{"network":{"name": "' + net_name + '","shared":true,"type": "' + net_type + '"}}'
        try:
            vim_response = requests.post(self.url+'/networks', headers = self.headers_req, data=payload_req)
        except requests.exceptions.RequestException as e:
            # NOTE(review): the format string has no placeholder, so e.args is
            # passed as an unused lazy-format argument -- confirm intent.
            self.logger.error( "new_external_network Exception: ", e.args)
            return -vimconn.HTTP_Not_Found, str(e.args[0])
        print vim_response
        #print vim_response.status_code
        if vim_response.status_code == 200:
        #print vim_response.json()
        #print json.dumps(vim_response.json(), indent=4)
            # Validate response body, drop unexpected keys, then return the id.
            res,http_content = self._format_in(vim_response, new_network_response_schema)
            #print http_content
            if res:
                r = self._remove_extra_items(http_content, new_network_response_schema)
                if r is not None: print "Warning: remove extra items ", r
                #print http_content
                network_id = http_content['network']['id']
                print "Network id: ",network_id
                return vim_response.status_code,network_id
            else: return -vimconn.HTTP_Bad_Request,http_content
        else:
            #print vim_response.text
            # Non-200: extract the VIM error text and report it with a negated
            # status code (legacy error convention).
            jsonerror = self._format_jsonerror(vim_response)
            text = 'Error in VIM "%s": not possible to add new external network. HTTP Response: %d. Error: %s' % (self.url, vim_response.status_code, jsonerror)
            #print text
            return -vim_response.status_code,text
    def connect_port_network(self, port_id, network_id, admin=False):
        '''Connects a external port to a network'''
        '''Returns status code of the VIM response'''
        #TODO change to logging exception code policies
        print "VIMConnector: Connecting external port to network"
        # JSON body built by concatenation; network_id is interpolated verbatim.
        payload_req = '{"port":{"network_id":"' + network_id + '"}}'
        # admin=True routes the request through the admin endpoint, which may
        # be absent for this datacenter.
        if admin:
            if self.url_admin==None:
                return -vimconn.HTTP_Unauthorized, "datacenter cannot contain admin URL"
            url= self.url_admin
        else:
            url= self.url
        try:
            vim_response = requests.put(url +'/ports/'+port_id, headers = self.headers_req, data=payload_req)
        except requests.exceptions.RequestException as e:
            print "connect_port_network Exception: ", e.args
            return -vimconn.HTTP_Not_Found, str(e.args[0])
        print vim_response
        #print vim_response.status_code
        if vim_response.status_code == 200:
        #print vim_response.json()
        #print json.dumps(vim_response.json(), indent=4)
            # Validate response body, drop unexpected keys, then return the id.
            res,http_content = self._format_in(vim_response, new_port_response_schema)
            #print http_content
            if res:
                r = self._remove_extra_items(http_content, new_port_response_schema)
                if r is not None: print "Warning: remove extra items ", r
                #print http_content
                port_id = http_content['port']['id']
                print "Port id: ",port_id
                return vim_response.status_code,port_id
            else: return -vimconn.HTTP_Bad_Request,http_content
        else:
            print vim_response.text
            # Non-200: extract the VIM error text and report it with a negated
            # status code (legacy error convention).
            jsonerror = self._format_jsonerror(vim_response)
            text = 'Error in VIM "%s": not possible to connect external port to network. HTTP Response: %d. Error: %s' % (self.url_admin, vim_response.status_code, jsonerror)
            print text
            return -vim_response.status_code,text
| 45.330861 | 174 | 0.579256 |
4463314721311911ab2467053820a7ea25747474
| 1,704 |
py
|
Python
|
example.py
|
ruandocini/pyScopus
|
cec3477301056524c8c83ab124042899692a5263
|
[
"MIT"
] | null | null | null |
example.py
|
ruandocini/pyScopus
|
cec3477301056524c8c83ab124042899692a5263
|
[
"MIT"
] | null | null | null |
example.py
|
ruandocini/pyScopus
|
cec3477301056524c8c83ab124042899692a5263
|
[
"MIT"
] | null | null | null |
import requests
import pandas as pd
import time
import extractor as ext
# Scopus advanced-search query: climate/weather terms AND "blue Amazon"
# (Brazilian coastal/marine) terms, restricted to the subject areas listed
# in the trailing LIMIT-TO clauses.
query = 'TITLE-ABS-KEY ( temperature OR rain* OR precipitat* OR warm* OR summer OR winter OR spring OR autumn OR meteorolog* OR weather OR forecast* OR humidit* OR wave OR wind OR tide* OR "ocean current" OR "maritime current" OR "sea current" OR season* OR climat* OR drought OR "El Nino" OR cooling OR heat* ) AND TITLE-ABS-KEY ( "blue amazon" OR ( brazil* W/3 ( coast* OR beach* OR "continental margin*" OR "exclusive economic zone" OR ocean* OR sea* OR shore OR marine* OR "atlantic ocean*" OR offshore OR island* OR maritime OR bay OR marine OR archipelago OR "continental shelf" OR estuar* ) ) OR ( petrobras AND ( coast* OR beach* OR "continental margin*" OR "exclusive economic zone" OR ocean* OR sea* OR shore OR marine* OR "atlantic ocean*" OR offshore OR island* OR maritime OR bay OR marine OR archipelago OR "continental shelf" OR estuar* ) ) ) AND ( LIMIT-TO ( SUBJAREA , "EART" ) OR LIMIT-TO ( SUBJAREA , "ENVI" ) OR LIMIT-TO ( SUBJAREA , "ENGI" ) OR LIMIT-TO ( SUBJAREA , "ENER" ) OR LIMIT-TO ( SUBJAREA , "MULT" ) OR LIMIT-TO ( SUBJAREA , "PHYS" ) OR LIMIT-TO ( SUBJAREA , "COMP" ) OR LIMIT-TO ( SUBJAREA , "BUSI" ) OR LIMIT-TO ( SUBJAREA , "MATH" ) OR LIMIT-TO ( SUBJAREA , "ECON" ) OR LIMIT-TO ( SUBJAREA , "DECI" ) )'
# Pre-URL-encoded subject-area filter ("%2c" is a comma) passed alongside the
# query; presumably appended to the request URL by the extractor -- confirm.
subj = '&subj="EART"%2ct%2c"ENVI"%2ct%2c"ENGI"%2ct%2c"ENER"%2ct%2c"MULT"%2ct%2c"PHYS"%2ct%2c"COMP"%2ct%2c"BUSI"%2ct%2c"MATH"%2ct%2c"ECON"%2ct%2c"DECI"%2ct'
ext.get_scopus_id(query,subj)
# Fetch and print the record for a single document by its Scopus ID.
obj = ext.get_abstract('20044366781')
print(obj)
| 106.5 | 1,390 | 0.640258 |
411f387b9ed9356ea2f50cdb6a217105b330bc07
| 5,232 |
py
|
Python
|
proj2_test_code.py
|
Nucliam/comp10001-2021s1
|
61441aa9b3c89a3938046e4301a8f75cae70a8f8
|
[
"MIT"
] | 2 |
2021-05-01T01:55:36.000Z
|
2021-05-01T04:48:35.000Z
|
proj2_test_code.py
|
Nucliam/comp10001-2021s1
|
61441aa9b3c89a3938046e4301a8f75cae70a8f8
|
[
"MIT"
] | null | null | null |
proj2_test_code.py
|
Nucliam/comp10001-2021s1
|
61441aa9b3c89a3938046e4301a8f75cae70a8f8
|
[
"MIT"
] | 7 |
2021-05-19T03:08:42.000Z
|
2021-05-26T05:44:57.000Z
|
#
# Test harness for COMP10001 Project 2, 2021s1
#
# Author: Tim Baldwin
#
# Date: 26/4/2021
#
# Version 1.0
#
import sys  # NOTE(review): sys appears unused in this module -- confirm before removing
try:
    import program as submission # import the file `program.py'
    import tests # import the file `tests.py'
    # tests_extra.py is optional: warn and continue if it is absent.
    try:
        import tests_extra
    except ImportError:
        print("WARNING: No 'tests_extra.py' file found ... running only the tests in 'tests.py'\n")
# exit if one of the above statements doesn't execute properly, presumably because `program.py' didn't import properly
except ImportError as detail:
    print(f"ERROR: {detail}")
    raise
except Exception as detail:
    print(f"ERROR: {detail}")
    raise
####################################################################################
#
# name: test()
#
# synposis: run the tests for the supplied function name, and check the outputs against those in `test`
# input(s): the function name to be tested (str)
# output(s): print the tests that are tried, and if unsuccessful, the correct value vs. returned value
#
def test(funct_name, testset):
    """Run all recorded test cases for *funct_name* from *testset*.

    Each case is a (call-expression string, expected value) pair; the call
    string is evaluated and its result compared with test_equivalent().
    Prints per-case results and a summary; returns -1 for unknown names.
    """
    if funct_name not in testset.test_cases:
        # Unknown function name: tell the user which names are available.
        print(f"ERROR: '{funct_name}' is not a recognised function name; select from: {str(testset.test_cases.keys())}")
        return -1
    print(f"Testing the {funct_name} function ...\n")
    passed_total = 0
    for case in testset.test_cases[funct_name]:
        print(f"    testing {case[0]} ...")
        actual = eval(case[0])  # execute the recorded call expression
        expected = case[1]
        if not test_equivalent(actual, expected, funct_name):
            print("failed")
            print(f"      * expected = (type '{type(expected)}') {expected}")
            print(f"      * returned = (type '{type(actual)}') {actual}")
        else:
            passed_total += 1
            print("passed")
    print(f"\n{passed_total}/{len(testset.test_cases[funct_name])} tests passed for {funct_name}")
#
# end test()
####################################################################################
####################################################################################
#
# name: test_equivalent()
#
# synposis: test for equivalence between two arguments. In particular, for this project
# we need to allow for different orders between hands, as well as different orders
# for the cards in a hand. We do this by converting the a list-of-lists representation
# into a set-of-sets representation. We use frozenset as the standard set() is mutable,
# making it unhashable and thus unsuitable for inclusion in a set.
# input(s): two arguments of arbitrary type
# output(s): Boolean evaluation of the equivalence of the two arguments
#
def test_equivalent(a, b, funct_name):
# we consider lists of lists to be the same as long as their contents are the same,
# ignoring order of both the outer and inner lists
if funct_name == 'play':
type_a = type(a)
type_b = type(b)
if type_a == type_b == str:
return a == b
elif type_a == type_b == tuple:
return a[0] == b[0]
else:
return False
if isinstance(a, list) and isinstance(b, list):
try:
return as_setset(a) == as_setset(b)
except TypeError:
# One of the items was not iterable, we fall back to standard equality
return a == b
# for everything else, we use the built-in notion of equality
else:
return type(a) == type(b) and a == b
#
# end test_equivalent()
####################################################################################
####################################################################################
#
# name: as_setset()
#
# synposis: convert a list-of-lists into a set of sets. This is to facilitate
# order-agnostic checking of output.
# input(s): a list of lists
# output(s): a frozenset of frozensets
#
def as_setset(seq):
    """Convert a list of iterables into a frozenset of frozensets.

    Makes comparisons order-agnostic at both levels; frozenset is used because
    the inner collections must be hashable to live inside the outer set.
    """
    return frozenset(map(frozenset, seq))
#
# end as_setset()
####################################################################################
def test_all(testset):
    """Run every registered test group in *testset*, in sorted name order,
    skipping any function the student submission does not define."""
    for fn_name in sorted(testset.test_cases):
        if not hasattr(submission, fn_name):
            # The submission does not implement this function at all.
            print("No implementation for '{0}'".format(fn_name))
            continue
        test(fn_name, testset)
# A module's __name__ is set to __main__ when it is imported at the top level
# (i.e. not by another module)
if __name__ == "__main__":
    test_all(tests)
    try:
        # tests_extra is only bound if the optional import at the top of the
        # file succeeded; a NameError here simply means it was absent.
        test_all(tests_extra)
    except NameError:
        pass
| 31.902439 | 120 | 0.582951 |
41303daeef292f5f1b91542d518a67bfb86a1d0e
| 467 |
py
|
Python
|
Python3/0312-Burst-Balloons/soln-1.py
|
wyaadarsh/LeetCode-Solutions
|
3719f5cb059eefd66b83eb8ae990652f4b7fd124
|
[
"MIT"
] | 5 |
2020-07-24T17:48:59.000Z
|
2020-12-21T05:56:00.000Z
|
Python3/0312-Burst-Balloons/soln-1.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | null | null | null |
Python3/0312-Burst-Balloons/soln-1.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | 2 |
2020-07-24T17:49:01.000Z
|
2020-08-31T19:57:35.000Z
|
class Solution:
    def maxCoins(self, nums: List[int]) -> int:
        """Return the maximum coins obtainable by bursting all balloons.

        Interval DP over sentinel-padded values: best[l][r] is the top score
        for bursting every balloon strictly between positions l and r.
        Zero-valued balloons contribute nothing and are dropped up front.
        """
        balloons = [1] + [v for v in nums if v] + [1]
        size = len(balloons)
        best = [[0] * size for _ in range(size)]
        # Solve intervals with the left edge moving inwards from the right,
        # so every shorter sub-interval is already available.
        for left in range(size - 3, -1, -1):
            for right in range(left + 2, size):
                best[left][right] = max(
                    best[left][mid]
                    + balloons[left] * balloons[mid] * balloons[right]
                    + best[mid][right]
                    for mid in range(left + 1, right)
                )
        return best[0][size - 1]
| 38.916667 | 95 | 0.423983 |
bec59677770b50bb173450dacdec0a9674051307
| 1,480 |
py
|
Python
|
Lab2/main.py
|
EricCharnesky/CIS2001-Winter2021
|
a633e32026726f985eea1fe7dd0da3497fff45aa
|
[
"MIT"
] | 3 |
2021-02-04T22:56:48.000Z
|
2021-07-17T05:47:24.000Z
|
Lab2/main.py
|
EricCharnesky/CIS2001-Winter2021
|
a633e32026726f985eea1fe7dd0da3497fff45aa
|
[
"MIT"
] | null | null | null |
Lab2/main.py
|
EricCharnesky/CIS2001-Winter2021
|
a633e32026726f985eea1fe7dd0da3497fff45aa
|
[
"MIT"
] | 1 |
2021-02-07T01:40:24.000Z
|
2021-02-07T01:40:24.000Z
|
from matplotlib import pyplot as plt
import sys
import time
def compute_time_to_add(list_to_add_to, item):
    """Append *item* to *list_to_add_to* and return the elapsed seconds."""
    started_at = time.perf_counter()
    list_to_add_to.append(item)
    return time.perf_counter() - started_at
def compute_time_to_remove_from_index(list_to_remove_from, index_to_remove_from):
    """Pop the element at *index_to_remove_from* and return the elapsed seconds."""
    started_at = time.perf_counter()
    list_to_remove_from.pop(index_to_remove_from)
    return time.perf_counter() - started_at
def compute_time_to_insert_at_index(list_to_insert_into, index_to_insert_into):
    """Insert at *index_to_insert_into* and return the elapsed seconds.

    The inserted value is the index itself; only the position matters for
    the timing measurement.
    """
    started_at = time.perf_counter()
    list_to_insert_into.insert(index_to_insert_into, index_to_insert_into)
    return time.perf_counter() - started_at
#timings = []
#some_list = []
#values = range(10_000_000)
#for n in values:
# timings.append(compute_time_to_add(some_list, n))
# timings = []
# size_of_list = []
# memory_size_of_list = []
# values = list(range(100_000))
# for n in range(len(values)):
# size_of_list.append(len(values))
# timings.append(compute_time_to_remove_from_index(values, 0))
# memory_size_of_list.append(sys.getsizeof(values))
timings = []
some_list = []
length_of_list = []
# Measure list.insert at index n while the list grows by one each iteration;
# since the list has length n when inserting at n, this is an end insert.
for n in range(100_000):
    length_of_list.append(len(some_list))
    timings.append(compute_time_to_insert_at_index(some_list, n))
# Plot per-insert time against the list length at the time of insertion.
plt.plot(length_of_list, timings)
plt.show()
#plt.plot(size_of_list, timings)
#plt.plot(size_of_list, memory_size_of_list)
#plt.show()
b734d6db8a968728167096f78620ebfb23e18795
| 1,524 |
py
|
Python
|
examples/12_building_subqueries/models/kfai_pets_model.py
|
kungfuai/sql-chemistry
|
98f75bb66eff87cec51eb77ecc2ebba1a1ae0870
|
[
"MIT"
] | 1 |
2021-07-29T15:50:02.000Z
|
2021-07-29T15:50:02.000Z
|
examples/10_group_by_clause_queries/models/kfai_pets_model.py
|
kungfuai/sql-chemistry
|
98f75bb66eff87cec51eb77ecc2ebba1a1ae0870
|
[
"MIT"
] | 3 |
2021-05-20T14:35:53.000Z
|
2021-06-25T12:24:26.000Z
|
examples/11_having_clause_queries/models/kfai_pets_model.py
|
kungfuai/sql-chemistry
|
98f75bb66eff87cec51eb77ecc2ebba1a1ae0870
|
[
"MIT"
] | 2 |
2021-05-04T18:42:44.000Z
|
2021-09-18T16:27:35.000Z
|
from sqlalchemy import Column, ForeignKey, Boolean, String, Integer
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from uuid import uuid4
# Shared declarative base; every ORM model below registers on its metadata.
BaseDbModel = declarative_base()
class EmployeeModel(BaseDbModel):
    """An employee; one-to-many with PetModel via PetModel.employee_id."""
    __tablename__ = "employee"
    # Client-side random UUID primary key.
    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid4)
    employee_name = Column("employee_name", String(100), nullable=False)
    # Reverse side of PetModel.employees.
    pets = relationship("PetModel", back_populates="employees")
class PetModel(BaseDbModel):
    """A pet belonging to exactly one employee (employee_id is NOT NULL)."""
    __tablename__ = "pet"
    # Client-side random UUID primary key.
    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid4)
    # Descriptive attributes are all optional.
    pet_name = Column("pet_name", String(100), nullable=True)
    pet_species = Column("pet_species", String(100), nullable=True)
    pet_breed = Column("pet_breed", String(100), nullable=True)
    employee_id = Column(UUID(as_uuid=True), ForeignKey(EmployeeModel.id), nullable=False)
    # Reverse side of EmployeeModel.pets.
    employees = relationship("EmployeeModel", back_populates="pets")
class OfficePetModel(BaseDbModel):
    """Per-breed flag for whether a pet breed is allowed in the office."""
    __tablename__ = "office_pet"
    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid4)
    pet_breed = Column("pet_breed", String(100))
    allowed_in_office = Column("allowed_in_office", Boolean)
class InsuranceModel(BaseDbModel):
    """Per-species insurance cost lookup table."""
    __tablename__ = "insurance"
    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid4)
    pet_species = Column("pet_species", String(100))
    # Cost stored as an integer; units (e.g. cents vs whole currency) are not
    # evident from this file -- confirm before relying on them.
    insurance_cost = Column("insurance_cost", Integer)
| 33.130435 | 90 | 0.749344 |
f852e3b7898c876ebcf745ce2111fa2e5aff8306
| 1,419 |
py
|
Python
|
aliyun-python-sdk-gpdb/aliyunsdkgpdb/request/v20160503/DescribeRegionsRequest.py
|
jia-jerry/aliyun-openapi-python-sdk
|
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
|
[
"Apache-2.0"
] | 1,001 |
2015-07-24T01:32:41.000Z
|
2022-03-25T01:28:18.000Z
|
aliyun-python-sdk-gpdb/aliyunsdkgpdb/request/v20160503/DescribeRegionsRequest.py
|
jia-jerry/aliyun-openapi-python-sdk
|
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
|
[
"Apache-2.0"
] | 363 |
2015-10-20T03:15:00.000Z
|
2022-03-08T12:26:19.000Z
|
aliyun-python-sdk-gpdb/aliyunsdkgpdb/request/v20160503/DescribeRegionsRequest.py
|
jia-jerry/aliyun-openapi-python-sdk
|
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
|
[
"Apache-2.0"
] | 682 |
2015-09-22T07:19:02.000Z
|
2022-03-22T09:51:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkgpdb.endpoint import endpoint_data
class DescribeRegionsRequest(RpcRequest):
    """RPC request wrapper for the gpdb DescribeRegions API (2016-05-03)."""

    def __init__(self):
        RpcRequest.__init__(self, 'gpdb', '2016-05-03', 'DescribeRegions','gpdb')
        self.set_method('POST')
        # Populate endpoint data only when the SDK core exposes these hooks.
        if hasattr(self, "endpoint_map"):
            self.endpoint_map = endpoint_data.getEndpointMap()
        if hasattr(self, "endpoint_regional"):
            self.endpoint_regional = endpoint_data.getEndpointRegional()

    def get_Region(self):
        """Return the current Region query parameter, or None if unset."""
        params = self.get_query_params()
        return params.get('Region')

    def set_Region(self,Region):
        """Set the Region query parameter on this request."""
        self.add_query_param('Region',Region)
| 37.342105 | 76 | 0.764623 |
d30209d410becc46c64ac09b42cd21a10407238e
| 5,115 |
py
|
Python
|
Interface/Dialogs/GainCoinsDialog.py
|
Snackhole/PyFifth
|
2a644b81c08d3abc35889fa45eae280dbbcc037d
|
[
"MIT"
] | null | null | null |
Interface/Dialogs/GainCoinsDialog.py
|
Snackhole/PyFifth
|
2a644b81c08d3abc35889fa45eae280dbbcc037d
|
[
"MIT"
] | null | null | null |
Interface/Dialogs/GainCoinsDialog.py
|
Snackhole/PyFifth
|
2a644b81c08d3abc35889fa45eae280dbbcc037d
|
[
"MIT"
] | null | null | null |
from PyQt5 import QtCore
from PyQt5.QtWidgets import QDialog, QGridLayout, QSizePolicy, QPushButton, QSpinBox, QLabel
class GainCoinsDialog(QDialog):
    """Modal dialog prompting for an amount of each coin denomination.

    After construction (which runs the dialog modally via exec_()), callers
    check self.Submitted; when True, self.GainedCoins maps each denomination
    ("CP", "SP", "EP", "GP", "PP") to the int the user entered. When the
    dialog is cancelled, the values remain None.
    """

    # Column order of the denominations in the dialog grid. The original
    # implementation duplicated the label/spin-box setup five times; the
    # loops below build identical widgets while keeping the public
    # attribute names (self.CPLabel, self.CPSpinBox, ...) intact.
    CoinDenominations = ("CP", "SP", "EP", "GP", "PP")

    def __init__(self, ParentWindow):
        super().__init__(parent=ParentWindow)

        # Store Parameters
        self.ParentWindow = ParentWindow

        # Variables
        self.GainedCoins = {Coin: None for Coin in self.CoinDenominations}
        self.Submitted = False

        # Inputs Size Policy
        self.InputsSizePolicy = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)

        # Prompt Label
        self.PromptLabel = QLabel("Gain coins:")
        self.PromptLabel.setAlignment(QtCore.Qt.AlignCenter)

        # One header label and one spin box per denomination, exposed as
        # self.<Coin>Label / self.<Coin>SpinBox for backward compatibility.
        for Coin in self.CoinDenominations:
            Label = QLabel(Coin)
            Label.setAlignment(QtCore.Qt.AlignCenter)
            Label.setFrameStyle(QLabel.StyledPanel | QLabel.Plain)
            Label.setMargin(5)
            setattr(self, Coin + "Label", Label)
            SpinBox = QSpinBox()
            SpinBox.setAlignment(QtCore.Qt.AlignCenter)
            SpinBox.setSizePolicy(self.InputsSizePolicy)
            SpinBox.setButtonSymbols(SpinBox.NoButtons)
            SpinBox.setRange(0, 1000000000)
            setattr(self, Coin + "SpinBox", SpinBox)

        # Buttons
        self.SubmitButton = QPushButton("Submit")
        self.SubmitButton.clicked.connect(self.Submit)
        self.CancelButton = QPushButton("Cancel")
        self.CancelButton.clicked.connect(self.Cancel)

        # Layout: row 0 prompt, row 1 headers, row 2 inputs, row 3 buttons.
        self.Layout = QGridLayout()
        self.Layout.addWidget(self.PromptLabel, 0, 0, 1, 5)
        for Column, Coin in enumerate(self.CoinDenominations):
            self.Layout.addWidget(getattr(self, Coin + "Label"), 1, Column)
            self.Layout.addWidget(getattr(self, Coin + "SpinBox"), 2, Column)
        self.ButtonsLayout = QGridLayout()
        self.ButtonsLayout.addWidget(self.SubmitButton, 0, 0)
        self.ButtonsLayout.addWidget(self.CancelButton, 0, 1)
        self.Layout.addLayout(self.ButtonsLayout, 3, 0, 1, 5)
        self.Layout.setRowStretch(2, 1)
        for Column in range(5):
            self.Layout.setColumnStretch(Column, 1)
        self.setLayout(self.Layout)

        # Set Window Title and Icon
        self.setWindowTitle(self.ParentWindow.ScriptName)
        self.setWindowIcon(self.ParentWindow.WindowIcon)

        # Select CP Spin Box so typing immediately replaces its contents
        self.CPSpinBox.selectAll()

        # Execute Dialog (modal; blocks until Submit or Cancel closes it)
        self.exec_()

    def Submit(self):
        """Record every spin box value into GainedCoins and close as submitted."""
        for Coin in self.CoinDenominations:
            self.GainedCoins[Coin] = getattr(self, Coin + "SpinBox").value()
        self.Submitted = True
        self.close()

    def Cancel(self):
        """Close without submitting; GainedCoins values remain None."""
        self.close()
| 40.595238 | 92 | 0.668622 |
8dd8572bb41c67d9b0d65e0b726eea1e143bdd6c
| 9,620 |
py
|
Python
|
ngeo_browse_server/control/management/commands/ngeo_check_overlapping_time.py
|
EOX-A/ngeo-b
|
b55315c7955b4c2b68cbd7f8276ac890ee19106e
|
[
"MIT"
] | 4 |
2016-08-05T17:33:41.000Z
|
2020-07-10T21:30:13.000Z
|
ngeo_browse_server/control/management/commands/ngeo_check_overlapping_time.py
|
EOX-A/ngeo-b
|
b55315c7955b4c2b68cbd7f8276ac890ee19106e
|
[
"MIT"
] | 23 |
2015-10-29T17:52:06.000Z
|
2021-07-20T09:52:18.000Z
|
ngeo_browse_server/control/management/commands/ngeo_check_overlapping_time.py
|
EOX-A/ngeo-b
|
b55315c7955b4c2b68cbd7f8276ac890ee19106e
|
[
"MIT"
] | 5 |
2015-07-21T09:33:57.000Z
|
2019-11-28T22:55:03.000Z
|
#------------------------------------------------------------------------------
#
# Project: ngEO Browse Server <https://github.com/EOX-A/ngeo-b>
# Authors: Lubomir Bucek <[email protected]>
#
#------------------------------------------------------------------------------
# Copyright (C) 2019 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#------------------------------------------------------------------------------
import logging
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from eoxserver.core.system import System
from eoxserver.core.util.timetools import getDateTime, isotime
from ngeo_browse_server.config.models import BrowseLayer, Browse
from ngeo_browse_server.control.management.commands import LogToConsoleMixIn
import traceback
from json import dumps
logger = logging.getLogger(__name__)
class Command(LogToConsoleMixIn, BaseCommand):
    """Management command: report the merged (transitively overlapping)
    time interval around a given time span for one browse type."""

    # Legacy optparse-style option declarations.
    option_list = BaseCommand.option_list + (
        make_option('--browse-type',
            dest='browse_type',
            help=("The browses of browse type to be searched.")
        ),
        make_option('--start',
            dest='start',
            help=("The start date and time in ISO 8601 format.")
        ),
        make_option('--end',
            dest='end',
            help=("The end date and time in ISO 8601 format.")
        ),
    )
    args = ("--browse-type=<browse-type> "
            "[--start=<start-date-time>] [--end=<end-date-time>]")
    # NOTE(review): the trailing comma after the first string makes `help` a
    # 2-element tuple rather than one concatenated string -- confirm whether
    # that is intentional.
    help = ("For given timestamp and browse type, searches for a continuous array of browses",
            "which overlap in time with given timestamp."
            "The search is a cascade and ends only when a browse is found which"
            "does not intersect in time with the previous/next one."
            "Returns the full time interval start-end as a JSON string.")
    def handle(self, *args, **kwargs):
        """Entry point: parse options, run the interval query, and return the
        merged start/end as a JSON string with ISO 8601 timestamps."""
        System.init()
        # parse command arguments
        self.verbosity = int(kwargs.get("verbosity", 1))
        traceback_conf = kwargs.get("traceback", False)
        # in case this function is used repeatedly, add logger handle only during first run
        # NOTE(review): this guard fires when non-StreamHandler handlers are
        # ALREADY present (> 0); a "first run only" check would normally be
        # == 0 -- confirm the intended condition.
        if len([handler for handler in logging.getLogger("ngeo_browse_server").handlers if not isinstance(handler, logging.StreamHandler)]) > 0:
            self.set_up_logging(["ngeo_browse_server"], self.verbosity, traceback_conf)
        browse_type = kwargs.get("browse_type")
        start = kwargs.get("start")
        end = kwargs.get("end")
        # parse start/end if given
        if start:
            start = getDateTime(start)
        if end:
            end = getDateTime(end)
        results = self.handle_query(start, end, browse_type)
        logger.info("Finished querying for intersecting time intervals. Returning merged_start %s merged_end %s" % (results["merged_start"].strftime("%Y%m%dT%H%M%S"), results["merged_end"].strftime("%Y%m%dT%H%M%S")))
        # Serialise the datetime values to ISO 8601 strings before JSON encoding.
        results.update((key, isotime(value)) for key, value in results.items())
        return dumps(results)
def wasMerged(self, t1start, t1end, t2start, t2end):
"""
Two browses were merged if their time intervals intersect
or one of them has a single timestamp and is intesecting the other.
Continuous intervals (t1e == t2s) are not merged.
"""
intersects = (
(t1start == t1end or t2start == t2end) and (t2start <= t1end and t2end >= t1start)
) or (
t2start < t1end and t2end > t1start
)
return intersects
    def handle_query(self, start, end, browse_type):
        """Compute the merged time interval around ``[start, end]``.

        Fetches all distinct ``(start_time, end_time)`` pairs of the browse
        layer sorted by time, locates the first pair intersecting the given
        interval and then cascades backwards and forwards while neighbouring
        pairs still intersect.  Returns a dict with the outermost
        ``merged_start``/``merged_end`` datetimes.

        Raises CommandError when the browse layer does not exist or contains
        no browses.
        """
        try:
            browse_layer_model = BrowseLayer.objects.get(browse_type=browse_type)
        except BrowseLayer.DoesNotExist:
            logger.error("Browse layer with browse type'%s' does "
                         "not exist" % browse_type)
            raise CommandError("Browse layer with browse type'%s' does "
                               "not exist" % browse_type)
        # get sorted distinct time entries by start_time, end_time
        logger.debug("Starting query for unique and sorted browses.")
        browses_qs = Browse.objects.all().filter(browse_layer=browse_layer_model
        ).values(
            'start_time', 'end_time'
        ).distinct(
            'start_time', 'end_time'
        ).order_by(
            'start_time', 'end_time'
        )
        # Fall back to the caller-supplied interval when nothing intersects.
        new_start = start
        new_end = end
        if len(browses_qs) > 0:
            try:
                logger.debug("Finding first time intersection of given interval and query results.")
                for i in range(len(browses_qs)):
                    # find first intersection of given interval and database results
                    if self.wasMerged(start, end, browses_qs[i]['start_time'], browses_qs[i]['end_time']):
                        logger.debug("Intersection found at query result with start_time %s end_time %s" % (browses_qs[i]['start_time'], browses_qs[i]['end_time']))
                        # find merged_start_time
                        repeat = True
                        current_index = i # do not modify for loop iterator
                        while repeat:
                            # search backward until no intersect
                            if current_index > 0:
                                if self.wasMerged(browses_qs[current_index]['start_time'], browses_qs[current_index]['end_time'],
                                                  browses_qs[current_index - 1]['start_time'], browses_qs[current_index - 1]['end_time']):
                                    # still found intersection, move one left in list
                                    current_index -= 1
                                else:
                                    # no intersection, save start_time
                                    # NOTE(review): the log prints entry i's start_time
                                    # but current_index's value is stored -- the message
                                    # may be misleading; confirm intent.
                                    logger.debug("No other intersection found, saving the current start_time %s as new_start" % browses_qs[i]['start_time'])
                                    new_start = browses_qs[current_index]['start_time']
                                    repeat = False
                            else:
                                # reached start of list
                                logger.debug("Reached start of list, saving the current start_time %s as new_start" % browses_qs[i]['start_time'])
                                new_start = browses_qs[current_index]['start_time']
                                repeat = False
                        # find merged_end_time
                        repeat = True
                        current_index = i
                        while repeat:
                            # search forward until no intersect
                            if current_index < len(browses_qs) - 1:
                                if self.wasMerged(browses_qs[current_index]['start_time'], browses_qs[current_index]['end_time'],
                                                  browses_qs[current_index + 1]['start_time'], browses_qs[current_index + 1]['end_time']):
                                    # still found intersection, move one right in list
                                    current_index += 1
                                else:
                                    # no intersection, save end_time
                                    logger.debug("No other intersection found, saving the current end_time %s as new_end" % browses_qs[i]['end_time'])
                                    new_end = browses_qs[current_index]['end_time']
                                    repeat = False
                            else:
                                # reached end of list
                                logger.debug("Reached end of list, saving the current end_time %s as new_end" % browses_qs[i]['end_time'])
                                new_end = browses_qs[current_index]['end_time']
                                repeat = False
                        if repeat is False:
                            logger.debug("Finding intersections done, leaving for loop.")
                            break # go outside of for loop, work done
            except Exception as e:
                logger.error("Failure during checking of time interval intersection")
                logger.error("Exception was '%s': %s" % (type(e).__name__, str(e)))
                logger.debug(traceback.format_exc() + "\n")
        else:
            raise CommandError("Browse layer with browse type '%s' is empty" % browse_type)
        results = {
            "merged_start": new_start,
            "merged_end": new_end
        }
        return results
| 53.149171 | 216 | 0.568607 |
e9f33f9270f289b500363cac4b62c542d6e1e22d
| 1,712 |
py
|
Python
|
schemas/tests/unit/schema_data.py
|
bcgov/FWBEN
|
d155b11b16b9f3d454d5887cddf13d276d8aebdd
|
[
"Apache-2.0"
] | 1 |
2019-08-17T03:11:12.000Z
|
2019-08-17T03:11:12.000Z
|
schemas/tests/unit/schema_data.py
|
bcgov/FWBEN
|
d155b11b16b9f3d454d5887cddf13d276d8aebdd
|
[
"Apache-2.0"
] | 206 |
2019-08-09T18:30:53.000Z
|
2022-02-27T21:28:50.000Z
|
schemas/tests/unit/schema_data.py
|
bcgov/FWBEN
|
d155b11b16b9f3d454d5887cddf13d276d8aebdd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Suite data used across many tests.
Test array used in multiple pytests, and several filings that can be used in tests.
"""
# Filenames of every JSON schema the test-suite loads and validates.
TEST_SCHEMAS_DATA = [
    'address.json',
    'annual_report.json',
    'business.json',
    'change_of_address.json',
    'filing.json',
    'directors.json',
    'change_of_directors.json',
    'task.json',
    'todo.json',
]

# A minimal, schema-valid annual-report filing used as a shared fixture.
TEST_AR = {
    'filing': {
        'header': {
            'name': 'annualReport',
            'date': '2019-04-08',
            'filingId': 1,
        },
        'business': {
            'cacheId': 1,
            'foundingDate': '2007-04-08',
            'identifier': 'CP1234567',
            'lastLedgerTimestamp': '2019-04-15T20:05:49.068272+00:00',
            'lastPreBobFilingTimestamp': '2019-04-15T20:05:49.068272+00:00',
            'legalName': 'legal name - CP1234567',
        },
        'annualReport': {
            'annualGeneralMeetingDate': '2019-04-08',
            'certifiedBy': 'full name',
            'email': '[email protected]',
        },
    },
}
| 32.301887 | 83 | 0.617407 |
1012b866f1f788be775e2fc0f091c3572d0725c7
| 2,972 |
py
|
Python
|
FClip/lr_schedulers.py
|
Delay-Xili/F-Clip
|
ea5a7b2ddba8f4baf57e62962b479d8f0447bd65
|
[
"MIT"
] | 93 |
2021-04-22T03:20:27.000Z
|
2022-03-27T02:21:49.000Z
|
FClip/lr_schedulers.py
|
Delay-Xili/F-Clip
|
ea5a7b2ddba8f4baf57e62962b479d8f0447bd65
|
[
"MIT"
] | 10 |
2021-04-23T09:30:37.000Z
|
2022-02-28T10:24:41.000Z
|
FClip/lr_schedulers.py
|
Delay-Xili/F-Clip
|
ea5a7b2ddba8f4baf57e62962b479d8f0447bd65
|
[
"MIT"
] | 9 |
2021-04-22T05:21:26.000Z
|
2022-03-17T07:57:45.000Z
|
from __future__ import absolute_import
from __future__ import print_function
import torch
import math
from torch.optim.lr_scheduler import _LRScheduler
def init_lr_scheduler(optimizer,
                      lr_scheduler='multi_step',  # learning rate scheduler name
                      stepsize=(20, 40),  # epoch(s) at which to decay the LR
                      gamma=0.1,  # multiplicative LR decay factor
                      max_epoch=240,  # horizon used by the cosine schedules
                      eta_min=0,  # lower LR bound for the cosine schedules
                      warmUp_epoch=5,  # length of the linear warm-up phase
                      last_epoch=-1  # index of the last finished epoch (-1 = fresh)
                      ):
    """Create a learning-rate scheduler for ``optimizer``.

    Args:
        optimizer: the torch optimizer to schedule.
        lr_scheduler: one of 'single_step', 'multi_step', 'cos_step',
            'warmUp_step', 'warmUpSingle_step' or 'warmUpCos_step'.
        stepsize: decay milestone(s); only the first entry is used by the
            single-step variants.  (FIX: the default was a mutable list,
            a classic Python pitfall -- it is an immutable tuple now.)
        gamma: decay factor applied at each milestone.
        max_epoch / eta_min: cosine-annealing horizon and floor.
        warmUp_epoch: epochs of linear warm-up for the warmUp* variants.
        last_epoch: resume index handed to the underlying scheduler.

    Returns:
        A ``torch.optim.lr_scheduler`` instance.

    Raises:
        ValueError: if ``lr_scheduler`` is not a supported name.
    """
    if lr_scheduler == 'single_step':
        return torch.optim.lr_scheduler.StepLR(optimizer, step_size=stepsize[0], gamma=gamma, last_epoch=last_epoch)
    elif lr_scheduler == 'multi_step':
        return torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=stepsize, gamma=gamma, last_epoch=last_epoch)
    elif lr_scheduler == 'cos_step':
        return torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=max_epoch, eta_min=eta_min, last_epoch=last_epoch)
    elif lr_scheduler == 'warmUp_step':
        # Linear ramp from 0 up to the base LR over ``warmUp_epoch`` epochs,
        # constant afterwards.  Named function instead of an assigned lambda
        # (PEP 8 E731).
        def lr_lambda(epoch):
            return min(1.0, epoch / warmUp_epoch)
        return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda, last_epoch=last_epoch)
    elif lr_scheduler == 'warmUpSingle_step':
        return WarmUpSingle(optimizer, step_size=stepsize[0], gamma=gamma, warmUp_epoch=warmUp_epoch, last_epoch=last_epoch)
    elif lr_scheduler == 'warmUpCos_step':
        return WarmUpCosine(optimizer, T_max=max_epoch, eta_min=eta_min, warmUp_epoch=warmUp_epoch, last_epoch=last_epoch)
    else:
        raise ValueError('Unsupported lr_scheduler: {}'.format(lr_scheduler))
class WarmUpCosine(_LRScheduler):
    """Cosine annealing preceded by a linear warm-up phase.

    For the first ``warmUp_epoch`` epochs the LR ramps linearly from 0 to
    each group's base LR; afterwards it follows the standard cosine
    annealing curve between the base LR and ``eta_min`` over ``T_max``.
    """

    def __init__(self, optimizer, T_max, eta_min=0, warmUp_epoch=5, last_epoch=-1):
        self.T_max = T_max
        self.eta_min = eta_min
        self.warmUp_epoch = warmUp_epoch
        super(WarmUpCosine, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        if self.last_epoch <= self.warmUp_epoch:
            # Warm-up: linear ramp from 0 up to each base LR.
            return [base_lr * self.last_epoch / self.warmUp_epoch
                    for base_lr in self.base_lrs]
        # Annealing: shared cosine factor applied to every parameter group.
        cos_factor = (1 + math.cos(math.pi * self.last_epoch / self.T_max)) / 2
        return [self.eta_min + (base_lr - self.eta_min) * cos_factor
                for base_lr in self.base_lrs]
class WarmUpSingle(_LRScheduler):
    """Single-step LR decay preceded by a linear warm-up phase.

    For the first ``warmUp_epoch`` epochs the LR ramps linearly from 0 to
    each group's base LR; afterwards each group's LR is
    ``base_lr * gamma ** (epoch // step_size)``.
    """

    def __init__(self, optimizer, step_size, gamma=0.1, warmUp_epoch=10, last_epoch=-1):
        self.step_size = step_size
        self.gamma = gamma
        self.warmUp_epoch = warmUp_epoch
        super(WarmUpSingle, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        if self.last_epoch <= self.warmUp_epoch:
            # Warm-up: linear ramp from 0 up to each base LR.
            return [base_lr * self.last_epoch / self.warmUp_epoch
                    for base_lr in self.base_lrs]
        # Step decay: one gamma factor per completed ``step_size`` window.
        decay = self.gamma ** (self.last_epoch // self.step_size)
        return [base_lr * decay for base_lr in self.base_lrs]
| 42.457143 | 125 | 0.651413 |
df190a3420896fcfcd67b9ef9d4ca6e10fc25e06
| 40 |
py
|
Python
|
Replacethesubstring.py
|
Arnabsaha6/Snakify
|
df0c1112ae8a56a275044f786bfd89f746e3ca85
|
[
"MIT"
] | null | null | null |
Replacethesubstring.py
|
Arnabsaha6/Snakify
|
df0c1112ae8a56a275044f786bfd89f746e3ca85
|
[
"MIT"
] | null | null | null |
Replacethesubstring.py
|
Arnabsaha6/Snakify
|
df0c1112ae8a56a275044f786bfd89f746e3ca85
|
[
"MIT"
] | null | null | null |
Code:
# Snakify "Replace the substring": echo stdin with every '1' replaced by 'one'.
print(input().replace('1', 'one'))
| 20 | 34 | 0.625 |
86d9c764207d85fda619a26f01cdcd8db6e3ad75
| 5,693 |
py
|
Python
|
tests/providers/samba/hooks/test_samba.py
|
ahdbilal/airflow
|
37afe55775676e2cb4cf6ed0cfc6c892855d6805
|
[
"Apache-2.0"
] | 4 |
2020-02-16T18:13:54.000Z
|
2021-01-01T03:22:19.000Z
|
tests/providers/samba/hooks/test_samba.py
|
ahdbilal/airflow
|
37afe55775676e2cb4cf6ed0cfc6c892855d6805
|
[
"Apache-2.0"
] | 14 |
2019-11-22T09:24:20.000Z
|
2021-07-09T06:06:59.000Z
|
tests/providers/samba/hooks/test_samba.py
|
ahdbilal/airflow
|
37afe55775676e2cb4cf6ed0cfc6c892855d6805
|
[
"Apache-2.0"
] | 1 |
2020-04-12T00:38:02.000Z
|
2020-04-12T00:38:02.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from unittest.mock import call
import smbclient
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.providers.samba.hooks.samba import SambaHook
# Shared Airflow connection fixture used by every test below.
connection = Connection(host='ip', schema='share', login='username', password='password')


class TestSambaHook(unittest.TestCase):
    """Unit tests for SambaHook: connection setup and push_from_local."""

    def test_get_conn_should_fail_if_conn_id_does_not_exist(self):
        """An unknown connection id must raise AirflowException."""
        with self.assertRaises(AirflowException):
            SambaHook('conn')

    @mock.patch('airflow.hooks.base_hook.BaseHook.get_connection')
    def test_get_conn(self, get_conn_mock):
        """get_conn returns an smbclient.SambaClient built from the connection."""
        get_conn_mock.return_value = connection
        hook = SambaHook('samba_default')
        self.assertEqual(smbclient.SambaClient, type(hook.get_conn()))
        get_conn_mock.assert_called_once_with('samba_default')

    @mock.patch('airflow.providers.samba.hooks.samba.SambaHook.get_conn')
    @mock.patch('airflow.hooks.base_hook.BaseHook.get_connection')
    def test_push_from_local_should_succeed_if_destination_has_same_name_but_not_a_file(
        self, base_conn_mock, samba_hook_mock
    ):
        """Destination exists but is not a file: upload without removing."""
        base_conn_mock.return_value = connection
        samba_hook_mock.get_conn.return_value = mock.Mock()
        # BUG FIX: a duplicated ``exists.return_value = True`` line was removed.
        samba_hook_mock.return_value.exists.return_value = True
        samba_hook_mock.return_value.isfile.return_value = False
        hook = SambaHook('samba_default')
        destination_filepath = "/path/to/dest/file"
        local_filepath = "/path/to/local/file"
        hook.push_from_local(destination_filepath=destination_filepath, local_filepath=local_filepath)
        base_conn_mock.assert_called_once_with('samba_default')
        samba_hook_mock.assert_called_once()
        samba_hook_mock.return_value.exists.assert_called_once_with(destination_filepath)
        samba_hook_mock.return_value.isfile.assert_called_once_with(destination_filepath)
        samba_hook_mock.return_value.remove.assert_not_called()
        samba_hook_mock.return_value.upload.assert_called_once_with(local_filepath, destination_filepath)

    @mock.patch('airflow.providers.samba.hooks.samba.SambaHook.get_conn')
    @mock.patch('airflow.hooks.base_hook.BaseHook.get_connection')
    def test_push_from_local_should_delete_file_if_exists_and_save_file(
        self, base_conn_mock, samba_hook_mock
    ):
        # NOTE(review): despite the name, this mocks ``exists`` as False and
        # asserts ``remove`` is NOT called, duplicating the test below.
        # Confirm whether exists=True/isfile=True with a ``remove`` assertion
        # was intended.
        base_conn_mock.return_value = connection
        samba_hook_mock.get_conn.return_value = mock.Mock()
        # BUG FIX: a duplicated ``exists.return_value = False`` line was removed.
        samba_hook_mock.return_value.exists.return_value = False
        hook = SambaHook('samba_default')
        destination_folder = "/path/to/dest"
        destination_filepath = destination_folder + "/file"
        local_filepath = "/path/to/local/file"
        hook.push_from_local(destination_filepath=destination_filepath, local_filepath=local_filepath)
        base_conn_mock.assert_called_once_with('samba_default')
        samba_hook_mock.assert_called_once()
        samba_hook_mock.return_value.exists.assert_has_calls(
            [call(destination_filepath), call(destination_folder)]
        )
        samba_hook_mock.return_value.isfile.assert_not_called()
        samba_hook_mock.return_value.remove.assert_not_called()
        samba_hook_mock.return_value.mkdir.assert_called_once_with(destination_folder)
        samba_hook_mock.return_value.upload.assert_called_once_with(local_filepath, destination_filepath)

    @mock.patch('airflow.providers.samba.hooks.samba.SambaHook.get_conn')
    @mock.patch('airflow.hooks.base_hook.BaseHook.get_connection')
    def test_push_from_local_should_create_directory_if_not_exist_and_save_file(
        self, base_conn_mock, samba_hook_mock
    ):
        """Neither file nor folder exist: mkdir the folder, then upload."""
        base_conn_mock.return_value = connection
        samba_hook_mock.get_conn.return_value = mock.Mock()
        # BUG FIX: a duplicated ``exists.return_value = False`` line was removed.
        samba_hook_mock.return_value.exists.return_value = False
        hook = SambaHook('samba_default')
        destination_folder = "/path/to/dest"
        destination_filepath = destination_folder + "/file"
        local_filepath = "/path/to/local/file"
        hook.push_from_local(destination_filepath=destination_filepath, local_filepath=local_filepath)
        base_conn_mock.assert_called_once_with('samba_default')
        samba_hook_mock.assert_called_once()
        samba_hook_mock.return_value.exists.assert_has_calls(
            [call(destination_filepath), call(destination_folder)]
        )
        samba_hook_mock.return_value.isfile.assert_not_called()
        samba_hook_mock.return_value.remove.assert_not_called()
        samba_hook_mock.return_value.mkdir.assert_called_once_with(destination_folder)
        samba_hook_mock.return_value.upload.assert_called_once_with(local_filepath, destination_filepath)
| 46.663934 | 105 | 0.764623 |
7b29ad3fecf23aed4ff790cba32bea031a083628
| 1,700 |
py
|
Python
|
python/Lists.py
|
yandong2023/The-sword-pointing-to-offer-code
|
bb7b43222c8f0dac72a31dae8484eb333aca72c1
|
[
"Apache-2.0"
] | 2 |
2021-07-05T23:00:12.000Z
|
2021-07-07T02:37:23.000Z
|
python/Lists.py
|
yandong2023/leetcode-Top
|
0df7974e073a39a88898431ac6fd873c6d04804f
|
[
"Apache-2.0"
] | null | null | null |
python/Lists.py
|
yandong2023/leetcode-Top
|
0df7974e073a39a88898431ac6fd873c6d04804f
|
[
"Apache-2.0"
] | null | null | null |
class Node:
    """A singly linked list node: a data payload plus a next pointer."""

    def __init__(self, initdata):
        self.data = initdata
        self.next = None

    def getData(self):
        """Return the payload stored in this node."""
        return self.data

    def getNext(self):
        """Return the next node in the chain (None at the tail)."""
        return self.next

    def setData(self, newdata):
        """Replace the payload.

        BUG FIX: the original assigned to ``self.next`` here, silently
        corrupting the list link instead of updating the data.
        """
        self.data = newdata

    def setNext(self, nextNode):
        """Point this node at ``nextNode``."""
        self.next = nextNode
# Quick smoke test of the Node accessors.
temp = Node(93)
temp.setData(10)
# NOTE(review): the original Node.setData assigned to self.next, making this
# print 10; with a corrected setData it would print None -- confirm intent.
print(temp.getNext())
# Define an unordered (singly linked) list.
class UnorderedList:
    """Singly linked list with head insertion (classic textbook structure)."""

    def __init__(self):
        self.head = None

    def isEmpty(self):
        """Return True when the list holds no nodes."""
        return self.head is None

    def add(self, item):
        """Prepend ``item`` to the list in O(1)."""
        temp = Node(item)
        temp.setNext(self.head)
        self.head = temp

    def size(self):
        """Return the number of nodes via an O(n) traversal."""
        count = 0
        current = self.head
        while current is not None:
            count += 1
            current = current.getNext()
        return count

    def search(self, item):
        """Return True when ``item`` occurs in the list."""
        current = self.head
        while current is not None:
            if current.getData() == item:
                return True
            current = current.getNext()
        return False

    def remove(self, item):
        """Remove the first occurrence of ``item``.

        Raises:
            ValueError: if ``item`` is not present.  (The original crashed
            with AttributeError on a missing item or an empty list.)
        """
        current = self.head
        previous = None
        while current is not None:
            if current.getData() == item:
                break
            previous = current
            current = current.getNext()
        if current is None:
            raise ValueError("%r not found in list" % (item,))
        if previous is None:
            # Removing the head node.
            self.head = current.getNext()
        else:
            previous.setNext(current.getNext())
# Build a sample list and exercise search/remove.
myList = UnorderedList()
myList.add(31)
myList.add(77)
myList.add(17)
myList.add(93)
myList.add(26)
myList.add(54)
print(myList.search(17))  # True: 17 was added above
myList.remove(54)
print(myList.search(54))  # False: 54 was just removed
| 21.518987 | 47 | 0.548824 |
bd6f2a93a7ec6954f8fa1055fbe570c4bee89fed
| 15,726 |
py
|
Python
|
simulation/dqn-simulation/r9/dqn-delta.py
|
pgabriela/dqn-jitsi-autoscaler
|
b39eb335e584095ef66a9941dbe0b2ea21a02d4a
|
[
"Apache-2.0"
] | null | null | null |
simulation/dqn-simulation/r9/dqn-delta.py
|
pgabriela/dqn-jitsi-autoscaler
|
b39eb335e584095ef66a9941dbe0b2ea21a02d4a
|
[
"Apache-2.0"
] | null | null | null |
simulation/dqn-simulation/r9/dqn-delta.py
|
pgabriela/dqn-jitsi-autoscaler
|
b39eb335e584095ef66a9941dbe0b2ea21a02d4a
|
[
"Apache-2.0"
] | null | null | null |
import math
import json
import random
import time
import calendar
import pickle
import os
import requests
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from collections import namedtuple
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
SEED = 2701
# Seed torch, numpy and stdlib random so simulation runs are reproducible.
torch.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)
#################
# Replay Memory #
#################
# One experience-replay record: (s_t, a_t, s_{t+1}, r_t).
Transition = namedtuple('Transition',
                        ('state', 'action', 'next_state', 'reward'))


class ReplayMemory(object):
    """Fixed-capacity ring buffer of Transition tuples."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []
        self.position = 0  # next write slot, wraps once the buffer is full

    def push(self, *args):
        """Saves a transition, overwriting the oldest one once full."""
        entry = Transition(*args)
        if len(self.memory) == self.capacity:
            self.memory[self.position] = entry
        else:
            self.memory.append(entry)
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        """Return ``batch_size`` transitions drawn uniformly without replacement."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        return len(self.memory)
#############
# Q-Network #
#############
class DQN(nn.Module):
    """Small fully connected Q-network.

    Four LayerNorm'd hidden layers with leaky-ReLU activations followed by a
    linear head.  Maps a batch of ``num_in_features``-dim state vectors to a
    ``(batch, num_out_features)`` tensor of per-action Q-values.
    """

    def __init__(self, num_in_features, num_out_features):
        super(DQN, self).__init__()
        # Attribute names are kept so state_dict keys stay stable.
        self.linear1 = nn.Linear(num_in_features, 32)
        self.ln1 = nn.LayerNorm(32)
        self.linear2 = nn.Linear(32, 64)
        self.ln2 = nn.LayerNorm(64)
        self.linear3 = nn.Linear(64, 64)
        self.ln3 = nn.LayerNorm(64)
        self.linear4 = nn.Linear(64, 32)
        self.ln4 = nn.LayerNorm(32)
        self.out_layer = nn.Linear(32, num_out_features)

    def forward(self, x):
        """Return Q-values, one column per action, for the batch ``x``."""
        stages = ((self.linear1, self.ln1), (self.linear2, self.ln2),
                  (self.linear3, self.ln3), (self.linear4, self.ln4))
        for linear, norm in stages:
            x = F.leaky_relu(norm(linear(x)))
        return self.out_layer(x)
###############################
# Hyperparameters & Utilities #
###############################
# if gpu is to be used
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Offline measurements keyed by (conferences, participants, jvb_num, zero_conf) -> 'loss'.
df = pd.read_csv('../dbV3.csv')
# Workload trace replayed every episode (conference/participant counts per timestep).
timeseries = pd.read_csv('../timeseries.csv')
MIN_JVB_NUM = 1  # lower bound on running JVB instances
MAX_JVB_NUM = 50  # upper bound on running JVB instances
W1 = 30
ACTION_COOLDOWN = 15  # timesteps between agent decisions
LOOKBACK = 5  # number of recent loss samples averaged into the state
MEMORY_CAPACITY = 2000  # replay-buffer size
BATCH_SIZE = 64
GAMMA = 0.1  # discount factor
TARGET_UPDATE = 200  # optimisation steps between target-network syncs
N_EPISODES = 300
EPS_START = 1.0
EPS_END = 0.05
# Linear epsilon decay spread over roughly 2/3 of all expected decision steps.
EXPLORATION_DUR = (80000 / ACTION_COOLDOWN) * (N_EPISODES / 1.5)
EPS_DECAY = (EPS_START - EPS_END) / EXPLORATION_DUR
EPS_THRESHOLD = EPS_START
# Q-Network parameters
N_FEATURES = 5
N_ACTIONS = 3
# Initialize policy/target networks; the target starts as an exact copy.
policy_net = DQN(N_FEATURES, N_ACTIONS).to(device)
target_net = DQN(N_FEATURES, N_ACTIONS).to(device)
target_net.load_state_dict(policy_net.state_dict())
target_net.eval()
optimizer = optim.Adam(policy_net.parameters())
memory = ReplayMemory(MEMORY_CAPACITY)
# define reward function
def calc_reward(state, action):
    """Reward for taking ``action`` given the delta-state tensor.

    ``state`` is a (1, 5) tensor [p_delta, j_delta, z_delta, loss_delta,
    curr_loss]; actions 0/1/2 map to a JVB-count delta of +1/-1/0.
    """
    loss_delta = state[0][3].item()
    curr_loss = state[0][4].item()
    # Translate the discrete action into its effect on the JVB pool size.
    jvb_num_delta = 1 if action == 0 else (-1 if action == 1 else 0)
    if curr_loss == 0:
        # No packet loss at all: reward shrinking, punish growing the pool.
        return -jvb_num_delta
    if loss_delta == 0:
        # Loss unchanged: reward standing still, stay neutral otherwise.
        return 1 if jvb_num_delta == 0 else 0
    # Otherwise scale the reward by how much the loss moved.
    return loss_delta * jvb_num_delta
# Loss approximation
def loss_from_nearest_points(c, p, tj, ij):
    """Estimate the packet loss for an unseen (c, p, tj, ij) configuration.

    Scans three coarse partitions of (participants, total JVBs, idle JVBs);
    for the first populated cell found inside each partition, the cell's
    mean recorded loss is scaled by the participants-per-active-JVB ratio.
    The mean of the collected estimates is returned.

    Args:
        c: conference count (currently unused -- see the commented lines).
        p: participant count.
        tj: total number of JVB instances.
        ij: number of idle (zero-conference) JVB instances.
    """
    PARTITIONS = 3
    losses = []
    #conf_partitions = [0, 1, 2, 3]
    part_partitions = [1, 5, 9, 13]
    tj_partitions = [1, 3, 5, 7]
    ij_partitions = [0, 2, 4, 7]
    for i in range(PARTITIONS):
        #curr_c = conf_partitions[i]
        #d = df[df['conferences'] == curr_c]
        # ``flag`` stops the nested loops after the first usable cell of
        # partition ``i`` has contributed an estimate.
        flag = True
        for curr_p in range(part_partitions[i], part_partitions[i+1]):
            if not flag:
                break
            d1 = df[df['participants'] == curr_p]
            for curr_tj in range(tj_partitions[i], tj_partitions[i+1]):
                if not flag:
                    break
                d2 = d1[d1['jvb_num'] == curr_tj]
                for curr_ij in range(ij_partitions[i], ij_partitions[i+1]):
                    d3 = d2[d2['zero_conf'] == curr_ij]
                    if len(d3) > 0:
                        loss = d3['loss'].mean()
                        participants_scale = p / curr_p
                        curr_active_jvb_count = curr_tj - curr_ij
                        # Skip cells that would divide by zero on either side.
                        if (tj - ij) == 0 or curr_active_jvb_count == 0:
                            continue
                        active_jvbs_scale = (tj - ij) / curr_active_jvb_count
                        loss_scale = participants_scale / active_jvbs_scale
                        # The epsilon keeps a recorded loss of exactly 0 scalable.
                        estimated_loss = (loss + 1e-10) * loss_scale
                        losses.append(estimated_loss)
                        flag = False
                        break
    # NOTE(review): np.mean([]) yields NaN (with a RuntimeWarning) when no
    # partition matched -- confirm callers tolerate a NaN estimate.
    return np.mean(losses)
#################
# Training Func #
#################
def optimize_model():
    """Run one DQN optimisation step on a random minibatch from ``memory``.

    Returns the Huber loss tensor, or None when the replay memory does not
    yet contain ``BATCH_SIZE`` transitions.  Uses the module-level
    ``policy_net``, ``target_net``, ``optimizer`` and ``memory`` globals.
    """
    if len(memory) < BATCH_SIZE:
        return
    policy_net.train()
    transitions = memory.sample(BATCH_SIZE)
    # Transpose the batch (see https://stackoverflow.com/a/19343/3343043 for
    # detailed explanation). This converts batch-array of Transitions
    # to Transition of batch-arrays.
    batch = Transition(*zip(*transitions))
    # Compute a mask of non-final states and concatenate the batch elements
    # (a final state would've been the one after which simulation ended)
    non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,
                                            batch.next_state)), device=device, dtype=torch.bool)
    non_final_next_states = torch.cat([s for s in batch.next_state
                                       if s is not None])
    state_batch = torch.cat(batch.state)
    action_batch = torch.cat(batch.action)
    reward_batch = torch.cat(batch.reward)
    # Compute Q(s_t, a) - the model computes Q(s_t), then we select the
    # columns of actions taken. These are the actions which would've been taken
    # for each batch state according to policy_net
    state_action_values = policy_net(state_batch).gather(1, action_batch)
    # Compute V(s_{t+1}) for all next states.
    # Expected values of actions for non_final_next_states are computed based
    # on the "older" target_net; selecting their best reward with max(1)[0].
    # This is merged based on the mask, such that we'll have either the expected
    # state value or 0 in case the state was final.
    next_state_values = torch.zeros(BATCH_SIZE, device=device)
    next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0].detach()
    # Compute the expected Q values
    expected_state_action_values = (next_state_values * GAMMA) + reward_batch
    # Compute Huber loss
    loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))
    # Optimize the model
    optimizer.zero_grad()
    loss.backward()
    # Clamp gradients element-wise to [-1, 1] for training stability.
    for param in policy_net.parameters():
        param.grad.data.clamp_(-1, 1)
    optimizer.step()
    return loss
##############
# Simulation #
##############
print("Starting simulation...")
curr_time = time.time()
cummulative_rewards_history = []  # one summed reward per episode
epsilon_history = []  # epsilon value at the end of each episode
# Cache of previously computed/estimated losses keyed by (c, p, j, z).
losses_dict = pickle.load(open('losses_dict.pkl', 'rb'))
counter = 0  # optimisation steps since the last target-network sync
# Main training loop: replay the workload trace once per episode, letting the
# agent adjust the JVB pool every ACTION_COOLDOWN timesteps.
for i_episode in range(N_EPISODES):
    # list of [jvb id, conference count] pair of currently running JVBs
    # selected with round-robin, removed with graceful shutdown
    curr_jvbs = [[0, 0], ]
    is_shutting_down = []
    prev_state = np.array([0, 1, 1, 0])
    prev_action = -1
    prev_delta_state = None
    latest_losses = []
    jvb_num_history = []
    rewards_history = []
    losses_history = []
    miss_count = 0
    conf_count_over_time = timeseries['conference_count']
    part_count_over_time = timeseries['participant_count']
    # Truncate the live-visualisation log files at the start of each episode.
    with open('../logs/conference_count.txt', 'w') as f:
        pass
    with open('../logs/participant_count.txt', 'w') as f:
        pass
    with open('../logs/jvb_count.txt', 'w') as f:
        pass
    with open('../logs/rewards.txt', 'w') as f:
        pass
    with open('../logs/losses.txt', 'w') as f:
        pass
    episode_start_time = time.time()
    for i in range(len(conf_count_over_time)):
        c1 = int(conf_count_over_time[i])
        p1 = int(part_count_over_time[i])
        # update conferences
        # NOTE(review): at i == 0 this relies on the lookup of index -1
        # raising (bare except) so that new_c falls back to c1 -- confirm.
        try:
            new_c = c1 - int(conf_count_over_time[i-1])
        except:
            new_c = c1
        if new_c > 0:
            # assign conferences
            for c in range(new_c):
                # Least-loaded JVB wins; draining JVBs are priced out with 1e10.
                jvb_conferences = [x[1] if x[0] not in is_shutting_down else 1e10 for x in curr_jvbs]
                least_loaded_idx = np.argmin(jvb_conferences)
                curr_jvbs[least_loaded_idx][1] += 1
        elif new_c < 0:
            # remove conferences
            for c in range(abs(new_c)):
                for j in curr_jvbs:
                    if j[1] > 0:
                        j[1] -= 1
                        break
        # update jvbs (check shutting down jvbs)
        for idx in range(len(is_shutting_down) - 1, -1, -1):
            for j in curr_jvbs:
                if j[0] == is_shutting_down[idx] and j[1] == 0:
                    curr_jvbs.remove(j)
                    is_shutting_down.pop(idx)
                    break
        j1 = len(curr_jvbs)
        jvb_num_history.append(j1)
        z1 = len(list(filter(lambda x: x[1] == 0, curr_jvbs)))
        # Look up the loss in the cache; -1 marks a cache miss.
        avg_loss = losses_dict.get(c1, {}).get(p1, {}).get(j1, {}).get(z1, -1)
        if avg_loss == -1:
            miss_count += 1
            # Exact match in the offline measurements first ...
            avg_loss = df[
                (df['conferences'] == c1)
                & (df['participants'] == p1)
                & (df['jvb_num'] == j1)
                & (df['zero_conf'] == z1)
            ]['loss'].mean()
            if pd.isna(avg_loss):
                if c1 == 0 or p1 == 0:
                    avg_loss = 0
                else:
                    # ... then a +/-1 neighbourhood ...
                    avg_loss = df[
                        (df['conferences'] >= c1-1) & (df['conferences'] <= c1+1)
                        & (df['participants'] >= p1-1) & (df['participants'] <= p1+1)
                        & (df['jvb_num'] >= j1-1) & (df['jvb_num'] <= j1+1)
                        & (df['zero_conf'] >= z1-1) & (df['zero_conf'] <= z1+1)
                    ]['loss'].mean()
                    if pd.isna(avg_loss):
                        # ... and finally the scaled partition estimate.
                        avg_loss = loss_from_nearest_points(c1, p1, j1, z1)
            losses_dict.setdefault(c1, {}).setdefault(p1, {}).setdefault(j1, {})[z1] = avg_loss
        latest_losses.append(avg_loss)
        losses_history.append(avg_loss)
        assert j1 <= MAX_JVB_NUM and j1 >= MIN_JVB_NUM
        # NOTE(review): the second conjunct below repeats ``j1 >= 0`` --
        # ``z1 >= 0`` was probably intended.
        assert z1 <= MAX_JVB_NUM and j1 >= 0
        assert z1 <= j1
        if (i+1) % ACTION_COOLDOWN == 0:
            # Cooldown finished, Agent act
            l1 = np.mean(latest_losses[-LOOKBACK:])
            latest_losses = []
            curr_state = np.array([p1, j1, z1, l1])
            state = curr_state - prev_state
            p_delta = state[0]
            j_delta = state[1]
            z_delta = state[2]
            l_delta = state[3]
            state = [[p_delta, j_delta, z_delta, l_delta, l1]]
            state = torch.tensor(state, dtype=torch.float)
            if prev_action != -1:
                # Push to memory
                memory.push(prev_delta_state, prev_action, state, torch.tensor([rewards_history[-1]], dtype=torch.float))
            # select action (epsilon-greedy)
            sample = random.random()
            if sample > EPS_THRESHOLD:
                with torch.no_grad():
                    # t.max(1) will return largest column value of each row.
                    # second column on max result is index of where max element was
                    # found, so we pick action with the larger expected reward.
                    policy_net.eval()
                    curr_action = policy_net(state).max(1)[1].view(1, 1).item()
            else:
                curr_action = random.randrange(N_ACTIONS)
            EPS_THRESHOLD = max(EPS_THRESHOLD - EPS_DECAY, EPS_END)
            # apply action
            if curr_action == 0:
                # 'Add' class
                if len(curr_jvbs) < MAX_JVB_NUM:
                    curr_jvbs.append([i+1, 0])
            elif curr_action == 1:
                # 'Remove' class
                if len(curr_jvbs) - len(is_shutting_down) > MIN_JVB_NUM:
                    # Prefer an idle JVB; otherwise drain the oldest gracefully.
                    jvb_pair = None
                    for j in curr_jvbs:
                        if j[1] == 0:
                            jvb_pair = j
                            break
                    if jvb_pair:
                        curr_jvbs.remove(jvb_pair)
                    else:
                        if curr_jvbs[0][0] not in is_shutting_down:
                            is_shutting_down.append(curr_jvbs[0][0])
            else:
                # 'Maintain' class
                pass
            # calculate reward
            reward = calc_reward(state, curr_action)
            rewards_history.append(reward)
            # Save metrics to log files for live visualization
            with open('../logs/rewards.txt', 'a') as reward_f:
                reward_f.write(f"{rewards_history[-1]}\n")
            # Save current state & action
            prev_state = curr_state
            prev_action = torch.tensor([[curr_action]], dtype=torch.long)
            prev_delta_state = state
            # Train Q-Network
            optimize_model()
            # Update the target network, copying all weights and biases in DQN
            counter += 1
            if counter % TARGET_UPDATE == 0:
                target_net.load_state_dict(policy_net.state_dict())
                counter = 0
        if (i+1) % 500 == 0:
            print(f"Timesteps passed: {i+1}", end="\r")
        # Save metrics to log files for live visualization
        with open('../logs/conference_count.txt', 'a') as conf_f:
            conf_f.write(f"{conf_count_over_time[i]}\n")
        with open('../logs/participant_count.txt', 'a') as part_f:
            part_f.write(f"{part_count_over_time[i]}\n")
        with open('../logs/jvb_count.txt', 'a') as jvb_f:
            jvb_f.write(f"{jvb_num_history[-1]}\n")
        with open('../logs/losses.txt', 'a') as loss_f:
            loss_f.write(f"{losses_history[-1]}\n")
    cummulative_rewards_history.append(sum(rewards_history))
    epsilon_history.append(EPS_THRESHOLD)
    print(f"Episode {i_episode+1} - Cummulative Reward: {cummulative_rewards_history[-1]} - Epsilon: {EPS_THRESHOLD} - Miss Count: {miss_count} - Duration: {time.time() - episode_start_time} seconds")
print(f"\nSimulation finished in {time.time() - curr_time} seconds")
# Persist the trained policy network and all collected histories.
torch.save(policy_net.state_dict(), "parameters")
print("Latest model parameters has been saved to parameters")
pickle.dump(losses_dict, open('losses_dict.pkl', 'wb'))
print("Losses dictionary has been saved to losses_dict.pkl")
pickle.dump(cummulative_rewards_history, open('cum_rewards_history.pkl', 'wb'))
print("Cummulative rewards history has been saved to cum_rewards_history.pkl")
pickle.dump(epsilon_history, open('epsilon_history.pkl', 'wb'))
print("Epsilon thresholds history has been saved to epsilon_history.pkl")
# Plot reward and epsilon curves over episodes.
plt.subplot(211)
plt.title("Cummulative Rewards over Episodes")
plt.plot(np.arange(len(cummulative_rewards_history)) + 1, cummulative_rewards_history)
plt.subplot(212)
plt.title("Epsilon Thresholds over Episodes")
plt.plot(np.arange(len(epsilon_history)) + 1, epsilon_history)
plt.show()
| 35.740909 | 200 | 0.579168 |
2ea045f7225403c1645c2f200037640c34bcb128
| 1,715 |
py
|
Python
|
django_iris/cursor.py
|
nickmitchko/django-iris
|
2eb59553e554ad022b96bd37921f7f35bbb64e38
|
[
"MIT"
] | 2 |
2022-02-23T12:46:23.000Z
|
2022-02-27T22:14:36.000Z
|
django_iris/cursor.py
|
nickmitchko/django-iris
|
2eb59553e554ad022b96bd37921f7f35bbb64e38
|
[
"MIT"
] | 3 |
2022-03-19T02:04:51.000Z
|
2022-03-19T19:15:55.000Z
|
django_iris/cursor.py
|
nickmitchko/django-iris
|
2eb59553e554ad022b96bd37921f7f35bbb64e38
|
[
"MIT"
] | 3 |
2022-02-15T14:08:45.000Z
|
2022-03-19T17:05:37.000Z
|
class CursorWrapper:
    """Adapts Django's '%s' / '%(name)s' placeholder style to the DB-API
    '?' placeholders expected by the wrapped cursor, and normalises fetched
    rows to plain tuples."""

    def __init__(self, cursor):
        self.cursor = cursor
        # Guard used by fetchmany(); previously only set in execute()/
        # executemany(), so calling fetchmany() first raised AttributeError.
        self.times = 0

    def _fix_for_params(self, query, params):
        """Strip a trailing ';' and rewrite placeholders to '?'.

        Returns the rewritten query plus the parameters as a flat list.
        """
        if query.endswith(';'):
            query = query[:-1]
        if params is None:
            return query, []
        if hasattr(params, 'keys'):
            # Handle params as dict.
            # BUG FIX: the original built ``{k: "?" % k ...}`` -- '"?" % k'
            # raises TypeError (no conversion specifier) -- and returned the
            # dict KEYS instead of the values as parameters.
            args = {k: "?" for k in params}
            # NOTE(review): positional '?' placeholders assume the query
            # references the keys in the dict's iteration order -- confirm.
            return query % args, list(params.values())
        # Handle params as sequence
        placeholders = ['?' for _ in range(len(params))]
        return query % tuple(placeholders), list(params)

    def execute(self, query, params=None):
        """Execute ``query`` after placeholder translation."""
        self.times = 0
        query, params = self._fix_for_params(query, params)
        return self.cursor.execute(query, params)

    def executemany(self, query, params=None):
        """Execute ``query`` once per parameter set after translation.

        NOTE(review): the placeholder count is derived from len(params),
        i.e. the number of parameter SETS -- confirm callers pass data in
        the shape this expects.
        """
        self.times = 0
        query, params = self._fix_for_params(query, params)
        return self.cursor.executemany(query, params)

    def close(self):
        try:
            self.cursor.close()
        except Exception:
            # already closed
            pass

    def __getattr__(self, attr):
        # Delegate everything else to the wrapped cursor.
        return getattr(self.cursor, attr)

    def __iter__(self):
        return iter(self.cursor)

    def fetchall(self):
        """Return all remaining rows as plain tuples."""
        return [tuple(r) for r in self.cursor.fetchall()]

    def fetchmany(self, size=None):
        # workaround for endless loop: only return rows on the first call
        # after an execute()/executemany()
        if self.times > 0:
            return []
        self.times += 1
        return [tuple(r) for r in self.cursor.fetchmany(size)]

    def fetchone(self):
        """Return the next row as a tuple, or None when exhausted."""
        row = self.cursor.fetchone()
        return tuple(row) if row else None
| 28.114754 | 59 | 0.552187 |
32cbafe0abcf11e10c12a5c38cf1c23a848c8966
| 8,717 |
py
|
Python
|
MoveInfoEnums.py
|
GGelatin/TekkenBot
|
33e9a319a085aa17f355dc6f470dfb15e15020cd
|
[
"MIT"
] | null | null | null |
MoveInfoEnums.py
|
GGelatin/TekkenBot
|
33e9a319a085aa17f355dc6f470dfb15e15020cd
|
[
"MIT"
] | null | null | null |
MoveInfoEnums.py
|
GGelatin/TekkenBot
|
33e9a319a085aa17f355dc6f470dfb15e15020cd
|
[
"MIT"
] | null | null | null |
from enum import Enum
class AttackType(Enum):
    """
    Hit level / blockability category of an attack (see inline notes).
    """
    # Doesn't hit characters on the ground?
    # Very rare, appears on Alisa's chainsaw stance f+2
    ANTIAIR_ONLY = 11
    THROW = 10 # This is only the attack type *during* the throw animation
    LOW_UNBLOCKABLE = 9 # Yoshimitsu's 10 hit combo 2 has one
    HIGH_UNBLOCKABLE = 8 # Akuma's focus attack
    MID_UNBLOCKABLE = 7
    # UNKNOWN_6 = 6 # ????? may not exist
    HIGH = 5
    SMID = 4
    # Special mids that can't be parried.
    # Unknown if/what other properties they share.
    PROJ = 3
    MID = 2
    LOW = 1
    NA = 0 # This move is not an attack
class SimpleMoveStates(Enum):
    """
    Coarse posture/state of a character: standing, crouching, grounded,
    juggled, airborne, etc. (see inline notes for uncertain values).
    """
    UNINITIALIZED = 0
    STANDING_FORWARD = 1
    STANDING_BACK = 2
    STANDING = 3
    STEVE = 4 # steve?
    CROUCH_FORWARD = 5
    CROUCH_BACK = 6
    CROUCH = 7
    UNKNOWN_TYPE_9 = 9 # Seen on Ling
    GROUND_FACEUP = 12
    GROUND_FACEDOWN = 13
    JUGGLED = 14
    KNOCKDOWN = 15
    # THE UNDERSTANDING OF THE FOLLOWING VALUES IS NOT COMPLETE
    OFF_AXIS_GETUP = 8
    UNKNOWN_10 = 10 # Yoshimitsu
    UNKNOWN_GETUP_11 = 11
    WALL_SPLAT_18 = 18
    WALL_SPLAT_19 = 19
    TECH_ROLL_OR_FLOOR_BREAK = 20
    UNKNOWN_23 = 23 # Kuma
    AIRBORNE_24 = 24 # Yoshimitsu
    AIRBORNE = 25
    AIRBORNE_26 = 26 # Eliza, Chloe
    FLY = 27 # Devil Jin 3+4
class ComplexMoveStates(Enum):
    """
    Tracking states: how a move homes in on / realigns with the opponent,
    plus assorted transient movement states (blocking, walking, sidestep,
    recovery). Semantics of several values are only partially understood --
    see the inline notes.
    """
    # This doubles as the nothing state and an attack_starting state.
    # Occurs on kazuya's hellsweep
    F_MINUS = 0
    S_PLUS = 1 # Homing
    # Homing, often with screw, seems to more often end up slightly off-axis?
    S = 2
    A = 3 # This move 'realigns' if you pause before throwing it out
    UN04 = 4 # Extremely rare, eliza ff+4, 2 has this
    # Realigns either slightly worse or slightly better than C, hard to tell
    C_MINUS = 5
    A_PLUS = 6 # Realigns very well. Alisa's b+2, 1 has this, extremely rare
    C = 7 # This realigns worse than 'A'
    # After startup
    # Kazuya's ff+3 doesn't have a startup or attack ending flag,
    # it's just 0 the whole way through ???
    # Lili's d/b+4 doesn't have it after being blocked
    END1 = 10
    BLOCK = 11
    WALK = 12 # Applies to dashing and walking
    SIDEROLL_GETUP = 13 # Only happens after side rolling???
    SIDEROLL_STAYDOWN = 14
    SS = 15 # Sidestep left or right, also applies to juggle techs
    # Happens after you stop walking forward or backward, jumping, getting hit,
    # going into a stance, and some other places
    RECOVERING = 16
    UN17 = 17 # f+4 with Ling
    UN18 = 18 # King's 1+2+3+4 ki charge
    UN20 = 20 # Dragunov's d+3+4 ground stomp
    UN22 = 22 # Eddy move
    UN23 = 23 # Steve 3+4, 1
    SW = 28 # Sidewalk left or right
    UNKN = 999999 # Used to indicate a non standard tracking move
class ThrowTechs(Enum):
    """Which throw-break input(s) escape a throw: button 1, button 2, or 1+2."""
    NONE = 0
    # Both, 1 and 2 seem to sometimes include normal throws that can be broken
    # with either
    TE1 = 1
    TE2 = 2
    TE1_2 = 3
class StunStates(Enum):
    """Codes observed for block/hit-stun situations.

    The larger values look bitmask-like (e.g. BLOCK = GETTING_HIT | 0x01000000)
    but the decomposition is not confirmed -- see inline notes.
    """
    NONE = 0
    UNKNOWN_2 = 2 # Lili BT/Jumping/Kicks?
    BLOCK = 0x01000100
    GETTING_HIT = 0x100
    DOING_THE_HITTING = 0x10000
    # One frame at the begining of a punish #Also appears during simultaneous
    # couterhits
    BEING_PUNISHED = 0x10100
    BLOCK_NO_HIT = 0x1000000 # Law's UF+4, sometimes???? Proximity guard maybe?
class RawCancelStates(Enum):
    """Raw cancel-window codes; semantics are only partially understood
    (see the theory notes at the end of this class)."""
    STUCK = 0 # Pressing buttons doesn't do anything
    # 1 frame occurs during Alisa's u/f 1+2 command grab, also occurs during
    # Asuka's parry escapes
    UNKNOWN_1 = 1
    CANCELABLE = 0x00010000
    # Coming out of attack for sure, probably block and hit stun too?
    BUFFERABLE = 0x01010000
    # Alisa's d+3 and chainsaw stance moves cause this, maybe it's a conditional
    # buffer? Also triggers during normal throws
    UNKNOWN_2 = 2
    # ??? 3 frames at the end of cancel window??? Alisa d+2
    MOVE_ENDING_1 = 0x00010001
    #??? 1 frame near the end (or at the end?) of cancelable moves
    MOVE_ENDING_2 = 0x00010002
    # Theory: 1 and 2 refer to 'parryable' states, these include the active
    # frames of moves and the throw tech windows of moves
    # the next bit is the cancelable/not cancelable bit and finally there's a
    # 'is being buffered' bit
    # EDIT: Doesn't seem to be parryyable state. Mostly correspond to active
    # frames, but not entirely.
class CancelStatesBitmask(Enum):
    """Bit masks for decomposing raw cancel-state values (RawCancelStates)."""
    CANCELABLE = 0x00010000
    BUFFERABLE = 0x01000000
    PARRYABLE_1 = 0x00000001
    PARRYABLE_2 = 0x00000002
class HitOutcome(Enum):
    """
    Outcome of an attack, recorded on the player BEING hit (not on the
    player doing the hitting). There is no counter-hit state for side or
    back attacks.
    """
    NONE = 0
    BLOCKED_STANDING = 1
    BLOCKED_CROUCHING = 2
    JUGGLE = 3
    SCREW = 4
    # Xiaoyu's sample combo 3 ends with this, off-axis or right side maybe?
    UNKNOWN_SCREW_5 = 5
    UNKNOWN_6 = 6 # May not exist???
    UNKNOWN_SCREW_7 = 7 # Xiaoyu's sample combo 3 includes this
    GROUNDED_FACE_DOWN = 8
    GROUNDED_FACE_UP = 9
    COUNTER_HIT_STANDING = 10
    COUNTER_HIT_CROUCHING = 11
    NORMAL_HIT_STANDING = 12
    NORMAL_HIT_CROUCHING = 13
    NORMAL_HIT_STANDING_LEFT = 14
    NORMAL_HIT_CROUCHING_LEFT = 15
    NORMAL_HIT_STANDING_BACK = 16
    NORMAL_HIT_CROUCHING_BACK = 17
    NORMAL_HIT_STANDING_RIGHT = 18
    NORMAL_HIT_CROUCHING_RIGHT = 19
class JumpFlagBitmask(Enum):
    """Bitmask for the jump/airborne status flag (related values kept below
    as comments for reference)."""
    # GROUND = 0x800000
    # LANDING_OR_STANDING = 0x810000
    JUMP = 0x820000
class InputDirectionCodes(Enum):
    """Bit codes for directional input.

    Lowercase member names presumably mirror fighting-game direction notation
    (u/d/b/f = up/down/back/forward, N = neutral) -- not verifiable from this
    file alone.
    """
    NULL = 0
    N = 0x20
    u = 0x100
    ub = 0x80
    uf = 0x200
    f = 0x40
    b = 0x10
    d = 4
    df = 8
    db = 2
class InputAttackCodes(Enum):
    """Codes for attack-button combinations.

    Each value equals the sum of the single-button codes it combines
    (x1=512, x2=1024, x3=2048, x4=4096), plus a separate Rage code.
    """
    N = 0
    x1 = 512
    x2 = 1024
    x3 = 2048
    x4 = 4096
    x1x2 = 1536
    x1x3 = 2560
    x1x4 = 4608
    x2x3 = 3072
    x2x4 = 5120
    x3x4 = 6144
    x1x2x3 = 3584
    x1x2x4 = 5632
    x1x3x4 = 6656
    x2x3x4 = 7168
    x1x2x3x4 = 7680
    xRAGE = 8192
class CharacterCodes(Enum):
    """Numeric ids for playable (and some internal / non-selectable)
    characters; see inline notes for DLC and sentinel values."""
    PAUL = 0
    LAW = 1
    KING = 2
    YOSHIMITSU = 3
    HWOARANG = 4
    XIAOYU = 5
    JIN = 6
    BRYAN = 7
    HEIHACHI = 8
    KAZUYA = 9 # Also True Devil Kayuza
    STEVE = 10
    JACK_7 = 11
    ASUKA = 12
    DEVIL_JIN = 13
    FENG = 14
    LILI = 15
    DRAGUNOV = 16
    LEO = 17
    LARS = 18
    ALISA = 19
    CLAUDIO = 20
    KATARINA = 21
    LUCKY_CHLOE = 22
    SHAHEEN = 23
    JOSIE = 24
    GIGAS = 25
    KAZUMI = 26
    DEVIL_KAZUMI = 27 # Not selectable
    NINA = 28
    MASTER_RAVEN = 29
    LEE = 30
    BOB = 31
    AKUMA = 32
    KUMA = 33
    PANDA = 34
    EDDY = 35
    ELIZA = 36 # DLC
    MIGUEL = 37
    TEKKEN_FORCE = 38 # Not selectable
    KID_KAZUYA = 39 # Not selectable
    JACK_4 = 40 # Not selectable
    YOUNG_HEIHACHI = 41 # Not selectable
    TRAINING_DUMMY = 42 # Not selectable
    GEESE = 43 # DLC
    NOCTIS = 44 # DLC
    ANNA = 45 # DLC
    LEI = 46 # DLC
    MARDUK = 47 # DLC
    ARMOR_KING = 48 # DLC
    JULIA = 49 # DLC
    NEGAN = 50 # DLC
    # Value when a match starts for (??) frames until char_id loads
    NOT_YET_LOADED = 71
    NO_SELECTION = 255 # Value if cursor is not shown
class StageIDs(Enum):
    """
    Numeric stage ids (gaps in the numbering are present in the raw data).
    """
    MISHIMA_DOJO = 0
    FORGOTTEN_REALM = 1
    JUNGLE_OUTPOST = 2
    ARCTIC_SNOWFALL = 3
    TWILIGHT_CONFLICT = 4
    DRAGON_NEST = 5
    SOUQ = 6
    DEVILS_PIT = 7
    MISHIMA_BUILDING = 8
    ABANDONED_TEMPLE = 9
    DUOMO_DI_SIRIO = 30
    ARENA = 31
    G_CORP_HELIPAD = 32
    G_CORP_HELIPAD_NIGHT = 33
    BRIMSTONE_AND_FIRE = 35
    PRECIPICE_OF_FATE = 36
    VIOLET_SYSTEMS = 37
    KINDER_GYM = 39
    INFINITE_AZURE = 40
    GEOMETRIC_PLANE = 41
    WARM_UP = 42 # Not selectable
    HOWARD_ESTATE = 51
    HAMMERHEAD = 52
    JUNGLE_OUTPOST_2 = 53
    TWILIGHT_CONFLICT_2 = 54
    INFINITE_AZURE_2 = 55
    LAST_DAY_ON_EARTH = 56 # DLC
class MainMenuIDs(Enum):
    """
    Ids of the main-menu entries (note the gaps at 4 and 9).
    """
    STORY = 0
    ONLINE = 1
    OFFLINE = 2
    CUSTOMIZATION = 3
    GALLERY = 5
    OPTIONS = 6
    PLAYER_INFORMATION = 7
    STORE = 8
    ULTIMATE_TEKKEN_BOWL = 10
    QUIT = 11
class OfflineMainMenuIDs(Enum):
    """
    Ids of the entries in the offline-mode submenu.
    """
    ARCADE_BATTLE = 0
    TREASURE_BATTLE = 1
    VS_BATTLE = 2
    PRACTICE = 3
class UniversalAnimationCodes(Enum):
    """Character-independent animation codes (standing / crouching neutral)."""
    NEUTRAL = 32769
    CROUCHING_NEUTRAL = 32770
| 24.905714 | 81 | 0.609843 |
b9bce3cf092096922371d3ad4710243687daa666
| 1,453 |
py
|
Python
|
kws_streaming/models/models.py
|
bryantower/google-research
|
2d5dc20e96152566d0876977e4fb974724f406a6
|
[
"Apache-2.0"
] | 2 |
2020-05-14T06:29:29.000Z
|
2020-08-10T17:30:15.000Z
|
kws_streaming/models/models.py
|
JustinDurham/google-research
|
9049acf9246c1b75170f0c6757e62a8f619a9db6
|
[
"Apache-2.0"
] | 15 |
2020-09-25T22:39:40.000Z
|
2022-02-10T02:11:48.000Z
|
kws_streaming/models/models.py
|
JustinDurham/google-research
|
9049acf9246c1b75170f0c6757e62a8f619a9db6
|
[
"Apache-2.0"
] | 2 |
2020-08-29T08:58:30.000Z
|
2021-08-29T09:59:34.000Z
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Supported models."""
import kws_streaming.models.att_mh_rnn as att_mh_rnn
import kws_streaming.models.att_rnn as att_rnn
import kws_streaming.models.cnn as cnn
import kws_streaming.models.crnn as crnn
import kws_streaming.models.dnn as dnn
import kws_streaming.models.dnn_raw as dnn_raw
import kws_streaming.models.ds_cnn as ds_cnn
import kws_streaming.models.gru as gru
import kws_streaming.models.lstm as lstm
import kws_streaming.models.svdf as svdf
import kws_streaming.models.tc_resnet as tc_resnet
# dict with supported models
MODELS = {
'att_mh_rnn': att_mh_rnn.model,
'att_rnn': att_rnn.model,
'dnn': dnn.model,
'dnn_raw': dnn_raw.model,
'ds_cnn': ds_cnn.model,
'cnn': cnn.model,
'tc_resnet': tc_resnet.model,
'crnn': crnn.model,
'gru': gru.model,
'lstm': lstm.model,
'svdf': svdf.model,
}
| 34.595238 | 74 | 0.756366 |
b7a56df23b04d1231b47581ab16430cfc290fa67
| 4,675 |
py
|
Python
|
modulos/modulo_03/codigoFuente/src/analizadorSintactico.py
|
CodeRevenge/proyecto-seminario-traductores-ii
|
02ecc462450d7814b7045085229d94127bb7749c
|
[
"MIT"
] | 1 |
2020-11-09T05:36:25.000Z
|
2020-11-09T05:36:25.000Z
|
modulos/modulo_03/codigoFuente/src/analizadorSintactico.py
|
CodeRevenge/proyecto-seminario-traductores-ii
|
02ecc462450d7814b7045085229d94127bb7749c
|
[
"MIT"
] | null | null | null |
modulos/modulo_03/codigoFuente/src/analizadorSintactico.py
|
CodeRevenge/proyecto-seminario-traductores-ii
|
02ecc462450d7814b7045085229d94127bb7749c
|
[
"MIT"
] | null | null | null |
from src.analizadorLexico import AnalizadorLexico
from src.arbol import Arbol
class ReglasSintacticas:
    """Loads the LR parsing tables from ``data/compilador.lr``.

    File format (tab-separated): lines with more than three fields are rows
    of the LR action/goto table (``tabla_lr``, all integers); lines with one
    to three fields describe grammar rules and are split across
    ``id_regla_int`` (rule's non-terminal id), ``long_reglas`` (rule length)
    and ``id_reglas`` (non-terminal symbol), with the raw line kept in
    ``extra``.

    Raises ``Exception`` when a table row contains a non-integer field.
    """

    def __init__(self):
        self.tabla_lr = []      # LR action/goto table, rows of ints
        self.extra = []         # raw 1-3 field rule lines, as parsed
        self.id_reglas = []     # non-terminal symbol/id per rule
        self.long_reglas = []   # number of symbols on each rule's right side
        self.id_regla_int = []  # numeric id of each rule's non-terminal
        self.leer_reglas()

    def leer_reglas(self):
        """Parse ``data/compilador.lr`` into the table/rule attributes."""
        # `with` guarantees the file handle is closed even when parsing
        # raises (the original open()/close() pair leaked it on error).
        with open('data/compilador.lr', 'r') as file:
            for linea in file.readlines():
                linea = linea.split('\t')
                if len(linea) > 3:
                    # Table row: every field must convert to int.
                    for index, item in enumerate(linea):
                        try:
                            linea[index] = int(item)
                        except ValueError:
                            raise Exception('La tabla lr contiene valores invalidos. {}'.format(item))
                    self.tabla_lr.append(linea)
                else:
                    # Rule row: fields may be ints or symbol names.
                    for index, item in enumerate(linea):
                        try:
                            linea[index] = int(item.strip())
                        except ValueError:
                            linea[index] = item.strip()
                    if len(linea) == 1:
                        self.id_regla_int.append(linea[0])
                        self.long_reglas.append(0)
                        self.id_reglas.append(0)
                    elif len(linea) == 2:
                        self.id_regla_int.append(linea[1])
                        self.long_reglas.append(0)
                        self.id_reglas.append(linea[0])
                    elif len(linea) == 3:
                        self.id_regla_int.append(linea[0])
                        self.long_reglas.append(linea[1])
                        self.id_reglas.append(linea[2])
                    self.extra.append(linea)
class AnalizadorSintactico(ReglasSintacticas):
    """LR(1) shift/reduce parser driver over the tables loaded by
    ReglasSintacticas, building a parse tree via ``Arbol``."""

    def __init__(self):
        ReglasSintacticas.__init__(self)
        self.entradas = []       # raw (symbol, type-id, type-name) tuples
        self.arbol = Arbol()     # parse tree built during reductions

    def analizadorSintacticoEnteros(self, cadena):
        """Parse *cadena*; returns True on acceptance, False on a syntax error.

        Drives the classic LR loop: shift (positive table entry), reduce
        (entry < -1, encoded as -(rule_index + 2)), accept (-1), error (0).
        """
        self.cadena = cadena
        self.tuplas = []
        # Both stacks start with the end-marker (23 = "$") and state 0;
        # entries alternate symbol, state.
        self.pilaEnteros = [23,0]
        self.pilaCadenas = [23,0]
        self.entradasEnteros = []
        self.tipos = []
        self.entradasSimbolos = []
        self.item_selected = None
        self.salidaEntero = 0
        self.analizarSimbolos()
        self.temp_tuplas = self.tuplas.copy()
        while self.salidaEntero != -1:
            if self.temp_tuplas:
                # Remember the current token for error reporting.
                self.item_selected = self.temp_tuplas[0]
            # print(self.item_selected)
            # Table lookup: row = state on top of stack, column = next input.
            self.salidaEntero = self.tabla_lr[self.pilaEnteros[-1]][self.entradasEnteros[0]]
            if self.salidaEntero < -1:
                # Reduce by rule -(salidaEntero + 2): pop 2 entries
                # (symbol + state) per right-hand-side symbol.
                temp = []
                for i in range(self.long_reglas[-1*(self.salidaEntero+2)]*2):
                    self.pilaEnteros.pop()
                    a = self.pilaCadenas.pop()
                    if i % 2 == 1:
                        # Keep only the symbol entries (odd positions) as
                        # the children of the new tree node.
                        temp.append(a)
                self.entradasEnteros.insert(0,self.id_regla_int[-1*(self.salidaEntero+2)])
                self.entradasSimbolos.insert(0,self.id_reglas[-1*(self.salidaEntero+2)])
                self.tipos.insert(0,self.id_reglas[-1*(self.salidaEntero+2)])
                self.arbol.insertarNodo(self.tipos[0], self.entradasEnteros[0], temp)
            elif self.salidaEntero == -1:
                # Accept.
                return True
            elif self.salidaEntero == 0:
                # No table entry: syntax error at the current token.
                print('Error: La sintaxis no es correcta en el/la {} "{}"'.format(self.item_selected[2],self.item_selected[0]))
                return False
            else:
                # Shift: push symbol and new state, consume one input token.
                self.pilaEnteros.append(self.entradasEnteros[0])
                self.pilaEnteros.append(self.salidaEntero)
                self.pilaCadenas.append(self.entradasSimbolos[0])
                self.pilaCadenas.append(self.salidaEntero)
                self.entradasEnteros.pop(0)
                self.entradasSimbolos.pop(0)
                self.tipos.pop(0)
                if self.temp_tuplas:
                    self.temp_tuplas.pop(0)
        return True

    def analizarSimbolos(self):
        """Tokenize ``self.cadena`` and fill the parallel input lists."""
        # Create an AnalizadorLexico initialized with the input string.
        analizador = AnalizadorLexico(self.cadena)
        # Loop until the lexer reaches the end-of-input symbol "$".
        while analizador.caracter != "$":
            self.tuplas.append(analizador.siguienteSimbolo())
        # Tuples have the shape (Symbol:str, TypeId:int, TypeName:str);
        # append the explicit end marker.
        self.tuplas.append(("$",23,"PESO"))
        for x in range(len(self.tuplas)):
            self.entradas.append(self.tuplas[x])
            self.entradasSimbolos.append(self.tuplas[x][0])
            self.entradasEnteros.append(self.tuplas[x][1])
            self.tipos.append(self.tuplas[x][2])
| 39.618644 | 127 | 0.542888 |
c649f6b2c709f2245d3a564df48d315c9b5cab56
| 281 |
py
|
Python
|
ch9-schemas-and-documentation/posts/urls.py
|
NazarMiroshnichenko/djangoforapis_30
|
7ae64f2f7869ffd21453914de8a008d405875bcc
|
[
"MIT"
] | 6 |
2020-09-04T18:06:26.000Z
|
2021-02-27T03:37:57.000Z
|
ch9-schemas-and-documentation/posts/urls.py
|
RicardoVeronica/restapiswithdjango
|
bec24822cd8bc839395dc978b42914d3b85db99f
|
[
"MIT"
] | null | null | null |
ch9-schemas-and-documentation/posts/urls.py
|
RicardoVeronica/restapiswithdjango
|
bec24822cd8bc839395dc978b42914d3b85db99f
|
[
"MIT"
] | 3 |
2020-12-06T15:37:31.000Z
|
2021-09-09T11:07:51.000Z
|
from django.urls import path
from rest_framework.routers import SimpleRouter
from .views import UserViewSet, PostViewSet
# DRF router generating the URL patterns for both ViewSets.
router = SimpleRouter()
router.register('users', UserViewSet, basename='users')
router.register('', PostViewSet, basename='posts')
# Expose the generated patterns to Django's URL resolver.
urlpatterns = router.urls
| 25.545455 | 55 | 0.797153 |
021592573df2ea25edcdb2cb7d66476885a1ae1d
| 1,921 |
py
|
Python
|
main.py
|
theoldmoon0602/YARU
|
9b7cfc65d8890e72a0719f2aedb589f05d11221a
|
[
"MIT"
] | null | null | null |
main.py
|
theoldmoon0602/YARU
|
9b7cfc65d8890e72a0719f2aedb589f05d11221a
|
[
"MIT"
] | null | null | null |
main.py
|
theoldmoon0602/YARU
|
9b7cfc65d8890e72a0719f2aedb589f05d11221a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from dateutil.parser import parse
import os
import sys
import json
class Farenda(object):
    """A single to-do item ("farenda" is Esperanto for "thing to be done").

    ``teksto`` is the task text; ``limdato`` is an optional deadline string,
    normalized to ``YYYY/MM/DD`` via dateutil, or None when missing/unparseable.
    """

    def __init__(self, teksto, limdato=None):
        self.teksto = teksto
        try:
            self.limdato = parse(limdato).strftime('%Y/%m/%d')
        except (TypeError, ValueError, OverflowError):
            # Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. dateutil's parse() raises
            # ValueError/OverflowError for bad strings and TypeError for None.
            self.limdato = None
def datumoAlFarenda(datumo):
    """Build a Farenda from its plain-dict representation."""
    teksto = datumo['teksto']
    limdato = datumo['limdato']
    return Farenda(teksto, limdato)
def farendaAlDatumo(farenda):
    """Serialize a task object to a plain dict (text + deadline)."""
    datumo = {}
    datumo['teksto'] = farenda.teksto
    datumo['limdato'] = farenda.limdato
    return datumo
def aldoniFarendan():
    """Interactively prompt for a new task on stdin and return it as a Farenda."""
    teksto = input("teksto:")
    limdato = input("limdato:")
    nova = Farenda(teksto, limdato)
    return nova
def finiFarendan(farendaj, teksto):
    """Return a new list with every task whose text equals *teksto* removed."""
    return [farenda for farenda in farendaj if farenda.teksto != teksto]
def montriFarendajn(farendaj):
    """Print each task on its own line, appending the deadline when present."""
    for farenda in farendaj:
        linio = farenda.teksto
        if farenda.limdato is not None:
            linio += " --> limdato is {}".format(farenda.limdato)
        print(linio)
def main():
    """Entry point: load tasks, apply the command from argv, save tasks.

    No args: list tasks. ``aldoni``: prompt for a new task. Any other
    argument: remove the task(s) with that text.
    """
    farendaj = []
    # Storage file path, overridable through the YARU_FILE env variable.
    # NOTE(review): the default "~/.yaru" is never expanduser()'d, so it is
    # treated as a literal relative path -- confirm whether intended.
    vojo = os.environ.get("YARU_FILE", "~/.yaru")
    # Load the task data from the JSON file, if it exists.
    if os.path.exists(vojo):
        with open(vojo, "r") as dosiero:
            datumoj = json.load(dosiero)
            for datumo in datumoj:
                farendaj.append(datumoAlFarenda(datumo))
    if len(sys.argv) <= 1:
        # No arguments: just show the current tasks.
        montriFarendajn(farendaj)
    elif sys.argv[1] == "aldoni":
        # "aldoni" (add): prompt for a new task on stdin.
        farenda = aldoniFarendan()
        farendaj.append(farenda)
    else:
        # Any other argument is the text of a task to finish (remove).
        farendaj = finiFarendan(farendaj, sys.argv[1])
    # Save the (possibly modified) task list back to the JSON file.
    datumoj = []
    for farenda in farendaj:
        datumoj.append(farendaAlDatumo(farenda))
    with open(vojo, "w") as dosiero:
        json.dump(datumoj, dosiero, ensure_ascii=False)

main()
| 24.316456 | 71 | 0.60177 |
e7095af1686f4ebe41f5e5453541b22ee2b7ae55
| 2,130 |
py
|
Python
|
tests/test_UTILS_expand_prefixes.py
|
kcoyle/dctap-python
|
e688ed244327bc2b92d68b98a66b81d9b03cd60a
|
[
"MIT"
] | 6 |
2021-06-01T18:53:35.000Z
|
2021-12-08T14:38:01.000Z
|
tests/test_UTILS_expand_prefixes.py
|
kcoyle/dctap-python
|
e688ed244327bc2b92d68b98a66b81d9b03cd60a
|
[
"MIT"
] | 9 |
2021-06-02T08:14:38.000Z
|
2021-07-13T07:39:56.000Z
|
tests/test_UTILS_expand_prefixes.py
|
kcoyle/dctap-python
|
e688ed244327bc2b92d68b98a66b81d9b03cd60a
|
[
"MIT"
] | 3 |
2021-06-13T20:03:11.000Z
|
2021-11-21T16:25:29.000Z
|
"""Verify that string is valid as URL."""
import os
import pytest
from dctap.utils import expand_uri_prefixes
def test_utils_expand_uri_prefixes(tmp_path):
    """Expands prefixes in shapes dictionary as per config settings."""
    # Prefix map used for expansion; note ":" is the default/empty prefix.
    config_dict = {
        "default_shape_identifier": "default",
        "prefixes": { ":": "http://example.org/",
            "dcterms:": "http://purl.org/dc/terms/",
            "wdt:": "http://www.wikidata.org/prop/direct/",
            "foaf:": "http://xmlns.com/foaf/0.1/"
        }
    }
    # Input: shapeID / propertyID / valueShape use prefixed names.
    # 'valueDataType' ("xsd:string") has no matching prefix in config_dict,
    # so it is expected to pass through unchanged.
    shapes_dict = {'shapes':
        [
            {
                'shapeID': ':book',
                'statement_constraints': [
                    {
                        'propertyID': 'dcterms:creator',
                        'valueShape': ':author'
                    },
                    {
                        'propertyID': 'wdt:P1476'
                    }
                ]
            },
            {
                'shapeID': ':author',
                'statement_constraints': [
                    {
                        'propertyID': 'foaf:name',
                        'valueDataType': 'xsd:string'
                    }
                ]
            }
        ]
    }
    # Expected: every prefixed name replaced by its full URI.
    expected_output = {'shapes':
        [
            {
                'shapeID': 'http://example.org/book',
                'statement_constraints': [
                    {
                        'propertyID': 'http://purl.org/dc/terms/creator',
                        'valueShape': 'http://example.org/author'
                    },
                    {
                        'propertyID': 'http://www.wikidata.org/prop/direct/P1476'
                    }
                ]
            },
            {
                'shapeID': 'http://example.org/author',
                'statement_constraints': [
                    {
                        'propertyID': 'http://xmlns.com/foaf/0.1/name',
                        'valueDataType': 'xsd:string'
                    }
                ]
            }
        ]
    }
    assert expand_uri_prefixes(shapes_dict, config_dict) == expected_output
| 30.869565 | 81 | 0.399531 |
47ad7cc06511ed20e3c5ca8573f1639c46e75208
| 1,730 |
py
|
Python
|
setup.py
|
7starsea/shark
|
5030f576da6f5998728d80170480e68a3debfe79
|
[
"MIT"
] | null | null | null |
setup.py
|
7starsea/shark
|
5030f576da6f5998728d80170480e68a3debfe79
|
[
"MIT"
] | null | null | null |
setup.py
|
7starsea/shark
|
5030f576da6f5998728d80170480e68a3debfe79
|
[
"MIT"
] | null | null | null |
from skbuild import setup
from setuptools import find_packages
# # python setup.py install --generator "Sublime Text 2 - Unix Makefiles" -- -- -j8
# # python setup.py install -- -- -j8
package_folder = 'shark'  # NOTE(review): not referenced below -- possibly vestigial

# scikit-build setup: CMake drives the native build (cmake_source_dir),
# while setuptools handles Python packaging.
setup(
    name='shark',
    version='0.0.1',
    description='reinforcement learning project shark',
    author='Aimin Huang',
    author_email='[email protected],[email protected]',
    license='MIT',
    python_requires='>=3.6',
    packages=find_packages(exclude=("test", "test.*", "docs", "docs.*")),  # same as name
    cmake_source_dir="shark",
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',

        # Indicate who your project is intended for
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Software Development :: Libraries :: Python Modules',

        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    keywords='reinforcement learning project pytorch',
    # install_requires=[
    #     'gym>=0.15.0',
    #     'tqdm',
    #     'numpy',
    #     'tensorboard',
    #     'torch>=1.2.0',
    # ],
)
# print(find_packages())
e7eec646e08dfc37a5cb67cbf799afca137c3531
| 685 |
py
|
Python
|
tests/test_meaning.py
|
rhyspowell/pybites-tools
|
27b2808ba125735ec76108c3b321a8f463439866
|
[
"MIT"
] | null | null | null |
tests/test_meaning.py
|
rhyspowell/pybites-tools
|
27b2808ba125735ec76108c3b321a8f463439866
|
[
"MIT"
] | null | null | null |
tests/test_meaning.py
|
rhyspowell/pybites-tools
|
27b2808ba125735ec76108c3b321a8f463439866
|
[
"MIT"
] | null | null | null |
import argparse
import pytest
from pybites_tools.meaning import get_meaning, main
from unittest import mock
@pytest.mark.parametrize(
    "args, expected",
    [
        # (parsed CLI namespace, substring expected on stdout)
        (
            argparse.Namespace(origin=False, word="word"),
            "The smallest unit of language that has a particular meaning and can be expressed by itself; the smallest discrete, meaningful unit of language. (contrast morpheme.)",
        ),
        (
            argparse.Namespace(origin=True, word="word"),
            "No origin information available",
        ),
    ],
)
def test_main(args, expected, capsys):
    """main() prints the definition (or origin fallback) for the given word."""
    main(args)
    captured = capsys.readouterr()
    assert expected in captured.out
| 27.4 | 179 | 0.654015 |
9f3010fa10768268d3b98c3e1047e4721f408430
| 874 |
py
|
Python
|
ebiznes/apps/service/migrations/0004_auto_20191101_0941.py
|
khaki560/EBiznes
|
a053e329978f6ba4a7565e259d8a843a4d6530a7
|
[
"MIT"
] | null | null | null |
ebiznes/apps/service/migrations/0004_auto_20191101_0941.py
|
khaki560/EBiznes
|
a053e329978f6ba4a7565e259d8a843a4d6530a7
|
[
"MIT"
] | 1 |
2019-10-30T06:26:36.000Z
|
2019-11-20T17:07:25.000Z
|
ebiznes/apps/service/migrations/0004_auto_20191101_0941.py
|
khaki560/EBiznes
|
a053e329978f6ba4a7565e259d8a843a4d6530a7
|
[
"MIT"
] | 1 |
2019-11-12T19:09:05.000Z
|
2019-11-12T19:09:05.000Z
|
# Generated by Django 2.2.5 on 2019-11-01 09:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds three optional fields to Service
    (phone_number, service_logo, street)."""

    dependencies = [
        ('service', '0003_auto_20191028_1805'),
    ]

    operations = [
        migrations.AddField(
            model_name='service',
            name='phone_number',
            field=models.CharField(blank=True, max_length=30, null=True, verbose_name='Phone number'),
        ),
        migrations.AddField(
            model_name='service',
            name='service_logo',
            field=models.ImageField(blank=True, max_length=255, null=True, upload_to='logos/', verbose_name='Service logo'),
        ),
        migrations.AddField(
            model_name='service',
            name='street',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Street'),
        ),
    ]
| 30.137931 | 124 | 0.604119 |
377deb739f2c678b9c827669cd5f5a92ce15944c
| 6,204 |
py
|
Python
|
lib/head/non_local_embedded_gaussian.py
|
Wanger-SJTU/SegToolbox.Pytorch
|
d2ad07caa4c8ab5370b36a000472d5e019d36364
|
[
"MIT"
] | 2 |
2019-11-10T14:13:43.000Z
|
2020-02-12T18:06:48.000Z
|
lib/head/non_local_embedded_gaussian.py
|
Wanger-SJTU/SegToolbox.Pytorch
|
d2ad07caa4c8ab5370b36a000472d5e019d36364
|
[
"MIT"
] | 6 |
2019-11-06T08:20:18.000Z
|
2022-03-12T00:03:44.000Z
|
lib/head/non_local_embedded_gaussian.py
|
Wanger-SJTU/SegToolbox.Pytorch
|
d2ad07caa4c8ab5370b36a000472d5e019d36364
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Wang, X., Girshick, R., Gupta, A., & He, K. (2018).
# Non-local neural networks. CVPR
# Code from https://github.com/AlexHex7/Non-local_pytorch.git
import torch
from torch import nn
from torch.nn import functional as F
class _NonLocalBlockND(nn.Module):
    """N-dimensional non-local block (embedded Gaussian variant).

    Computes pairwise attention f = softmax(theta(x)^T phi(x)), aggregates
    g(x) with it, projects back with W and adds a residual connection.

    Args:
        in_channels: channels of the selected input tensor.
        index: which element of *inputs* in forward() is transformed
            (must be > 0; the others pass through unchanged).
        inter_channels: bottleneck channels; defaults to in_channels // 2
            (at least 1).
        dimension: 1, 2 or 3 -- selects Conv/Pool/BN flavor.
        sub_sample: if True, max-pool inside the phi and g branches.
        use_bn: if True, append BatchNorm after the output projection W.
    """

    def __init__(self, in_channels, index=4, inter_channels=None, dimension=3, sub_sample=True, use_bn=True):
        super(_NonLocalBlockND, self).__init__()

        assert dimension in [1, 2, 3]
        assert index > 0
        self.dimension = dimension
        self.index = index
        self.sub_sample = sub_sample

        self.in_channels = in_channels
        self.inter_channels = inter_channels

        if self.inter_channels is None:
            # Default bottleneck: half the input channels, minimum 1.
            self.inter_channels = in_channels // 2
            if self.inter_channels == 0:
                self.inter_channels = 1

        # Pick the convolution / pooling / batch-norm flavor matching the
        # spatial dimensionality of the input.
        if dimension == 3:
            conv_nd = nn.Conv3d
            max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
            bn = nn.BatchNorm3d
        elif dimension == 2:
            conv_nd = nn.Conv2d
            max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
            bn = nn.BatchNorm2d
        else:
            conv_nd = nn.Conv1d
            max_pool_layer = nn.MaxPool1d(kernel_size=(2))
            bn = nn.BatchNorm1d

        # g: value embedding (1x1 conv into the bottleneck).
        self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                         kernel_size=1, stride=1, padding=0)

        # W: output projection back to in_channels, optionally with BN.
        if use_bn:
            self.W = nn.Sequential(
                conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
                    kernel_size=1, stride=1, padding=0),
                bn(self.in_channels))
        else:
            self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
                             kernel_size=1, stride=1, padding=0)

        # theta / phi: query and key embeddings for the pairwise affinity.
        self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                             kernel_size=1, stride=1, padding=0)
        self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                           kernel_size=1, stride=1, padding=0)

        if sub_sample:
            # Pool only the key/value branches to cut the attention cost.
            self.g = nn.Sequential(self.g, max_pool_layer)
            self.phi = nn.Sequential(self.phi, max_pool_layer)

        # Kaiming init for convs, unit/zero init for norm layers.
        for m in self.modules():
            if isinstance(m, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, *inputs):
        '''
        Apply the non-local operation to inputs[self.index]; all other
        elements of *inputs* pass through unchanged.

        The selected tensor x is (b, c, ...spatial...), e.g. (b, c, t, h, w)
        for dimension=3. Returns the input sequence as a list with slot
        ``self.index`` replaced by the residual output z = W(y) + x.
        '''
        x = inputs[self.index]
        batch_size = x.size(0)

        # Flatten spatial dims: (b, c', N) with N = prod(spatial).
        g_x = self.g(x).view(batch_size, self.inter_channels, -1)
        g_x = g_x.permute(0, 2, 1)

        theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
        theta_x = theta_x.permute(0, 2, 1)
        phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)

        # Embedded-Gaussian affinity: softmax over the key axis.
        f = torch.matmul(theta_x, phi_x)
        f_div_C = F.softmax(f, dim=-1)

        # Attention-weighted aggregation of values, reshaped back to x's
        # spatial layout, projected, plus residual.
        y = torch.matmul(f_div_C, g_x)
        y = y.permute(0, 2, 1).contiguous()
        y = y.view(batch_size, self.inter_channels, *x.size()[2:])
        W_y = self.W(y)
        z = W_y + x

        return list(inputs[:self.index]) + [z] + list(inputs[self.index+1:])
class NONLocalBlock1D(_NonLocalBlockND):
    """1-D specialisation of the non-local block, configured from a cfg
    object (reads cfg.NONLOCAL.PARA.*)."""

    def __init__(self, cfg):
        para = cfg.NONLOCAL.PARA
        super(NONLocalBlock1D, self).__init__(
            para.in_channels,
            index=para.index,
            inter_channels=para.inter_channels,
            dimension=1,
            sub_sample=para.sub_sample,
            use_bn=para.use_bn)
class NONLocalBlock2D(_NonLocalBlockND):
    """2-D specialisation of the non-local block, configured from a cfg
    object (reads cfg.NONLOCAL.PARA.*)."""

    def __init__(self, cfg):
        para = cfg.NONLOCAL.PARA
        super(NONLocalBlock2D, self).__init__(
            para.in_channels,
            index=para.index,
            inter_channels=para.inter_channels,
            dimension=2,
            sub_sample=para.sub_sample,
            use_bn=para.use_bn)
class NONLocalBlock3D(_NonLocalBlockND):
    """3-D specialisation of the non-local block, configured from a cfg
    object (reads cfg.NONLOCAL.PARA.*)."""

    def __init__(self, cfg):
        para = cfg.NONLOCAL.PARA
        super(NONLocalBlock3D, self).__init__(
            para.in_channels,
            index=para.index,
            inter_channels=para.inter_channels,
            dimension=3,
            sub_sample=para.sub_sample,
            use_bn=para.use_bn)
if __name__ == '__main__':
    # Smoke test. The NONLocalBlock*D wrappers take a single cfg object
    # (cfg.NONLOCAL.PARA.*), so build a minimal stand-in here -- the old
    # calls like NONLocalBlock1D(3, sub_sample=..., use_bn=...) no longer
    # match the constructors and raised at runtime.
    from types import SimpleNamespace

    import torch

    def _make_cfg(in_channels, sub_sample, use_bn):
        """Minimal namespace mimicking cfg.NONLOCAL.PARA as the wrappers read it."""
        para = SimpleNamespace(index=1, in_channels=in_channels,
                               inter_channels=None, sub_sample=sub_sample,
                               use_bn=use_bn)
        return SimpleNamespace(NONLOCAL=SimpleNamespace(PARA=para))

    sub_sample = True
    use_bn = True
    cfg = _make_cfg(3, sub_sample, use_bn)

    # forward() transforms inputs[index] (index must be > 0), so pass the
    # tensor twice and read slot 1 of the returned list.
    img = torch.zeros(2, 3, 20)
    net = NONLocalBlock1D(cfg)
    out = net(img, img)[1]
    print(out.size())

    img = torch.zeros(2, 3, 20, 20)
    net = NONLocalBlock2D(cfg)
    out = net(img, img)[1]
    print(out.size())

    img = torch.randn(2, 3, 10, 20, 20)
    net = NONLocalBlock3D(cfg)
    out = net(img, img)[1]
    print(out.size())
19d5df97189e576d911461abf2fc3cb1d40767a9
| 225 |
py
|
Python
|
teste-py3.8/src/Mundo1/aula3/desafios/desafio3.py
|
NicolasMCP/py
|
37c073de01db100afe325fe08b2b56bc9c8a6020
|
[
"MIT"
] | null | null | null |
teste-py3.8/src/Mundo1/aula3/desafios/desafio3.py
|
NicolasMCP/py
|
37c073de01db100afe325fe08b2b56bc9c8a6020
|
[
"MIT"
] | null | null | null |
teste-py3.8/src/Mundo1/aula3/desafios/desafio3.py
|
NicolasMCP/py
|
37c073de01db100afe325fe08b2b56bc9c8a6020
|
[
"MIT"
] | null | null | null |
# Nícolas Ramos
# desenvolvido para ser igual ao pedido no desafio
print('====== DESAFIO 1 ======')
primeiro = int(input('Primeiro número '))
segundo = int(input('Segundo número '))
print(f'A soma é {primeiro + segundo}')
| 22.5 | 50 | 0.671111 |
7e00177fcf7aaa0b3e0bb54ce9178ef56a6196f6
| 691 |
py
|
Python
|
shares/facebook.py
|
cuteredcat/gator
|
7e9183b4fb33f203f38aa512228305a7a4e089d6
|
[
"MIT"
] | null | null | null |
shares/facebook.py
|
cuteredcat/gator
|
7e9183b4fb33f203f38aa512228305a7a4e089d6
|
[
"MIT"
] | null | null | null |
shares/facebook.py
|
cuteredcat/gator
|
7e9183b4fb33f203f38aa512228305a7a4e089d6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from gator import app
from shares import SocialNetwork
class Facebook(SocialNetwork):
def __init__(self):
self.name = "facebook"
self.check_interval = app.config["FACEBOOK_CHECK_INTERVAL"]
def get(self, link):
separator = "?"
if "?" in link:
separator = "&"
json = self.json("https://graph.facebook.com/v2.5/%s%sfields=share&access_token=%s" % (link, separator, app.config["FACEBOOK_ACCESS_TOKEN"]))
#print link, json
if json and "share" in json:
return json["share"]["comment_count"] + json["share"]["share_count"]
else:
return 0
| 27.64 | 149 | 0.602026 |
11a66a6bae297dae084e783ec637e8fbd9bfe7a8
| 5,962 |
py
|
Python
|
examples/CSCI-4968-MBE/challenges/crackme0x04/solve.py
|
FantasqueX/angr-doc
|
0db1ed8d34930197411774147b80da1948955897
|
[
"BSD-2-Clause"
] | null | null | null |
examples/CSCI-4968-MBE/challenges/crackme0x04/solve.py
|
FantasqueX/angr-doc
|
0db1ed8d34930197411774147b80da1948955897
|
[
"BSD-2-Clause"
] | null | null | null |
examples/CSCI-4968-MBE/challenges/crackme0x04/solve.py
|
FantasqueX/angr-doc
|
0db1ed8d34930197411774147b80da1948955897
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: David Manouchehri <[email protected]>
# Modern Binary Exploitation
# http://security.cs.rpi.edu/courses/binexp-spring2015/
import angr
import subprocess
# from IPython import embed # pop iPython at the end
def main():
proj = angr.Project('crackme0x04', load_options={"auto_load_libs": False})
cfg = proj.analyses.CFG()
FIND_ADDR = cfg.kb.functions.function(name="exit").addr
AVOID_ADDR = 0x080484fb # dword [esp] = str.Password_Incorrect__n ; [0x8048649:4]=0x73736150 LEA str.Password_Incorrect__n ; "Password Incorrect!." @ 0x8048649
sm = proj.factory.simulation_manager()
sm.explore(find=FIND_ADDR, avoid=AVOID_ADDR)
# embed()
#print(sm.found[0].posix.dumps(1))
return sm.found[0].posix.dumps(0) # .lstrip('+0').rstrip('B')
def test():
    """Regression check: feed the solver's input to the real binary and
    confirm it prints the success banner."""
    # it SHOULD just be 96 but the way angr models scanf means that it could technically be any number of formats
    # so we gotta check against ground truth
    with open('input', 'wb') as fp:
        fp.write(main())
    assert subprocess.check_output('./crackme0x04 < input', shell=True) == b'IOLI Crackme Level 0x04\nPassword: Password OK!\n'
if __name__ == '__main__':
    # repr() so non-printable stdin bytes are visible on the console.
    print(repr(main()))
"""
[0x080483d0]> pdf @ main
;-- main:
╒ (fcn) sym.main 92
│ ; var int local_78h @ ebp-0x78
│ ; var int arg_4h @ esp+0x4
│ ; UNKNOWN XREF from 0x080483e8 (entry0)
│ ; DATA XREF from 0x080483e7 (entry0)
│ 0x08048509 55 push ebp
│ 0x0804850a 89e5 ebp = esp
│ 0x0804850c 81ec88000000 esp -= 0x88
│ 0x08048512 83e4f0 esp &= 0xfffffff0
│ 0x08048515 b800000000 eax = 0
│ 0x0804851a 83c00f eax += 0xf
│ 0x0804851d 83c00f eax += 0xf
│ 0x08048520 c1e804 eax >>>= 4
│ 0x08048523 c1e004 eax <<<= 4
│ 0x08048526 29c4 esp -= eax
│ 0x08048528 c704245e8604. dword [esp] = str.IOLI_Crackme_Level_0x04_n ; [0x804865e:4]=0x494c4f49 LEA str.IOLI_Crackme_Level_0x04_n ; "IOLI Crackme Level 0x04." @ 0x804865e
│ 0x0804852f e860feffff sym.imp.printf ()
│ 0x08048534 c70424778604. dword [esp] = str.Password: ; [0x8048677:4]=0x73736150 LEA str.Password: ; "Password: " @ 0x8048677
│ 0x0804853b e854feffff sym.imp.printf ()
│ 0x08048540 8d4588 eax = [ebp - local_78h]
│ 0x08048543 89442404 dword [esp + arg_4h] = eax
│ 0x08048547 c70424828604. dword [esp] = 0x8048682 ; [0x8048682:4]=0x7325 ; "%s" @ 0x8048682
│ 0x0804854e e821feffff sym.imp.scanf ()
│ 0x08048553 8d4588 eax = [ebp - local_78h]
│ 0x08048556 890424 dword [esp] = eax
│ 0x08048559 e826ffffff sym.check ()
│ 0x0804855e b800000000 eax = 0
│ 0x08048563 c9
╘ 0x08048564 c3
[0x080483d0]> pdf @ sym.check
╒ (fcn) sym.check 133
│ ; arg int arg_8h @ ebp+0x8
│ ; arg int arg_fh @ ebp+0xf
│ ; arg int arg_13h @ ebp+0x13
│ ; var int arg_4h @ esp+0x4
│ ; var int arg_8h @ esp+0x8
│ ; CALL XREF from 0x08048559 (sym.main)
│ 0x08048484 55 push ebp
│ 0x08048485 89e5 ebp = esp
│ 0x08048487 83ec28 esp -= 0x28
│ 0x0804848a c745f8000000. dword [ebp - local_8h] = 0
│ 0x08048491 c745f4000000. dword [ebp - local_ch] = 0
│ ; JMP XREF from 0x080484f9 (sym.check)
│ ┌─> 0x08048498 8b4508 eax = dword [ebp + arg_8h] ; [0x8:4]=0
│ │ 0x0804849b 890424 dword [esp] = eax
│ │ 0x0804849e e8e1feffff sym.imp.strlen ()
│ │ 0x080484a3 3945f4 if (dword [ebp - local_ch] == eax ; [0x13:4]=256
│ ┌──< 0x080484a6 7353 jae 0x80484fb
│ ││ 0x080484a8 8b45f4 eax = dword [ebp - local_ch]
│ ││ 0x080484ab 034508 eax += dword [ebp + arg_8h]
│ ││ 0x080484ae 0fb600 eax = byte [eax]
│ ││ 0x080484b1 8845f3 byte [ebp - local_dh] = al
│ ││ 0x080484b4 8d45fc eax = [ebp - local_4h]
│ ││ 0x080484b7 89442408 dword [esp + arg_8h] = eax
│ ││ 0x080484bb c74424043886. dword [esp + arg_4h] = 0x8048638 ; [0x8048638:4]=0x50006425 ; "%d" @ 0x8048638
│ ││ 0x080484c3 8d45f3 eax = [ebp - local_dh]
│ ││ 0x080484c6 890424 dword [esp] = eax
│ ││ 0x080484c9 e8d6feffff sym.imp.sscanf ()
│ ││ 0x080484ce 8b55fc edx = dword [ebp - local_4h]
│ ││ 0x080484d1 8d45f8 eax = [ebp - local_8h]
│ ││ 0x080484d4 0110 dword [eax] += edx
│ ││ 0x080484d6 837df80f if (dword [ebp - local_8h] == 0xf ; [0xf:4]=0x3000200
│ ┌───< 0x080484da 7518 notZero 0x80484f4)
│ │││ 0x080484dc c704243b8604. dword [esp] = str.Password_OK__n ; [0x804863b:4]=0x73736150 LEA str.Password_OK__n ; "Password OK!." @ 0x804863b
│ │││ 0x080484e3 e8acfeffff sym.imp.printf ()
│ │││ 0x080484e8 c70424000000. dword [esp] = 0
│ │││ 0x080484ef e8c0feffff sym.imp.exit ()
│ └───> 0x080484f4 8d45f4 eax = [ebp - local_ch]
│ ││ 0x080484f7 ff00 dword [eax]++
│ │└─< 0x080484f9 eb9d goto 0x8048498
│ └──> 0x080484fb c70424498604. dword [esp] = str.Password_Incorrect__n ; [0x8048649:4]=0x73736150 LEA str.Password_Incorrect__n ; "Password Incorrect!." @ 0x8048649
│ 0x08048502 e88dfeffff sym.imp.printf ()
│ 0x08048507 c9
╘ 0x08048508 c3
"""
| 51.396552 | 188 | 0.553673 |
3b0b6b0cd4f28796406bd1ab7ac4d1f1088363c5
| 276 |
py
|
Python
|
kivy_ios/recipes/pyobjus/__init__.py
|
Terunami/kivy-ios-test
|
d128db448b6b5a9ca320a4d06f8d4cd094a08897
|
[
"MIT"
] | 548 |
2015-01-02T23:04:29.000Z
|
2022-03-21T11:24:59.000Z
|
kivy_ios/recipes/pyobjus/__init__.py
|
Terunami/kivy-ios-test
|
d128db448b6b5a9ca320a4d06f8d4cd094a08897
|
[
"MIT"
] | 511 |
2015-01-07T08:07:42.000Z
|
2022-03-31T18:22:07.000Z
|
kivy_ios/recipes/pyobjus/__init__.py
|
Terunami/kivy-ios-test
|
d128db448b6b5a9ca320a4d06f8d4cd094a08897
|
[
"MIT"
] | 258 |
2015-02-01T21:42:40.000Z
|
2022-03-09T21:14:00.000Z
|
from kivy_ios.toolchain import CythonRecipe
class PyobjusRecipe(CythonRecipe):
    """kivy-ios build recipe: fetch and build the pyobjus Cython library."""
    version = "master"
    # {version} is substituted by the toolchain when downloading.
    url = "https://github.com/kivy/pyobjus/archive/{version}.zip"
    library = "libpyobjus.a"
    depends = ["python"]
    # NOTE(review): presumably tells the toolchain to run build_ext before
    # the main build — confirm against CythonRecipe.
    pre_build_ext = True

recipe = PyobjusRecipe()
| 21.230769 | 65 | 0.702899 |
87efccafeb6d4785b6946780462e004b0b999eb7
| 271 |
py
|
Python
|
EjemplosClase/while.py
|
fwchj/cursoABMPythonPublic
|
b19215c2de8c414f68ab99d1aaaccc9db454870b
|
[
"MIT"
] | 2 |
2019-12-17T01:54:10.000Z
|
2020-02-13T18:25:29.000Z
|
EjemplosClase/while.py
|
fwchj/cursoABMPythonPublic
|
b19215c2de8c414f68ab99d1aaaccc9db454870b
|
[
"MIT"
] | null | null | null |
EjemplosClase/while.py
|
fwchj/cursoABMPythonPublic
|
b19215c2de8c414f68ab99d1aaaccc9db454870b
|
[
"MIT"
] | 4 |
2019-08-30T00:08:55.000Z
|
2021-03-19T18:25:05.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 27 19:09:43 2019
@author: florian
"""
# Print the integers 1 through 14, one per line.
i = 1
while i < 15:
    print(i)
    i += 1

print("--------")

# Print the powers of two 2**1 .. 2**6 and accumulate their sum (126).
suma = 0
for exponent in range(1, 7):
    x = 2 ** exponent
    print(x)
    suma += x

print("--------")
print(suma)
| 14.263158 | 36 | 0.516605 |
bfb49e732275c3ace4fa395b0a774637372963af
| 428 |
py
|
Python
|
src/read_file.py
|
xryuseix/proofreader
|
b4c523d05324cf771acee688d51cfea8d6d6d114
|
[
"MIT"
] | null | null | null |
src/read_file.py
|
xryuseix/proofreader
|
b4c523d05324cf771acee688d51cfea8d6d6d114
|
[
"MIT"
] | 5 |
2020-04-28T18:13:26.000Z
|
2020-05-17T19:09:42.000Z
|
src/read_file.py
|
xryuseix/Proofreader
|
b4c523d05324cf771acee688d51cfea8d6d6d114
|
[
"MIT"
] | null | null | null |
import csv
def readFile(path, is_csv=False):
    """Read *path* and return its contents.

    Parameters
    ----------
    path : str or os.PathLike
        File to read.
    is_csv : bool, optional
        When True, parse the file as CSV and return a list of
        ``[value, first_column]`` pairs — one pair for every column after
        the first in each row. When False (default), return the file's
        full text.

    Returns
    -------
    list[list[str]] if ``is_csv`` else str
    """
    if is_csv:
        # newline='' is required by the csv module so that quoted fields
        # containing embedded newlines are parsed correctly.
        with open(path, newline='') as f:
            word_list = []
            for row in csv.reader(f):
                if not row:
                    continue  # skip blank lines defensively
                first = row[0]
                # Pair every subsequent column with the row's first column.
                for value in row[1:]:
                    word_list.append([value, first])
            return word_list
    with open(path) as f:
        return f.read()
| 21.4 | 53 | 0.450935 |
831ba16b05c75f367f61f23e6eb2a237d1710661
| 4,930 |
py
|
Python
|
opensanctions/crawlers/everypolitician.py
|
opensanctions/opennames
|
39675797b0e70e71f54edff2b8e623e23aef9c15
|
[
"MIT"
] | 3 |
2017-04-14T21:27:07.000Z
|
2017-04-25T14:57:22.000Z
|
opensanctions/crawlers/everypolitician.py
|
opensanctions/opennames
|
39675797b0e70e71f54edff2b8e623e23aef9c15
|
[
"MIT"
] | null | null | null |
opensanctions/crawlers/everypolitician.py
|
opensanctions/opennames
|
39675797b0e70e71f54edff2b8e623e23aef9c15
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from typing import Dict, Optional, Set
from opensanctions.core import Context
from opensanctions import helpers as h
def crawl(context: Context):
    """Entry point: fetch the EveryPolitician country index and crawl each
    country's legislatures."""
    data = context.fetch_json(context.dataset.data.url)
    for country in data:
        for legislature in country.get("legislatures", []):
            # lower-cased ISO-style country code used as the nationality tag
            code = country.get("code").lower()
            context.log.info("Country: %s" % code)
            crawl_legislature(context, code, legislature)
def crawl_legislature(context: Context, country, legislature):
    """Fetch one legislature's Popolo JSON and emit its people and
    memberships.

    Builds lookup maps (person id -> emitted entity id, org id -> name,
    event id -> event) before processing memberships, which reference all
    three.
    """
    # lastmod is a unix timestamp; used to stamp emitted persons
    lastmod_ = int(legislature.get("lastmod"))
    lastmod = datetime.utcfromtimestamp(lastmod_)
    url = legislature.get("popolo_url")
    # this isn't being updated, hence long interval:
    data = context.fetch_json(url, cache_days=30)
    persons: Dict[str, Optional[str]] = {}
    for person in data.pop("persons", []):
        pid = person.get("id")
        # value may be None when the person was skipped (no name / cutoff)
        persons[pid] = parse_person(context, person, country, lastmod)
    organizations: Dict[str, Optional[str]] = {}
    for org in data.pop("organizations", []):
        org_id = org.pop("id", None)
        # dataset-level lookup may remap or drop organization ids
        org_id = context.lookup_value("org_id", org_id, org_id)
        if org_id is None:
            continue
        name = org.pop("name", org.pop("sort_name", None))
        organizations[org_id] = name
    events = data.pop("events", [])
    events = {e.get("id"): e for e in events}
    for membership in data.pop("memberships", []):
        parse_membership(context, membership, persons, organizations, events)
def parse_person(context: Context, data, country, lastmod):
    """Convert one Popolo ``person`` record into an emitted Person entity.

    Returns the emitted entity id, or ``None`` when the record has no
    usable name or is filtered out by the person cutoff check.
    """
    person_id = data.pop("id", None)
    person = context.make("Person")
    person.id = context.make_slug(person_id)
    person.add("nationality", country)
    name = data.get("name")
    # skip records with no real name (the source uses the literal "unknown")
    if name is None or name.lower().strip() in ("unknown",):
        return
    person.add("modifiedAt", lastmod.date())
    person.add("name", data.pop("name", None))
    person.add("alias", data.pop("sort_name", None))
    for other in data.pop("other_names", []):
        person.add("alias", other.get("name"))
    person.add("gender", data.pop("gender", None))
    person.add("title", data.pop("honorific_prefix", None))
    person.add("title", data.pop("honorific_suffix", None))
    person.add("firstName", data.pop("given_name", None))
    person.add("lastName", data.pop("family_name", None))
    person.add("fatherName", data.pop("patronymic_name", None))
    person.add("birthDate", data.pop("birth_date", None))
    person.add("deathDate", data.pop("death_date", None))
    person.add("email", h.clean_emails(data.pop("email", None)))
    person.add("notes", data.pop("summary", None))
    # every crawled person is tagged as a politically exposed person
    person.add("topics", "role.pep")
    for link in data.pop("links", []):
        url = link.get("url")
        if link.get("note") in ("website", "blog", "twitter", "facebook"):
            person.add("website", url)
        # elif "Wikipedia (" in link.get("note") and "wikipedia.org" in url:
        #     person.add("wikipediaUrl", url)
        # elif "wikipedia" in link.get("note") and "wikipedia.org" in url:
        #     person.add("wikipediaUrl", url)
        # else:
        #     person.log.info("Unknown URL", url=url, note=link.get("note"))
    for ident in data.pop("identifiers", []):
        identifier = ident.get("identifier")
        scheme = ident.get("scheme")
        # only accept well-formed wikidata QIDs
        if scheme == "wikidata" and identifier.startswith("Q"):
            person.add("wikidataId", identifier)
    for contact_detail in data.pop("contact_details", []):
        value = contact_detail.get("value")
        if "email" == contact_detail.get("type"):
            person.add("email", h.clean_emails(value))
        if "phone" == contact_detail.get("type"):
            person.add("phone", h.clean_phones(value))
    # drop persons filtered by the shared cutoff rule (see helpers)
    if h.check_person_cutoff(person):
        return
    # data.pop("image", None)
    # data.pop("images", None)
    # if len(data):
    #     pprint(data)
    context.emit(person, target=True)
    # entities[person_id] = person.id
    return person.id
def parse_membership(context: Context, data, persons, organizations, events):
    """Attach a position to an already-emitted Person for one Popolo
    ``membership`` record.

    Skips the record unless both the person and the organization resolved
    earlier (see crawl_legislature).
    """
    person_id = persons.get(data.pop("person_id", None))
    org_name = organizations.get(data.pop("organization_id", None))
    if person_id and org_name:
        # membership dates fall back to the legislative period's dates
        period_id = data.get("legislative_period_id")
        period = events.get(period_id, {})
        comment = data.pop("role", None)
        comment = comment or period.get("name")
        starts = [data.get("start_date"), period.get("start_date")]
        ends = [data.get("end_date"), period.get("end_date")]
        # for source in data.get("sources", []):
        #    membership.add("sourceUrl", source.get("url"))
        position = h.make_position(org_name, comment, starts, ends, [])
        # re-make the Person stub so the position merges onto the emitted one
        person = context.make("Person")
        person.id = person_id
        person.add("position", position)
        context.emit(person, target=True)
| 38.515625 | 77 | 0.628195 |
4b161f7290894aa68ba291885c434e6daac19228
| 3,734 |
bzl
|
Python
|
tests/unit/proxy_test.bzl
|
SeleniumHQ/rules_jvm_external
|
906875b0d5eaaf61a8ca2c9c3835bde6f435d011
|
[
"Apache-2.0"
] | 3 |
2017-12-01T19:46:36.000Z
|
2018-03-27T16:55:21.000Z
|
tests/unit/proxy_test.bzl
|
SeleniumHQ/rules_jvm_external
|
906875b0d5eaaf61a8ca2c9c3835bde6f435d011
|
[
"Apache-2.0"
] | 3 |
2018-01-23T22:52:54.000Z
|
2018-04-12T12:57:46.000Z
|
tests/unit/proxy_test.bzl
|
SeleniumHQ/rules_jvm_external
|
906875b0d5eaaf61a8ca2c9c3835bde6f435d011
|
[
"Apache-2.0"
] | 5 |
2017-12-04T15:57:04.000Z
|
2018-04-05T09:49:32.000Z
|
load("@bazel_skylib//lib:unittest.bzl", "asserts", "unittest")
load("//private:proxy.bzl", "get_java_proxy_args")
# Empty or None proxy settings must produce no JVM proxy arguments.
def _java_proxy_parsing_empty_test_impl(ctx):
    env = unittest.begin(ctx)
    asserts.equals(env, [], get_java_proxy_args("", "", ""))
    asserts.equals(env, [], get_java_proxy_args(None, None, None))
    return unittest.end(env)

java_proxy_parsing_empty_test = unittest.make(_java_proxy_parsing_empty_test_impl)
# host:port values without a URL scheme still map to the JVM proxy flags.
def _java_proxy_parsing_no_scheme_test_impl(ctx):
    env = unittest.begin(ctx)
    asserts.equals(
        env,
        [
            "-Dhttp.proxyHost=localhost",
            "-Dhttp.proxyPort=8888",
            "-Dhttps.proxyHost=localhost",
            "-Dhttps.proxyPort=8843",
            "-Dhttp.nonProxyHosts='google.com'",
        ],
        get_java_proxy_args("localhost:8888", "localhost:8843", "google.com"),
    )
    return unittest.end(env)

java_proxy_parsing_no_scheme_test = unittest.make(_java_proxy_parsing_no_scheme_test_impl)
# URLs without credentials must not emit proxyUser/proxyPassword flags.
def _java_proxy_parsing_no_user_test_impl(ctx):
    env = unittest.begin(ctx)
    asserts.equals(
        env,
        [
            "-Dhttp.proxyHost=example.com",
            "-Dhttp.proxyPort=80",
            "-Dhttps.proxyHost=secureexample.com",
            "-Dhttps.proxyPort=443",
            "-Dhttp.nonProxyHosts='google.com'",
        ],
        get_java_proxy_args("http://example.com:80", "https://secureexample.com:443", "google.com"),
    )
    return unittest.end(env)

java_proxy_parsing_no_user_test = unittest.make(_java_proxy_parsing_no_user_test_impl)
# URLs without an explicit port must omit the proxyPort flags entirely.
def _java_proxy_parsing_no_port_test_impl(ctx):
    env = unittest.begin(ctx)
    asserts.equals(
        env,
        [
            "-Dhttp.proxyHost=example.com",
            "-Dhttps.proxyHost=secureexample.com",
            "-Dhttp.nonProxyHosts='google.com'",
        ],
        get_java_proxy_args("http://example.com", "https://secureexample.com", "google.com"),
    )
    return unittest.end(env)

java_proxy_parsing_no_port_test = unittest.make(_java_proxy_parsing_no_port_test_impl)
# A trailing slash on the proxy URL must not leak into the host flag.
def _java_proxy_parsing_trailing_slash_test_impl(ctx):
    env = unittest.begin(ctx)
    asserts.equals(
        env,
        [
            "-Dhttp.proxyHost=example.com",
            "-Dhttp.proxyPort=80",
            "-Dhttps.proxyHost=secureexample.com",
            "-Dhttps.proxyPort=443",
            "-Dhttp.nonProxyHosts='google.com'",
        ],
        get_java_proxy_args("http://example.com:80", "https://secureexample.com:443/", "google.com"),
    )
    return unittest.end(env)

java_proxy_parsing_trailing_slash_test = unittest.make(_java_proxy_parsing_trailing_slash_test_impl)
# Full URLs (credentials + host + port) plus a multi-entry no-proxy list:
# user/password flags appear and no-proxy hosts are joined with '|'.
def _java_proxy_parsing_all_test_impl(ctx):
    env = unittest.begin(ctx)
    asserts.equals(
        env,
        [
            "-Dhttp.proxyUser=user1",
            "-Dhttp.proxyPassword=pass1",
            "-Dhttp.proxyHost=example.com",
            "-Dhttp.proxyPort=80",
            "-Dhttps.proxyUser=user2",
            "-Dhttps.proxyPassword=pass2",
            "-Dhttps.proxyHost=secureexample.com",
            "-Dhttps.proxyPort=443",
            "-Dhttp.nonProxyHosts='google.com|localhost'",
        ],
        get_java_proxy_args("http://user1:pass1@example.com:80", "https://user2:pass2@secureexample.com:443", "google.com,localhost"),
    )
    return unittest.end(env)

java_proxy_parsing_all_test = unittest.make(_java_proxy_parsing_all_test_impl)
# Aggregates all proxy-parsing tests into one suite for the BUILD file.
def proxy_test_suite():
    unittest.suite(
        "proxy_tests",
        java_proxy_parsing_empty_test,
        java_proxy_parsing_no_scheme_test,
        java_proxy_parsing_no_user_test,
        java_proxy_parsing_no_port_test,
        java_proxy_parsing_trailing_slash_test,
        java_proxy_parsing_all_test,
    )
| 34.256881 | 134 | 0.654526 |
895da9d71112ee309dc00c26dd4c1d87577f5c73
| 5,716 |
py
|
Python
|
T2GEORES/lloyd_relaxation.py
|
jejimenezm/T2GEORES
|
9cb7392ff5d720d06d3b82426a6074b179e073ac
|
[
"MIT"
] | null | null | null |
T2GEORES/lloyd_relaxation.py
|
jejimenezm/T2GEORES
|
9cb7392ff5d720d06d3b82426a6074b179e073ac
|
[
"MIT"
] | null | null | null |
T2GEORES/lloyd_relaxation.py
|
jejimenezm/T2GEORES
|
9cb7392ff5d720d06d3b82426a6074b179e073ac
|
[
"MIT"
] | null | null | null |
from scipy.spatial import Voronoi
import numpy as np
class Field():
    '''
    Create a Voronoi map that can be used to run Lloyd
    relaxation on an array of 2D points. For background,
    see: https://en.wikipedia.org/wiki/Lloyd%27s_algorithm
    '''

    def __init__(self, *args, **kwargs):
        '''
        Store the points and bounding box of the points to which
        Lloyd relaxation will be applied.
        @param np.array `arr`: a numpy array with shape n, 2, where n
          is the number of 2D points to be moved
        @kwarg bool `constrain`: when True (default), points, Voronoi
          vertices and centroids are clamped to the input bounding box
        '''
        arr = args[0]
        if not isinstance(arr, np.ndarray) or arr.shape[1] != 2:
            raise Exception('Please provide a numpy array with shape n,2')
        self.points = arr
        # find the bounding box of the input data
        self.domains = self.get_domains(arr)
        # the four corner points of the bounding box
        # NOTE(review): the original comment claimed this deduplicates
        # points, but get_bb_points only returns the box corners
        self.bb_points = self.get_bb_points(arr)
        self.constrain = kwargs.get('constrain', True)
        self.build_voronoi()

    def constrain_points(self):
        '''
        Clamp (in place) any points that have drifted beyond the
        boundaries of this space.
        '''
        for point in self.points:
            if point[0] < self.domains['x']['min']: point[0] = self.domains['x']['min']
            if point[0] > self.domains['x']['max']: point[0] = self.domains['x']['max']
            if point[1] < self.domains['y']['min']: point[1] = self.domains['y']['min']
            if point[1] > self.domains['y']['max']: point[1] = self.domains['y']['max']

    def get_domains(self, arr):
        '''
        Return a dict with the x and y min/max domains of `arr`.
        '''
        x = arr[:, 0]
        y = arr[:, 1]
        return {
            'x': {
                'min': min(x),
                'max': max(x),
            },
            'y': {
                'min': min(y),
                'max': max(y),
            }
        }

    def get_bb_points(self, arr):
        '''
        Given an array of 2D points, return the four vertices of its
        bounding box.
        '''
        return np.array([
            [self.domains['x']['min'], self.domains['y']['min']],
            [self.domains['x']['max'], self.domains['y']['min']],
            [self.domains['x']['min'], self.domains['y']['max']],
            [self.domains['x']['max'], self.domains['y']['max']],
        ])

    def build_voronoi(self):
        '''
        Build a voronoi map from self.points. For background on
        self.voronoi attributes, see: https://docs.scipy.org/doc/scipy/
        reference/generated/scipy.spatial.Voronoi.html
        '''
        # build the voronoi tessellation map
        self.voronoi = Voronoi(self.points, qhull_options='Qbb Qc Qx')
        # constrain voronoi vertices within bounding box; this mutates
        # the scipy Voronoi object's vertex array in place
        if self.constrain:
            for idx, vertex in enumerate(self.voronoi.vertices):
                x, y = vertex
                if x < self.domains['x']['min']:
                    self.voronoi.vertices[idx][0] = self.domains['x']['min']
                if x > self.domains['x']['max']:
                    self.voronoi.vertices[idx][0] = self.domains['x']['max']
                if y < self.domains['y']['min']:
                    self.voronoi.vertices[idx][1] = self.domains['y']['min']
                if y > self.domains['y']['max']:
                    self.voronoi.vertices[idx][1] = self.domains['y']['max']

    def find_centroid(self, vertices):
        '''
        Find the centroid of a Voroni region described by `vertices`,
        and return a np array with the x and y coords of that centroid.
        The equation for the method used here to find the centroid of a
        2D polygon is given here: https://en.wikipedia.org/wiki/
        Centroid#Of_a_polygon
        @params: np.array `vertices` a numpy array with shape n,2,
          describing a CLOSED polygon (first vertex repeated at the end,
          as done by relax() below)
        @returns np.array a numpy array that defines the x, y coords
          of the centroid described by `vertices`
        '''
        area = 0
        centroid_x = 0
        centroid_y = 0
        # shoelace-style accumulation over consecutive vertex pairs
        for i in range(len(vertices)-1):
            step = (vertices[i  , 0] * vertices[i+1, 1]) - \
                   (vertices[i+1, 0] * vertices[i  , 1])
            area += step
            centroid_x += (vertices[i, 0] + vertices[i+1, 0]) * step
            centroid_y += (vertices[i, 1] + vertices[i+1, 1]) * step
        area /= 2
        # prevent division by zero - equation linked above
        if area == 0: area += 0.0000001
        centroid_x = (1.0/(6.0*area)) * centroid_x
        centroid_y = (1.0/(6.0*area)) * centroid_y
        # prevent centroids from escaping bounding box
        if self.constrain:
            if centroid_x < self.domains['x']['min']: centroid_x = self.domains['x']['min']
            if centroid_x > self.domains['x']['max']: centroid_x = self.domains['x']['max']
            if centroid_y < self.domains['y']['min']: centroid_y = self.domains['y']['min']
            if centroid_y > self.domains['y']['max']: centroid_y = self.domains['y']['max']
        return np.array([centroid_x, centroid_y])

    def relax(self):
        '''
        Moves each point to the centroid of its cell in the voronoi
        map to "relax" the points (i.e. jitter the points so as
        to spread them out within the space).
        '''
        centroids = []
        for idx in self.voronoi.point_region:
            # the region is a series of indices into self.voronoi.vertices
            # remove point at infinity, designated by index -1
            region = [i for i in self.voronoi.regions[idx] if i != -1]
            # enclose the polygon by repeating the first vertex at the end
            region = region + [region[0]]
            # get the vertices for this region
            verts = self.voronoi.vertices[region]
            # find the centroid of those vertices
            centroids.append(self.find_centroid(verts))
        self.points = np.array(centroids)
        self.constrain_points()
        # rebuild the tessellation from the relaxed point set
        self.build_voronoi()

    def get_points(self):
        '''
        Return the input points in the new projected positions
        @returns np.array a numpy array that contains the same number
          of observations in the input points, in identical order
        '''
        return self.points
| 36.407643 | 85 | 0.618964 |
d4b44deee3d473b7d8fedd3e1c5d54dfc2050ff2
| 230 |
py
|
Python
|
references/api/serializers.py
|
varianks/django_patronus3
|
3369d02b75b017af3f14f5ee45b292b39b5782c2
|
[
"MIT"
] | null | null | null |
references/api/serializers.py
|
varianks/django_patronus3
|
3369d02b75b017af3f14f5ee45b292b39b5782c2
|
[
"MIT"
] | 9 |
2021-03-30T13:37:06.000Z
|
2022-03-12T00:35:25.000Z
|
references/api/serializers.py
|
varianks/django_patronus3
|
3369d02b75b017af3f14f5ee45b292b39b5782c2
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from ..models import Reference
class ReferenceSerializer(serializers.ModelSerializer):
    """DRF serializer exposing Reference records through the REST API."""
    class Meta:
        model = Reference
        # fields exposed on the API payload
        fields = ['id', 'title', 'description', 'author', 'link']
| 32.857143 | 61 | 0.726087 |
a0ce24c186c9c40273aac124ad3c4f586d17aff0
| 5,256 |
py
|
Python
|
src/res2net.py
|
weijun88/SANet
|
68da18e4a682cfe8f4e9f679bd52fee07f3f562b
|
[
"MIT"
] | 19 |
2021-08-03T01:36:26.000Z
|
2022-03-22T08:08:16.000Z
|
src/res2net.py
|
weijun88/SANet
|
68da18e4a682cfe8f4e9f679bd52fee07f3f562b
|
[
"MIT"
] | 3 |
2021-09-04T03:17:06.000Z
|
2022-02-26T03:41:34.000Z
|
src/res2net.py
|
weijun88/SANet
|
68da18e4a682cfe8f4e9f679bd52fee07f3f562b
|
[
"MIT"
] | 4 |
2021-08-25T11:11:41.000Z
|
2022-03-23T08:17:07.000Z
|
import torch.nn as nn
import math
import torch
import torch.nn.functional as F
class Bottle2neck(nn.Module):
    """Res2Net bottleneck block.

    The single 3x3 stage of a classic bottleneck is replaced by `scale`
    groups of `width` channels processed hierarchically: each group's 3x3
    conv receives the previous group's output added to its own input (see
    the split/accumulate loop in ``forward``).
    """
    # output channels = planes * expansion, as in a standard bottleneck
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, baseWidth=26, scale=4, stype='normal'):
        super(Bottle2neck, self).__init__()
        # channel width of each of the `scale` groups
        width = int(math.floor(planes*(baseWidth/64.0)))
        self.conv1 = nn.Conv2d(inplanes, width*scale, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width*scale)
        # number of 3x3 convs: when scale > 1 the last group is forwarded
        # untouched (or pooled at a stage boundary) instead of convolved
        if scale == 1:
            self.nums = 1
        else:
            self.nums = scale - 1
        if stype == 'stage':
            # first block of a stage: the untouched group must be
            # downsampled too, to match the strided conv outputs
            self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1)
        convs, bns = [], []
        for i in range(self.nums):
            convs.append(nn.Conv2d(width, width, kernel_size=3, stride=stride, padding=1, bias=False))
            bns.append(nn.BatchNorm2d(width))
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList(bns)
        self.conv3 = nn.Conv2d(width * scale, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.downsample = downsample
        self.stype = stype
        self.scale = scale
        self.width = width

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)), inplace=True)
        # split the mid channels into `scale` groups of `width` channels
        spx = torch.split(out, self.width, 1)
        for i in range(self.nums):
            # hierarchical connection: add the previous group's output,
            # except for the first group or at a stage boundary (where
            # spatial sizes would not match)
            if i == 0 or self.stype == 'stage':
                sp = spx[i]
            else:
                sp = sp + spx[i]
            sp = self.convs[i](sp)
            sp = F.relu(self.bns[i](sp), inplace=True)
            out = sp if i == 0 else torch.cat((out, sp), 1)
        # re-attach the last, unconvolved group
        if self.scale != 1 and self.stype == 'normal':
            out = torch.cat((out, spx[self.nums]), 1)
        elif self.scale != 1 and self.stype == 'stage':
            out = torch.cat((out, self.pool(spx[self.nums])), 1)
        out = self.bn3(self.conv3(out))
        if self.downsample is not None:
            x = self.downsample(x)
        # residual add followed by the final activation
        return F.relu(out+x, inplace=True)
class Res2Net(nn.Module):
    """Res2Net backbone returning the four stage feature maps.

    ``forward`` yields (out2, out3, out4, out5) — the outputs of layer1-4,
    at strides 4/8/16/32 relative to the input. ``initialize`` loads
    pretrained weights from ``snapshot`` (non-strict, so mismatched keys
    are tolerated).
    """

    def __init__(self, layers, snapshot, baseWidth=26, scale=4):
        self.inplanes = 64
        super(Res2Net, self).__init__()
        self.snapshot = snapshot
        self.baseWidth = baseWidth
        self.scale = scale
        # deep stem: three 3x3 convs instead of a single 7x7 (matches the
        # v1b checkpoint referenced by Res2Net50 below)
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 32, 3, 2, 1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 32, 3, 1, 1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 64, 3, 1, 1, bias=False)
        )
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(Bottle2neck, 64, layers[0])
        self.layer2 = self._make_layer(Bottle2neck, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(Bottle2neck, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(Bottle2neck, 512, layers[3], stride=2)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` Bottle2necks; only the first may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # avgpool-then-1x1 shortcut instead of a strided 1x1 conv
            downsample = nn.Sequential(
                nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False),
                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        # first block carries the stride/downsample and 'stage' type
        layers = [block(self.inplanes, planes, stride, downsample=downsample, stype='stage', baseWidth=self.baseWidth, scale=self.scale)]
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, baseWidth=self.baseWidth, scale=self.scale))
        return nn.Sequential(*layers)

    def forward(self, x):
        out1 = F.relu(self.bn1(self.conv1(x)), inplace=True)
        out1 = F.max_pool2d(out1, kernel_size=3, stride=2, padding=1)
        out2 = self.layer1(out1)
        out3 = self.layer2(out2)
        out4 = self.layer3(out3)
        out5 = self.layer4(out4)
        return out2, out3, out4, out5

    def initialize(self):
        # strict=False tolerates missing/extra keys in the checkpoint
        self.load_state_dict(torch.load(self.snapshot), strict=False)
def Res2Net50():
    """Build a Res2Net-50 backbone (layers [3, 4, 6, 3]) wired to the
    bundled v1b checkpoint path."""
    return Res2Net([3, 4, 6, 3], '../res/res2net50_v1b_26w_4s-3cf99910.pth')
def weight_init(module):
    """Initialize the direct children of *module* in place.

    Conv2d and Linear weights get Kaiming-normal init (biases zeroed),
    normalization layers are reset to weight=1 / bias=0, Sequential
    containers are recursed into, activations are left alone, and any
    other module is expected to provide its own ``initialize()``.
    """
    for name, child in module.named_children():
        print('initialize: ' + name)
        if isinstance(child, (nn.Conv2d, nn.Linear)):
            nn.init.kaiming_normal_(child.weight, mode='fan_in', nonlinearity='relu')
            if child.bias is not None:
                nn.init.zeros_(child.bias)
        elif isinstance(child, (nn.BatchNorm2d, nn.InstanceNorm2d)):
            if child.weight is not None:
                nn.init.ones_(child.weight)
            if child.bias is not None:
                nn.init.zeros_(child.bias)
        elif isinstance(child, nn.Sequential):
            # containers: descend one level and initialize their children
            weight_init(child)
        elif isinstance(child, (nn.ReLU, nn.PReLU)):
            pass
        else:
            # custom modules must know how to initialize themselves
            child.initialize()
| 40.122137 | 137 | 0.583714 |
e1efec3d776857c8a09493ac1a2ee6e151e71434
| 610 |
py
|
Python
|
Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/multimodal/__init__.py
|
linuxonly801/awesome-DeepLearning
|
b063757fa130c4d56aea5cce2e592610f1e169f9
|
[
"Apache-2.0"
] | 5 |
2022-01-30T07:35:58.000Z
|
2022-02-08T05:45:20.000Z
|
Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/multimodal/__init__.py
|
linuxonly801/awesome-DeepLearning
|
b063757fa130c4d56aea5cce2e592610f1e169f9
|
[
"Apache-2.0"
] | 1 |
2022-01-14T02:33:28.000Z
|
2022-01-14T02:33:28.000Z
|
Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/multimodal/__init__.py
|
linuxonly801/awesome-DeepLearning
|
b063757fa130c4d56aea5cce2e592610f1e169f9
|
[
"Apache-2.0"
] | 1 |
2022-03-03T08:03:53.000Z
|
2022-03-03T08:03:53.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from .base import BaseMultimodal
from .actbert import ActBert
__all__ = ['BaseMultimodal', 'ActBert']
| 35.882353 | 74 | 0.757377 |
4455786f3194daf0dac275e41d216f6279e9d181
| 18,780 |
py
|
Python
|
sdk/python/pulumi_azure_native/documentdb/v20200301/sql_resource_sql_user_defined_function.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/documentdb/v20200301/sql_resource_sql_user_defined_function.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/documentdb/v20200301/sql_resource_sql_user_defined_function.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['SqlResourceSqlUserDefinedFunctionArgs', 'SqlResourceSqlUserDefinedFunction']
@pulumi.input_type
class SqlResourceSqlUserDefinedFunctionArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
container_name: pulumi.Input[str],
database_name: pulumi.Input[str],
options: pulumi.Input['CreateUpdateOptionsArgs'],
resource: pulumi.Input['SqlUserDefinedFunctionResourceArgs'],
resource_group_name: pulumi.Input[str],
location: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
user_defined_function_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a SqlResourceSqlUserDefinedFunction resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] container_name: Cosmos DB container name.
:param pulumi.Input[str] database_name: Cosmos DB database name.
:param pulumi.Input['CreateUpdateOptionsArgs'] options: A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
:param pulumi.Input['SqlUserDefinedFunctionResourceArgs'] resource: The standard JSON format of a userDefinedFunction
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] location: The location of the resource group to which the resource belongs.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
:param pulumi.Input[str] user_defined_function_name: Cosmos DB userDefinedFunction name.
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "container_name", container_name)
pulumi.set(__self__, "database_name", database_name)
pulumi.set(__self__, "options", options)
pulumi.set(__self__, "resource", resource)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if location is not None:
pulumi.set(__self__, "location", location)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if user_defined_function_name is not None:
pulumi.set(__self__, "user_defined_function_name", user_defined_function_name)
    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> pulumi.Input[str]:
        """
        Cosmos DB database account name.
        """
        return pulumi.get(self, "account_name")
    @account_name.setter
    def account_name(self, value: pulumi.Input[str]):
        """Set the Cosmos DB database account name."""
        pulumi.set(self, "account_name", value)
    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> pulumi.Input[str]:
        """
        Cosmos DB container name.
        """
        return pulumi.get(self, "container_name")
    @container_name.setter
    def container_name(self, value: pulumi.Input[str]):
        """Set the Cosmos DB container name."""
        pulumi.set(self, "container_name", value)
    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> pulumi.Input[str]:
        """
        Cosmos DB database name.
        """
        return pulumi.get(self, "database_name")
    @database_name.setter
    def database_name(self, value: pulumi.Input[str]):
        """Set the Cosmos DB database name."""
        pulumi.set(self, "database_name", value)
    @property
    @pulumi.getter
    def options(self) -> pulumi.Input['CreateUpdateOptionsArgs']:
        """
        A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
        """
        return pulumi.get(self, "options")
    @options.setter
    def options(self, value: pulumi.Input['CreateUpdateOptionsArgs']):
        """Set the request options (sent as request headers)."""
        pulumi.set(self, "options", value)
    @property
    @pulumi.getter
    def resource(self) -> pulumi.Input['SqlUserDefinedFunctionResourceArgs']:
        """
        The standard JSON format of a userDefinedFunction
        """
        return pulumi.get(self, "resource")
    @resource.setter
    def resource(self, value: pulumi.Input['SqlUserDefinedFunctionResourceArgs']):
        """Set the userDefinedFunction resource body."""
        pulumi.set(self, "resource", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group. The name is case insensitive.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        """Set the resource group name (case insensitive)."""
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        The location of the resource group to which the resource belongs.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        """Set the location of the resource group the resource belongs to."""
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        """Set the resource tags (max 15; key <= 128 chars, value <= 256 chars)."""
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="userDefinedFunctionName")
    def user_defined_function_name(self) -> Optional[pulumi.Input[str]]:
        """
        Cosmos DB userDefinedFunction name.
        """
        return pulumi.get(self, "user_defined_function_name")
    @user_defined_function_name.setter
    def user_defined_function_name(self, value: Optional[pulumi.Input[str]]):
        """Set the Cosmos DB userDefinedFunction name."""
        pulumi.set(self, "user_defined_function_name", value)
class SqlResourceSqlUserDefinedFunction(pulumi.CustomResource):
    """An Azure Cosmos DB SQL userDefinedFunction resource (API version 2020-03-01)."""
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 container_name: Optional[pulumi.Input[str]] = None,
                 database_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 options: Optional[pulumi.Input[pulumi.InputType['CreateUpdateOptionsArgs']]] = None,
                 resource: Optional[pulumi.Input[pulumi.InputType['SqlUserDefinedFunctionResourceArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 user_defined_function_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        An Azure Cosmos DB userDefinedFunction.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_name: Cosmos DB database account name.
        :param pulumi.Input[str] container_name: Cosmos DB container name.
        :param pulumi.Input[str] database_name: Cosmos DB database name.
        :param pulumi.Input[str] location: The location of the resource group to which the resource belongs.
        :param pulumi.Input[pulumi.InputType['CreateUpdateOptionsArgs']] options: A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
        :param pulumi.Input[pulumi.InputType['SqlUserDefinedFunctionResourceArgs']] resource: The standard JSON format of a userDefinedFunction
        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
        :param pulumi.Input[str] user_defined_function_name: Cosmos DB userDefinedFunction name.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: SqlResourceSqlUserDefinedFunctionArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        An Azure Cosmos DB userDefinedFunction.
        :param str resource_name: The name of the resource.
        :param SqlResourceSqlUserDefinedFunctionArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above based on the actual arguments.
        resource_args, opts = _utilities.get_resource_args_opts(SqlResourceSqlUserDefinedFunctionArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       account_name: Optional[pulumi.Input[str]] = None,
                       container_name: Optional[pulumi.Input[str]] = None,
                       database_name: Optional[pulumi.Input[str]] = None,
                       location: Optional[pulumi.Input[str]] = None,
                       options: Optional[pulumi.Input[pulumi.InputType['CreateUpdateOptionsArgs']]] = None,
                       resource: Optional[pulumi.Input[pulumi.InputType['SqlUserDefinedFunctionResourceArgs']]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       user_defined_function_name: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Normalize and validate resource options before registration.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: build the args bag and enforce required
            # inputs (the required checks are skipped when adopting via opts.urn).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = SqlResourceSqlUserDefinedFunctionArgs.__new__(SqlResourceSqlUserDefinedFunctionArgs)
            if account_name is None and not opts.urn:
                raise TypeError("Missing required property 'account_name'")
            __props__.__dict__["account_name"] = account_name
            if container_name is None and not opts.urn:
                raise TypeError("Missing required property 'container_name'")
            __props__.__dict__["container_name"] = container_name
            if database_name is None and not opts.urn:
                raise TypeError("Missing required property 'database_name'")
            __props__.__dict__["database_name"] = database_name
            __props__.__dict__["location"] = location
            if options is None and not opts.urn:
                raise TypeError("Missing required property 'options'")
            __props__.__dict__["options"] = options
            if resource is None and not opts.urn:
                raise TypeError("Missing required property 'resource'")
            __props__.__dict__["resource"] = resource
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["tags"] = tags
            __props__.__dict__["user_defined_function_name"] = user_defined_function_name
            # Output-only properties start as None; the engine fills them in.
            __props__.__dict__["name"] = None
            __props__.__dict__["type"] = None
        # Aliases let states recorded under other API versions / provider names
        # migrate to this type without replacement.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:documentdb/v20200301:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-native:documentdb:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-nextgen:documentdb:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-native:documentdb/v20190801:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-nextgen:documentdb/v20190801:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-native:documentdb/v20191212:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-nextgen:documentdb/v20191212:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-native:documentdb/v20200401:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200401:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-native:documentdb/v20200601preview:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200601preview:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-native:documentdb/v20200901:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200901:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-native:documentdb/v20210115:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210115:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-native:documentdb/v20210301preview:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210301preview:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-native:documentdb/v20210315:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210315:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-native:documentdb/v20210401preview:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210401preview:SqlResourceSqlUserDefinedFunction"), 
pulumi.Alias(type_="azure-native:documentdb/v20210415:SqlResourceSqlUserDefinedFunction"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210415:SqlResourceSqlUserDefinedFunction")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(SqlResourceSqlUserDefinedFunction, __self__).__init__(
            'azure-native:documentdb/v20200301:SqlResourceSqlUserDefinedFunction',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'SqlResourceSqlUserDefinedFunction':
        """
        Get an existing SqlResourceSqlUserDefinedFunction resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Only output properties are initialized here; their values are resolved
        # from the existing resource's state, not from user input.
        __props__ = SqlResourceSqlUserDefinedFunctionArgs.__new__(SqlResourceSqlUserDefinedFunctionArgs)
        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["resource"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        return SqlResourceSqlUserDefinedFunction(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        The location of the resource group to which the resource belongs.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the ARM resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def resource(self) -> pulumi.Output[Optional['outputs.SqlUserDefinedFunctionGetPropertiesResponseResource']]:
        """The userDefinedFunction resource body (response shape)."""
        return pulumi.get(self, "resource")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of Azure resource.
        """
        return pulumi.get(self, "type")
| 57.431193 | 2,180 | 0.695847 |
3aac6e9bca947c6c51002b90d72877ff5d975d93
| 1,650 |
py
|
Python
|
main.py
|
justinmckeown/hashmeashlee
|
83e7e9dc94c207a255c4ac55b784cd9cc6c12f22
|
[
"MIT"
] | null | null | null |
main.py
|
justinmckeown/hashmeashlee
|
83e7e9dc94c207a255c4ac55b784cd9cc6c12f22
|
[
"MIT"
] | null | null | null |
main.py
|
justinmckeown/hashmeashlee
|
83e7e9dc94c207a255c4ac55b784cd9cc6c12f22
|
[
"MIT"
] | null | null | null |
import kivy
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.label import Label
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.textinput import TextInput
from kivy.uix.button import Button
from kivy.uix.filechooser import FileChooser
#from plyer import filechooser
import diectorydive as dd
import kivyfilechooser as fc
class MyLayout(GridLayout):
    """Root widget: a labelled file-path text input plus a Submit button."""
    def __init__(self, **kwargs):
        """Build the static form; extra keyword args are forwarded to GridLayout."""
        # Call the parent GridLayout constructor.
        super(MyLayout, self).__init__(**kwargs)
        # Grid geometry: 4x4 cells, fixed 40px row height, generous padding.
        self.cols = 4
        self.rows = 4
        self.row_force_default=True
        self.row_default_height=40
        self.padding =100
        # Widgets fill the grid in order: label, path input, then button.
        self.add_widget(Label(text="File Path: ", size_hint_x=None, width=100))
        self.filepath = TextInput(multiline=False, size_hint_x=None, width=300)
        self.add_widget(self.filepath)
        # Submit button wired to the press() handler below.
        self.submit = Button(text="Submit", size_hint_x=None, width=200)
        self.submit.bind(on_press=self.press)
        self.add_widget(self.submit)
    def press(self, instance):
        """Submit handler — currently a stub; file-chooser code is left disabled below."""
        #file_path = self.filepath.text
        #print(f'Hello. Is this the correct path: {file_path}')
        #path = filechooser.open_file(title="Pick a CSV file..",
        #                             filters=[("Comma-separated Values", "*.csv")])
        #print(path)
        pass
class HashsterApp(App):
    """Kivy application whose root widget is MyLayout."""
    def build(self):
        """Return the root widget for the application window."""
        return MyLayout()
if __name__ == '__main__':
    # Start the Kivy event loop when executed as a script.
    HashsterApp().run()
| 26.190476 | 79 | 0.651515 |
1c7a493ea81a6d9c392f07ac0fa18e4f02975bc3
| 1,978 |
py
|
Python
|
examples/rabbitmq_extensions/length_limit.py
|
deslum/pika
|
d8af8a573b3535e02540c2e5a14c7b34e276adc0
|
[
"BSD-3-Clause"
] | 1 |
2020-11-25T07:31:56.000Z
|
2020-11-25T07:31:56.000Z
|
examples/rabbitmq_extensions/length_limit.py
|
deslum/pika
|
d8af8a573b3535e02540c2e5a14c7b34e276adc0
|
[
"BSD-3-Clause"
] | null | null | null |
examples/rabbitmq_extensions/length_limit.py
|
deslum/pika
|
d8af8a573b3535e02540c2e5a14c7b34e276adc0
|
[
"BSD-3-Clause"
] | null | null | null |
import pika
from threading import Thread
# Connection settings for the local RabbitMQ broker (default guest account).
HOST = 'localhost'
USER = 'guest'
PASSWORD = 'guest'
VHOST = '/'
# Payload template and target queue used by the demo producer below.
MSG = "Test message for RabbitMQ"
QUEUE = 'length_limit_queue'
class RabbitMQ():
    """Small convenience wrapper around a blocking pika connection.

    Connecting happens in the constructor: it opens the connection, declares
    a durable direct exchange, then declares and binds the bounded demo queue.
    """
    def __init__(self,
                 exchange='messages',
                 host='localhost',
                 user='guest',
                 password='guest',
                 virtual_host='/'):
        self.exchange = exchange
        self.virtual_host = virtual_host
        self.credentials = pika.PlainCredentials(user, password)
        self.parameters = pika.ConnectionParameters(host=host,
                                                    virtual_host=virtual_host,
                                                    credentials=self.credentials)
        self.rmq_connect()
    def close(self):
        """Close the underlying AMQP connection."""
        self.connection.close()
    def rmq_connect(self):
        """Open the connection, obtain a channel, and set up exchange/queue."""
        self.connection = pika.BlockingConnection(self.parameters)
        self.channel = self.connection.channel()
        self.channel.exchange_declare(exchange=self.exchange,
                                      exchange_type='direct',
                                      durable=True,
                                      auto_delete=False)
        self._queues_declare()
    def _queues_declare(self):
        """Declare the length-limited queue and bind it to the exchange."""
        # Broker-side cap: at most 10 messages; overflowing publishes are rejected.
        limit_args = {"x-max-length": 10, "x-overflow": "reject-publish"}
        self.channel.queue_declare(QUEUE,
                                   durable=True,
                                   auto_delete=False,
                                   arguments=limit_args)
        self.channel.queue_bind(exchange=self.exchange,
                                queue=QUEUE,
                                routing_key=QUEUE)
    def send_msg(self, msg, queue):
        """Publish *msg* to *queue* as a persistent (delivery_mode=2) message."""
        persistent = pika.BasicProperties(delivery_mode=2)
        self.channel.basic_publish(exchange=self.exchange,
                                   routing_key=queue,
                                   body=msg,
                                   properties=persistent)
def producer():
    """Connect to the broker and publish 100 numbered test messages to QUEUE."""
    connection = RabbitMQ(
        host=HOST, user=USER, password=PASSWORD, virtual_host=VHOST)
    for index in range(100):
        connection.send_msg("{} {}".format(MSG, index), QUEUE)
if __name__ == '__main__':
    # Run the demo producer on a background thread.
    Thread(target=producer).start()
| 28.666667 | 79 | 0.596562 |
982242572e1595afd14568c275f78053a239f6f5
| 941 |
py
|
Python
|
estate_app/estate_app/doctype/surveysites2/survey_api.py
|
khaledasem/estate_app-ver4
|
6c4097e7627f8bbe87916cd5cdcae8f9c692954c
|
[
"MIT"
] | null | null | null |
estate_app/estate_app/doctype/surveysites2/survey_api.py
|
khaledasem/estate_app-ver4
|
6c4097e7627f8bbe87916cd5cdcae8f9c692954c
|
[
"MIT"
] | null | null | null |
estate_app/estate_app/doctype/surveysites2/survey_api.py
|
khaledasem/estate_app-ver4
|
6c4097e7627f8bbe87916cd5cdcae8f9c692954c
|
[
"MIT"
] | null | null | null |
import frappe
@frappe.whitelist()
def get_contracted_site(name):
    """Return the NewSites row whose name matches *name*, as a list of dicts.

    *name* comes from the client (whitelisted endpoint), so it is passed as a
    bound query parameter instead of being interpolated into the SQL string —
    the previous f-string version was vulnerable to SQL injection.
    """
    return frappe.db.sql(
        """SELECT * FROM tabNewSites WHERE name=%s;""", (name,), as_dict=True)
@frappe.whitelist()
def get_government_info(city):
    """Return the govern1 row whose name1 matches *city*, as a list of dicts.

    *city* comes from the client (whitelisted endpoint), so it is passed as a
    bound query parameter instead of being interpolated into the SQL string —
    the previous f-string version was vulnerable to SQL injection.
    """
    return frappe.db.sql(
        """SELECT * FROM tabgovern1 WHERE name1=%s;""", (city,), as_dict=True)
@frappe.whitelist()
def set_last_code(name, last_code):
    """Persist *last_code* on the govern1 document *name* and return 'r'."""
    govern_doc = frappe.get_doc('govern1', name)
    govern_doc.last_code = last_code
    govern_doc.save()
    return 'r'
@frappe.whitelist()
def set_is_replaced_of_old_site(old_sitename):
    """Mark the NewSites record *old_sitename* as replaced (is_replaced='Yes')."""
    frappe.db.set_value('NewSites', old_sitename, 'is_replaced', 'Yes')
@frappe.whitelist()
def getUserInfo_api():
    """Return (full_name, phone, role_profile_name) rows for the session user.

    The session user is supplied as a bound query parameter rather than being
    formatted into the SQL text, closing the injection-style string building
    used previously. The function name stays camelCase because it is a
    whitelisted endpoint that clients call by name.
    """
    return frappe.db.sql(
        """select full_name, phone, role_profile_name from tabUser where name=%s;""",
        (frappe.session.user,))
| 29.40625 | 122 | 0.721573 |
20dd63ec77ea7b88d2db5c311861aab62e6b93b5
| 10,299 |
py
|
Python
|
sushichef.py
|
learningequality/sushi-chef-stanford-digital-medic
|
2c20e5f272add7893723ecd9cae00f0e04c68624
|
[
"MIT"
] | null | null | null |
sushichef.py
|
learningequality/sushi-chef-stanford-digital-medic
|
2c20e5f272add7893723ecd9cae00f0e04c68624
|
[
"MIT"
] | 5 |
2020-05-26T19:13:40.000Z
|
2022-03-12T00:31:43.000Z
|
sushichef.py
|
learningequality/sushi-chef-stanford-digital-medic
|
2c20e5f272add7893723ecd9cae00f0e04c68624
|
[
"MIT"
] | 1 |
2021-07-23T06:11:11.000Z
|
2021-07-23T06:11:11.000Z
|
#!/usr/bin/env python
import hashlib
import json
import os
import re
import sys
from io import BytesIO
from ricecooker.utils import downloader, html_writer
from ricecooker.chefs import SushiChef
from ricecooker.classes import nodes, files, questions, licenses
from ricecooker.config import LOGGER # Use LOGGER to print messages
from ricecooker.exceptions import raise_for_invalid_channel
from le_utils.constants import exercises, content_kinds, file_formats, format_presets, languages
from bs4 import BeautifulSoup
from PIL import Image
# Run constants
################################################################################
CHANNEL_NAME = "Stanford Digital MEdIC Coronavirus Toolkit" # Name of Kolibri channel
CHANNEL_SOURCE_ID = "sushi-chef-stanford-digital-medic" # Unique ID for content source
CHANNEL_DOMAIN = "digitalmedic.stanford.edu" # Who is providing the content
CHANNEL_LANGUAGE = "en" # Language of channel
CHANNEL_DESCRIPTION = "From the Stanford Center for " \
"Health Education, these infographics and visual " \
"materials provide key information on high-priority " \
"topics related to the prevention and understanding of COVID-19."
CHANNEL_THUMBNAIL = "https://pbs.twimg.com/profile_images/989193568752300032/UQpMU-sU_400x400.jpg"
# Additional constants
################################################################################
# Local working directory where generated PDFs are cached between runs.
DOCUMENT_DOWNLOAD_DIR = 'documents'
# Created eagerly at import time so later writes cannot fail on a missing dir.
if not os.path.exists(DOCUMENT_DOWNLOAD_DIR):
    os.makedirs(DOCUMENT_DOWNLOAD_DIR)
# Main page collection brandfolder (English-only content).
ENGLISH_COLLECTION_URL = "https://brandfolder.com/digitalmedic/covid-19"
# Per-section asset listing endpoint; formatted with collection and section keys.
ENGLISH_ASSETS_URL = "https://brandfolder.com/api/v4/collections/{collection}/sections/{section}/assets?sort_by=position&order=ASC&search=&fast_jsonapi=true"
# Brandfolder section ids to leave out of the channel.
EXCLUDED_TOPIC_IDS = [262354, 261412]
# Attachment (video) lookup endpoint, formatted with an asset id.
FILE_STORAGE_URL = "https://brandfolder.com/api/v4/assets/{id}/attachments?fields=url,thumbnail_url"
# Multi-language content constants
SLIDESHOWS_URL = "https://brandfolder.com/digitalmedic/covid-19-multiple-languages"
SLIDESHOW_ASSETS_URL = "https://brandfolder.com/api/v4/collections/{collection}/sections/{section}/assets?sort_by=position&order=ASC&strict_search=false&fast_jsonapi=true"
LICENSE = licenses.CC_BY_SALicense(copyright_holder="Stanford Center for Health Education")
# Maps brandfolder section names to language codes; None means no code exists
# (such entries fall back to the English section name when building nodes).
LANGUAGE_MAP = {
    'Afrikaans': 'af',
    'Arabic': 'ar',
    'English': 'en',
    'French': 'fr',
    'Hindi': 'hi',
    'isiXhosa': 'xh',
    'isiZulu': 'zul',
    'Kiswahili': 'sw',
    'Mandarin Chinese - simple': 'zh-CN',
    'Mandarin Chinese - Traditional': 'zh-Hant',
    'Portuguese': 'pt',
    'Setswana': 'tn',
    'Spanish': 'es',
    'Tetun': None,
}
# The chef subclass
################################################################################
class StanfordDigitalMedicChef(SushiChef):
    """
    This class converts content from the content source into the format required by Kolibri,
    then uploads the channel to Kolibri Studio.
    Your command line script should call the `main` method as the entry point,
    which performs the following steps:
    - Parse command line arguments and options (run `./sushichef.py -h` for details)
    - Call the `SushiChef.run` method which in turn calls `pre_run` (optional)
      and then the ricecooker function `uploadchannel` which in turn calls this
      class' `get_channel` method to get channel info, then `construct_channel`
      to build the contentnode tree.
    For more info, see https://ricecooker.readthedocs.io
    """
    # Channel metadata consumed by SushiChef.get_channel.
    channel_info = {
        'CHANNEL_SOURCE_DOMAIN': CHANNEL_DOMAIN,
        'CHANNEL_SOURCE_ID': CHANNEL_SOURCE_ID,
        'CHANNEL_TITLE': CHANNEL_NAME,
        'CHANNEL_LANGUAGE': CHANNEL_LANGUAGE,
        'CHANNEL_THUMBNAIL': CHANNEL_THUMBNAIL,
        'CHANNEL_DESCRIPTION': CHANNEL_DESCRIPTION,
    }
    # Your chef subclass can override/extend the following method:
    # get_channel: to create ChannelNode manually instead of using channel_info
    # pre_run: to perform preliminary tasks, e.g., crawling and scraping website
    # __init__: if need to customize functionality or add command line arguments
    def construct_channel(self, *args, **kwargs):
        """
        Creates ChannelNode and build topic tree
        Args:
        - args: arguments passed in on the command line
        - kwargs: extra options passed in as key="value" pairs on the command line
          For example, add the command line option lang="fr" and the value
          "fr" will be passed along to `construct_channel` as kwargs['lang'].
        Returns: ChannelNode
        """
        channel = self.get_channel(*args, **kwargs)  # Create ChannelNode from data in self.channel_info
        # The channel has two top-level sections: English topics and
        # per-language slideshows, each scraped by a module-level helper.
        scrape_english_collection(channel)
        scrape_multilanguage_slideshows(channel)
        return channel
# HELPER FUNCTIONS
################################################################################
def get_collection_key(contents):
    """Pull the brandfolder collection key out of a scraped page.

    The page embeds a JavaScript object literal (``var SOURCE = {...}``)
    whose ``resource_key`` field identifies the collection.
    """
    source_match = re.search(
        r"var SOURCE\s*=\s*\{.+, resource_key: \"(.+)\"[^\}]+",
        contents.text,
    )
    return source_match.group(1)
def create_slideshow(images, source_id, title, language_name):
    """Build a content node from a non-empty list of images.

    :param images: list of dicts ``{url: str, caption: str}``; the first
        image also serves as the node thumbnail.
    :param source_id: unique id for the node; also keys the cached PDF name.
    :param title: node title.
    :param language_name: key into LANGUAGE_MAP.
    :returns: a SlideshowNode when the chef was run with ``--slides``,
        otherwise a DocumentNode wrapping a PDF built from the images.
    """
    thumbnailFile = files.ThumbnailFile(images[0]['url'])
    if '--slides' in sys.argv:
        slides = [
            files.SlideImageFile(image['url'], caption=image.get('caption', ''))
            for image in images
        ]
        return nodes.SlideshowNode(
            source_id=source_id,
            title=title,
            license=LICENSE,
            language=LANGUAGE_MAP[language_name],
            files=[thumbnailFile] + slides
        )
    # Create PDF (cached: md5 of the source id keeps the filename filesystem-safe
    # and stable across runs).
    filename = hashlib.md5(source_id.encode('utf-8')).hexdigest()
    pdfpath = '{}{}{}.pdf'.format(DOCUMENT_DOWNLOAD_DIR, os.path.sep, filename)
    if not os.path.exists(pdfpath):
        image_list = []
        for image in images:
            img = Image.open(BytesIO(downloader.read(image['url'])))
            if img.mode == 'RGBA':
                # PDF pages cannot carry an alpha channel; flatten to RGB first.
                img = img.convert('RGB')
            image_list.append(img)
        image_list[0].save(pdfpath, save_all=True, append_images=image_list[1:])
    return nodes.DocumentNode(
        source_id=source_id,
        title=title,
        license=LICENSE,
        language=LANGUAGE_MAP[language_name],
        files=[thumbnailFile, files.DocumentFile(pdfpath)]
    )
# SCRAPING FUNCTIONS
################################################################################
def scrape_english_collection(channel):
    """Scrape the English brandfolder collection into *channel*.

    Creates an "English" topic, then one subtopic per brandfolder section
    (excluding EXCLUDED_TOPIC_IDS), each filled by scrape_collection_files().
    """
    LOGGER.info('Scraping English collection...')
    english_topic = nodes.TopicNode(source_id=ENGLISH_COLLECTION_URL, title="English")
    channel.add_child(english_topic)
    contents = BeautifulSoup(downloader.read(ENGLISH_COLLECTION_URL), 'html5lib')
    collection_key = get_collection_key(contents)
    # Section metadata is embedded as JSON inside a React props attribute.
    topic_selection = contents.find('div', {'class': 'asset-list'}).find('div')
    topic_list = [t for t in json.loads(topic_selection['data-react-props'])['sections'] if t['id'] not in EXCLUDED_TOPIC_IDS]
    for topic in topic_list:
        LOGGER.info('    {}'.format(topic['name'].encode('utf-8')))
        topic_node = nodes.TopicNode(source_id=topic['section_key'], title=topic['name'])
        english_topic.add_child(topic_node)
        # Scrape items in the topic
        url = ENGLISH_ASSETS_URL.format(collection=collection_key, section=topic['section_key'])
        scrape_collection_files(topic_node, url)
def scrape_collection_files(topic, url):
    """Add one brandfolder section's assets to *topic*.

    PNG attachments are pooled and emitted as a single slideshow/PDF node;
    MP4 attachments become individual VideoNodes; anything else is logged.
    """
    assets = json.loads(downloader.read(url))['data']
    images = []
    for asset in assets:
        if asset['attributes']['extension'] == 'png':
            # NOTE(review): the replacement target '[email protected]' looks like an
            # email-obfuscation artifact; the original was presumably the
            # '@2x' full-resolution rendition filename — confirm against
            # the live brandfolder API.
            images.append({
                'url': asset['attributes']['thumbnail_url'].replace('element.png', '[email protected]'),
                'caption': asset['attributes']['name']
            })
        elif asset['attributes']['extension'] == 'mp4':
            # Video URLs require a second API call per asset.
            video_data = json.loads(downloader.read(FILE_STORAGE_URL.format(id=asset['id'])))
            video = video_data['data'][0]['attributes']
            topic.add_child(nodes.VideoNode(
                source_id=video['url'],
                title=asset['attributes']['name'],
                license=LICENSE,
                files=[
                    files.VideoFile(video['url']),
                    files.ThumbnailFile(video['thumbnail_url'])
                ]
            ))
        else:
            LOGGER.warning('Unable to add {} from {}'.format(asset['attributes']['extension'], url))
    # Add images to slideshow node (skipped when the section had no PNGs).
    if len(images):
        topic.add_child(create_slideshow(
            images,
            url,
            topic.title,
            'English'
        ))
def scrape_multilanguage_slideshows(channel):
    """Scrape the multi-language brandfolder collection into *channel*.

    Each brandfolder section corresponds to one language; its slides are
    combined into a single slideshow/PDF node titled in the native language.
    """
    LOGGER.info('Scraping multi-language content...')
    contents = BeautifulSoup(downloader.read(SLIDESHOWS_URL), 'html5lib')
    collection_key = get_collection_key(contents)
    languages_selection = contents.find('div', {'class': 'asset-list'}).find('div')
    language_list = json.loads(languages_selection['data-react-props'])['sections']
    for language in language_list:
        # NOTE(review): `collection_key` computed above is never used; the key
        # below is hard-coded. Confirm which collection key is intended.
        asset_url = SLIDESHOW_ASSETS_URL.format(collection='qac6i4-foozd4-68u325', section=language['section_key'])
        slide_data = json.loads(downloader.read(asset_url))['data']
        # Languages mapped to None in LANGUAGE_MAP (e.g. Tetun) keep their
        # English section name instead of a native one.
        translated_name = languages.getlang(LANGUAGE_MAP[language['name']]).native_name if LANGUAGE_MAP[language['name']] else language['name']
        LOGGER.info('    {}'.format(translated_name.encode('utf-8')))
        slides = [
            { 'url': slide['attributes']['thumbnail_url'].replace('element.png', '[email protected]')}
            for slide in slide_data
        ]
        if len(slides):
            channel.add_child(create_slideshow(
                slides,
                asset_url,
                translated_name,
                language['name']
            ))
# CLI
################################################################################
if __name__ == '__main__':
    # This code runs when sushichef.py is called from the command line.
    # Pass `-- --slides` to emit slideshow nodes instead of PDFs.
    chef = StanfordDigitalMedicChef()
    chef.main()
| 40.547244 | 171 | 0.635693 |
dc3ad0813f20302bcb3e3bb7b3797a5b66c5a4b4
| 7,102 |
py
|
Python
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/lib/xmodule/xmodule/tests/test_course_metadata_utils.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 3 |
2021-12-15T04:58:18.000Z
|
2022-02-06T12:15:37.000Z
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/lib/xmodule/xmodule/tests/test_course_metadata_utils.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | null | null | null |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/lib/xmodule/xmodule/tests/test_course_metadata_utils.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 1 |
2019-01-02T14:38:50.000Z
|
2019-01-02T14:38:50.000Z
|
"""
Tests for course_metadata_utils.
"""
from collections import namedtuple
from datetime import datetime, timedelta
from unittest import TestCase
from pytz import utc
import pytest
from xmodule.block_metadata_utils import (
display_name_with_default,
display_name_with_default_escaped,
url_name_for_block
)
from xmodule.course_metadata_utils import (
DEFAULT_START_DATE,
clean_course_key,
course_start_date_is_default,
has_course_ended,
has_course_started,
number_for_course_location
)
from xmodule.modulestore.tests.utils import (
MixedModulestoreBuilder,
MongoModulestoreBuilder,
VersioningModulestoreBuilder
)
# Reference datetimes, relative to "now", used to build test courses that
# have already started/ended (or not started yet).
_TODAY = datetime.now(utc)
_LAST_MONTH = _TODAY - timedelta(days=30)
_LAST_WEEK = _TODAY - timedelta(days=7)
_NEXT_WEEK = _TODAY + timedelta(days=7)
class CourseMetadataUtilsTestCase(TestCase):
"""
Tests for course_metadata_utils.
"""
    def setUp(self):
        """
        Set up module store testing capabilities and initialize test courses.
        """
        super().setUp()
        # A mixed store backed by both the old Mongo store and the Split store,
        # so each test course can exercise a different storage backend.
        mongo_builder = MongoModulestoreBuilder()
        split_builder = VersioningModulestoreBuilder()
        mixed_builder = MixedModulestoreBuilder([('mongo', mongo_builder), ('split', split_builder)])
        with mixed_builder.build_without_contentstore() as (__, mixed_store):
            with mixed_store.default_store('mongo'):
                # Course that started last month and already ended (Mongo store).
                self.demo_course = mixed_store.create_course(
                    org="edX",
                    course="DemoX.1",
                    run="Fall_2014",
                    user_id=-3,  # -3 refers to a "testing user"
                    fields={
                        "start": _LAST_MONTH,
                        "end": _LAST_WEEK
                    }
                )
            with mixed_store.default_store('split'):
                # Course that has not started yet, with HTML in its display
                # name to exercise escaping (Split store).
                self.html_course = mixed_store.create_course(
                    org="UniversityX",
                    course="CS-203",
                    run="Y2096",
                    user_id=-3,  # -3 refers to a "testing user"
                    fields={
                        "start": _NEXT_WEEK,
                        "display_name": "Intro to <div>html</div>"
                    }
                )
def test_course_metadata_utils(self):
"""
Test every single function in course_metadata_utils.
"""
def mock_strftime_localized(date_time, format_string):
"""
Mock version of strftime_localized used for testing purposes.
Because we don't have a real implementation of strftime_localized
to work with (strftime_localized is provided by the XBlock runtime,
which we don't have access to for this test case), we must declare
this dummy implementation. This does NOT behave like a real
strftime_localized should. It purposely returns a really dumb value
that's only useful for testing purposes.
Arguments:
date_time (datetime): datetime to be formatted.
format_string (str): format specifier. Valid values include:
- 'DATE_TIME'
- 'TIME'
- 'SHORT_DATE'
- 'LONG_DATE'
Returns (str): format_string + " " + str(date_time)
"""
if format_string in ['DATE_TIME', 'TIME', 'SHORT_DATE', 'LONG_DATE']:
return format_string + " " + date_time.strftime("%Y-%m-%d %H:%M:%S")
else:
raise ValueError("Invalid format string :" + format_string)
def noop_gettext(text): # lint-amnesty, pylint: disable=unused-variable
"""Dummy implementation of gettext, so we don't need Django."""
return text
test_datetime = datetime(1945, 2, 6, 4, 20, 00, tzinfo=utc)
advertised_start_parsable = "2038-01-19 03:14:07"
FunctionTest = namedtuple('FunctionTest', 'function scenarios')
TestScenario = namedtuple('TestScenario', 'arguments expected_return')
function_tests = [
FunctionTest(clean_course_key, [
# Test with a Mongo course and '=' as padding.
TestScenario(
(self.demo_course.id, '='),
"course_MVSFQL2EMVWW6WBOGEXUMYLMNRPTEMBRGQ======"
),
# Test with a Split course and '~' as padding.
TestScenario(
(self.html_course.id, '~'),
"course_MNXXK4TTMUWXMMJ2KVXGS5TFOJZWS5DZLAVUGUZNGIYDGK2ZGIYDSNQ~"
),
]),
FunctionTest(url_name_for_block, [
TestScenario((self.demo_course,), self.demo_course.location.block_id),
TestScenario((self.html_course,), self.html_course.location.block_id),
]),
FunctionTest(display_name_with_default_escaped, [
# Test course with no display name.
TestScenario((self.demo_course,), "Empty"),
# Test course with a display name that contains characters that need escaping.
TestScenario((self.html_course,), "Intro to html"),
]),
FunctionTest(display_name_with_default, [
# Test course with no display name.
TestScenario((self.demo_course,), "Empty"),
# Test course with a display name that contains characters that need escaping.
TestScenario((self.html_course,), "Intro to <div>html</div>"),
]),
FunctionTest(number_for_course_location, [
TestScenario((self.demo_course.location,), "DemoX.1"),
TestScenario((self.html_course.location,), "CS-203"),
]),
FunctionTest(has_course_started, [
TestScenario((self.demo_course.start,), True),
TestScenario((self.html_course.start,), False),
]),
FunctionTest(has_course_ended, [
TestScenario((self.demo_course.end,), True),
TestScenario((self.html_course.end,), False),
]),
FunctionTest(course_start_date_is_default, [
TestScenario((test_datetime, advertised_start_parsable), False),
TestScenario((test_datetime, None), False),
TestScenario((DEFAULT_START_DATE, advertised_start_parsable), False),
TestScenario((DEFAULT_START_DATE, None), True),
]),
]
for function_test in function_tests:
for scenario in function_test.scenarios:
actual_return = function_test.function(*scenario.arguments)
assert actual_return == scenario.expected_return
# Even though we don't care about testing mock_strftime_localized,
# we still need to test it with a bad format string in order to
# satisfy the coverage checker.
with pytest.raises(ValueError):
mock_strftime_localized(test_datetime, 'BAD_FORMAT_SPECIFIER')
| 40.352273 | 101 | 0.588989 |
ed6459a627a7082a87293e537bafd2fb96b25e4b
| 19,703 |
py
|
Python
|
mmdet/models/relation_heads/relation_head.py
|
yizhe-ang/MMSceneGraph
|
d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba
|
[
"MIT"
] | null | null | null |
mmdet/models/relation_heads/relation_head.py
|
yizhe-ang/MMSceneGraph
|
d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba
|
[
"MIT"
] | null | null | null |
mmdet/models/relation_heads/relation_head.py
|
yizhe-ang/MMSceneGraph
|
d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba
|
[
"MIT"
] | null | null | null |
# ---------------------------------------------------------------
# relation_head.py
# Set-up time: 2020/5/22 下午4:09
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: [email protected] [OR] [email protected]
# ---------------------------------------------------------------
from ..registry import HEADS
import torch
import torch.nn as nn
import torch.nn.functional as F
from .. import builder
from ..losses import accuracy
from mmdet.datasets import build_dataset
import os
from .approaches import (FrequencyBias, RelationSampler,
PostProcessor, LinearRanker, LSTMRanker, TransformerRanker, get_weak_key_rel_labels)
from mmdet.core import force_fp32
from mmdet.core import get_classes, get_predicates, get_attributes, get_verbs, get_prepositions
import numpy as np
import mmcv
from mmdet.core import bbox2roi
import itertools
import copy
@HEADS.register_module
class RelationHead(nn.Module):
    """
    Base class shared by all scene-graph relation heads.

    Owns the components common to every head: RoI feature extractors for
    objects and object-pair union regions, a relation-pair sampler, an
    optional frequency-bias module built from dataset statistics, an optional
    key-relation ranker, the classification losses, and the test-time
    post-processor.  Concrete heads implement ``forward``.
    """
    def __init__(self,
                 dataset_config,
                 head_config,
                 bbox_roi_extractor=None,
                 relation_roi_extractor=None,
                 relation_sampler=None,
                 relation_ranker=None,
                 use_bias=True,
                 use_statistics=True,
                 num_classes=151,
                 num_predicates=51,
                 loss_object=dict(type='CrossEntropyLoss',
                                  use_sigmoid=False,
                                  loss_weight=1.0),
                 loss_relation=None
                 ):
        """
        Initialize the parameters and sub-modules shared by all relation heads.

        Args:
            dataset_config (dict): dataset settings used to build/load the
                relation statistics.  NOTE(review): its ``cache`` key is
                ``pop``-ed below, so the caller's dict is mutated.
            head_config (config): must provide ``use_gt_box`` / ``use_gt_label``,
                which select the evaluation mode (see below).
            bbox_roi_extractor / relation_roi_extractor (dict | None): configs
                for the per-object and pairwise union-region extractors.
            relation_sampler (dict | None): config for sampling subject/object
                pairs during training and testing.
            relation_ranker (dict | None): config for the optional key-relation
                ranking module; its ``type`` names a ranker class evaluated by
                name (LinearRanker / LSTMRanker / TransformerRanker).
            use_bias (bool): attach a FrequencyBias built from the statistics.
            use_statistics (bool): load (or compute and cache) dataset
                co-occurrence statistics.
            num_classes / num_predicates (int): label-space sizes, including
                the background class at index 0.
            loss_object / loss_relation (dict | None): loss configs.
        """
        super(RelationHead, self).__init__()
        self.use_bias = use_bias
        self.num_classes = num_classes
        self.num_predicates = num_predicates
        # upgrade some submodule attribute to this head
        self.head_config = head_config
        self.use_gt_box = self.head_config.use_gt_box
        self.use_gt_label = self.head_config.use_gt_label
        # A visual modality is enabled if either extractor requests it.
        self.with_visual_bbox = (bbox_roi_extractor is not None and bbox_roi_extractor.with_visual_bbox) or \
                                (relation_roi_extractor is not None and relation_roi_extractor.with_visual_bbox)
        self.with_visual_mask = (bbox_roi_extractor is not None and bbox_roi_extractor.with_visual_mask) or \
                                (relation_roi_extractor is not None and relation_roi_extractor.with_visual_mask)
        self.with_visual_point = (bbox_roi_extractor is not None and bbox_roi_extractor.with_visual_point) or \
                                 (relation_roi_extractor is not None and relation_roi_extractor.with_visual_point)
        self.dataset_config = dataset_config
        # Standard SGG protocols: predcls (GT boxes + GT labels),
        # sgcls (GT boxes only), sgdet (detected boxes).
        if self.use_gt_box:
            if self.use_gt_label:
                self.mode = 'predcls'
            else:
                self.mode = 'sgcls'
        else:
            self.mode = 'sgdet'
        if bbox_roi_extractor is not None:
            self.bbox_roi_extractor = builder.build_relation_roi_extractor(bbox_roi_extractor)
        if relation_roi_extractor is not None:
            self.relation_roi_extractor = builder.build_relation_roi_extractor(relation_roi_extractor)
        if relation_sampler is not None:
            relation_sampler.update(dict(use_gt_box=self.use_gt_box))
            self.relation_sampler = RelationSampler(**relation_sampler)
        self.post_processor = PostProcessor()
        # relation ranker: a standard component
        if relation_ranker is not None:
            ranker = relation_ranker.pop('type')
            #self.supervised_form = relation_ranker.pop('supervised_form')
            self.comb_factor = relation_ranker.pop('comb_factor', 0.5)
            self.area_form = relation_ranker.pop('area_form', 'rect')
            loss_ranking_relation = relation_ranker.pop('loss')
            self.loss_ranking_relation = builder.build_loss(loss_ranking_relation)
            # CrossEntropyLoss ranks via a 2-way (key / not-key) classifier;
            # every other loss regresses a single score.
            if loss_ranking_relation.type != 'CrossEntropyLoss':
                num_out = 1
            else:
                num_out = 2
            relation_ranker.update(dict(num_out=num_out))
            self.relation_ranker = eval(ranker)(**relation_ranker)
        if loss_object is not None:
            self.loss_object = builder.build_loss(loss_object)
        if loss_relation is not None:
            self.loss_relation = builder.build_loss(loss_relation)
        if use_statistics:
            cache_dir = dataset_config.pop('cache', None)
            print('Loading Statistics...')
            if cache_dir is None:
                raise FileNotFoundError('The cache_dir for caching the statistics is not provided.')
            # Load the cached statistics when present; otherwise compute them
            # from the full dataset and cache the result.
            if os.path.exists(cache_dir):
                statistics = torch.load(cache_dir, map_location=torch.device("cpu"))
            else:
                dataset = build_dataset(dataset_config)
                result = dataset.get_statistics()
                statistics = {
                    'fg_matrix': result['fg_matrix'],
                    'pred_dist': result['pred_dist'],
                    'obj_classes': result['obj_classes'],
                    'rel_classes': result['rel_classes'],
                    'att_classes': result['att_classes'],
                }
                torch.save(statistics, cache_dir)
            self.statistics = statistics
            print('\n Statistics created!')
            self.obj_classes, self.rel_classes, self.att_classes = statistics['obj_classes'], statistics['rel_classes'], \
                                                                   statistics['att_classes']
        else:
            # Fall back to the built-in class name lists; these do not include
            # the background entry, so it is inserted below.
            self.obj_classes, self.rel_classes, self.att_classes = get_classes(dataset_config.type), \
                                                                   get_predicates('visualgenome'), \
                                                                   get_attributes('visualgenome')
            self.obj_classes.insert(0, '__background__')
            self.rel_classes.insert(0, '__background__')
            self.att_classes.insert(0, '__background__')
        assert self.num_classes == len(self.obj_classes)
        assert self.num_predicates == len(self.rel_classes)
        if self.use_bias:
            # FrequencyBias needs the co-occurrence statistics, so use_bias
            # implies use_statistics.
            assert self.with_statistics
            # convey statistics into FrequencyBias to avoid loading again
            self.freq_bias = FrequencyBias(self.head_config, self.statistics)
    # --- capability predicates ------------------------------------------
    # Each property reports whether the corresponding optional component was
    # configured in __init__ (mirrors the mmdet ``with_*`` convention).
    @property
    def with_bbox_roi_extractor(self):
        # Per-object RoI feature extractor attached?
        return hasattr(self, 'bbox_roi_extractor') and self.bbox_roi_extractor is not None
    @property
    def with_relation_roi_extractor(self):
        # Pairwise union-region RoI feature extractor attached?
        return hasattr(self, 'relation_roi_extractor') and self.relation_roi_extractor is not None
    @property
    def with_statistics(self):
        # Dataset co-occurrence statistics loaded/created?
        return hasattr(self, 'statistics') and self.statistics is not None
    @property
    def with_bias(self):
        # Frequency-bias module available (requires statistics)?
        return hasattr(self, 'freq_bias') and self.freq_bias is not None
    @property
    def with_loss_object(self):
        # Object-classification loss configured?
        return hasattr(self, 'loss_object') and self.loss_object is not None
    @property
    def with_loss_relation(self):
        # Relation-classification loss configured?
        return hasattr(self, 'loss_relation') and self.loss_relation is not None
    @property
    def with_relation_ranker(self):
        # Key-relation ranking module configured?
        return hasattr(self, 'relation_ranker') and self.relation_ranker is not None
    def init_weights(self):
        """Initialize the weights of the attached sub-modules.

        NOTE(review): ``self.context_layer`` is not created by this base
        class; concrete subclasses are expected to define it before this is
        called — confirm against the specific head.
        """
        if self.with_bbox_roi_extractor:
            self.bbox_roi_extractor.init_weights()
        if self.with_relation_roi_extractor:
            self.relation_roi_extractor.init_weights()
        self.context_layer.init_weights()
    def frontend_features(self, img, img_meta, det_result, gt_result):
        """Sample relation pairs and extract object / union-region features.

        Stores the sampled pair indices, the relation map, and the (training)
        target labels on ``det_result``, then runs both RoI extractors.

        Args:
            img: backbone feature input passed through to the extractors.
            img_meta: per-image meta information.
            det_result: detection result carrying bboxes / masks / points.
            gt_result: ground-truth result, or None at pure test time.

        Returns:
            tuple: (roi_feats, union_feats, det_result).
        """
        bboxes, masks, points = det_result.bboxes, det_result.masks, copy.deepcopy(det_result.points)
        # train/val or: for finetuning on the dataset without relationship annotations
        if gt_result is not None and gt_result.rels is not None:
            if self.mode in ['predcls', 'sgcls']:
                sample_function = self.relation_sampler.gtbox_relsample
            else:
                sample_function = self.relation_sampler.detect_relsample
            sample_res = sample_function(det_result, gt_result)
            # The sampler optionally returns key-relation labels as a 4th item.
            if len(sample_res) == 4:
                rel_labels, rel_pair_idxes, rel_matrix, key_rel_labels = sample_res
            else:
                rel_labels, rel_pair_idxes, rel_matrix = sample_res
                key_rel_labels = None
        else:
            rel_labels, rel_matrix, key_rel_labels = None, None, None
            rel_pair_idxes = self.relation_sampler.prepare_test_pairs(det_result)
        det_result.rel_pair_idxes = rel_pair_idxes
        det_result.relmaps = rel_matrix
        det_result.target_rel_labels = rel_labels
        det_result.target_key_rel_labels = key_rel_labels
        rois = bbox2roi(bboxes)
        # merge image-wise masks or points
        if masks is not None:
            masks = list(itertools.chain(*masks))
        if points is not None:
            aug_points = []
            for pts_list in points:
                for pts in pts_list:
                    pts = pts.view(-1, 2)  # (:, [x, y])
                    # Add small Gaussian jitter, then scale so the farthest
                    # point from the origin lies on the unit circle.
                    pts += torch.from_numpy(np.random.normal(0, 0.02, size=pts.shape)).to(pts)
                    #pts -= torch.mean(pts, dim=0, keepdim=True)
                    pts /= torch.max(torch.sqrt(torch.sum(pts ** 2, dim=1)))
                    aug_points.append(pts)
            points = aug_points
        # extract the unary roi features and union roi features.
        roi_feats = self.bbox_roi_extractor(img, img_meta, rois, masks=masks, points=points)
        union_feats = self.relation_roi_extractor(img, img_meta, rois,
                                                  rel_pair_idx=rel_pair_idxes, masks=masks, points=points)
        # breakpoint()
        # roi_feats, ([92, 1024],)
        # union_feats, ([1352, 1024],)
        # det_result, Result object
        # FIXME: Tensor + Tuple??
        # return roi_feats + union_feats + (det_result,)
        return roi_feats, union_feats, det_result
    def forward(self, **kwargs):
        """Abstract forward pass; implemented by each concrete relation head."""
        raise NotImplementedError
    def relation_ranking_forward(self, input, det_result, gt_result, num_rels, is_testing):
        """Predict key-relation ranking scores for the sampled pairs.

        The post-processing of the raw ranker output depends on which ranking
        loss is configured (KLDiv / CrossEntropy / SigmoidDR); in every case
        the scores end up split per-image by ``num_rels`` and stored on
        ``det_result.ranking_scores``.  During train/val the ranking targets
        are also attached to ``det_result.target_key_rel_labels``.
        """
        # predict the ranking
        # tensor
        ranking_scores = self.relation_ranker(input.detach(),
                                              det_result, self.relation_roi_extractor.union_rois)
        # (1) weak supervision, KLDiv:
        if self.loss_ranking_relation.__class__.__name__ == 'KLDivLoss':
            if not is_testing: # include training and validation
                # list form
                det_result.target_key_rel_labels = get_weak_key_rel_labels(det_result, gt_result,
                                                                           self.comb_factor, self.area_form)
                ranking_scores = ranking_scores.view(-1)
                ranking_scores = ranking_scores.split(num_rels, 0)
            else:
                ranking_scores = ranking_scores.view(-1)
                ranking_scores = torch.sigmoid(ranking_scores).split(num_rels, dim=0)
        # (2) CEloss: the predicted one is the binary classification, 2 columns
        if self.loss_ranking_relation.__class__.__name__ == 'CrossEntropyLoss':
            if not is_testing:
                det_result.target_key_rel_labels = torch.cat(det_result.target_key_rel_labels, dim=-1)
            else:
                # Keep only the probability of the "key" class (column 1).
                ranking_scores = F.softmax(ranking_scores, dim=-1)[:, 1].view(-1).split(num_rels, 0)
        # Margin loss, DR loss
        elif self.loss_ranking_relation.__class__.__name__ == 'SigmoidDRLoss':
            if not is_testing:
                ranking_scores = ranking_scores.view(-1)
                ranking_scores = ranking_scores.split(num_rels, 0)
            else:
                ranking_scores = ranking_scores.view(-1)
                ranking_scores = torch.sigmoid(ranking_scores).split(num_rels, dim=0)
        det_result.ranking_scores = ranking_scores
        return det_result
    def loss(self, det_result):
        """Compute all configured losses from the fields on ``det_result``.

        Returns a dict containing, when enabled: object classification loss
        and accuracy, relation classification loss and accuracy, key-relation
        ranking loss, any extra losses passed via ``add_losses``, and any
        losses already computed by the specific head (``head_spec_losses``).
        """
        obj_scores, rel_scores, target_labels, target_rel_labels, add_for_losses, head_spec_losses = \
            det_result.refine_scores, \
            det_result.rel_scores, \
            det_result.target_labels, \
            det_result.target_rel_labels, \
            det_result.add_losses, \
            det_result.head_spec_losses
        losses = dict()
        if self.with_loss_object and obj_scores is not None:
            # fix: the val process
            # During validation the scores/labels arrive as per-image lists;
            # concatenate them before feeding the loss.
            if isinstance(target_labels, (tuple, list)):
                target_labels = torch.cat(target_labels, dim=-1)
            if isinstance(obj_scores, (tuple, list)):
                obj_scores = torch.cat(obj_scores, dim=0)
            losses['loss_object'] = self.loss_object(
                obj_scores,
                target_labels)
            losses['acc_object'] = accuracy(obj_scores, target_labels)
        if self.with_loss_relation and rel_scores is not None:
            if isinstance(target_rel_labels, (tuple, list)):
                target_rel_labels = torch.cat(target_rel_labels, dim=-1)
            if isinstance(rel_scores, (tuple, list)):
                rel_scores = torch.cat(rel_scores, dim=0)
            losses['loss_relation'] = self.loss_relation(
                rel_scores,
                target_rel_labels)
            losses['acc_relation'] = accuracy(rel_scores, target_rel_labels)
        if self.with_relation_ranker:
            target_key_rel_labels = det_result.target_key_rel_labels
            ranking_scores = det_result.ranking_scores
            # Average over the labeled (!= -1) entries only; None lets the
            # loss use its own default normalization for the list form.
            avg_factor = torch.nonzero(target_key_rel_labels!=-1).view(-1).size(0) if isinstance(target_key_rel_labels, torch.Tensor) else None
            losses['loss_ranking_relation'] = self.loss_ranking_relation(ranking_scores, target_key_rel_labels,
                                                                         avg_factor=avg_factor)
            # if self.supervised_form == 'weak':
            #     # use the KLdiv loss: the label is the soft distribution
            #     bs = 0
            #     losses['loss_ranking_relation'] = 0
            #     for ranking_score, target_key_rel_label in zip(ranking_scores, target_key_rel_labels):
            #         bs += ranking_score.size(0)
            #         losses['loss_ranking_relation'] += torch.nn.KLDivLoss(reduction='none')(F.log_softmax(ranking_score, dim=-1),
            #                                                                                 target_key_rel_label).sum(-1)
            #     losses['loss_ranking_relation'] = losses['loss_ranking_relation'] / bs
            # else:
            #     #TODO: firstly try the CE loss function, or you may try the margin loss
            #     #TODO: Check the margin loss
            #     #loss_func = builder.build_loss(self.loss_ranking_relation)
            #     losses['loss_ranking_relation'] = self.loss_ranking_relation(ranking_scores, target_key_rel_labels)
        if add_for_losses is not None:
            for loss_key, loss_item in add_for_losses.items():
                # A list holds (logits, target) pairs averaged with BCE; a
                # tuple holds one (logits, target[s]) pair for cross-entropy.
                if isinstance(loss_item, list): # loss_vctree_binary
                    loss_ = [F.binary_cross_entropy_with_logits(l[0], l[1]) for l in loss_item]
                    loss_ = sum(loss_) / len(loss_)
                    losses[loss_key] = loss_
                elif isinstance(loss_item, tuple):
                    if isinstance(loss_item[1], (list, tuple)):
                        target = torch.cat(loss_item[1], -1)
                    else:
                        target = loss_item[1]
                    losses[loss_key] = F.cross_entropy(loss_item[0], target)
                else:
                    raise NotImplementedError
        if head_spec_losses is not None:
            # this losses have been calculated in the specific relation head
            losses.update(head_spec_losses)
        return losses
    def get_result(self, det_result, scale_factor, rescale, key_first=False):
        """Post-process ``det_result`` for the test forward pass.

        Runs the PostProcessor, converts per-image tensors to numpy, rescales
        boxes/masks/points back to the original image scale when requested,
        and builds the per-class ``formatted_bboxes`` / ``formatted_masks``
        lists expected by the object-detection evaluation code.

        Args:
            det_result: raw head output for a single image (wrapped in lists).
            scale_factor: factor used to map predictions back to the
                original image size.
            rescale (bool): whether to undo the test-time resize.
            key_first (bool): forwarded to the PostProcessor.

        Returns:
            The processed result object.
        """
        result = self.post_processor(det_result, key_first=key_first)
        for k, v in result.__dict__.items():
            if k != 'add_losses' and k != 'head_spec_losses' and v is not None and len(v) > 0:
                _v = v[0] # remove the outer list
                if isinstance(_v, torch.Tensor):
                    result.__setattr__(k, _v.cpu().numpy())
                elif isinstance(_v, list): # for mask
                    result.__setattr__(k, [__v.cpu().numpy() for __v in _v])
                else:
                    result.__setattr__(k, _v) # e.g., img_shape, is a tuple
        if rescale:
            if result.bboxes is not None:
                result.bboxes[:, :4] = result.bboxes[:, :4] / scale_factor
            if result.refine_bboxes is not None:
                result.refine_bboxes[:, :4] = result.refine_bboxes[:, :4] / scale_factor
            if result.masks is not None:
                # Resize each mask to its (rescaled) box size.
                resize_masks = []
                for bbox, mask in zip(result.refine_bboxes, result.masks):
                    _bbox = bbox.astype(np.int32)
                    w = max(_bbox[2] - _bbox[0] + 1, 1)
                    h = max(_bbox[3] - _bbox[1] + 1, 1)
                    resize_masks.append(mmcv.imresize(mask.astype(np.uint8), (w, h)))
                result.masks = resize_masks
            if result.points is not None:
                resize_points = []
                for points in result.points:
                    resize_points.append(points / scale_factor)
                result.points = resize_points
        # if needed, adjust the form for object detection evaluation
        # (one entry per foreground class; index i holds class i+1).
        result.formatted_bboxes, result.formatted_masks = [], []
        if result.refine_bboxes is None:
            result.formatted_bboxes = [
                np.zeros((0, 5), dtype=np.float32) for i in range(self.num_classes - 1)
            ]
        else:
            result.formatted_bboxes = [
                result.refine_bboxes[result.refine_labels == i + 1, :] for i in range(self.num_classes - 1)]
        if result.masks is None:
            result.formatted_masks = [
                [] for i in range(self.num_classes - 1)
            ]
        else:
            result.formatted_masks = [[] for i in range(self.num_classes - 1)]
            for i in range(len(result.masks)):
                result.formatted_masks[result.refine_labels[i] - 1].append(result.masks[i])
        # to save the space, drop the saliency maps, if it exists
        if result.saliency_maps is not None:
            result.saliency_maps = None
        return result
def process_ignore_objects(self, input, ignore_classes):
"""
An API used in inference stage for processing the data when some object classes should be ignored.
"""
ignored_input = input.clone()
ignored_input[:, ignore_classes] = 0.
return ignored_input
| 46.36 | 144 | 0.584175 |
6a1a573c3e518a6f5976fba80dd6ae14844a19d9
| 4,140 |
py
|
Python
|
quadpy/sphere/_lebedev/__init__.py
|
whzup/quadpy
|
ca8bd2f9c5a4ae30dc85d8fb79217602bd42525e
|
[
"MIT"
] | null | null | null |
quadpy/sphere/_lebedev/__init__.py
|
whzup/quadpy
|
ca8bd2f9c5a4ae30dc85d8fb79217602bd42525e
|
[
"MIT"
] | null | null | null |
quadpy/sphere/_lebedev/__init__.py
|
whzup/quadpy
|
ca8bd2f9c5a4ae30dc85d8fb79217602bd42525e
|
[
"MIT"
] | null | null | null |
import json
import os
import re
from ...helpers import article
from .._helpers import SphereScheme, cartesian_to_spherical, untangle2
# Sphere integration schemes from a series of publications, in chronological order
# <https://en.wikipedia.org/wiki/Lebedev_quadrature>
# <https://people.sc.fsu.edu/~jburkardt/datasets/sphere_lebedev_rule/sphere_lebedev_rule.html>
# Bibliography entries attached to every scheme returned by _read below;
# listed chronologically (1975 -> 1999), matching the orders each paper covers.
citations = [
    article(
        authors="V.I. Lebedev",
        title="Values of the nodes and weights of ninth to seventeenth order Gauss-Markov quadrature formulae invariant under the octahedron group with inversion",
        journal="Computational Mathematics and Mathematical Physics",
        volume="15",
        year="1975",
        pages="44-51",
    ),
    article(
        authors=["V.I. Lebedev"],
        year="1976",
        title="Quadratures on a sphere",
        journal="Zh. Vȳchisl. Mat. Mat. Fiz.",
        volume="16",
        number="2",
        pages="293–306",
        url="https://doi.org/10.1016/0041-5553(76)90100-2",
    ),
    article(
        authors=["V.I. Lebedev"],
        title="Spherical quadrature formulas exact to orders 25-29",
        journal="Siberian Mathematical Journal",
        volume="18",
        year="1977",
        pages="99-107",
    ),
    article(
        authors=["V.I. Lebedev", "A.L. Skorokhodov"],
        title="Quadrature formulas of orders 41, 47, and 53 for the sphere",
        journal="Russian Acad. Sci. Dokl. Math.",
        volume="45",
        year="1992",
        pages="587-592",
    ),
    article(
        authors=["V.I. Lebedev"],
        title="A quadrature formula for the sphere of 59th algebraic order of accuracy",
        journal="Russian Acad. Sci. Dokl. Math.",
        volume="50",
        year="1995",
        pages="283-286",
    ),
    article(
        authors=["V.I. Lebedev", "D.N. Laikov"],
        title="A quadrature formula for the sphere of the 131st algebraic order of accuracy",
        journal="Doklady Mathematics",
        volume="59",
        number="3",
        year="1999",
        pages="477-481",
    ),
]
def _read(index):
    """Load the Lebedev scheme stored next to this module as JSON.

    Args:
        index (str): degree tag such as ``"003a"`` or ``"131"`` — a number,
            optionally followed by a lowercase variant suffix.

    Returns:
        SphereScheme: the scheme named ``Lebedev(<index>)``, with points
        converted to spherical coordinates and the citation list attached.
    """
    name = f"Lebedev({index})"
    this_dir = os.path.dirname(os.path.realpath(__file__))
    # Split "003a" into the zero-padded number and the variant suffix.
    m = re.match("([0-9]+)([a-z]*)", index)
    filename = "lebedev_{:03d}{}.json".format(int(m.group(1)), m.group(2))
    # Pin the encoding so reading the data files does not depend on the
    # platform's locale default.
    with open(os.path.join(this_dir, filename), "r", encoding="utf-8") as f:
        data = json.load(f)
    degree = data.pop("degree")
    points, weights = untangle2(data)
    azimuthal_polar = cartesian_to_spherical(points)
    return SphereScheme(name, weights, points, azimuthal_polar, degree, citations)
# One accessor per published scheme; the numeric suffix is the polynomial
# degree of exactness, and a/b/c distinguish variants of the same degree.
def lebedev_003a():
    return _read("003a")
def lebedev_003b():
    return _read("003b")
def lebedev_003c():
    return _read("003c")
def lebedev_005():
    return _read("005")
def lebedev_007():
    return _read("007")
def lebedev_009():
    return _read("009")
def lebedev_011():
    return _read("011")
def lebedev_013():
    return _read("013")
def lebedev_015():
    return _read("015")
def lebedev_017():
    return _read("017")
def lebedev_019():
    return _read("019")
def lebedev_021():
    return _read("021")
def lebedev_023():
    return _read("023")
def lebedev_025():
    return _read("025")
def lebedev_027():
    return _read("027")
def lebedev_029():
    return _read("029")
def lebedev_031():
    return _read("031")
def lebedev_035():
    return _read("035")
def lebedev_041():
    return _read("041")
def lebedev_047():
    return _read("047")
def lebedev_053():
    return _read("053")
def lebedev_059():
    return _read("059")
def lebedev_065():
    return _read("065")
def lebedev_071():
    return _read("071")
def lebedev_077():
    return _read("077")
def lebedev_083():
    return _read("083")
def lebedev_089():
    return _read("089")
def lebedev_095():
    return _read("095")
def lebedev_101():
    return _read("101")
def lebedev_107():
    return _read("107")
def lebedev_113():
    return _read("113")
def lebedev_119():
    return _read("119")
def lebedev_125():
    return _read("125")
def lebedev_131():
    return _read("131")
| 19.078341 | 163 | 0.618116 |
2711750260f2afd0bfdff3b3873aafefffa90611
| 4,339 |
py
|
Python
|
catkin_ws/devel/lib/python2.7/dist-packages/duckietown_msgs/msg/_Rects.py
|
bendychua/final
|
35fd0477ec5950479f0e082a65db2aa05a92db82
|
[
"CC-BY-2.0"
] | 1 |
2019-05-13T00:40:11.000Z
|
2019-05-13T00:40:11.000Z
|
catkin_ws/devel/lib/python2.7/dist-packages/duckietown_msgs/msg/_Rects.py
|
bendychua/final
|
35fd0477ec5950479f0e082a65db2aa05a92db82
|
[
"CC-BY-2.0"
] | null | null | null |
catkin_ws/devel/lib/python2.7/dist-packages/duckietown_msgs/msg/_Rects.py
|
bendychua/final
|
35fd0477ec5950479f0e082a65db2aa05a92db82
|
[
"CC-BY-2.0"
] | null | null | null |
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from duckietown_msgs/Rects.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import duckietown_msgs.msg
class Rects(genpy.Message):
  # NOTE(review): this class is generated by genpy from Rects.msg (see the
  # module docstring); regenerate from the .msg file rather than hand-editing.
  _md5sum = "f5b74b2b15b5d4d2f299389f9f4ca7f8"
  _type = "duckietown_msgs/Rects"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """duckietown_msgs/Rect[] rects
================================================================================
MSG: duckietown_msgs/Rect
# all in pixel coordinate
# (x, y, w, h) defines a rectangle
int32 x
int32 y
int32 w
int32 h
"""
  __slots__ = ['rects']
  _slot_types = ['duckietown_msgs/Rect[]']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       rects
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(Rects, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.rects is None:
        self.rects = []
    else:
      self.rects = []
  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      # uint32 element count, then 4 little-endian int32 per Rect.
      length = len(self.rects)
      buff.write(_struct_I.pack(length))
      for val1 in self.rects:
        _x = val1
        buff.write(_struct_4i.pack(_x.x, _x.y, _x.w, _x.h))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      # Generated no-op guard (assigns None back to None).
      if self.rects is None:
        self.rects = None
      end = 0
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.rects = []
      for i in range(0, length):
        val1 = duckietown_msgs.msg.Rect()
        _x = val1
        start = end
        end += 16
        (_x.x, _x.y, _x.w, _x.h,) = _struct_4i.unpack(str[start:end])
        self.rects.append(val1)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      length = len(self.rects)
      buff.write(_struct_I.pack(length))
      for val1 in self.rects:
        _x = val1
        buff.write(_struct_4i.pack(_x.x, _x.y, _x.w, _x.h))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      # Generated no-op guard (assigns None back to None).
      if self.rects is None:
        self.rects = None
      end = 0
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.rects = []
      for i in range(0, length):
        val1 = duckietown_msgs.msg.Rect()
        _x = val1
        start = end
        end += 16
        (_x.x, _x.y, _x.w, _x.h,) = _struct_4i.unpack(str[start:end])
        self.rects.append(val1)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Pre-built Struct objects shared by the (de)serialize methods above:
# _struct_I handles the uint32 length prefix, _struct_4i the four
# little-endian int32 fields of a Rect.
_struct_I = genpy.struct_I
_struct_4i = struct.Struct("<4i")
| 31.671533 | 123 | 0.626412 |
c6ee9aafcd8fbba69eab3e1516031801425bafe3
| 2,355 |
py
|
Python
|
src/psdtools.py
|
Constuelo/email_generator
|
d8dff15a77ccf8ef6db7393edb44139e07ab3d20
|
[
"MIT"
] | 1 |
2019-01-17T19:40:02.000Z
|
2019-01-17T19:40:02.000Z
|
src/psdtools.py
|
Constuelo/email_generator
|
d8dff15a77ccf8ef6db7393edb44139e07ab3d20
|
[
"MIT"
] | 5 |
2019-01-17T14:16:06.000Z
|
2019-07-04T20:19:39.000Z
|
src/psdtools.py
|
Constuelo/photoshop_email_generator
|
d8dff15a77ccf8ef6db7393edb44139e07ab3d20
|
[
"MIT"
] | null | null | null |
import sys
from src.helpers import colour_to_hex
from src.encode import encode
def list_of_psd_layers(artboard):
    """Collect the visible group layers of *artboard*, excluding headers.

    PSD layers are iterated in reverse so the returned order matches the
    document flow.  A layer is kept when it is visible, is a group, and its
    name does not contain "header" (case-insensitive).

    Args:
        artboard: iterable of PSD layer objects.

    Returns:
        list: the matching layer objects.
    """
    # Note: the original compared against "header".lower(), a no-op on the
    # already-lowercase literal; the name itself is lowercased for matching.
    return [
        layer
        for layer in reversed(list(artboard))
        if layer.is_visible()
        and layer.kind == "group"
        and "header" not in layer.name.lower()
    ]
def list_of_modules(modules):
    """Describe each module of an artboard.

    Visible group modules are returned as ``(name, extracted_text)`` tuples;
    a module whose text extraction raises KeyError is silently dropped.
    Every other module contributes its bare name.
    """
    collected = []
    for mod in modules:
        if not (mod.is_visible() and mod.kind == "group"):
            collected.append(mod.name)
            continue
        try:
            collected.append((mod.name, extract_psd_module_text(mod)))
        except KeyError:
            pass
    return collected
def get_mobile_artboard(psd_load):
    """Return the first artboard whose name contains "mobile".

    Args:
        psd_load: iterable of PSD top-level layers/artboards.

    Returns:
        The first artboard (kind == "artboard") whose name contains
        "mobile", case-insensitively.

    Exits the program with an explanatory message when no such artboard
    exists.  (The original only reported the error when the *last* artboard
    happened to be falsy, and raised NameError on an empty document.)
    """
    for artboard in psd_load:
        if "mobile" in artboard.name.lower() and artboard.kind == "artboard":
            return artboard
    print("There was a problem.")
    print('Please ensure the artboard name includes "mobile"')
    sys.exit()
def extract_psd_module_text(module):
    """Extract spacer names and text-style entries from a PSD group module.

    Walks the module's descendants bottom-to-top and collects:
      * the name of each visible smart-object layer whose name contains
        "spacer";
      * for each visible type layer, a list of
        [layer name, text, font size, tracking, hex colour].

    NOTE(review): sizes are halved and tracking divided by 1000 —
    presumably the PSDs are authored at 2x and tracking is in 1/1000 em;
    confirm against the design templates.
    """
    lst = []
    try:
        for layer in reversed(list(module.descendants())):
            if (
                layer.is_visible()
                and layer.kind == "smartobject"
                and "spacer".lower() in layer.name.lower()
            ):
                lst.append(layer.name)
            elif layer.is_visible() and layer.kind == "type":
                """ font_type """
                # First style run of the layer's engine data drives all values.
                style_sheet = layer.engine_dict["StyleRun"]["RunArray"][0]
                font_type = layer.text.rstrip().replace("\r", " ")
                font_size = int(
                    round(style_sheet["StyleSheet"]["StyleSheetData"]["FontSize"] / 2)
                )
                font_tracking = f"{style_sheet['StyleSheet']['StyleSheetData']['Tracking'] / 1000:.2f}"
                font_color = style_sheet["StyleSheet"]["StyleSheetData"]["FillColor"][
                    "Values"
                ]
                font_color = colour_to_hex(list(font_color))
                type_array = [
                    layer.name,
                    font_type,
                    font_size,
                    font_tracking,
                    font_color,
                ]
                lst.append(type_array)
    except AttributeError:
        pass
    return lst
| 30.192308 | 103 | 0.523142 |
cb9a0b23d3845f2a5ca68809a4238823bafe54d8
| 5,164 |
py
|
Python
|
examples/python/reconstruction_system/initialize_config.py
|
amoran-symbio/Open3D
|
ae7e44e0dcef11a5df763819d47dec8c5bd5294b
|
[
"MIT"
] | 1,455 |
2021-07-27T19:44:50.000Z
|
2022-03-31T19:39:21.000Z
|
examples/python/reconstruction_system/initialize_config.py
|
amoran-symbio/Open3D
|
ae7e44e0dcef11a5df763819d47dec8c5bd5294b
|
[
"MIT"
] | 1,439 |
2021-07-27T16:02:52.000Z
|
2022-03-31T22:29:05.000Z
|
examples/python/reconstruction_system/initialize_config.py
|
amoran-symbio/Open3D
|
ae7e44e0dcef11a5df763819d47dec8c5bd5294b
|
[
"MIT"
] | 339 |
2021-07-28T03:07:28.000Z
|
2022-03-31T13:38:00.000Z
|
# ----------------------------------------------------------------------------
# - Open3D: www.open3d.org -
# ----------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2018-2021 www.open3d.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# ----------------------------------------------------------------------------
# examples/python/reconstruction_system/initialize_config.py
import os
import sys
pyexample_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(pyexample_path)
from utility.file import check_folder_structure, extract_rgbd_frames
def set_default_value(config, key, value):
    """Insert ``value`` under ``key`` only when ``key`` is absent.

    Values already present in ``config`` (including falsy ones) are kept.
    Implemented with the standard ``dict.setdefault`` instead of the
    hand-rolled membership test.
    """
    config.setdefault(key, value)
def initialize_config(config):
    """Fill ``config`` in place with reconstruction-system defaults.

    Existing entries are never overwritten.  When ``path_dataset`` names an
    RGBD video file (``.bag``), frames are extracted first and the dataset
    path, intrinsic path and depth scale are redirected to the extracted data.
    """
    # Algorithm / parameter defaults, applied only for missing keys.
    # (`voxel_size` and `min_depth` are also consumed by the `slac` and
    # `slac_integrate` stages.)
    parameter_defaults = [
        ("depth_map_type", "redwood"),
        ("n_frames_per_fragment", 100),
        ("n_keyframes_per_n_frame", 5),
        ("min_depth", 0.3),
        ("max_depth", 3.0),
        ("voxel_size", 0.05),
        ("max_depth_diff", 0.07),
        ("depth_scale", 1000),
        ("preference_loop_closure_odometry", 0.1),
        ("preference_loop_closure_registration", 5.0),
        ("tsdf_cubic_size", 3.0),
        ("icp_method", "color"),
        ("global_registration", "ransac"),
        ("python_multi_threading", True),
        ("max_iterations", 5),
        ("sdf_trunc", 0.04),
        ("block_count", 40000),
        ("distance_threshold", 0.07),
        ("fitness_threshold", 0.3),
        ("regularizer_weight", 1),
        ("method", "slac"),
        ("device", "CPU:0"),
        ("save_output_as", "pointcloud"),
        ("folder_slac", "slac/"),
        ("template_optimized_posegraph_slac", "optimized_posegraph_slac.json"),
        ("folder_fragment", "fragments/"),
    ]
    for key, value in parameter_defaults:
        config.setdefault(key, value)
    # The slac subfolder embeds the (possibly just defaulted) voxel size.
    config.setdefault("subfolder_slac", "slac/%0.3f/" % config["voxel_size"])
    # File-layout defaults for fragments and the final scene.
    path_defaults = [
        ("template_fragment_posegraph", "fragments/fragment_%03d.json"),
        ("template_fragment_posegraph_optimized",
         "fragments/fragment_optimized_%03d.json"),
        ("template_fragment_pointcloud", "fragments/fragment_%03d.ply"),
        ("folder_scene", "scene/"),
        ("template_global_posegraph", "scene/global_registration.json"),
        ("template_global_posegraph_optimized",
         "scene/global_registration_optimized.json"),
        ("template_refined_posegraph", "scene/refined_registration.json"),
        ("template_refined_posegraph_optimized",
         "scene/refined_registration_optimized.json"),
        ("template_global_mesh", "scene/integrated.ply"),
        ("template_global_traj", "scene/trajectory.log"),
    ]
    for key, value in path_defaults:
        config.setdefault(key, value)
    if config["path_dataset"].endswith(".bag"):
        assert os.path.isfile(config["path_dataset"]), (
            f"File {config['path_dataset']} not found.")
        print("Extracting frames from RGBD video file")
        config["path_dataset"], config["path_intrinsic"], config[
            "depth_scale"] = extract_rgbd_frames(config["path_dataset"])
| 49.180952 | 79 | 0.688613 |
5684bf064343281075737322748fce688191ad58
| 1,830 |
py
|
Python
|
Python/smallest-range-ii.py
|
RideGreg/LeetCode
|
b70818b1e6947bf29519a24f78816e022ebab59e
|
[
"MIT"
] | 1 |
2022-01-30T06:55:28.000Z
|
2022-01-30T06:55:28.000Z
|
Python/smallest-range-ii.py
|
RideGreg/LeetCode
|
b70818b1e6947bf29519a24f78816e022ebab59e
|
[
"MIT"
] | null | null | null |
Python/smallest-range-ii.py
|
RideGreg/LeetCode
|
b70818b1e6947bf29519a24f78816e022ebab59e
|
[
"MIT"
] | 1 |
2021-12-31T03:56:39.000Z
|
2021-12-31T03:56:39.000Z
|
# Time: O(nlogn)
# Space: O(1)
# 910
# Given an array A of integers, for each integer A[i]
# we need to choose either x = -K or x = K, and add x to A[i] (only once).
#
# After this process, we have some array B.
#
# Return the smallest possible difference
# between the maximum value of B and the minimum value of B.
#
# Example 1:
#
# Input: A = [1], K = 0
# Output: 0
# Explanation: B = [1]
# Example 2:
#
# Input: A = [0,10], K = 2
# Output: 6
# Explanation: B = [2,8]
# Example 3:
#
# Input: A = [1,3,6], K = 3
# Output: 3
# Explanation: B = [4,6,3]
#
# Note:
# - 1 <= A.length <= 10000
# - 0 <= A[i] <= 10000
# - 0 <= K <= 10000
# Solution:
# Intuition is increase smaller A[i] (go up) and decrease larger A[i] (go down).
# Formalize this concept: if A[i] < A[j], we don't need to consider when A[i] goes down
# while A[j] goes up. Because the interval (A[i]+K, A[j]-K) is a subset of (A[i]-K, A[j]+K).
#
# For sorted A, *say A[i] is the largest i that goes up.* We don't care A[j]-K for 0<=j<i due to
# the above reason. Both A[0]+K, A[1]+K, ... A[i]+K and A[i+1]-K, A[i+2]-K, ... A[-1]-K are
# mono increasing sequences, we only care the extremal values at 2 ends.
# Then A[0]+K, A[i]+K, A[i+1]-K, A[-1]-K are the only relevant values for calculating the answer:
# every other value is between one of these extremal values.
class Solution(object):
    def smallestRangeII(self, A, K):
        """
        :type A: List[int]
        :type K: int
        :rtype: int

        After sorting, only a single split point i matters: A[0..i] all go
        up by K and A[i+1..] all go down by K, so just the four extremal
        values A[0]+K, A[i]+K, A[i+1]-K, A[-1]-K determine the spread.
        Note: A is sorted in place.
        """
        A.sort()
        result = A[-1] - A[0]  # answer when every element goes up (i == len(A)-1)
        # `range` (not the Python-2-only `xrange`) keeps this working on Python 3.
        for i in range(len(A) - 1):
            result = min(result,
                         max(A[i] + K, A[-1] - K) - min(A[0] + K, A[i + 1] - K))
            # high ends of the two monotone runs - low ends of the two runs
        return result
| 31.016949 | 97 | 0.574317 |
14524a40578c18021c0d3bf903aa6108a1f39231
| 6,831 |
py
|
Python
|
python_bindings/setup.py
|
NancyLi1013/nmslib
|
9db508b0d9965e93f37edf609bb63dd55fd6448a
|
[
"Apache-2.0"
] | 1 |
2020-09-18T04:35:45.000Z
|
2020-09-18T04:35:45.000Z
|
python_bindings/setup.py
|
NancyLi1013/nmslib
|
9db508b0d9965e93f37edf609bb63dd55fd6448a
|
[
"Apache-2.0"
] | null | null | null |
python_bindings/setup.py
|
NancyLi1013/nmslib
|
9db508b0d9965e93f37edf609bb63dd55fd6448a
|
[
"Apache-2.0"
] | null | null | null |
import os
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
import sys
import setuptools
import struct
# Package version reported to setuptools.
__version__ = '2.0.5'
# struct.calcsize("P") is the pointer size in bytes: reject 32-bit Windows.
if sys.platform.startswith("win") and struct.calcsize("P") * 8 == 32:
    raise RuntimeError("Windows 32-bit is not supported.")
dep_list = ['pybind11>=2.2.3', 'psutil']
# First two components of the interpreter version, e.g. (3, 8).
py_version = tuple([int(v) for v in sys.version.split('.')[:2]])
if py_version == (2, 7):
    # Pin numpy below 1.17, the last line supporting Python 2.7.
    dep_list.append('numpy>=1.10.0,<1.17')
elif py_version < (3, 5):
    raise RuntimeError("Python version 2.7 or >=3.5 required.")
else:
    dep_list.append('numpy>=1.10.0')
print('Dependence list:', dep_list)
libdir = os.path.join(".", "similarity_search")
if not os.path.isdir(libdir) and sys.platform.startswith("win"):
    # If the nmslib symlink doesn't work (windows symlink support w/ git is
    # a little iffy), fallback to use a relative path
    libdir = os.path.join("..", "similarity_search")
# Prebuilt static library produced by the C++ build, if present.
library_file = os.path.join(libdir, "release", "libNonMetricSpaceLib.a")
source_files = ['nmslib.cc', 'tensorflow/cpu_feature_guard.cc', 'tensorflow/cpu_info.cc']
libraries = []
extra_objects = []
if os.path.exists(library_file):
    # if we have a prebuilt nmslib library file, use that.
    extra_objects.append(library_file)
else:
    # Otherwise build all the files here directly (excluding extras which need boost)
    exclude_files = set("""space_sqfd.cc dummy_app.cc main.cc""".split())
    full_file_list = list(os.walk(os.path.join(libdir, "src")))
    for root, subdirs, files in full_file_list:
        source_files.extend(os.path.join(root, f) for f in files
                            if f.endswith(".cc") and f not in exclude_files)
class get_pybind_include(object):
    """Lazy stand-in for the pybind11 include path.

    Importing pybind11 is deferred until ``str()`` is called, i.e. until
    setup has had a chance to install it, so ``get_include()`` is only
    invoked once the package is actually available.
    """

    def __init__(self, user=False):
        self.user = user

    def __str__(self):
        from pybind11 import get_include
        return get_include(self.user)
# Single extension module: the bindings plus (when no prebuilt static
# library was found above) all nmslib C++ sources compiled in directly.
ext_modules = [
    Extension(
        'nmslib',
        source_files,
        include_dirs=[os.path.join(libdir, "include"),
                      "tensorflow",
                      get_pybind_include(),
                      get_pybind_include(user=True)],
        libraries=libraries,
        language='c++',
        extra_objects=extra_objects,
    ),
]
# As of Python 3.6, CCompiler has a `has_flag` method.
# cf http://bugs.python.org/issue26689
def has_flag(compiler, flagname):
    """Return True if `compiler` accepts the command-line flag `flagname`.

    A throw-away C++ source file is compiled with the flag; a CompileError
    from the attempt means the flag is unsupported.
    """
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.cpp') as src:
        src.write('int main (int argc, char **argv) { return 0; }')
        try:
            compiler.compile([src.name], extra_postargs=[flagname])
        except setuptools.distutils.errors.CompileError:
            return False
    return True
def cpp_flag(compiler):
    """Return the -std=c++[11/14] compiler flag.

    C++14 is preferred over C++11; probes the compiler in that order.
    """
    for candidate in ('-std=c++14', '-std=c++11'):
        if has_flag(compiler, candidate):
            return candidate
    raise RuntimeError('Unsupported compiler -- at least C++11 support '
                       'is needed!')
class BuildExt(build_ext):
    """A custom build extension for adding compiler-specific options."""
    # Per-compiler base options; keyed by distutils compiler_type.
    c_opts = {
        'msvc': [ '/EHsc', '/openmp', '/O2'],
        'unix': [ '-O3'],
    }
    # Flags that already pin a target architecture; if one of these appears
    # in the user's CFLAGS we must not add our own -march=native below.
    arch_list = '-march -msse -msse2 -msse3 -mssse3 -msse4 -msse4a -msse4.1 -msse4.2 -mavx -mavx2'.split()
    # NOTE: the following class-body logic runs at import time and depends on
    # the environment (ARCH, CFLAGS) and platform.
    if 'ARCH' in os.environ:
        # /arch:[IA32|SSE|SSE2|AVX|AVX2|ARMv7VE|VFPv4]
        # See https://docs.microsoft.com/en-us/cpp/build/reference/arch-x86
        c_opts['msvc'].append("/arch:{}".format(os.environ['ARCH'])) # bugfix
    no_arch_flag=True
    if 'CFLAGS' in os.environ:
        for flag in arch_list:
            if flag in os.environ["CFLAGS"]:
                no_arch_flag=False
                break
    if no_arch_flag:
        # No user-specified arch flag: tune for the build machine.
        c_opts['unix'].append('-march=native')
    link_opts = {
        'unix': [],
        'msvc': [],
    }
    if sys.platform == 'darwin':
        # macOS: build against libc++ with a 10.7 deployment target.
        c_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.7']
        link_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.7']
    else:
        # Other unix-like platforms: enable OpenMP.
        c_opts['unix'].append("-fopenmp")
        link_opts['unix'].extend(['-fopenmp', '-pthread'])
    def build_extensions(self):
        # Finalize compile/link flags once the concrete compiler is known.
        ct = self.compiler.compiler_type
        opts = self.c_opts.get(ct, [])
        if ct == 'unix':
            opts.append('-DVERSION_INFO="%s"' %
                        self.distribution.get_version())
            opts.append(cpp_flag(self.compiler))
            if has_flag(self.compiler, '-fvisibility=hidden'):
                opts.append('-fvisibility=hidden')
        elif ct == 'msvc':
            opts.append('/DVERSION_INFO=\\"%s\\"' %
                        self.distribution.get_version())
        print('Extra compilation arguments:', opts)
        # extend include dirs here (don't assume numpy/pybind11 are installed when first run, since
        # pip could have installed them as part of executing this script
        import numpy as np
        for ext in self.extensions:
            ext.extra_compile_args.extend(opts)
            ext.extra_link_args.extend(self.link_opts.get(ct, []))
            ext.include_dirs.extend([
                # Path to pybind11 headers
                get_pybind_include(),
                get_pybind_include(user=True),
                # Path to numpy headers
                np.get_include()
            ])
        build_ext.build_extensions(self)
# Package metadata and build wiring; BuildExt (above) injects the
# compiler-specific flags when the extension is built.
setup(
    name='nmslib',
    version=__version__,
    description='Non-Metric Space Library (NMSLIB)',
    author='B. Naidan, L. Boytsov, Yu. Malkov, B. Frederickson, D. Novak, et al.',
    url='https://github.com/searchivarius/nmslib',
    long_description="""Non-Metric Space Library (NMSLIB) is an efficient cross-platform
similarity search library and a toolkit for evaluation of similarity search methods.
The goal of the project is to create an effective and comprehensive toolkit for searching
in generic and non-metric spaces. Even though the library contains a variety of metric-space
access methods, our main focus is on generic and approximate search methods, in particular,
on methods for non-metric spaces. NMSLIB is possibly the first library with a principled
support for non-metric space searching.""",
    ext_modules=ext_modules,
    install_requires=dep_list,
    setup_requires=dep_list,
    cmdclass={'build_ext': BuildExt},
    test_suite="tests",
    zip_safe=False,
)
| 35.578125 | 106 | 0.633875 |
85d0fe63fda15a3fcab26cfb0ad61313a1441c9c
| 153 |
py
|
Python
|
bin/hexes/hexahexes-triangle-1.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | null | null | null |
bin/hexes/hexahexes-triangle-1.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | null | null | null |
bin/hexes/hexahexes-triangle-1.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | 1 |
2022-01-02T16:54:14.000Z
|
2022-01-02T16:54:14.000Z
|
#!/usr/bin/env python
# $Id$
"""Solve the hexahexes triangle-1 puzzle using the puzzler framework."""
import puzzler
from puzzler.puzzles.hexahexes import HexahexesTriangle1
# puzzler.run handles command-line options and solution output.
puzzler.run(HexahexesTriangle1)
| 15.3 | 56 | 0.771242 |
93c5e3880176af9ae3596dfac17dc3f56151e36d
| 3,441 |
py
|
Python
|
demo/dogeyenose_detector.py
|
jireh-father/mmdetection_210507
|
857c78d2caef8842909cb46d870570b4dd176792
|
[
"Apache-2.0"
] | null | null | null |
demo/dogeyenose_detector.py
|
jireh-father/mmdetection_210507
|
857c78d2caef8842909cb46d870570b4dd176792
|
[
"Apache-2.0"
] | null | null | null |
demo/dogeyenose_detector.py
|
jireh-father/mmdetection_210507
|
857c78d2caef8842909cb46d870570b4dd176792
|
[
"Apache-2.0"
] | null | null | null |
from argparse import ArgumentParser
from mmdet.apis import inference_detector, init_detector, save_result_pyplot
import glob
import os
import time
import shutil
from PIL import Image
import numpy as np
class DogEyeNoseDetector:
    # NOTE(review): unused placeholder -- all detection logic currently lives
    # in main() below; presumably reserved for a future OO API.
    def __init__(self):
        pass
def is_intersect(a, b):
    """Return True when boxes ``a`` and ``b`` overlap (touching edges count).

    Boxes are [x1, y1, x2, y2].  The original trailing comment claimed a
    ``None`` return for disjoint boxes; the function in fact returns a bool.
    """
    dx = min(a[2], b[2]) - max(a[0], b[0])  # overlap width; >= 0 when touching
    dy = min(a[3], b[3]) - max(a[1], b[1])  # overlap height
    return dx >= 0 and dy >= 0
def main():
    """CLI entry point: detect dog eyes/noses in images, save visualizations
    and cropped eye regions, and copy images with no detections aside."""
    parser = ArgumentParser()
    parser.add_argument('--imgs', help='Image file')
    parser.add_argument('--output_dir')
    parser.add_argument('--config', help='Config file')
    parser.add_argument('--checkpoint', help='Checkpoint file')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--score-thr', type=float, default=0.3, help='bbox score threshold')
    args = parser.parse_args()
    # build the model from a config file and a checkpoint file
    print("loading")
    model = init_detector(args.config, args.checkpoint, device=args.device)
    print("loaded")
    # Output layout: vis/ (rendered detections), crop/ (eye crops),
    # nodetected/ (inputs with no detections at all).
    os.makedirs(os.path.join(args.output_dir, 'vis'), exist_ok=True)
    os.makedirs(os.path.join(args.output_dir, 'crop'), exist_ok=True)
    os.makedirs(os.path.join(args.output_dir, 'nodetected'), exist_ok=True)
    img_files = glob.glob(args.imgs)
    print(img_files)
    for img in img_files:
        print(os.path.basename(img))
        # test a single image
        start = time.time()
        result = inference_detector(model, img)
        print(time.time() - start)
        # show the results
        # result[0]: eye boxes, result[1]: nose boxes; each row is
        # [x1, y1, x2, y2, score] -- presumably, per mmdet convention; verify.
        if len(result) < 1 or (len(result[0]) < 1 and len(result[1]) < 1):
            shutil.copy(img, os.path.join(args.output_dir, 'nodetected'))
            continue
        output_path = os.path.join(args.output_dir, 'vis', os.path.splitext(os.path.basename(img))[0] + ".jpg")
        if len(result[0]) > 0:
            # Keep at most two non-overlapping eyes: the top-scoring one plus
            # the best-scoring box that does not intersect it.
            eye_scors = result[0][:, 4]
            eye_scors_indices = np.argsort(eye_scors)
            eye_scors_indices = eye_scors_indices[::-1]
            highest_score_eye = result[0][eye_scors_indices[0]]
            second_score_eye = None
            second_score_eye_idx = None
            for idx in range(1, len(eye_scors_indices)):
                if not is_intersect(highest_score_eye, result[0][eye_scors_indices[idx]]):
                    second_score_eye = result[0][eye_scors_indices[idx]]
                    second_score_eye_idx = idx
                    break
            if second_score_eye is None and len(result[0]) > 1:
                second_score_eye_idx = 1
            # NOTE(review): [0, second_score_eye_idx] index result[0] directly,
            # while second_score_eye_idx is a position in the score-sorted
            # eye_scors_indices -- this looks correct only if result[0] is
            # already sorted by score; otherwise eye_scors_indices should be
            # used here.  Verify against mmdet's output ordering.
            if second_score_eye_idx:
                result[0] = result[0][[0, second_score_eye_idx], :]
            else:
                result[0] = result[0][:1, :]
        if len(result[1]) > 0:
            # Keep only the single highest-scoring nose box.
            max_nose_idx = np.argmax(result[1][:, 4])
            result[1] = result[1][max_nose_idx:max_nose_idx + 1, :]
        save_result_pyplot(model, img, result, output_path, score_thr=args.score_thr)
        im = Image.open(img).convert("RGB")
        # Crop and save each surviving eye box above the score threshold.
        for j, bbox in enumerate(result[0]):
            if bbox[4] < args.score_thr:
                continue
            crop_im = im.crop([int(b) for b in bbox[:-1]])
            crop_im.save(
                os.path.join(args.output_dir, 'crop', os.path.splitext(os.path.basename(img))[0] + "_{}.jpg".format(j)))
if __name__ == '__main__':
    main()
| 38.233333 | 120 | 0.609997 |
4caf0f80303d255537e0f1abf06d60aa27273241
| 534 |
py
|
Python
|
test_fixtures/integrations/torch/__init__.py
|
allenai/tango
|
80c90caefae4ad1c3f8472718ddada912cd8fcf9
|
[
"Apache-2.0"
] | 52 |
2021-09-24T17:52:34.000Z
|
2022-03-29T22:55:02.000Z
|
test_fixtures/integrations/torch/__init__.py
|
allenai/tango
|
80c90caefae4ad1c3f8472718ddada912cd8fcf9
|
[
"Apache-2.0"
] | 90 |
2021-09-29T04:23:29.000Z
|
2022-03-31T21:23:02.000Z
|
test_fixtures/integrations/torch/__init__.py
|
allenai/tango
|
80c90caefae4ad1c3f8472718ddada912cd8fcf9
|
[
"Apache-2.0"
] | 8 |
2021-11-13T01:56:22.000Z
|
2022-02-27T03:29:42.000Z
|
import torch.nn as nn
from tango.integrations.torch import Model
@Model.register("basic_regression")
class BasicRegression(Model):
def __init__(self):
super().__init__()
self.linear = nn.Linear(10, 1)
self.sigmoid = nn.Sigmoid()
self.mse = nn.MSELoss()
def forward(self, x, y=None):
pred = self.sigmoid(self.linear(x))
out = {"pred": pred}
if y is not None:
out["loss"] = self.mse(pred, y)
return out
def _to_params(self):
return {}
| 23.217391 | 43 | 0.586142 |
59b9a026963b3bcc0a9587f2f076546628ede8de
| 7,326 |
py
|
Python
|
sdk/python/pulumi_azure_native/databoxedge/v20190701/get_storage_account_credential.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/databoxedge/v20190701/get_storage_account_credential.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/databoxedge/v20190701/get_storage_account_credential.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetStorageAccountCredentialResult',
'AwaitableGetStorageAccountCredentialResult',
'get_storage_account_credential',
]
@pulumi.output_type
class GetStorageAccountCredentialResult:
    """
    The storage account credential.
    """
    # NOTE: auto-generated by the Pulumi SDK generator (see file header);
    # keep any manual edits to comments only -- regeneration overwrites this.
    def __init__(__self__, account_key=None, account_type=None, alias=None, blob_domain_name=None, connection_string=None, id=None, name=None, ssl_status=None, storage_account_id=None, type=None, user_name=None):
        # Each argument is shallowly type-checked, then stored via pulumi.set
        # so the @pulumi.output_type machinery can serve it through the
        # property getters below.
        if account_key and not isinstance(account_key, dict):
            raise TypeError("Expected argument 'account_key' to be a dict")
        pulumi.set(__self__, "account_key", account_key)
        if account_type and not isinstance(account_type, str):
            raise TypeError("Expected argument 'account_type' to be a str")
        pulumi.set(__self__, "account_type", account_type)
        if alias and not isinstance(alias, str):
            raise TypeError("Expected argument 'alias' to be a str")
        pulumi.set(__self__, "alias", alias)
        if blob_domain_name and not isinstance(blob_domain_name, str):
            raise TypeError("Expected argument 'blob_domain_name' to be a str")
        pulumi.set(__self__, "blob_domain_name", blob_domain_name)
        if connection_string and not isinstance(connection_string, str):
            raise TypeError("Expected argument 'connection_string' to be a str")
        pulumi.set(__self__, "connection_string", connection_string)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if ssl_status and not isinstance(ssl_status, str):
            raise TypeError("Expected argument 'ssl_status' to be a str")
        pulumi.set(__self__, "ssl_status", ssl_status)
        if storage_account_id and not isinstance(storage_account_id, str):
            raise TypeError("Expected argument 'storage_account_id' to be a str")
        pulumi.set(__self__, "storage_account_id", storage_account_id)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if user_name and not isinstance(user_name, str):
            raise TypeError("Expected argument 'user_name' to be a str")
        pulumi.set(__self__, "user_name", user_name)
    @property
    @pulumi.getter(name="accountKey")
    def account_key(self) -> Optional['outputs.AsymmetricEncryptedSecretResponse']:
        """
        Encrypted storage key.
        """
        return pulumi.get(self, "account_key")
    @property
    @pulumi.getter(name="accountType")
    def account_type(self) -> str:
        """
        Type of storage accessed on the storage account.
        """
        return pulumi.get(self, "account_type")
    @property
    @pulumi.getter
    def alias(self) -> str:
        """
        Alias for the storage account.
        """
        return pulumi.get(self, "alias")
    @property
    @pulumi.getter(name="blobDomainName")
    def blob_domain_name(self) -> Optional[str]:
        """
        Blob end point for private clouds.
        """
        return pulumi.get(self, "blob_domain_name")
    @property
    @pulumi.getter(name="connectionString")
    def connection_string(self) -> Optional[str]:
        """
        Connection string for the storage account. Use this string if username and account key are not specified.
        """
        return pulumi.get(self, "connection_string")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The path ID that uniquely identifies the object.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The object name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="sslStatus")
    def ssl_status(self) -> str:
        """
        Signifies whether SSL needs to be enabled or not.
        """
        return pulumi.get(self, "ssl_status")
    @property
    @pulumi.getter(name="storageAccountId")
    def storage_account_id(self) -> Optional[str]:
        """
        Id of the storage account.
        """
        return pulumi.get(self, "storage_account_id")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The hierarchical type of the object.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="userName")
    def user_name(self) -> Optional[str]:
        """
        Username for the storage account.
        """
        return pulumi.get(self, "user_name")
class AwaitableGetStorageAccountCredentialResult(GetStorageAccountCredentialResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Makes the result awaitable: `if False: yield` turns this method into
        # a generator that never suspends and immediately returns a plain
        # (non-awaitable) copy of the result.
        if False:
            yield self
        return GetStorageAccountCredentialResult(
            account_key=self.account_key,
            account_type=self.account_type,
            alias=self.alias,
            blob_domain_name=self.blob_domain_name,
            connection_string=self.connection_string,
            id=self.id,
            name=self.name,
            ssl_status=self.ssl_status,
            storage_account_id=self.storage_account_id,
            type=self.type,
            user_name=self.user_name)
def get_storage_account_credential(device_name: Optional[str] = None,
                                   name: Optional[str] = None,
                                   resource_group_name: Optional[str] = None,
                                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetStorageAccountCredentialResult:
    """
    The storage account credential.

    :param str device_name: The device name.
    :param str name: The storage account credential name.
    :param str resource_group_name: The resource group name.
    """
    # Marshal the arguments into the camelCase names the provider expects.
    __args__ = dict()
    __args__['deviceName'] = device_name
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous provider invoke; `.value` holds the typed result object.
    __ret__ = pulumi.runtime.invoke('azure-native:databoxedge/v20190701:getStorageAccountCredential', __args__, opts=opts, typ=GetStorageAccountCredentialResult).value
    return AwaitableGetStorageAccountCredentialResult(
        account_key=__ret__.account_key,
        account_type=__ret__.account_type,
        alias=__ret__.alias,
        blob_domain_name=__ret__.blob_domain_name,
        connection_string=__ret__.connection_string,
        id=__ret__.id,
        name=__ret__.name,
        ssl_status=__ret__.ssl_status,
        storage_account_id=__ret__.storage_account_id,
        type=__ret__.type,
        user_name=__ret__.user_name)
| 36.63 | 212 | 0.649331 |
5ebed3f65121be78defed6fd231067b9b353bc8c
| 7,898 |
bzl
|
Python
|
tools/maven_info.bzl
|
ansman/dagger
|
732323b6c0e9dc9dfd92029db64fe91f0786da51
|
[
"Apache-2.0"
] | 17,481 |
2015-01-01T03:47:42.000Z
|
2022-03-31T22:30:34.000Z
|
tools/maven_info.bzl
|
ansman/dagger
|
732323b6c0e9dc9dfd92029db64fe91f0786da51
|
[
"Apache-2.0"
] | 1,982 |
2015-01-01T18:16:55.000Z
|
2022-03-31T18:35:19.000Z
|
tools/maven_info.bzl
|
ansman/dagger
|
732323b6c0e9dc9dfd92029db64fe91f0786da51
|
[
"Apache-2.0"
] | 2,552 |
2015-01-03T15:58:02.000Z
|
2022-03-30T11:10:08.000Z
|
# Copyright (C) 2019 The Dagger Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Skylark rules to collect Maven artifacts information.
"""
load("@bazel_skylib//lib:unittest.bzl", "asserts", "unittest")
# TODO(b/142057516): Unfork this file once we've settled on a more general API.
# Provider carrying Maven artifact/dependency info propagated by the
# collect_maven_info aspect below.
MavenInfo = provider(
    fields = {
        "artifact": """
The Maven coordinate for the artifact that is exported by this target, if one exists.
""",
        "has_srcs": """
True if this library contains srcs..
""",
        "all_transitive_deps": """
All transitive deps of the target with srcs.
""",
        "maven_nearest_artifacts": """
The nearest maven deps of the target.
""",
        "maven_transitive_deps": """
All transitive deps that are included in some maven dependency.
""",
    },
)
# Sentinel MavenInfo for targets excluded from Maven publishing
# (e.g. compile-only or shaded deps).
_EMPTY_MAVEN_INFO = MavenInfo(
    artifact = None,
    has_srcs = False,
    maven_nearest_artifacts = depset(),
    maven_transitive_deps = depset(),
    all_transitive_deps = depset(),
)
# Tag prefix used to attach a Maven coordinate to a target.
_MAVEN_COORDINATES_PREFIX = "maven_coordinates="
def _collect_maven_info_impl(target, ctx):
    """Aspect implementation: computes a MavenInfo for `target` from its tags
    and the MavenInfo of its deps/exports."""
    tags = getattr(ctx.rule.attr, "tags", [])
    srcs = getattr(ctx.rule.attr, "srcs", [])
    deps = getattr(ctx.rule.attr, "deps", [])
    exports = getattr(ctx.rule.attr, "exports", [])
    artifact = None
    for tag in tags:
        # compile-only / shaded targets are excluded from Maven entirely.
        if tag in ("maven:compile_only", "maven:shaded"):
            return [_EMPTY_MAVEN_INFO]
        if tag.startswith(_MAVEN_COORDINATES_PREFIX):
            artifact = tag[len(_MAVEN_COORDINATES_PREFIX):]
    # Direct deps/exports that carry srcs, plus everything below them.
    all_deps = [dep.label for dep in (deps + exports) if dep[MavenInfo].has_srcs]
    all_transitive_deps = [dep[MavenInfo].all_transitive_deps for dep in (deps + exports)]
    maven_artifacts = []
    maven_nearest_artifacts = []
    maven_deps = []
    maven_transitive_deps = []
    for dep in (deps + exports):
        # If the dep is itself a maven artifact, add it and all of its transitive deps.
        # Otherwise, just propagate its transitive maven deps.
        if dep[MavenInfo].artifact or dep[MavenInfo] == _EMPTY_MAVEN_INFO:
            if (dep[MavenInfo].artifact):
                maven_artifacts.append(dep[MavenInfo].artifact)
            maven_deps.append(dep.label)
            maven_transitive_deps.append(dep[MavenInfo].all_transitive_deps)
        else:
            maven_nearest_artifacts.append(dep[MavenInfo].maven_nearest_artifacts)
            maven_transitive_deps.append(dep[MavenInfo].maven_transitive_deps)
    return [MavenInfo(
        artifact = artifact,
        has_srcs = len(srcs) > 0,
        maven_nearest_artifacts = depset(maven_artifacts, transitive = maven_nearest_artifacts),
        maven_transitive_deps = depset(maven_deps, transitive = maven_transitive_deps),
        all_transitive_deps = depset(all_deps, transitive = all_transitive_deps),
    )]
# Aspect that walks deps/exports edges applying the implementation above.
collect_maven_info = aspect(
    attr_aspects = [
        "deps",
        "exports",
    ],
    doc = """
Collects the Maven information for targets, their dependencies, and their transitive exports.
""",
    implementation = _collect_maven_info_impl,
)
def _fake_java_library(name, deps = None, exports = None, is_artifact = True):
    """Declares a one-class java_library for tests; when `is_artifact`, tags
    it with a placeholder Maven coordinate `<name>:_:_`."""
    src_file = ["%s.java" % name]
    # Generate a trivial source file so the library has srcs.
    native.genrule(
        name = "%s_source_file" % name,
        outs = src_file,
        cmd = "echo 'package pkg; class %s {}' > $@" % name,
    )
    native.java_library(
        name = name,
        srcs = src_file,
        tags = ["maven_coordinates=%s:_:_" % name] if is_artifact else [],
        deps = deps or [],
        exports = exports or [],
    )
def _maven_info_test_impl(ctx):
    """Asserts that `target`'s MavenInfo matches the expected attributes."""
    env = unittest.begin(ctx)
    asserts.equals(
        env,
        expected = ctx.attr.artifact if ctx.attr.artifact else None,
        actual = ctx.attr.target[MavenInfo].artifact,
        msg = "MavenInfo.artifact",
    )
    asserts.equals(
        env,
        # Expected dep strings are resolved relative to the test's package.
        expected = sorted([ctx.label.relative(dep) for dep in ctx.attr.maven_transitive_deps]),
        actual = sorted(ctx.attr.target[MavenInfo].maven_transitive_deps.to_list()),
        msg = "MavenInfo.maven_transitive_deps",
    )
    asserts.equals(
        env,
        expected = sorted([ctx.label.relative(dep) for dep in ctx.attr.all_transitive_deps]),
        actual = sorted(ctx.attr.target[MavenInfo].all_transitive_deps.to_list()),
        msg = "MavenInfo.all_transitive_deps",
    )
    return unittest.end(env)
# Test rule: applies collect_maven_info to `target` and checks the result.
_maven_info_test = unittest.make(
    _maven_info_test_impl,
    attrs = {
        "target": attr.label(aspects = [collect_maven_info]),
        "artifact": attr.string(),
        "maven_transitive_deps": attr.string_list(),
        "all_transitive_deps": attr.string_list(),
    },
)
def maven_info_tests():
    """Instantiates fixture libraries and `_maven_info_test` cases covering
    direct deps, exports, transitive chains, and non-artifact nodes.
    """
    _fake_java_library(name = "A")
    _fake_java_library(
        name = "DepOnA",
        deps = [":A"],
    )
    _maven_info_test(
        name = "a_test",
        target = ":A",
        artifact = "A:_:_",
        maven_transitive_deps = [],
        all_transitive_deps = [],
    )
    _maven_info_test(
        name = "dependencies_test",
        target = ":DepOnA",
        artifact = "DepOnA:_:_",
        maven_transitive_deps = [":A"],
        all_transitive_deps = [":A"],
    )
    _fake_java_library(
        name = "ExportsA",
        exports = [":A"],
    )
    _maven_info_test(
        name = "exports_test",
        target = ":ExportsA",
        artifact = "ExportsA:_:_",
        maven_transitive_deps = [":A"],
        all_transitive_deps = [":A"],
    )
    _fake_java_library(
        name = "TransitiveExports",
        exports = [":ExportsA"],
    )
    _maven_info_test(
        name = "transitive_exports_test",
        target = ":TransitiveExports",
        artifact = "TransitiveExports:_:_",
        maven_transitive_deps = [":ExportsA", ":A"],
        all_transitive_deps = [":ExportsA", ":A"],
    )
    _fake_java_library(
        name = "TransitiveDeps",
        deps = [":ExportsA"],
    )
    _maven_info_test(
        name = "transitive_deps_test",
        target = ":TransitiveDeps",
        artifact = "TransitiveDeps:_:_",
        maven_transitive_deps = [":ExportsA", ":A"],
        all_transitive_deps = [":ExportsA", ":A"],
    )
    # Node1..Node4 cover chains with non-artifact (untagged) libraries mixed in.
    _fake_java_library(name = "Node1", is_artifact = False)
    _maven_info_test(
        name = "test_node1",
        target = ":Node1",
        maven_transitive_deps = [],
        all_transitive_deps = [],
    )
    _fake_java_library(name = "Node2_Artifact", deps = [":Node1"])
    _maven_info_test(
        name = "test_node2",
        target = ":Node2_Artifact",
        artifact = "Node2_Artifact:_:_",
        maven_transitive_deps = [],
        all_transitive_deps = [":Node1"],
    )
    _fake_java_library(name = "Node3", deps = [":Node2_Artifact"], is_artifact = False)
    _maven_info_test(
        name = "test_node3",
        target = ":Node3",
        maven_transitive_deps = [":Node1", ":Node2_Artifact"],
        all_transitive_deps = [":Node1", ":Node2_Artifact"],
    )
    _fake_java_library(name = "Node4", deps = [":Node3"], is_artifact = False)
    _maven_info_test(
        name = "test_node4",
        target = ":Node4",
        maven_transitive_deps = [":Node1", ":Node2_Artifact"],
        all_transitive_deps = [":Node1", ":Node2_Artifact", ":Node3"],
    )
| 32.368852 | 97 | 0.622816 |
d4655ce9b53572d682ed903f7f1d1552dc29f15a
| 1,587 |
py
|
Python
|
sa/profiles/Eltex/MA4000/ping.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 84 |
2017-10-22T11:01:39.000Z
|
2022-02-27T03:43:48.000Z
|
sa/profiles/Eltex/MA4000/ping.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 22 |
2017-12-11T07:21:56.000Z
|
2021-09-23T02:53:50.000Z
|
sa/profiles/Eltex/MA4000/ping.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 23 |
2017-12-06T06:59:52.000Z
|
2022-02-24T00:02:25.000Z
|
# ---------------------------------------------------------------------
# Eltex.MA4000.ping
# ---------------------------------------------------------------------
# Copyright (C) 2007-2014 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.iping import IPing
class Script(BaseScript):
    """Eltex.MA4000 ping: runs `ping <address>` on the device and parses
    the summary and round-trip statistics from the CLI output."""

    name = "Eltex.MA4000.ping"
    interface = IPing

    # "<N> packets transmitted, <M> packets received, X% packet loss"
    rx_result = re.compile(
        r"^(?P<count>\d+) packets transmitted, (?P<success>\d+) packets "
        r"received, \d+% packet loss$",
        re.MULTILINE,
    )
    # "round-trip min/avg/max = a/b/c ms"
    rx_stat = re.compile(
        r"^round-trip min/avg/max = (?P<min>.+)/(?P<avg>.+)/(?P<max>.+) ms$", re.MULTILINE
    )

    def execute(self, address, count=None, source_address=None, size=None, df=None):
        # count / size / source_address / df are accepted for IPing interface
        # compatibility but are not supported by the current firmware's
        # `ping` command, so only the bare address form is issued.
        output = self.cli("ping %s" % address)
        summary = self.rx_result.search(output)
        report = {
            "success": summary.group("success"),
            "count": summary.group("count"),
        }
        timing = self.rx_stat.search(output)
        if timing:
            report.update({
                "min": timing.group("min"),
                "avg": timing.group("avg"),
                "max": timing.group("max"),
            })
        return report
| 32.387755 | 100 | 0.487083 |
a8b2d1cebaa0fc2a41c9eb0caca9c093a08be3c1
| 3,007 |
py
|
Python
|
scripts/fine_tune_hyperparams.py
|
WEgeophysics/watex
|
21616ce35372a095c3dd624f82a5282b15cb2c91
|
[
"MIT"
] | 3 |
2021-06-19T02:16:46.000Z
|
2021-07-16T15:56:49.000Z
|
scripts/fine_tune_hyperparams.py
|
WEgeophysics/watex
|
21616ce35372a095c3dd624f82a5282b15cb2c91
|
[
"MIT"
] | null | null | null |
scripts/fine_tune_hyperparams.py
|
WEgeophysics/watex
|
21616ce35372a095c3dd624f82a5282b15cb2c91
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
.. synopsis: Create your model and fine tune its hyperparameters
    with your dataset.
Created on Fri Sep 24 21:28:48 2021
@author: @Daniel03
"""
from sklearn.linear_model import LogisticRegression , SGDClassifier
from sklearn.svm import SVC, LinearSVC
from watex.viewer.mlplot import MLPlots
from watex.modeling.validation import multipleGridSearches
# Test data
from watex.datasets import fetch_data
X_prepared, y_prepared = fetch_data('Bagoue dataset prepared')
# number of cross-validation folds (K in KFold)
cv = 7
# scoring metric passed to the grid search ('accuracy' is an alternative)
scoring ='neg_mean_squared_error'
# random state shared by all estimators for reproducibility
random_state =42
# kind of grid search ('GridSearchCV' vs randomized search)
kind ='GridSearchCV'
# persist the best model(s) to joblib once found
save2joblib =True
# the candidate classifiers to tune
logreg_clf = LogisticRegression(random_state =random_state)
linear_svc_clf = LinearSVC(random_state =random_state)
sgd_clf = SGDClassifier(random_state = random_state)
svc_clf = SVC(random_state =random_state)
# build estimators (NOTE: sgd_clf is created but not tuned here)
estimators = (svc_clf,linear_svc_clf, logreg_clf )
# plot the fine-tuned params afterwards?
plot_fineTune =False
# one hyperparameter grid per estimator, in the same order as `estimators`:
# SVC (rbf + sigmoid kernels), LinearSVC, then LogisticRegression (defaults)
gridParams =([
    {'C':[1e-2, 1e-1, 1, 10, 100], 'gamma':[5, 2, 1, 1e-1, 1e-2, 1e-3],'kernel':['rbf']},
    {'kernel':['sigmoid'],'degree':[1, 3,5, 7], 'coef0':[1, 2, 3], 'C': [1e-2, 1e-1, 1, 10, 100]}
    ],
    [{'C':[1e-2, 1e-1, 1, 10, 100], 'loss':['hinge']}],
    [dict()],
    # [dict()]
)
# run one grid search per estimator; returns fitted searches, their dict
# form, and the joblib handle when save_to_joblib is set
_clfs, _dclfs, joblib= multipleGridSearches(X= X_prepared,
                                            y= y_prepared,
                                            estimators = estimators,
                                            grid_params = gridParams ,
                                            cv =cv,
                                            scoring =scoring,
                                            verbose =1,
                                            save_to_joblib =save2joblib
                                            )
# optional visualisation of score-vs-CV for each tuned classifier
if plot_fineTune :
    # clfs =[(_clfs[i][1], _clfs[i][3]) for i in range(len(_clfs))]
    scores = [ _clfs[i][3] for i in range(len(_clfs))]
    clfs =['SVM:score mean=75.86%', 'LinearSVC:score mean= ', 'LogisticRegression:score mean=74.16%']
    plot_kws = {'fig_size':(12, 8),
            'lc':(.9,0.,.8),
            'lw' :3.,           # line width
            'font_size':7.,
            'show_grid' :True,  # visualize grid
           'galpha' :0.2,       # grid alpha
           'glw':.5,            # grid line width
           'gwhich' :'major',   # minor ticks
            # 'fs' :3.,         # coeff to manage font_size
            'xlabel':'Cross-validation (CV)',
            'ylabel': 'Scores',
            # 'ylim':[0.5,1.]
            }
    mlObj =MLPlots(**plot_kws)
    lcs_kws ={'lc':['k', 'k', 'k'], #(.9,0.,.8)
              'ls':['-', ':', '-.']}
    mlObj.plotModelvsCV(clfs =clfs, scores =scores, **lcs_kws)
| 35.376471 | 101 | 0.535085 |
1587837a0bfc4581c6734989b4c8c51a8258ce10
| 36,979 |
py
|
Python
|
src/python/WMCore/Services/DBS/DBS3Reader.py
|
hufnagel/WMCore
|
b150cc725b68fc1cf8e6e0fa07c826226a4421fa
|
[
"Apache-2.0"
] | null | null | null |
src/python/WMCore/Services/DBS/DBS3Reader.py
|
hufnagel/WMCore
|
b150cc725b68fc1cf8e6e0fa07c826226a4421fa
|
[
"Apache-2.0"
] | null | null | null |
src/python/WMCore/Services/DBS/DBS3Reader.py
|
hufnagel/WMCore
|
b150cc725b68fc1cf8e6e0fa07c826226a4421fa
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""
_DBSReader_
Readonly DBS Interface
"""
from __future__ import print_function, division
import logging
from collections import defaultdict
from RestClient.ErrorHandling.RestClientExceptions import HTTPError
from dbs.apis.dbsClient import DbsApi
from dbs.exceptions.dbsClientException import dbsClientException
from retry import retry
from Utils.IteratorTools import grouper
from WMCore.Services.DBS.DBSErrors import DBSReaderError, formatEx3
### Needed for the pycurl comment, leave it out for now
# from WMCore.Services.pycurl_manager import getdata as multi_getdata
def remapDBS3Keys(data, stringify=False, **others):
    """Map DBS3 field names onto their legacy DBS2 counterparts.

    Fields were renamed between DBS2 and DBS3; for every known DBS3 key
    present in *data*, add the equivalent DBS2-style key pointing at the
    same value (the original DBS3 key is kept as well).

    :param data: dict of DBS3 fields; modified in place and returned
    :param stringify: when True, coerce text values to ``str``
    :param others: extra ``dbs3_name='DBS2Name'`` mapping overrides
    :return: the same *data* dict with the remapped keys added
    """
    mapping = {'num_file': 'NumberOfFiles', 'num_files': 'NumberOfFiles', 'num_event': 'NumberOfEvents',
               'num_block': 'NumberOfBlocks', 'num_lumi': 'NumberOfLumis',
               'event_count': 'NumberOfEvents', 'run_num': 'RunNumber',
               'file_size': 'FileSize', 'block_size': 'BlockSize',
               'file_count': 'NumberOfFiles', 'open_for_writing': 'OpenForWriting',
               'logical_file_name': 'LogicalFileName',
               'adler32': 'Adler32', 'check_sum': 'Checksum', 'md5': 'Md5',
               'block_name': 'BlockName', 'lumi_section_num': 'LumiSectionNumber'}
    mapping.update(others)
    # `unicode` and dict.iteritems() only exist on Python 2; guard them so
    # this helper also works under Python 3, where all text is `str`.
    try:
        textType = unicode
    except NameError:  # Python 3
        textType = str
    for name, newname in mapping.items():
        if name in data:
            value = data[name]
            data[newname] = str(value) if stringify and isinstance(value, textType) else value
    return data
@retry(tries=3, delay=1)
def getDataTiers(dbsUrl):
    """
    Fetch the names of all data tiers known to a DBS instance.

    Retried up to 3 times with a 1 second delay. NOTE: meant to be used
    with some caching (MemoryCacheStruct) by the caller.

    :param dbsUrl: the DBS URL string
    :return: a list of strings/datatiers
    """
    dbsApi = DbsApi(dbsUrl)
    return [row['data_tier_name'] for row in dbsApi.listDataTiers()]
# emulator hook is used to swap the class instance
# when emulator values are set.
# Look WMQuality.Emulators.EmulatorSetup module for the values
# @emulatorHook
class DBS3Reader(object):
"""
_DBSReader_
General API for reading data from DBS
"""
    def __init__(self, url, logger=None, **contact):
        """
        Build a DBS reader bound to *url*.

        :param url: DBS instance URL
        :param logger: logger to use; defaults to a logger named after the class
        :param contact: extra keyword arguments forwarded to DbsApi
        :raises DBSReaderError: when the DbsApi cannot be instantiated
        """
        # instantiate dbs api object
        try:
            self.dbsURL = url
            self.dbs = DbsApi(url, **contact)
            self.logger = logger or logging.getLogger(self.__class__.__name__)
        except dbsClientException as ex:
            msg = "Error in DBSReader with DbsApi\n"
            msg += "%s\n" % formatEx3(ex)
            raise DBSReaderError(msg)
    def _getLumiList(self, blockName=None, lfns=None, validFileOnly=1):
        """
        Fetch run/lumi (and, when available, event count) info per file,
        either for a whole block or for a list of LFNs.

        currently only take one lfn but dbs api need be updated

        :param blockName: block to query (takes precedence over *lfns*)
        :param lfns: list of LFNs, queried in slices of 50
        :param validFileOnly: passed through to DBS (1 = valid files only)
        :return: {LFN: [{"RunNumber": ..., "LumiSectionNumber": ...,
            "EventCount": ...(optional)}, ...]}; empty dict when neither
            blockName nor lfns is given
        :raises DBSReaderError: on DBS client errors
        """
        try:
            if blockName:
                lumiLists = self.dbs.listFileLumis(block_name=blockName, validFileOnly=validFileOnly)
            elif lfns:
                lumiLists = []
                for slfn in grouper(lfns, 50):
                    lumiLists.extend(self.dbs.listFileLumiArray(logical_file_name=slfn))
            else:
                # shouldn't call this with both blockName and lfns empty
                # but still returns empty dict for that case
                return {}
        except dbsClientException as ex:
            msg = "Error in "
            msg += "DBSReader.listFileLumiArray(%s)\n" % lfns
            msg += "%s\n" % formatEx3(ex)
            raise DBSReaderError(msg)
        lumiDict = {}
        for lumisItem in lumiLists:
            lumiDict.setdefault(lumisItem['logical_file_name'], [])
            item = {}
            item["RunNumber"] = lumisItem['run_num']
            item['LumiSectionNumber'] = lumisItem['lumi_section_num']
            if lumisItem.get('event_count', None) is not None:
                item['EventCount'] = lumisItem['event_count']
            lumiDict[lumisItem['logical_file_name']].append(item)
            # TODO: add key for lumi and event pair.
        return lumiDict
def checkDBSServer(self):
"""
check whether dbs server is up and running
returns {"dbs_instance": "prod/global", "dbs_version": "3.3.144"}
"""
try:
return self.dbs.serverinfo()
except dbsClientException as ex:
msg = "Error in "
msg += "DBS server is not up: %s" % self.dbsURL
msg += "%s\n" % formatEx3(ex)
raise DBSReaderError(msg)
def listPrimaryDatasets(self, match='*'):
"""
_listPrimaryDatasets_
return a list of primary datasets, The full dataset name must be provided
pattern based mathcing is no longer supported.
If no expression is provided, all datasets are returned
"""
try:
result = self.dbs.listPrimaryDatasets(primary_ds_name=match)
except dbsClientException as ex:
msg = "Error in DBSReader.listPrimaryDataset(%s)\n" % match
msg += "%s\n" % formatEx3(ex)
raise DBSReaderError(msg)
result = [x['primary_ds_name'] for x in result]
return result
def matchProcessedDatasets(self, primary, tier, process):
"""
_matchProcessedDatasets_
return a list of Processed datasets
"""
result = []
try:
datasets = self.dbs.listDatasets(primary_ds_name=primary, data_tier_name=tier, detail=True)
except dbsClientException as ex:
msg = "Error in DBSReader.listProcessedDatasets(%s)\n" % primary
msg += "%s\n" % formatEx3(ex)
raise DBSReaderError(msg)
for dataset in datasets:
dataset = remapDBS3Keys(dataset, processed_ds_name='Name')
dataset['PathList'] = [dataset['dataset']]
if dataset['Name'] == process:
result.append(dataset)
return result
def listRuns(self, dataset=None, block=None):
"""
it gets list of DbsRun object but for our purpose
only list of number is collected.
DbsRun (RunNumber,
NumberOfEvents,
NumberOfLumiSections,
TotalLuminosity,
StoreNumber,
StartOfRungetLong,
EndOfRun,
CreationDate,
CreatedBy,
LastModificationDate,
LastModifiedBy
)
"""
runs = []
try:
if block:
results = self.dbs.listRuns(block_name=block)
else:
results = self.dbs.listRuns(dataset=dataset)
except dbsClientException as ex:
msg = "Error in DBSReader.listRuns(%s, %s)\n" % (dataset, block)
msg += "%s\n" % formatEx3(ex)
raise DBSReaderError(msg)
for x in results:
runs.extend(x['run_num'])
return runs
def listRunLumis(self, dataset=None, block=None):
"""
It gets a list of DBSRun objects and returns the number of lumisections per run
DbsRun (RunNumber,
NumberOfEvents,
NumberOfLumiSections,
TotalLuminosity,
StoreNumber,
StartOfRungetLong,
EndOfRun,
CreationDate,
CreatedBy,
LastModificationDate,
LastModifiedBy
)
"""
# Pointless code in python3
if isinstance(block, str):
block = unicode(block)
if isinstance(dataset, str):
dataset = unicode(dataset)
try:
if block:
results = self.dbs.listRuns(block_name=block)
else:
results = self.dbs.listRuns(dataset=dataset)
except dbsClientException as ex:
msg = "Error in DBSReader.listRuns(%s, %s)\n" % (dataset, block)
msg += "%s\n" % formatEx3(ex)
raise DBSReaderError(msg)
# send runDict format as result, this format is for sync with dbs2 call
# which has {run_number: num_lumis} but dbs3 call doesn't return num Lumis
# So it returns {run_number: None}
# TODO: After DBS2 is completely removed change the return format more sensible one
runDict = {}
for x in results:
for runNumber in x["run_num"]:
runDict[runNumber] = None
return runDict
def listProcessedDatasets(self, primary, dataTier='*'):
"""
_listProcessedDatasets_
return a list of Processed datasets for the primary and optional
data tier value
"""
try:
result = self.dbs.listDatasets(primary_ds_name=primary, data_tier_name=dataTier)
except dbsClientException as ex:
msg = "Error in DBSReader.listProcessedDatasets(%s)\n" % primary
msg += "%s\n" % formatEx3(ex)
raise DBSReaderError(msg)
result = [x['dataset'].split('/')[2] for x in result]
return result
def listDatasetFiles(self, datasetPath):
"""
_listDatasetFiles_
Get list of files for dataset
"""
return [x['logical_file_name'] for x in self.dbs.listFileArray(dataset=datasetPath)]
def listDatatiers(self):
"""
_listDatatiers_
Get a list of datatiers known by DBS.
"""
return [tier['data_tier_name'] for tier in self.dbs.listDataTiers()]
    def listDatasetFileDetails(self, datasetPath, getParents=False, getLumis=True, validFileOnly=1):
        """
        TODO: This is completely wrong need to be redone. or be removed - getting dataset altogether
        might be to costly
        _listDatasetFileDetails_
        Get list of lumis, events, and parents for each file in a dataset
        Return a dict where the keys are the files, and for each file we have something like:
            { 'NumberOfEvents': 545,
              'BlockName': '/HighPileUp/Run2011A-v1/RAW#dd6e0796-cbcc-11e0-80a9-003048caaace',
              'Lumis': {173658: [8, 12, 9, 14, 19, 109, 105]},
              'Parents': [],
              'Checksum': '22218315',
              'Adler32': 'a41a1446',
              'FileSize': 286021145,
              'ValidFile': 1
            }

        :param datasetPath: dataset to inspect
        :param getParents: when True, also fill each file's 'Parents' list
        :param getLumis: when True, also fill each file's 'Lumis' dict
        :param validFileOnly: 1 restricts the listing to valid files
        """
        fileDetails = self.getFileListByDataset(dataset=datasetPath, validFileOnly=validFileOnly, detail=True)
        blocks = set()  # the set of blocks of the dataset
        # Iterate over the files and prepare the set of blocks and a dict where the keys are the files
        files = {}
        for f in fileDetails:
            blocks.add(f['block_name'])
            files[f['logical_file_name']] = remapDBS3Keys(f, stringify=True)
            files[f['logical_file_name']]['ValidFile'] = f['is_file_valid']
            files[f['logical_file_name']]['Lumis'] = {}
            files[f['logical_file_name']]['Parents'] = []
        # Iterate over the blocks and get parents and lumis
        for blockName in blocks:
            # get the parents
            if getParents:
                parents = self.dbs.listFileParents(block_name=blockName)
                for p in parents:
                    if p['logical_file_name'] in files:  # invalid files are not there if validFileOnly=1
                        files[p['logical_file_name']]['Parents'].extend(p['parent_logical_file_name'])
            if getLumis:
                # get the lumis
                file_lumis = self.dbs.listFileLumis(block_name=blockName)
                for f in file_lumis:
                    if f['logical_file_name'] in files:  # invalid files are not there if validFileOnly=1
                        if f['run_num'] in files[f['logical_file_name']]['Lumis']:
                            files[f['logical_file_name']]['Lumis'][f['run_num']].extend(f['lumi_section_num'])
                        else:
                            files[f['logical_file_name']]['Lumis'][f['run_num']] = f['lumi_section_num']
        return files
def crossCheck(self, datasetPath, *lfns):
"""
_crossCheck_
For the dataset provided, check that the lfns listed all exist
in the dataset.
Return the list of lfns that are in the dataset
"""
allLfns = self.dbs.listFileArray(dataset=datasetPath, validFileOnly=1, detail=False)
setOfAllLfns = set(allLfns)
setOfKnownLfns = set(lfns)
return list(setOfAllLfns.intersection(setOfKnownLfns))
def crossCheckMissing(self, datasetPath, *lfns):
"""
_crossCheckMissing_
As cross check, but return value is a list of files that
are *not* known by DBS
"""
allLfns = self.dbs.listFileArray(dataset=datasetPath, validFileOnly=1, detail=False)
setOfAllLfns = set(allLfns)
setOfKnownLfns = set(lfns)
knownFiles = setOfAllLfns.intersection(setOfKnownLfns)
unknownFiles = setOfKnownLfns.difference(knownFiles)
return list(unknownFiles)
    def getDBSSummaryInfo(self, dataset=None, block=None):
        """
        Get dataset summary includes # of files, events, blocks and total size

        :param dataset: dataset path; validated against DBS when given
        :param block: block name; takes precedence over *dataset*
        :return: summary dict with DBS2-style keys plus 'path' and 'block'
            entries; empty dict when DBS has no (valid) data
        :raises DBSReaderError: on any DBS failure
        """
        if dataset:
            self.checkDatasetPath(dataset)
        try:
            if block:
                summary = self.dbs.listFileSummaries(block_name=block, validFileOnly=1)
            else:
                summary = self.dbs.listFileSummaries(dataset=dataset, validFileOnly=1)
        except Exception as ex:
            msg = "Error in DBSReader.getDBSSummaryInfo(%s, %s)\n" % (dataset, block)
            msg += "%s\n" % formatEx3(ex)
            raise DBSReaderError(msg)
        if not summary:  # missing data or all files invalid
            return {}
        result = remapDBS3Keys(summary[0], stringify=True)
        result['path'] = dataset if dataset else ''
        result['block'] = block if block else ''
        return result
def listFileBlocks(self, dataset, onlyClosedBlocks=False, blockName=None):
"""
_listFileBlocks_
Retrieve a list of fileblock names for a dataset
"""
self.checkDatasetPath(dataset)
args = {'dataset': dataset, 'detail': False}
if blockName:
args['block_name'] = blockName
if onlyClosedBlocks:
args['detail'] = True
try:
blocks = self.dbs.listBlocks(**args)
except dbsClientException as ex:
msg = "Error in DBSReader.listFileBlocks(%s)\n" % dataset
msg += "%s\n" % formatEx3(ex)
raise DBSReaderError(msg)
if onlyClosedBlocks:
result = [x['block_name'] for x in blocks if str(x['open_for_writing']) != "1"]
else:
result = [x['block_name'] for x in blocks]
return result
def listOpenFileBlocks(self, dataset):
"""
_listOpenFileBlocks_
Retrieve a list of open fileblock names for a dataset
"""
self.checkDatasetPath(dataset)
try:
blocks = self.dbs.listBlocks(dataset=dataset, detail=True)
except dbsClientException as ex:
msg = "Error in DBSReader.listFileBlocks(%s)\n" % dataset
msg += "%s\n" % formatEx3(ex)
raise DBSReaderError(msg)
result = [x['block_name'] for x in blocks if str(x['open_for_writing']) == "1"]
return result
def blockExists(self, fileBlockName):
"""
_blockExists_
Check to see if block with name provided exists in the DBS
Instance.
Return True if exists, False if not
"""
self.checkBlockName(fileBlockName)
try:
blocks = self.dbs.listBlocks(block_name=fileBlockName)
except Exception as ex:
msg = "Error in "
msg += "DBSReader.blockExists(%s)\n" % fileBlockName
msg += "%s\n" % formatEx3(ex)
raise DBSReaderError(msg)
if len(blocks) == 0:
return False
return True
    def listFilesInBlock(self, fileBlockName, lumis=True, validFileOnly=1):
        """
        _listFilesInBlock_
        Get a list of files in the named fileblock
        TODO: lumis can be false when lumi splitting is not required
        However WMBSHelper expect file['LumiList'] to get the run number
        so for now it will be always true.
        We need to clean code up when dbs2 is completely deprecated.
        calling lumis for run number is expensive.

        :param fileBlockName: block whose files are listed
        :param lumis: when True attach a per-file 'LumiList'
        :param validFileOnly: 1 restricts the listing to valid files
        :return: list of file dicts with DBS2-style keys
        :raises DBSReaderError: when the block does not exist or DBS fails
        """
        if not self.blockExists(fileBlockName):
            msg = "DBSReader.listFilesInBlock(%s): No matching data"
            raise DBSReaderError(msg % fileBlockName)
        try:
            files = self.dbs.listFileArray(block_name=fileBlockName, validFileOnly=validFileOnly, detail=True)
        except dbsClientException as ex:
            msg = "Error in "
            msg += "DBSReader.listFilesInBlock(%s)\n" % fileBlockName
            msg += "%s\n" % formatEx3(ex)
            raise DBSReaderError(msg)
        if lumis:
            lumiDict = self._getLumiList(blockName=fileBlockName, validFileOnly=validFileOnly)
        result = []
        for fileInfo in files:
            if lumis:
                fileInfo["LumiList"] = lumiDict[fileInfo['logical_file_name']]
            result.append(remapDBS3Keys(fileInfo, stringify=True))
        return result
    def listFilesInBlockWithParents(self, fileBlockName, lumis=True, validFileOnly=1):
        """
        _listFilesInBlockWithParents_
        Get a list of files in the named fileblock including
        the parents of that file.
        TODO: lumis can be false when lumi splitting is not required
        However WMBSHelper expect file['LumiList'] to get the run number
        so for now it will be always true.

        :param fileBlockName: block whose files (and their parents) are listed
        :param lumis: when True attach a 'LumiList' to parents and children
        :param validFileOnly: 1 restricts the child listing to valid files
        :return: list of child file dicts, each with a 'ParentList'
        :raises DBSReaderError: missing block, no parents, or DBS failure
        """
        if not self.blockExists(fileBlockName):
            msg = "DBSReader.listFilesInBlockWithParents(%s): No matching data"
            raise DBSReaderError(msg % fileBlockName)
        try:
            # TODO: shoud we get only valid block for this?
            files = self.dbs.listFileParents(block_name=fileBlockName)
            fileDetails = self.listFilesInBlock(fileBlockName, lumis, validFileOnly)
        except dbsClientException as ex:
            msg = "Error in "
            msg += "DBSReader.listFilesInBlockWithParents(%s)\n" % (
                fileBlockName,)
            msg += "%s\n" % formatEx3(ex)
            raise DBSReaderError(msg)
        # invert the child->parents mapping so we can attach children later
        childByParents = defaultdict(list)
        for f in files:
            # Probably a child can have more than 1 parent file
            for fp in f['parent_logical_file_name']:
                childByParents[fp].append(f['logical_file_name'])
        # NOTE(review): under Python 3 .keys() is a view; len() works, but
        # confirm grouper() accepts a view as well.
        parentsLFNs = childByParents.keys()
        if len(parentsLFNs) == 0:
            msg = "Error in "
            msg += "DBSReader.listFilesInBlockWithParents(%s)\n There is no parents files" % (
                fileBlockName)
            raise DBSReaderError(msg)
        parentFilesDetail = []
        # TODO: slicing parentLFNs util DBS api is handling that.
        # Remove slicing if DBS api handles
        for pLFNs in grouper(parentsLFNs, 50):
            parentFilesDetail.extend(self.dbs.listFileArray(logical_file_name=pLFNs, detail=True))
        if lumis:
            parentLumis = self._getLumiList(lfns=parentsLFNs)
        parentsByLFN = defaultdict(list)
        for pf in parentFilesDetail:
            parentLFN = pf['logical_file_name']
            dbsFile = remapDBS3Keys(pf, stringify=True)
            if lumis:
                dbsFile["LumiList"] = parentLumis[parentLFN]
            for childLFN in childByParents[parentLFN]:
                parentsByLFN[childLFN].append(dbsFile)
        for fileInfo in fileDetails:
            fileInfo["ParentList"] = parentsByLFN[fileInfo['logical_file_name']]
        return fileDetails
def lfnsInBlock(self, fileBlockName):
"""
_lfnsInBlock_
LFN list only for block, details = False => faster query
"""
if not self.blockExists(fileBlockName):
msg = "DBSReader.lfnsInBlock(%s): No matching data"
raise DBSReaderError(msg % fileBlockName)
try:
lfns = self.dbs.listFileArray(block_name=fileBlockName, validFileOnly=1, detail=False)
return lfns
except dbsClientException as ex:
msg = "Error in "
msg += "DBSReader.listFilesInBlock(%s)\n" % fileBlockName
msg += "%s\n" % formatEx3(ex)
raise DBSReaderError(msg)
def listFileBlockLocation(self, fileBlockNames):
"""
_listFileBlockLocation_
Get origin_site_name of a block
"""
singleBlockName = None
if isinstance(fileBlockNames, basestring):
singleBlockName = fileBlockNames
fileBlockNames = [fileBlockNames]
for block in fileBlockNames:
self.checkBlockName(block)
locations = {}
node_filter = set(['UNKNOWN', None])
blocksInfo = {}
try:
for block in fileBlockNames:
blocksInfo.setdefault(block, [])
# there should be only one element with a single origin site string ...
for blockInfo in self.dbs.listBlockOrigin(block_name=block):
blocksInfo[block].append(blockInfo['origin_site_name'])
except dbsClientException as ex:
msg = "Error in DBS3Reader: self.dbs.listBlockOrigin(block_name=%s)\n" % fileBlockNames
msg += "%s\n" % formatEx3(ex)
raise DBSReaderError(msg)
for block in fileBlockNames:
valid_nodes = set(blocksInfo.get(block, [])) - node_filter
locations[block] = list(valid_nodes)
# returning single list if a single block is passed
if singleBlockName:
return locations[singleBlockName]
return locations
def getFileBlock(self, fileBlockName):
"""
Retrieve a list of files in the block; a flag whether the
block is still open or not; and it used to resolve the block
location via PhEDEx.
:return: a dictionary in the format of:
{"PhEDExNodeNames" : [],
"Files" : { LFN : Events },
"IsOpen" : True|False}
"""
result = {"PhEDExNodeNames": [], # FIXME: we better get rid of this line!
"Files": self.listFilesInBlock(fileBlockName),
"IsOpen": self.blockIsOpen(fileBlockName)}
return result
def getFileBlockWithParents(self, fileBlockName):
"""
Retrieve a list of parent files in the block; a flag whether the
block is still open or not; and it used to resolve the block
location via PhEDEx.
:return: a dictionary in the format of:
{"PhEDExNodeNames" : [],
"Files" : { LFN : Events },
"IsOpen" : True|False}
"""
if isinstance(fileBlockName, str):
fileBlockName = unicode(fileBlockName)
if not self.blockExists(fileBlockName):
msg = "DBSReader.getFileBlockWithParents(%s): No matching data"
raise DBSReaderError(msg % fileBlockName)
result = {"PhEDExNodeNames": [], # FIXME: we better get rid of this line!
"Files": self.listFilesInBlockWithParents(fileBlockName),
"IsOpen": self.blockIsOpen(fileBlockName)}
return result
def listBlockParents(self, blockName):
"""
Return a list of parent blocks for a given child block name
"""
# FIXME: note the different returned data structure
result = []
self.checkBlockName(blockName)
blocks = self.dbs.listBlockParents(block_name=blockName)
result = [block['parent_block_name'] for block in blocks]
return result
def blockIsOpen(self, blockName):
"""
_blockIsOpen_
Return True if named block is open, false if not, or if block
doenst exist
"""
self.checkBlockName(blockName)
blockInstance = self.dbs.listBlocks(block_name=blockName, detail=True)
if len(blockInstance) == 0:
return False
blockInstance = blockInstance[0]
isOpen = blockInstance.get('open_for_writing', 1)
if isOpen == 0:
return False
return True
def blockToDatasetPath(self, blockName):
"""
_blockToDatasetPath_
Given a block name, get the dataset Path associated with that
Block.
Returns the dataset path, or None if not found
"""
self.checkBlockName(blockName)
try:
blocks = self.dbs.listBlocks(block_name=blockName, detail=True)
except Exception as ex:
msg = "Error in "
msg += "DBSReader.blockToDatasetPath(%s)\n" % blockName
msg += "%s\n" % formatEx3(ex)
raise DBSReaderError(msg)
if blocks == []:
return None
pathname = blocks[-1].get('dataset', None)
return pathname
def listDatasetLocation(self, datasetName):
"""
_listDatasetLocation_
List the origin SEs where there is at least a block of the given
dataset.
"""
self.checkDatasetPath(datasetName)
locations = set()
try:
blocksInfo = self.dbs.listBlockOrigin(dataset=datasetName)
except dbsClientException as ex:
msg = "Error in DBSReader: dbsApi.listBlocks(dataset=%s)\n" % datasetName
msg += "%s\n" % formatEx3(ex)
raise DBSReaderError(msg)
if not blocksInfo: # no data location from dbs
return list()
for blockInfo in blocksInfo:
locations.update(blockInfo['origin_site_name'])
locations.difference_update(['UNKNOWN', None]) # remove entry when SE name is 'UNKNOWN'
return list(locations)
def checkDatasetPath(self, pathName):
"""
_checkDatasetPath_
"""
if pathName in ("", None):
raise DBSReaderError("Invalid Dataset Path name: => %s <=" % pathName)
else:
try:
result = self.dbs.listDatasets(dataset=pathName, dataset_access_type='*')
if len(result) == 0:
raise DBSReaderError("Dataset %s doesn't exist in DBS %s" % (pathName, self.dbsURL))
except (dbsClientException, HTTPError) as ex:
msg = "Error in "
msg += "DBSReader.checkDatasetPath(%s)\n" % pathName
msg += "%s\n" % formatEx3(ex)
raise DBSReaderError(msg)
return
def checkBlockName(self, blockName):
"""
_checkBlockName_
"""
if blockName in ("", "*", None):
raise DBSReaderError("Invalid Block name: => %s <=" % blockName)
def getFileListByDataset(self, dataset, validFileOnly=1, detail=True):
"""
_getFileListByDataset_
Given a dataset, retrieves all blocks, lfns and number of events (among other
not really important info).
Returns a list of dict.
"""
try:
fileList = self.dbs.listFileArray(dataset=dataset, validFileOnly=validFileOnly, detail=detail)
return fileList
except dbsClientException as ex:
msg = "Error in "
msg += "DBSReader.getFileListByDataset(%s)\n" % dataset
msg += "%s\n" % formatEx3(ex)
raise DBSReaderError(msg)
def listDatasetParents(self, childDataset):
"""
list the the parents dataset path given childDataset
"""
try:
parentList = self.dbs.listDatasetParents(dataset=childDataset)
return parentList
except dbsClientException as ex:
msg = "Error in "
msg += "DBSReader.listDatasetParents(%s)\n" % childDataset
msg += "%s\n" % formatEx3(ex)
raise DBSReaderError(msg)
# def getListFilesByLumiAndDataset(self, dataset, files):
# "Unsing pycurl to get all the child parents pair for given dataset"
#
# urls = ['%s/data/dbs/fileparentbylumis?block_name=%s' % (
# self.dbsURL, b["block_name"]) for b in self.dbs.listBlocks(dataset=dataset)]
#
# data = multi_getdata(urls, ckey(), cert())
# rdict = {}
# for row in data:
# try:
# data = json.loads(row['data'])
# rdict[req] = data['result'][0] # we get back {'result': [workflow]} dict
# except Exception as exp:
# print("ERROR: fail to load data as json record, error=%s" % str(exp))
# print(row)
# return rdict
def getParentFilesGivenParentDataset(self, parentDataset, childLFNs):
"""
returns parent files for given childLFN when DBS doesn't have direct parent child relationship in DB
Only use this for finding missing parents
:param parentDataset: parent dataset for childLFN
:param childLFN: a file in child dataset
:return: set of parent files for childLFN
"""
fInfo = self.dbs.listFileLumiArray(logical_file_name=childLFNs)
parentFiles = defaultdict(set)
for f in fInfo:
pFileList = self.dbs.listFiles(dataset=parentDataset, run_num=f['run_num'], lumi_list=f['lumi_section_num'])
pFiles = set([x['logical_file_name'] for x in pFileList])
parentFiles[f['logical_file_name']] = parentFiles[f['logical_file_name']].union(pFiles)
return parentFiles
    def getParentFilesByLumi(self, childLFN):
        """
        get the parent file's lfns by lumi (This might not be the actual parentage relations in DBS just parentage by Lumis).
        use for only specific lfn for validating purpose, for the parentage fix use findAndInsertMissingParentage
        :param childLFN: child LFN to resolve
        :return: list of dictionary with parent files for given child LFN and parent dataset
        [{"ParentDataset": /abc/bad/ddd, "ParentFiles": [alf, baf, ...]]
        """
        childDatasets = self.dbs.listDatasets(logical_file_name=childLFN)
        result = []
        for i in childDatasets:
            parents = self.dbs.listDatasetParents(dataset=i["dataset"])
            for parent in parents:
                parentFiles = self.getParentFilesGivenParentDataset(parent['parent_dataset'], childLFN)
                # NOTE(review): parentFiles is a dict keyed by child LFN, so
                # list(parentFiles) yields the child LFN keys rather than the
                # parent LFNs the docstring advertises — confirm intended.
                result.append({"ParentDataset": parent['parent_dataset'], "ParentFiles": list(parentFiles)})
        return result
def insertFileParents(self, childBlockName, childParentsIDPairs):
"""
:param childBlockName: child block name
:param childParentsIDPairs: list of list child and parent file ids, i.e. [[1,2], [3,4]...]
dbs validate child ids from the childBlockName
:return: None
"""
return self.dbs.insertFileParents({"block_name": childBlockName, "child_parent_id_list": childParentsIDPairs})
    def findAndInsertMissingParentage(self, childBlockName, parentData, insertFlag=True):
        """
        Match the child block's files to parent file ids by run/lumi and
        (optionally) insert the resulting parentage into DBS.

        :param childBlockName: child block name
        :param parentData: a dictionary with complete parent dataset file/run/lumi
            information, keyed {frozenset({run, lumi}): parent file id}
            (see getParentDatasetTrio)
        :param insertFlag: boolean to allow parentage insertion into DBS or not
        :return: number of child files with at least one matched parent
        """
        # in the format of: {'fileid': [[run_num1, lumi1], [run_num1, lumi2], etc]
        # e.g. {'554307997': [[1, 557179], [1, 557178], [1, 557181],
        childBlockData = self.dbs.listBlockTrio(block_name=childBlockName)
        # runs the actual mapping logic, like {"child_id": ["parent_id", "parent_id2", ...], etc
        mapChildParent = {}
        # there should be only 1 item, but we better be safe
        for item in childBlockData:
            for childFileID in item:
                for runLumiPair in item[childFileID]:
                    frozenKey = frozenset(runLumiPair)
                    parentId = parentData.get(frozenKey)
                    if parentId is None:
                        # unmatched lumis are only logged, never inserted
                        msg = "Child file id: %s, with run/lumi: %s, has no match in the parent dataset"
                        self.logger.warning(msg, childFileID, frozenKey)
                        continue
                    mapChildParent.setdefault(childFileID, set())
                    mapChildParent[childFileID].add(parentId)
        if insertFlag and mapChildParent:
            # convert dictionary to list of unique childID, parentID tuples
            listChildParent = []
            for childID in mapChildParent:
                for parentID in mapChildParent[childID]:
                    listChildParent.append([int(childID), int(parentID)])
            self.dbs.insertFileParents({"block_name": childBlockName, "child_parent_id_list": listChildParent})
        return len(mapChildParent)
def listBlocksWithNoParents(self, childDataset):
"""
:param childDataset: child dataset for
:return: set of child blocks with no parentBlock
"""
allBlocks = self.dbs.listBlocks(dataset=childDataset)
blockNames = []
for block in allBlocks:
blockNames.append(block['block_name'])
parentBlocks = self.dbs.listBlockParents(block_name=blockNames)
cblock = set()
for pblock in parentBlocks:
cblock.add(pblock['this_block_name'])
noParentBlocks = set(blockNames) - cblock
return noParentBlocks
def listFilesWithNoParents(self, childBlockName):
"""
:param childBlockName:
:return:
"""
allFiles = self.dbs.listFiles(block_name=childBlockName)
parentFiles = self.dbs.listFileParents(block_name=childBlockName)
allFileNames = set()
for fInfo in allFiles:
allFileNames.add(fInfo['logical_file_name'])
cfile = set()
for pFile in parentFiles:
cfile.add(pFile['logical_file_name'])
noParentFiles = allFileNames - cfile
return list(noParentFiles)
    def fixMissingParentageDatasets(self, childDataset, insertFlag=True):
        """
        Fix the parentage of every parentless block of *childDataset* by
        matching lumis against the parent dataset (see
        findAndInsertMissingParentage).

        :param childDataset: child dataset need to set the parentage correctly.
        :param insertFlag: when False, only compute matches without writing to DBS
        :return: blocks which failed to insert parentage. for retry
            NOTE(review): returns {} when no parent dataset exists but a
            list otherwise — confirm callers tolerate both types.
        """
        pDatasets = self.listDatasetParents(childDataset)
        self.logger.info("Parent datasets for %s are: %s", childDataset, pDatasets)
        # print("parent datasets %s\n" % pDatasets)
        # pDatasets format is
        # [{'this_dataset': '/SingleMuon/Run2016D-03Feb2017-v1/MINIAOD', 'parent_dataset_id': 13265209, 'parent_dataset': '/SingleMuon/Run2016D-23Sep2016-v1/AOD'}]
        if not pDatasets:
            self.logger.warning("No parent dataset found for child dataset %s", childDataset)
            return {}
        parentFullInfo = self.getParentDatasetTrio(childDataset)
        blocks = self.listBlocksWithNoParents(childDataset)
        failedBlocks = []
        self.logger.info("Found %d blocks without parentage information", len(blocks))
        for blockName in blocks:
            try:
                self.logger.info("Fixing parentage for block: %s", blockName)
                numFiles = self.findAndInsertMissingParentage(blockName, parentFullInfo, insertFlag=insertFlag)
                self.logger.debug("%s file parentage added for block %s", numFiles, blockName)
            except Exception as ex:
                self.logger.exception("Parentage updated failed for block %s", blockName)
                failedBlocks.append(blockName)
        return failedBlocks
def getParentDatasetTrio(self, childDataset):
"""
Provided a dataset name, return all the parent dataset information, such as:
- file ids, run number and lumi section
NOTE: This API is meant to be used by the StepChainParentage thread only!!!
:param childDataset: name of the child dataset
:return: a dictionary where the key is a set of run/lumi, its value is the fileid
"""
# this will return data in the format of:
# {'554307997': [[1, 557179], [1, 557178],...
# such that: key is file id, in each list is [run_number, lumi_section_numer].
parentFullInfo = self.dbs.listParentDSTrio(dataset=childDataset)
# runs the actual mapping logic, like {"child_id": ["parent_id", "parent_id2", ...], etc
parentFrozenData = {}
for item in parentFullInfo:
for fileId in item:
for runLumiPair in item[fileId]:
frozenKey = frozenset(runLumiPair)
parentFrozenData[frozenKey] = fileId
return parentFrozenData
| 38.044239 | 163 | 0.601774 |
0088d6191608a263f4d4efecc18fe6c0d095a0f4
| 2,773 |
py
|
Python
|
nxbender/__init__.py
|
jeanlego/nxBender
|
48b066d93d615b8171aa0b6967deca4f53c51f3c
|
[
"BSD-3-Clause"
] | null | null | null |
nxbender/__init__.py
|
jeanlego/nxBender
|
48b066d93d615b8171aa0b6967deca4f53c51f3c
|
[
"BSD-3-Clause"
] | null | null | null |
nxbender/__init__.py
|
jeanlego/nxBender
|
48b066d93d615b8171aa0b6967deca4f53c51f3c
|
[
"BSD-3-Clause"
] | null | null | null |
import configargparse
import requests
import logging
import getpass
from colorlog import ColoredFormatter
# Option definitions. configargparse merges values from the CLI and from the
# config files listed in default_config_files.
parser = configargparse.ArgumentParser(
    description='Connect to a netExtender VPN',
    default_config_files=['/etc/nxbender', '~/.nxbender'],
)
parser.add_argument('-c', '--conf', is_config_file=True)
# Connection parameters.
parser.add_argument('-s', '--server', required=True)
parser.add_argument('-P', '--port', type=int, default=443, help='Server port - default 443')
parser.add_argument('-u', '--username', required=True)
# Password is optional here; main() prompts interactively when it is absent.
parser.add_argument('-p', '--password', required=False)
parser.add_argument('-d', '--domain', required=True)
parser.add_argument('-f', '--fingerprint', help='Verify server\'s SSL certificate has this fingerprint. Overrides all other certificate verification.')
parser.add_argument('-m', '--max-line', type=int, default=1500, help='Maximum length of a single line of PPP data sent to the server')
parser.add_argument('--pinentry', help='Program to use to prompt for interactive responses eg. OTP codes. Specify "none" to just prompt on the terminal.')
# Output / debugging options.
parser.add_argument('--debug', action='store_true', help='Show debugging information')
parser.add_argument('-q', '--quiet', action='store_true', help='Don\'t output basic info whilst running')
parser.add_argument('--show-ppp-log', action='store_true', help='Print PPP log messages to stdout')
def main():
    """Parse CLI/config arguments, configure logging, and run the VPN session."""
    args = parser.parse_args()

    if args.debug:
        loglevel = logging.DEBUG
    elif args.quiet:
        loglevel = logging.WARNING
    else:
        loglevel = logging.INFO

    # Only prompt interactively when no password came from the CLI or config.
    if not args.password:
        args.password = getpass.getpass()

    formatter = ColoredFormatter(
        "%(log_color)s%(levelname)-8s%(reset)s %(message_log_color)s%(message)s",
        secondary_log_colors={
            'message': {
                'ERROR': 'red',
                'CRITICAL': 'red'
            }
        }
    )

    logging.basicConfig(level=loglevel)
    logging.getLogger().handlers[0].setFormatter(formatter)

    if args.debug:
        try:
            from http.client import HTTPConnection  # py3
        except ImportError:
            from httplib import HTTPConnection  # py2
        HTTPConnection.debuglevel = 2

    from . import nx, sslconn

    sess = nx.NXSession(args)
    try:
        sess.run()
    except requests.exceptions.SSLError as e:
        logging.error("SSL error: %s" % e)
        # print the server's fingerprint for the user to consider
        sslconn.print_fingerprint(args.server, args.port)
    except requests.exceptions.ConnectionError as e:
        # BUG FIX: the old code read `e.message.reason.message`, which only
        # exists on Python 2 — on Python 3 it raised AttributeError and hid
        # the real connection error. str(e) is portable on both versions.
        logging.error("Error connecting to remote host: %s" % e)
| 36.012987 | 154 | 0.65669 |
c1ea1bd5e02efa510aa87bd25bb052a9bf42da4c
| 741 |
py
|
Python
|
problem_solving/python/algorithms/implementation/sequence_equation.py
|
kcc3/hackerrank-solutions
|
f862b44b840bd447d99dc148f6bb5e2f5bfb8a86
|
[
"MIT"
] | null | null | null |
problem_solving/python/algorithms/implementation/sequence_equation.py
|
kcc3/hackerrank-solutions
|
f862b44b840bd447d99dc148f6bb5e2f5bfb8a86
|
[
"MIT"
] | null | null | null |
problem_solving/python/algorithms/implementation/sequence_equation.py
|
kcc3/hackerrank-solutions
|
f862b44b840bd447d99dc148f6bb5e2f5bfb8a86
|
[
"MIT"
] | 1 |
2020-06-04T09:23:19.000Z
|
2020-06-04T09:23:19.000Z
|
def permutation_equation(p):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/permutation-equation/problem

    Given a sequence of n integers, p(1), p(2), ..., p(n) where each element is distinct and satisfies 1 <= p(x) <= n.
    For each x where 1 <= x <= n, find any integer y such that p(p(y)) ~ x and return that value

    Args:
        p (list): list of integers p[i] where 1 <= i <= n

    Returns:
        list: list of the values that satisfy p(p(y)) ~ x
    """
    # pos[v] is the 1-based position of value v in p. Building it once turns
    # the repeated O(n) list.index() scans into O(1) lookups (O(n) total).
    pos = {value: index for index, value in enumerate(p, start=1)}
    # `range` (not the py2-only `xrange`) works on both Python 2 and 3.
    return [pos[pos[i]] for i in range(1, len(p) + 1)]
if __name__ == "__main__":
    # Use the print() function form so this demo also runs on Python 3
    # (a single parenthesized argument prints identically on Python 2).
    print(permutation_equation([2, 3, 1]))
    print(permutation_equation([4, 3, 5, 1, 2]))
| 35.285714 | 118 | 0.620783 |
9b6b84a59b855235f201a1fbf216c22f4fad8792
| 1,193 |
py
|
Python
|
datasets/wdbc/wdbc_extract.py
|
fadel/msc-experiments
|
427e92982169f7e55f836fea2fd9865f0f809bd4
|
[
"MIT"
] | null | null | null |
datasets/wdbc/wdbc_extract.py
|
fadel/msc-experiments
|
427e92982169f7e55f836fea2fd9865f0f809bd4
|
[
"MIT"
] | null | null | null |
datasets/wdbc/wdbc_extract.py
|
fadel/msc-experiments
|
427e92982169f7e55f836fea2fd9865f0f809bd4
|
[
"MIT"
] | null | null | null |
import hashlib
import logging
import pandas as pd
import os
import os.path
import wget
DATA_URL = "http://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data"
# SHA-256 of the expected download, used to detect a corrupted file.
DATA_SHA256 = "d606af411f3e5be8a317a5a8b652b425aaf0ff38ca683d5327ffff94c3695f4a"
DATA_FILE = "wdbc.data"

if __name__ == "__main__":
    logging.basicConfig(filename="wdbc_extract.log",
                        format="%(levelname)s:%(message)s",
                        level=logging.INFO)

    # Download only when the local copy is missing.
    if not os.path.exists(DATA_FILE):
        # BUG FIX: the log message had an unbalanced quote ("Downloading '{}").
        logging.info("Downloading '{}'".format(DATA_URL))
        wget.download(DATA_URL, DATA_FILE)

    # Verify integrity before parsing; abort on checksum mismatch.
    with open(DATA_FILE, "rb") as f:
        if hashlib.sha256(f.read()).hexdigest() != DATA_SHA256:
            logging.error("'{}' is corrupted; aborting".format(DATA_FILE))
            exit(1)

    # read_csv is the canonical pandas API (read_table with delimiter="," is
    # a deprecated alias for the same call). Column 0 holds sample ids and
    # column 1 the class labels (per the UCI dataset description).
    data = pd.read_csv(DATA_FILE, header=None)
    wdbc_ids = data[0]
    wdbc_labels = data[1]
    wdbc = data.drop([0, 1], axis=1)

    wdbc.to_csv("wdbc.tbl", sep=" ", index=False, header=False)
    wdbc_labels.to_csv("wdbc.labels", sep=" ", index=False, header=False)
    wdbc_ids.to_csv("wdbc.ids", sep=" ", index=False, header=False)
| 34.085714 | 103 | 0.652976 |
eadcf29bfc7fb68a7c7d2a8034909be70519d37d
| 274 |
py
|
Python
|
one_fm/operations/doctype/checkpoints_form_child_table/checkpoints_form_child_table.py
|
askmetoo/One-FM
|
c93ed63695a3e62ee8129bd9adf563116b749030
|
[
"MIT"
] | 16 |
2021-06-14T23:56:47.000Z
|
2022-03-22T12:05:06.000Z
|
one_fm/operations/doctype/checkpoints_form_child_table/checkpoints_form_child_table.py
|
askmetoo/One-FM
|
c93ed63695a3e62ee8129bd9adf563116b749030
|
[
"MIT"
] | 119 |
2020-08-17T16:27:45.000Z
|
2022-03-28T12:42:56.000Z
|
one_fm/operations/doctype/checkpoints_form_child_table/checkpoints_form_child_table.py
|
askmetoo/One-FM
|
c93ed63695a3e62ee8129bd9adf563116b749030
|
[
"MIT"
] | 12 |
2021-05-16T13:35:40.000Z
|
2022-02-21T12:41:04.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, omar jaber and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class CheckpointsFormChildTable(Document):
    """Frappe DocType controller for 'Checkpoints Form Child Table'.

    Auto-generated scaffold; no behaviour beyond the base Document yet.
    """
    pass
| 24.909091 | 49 | 0.784672 |
9117728b98731a3eae74d5d9b7c7d0d1cd3d3213
| 525 |
py
|
Python
|
var/spack/repos/builtin/packages/py-xlrd/package.py
|
HaochengLIU/spack
|
26e51ff1705a4d6234e2a0cf734f93f7f95df5cb
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 |
2018-11-27T03:39:44.000Z
|
2021-09-06T15:50:35.000Z
|
var/spack/repos/builtin/packages/py-xlrd/package.py
|
HaochengLIU/spack
|
26e51ff1705a4d6234e2a0cf734f93f7f95df5cb
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 |
2019-01-11T20:11:52.000Z
|
2019-01-11T20:11:52.000Z
|
var/spack/repos/builtin/packages/py-xlrd/package.py
|
HaochengLIU/spack
|
26e51ff1705a4d6234e2a0cf734f93f7f95df5cb
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 |
2020-10-14T14:20:17.000Z
|
2020-10-14T14:20:17.000Z
|
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyXlrd(PythonPackage):
    """Library for developers to extract data from Microsoft Excel (tm)
    spreadsheet files"""

    homepage = 'http://www.python-excel.org/'
    url = "https://pypi.io/packages/source/x/xlrd/xlrd-0.9.4.tar.gz"

    # Second argument is the source tarball checksum (32 hex chars — the
    # legacy Spack md5-style form; presumably md5, verify before updating).
    version('0.9.4', '911839f534d29fe04525ef8cd88fe865')
f09d6118de643641e53d35d92594ad024a8559c8
| 192 |
py
|
Python
|
app/__init__.py
|
rohansatapathy/rohansatapathy.com
|
7187fe44bc8f6aac1dc6a621d4d67d91ff5f7f01
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
rohansatapathy/rohansatapathy.com
|
7187fe44bc8f6aac1dc6a621d4d67d91ff5f7f01
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
rohansatapathy/rohansatapathy.com
|
7187fe44bc8f6aac1dc6a621d4d67d91ff5f7f01
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_flatpages import FlatPages
from config import Config
app = Flask(__name__)
app.config.from_object(Config)
pages = FlatPages(app)
from app import routes
| 14.769231 | 37 | 0.802083 |
82eb51aec0417f7d036e1937ed4824dfe749dead
| 1,606 |
py
|
Python
|
plenum/test/node_request/test_commit/test_num_of_commit_with_f_plus_one_faults.py
|
steptan/indy-plenum
|
488bf63c82753a74a92ac6952da784825ffd4a3d
|
[
"Apache-2.0"
] | null | null | null |
plenum/test/node_request/test_commit/test_num_of_commit_with_f_plus_one_faults.py
|
steptan/indy-plenum
|
488bf63c82753a74a92ac6952da784825ffd4a3d
|
[
"Apache-2.0"
] | null | null | null |
plenum/test/node_request/test_commit/test_num_of_commit_with_f_plus_one_faults.py
|
steptan/indy-plenum
|
488bf63c82753a74a92ac6952da784825ffd4a3d
|
[
"Apache-2.0"
] | 2 |
2017-12-13T21:14:54.000Z
|
2021-06-06T15:48:03.000Z
|
from functools import partial
import pytest
from plenum.common.util import getNoInstances
from stp_core.common.util import adict
from plenum.test.node_request.node_request_helper import checkCommitted
from plenum.test.malicious_behaviors_node import makeNodeFaulty, \
delaysPrePrepareProcessing, \
changesRequest
# Size of the test pool.
nodeCount = 7
# f + 1 faults, i.e, num of faults greater than system can tolerate
faultyNodes = 3
# Error messages the test harness is allowed to see without failing the run.
whitelist = ['InvalidSignature',
             'cannot process incoming PREPARE']
@pytest.fixture(scope="module")
def setup(txnPoolNodeSet):
    # Make the last three pool nodes faulty so that no primary can be chosen.
    A, B, G = txnPoolNodeSet[-3:]
    for faulty_node in (A, B, G):
        makeNodeFaulty(
            faulty_node, changesRequest, partial(
                delaysPrePrepareProcessing, delay=90))
        # Delaying nomination to avoid becoming primary
        # node.delaySelfNomination(10)
    return adict(faulties=(A, B, G))
@pytest.fixture(scope="module")
def afterElection(setup):
    # None of the deliberately faulty nodes may have won a primary role.
    for faulty_node in setup.faulties:
        for replica in faulty_node.replicas:
            assert not replica.isPrimary
def testNumOfCommitMsgsWithFPlusOneFaults(afterElection, looper,
                                          txnPoolNodeSet, prepared1, noRetryReq):
    # With f + 1 faulty nodes the pool cannot commit, so checkCommitted must
    # fail when told to tolerate fewer faults than actually exist.
    with pytest.raises(AssertionError):
        # To raise an error pass less than the actual number of faults
        instanceIds = range(getNoInstances(len(txnPoolNodeSet)))
        checkCommitted(looper,
                       txnPoolNodeSet,
                       prepared1,
                       instanceIds,
                       faultyNodes - 1)
| 31.490196 | 81 | 0.663138 |
84c77c4679deebbedf6f1a054a33a5d1383193d1
| 458 |
py
|
Python
|
src/plotfunc.py
|
r4tn3sh/MIMO_detection
|
3bfe80c4c3d7e6cfef4510b6e81683d987dc14ac
|
[
"MIT"
] | 6 |
2020-06-15T21:45:51.000Z
|
2022-01-19T09:37:37.000Z
|
src/plotfunc.py
|
r4tn3sh/MIMO_detection
|
3bfe80c4c3d7e6cfef4510b6e81683d987dc14ac
|
[
"MIT"
] | null | null | null |
src/plotfunc.py
|
r4tn3sh/MIMO_detection
|
3bfe80c4c3d7e6cfef4510b6e81683d987dc14ac
|
[
"MIT"
] | 4 |
2020-07-24T09:32:13.000Z
|
2022-02-25T18:37:07.000Z
|
import matplotlib.pyplot as plt
from numpy import array
# ---------- PLOTS ------------
def plotConstell(y):
    """
    Plot the constellation (I/Q scatter) of the given samples.

    :param y: 2-D array of complex samples, one row per path
              (assumed shape (paths, samples) — confirm with callers)
    :return: the matplotlib figure
    """
    yr = y.real
    yi = y.imag
    # Only the number of rows (paths) is needed; the old unused `ncol`
    # unpacking and commented-out remnants were removed.
    nrow = y.shape[0]
    fig, ax = plt.subplots()
    for idx in range(nrow):
        plt.scatter(array(yr[idx]), array(yi[idx]), s=3, label='path '+str(idx+1))
    ax.legend()
    return fig
| 25.444444 | 82 | 0.565502 |
a96d380327131be0309bbbc2fadcfb0651a776bd
| 4,781 |
py
|
Python
|
pdfreader/codecs/standard.py
|
tmcclintock/pdfreader
|
2933e7b92b5ab7fd60e1a4a27c251de387ad098a
|
[
"MIT"
] | 77 |
2020-01-07T11:32:53.000Z
|
2022-03-31T08:16:17.000Z
|
pdfreader/codecs/standard.py
|
tmcclintock/pdfreader
|
2933e7b92b5ab7fd60e1a4a27c251de387ad098a
|
[
"MIT"
] | 49 |
2019-12-23T19:33:53.000Z
|
2022-03-06T15:44:41.000Z
|
pdfreader/codecs/standard.py
|
tmcclintock/pdfreader
|
2933e7b92b5ab7fd60e1a4a27c251de387ad098a
|
[
"MIT"
] | 18 |
2020-05-27T19:26:19.000Z
|
2022-02-25T06:42:59.000Z
|
from .codec import Codec
class StandardCodec(Codec):
    """Adobe StandardEncoding codec for PDF text strings.

    encode_table maps a Unicode character to its single StandardEncoding
    byte; decode_table maps a StandardEncoding code point (int) to an Adobe
    glyph NAME (string), not to a character.
    """
    use_ZapfDingbats = True
    name = "StandardEncoding"

    # Unicode character -> StandardEncoding byte.
    encode_table = {'A': b'A', 'Æ': b'\xe1', 'B': b'B', 'C': b'C', 'D': b'D', 'E': b'E', 'F': b'F', 'G': b'G', 'H': b'H',
                    'I': b'I', 'J': b'J', 'K': b'K', 'L': b'L', 'Ł': b'\xe8', 'M': b'M', 'N': b'N', 'O': b'O',
                    'Œ': b'\xea', 'Ø': b'\xe9', 'P': b'P', 'Q': b'Q', 'R': b'R', 'S': b'S', 'T': b'T', 'U': b'U',
                    'V': b'V', 'W': b'W', 'X': b'X', 'Y': b'Y', 'Z': b'Z', 'a': b'a', '´': b'\xc2', 'æ': b'\xf1', '&': b'&',
                    '^': b'^', '~': b'~', '*': b'*', '@': b'@', 'b': b'b', '\\': b'\\', '|': b'|', '{': b'{', '}': b'}',
                    '[': b'[', ']': b']', '˘': b'\xc6', '•': b'\xb7', 'c': b'c', 'ˇ': b'\xcf', '¸': b'\xcb', '¢': b'\xa2',
                    'ˆ': b'\xc3', ':': b':', ',': b',', '¤': b'\xa8', 'd': b'd', '†': b'\xb2', '‡': b'\xb3', '¨': b'\xc8',
                    '$': b'$', '˙': b'\xc7', 'ı': b'\xf5', 'e': b'e', '8': b'8', '…': b'\xbc', '—': b'\xd0', '–': b'\xb1',
                    '=': b'=', '!': b'!', '¡': b'\xa1', 'f': b'f', 'fi': b'\xae', '5': b'5', 'fl': b'\xaf', 'ƒ': b'\xa6',
                    '4': b'4', '⁄': b'\xa4', 'g': b'g', 'ß': b'\xfb', '`': b'\xc1', '>': b'>', '«': b'\xab', '»': b'\xbb',
                    '‹': b'\xac', '›': b'\xad', 'h': b'h', '˝': b'\xcd', '-': b'-', 'i': b'i', 'j': b'j', 'k': b'k',
                    'l': b'l', '<': b'<', 'ł': b'\xf8', 'm': b'm', '¯': b'\xc5', 'n': b'n', '9': b'9', '#': b'#', 'o': b'o',
                    'œ': b'\xfa', '˛': b'\xce', '1': b'1', 'ª': b'\xe3', 'º': b'\xeb', 'ø': b'\xf9', 'p': b'p',
                    '¶': b'\xb6', '(': b'(', ')': b')', '%': b'%', '.': b'.', '·': b'\xb4', '‰': b'\xbd', '+': b'+',
                    'q': b'q', '?': b'?', '¿': b'\xbf', '"': b'"', '„': b'\xb9', '“': b'\xaa', '”': b'\xba', '‘': b'`',
                    '’': b"'", '‚': b'\xb8', "'": b'\xa9', 'r': b'r', '°': b'\xca', 's': b's', '§': b'\xa7', ';': b';',
                    '7': b'7', '6': b'6', '/': b'/', '\xa0': b' ', '£': b'\xa3', 't': b't', '3': b'3', '˜': b'\xc4',
                    '2': b'2', 'u': b'u', '_': b'_', 'v': b'v', 'w': b'w', 'x': b'x', 'y': b'y', '¥': b'\xa5', 'z': b'z',
                    '0': b'0'}

    # Code point -> Adobe glyph name.
    # NOTE(review): 'bullet (3)' (183) and 'currency1' (168) look like
    # scrape artifacts of the spec's glyph names — confirm before relying
    # on them for reverse lookups.
    decode_table = {65: 'A', 225: 'AE', 66: 'B', 67: 'C', 68: 'D', 69: 'E', 70: 'F', 71: 'G', 72: 'H', 73: 'I', 74: 'J',
                    75: 'K', 76: 'L', 232: 'Lslash', 77: 'M', 78: 'N', 79: 'O', 234: 'OE', 233: 'Oslash', 80: 'P', 81: 'Q',
                    82: 'R', 83: 'S', 84: 'T', 85: 'U', 86: 'V', 87: 'W', 88: 'X', 89: 'Y', 90: 'Z', 97: 'a',
                    194: 'acute', 241: 'ae', 38: 'ampersand', 94: 'asciicircum', 126: 'asciitilde', 42: 'asterisk',
                    64: 'at', 98: 'b', 92: 'backslash', 124: 'bar', 123: 'braceleft', 125: 'braceright', 91: 'bracketleft',
                    93: 'bracketright', 198: 'breve', 183: 'bullet (3)', 99: 'c', 207: 'caron', 203: 'cedilla', 162: 'cent',
                    195: 'circumflex', 58: 'colon', 44: 'comma', 168: 'currency1', 100: 'd', 178: 'dagger',
                    179: 'daggerdbl', 200: 'dieresis', 36: 'dollar', 199: 'dotaccent', 245: 'dotlessi', 101: 'e',
                    56: 'eight', 188: 'ellipsis', 208: 'emdash', 177: 'endash', 61: 'equal', 33: 'exclam',
                    161: 'exclamdown', 102: 'f', 174: 'fi', 53: 'five', 175: 'fl', 166: 'florin', 52: 'four',
                    164: 'fraction', 103: 'g', 251: 'germandbls', 193: 'grave', 62: 'greater', 171: 'guillemotleft',
                    187: 'guillemotright', 172: 'guilsinglleft', 173: 'guilsinglright', 104: 'h', 205: 'hungarumlaut',
                    45: 'hyphen', 105: 'i', 106: 'j', 107: 'k', 108: 'l', 60: 'less', 248: 'lslash', 109: 'm',
                    197: 'macron', 110: 'n', 57: 'nine', 35: 'numbersign', 111: 'o', 250: 'oe', 206: 'ogonek', 49: 'one',
                    227: 'ordfeminine', 235: 'ordmasculine', 249: 'oslash', 112: 'p', 182: 'paragraph', 40: 'parenleft',
                    41: 'parenright', 37: 'percent', 46: 'period', 180: 'periodcentered', 189: 'perthousand',
                    43: 'plus', 113: 'q', 63: 'question', 191: 'questiondown', 34: 'quotedbl', 185: 'quotedblbase',
                    170: 'quotedblleft', 186: 'quotedblright', 96: 'quoteleft', 39: 'quoteright', 184: 'quotesinglbase',
                    169: 'quotesingle', 114: 'r', 202: 'ring', 115: 's', 167: 'section', 59: 'semicolon', 55: 'seven',
                    54: 'six', 47: 'slash', 32: 'space', 163: 'sterling', 116: 't', 51: 'three', 196: 'tilde',
                    50: 'two', 117: 'u', 95: 'underscore', 118: 'v', 119: 'w', 120: 'x', 121: 'y', 165: 'yen', 122: 'z',
                    48: 'zero'}
e0d7a8890a2ea6806a6475df423d3fdb169e2ab6
| 523 |
py
|
Python
|
invsystem/object_adder/migrations/0005_auto_20181227_1206.py
|
Clemens-Dautermann/Inventarium
|
da22fb630a876b0e3c7e7bfe8a84c71c9d2ab0f9
|
[
"MIT"
] | null | null | null |
invsystem/object_adder/migrations/0005_auto_20181227_1206.py
|
Clemens-Dautermann/Inventarium
|
da22fb630a876b0e3c7e7bfe8a84c71c9d2ab0f9
|
[
"MIT"
] | 4 |
2018-12-27T23:45:37.000Z
|
2018-12-30T19:11:23.000Z
|
invsystem/object_adder/migrations/0005_auto_20181227_1206.py
|
Clemens-Dautermann/Inventarium
|
da22fb630a876b0e3c7e7bfe8a84c71c9d2ab0f9
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.4 on 2018-12-27 11:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: relaxes Object.category to allow
    # blank/null values and detaches it (SET_NULL) when the Category row
    # is deleted. Do not edit applied migrations.

    dependencies = [
        ('object_adder', '0004_auto_20181223_1300'),
    ]

    operations = [
        migrations.AlterField(
            model_name='object',
            name='category',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='object_adder.Category'),
        ),
    ]
| 26.15 | 147 | 0.657744 |
b7e6a7076a78e0189045c7d6a92814fe59c84664
| 1,865 |
py
|
Python
|
doozerlib/assertion.py
|
Ximinhan/doozer
|
eeb33625f88cb264f2681347457dff209da261a4
|
[
"Apache-2.0"
] | 16 |
2018-11-06T16:49:03.000Z
|
2021-11-07T19:48:49.000Z
|
doozerlib/assertion.py
|
Ximinhan/doozer
|
eeb33625f88cb264f2681347457dff209da261a4
|
[
"Apache-2.0"
] | 479 |
2018-11-15T15:37:49.000Z
|
2022-03-31T08:39:44.000Z
|
doozerlib/assertion.py
|
Ximinhan/doozer
|
eeb33625f88cb264f2681347457dff209da261a4
|
[
"Apache-2.0"
] | 38 |
2018-11-07T14:33:15.000Z
|
2021-12-13T13:59:12.000Z
|
"""
The assertion module provides functions that will raise an exception if
the asserted condition is not met.
The use of the FileNotFound exception makes this Python3 ready.
Making them functions keeps the exception definition localized.
"""
import os
import errno
# Provide FileNotFoundError on Python 2 (it is a builtin on Python 3).
# BUG FIX: the old shim did `import FileNotFoundError`, which is never a
# module — on Python 3 the resulting ImportError made the except branch
# replace the real FileNotFoundError builtin with IOError (== OSError),
# silently broadening every exception this module raises and catches.
try:
    FileNotFoundError
except NameError:
    FileNotFoundError = IOError

# Provide ChildProcessError on Python 2 (builtin on Python 3), same fix.
try:
    ChildProcessError
except NameError:
    ChildProcessError = IOError
def isdir(path, message):
    """
    Raise an exception if the given directory does not exist.
    :param path: The path to a directory to be tested
    :param message: A custom message to report in the exception
    :raises: FileNotFoundError
    """
    path = str(path)  # Convert from pathlib.Path if necessary
    if os.path.isdir(path):
        return
    raise FileNotFoundError(
        errno.ENOENT,
        "{}: {}".format(message, os.strerror(errno.ENOENT)), path)
def isfile(path, message):
    """
    Raise an exception if the given file does not exist.
    :param path: The str or pathlib path to a file to be tested
    :param message: A custom message to report in the exception
    :raises: FileNotFoundError
    """
    path = str(path)  # Convert from pathlib.Path if necessary
    if os.path.isfile(path):
        return
    raise FileNotFoundError(
        errno.ENOENT,
        "{}: {}".format(message, os.strerror(errno.ENOENT)), path)
def success(exitcode, message):
    """
    Raise an IO Error if the return code from a subprocess is non-zero
    :param exitcode: The return code from a subprocess run
    :param message: A custom message if the process failed
    :raises: ChildProcessError
    """
    if exitcode == 0:
        return
    raise ChildProcessError("Command returned non-zero exit status: %s" % message)
| 28.257576 | 86 | 0.695442 |
aa0340579bf91599ab6a0b6c64ab138b36666de1
| 4,391 |
py
|
Python
|
scrapel/http/response/__init__.py
|
NJoyX/nameko-scrapel
|
7ad43e6b9b14ffe39e50d6c2c9539a9ef5cff2af
|
[
"Apache-2.0"
] | 1 |
2017-12-03T12:47:19.000Z
|
2017-12-03T12:47:19.000Z
|
scrapel/http/response/__init__.py
|
NJoyX/nameko-scrapel
|
7ad43e6b9b14ffe39e50d6c2c9539a9ef5cff2af
|
[
"Apache-2.0"
] | null | null | null |
scrapel/http/response/__init__.py
|
NJoyX/nameko-scrapel
|
7ad43e6b9b14ffe39e50d6c2c9539a9ef5cff2af
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals, print_function, absolute_import
from scrapel.exceptions import NotSupported
from scrapel.http import Request
from scrapel.http.headers import Headers
from six.moves.urllib.parse import urljoin
__author__ = 'Fill Q and Scrapy developers'
# Public API of this module.
__all__ = ['Response']

# @TODO JsonResponse
class Response(object):
    """In-memory HTTP response, optionally tied to the Request that produced it.

    The body is always stored as bytes; text decoding and selector support
    are delegated to subclasses (see the `text`, `css` and `xpath` stubs).
    """

    # Subclasses may override this with a response-type marker; unused here.
    type = None

    def __init__(self, url, status=200, headers=None, body=b'', request=None):
        """
        :param url: absolute URL the response was fetched from (str)
        :param status: HTTP status code (coerced to int)
        :param headers: mapping used to build a Headers instance
        :param body: raw response body as bytes (None is treated as b'')
        :param request: the originating Request, or None
        """
        self.headers = Headers(headers or {})
        self.status = int(status)
        self._set_body(body)
        self._set_url(url)
        # BUG FIX: `request` defaults to None, but the original assert
        # rejected None outright, making the documented default unusable.
        # The `meta` property below explicitly supports a response that is
        # "not tied to any request", so None must be accepted here.
        # NOTE: assert is stripped under `python -O`; kept for compatibility.
        assert request is None or isinstance(
            request, Request
        ), 'Invalid request provided, must be an Request class instance, not {}'.format(type(request))
        self.request = request

    @property
    def meta(self):
        """Shortcut to the originating request's meta dict."""
        try:
            return self.request.meta
        except AttributeError:
            raise AttributeError(
                "Response.meta not available, this response "
                "is not tied to any request"
            )

    def _get_url(self):
        return self._url

    def _set_url(self, url):
        if isinstance(url, str):
            self._url = url
        else:
            raise TypeError('%s url must be str, got %s:' % (type(self).__name__,
                                                             type(url).__name__))

    # `url` is read-only: the property setter silently ignores assignment.
    url = property(_get_url, lambda self, url: None)

    def _get_body(self):
        return self._body

    def _set_body(self, body):
        if body is None:
            self._body = b''
        elif not isinstance(body, bytes):
            raise TypeError(
                "Response body must be bytes. "
                "If you want to pass unicode body use TextResponse "
                "or HtmlResponse.")
        else:
            self._body = body

    # `body` is read-only as well.
    body = property(_get_body, lambda self, body: None)

    def __str__(self):
        return "<%d %s>" % (self.status, self.url)

    __repr__ = __str__

    def copy(self):
        """Return a copy of this Response"""
        return self.replace()

    def replace(self, *args, **kwargs):
        """Create a new Response with the same attributes except for those
        given new values.
        """
        # BUG FIX: the attribute list used to include 'flags', but this class
        # never sets a `flags` attribute and __init__ takes no `flags`
        # kwarg, so every copy()/replace() call raised AttributeError.
        for x in ['url', 'status', 'headers', 'body', 'request']:
            kwargs.setdefault(x, getattr(self, x))
        cls = kwargs.pop('cls', self.__class__)
        return cls(*args, **kwargs)

    def urljoin(self, url):
        """Join this Response's url with a possible relative url to form an
        absolute interpretation of the latter."""
        return urljoin(self.url, url)

    @property
    def text(self):
        """For subclasses of TextResponse, this will return the body
        as text (unicode object in Python 2 and str in Python 3)
        """
        raise AttributeError("Response content isn't text")

    def css(self, *a, **kw):
        """Shortcut method implemented only by responses whose content
        is text (subclasses of TextResponse).
        """
        raise NotSupported("Response content isn't text")

    def xpath(self, *a, **kw):
        """Shortcut method implemented only by responses whose content
        is text (subclasses of TextResponse).
        """
        raise NotSupported("Response content isn't text")

    def follow(self, url, callback=None, method='GET', headers=None, body=None,
               cookies=None, meta=None, encoding='utf-8', priority=0,
               dont_filter=False, errback=None):
        """
        Return a :class:`~.Request` instance to follow a link ``url``.
        It accepts the same arguments as ``Request.__init__`` method,
        but ``url`` can be a relative URL or a ``scrapy.link.Link`` object,
        not only an absolute URL.
        :class:`~.TextResponse` provides a :meth:`~.TextResponse.follow`
        method which supports selectors in addition to absolute/relative URLs
        and Link objects.
        """
        url = self.urljoin(url)
        return Request(url, callback,
                       method=method,
                       headers=headers,
                       body=body,
                       cookies=cookies,
                       meta=meta,
                       encoding=encoding,
                       priority=priority,
                       dont_filter=dont_filter,
                       errback=errback)
| 33.265152 | 102 | 0.577773 |
5159821abc30141903dd01228c0234705a8c9589
| 278 |
py
|
Python
|
PythonStudy/learnxinyminutes_Class.py
|
CenterLineM/PythonPon
|
ca6a29ec10b095684e90cfcfbf6e0ac1cb5eabe3
|
[
"Apache-2.0"
] | null | null | null |
PythonStudy/learnxinyminutes_Class.py
|
CenterLineM/PythonPon
|
ca6a29ec10b095684e90cfcfbf6e0ac1cb5eabe3
|
[
"Apache-2.0"
] | null | null | null |
PythonStudy/learnxinyminutes_Class.py
|
CenterLineM/PythonPon
|
ca6a29ec10b095684e90cfcfbf6e0ac1cb5eabe3
|
[
"Apache-2.0"
] | null | null | null |
class Human(object):
    """Minimal example class from a Python tutorial."""

    # Class attribute, shared by all instances of this class.
    species = "H. sapiens"

    def __init__(self, name):
        # Bind the constructor argument to this instance's name attribute.
        self.name = name

    def say(self, msg):
        # Render "<name>: <message>" for this instance.
        template = "{name}: {message}"
        return template.format(name=self.name, message=msg)

    # Class method (the original tutorial continues here)
| 25.272727 | 70 | 0.625899 |
3e43af14aaf50a1f6002b47528bdb2227adada9b
| 206 |
py
|
Python
|
Pacote-download/aulas_python_cev/ex_29_multa_km.py
|
HLAvieira/Curso-em-Video-Python3
|
04b9fa08acd1f70e39c6671e48330a445e2a77f8
|
[
"MIT"
] | null | null | null |
Pacote-download/aulas_python_cev/ex_29_multa_km.py
|
HLAvieira/Curso-em-Video-Python3
|
04b9fa08acd1f70e39c6671e48330a445e2a77f8
|
[
"MIT"
] | null | null | null |
Pacote-download/aulas_python_cev/ex_29_multa_km.py
|
HLAvieira/Curso-em-Video-Python3
|
04b9fa08acd1f70e39c6671e48330a445e2a77f8
|
[
"MIT"
] | null | null | null |
# Read the car's speed and charge R$7.00 for every km/h above the 80 km/h limit.
velocidade = float(input('Digite a velocidade do carro em Km/h ::::: '))
excesso = velocidade - 80.0
if excesso > 0:
    print('você foi multado em R${:.2f} '.format(excesso * 7))
else:
    print('Velocidade permitida')
| 41.2 | 72 | 0.660194 |
f1df13964525f4c1c4b34cf26c29b60fa1eaa9bc
| 9,937 |
py
|
Python
|
grr/server/grr_response_server/gui/selenium_tests/hosttable_test.py
|
certxlm/grr
|
c2a442a27f656fb18dfa3bce098847e5c5b849d7
|
[
"Apache-2.0"
] | 1 |
2019-08-28T23:48:20.000Z
|
2019-08-28T23:48:20.000Z
|
grr/server/grr_response_server/gui/selenium_tests/hosttable_test.py
|
AjitNair2/grr
|
2a2ea891b3927775872904cdd402a18e7bb3d143
|
[
"Apache-2.0"
] | 2 |
2022-01-15T03:18:12.000Z
|
2022-02-13T22:02:43.000Z
|
grr/server/grr_response_server/gui/selenium_tests/hosttable_test.py
|
acidburn0zzz/grr
|
44e1a5b1630e8101610faaaebe15b19b5ad30cb1
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Tests for host table in search view."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
from grr_response_server.gui import gui_test_lib
from grr.test_lib import test_lib
class TestHostTable(gui_test_lib.SearchClientTestBase):
"""Tests the main content view."""
  def setUp(self):
    super(TestHostTable, self).setUp()
    # Ten pre-registered clients shared by every test in this class.
    self.client_ids = self.SetupClients(10)
  def testUserLabelIsShownAsBootstrapSuccessLabel(self):
    """User-added labels must render with the bootstrap 'label-success' style."""
    self.AddClientLabel(self.client_ids[0], self.token.username, u"foo")
    self.Open("/#/search?q=.")
    self.WaitUntil(
        self.IsVisible, "css=tr:contains('%s') "
        "span.label-success:contains('foo')" % self.client_ids[0])
  def testSystemLabelIsShownAsRegularBootstrapLabel(self):
    """System (GRR-owned) labels must NOT get the 'label-success' styling."""
    self.AddClientLabel(self.client_ids[0], u"GRR", u"bar")
    self.Open("/#/search?q=.")
    self.WaitUntil(
        self.IsVisible, "css=tr:contains('%s') "
        "span.label:not(.label-success):contains('bar')" % self.client_ids[0])
  def testLabelButtonIsDisabledByDefault(self):
    """With no client selected, the 'Add Labels' button stays disabled."""
    self.Open("/#/search?q=.")
    self.WaitUntil(self.IsVisible, "css=button[name=AddLabels][disabled]")
  def testLabelButtonIsEnabledWhenClientIsSelected(self):
    """Selecting a client's checkbox must enable the 'Add Labels' button."""
    self.Open("/#/search?q=.")
    self.WaitUntil(self.IsVisible, "css=button[name=AddLabels][disabled]")
    self.Click("css=input.client-checkbox[client_id='%s']" % self.client_ids[0])
    self.WaitUntilNot(self.IsVisible, "css=button[name=AddLabels][disabled]")
  def testAddClientsLabelsDialogShowsListOfSelectedClients(self):
    """The labeling dialog must list every client id that was selected."""
    self.Open("/#/search?q=.")
    # Select 3 clients and click 'Add Label' button.
    self.Click("css=input.client-checkbox[client_id='%s']" % self.client_ids[0])
    self.Click("css=input.client-checkbox[client_id='%s']" % self.client_ids[2])
    self.Click("css=input.client-checkbox[client_id='%s']" % self.client_ids[6])
    self.Click("css=button[name=AddLabels]:not([disabled])")
    # Check that all 3 client ids are shown in the dialog.
    self.WaitUntil(
        self.IsVisible, "css=*[name=AddClientsLabelsDialog]:"
        "contains('%s')" % self.client_ids[0])
    self.WaitUntil(
        self.IsVisible, "css=*[name=AddClientsLabelsDialog]:"
        "contains('%s')" % self.client_ids[2])
    self.WaitUntil(
        self.IsVisible, "css=*[name=AddClientsLabelsDialog]:"
        "contains('%s')" % self.client_ids[6])
  def testLabelIsAppliedCorrectlyViaAddClientsLabelsDialog(self):
    """Applying a label through the dialog must show it in the clients list."""
    self.Open("/#/search?q=.")
    # Select 1 client and click 'Add Label' button.
    self.Click("css=input.client-checkbox[client_id='%s']" % self.client_ids[0])
    self.Click("css=button[name=AddLabels]:not([disabled])")
    # Type label name.
    self.Type("css=*[name=AddClientsLabelsDialog] input[name=labelBox]",
              "issue 42")
    # Click proceed and check that success message is displayed and that
    # proceed button is replaced with close button.
    self.Click("css=*[name=AddClientsLabelsDialog] button[name=Proceed]")
    self.WaitUntil(self.IsTextPresent, "Label was successfully added")
    self.WaitUntilNot(
        self.IsVisible, "css=*[name=AddClientsLabelsDialog] "
        "button[name=Proceed]")
    # Click on "Close" button and check that dialog has disappeared.
    self.Click("css=*[name=AddClientsLabelsDialog] button[name=Close]")
    self.WaitUntilNot(self.IsVisible, "css=*[name=AddClientsLabelsDialog]")
    # Check that label has appeared in the clients list.
    self.WaitUntil(
        self.IsVisible, "css=tr:contains('%s') "
        "span.label-success:contains('issue 42')" % self.client_ids[0])
  def testAppliedLabelBecomesSearchableImmediately(self):
    """Labels must be indexed for search right after add, and de-indexed on
    removal."""
    self.Open("/#/search?q=.")
    # Select 2 clients and click 'Add Label' button.
    self.Click("css=input.client-checkbox[client_id='%s']" % self.client_ids[0])
    self.Click("css=input.client-checkbox[client_id='%s']" % self.client_ids[1])
    self.Click("css=button[name=AddLabels]:not([disabled])")
    # Type label name.
    self.Type("css=*[name=AddClientsLabelsDialog] input[name=labelBox]",
              "issue 42")
    # Click proceed and check that success message is displayed and that
    # proceed button is replaced with close button.
    self.Click("css=*[name=AddClientsLabelsDialog] button[name=Proceed]")
    self.WaitUntil(self.IsTextPresent, "Label was successfully added")
    self.WaitUntilNot(
        self.IsVisible, "css=*[name=AddClientsLabelsDialog] "
        "button[name=Proceed]")
    # Click on "Close" button and check that dialog has disappeared.
    self.Click("css=*[name=AddClientsLabelsDialog] button[name=Close]")
    self.WaitUntilNot(self.IsVisible, "css=*[name=AddClientsLabelsDialog]")
    # Search using the new label and check that the labeled clients are shown.
    self.Open("/#main=HostTable&q=label:\"issue 42\"")
    self.WaitUntil(self.IsTextPresent, "%s" % self.client_ids[0])
    self.WaitUntil(self.IsTextPresent, "%s" % self.client_ids[1])
    # Now we test if we can remove the label and if the search index is updated.
    # Select 1 client and click 'Remove Label' button.
    self.Click("css=input.client-checkbox[client_id='%s']" % self.client_ids[0])
    self.Click("css=button[name=RemoveLabels]:not([disabled])")
    # The label should already be prefilled in the dropdown.
    self.WaitUntil(self.IsTextPresent, "issue 42")
    self.Click("css=*[name=RemoveClientsLabelsDialog] button[name=Proceed]")
    # Open client search with label and check that labeled client is not shown
    # anymore.
    self.Open("/#main=HostTable&q=label:\"issue 42\"")
    self.WaitUntil(self.IsTextPresent, self.client_ids[1])
    # This client must not be in the results anymore.
    self.assertFalse(self.IsTextPresent(self.client_ids[0]))
def testSelectionIsPreservedWhenAddClientsLabelsDialogIsCancelled(self):
"""Cancelling the Add Labels dialog keeps the client checkbox checked."""
self.Open("/#/search?q=.")
# Select 1 client and click 'Add Label' button.
self.Click("css=input.client-checkbox[client_id='%s']" % self.client_ids[0])
self.Click("css=button[name=AddLabels]:not([disabled])")
# Click on "Cancel" button and check that dialog has disappeared.
self.Click("css=*[name=AddClientsLabelsDialog] button[name=Cancel]")
self.WaitUntilNot(self.IsVisible, "css=*[name=AddClientsLabelsDialog]")
# Ensure that checkbox is still checked
self.WaitUntil(
self.IsVisible, "css=input.client-checkbox["
"client_id='%s']:checked" % self.client_ids[0])
def testSelectionIsResetWhenLabelIsAppliedViaAddClientsLabelsDialog(self):
"""Completing the Add Labels dialog clears the client selection."""
self.Open("/#/search?q=.")
# Select 1 client and click 'Add Label' button.
self.Click("css=input.client-checkbox[client_id='%s']" % self.client_ids[0])
self.Click("css=button[name=AddLabels]:not([disabled])")
# Type label name, click on "Proceed" and "Close" buttons.
self.Type("css=*[name=AddClientsLabelsDialog] input[name=labelBox]",
"issue 42")
self.Click("css=*[name=AddClientsLabelsDialog] button[name=Proceed]")
self.Click("css=*[name=AddClientsLabelsDialog] button[name=Close]")
# Ensure that checkbox is not checked anymore.
self.WaitUntil(
self.IsVisible, "css=input.client-checkbox["
"client_id='%s']:not(:checked)" % self.client_ids[0])
def testCheckAllCheckboxSelectsAllClients(self):
"""The select-all checkbox toggles every client checkbox on, then off again.

Spot-checks clients at indices 0, 3 and 6 rather than iterating all rows.
"""
self.Open("/#/search?q=.")
self.WaitUntil(self.IsTextPresent, self.client_ids[0])
# Check that checkboxes for certain clients are unchecked.
self.WaitUntil(
self.IsVisible, "css=input.client-checkbox["
"client_id='%s']:not(:checked)" % self.client_ids[0])
self.WaitUntil(
self.IsVisible, "css=input.client-checkbox["
"client_id='%s']:not(:checked)" % self.client_ids[3])
self.WaitUntil(
self.IsVisible, "css=input.client-checkbox["
"client_id='%s']:not(:checked)" % self.client_ids[6])
# Click on 'check all checkbox'
self.Click("css=input.client-checkbox.select-all")
# Check that checkboxes for certain clients are now checked.
self.WaitUntil(
self.IsVisible, "css=input.client-checkbox["
"client_id='%s']:checked" % self.client_ids[0])
self.WaitUntil(
self.IsVisible, "css=input.client-checkbox["
"client_id='%s']:checked" % self.client_ids[3])
self.WaitUntil(
self.IsVisible, "css=input.client-checkbox["
"client_id='%s']:checked" % self.client_ids[6])
# Click once more on 'check all checkbox'.
self.Click("css=input.client-checkbox.select-all")
# Check that checkboxes for certain clients are now again unchecked.
self.WaitUntil(
self.IsVisible, "css=input.client-checkbox["
"client_id='%s']:not(:checked)" % self.client_ids[0])
self.WaitUntil(
self.IsVisible, "css=input.client-checkbox["
"client_id='%s']:not(:checked)" % self.client_ids[3])
self.WaitUntil(
self.IsVisible, "css=input.client-checkbox["
"client_id='%s']:not(:checked)" % self.client_ids[6])
def testClientsSelectedWithSelectAllAreShownInAddClientsLabelsDialog(self):
"""Clients picked via select-all are listed inside the Add Labels dialog."""
self.Open("/#/search?q=.")
self.WaitUntil(self.IsTextPresent, self.client_ids[0])
# Click on 'check all checkbox'.
self.Click("css=input.client-checkbox.select-all")
# Click on 'Apply Label' button.
self.Click("css=button[name=AddLabels]:not([disabled])")
# Check that client ids are shown in the dialog.
self.WaitUntil(
self.IsVisible, "css=*[name=AddClientsLabelsDialog]:"
"contains('%s')" % self.client_ids[0])
self.WaitUntil(
self.IsVisible, "css=*[name=AddClientsLabelsDialog]:"
"contains('%s')" % self.client_ids[3])
self.WaitUntil(
self.IsVisible, "css=*[name=AddClientsLabelsDialog]:"
"contains('%s')" % self.client_ids[6])
if __name__ == "__main__":
# Delegate to the test runner when executed directly.
app.run(test_lib.main)
| 40.394309 | 80 | 0.687531 |
28148ca9d928831a1ae4418d99c1b308e5044b2a
| 12,557 |
py
|
Python
|
Deprecated/move_paper_feed.py
|
pkgw/hera_mc
|
d2769a716a0e68fe709d3834362b94f547136836
|
[
"BSD-2-Clause"
] | null | null | null |
Deprecated/move_paper_feed.py
|
pkgw/hera_mc
|
d2769a716a0e68fe709d3834362b94f547136836
|
[
"BSD-2-Clause"
] | null | null | null |
Deprecated/move_paper_feed.py
|
pkgw/hera_mc
|
d2769a716a0e68fe709d3834362b94f547136836
|
[
"BSD-2-Clause"
] | null | null | null |
#! /usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# Copyright 2016 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""
Script to handle moving a PAPER feed into HERA hex.
"""
from __future__ import absolute_import, division, print_function
from hera_mc import mc, cm_utils, part_connect, cm_handling, geo_location, cm_hookup, geo_handling
import sys
import copy
def print4test(data, verbosity, comment):
    """Print a dry-run summary of *data* according to *verbosity*.

    'h' (high): print *comment* followed by one tab-indented line per entry.
    'm' (medium): print *comment* only.
    Any other verbosity: print nothing.
    """
    if verbosity not in ('h', 'm'):
        return
    print(comment + '\n')
    if verbosity == 'h':
        for entry in data:
            print('\t', entry)
def query_connection(args):
"""
Interactively prompt for any connection fields missing from *args*.

Fills in antenna_number, station_name and serial_number (prefixing 'S/N'
when the value does not already start with 'S'), asks for the date via
cm_utils, and returns the updated args namespace.
"""
# NOTE: raw_input is Python 2 only; this script predates a Python 3 port.
if args.antenna_number is None:
args.antenna_number = raw_input('PAPER antenna number being moved: ')
if args.station_name is None:
args.station_name = raw_input('Station name where it is going: ')
if args.serial_number == -1:
args.serial_number = raw_input("Serial number of HERA station/antenna "
"(use -1 if you don't know): ")
if args.serial_number[0] != 'S':
args.serial_number = 'S/N' + args.serial_number
args.date = cm_utils._query_default('date', args)
return args
def OK_to_add(args, connect, handling, geo):
"""Sanity-check the proposed station -> antenna connection.

Returns True when the station exists in geo_location, the station is not
already connected, and the PAPER antenna is present in the connections
database; prints a diagnostic for each failed check.
"""
is_OK_to_add = True
# 1 - check to see if station is in geo_location database (should be)
if not geo.is_in_database(connect.upstream_part):
print("You need to add_station.py", connect.upstream_part,
"to geo_location database")
is_OK_to_add = False
# 2 - check to see if the station is already connected (shouldn't be)
# NOTE(review): `current` is computed but never used below; it may have been
# intended as an at-date argument to is_in_connections -- confirm.
current = cm_utils._get_astropytime(args.date, args.time)
if handling.is_in_connections(connect.upstream_part, 'A'):
print('Error: ', connect.upstream_part, "already connected.")
is_OK_to_add = False
# 3 - check to see if antenna is already connected (should be, but isn't
# necesarily active)
is_connected = handling.is_in_connections(connect.downstream_part, 'P')
if not is_connected:
print('Error: ', connect.downstream_part, 'not present')
is_OK_to_add = False
# NOTE(review): this repeats the `not is_connected` test, yet the message says
# the part IS connected but inactive; the intended guard was probably
# `is_connected and not active` -- confirm against cm_handling semantics.
if not is_connected:
print('Note:', connect.downstream_part, 'is connected, but not active.')
return is_OK_to_add
def stop_previous_parts(args):
"""
Add stop times to the previously connected rev 'P' PAPER antenna and its
rev 'A' FDP feed, when they are still active.
"""
# NOTE(review): relies on the module-level `handling` created in __main__
# rather than taking it as a parameter like stop_previous_connections --
# confirm this coupling is intentional.
current = int(cm_utils._get_astropytime(args.date, args.time).gps)
args.add_new_part = False
is_connected = handling.is_in_connections(args.antenna_number, 'P')
if is_connected: # It is active
print("Stopping part %s %s at %s" % (args.antenna_number, 'P', str(args.date)))
data = [[args.antenna_number, 'P', 'stop_gpstime', current]]
# NOTE(review): if the antenna is not active, `data` is never initialized and
# the append/update below would raise NameError -- confirm intended flow.
feed = 'FDP' + args.antenna_number.strip('A')
is_connected = handling.is_in_connections(feed, 'A')
if is_connected: # It is active
print("Stopping part %s %s at %s" % (feed, 'A', str(args.date)))
data.append([feed, 'A', 'stop_gpstime', current])
if args.make_update:
part_connect.update_part(args, data)
else:
print4test(data, args.verbosity, 'Test: stop_previous_parts')
def add_new_parts(args):
    """Add the new rev 'T' antenna part and its matching rev 'B' feed part.

    Each database column is written as a separate [hpn, rev, column, value]
    row; rows are applied via part_connect.update_part when --make-update is
    set, otherwise printed for inspection.
    """
    now_gps = int(cm_utils._get_astropytime(args.date, args.time).gps)
    args.add_new_part = True
    ant = args.antenna_number
    print("Adding part %s %s at %s" % (ant, 'T', str(args.date)))
    data = [
        [ant, 'T', 'hpn', ant],
        [ant, 'T', 'hpn_rev', 'T'],
        [ant, 'T', 'hptype', 'antenna'],
        [ant, 'T', 'manufacturer_number', args.serial_number],
        [ant, 'T', 'start_gpstime', now_gps],
    ]
    feed = 'FD' + ant
    print("Adding part %s %s at %s" % (feed, 'B', str(args.date)))
    # Feed serial numbers reuse the PAPER antenna number with a 'P' prefix.
    mfg_number = 'P' + ant.strip('A')
    data.extend([
        [feed, 'B', 'hpn', feed],
        [feed, 'B', 'hpn_rev', 'B'],
        [feed, 'B', 'hptype', 'feed'],
        [feed, 'B', 'manufacturer_number', mfg_number],
        [feed, 'B', 'start_gpstime', now_gps],
    ])
    if args.make_update:
        part_connect.update_part(args, data)
    else:
        print4test(data, args.verbosity, 'Test: add_new_parts')
def stop_previous_connections(args, handling):
"""This adds stop times to the previous PAPER connections between:
station and antenna rev A
antenna revA and feed rev A
feed rev A and frontend
"""
current = int(cm_utils._get_astropytime(args.date, args.time).gps)
data = []
args.add_new_connection = False
existing = handling.get_connection_dossier(args.antenna_number, 'P', exact_match=True)
# NOTE: iteritems() is Python 2 only.
for k, c in existing.iteritems():
if k in handling.non_class_connections_dict_entries:
continue
if c.downstream_part == args.antenna_number and c.down_part_rev == 'P':
print("Stopping connection ", c)
station_connection = [c.upstream_part, c.up_part_rev, c.downstream_part,
c.down_part_rev, c.upstream_output_port,
c.downstream_input_port, c.start_gpstime,
'stop_gpstime', current]
data.append(station_connection)
if c.upstream_part == args.antenna_number and c.up_part_rev == 'P':
print("Stopping connection ", c)
feed_connection = [c.upstream_part, c.up_part_rev, c.downstream_part,
c.down_part_rev, c.upstream_output_port,
c.downstream_input_port, c.start_gpstime,
'stop_gpstime', current]
data.append(feed_connection)
# NOTE(review): feed_connection is only bound when an upstream match was found
# in the loop above; with no match the lines below raise NameError -- confirm.
feed = feed_connection[2]
feed_rev = feed_connection[3]
# NOTE(review): this lookup uses get_connections while the first used
# get_connection_dossier -- confirm both methods exist on Handling.
existing = handling.get_connections(feed, feed_rev, exact_match=True)
for k, c in existing.iteritems():
if k in handling.non_class_connections_dict_entries:
continue
if c.upstream_part == feed and c.up_part_rev == feed_rev:
print("Stopping connection ", c)
frontend_connection = [c.upstream_part, c.up_part_rev,
c.downstream_part, c.down_part_rev,
c.upstream_output_port, c.downstream_input_port,
c.start_gpstime,
'stop_gpstime', current]
data.append(frontend_connection)
if args.make_update:
part_connect.update_connection(args, data)
else:
print4test(data, args.verbosity, 'Test: stop_previous_connections')
def add_new_connection(args, c):
    """Record the new connection *c*, one row per connection column.

    Each row repeats the full seven-field connection key and then names a
    single column with its value, matching the update_connection row format.
    """
    print("Adding ", c)
    args.add_new_connection = True
    key = [c.upstream_part, c.up_part_rev, c.downstream_part, c.down_part_rev,
           c.upstream_output_port, c.downstream_input_port, c.start_gpstime]
    columns = ('upstream_part', 'up_part_rev', 'downstream_part',
               'down_part_rev', 'upstream_output_port',
               'downstream_input_port', 'start_gpstime')
    data = [key + [column, getattr(c, column)] for column in columns]
    if args.make_update:
        part_connect.update_connection(args, data)
    else:
        print4test(data, args.verbosity, 'Test: add_new_connection')
if __name__ == '__main__':
parser = mc.get_mc_argument_parser()
parser.add_argument('-a', '--antenna_number', help="PAPER antenna number",
default=None)
parser.add_argument('-s', '--station_name', help="Name of station (HH# for hera)",
default=None)
parser.add_argument('-n', '--serial_number', help="Serial number of HERA "
"station/antenna", default=-1)
cm_utils.add_date_time_args(parser)
parser.add_argument('--make-update', help="Set to actually change database "
"(otherwise it just shows).",
dest='make_update', action='store_true')
cm_utils.add_verbosity_args(parser)
args = parser.parse_args()
args.verbosity = args.verbosity.lower()
# Add extra args needed for various things
args.add_new_connection = True
args.active = True
args.specify_port = 'all'
args.revision = 'A'
args.show_levels = False
args.mapr_cols = 'all'
args.exact_match = True
# Fall back to interactive prompting when no args or required ones missing.
if len(sys.argv) == 1:
query = True
elif args.antenna_number is None or args.station_name is None:
query = True
else:
query = False
if query:
args = query_connection(args)
args.station_name = args.station_name.upper()
args.antenna_number = args.antenna_number.upper()
# Normalize names: bare numbers get the 'HH'/'A' prefixes.
# NOTE(review): names starting with 'T' are left unprefixed -- presumably
# tower/test stations; confirm the naming convention.
if args.station_name[0] != 'T':
args.station_name = 'HH' + args.station_name
if args.antenna_number[0] != 'A':
args.antenna_number = 'A' + args.antenna_number
connect = part_connect.Connections()
part = part_connect.Parts()
db = mc.connect_to_mc_db(args)
session = db.sessionmaker()
# NOTE(review): `handling` is also read as a global by stop_previous_parts.
handling = cm_handling.Handling(session)
hookup = cm_hookup.Hookup(session)
geo = geo_handling.Handling(session)
if args.make_update:
print("\nUpdating antenna/feed installation.\n")
else:
print("\nThis will only print out the actions.\n\t'--make-update' to "
"actually make changes.\n")
# This is the new station/antenna connection to be checked
connect.connection(upstream_part=args.station_name, up_part_rev='A',
downstream_part=args.antenna_number, down_part_rev='T',
upstream_output_port='ground', downstream_input_port='ground',
start_gpstime=int(cm_utils._get_astropytime(args.date, args.time).gps))
if OK_to_add(args, connect, handling, geo):
if args.make_update:
print("OK to update -- actually doing it.")
cm_utils.log('move_paper_feed', args=args)
else:
print("This is what would be happening if --make-update was enabled:")
stop_previous_parts(args)
add_new_parts(args)
stop_previous_connections(args, handling)
# Connection is set above to be checked by OK_to_add
add_new_connection(args, connect)
# Adding new antenna/feed connection
feed = 'FD' + args.antenna_number
connect.connection(upstream_part=args.antenna_number, up_part_rev='T',
downstream_part=feed, down_part_rev='B',
upstream_output_port='focus', downstream_input_port='input',
start_gpstime=int(cm_utils._get_astropytime(args.date, args.time).gps),
stop_gpstime=None)
add_new_connection(args, connect)
# Adding new feed/frontend connection
frontend = 'FE' + args.antenna_number
connect.connection(upstream_part=feed, up_part_rev='B',
downstream_part=frontend, down_part_rev='A',
upstream_output_port='terminals', downstream_input_port='input',
start_gpstime=int(cm_utils._get_astropytime(args.date, args.time).gps),
stop_gpstime=None)
add_new_connection(args, connect)
| 44.059649 | 98 | 0.639484 |
77b25be5f0b55f9785d67b872ccf012c035a3615
| 1,086 |
py
|
Python
|
docs/examples/basic_tracer/tests/test_tracer.py
|
nirsky/opentelemetry-python
|
8d09319c43a24b05d14128361de2c9afe8c856b6
|
[
"Apache-2.0"
] | null | null | null |
docs/examples/basic_tracer/tests/test_tracer.py
|
nirsky/opentelemetry-python
|
8d09319c43a24b05d14128361de2c9afe8c856b6
|
[
"Apache-2.0"
] | null | null | null |
docs/examples/basic_tracer/tests/test_tracer.py
|
nirsky/opentelemetry-python
|
8d09319c43a24b05d14128361de2c9afe8c856b6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import unittest
class TestBasicTracerExample(unittest.TestCase):
    """Run the basic tracer example end-to-end and check its span output."""

    def test_basic_tracer(self):
        # The example script lives one directory above this test file.
        here = os.path.dirname(os.path.realpath(__file__))
        script = "{}/../tracer.py".format(here)
        # Execute the example with the current interpreter; capture stdout.
        raw = subprocess.check_output((sys.executable, script))
        output = raw.decode()
        # The example is expected to emit the three named spans.
        self.assertIn('"name": "foo"', output)
        self.assertIn('"name": "bar"', output)
        self.assertIn('"name": "baz"', output)
| 35.032258 | 74 | 0.708103 |
d95217c77f930c4bdf883c1aaf422357cfdc2733
| 99 |
py
|
Python
|
BOJ/week02/string/ex11720.py
|
FridayAlgorithm/taesong_study
|
50c07ee6ead0fb5bb80e0decb03b801cbbbabf9c
|
[
"MIT"
] | null | null | null |
BOJ/week02/string/ex11720.py
|
FridayAlgorithm/taesong_study
|
50c07ee6ead0fb5bb80e0decb03b801cbbbabf9c
|
[
"MIT"
] | null | null | null |
BOJ/week02/string/ex11720.py
|
FridayAlgorithm/taesong_study
|
50c07ee6ead0fb5bb80e0decb03b801cbbbabf9c
|
[
"MIT"
] | 2 |
2020-12-27T15:03:46.000Z
|
2021-03-06T14:13:34.000Z
|
def _digit_sum(digits):
    """Return the sum of the decimal digits in the string *digits*."""
    return sum(int(ch) for ch in digits)


# BOJ 11720: first input line is the digit count (read but unused),
# second line is the string of digits to sum.
T = int(input())
N = input()
print(_digit_sum(N))
| 14.142857 | 23 | 0.565657 |
58addef4464253009e8ffbb87c8d97fddbdda2be
| 44,719 |
py
|
Python
|
pytorch_transformers/modeling_utils.py
|
ksboy/pytorch-transformers
|
a0867e3b4d2092acfdda02e53902ffa3495bc9a8
|
[
"Apache-2.0"
] | null | null | null |
pytorch_transformers/modeling_utils.py
|
ksboy/pytorch-transformers
|
a0867e3b4d2092acfdda02e53902ffa3495bc9a8
|
[
"Apache-2.0"
] | null | null | null |
pytorch_transformers/modeling_utils.py
|
ksboy/pytorch-transformers
|
a0867e3b4d2092acfdda02e53902ffa3495bc9a8
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import copy
import json
import logging
import os
from io import open
import six
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from .file_utils import cached_path
logger = logging.getLogger(__name__)
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"
TF_WEIGHTS_NAME = 'model.ckpt'
try:
from torch.nn import Identity
except ImportError:
# Older PyTorch compatibility: torch.nn.Identity is not available in older
# releases, so define an equivalent pass-through module when the import fails.
class Identity(nn.Module):
r"""A placeholder identity operator that is argument-insensitive.
"""
def __init__(self, *args, **kwargs):
# Accept and ignore any arguments so it can stand in for other modules.
super(Identity, self).__init__()
def forward(self, input):
# Return the input unchanged.
return input
if not six.PY2:
# Decorator factory: prepend the given docstring fragments to the wrapped
# function's __doc__ (used to share boilerplate docs across model classes).
def add_start_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = ''.join(docstr) + fn.__doc__
return fn
return docstring_decorator
else:
# Not possible to update class docstrings on python2
# so the decorator degrades to a no-op there.
def add_start_docstrings(*docstr):
def docstring_decorator(fn):
return fn
return docstring_decorator
class PretrainedConfig(object):
""" Base class for all configuration classes.
Handle a few common parameters and methods for loading/downloading/saving configurations.
"""
# Shortcut-name -> URL map; subclasses override this with their hosted configs.
pretrained_config_archive_map = {}
def __init__(self, **kwargs):
# Common knobs shared by all model configs; each is consumed via pop so
# remaining kwargs can be forwarded by subclasses.
self.finetuning_task = kwargs.pop('finetuning_task', None)
self.num_labels = kwargs.pop('num_labels', 2)
self.output_attentions = kwargs.pop('output_attentions', False)
self.output_hidden_states = kwargs.pop('output_hidden_states', False)
self.torchscript = kwargs.pop('torchscript', False)
def save_pretrained(self, save_directory):
""" Save a configuration object to a directory, so that it
can be re-loaded using the `from_pretrained(save_directory)` class method.
"""
assert os.path.isdir(save_directory), "Saving path should be a directory where the model and configuration can be saved"
# If we save using the predefined names, we can load using `from_pretrained`
output_config_file = os.path.join(save_directory, CONFIG_NAME)
self.to_json_file(output_config_file)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
r""" Instantiate a PretrainedConfig from a pre-trained model configuration.
Params:
**pretrained_model_name_or_path**: either:
- a string with the `shortcut name` of a pre-trained model configuration to load from cache
or download and cache if not already stored in cache (e.g. 'bert-base-uncased').
- a path to a `directory` containing a configuration file saved
using the `save_pretrained(save_directory)` method.
- a path or url to a saved configuration `file`.
**cache_dir**: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
**return_unused_kwargs**: (`optional`) bool:
- If False, then this function returns just the final configuration object.
- If True, then this functions returns a tuple `(config, unused_kwargs)` where `unused_kwargs`
is a dictionary consisting of the key/value pairs whose keys are not configuration attributes:
ie the part of kwargs which has not been used to update `config` and is otherwise ignored.
**kwargs**: (`optional`) dict:
Dictionary of key/value pairs with which to update the configuration object after loading.
- The values in kwargs of any keys which are configuration attributes will be used
to override the loaded values.
- Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
by the `return_unused_kwargs` keyword parameter.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
>>> config = BertConfig.from_pretrained('./test/saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')`
>>> config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json')
>>> config = BertConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False)
>>> assert config.output_attention == True
>>> config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attention=True,
>>> foo=False, return_unused_kwargs=True)
>>> assert config.output_attention == True
>>> assert unused_kwargs == {'foo': False}
"""
cache_dir = kwargs.pop('cache_dir', None)
return_unused_kwargs = kwargs.pop('return_unused_kwargs', False)
# Resolve the config file: shortcut name -> archive URL, directory -> the
# predefined CONFIG_NAME inside it, otherwise treat as a direct path/url.
if pretrained_model_name_or_path in cls.pretrained_config_archive_map:
config_file = cls.pretrained_config_archive_map[pretrained_model_name_or_path]
elif os.path.isdir(pretrained_model_name_or_path):
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
else:
config_file = pretrained_model_name_or_path
# redirect to the cache, if necessary
try:
resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
except EnvironmentError:
if pretrained_model_name_or_path in cls.pretrained_config_archive_map:
logger.error(
"Couldn't reach server at '{}' to download pretrained model configuration file.".format(
config_file))
else:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(cls.pretrained_config_archive_map.keys()),
config_file))
# NOTE: failure to resolve returns None rather than raising.
return None
if resolved_config_file == config_file:
logger.info("loading configuration file {}".format(config_file))
else:
logger.info("loading configuration file {} from cache at {}".format(
config_file, resolved_config_file))
# Load config
config = cls.from_json_file(resolved_config_file)
# Update config with kwargs if needed
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
to_remove.append(key)
# Keys that updated the config are removed so only unused kwargs remain.
for key in to_remove:
kwargs.pop(key, None)
logger.info("Model config %s", config)
if return_unused_kwargs:
return config, kwargs
else:
return config
@classmethod
def from_dict(cls, json_object):
"""Constructs a `Config` from a Python dictionary of parameters."""
# NOTE(review): passes vocab_size_or_config_json_file=-1, a parameter the
# base __init__ does not declare -- presumably subclasses accept it; confirm.
config = cls(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __eq__(self, other):
# Attribute-wise equality. NOTE: defining __eq__ without __hash__ makes
# instances unhashable under Python 3.
return self.__dict__ == other.__dict__
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string())
class PreTrainedModel(nn.Module):
""" Base class for all models. Handle loading/storing model config and
a simple interface for dowloading and loading pretrained models.
"""
config_class = PretrainedConfig
pretrained_model_archive_map = {}
load_tf_weights = lambda model, config, path: None
base_model_prefix = ""
input_embeddings = None
def __init__(self, config, *inputs, **kwargs):
super(PreTrainedModel, self).__init__()
if not isinstance(config, PretrainedConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
# Save config in model
self.config = config
def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None):
""" Build a resized Embedding Module from a provided token Embedding Module.
Increasing the size will add newly initialized vectors at the end
Reducing the size will remove vectors from the end
Args:
new_num_tokens: (`optional`) int
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end
Reducing the size will remove vectors from the end
If not provided or None: return the provided token Embedding Module.
Return: ``torch.nn.Embeddings``
Pointer to the resized Embedding Module or the old Embedding Module if new_num_tokens is None
"""
if new_num_tokens is None:
return old_embeddings
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
if old_num_tokens == new_num_tokens:
return old_embeddings
# Build new embeddings
new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)
new_embeddings.to(old_embeddings.weight.device)
# initialize all new embeddings (in particular added tokens)
self.init_weights(new_embeddings)
# Copy word embeddings from the previous weights
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :]
return new_embeddings
def _tie_or_clone_weights(self, first_module, second_module):
""" Tie or clone module weights depending of weither we are using TorchScript or not
"""
if self.config.torchscript:
first_module.weight = nn.Parameter(second_module.weight.clone())
else:
first_module.weight = second_module.weight
def resize_token_embeddings(self, new_num_tokens=None):
""" Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size.
Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.
Args:
new_num_tokens: (`optional`) int
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end
Reducing the size will remove vectors from the end
If not provided or None: does nothing and just returns a pointer to the input tokens Embedding Module of the model.
Return: ``torch.nn.Embeddings``
Pointer to the input tokens Embedding Module of the model
"""
base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
model_embeds = base_model._resize_token_embeddings(new_num_tokens)
if new_num_tokens is None:
return model_embeds
# Update base model and current model config
self.config.vocab_size = new_num_tokens
base_model.vocab_size = new_num_tokens
# Tie weights again if needed
if hasattr(self, 'tie_weights'):
self.tie_weights()
return model_embeds
def prune_heads(self, heads_to_prune):
""" Prunes heads of the base model.
Args:
heads_to_prune: dict of {layer_num (int): list of heads to prune in this layer (list of int)}
"""
base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
base_model._prune_heads(heads_to_prune)
def save_pretrained(self, save_directory):
""" Save a model with its configuration file to a directory, so that it
can be re-loaded using the `from_pretrained(save_directory)` class method.
"""
assert os.path.isdir(save_directory), "Saving path should be a directory where the model and configuration can be saved"
# Only save the model it-self if we are using distributed training
model_to_save = self.module if hasattr(self, 'module') else self
# Save configuration file
model_to_save.config.save_pretrained(save_directory)
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
    r"""Instantiate a pretrained pytorch model from a pre-trained model configuration.

    The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
    To train the model, you should first set it back in training mode with `model.train()`

    Params:
        **pretrained_model_name_or_path**: either:
            - a string with the `shortcut name` of a pre-trained model to load from cache
              or download and cache if not already stored in cache (e.g. 'bert-base-uncased').
            - a path to a `directory` containing a configuration file saved
              using the `save_pretrained(save_directory)` method.
            - a path or url to a tensorflow index checkpoint `file` (e.g. `./tf_model/model.ckpt.index`).
              In this case, ``from_tf`` should be set to True and a configuration object should be
              provided as `config` argument. This loading option is slower than converting the TensorFlow
              checkpoint in a PyTorch model using the provided conversion scripts and loading
              the PyTorch model afterwards.
        **model_args**: (`optional`) Sequence:
            All remaining positional arguments will be passed to the underlying model's __init__ function
        **config**: an optional configuration for the model to use instead of an automatically loaded configuration.
            Configuration can be automatically loaded when:
            - the model is a model provided by the library (loaded with a `shortcut name` of a pre-trained model), or
            - the model was saved using the `save_pretrained(save_directory)` (loaded by supplying the save directory).
        **state_dict**: an optional state dictionary for the model to use instead of a state dictionary loaded
            from saved weights file.
            This option can be used if you want to create a model from a pretrained configuration but load your own weights.
            In this case though, you should check if using `save_pretrained(dir)` and `from_pretrained(save_directory)` is not
            a simpler option.
        **cache_dir**: (`optional`) string:
            Path to a directory in which a downloaded pre-trained model
            configuration should be cached if the standard cache should not be used.
        **output_loading_info**: (`optional`) boolean:
            Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
        **pop_layer**: (`optional`) string:
            Prefix of checkpoint weight names to drop before loading, e.g. a
            pretrained classification head that should be re-initialized.
        **kwargs**: (`optional`) dict:
            Dictionary of key, values to update the configuration object after loading.
            Can be used to override selected configuration parameters. E.g. ``output_attention=True``.
            - If a configuration is provided with `config`, **kwargs will be directly passed
              to the underlying model's __init__ method.
            - If a configuration is not provided, **kwargs will be first passed to the pretrained
              model configuration class loading function (`PretrainedConfig.from_pretrained`).
              Each key of **kwargs that corresponds to a configuration attribute
              will be used to override said attribute with the supplied **kwargs value.
              Remaining keys that do not correspond to any configuration attribute will
              be passed to the underlying model's __init__ function.

    Examples::

        >>> model = BertModel.from_pretrained('bert-base-uncased')    # Download model and configuration from S3 and cache.
        >>> model = BertModel.from_pretrained('./test/saved_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
        >>> model = BertModel.from_pretrained('bert-base-uncased', output_attention=True)  # Update configuration during loading
        >>> assert model.config.output_attention == True
        >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
        >>> config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
        >>> model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
    """
    config = kwargs.pop('config', None)
    state_dict = kwargs.pop('state_dict', None)
    cache_dir = kwargs.pop('cache_dir', None)
    from_tf = kwargs.pop('from_tf', False)
    output_loading_info = kwargs.pop('output_loading_info', False)
    # Bug fix: was `kwargs.get('pop_layer', False)` followed by a redundant
    # `kwargs.pop('pop_layer', None)`; a single pop does both jobs.
    pop_layer = kwargs.pop('pop_layer', False)

    # Load config
    if config is None:
        config, model_kwargs = cls.config_class.from_pretrained(
            pretrained_model_name_or_path, *model_args,
            cache_dir=cache_dir, return_unused_kwargs=True,
            **kwargs
        )
    else:
        model_kwargs = kwargs

    # Resolve the weights archive: shortcut name, local directory, or direct path/URL.
    if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
        archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]
    elif os.path.isdir(pretrained_model_name_or_path):
        if from_tf:
            # Directly load from a TensorFlow checkpoint
            archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
        else:
            archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
    else:
        if from_tf:
            # Directly load from a TensorFlow checkpoint
            archive_file = pretrained_model_name_or_path + ".index"
        else:
            archive_file = pretrained_model_name_or_path
    # redirect to the cache, if necessary
    try:
        resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
    except EnvironmentError:
        if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
            logger.error(
                "Couldn't reach server at '{}' to download pretrained weights.".format(
                    archive_file))
        else:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find any file "
                "associated to this path or url.".format(
                    pretrained_model_name_or_path,
                    ', '.join(cls.pretrained_model_archive_map.keys()),
                    archive_file))
        return None
    if resolved_archive_file == archive_file:
        logger.info("loading weights file {}".format(archive_file))
    else:
        logger.info("loading weights file {} from cache at {}".format(
            archive_file, resolved_archive_file))

    # Instantiate model.
    model = cls(config, *model_args, **model_kwargs)

    if state_dict is None and not from_tf:
        # Deserialize on CPU so loading does not require the training device.
        state_dict = torch.load(resolved_archive_file, map_location='cpu')
    if from_tf:
        # Directly load from a TensorFlow checkpoint
        return cls.load_tf_weights(model, config, resolved_archive_file[:-6])  # Remove the '.index'

    # pop_classifier_layer: drop all checkpoint weights whose name starts with
    # the given prefix (e.g. a pretrained head that should be re-initialized).
    if pop_layer:
        # Consistency fix: use the module logger instead of raw debug print()s.
        for key in list(state_dict.keys()):
            if key.startswith(pop_layer):
                logger.info("Removing '%s' from the loaded state dict", key)
                state_dict.pop(key)

    # Convert old format to new format if needed from a PyTorch state_dict
    # (historical TF-style parameter names: gamma/beta -> weight/bias).
    old_keys = []
    new_keys = []
    for key in state_dict.keys():
        new_key = None
        if 'gamma' in key:
            new_key = key.replace('gamma', 'weight')
        if 'beta' in key:
            new_key = key.replace('beta', 'bias')
        if new_key:
            old_keys.append(key)
            new_keys.append(new_key)
    for old_key, new_key in zip(old_keys, new_keys):
        state_dict[new_key] = state_dict.pop(old_key)

    # Load from a PyTorch state_dict
    missing_keys = []
    unexpected_keys = []
    error_msgs = []
    # copy state_dict so _load_from_state_dict can modify it
    metadata = getattr(state_dict, '_metadata', None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata

    def load(module, prefix=''):
        # Recursively load each submodule, mirroring nn.Module.load_state_dict
        # but collecting problems instead of raising immediately.
        local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
        module._load_from_state_dict(
            state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
        for name, child in module._modules.items():
            if child is not None:
                load(child, prefix + name + '.')

    # Make sure we are able to load base models as well as derived models (with heads)
    start_prefix = ''
    model_to_load = model
    if not hasattr(model, cls.base_model_prefix) and any(s.startswith(cls.base_model_prefix) for s in state_dict.keys()):
        # Checkpoint carries a head but the model is a bare base model: strip the prefix.
        start_prefix = cls.base_model_prefix + '.'
    if hasattr(model, cls.base_model_prefix) and not any(s.startswith(cls.base_model_prefix) for s in state_dict.keys()):
        # Checkpoint is a bare base model but the model has a head: load into the base submodule.
        model_to_load = getattr(model, cls.base_model_prefix)

    load(model_to_load, prefix=start_prefix)
    if len(missing_keys) > 0:
        logger.info("Weights of {} not initialized from pretrained model: {}".format(
            model.__class__.__name__, missing_keys))
    if len(unexpected_keys) > 0:
        logger.info("Weights from pretrained model not used in {}: {}".format(
            model.__class__.__name__, unexpected_keys))
    if len(error_msgs) > 0:
        raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
            model.__class__.__name__, "\n\t".join(error_msgs)))

    if hasattr(model, 'tie_weights'):
        model.tie_weights()  # make sure word embedding weights are still tied

    # Set model in evaluation mode to deactivate DropOut modules by default
    model.eval()

    if output_loading_info:
        loading_info = {"missing_keys": missing_keys, "unexpected_keys": unexpected_keys, "error_msgs": error_msgs}
        return model, loading_info
    return model
class Conv1D(nn.Module):
    """Linear-style layer with transposed weights, as defined by Radford et al.
    for OpenAI GPT (and also used in GPT-2).

    Behaves like ``nn.Linear(nx, nf)`` except that the weight matrix is stored
    transposed, with shape ``(nx, nf)``.
    """

    def __init__(self, nf, nx):
        super(Conv1D, self).__init__()
        self.nf = nf
        # Same initialization as the original GPT code: N(0, 0.02) weights,
        # zero bias.
        weight = torch.empty(nx, nf)
        nn.init.normal_(weight, std=0.02)
        self.weight = nn.Parameter(weight)
        self.bias = nn.Parameter(torch.zeros(nf))

    def forward(self, x):
        # Flatten all leading dimensions, apply bias + x @ W, then restore them
        # with the feature dimension replaced by nf.
        out_shape = x.size()[:-1] + (self.nf,)
        flat = x.view(-1, x.size(-1))
        out = torch.addmm(self.bias, flat, self.weight)
        return out.view(*out_shape)
class PoolerStartLogits(nn.Module):
    """ Compute SQuAD start_logits from sequence hidden states. """

    def __init__(self, config):
        super(PoolerStartLogits, self).__init__()
        # One logit per position, projected from the hidden state.
        self.dense = nn.Linear(config.hidden_size, 1)

    def forward(self, hidden_states, p_mask=None):
        """ Args:
            **p_mask**: (`optional`) ``torch.FloatTensor`` of shape `(batch_size, seq_len)`
                invalid position mask such as query and special symbols (PAD, SEP, CLS)
                1.0 means token should be masked.
        """
        logits = self.dense(hidden_states).squeeze(-1)
        if p_mask is None:
            return logits
        # Push masked positions to a huge negative value so softmax ignores them.
        return logits * (1 - p_mask) - 1e30 * p_mask
class PoolerEndLogits(nn.Module):
    """ Compute SQuAD end_logits from sequence hidden states and start token hidden state.
    """

    def __init__(self, config):
        super(PoolerEndLogits, self).__init__()
        # Combine each position's hidden state with the start-token state,
        # then project down to a single logit per position.
        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
        self.activation = nn.Tanh()
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dense_1 = nn.Linear(config.hidden_size, 1)

    def forward(self, hidden_states, start_states=None, start_positions=None, p_mask=None):
        """ Args:
            One of ``start_states``, ``start_positions`` should be not None.
            If both are set, ``start_positions`` overrides ``start_states``.
            **start_states**: tensor of shape identical to ``hidden_states``;
                hidden states of the first tokens for the labeled span.
            **start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``;
                position of the first token for the labeled span.
            **p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)``;
                mask of invalid positions (PAD, SEP, CLS); 1.0 means masked.
        """
        assert start_states is not None or start_positions is not None, "One of start_states, start_positions should be not None"
        if start_positions is not None:
            slen, hsz = hidden_states.shape[-2:]
            # Pick the hidden state at each start position and broadcast it
            # across the whole sequence: (bsz, 1, hsz) -> (bsz, slen, hsz).
            gather_index = start_positions[:, None, None].expand(-1, -1, hsz)
            start_states = hidden_states.gather(-2, gather_index).expand(-1, slen, -1)
        joined = torch.cat([hidden_states, start_states], dim=-1)
        hidden = self.LayerNorm(self.activation(self.dense_0(joined)))
        logits = self.dense_1(hidden).squeeze(-1)
        if p_mask is not None:
            logits = logits * (1 - p_mask) - 1e30 * p_mask
        return logits
class PoolerAnswerClass(nn.Module):
    """ Compute SQuAD 2.0 answer class from classification and start tokens hidden states. """

    def __init__(self, config):
        super(PoolerAnswerClass, self).__init__()
        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
        self.activation = nn.Tanh()
        self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)

    def forward(self, hidden_states, start_states=None, start_positions=None, cls_index=None):
        """
        Args:
            One of ``start_states``, ``start_positions`` should be not None.
            If both are set, ``start_positions`` overrides ``start_states``.
            **start_states**: tensor of shape identical to ``hidden_states``;
                hidden states of the first tokens for the labeled span.
            **start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``;
                position of the first token for the labeled span.
            **cls_index**: ``torch.LongTensor`` of shape ``(batch_size,)``;
                position of the CLS token. If None, take the last token.
        note(Original repo):
            no dependency on end_feature so that we can obtain one single `cls_logits`
            for each sample
        """
        hsz = hidden_states.shape[-1]
        assert start_states is not None or start_positions is not None, "One of start_states, start_positions should be not None"
        if start_positions is not None:
            # Extract the hidden state at each labeled start position: (bsz, hsz).
            gather_index = start_positions[:, None, None].expand(-1, -1, hsz)
            start_states = hidden_states.gather(-2, gather_index).squeeze(-2)
        if cls_index is None:
            # No explicit CLS position given: fall back to the last token.
            cls_token_state = hidden_states[:, -1, :]
        else:
            gather_index = cls_index[:, None, None].expand(-1, -1, hsz)
            cls_token_state = hidden_states.gather(-2, gather_index).squeeze(-2)
        joined = torch.cat([start_states, cls_token_state], dim=-1)
        return self.dense_1(self.activation(self.dense_0(joined))).squeeze(-1)
class SQuADHead(nn.Module):
    r""" A SQuAD head inspired by XLNet.

    Parameters:
        config (:class:`~pytorch_transformers.XLNetConfig`): Model configuration class with all the parameters of the model.

    Inputs:
        **hidden_states**: ``torch.FloatTensor`` of shape ``(batch_size, seq_len, hidden_size)``
            hidden states of sequence tokens
        **start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
            position of the first token for the labeled span.
        **end_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
            position of the last token for the labeled span.
        **cls_index**: torch.LongTensor of shape ``(batch_size,)``
            position of the CLS token. If None, take the last token.
        **is_impossible**: ``torch.LongTensor`` of shape ``(batch_size,)``
            Whether the question has a possible answer in the paragraph or not.
        **p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)``
            Mask of invalid position such as query and special symbols (PAD, SEP, CLS)
            1.0 means token should be masked.

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned if both ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
        **start_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``
            Log probabilities for the top config.start_n_top start token possibilities (beam-search).
        **start_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``
            Indices for the top config.start_n_top start token possibilities (beam-search).
        **end_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
            Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
        **end_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
            Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
        **cls_logits**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.FloatTensor`` of shape ``(batch_size,)``
            Log probabilities for the ``is_impossible`` label of the answers.
    """
    def __init__(self, config):
        super(SQuADHead, self).__init__()
        # Beam widths used by the inference-time start/end search below.
        self.start_n_top = config.start_n_top
        self.end_n_top = config.end_n_top
        # Sub-heads: per-position start logits, start-conditioned end logits,
        # and the answerability classifier.
        self.start_logits = PoolerStartLogits(config)
        self.end_logits = PoolerEndLogits(config)
        self.answer_class = PoolerAnswerClass(config)

    def forward(self, hidden_states, start_positions=None, end_positions=None,
                cls_index=None, is_impossible=None, p_mask=None):
        outputs = ()
        # Start logits are computed the same way for training and inference.
        start_logits = self.start_logits(hidden_states, p_mask=p_mask)
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, let's remove the dimension added by batch splitting
            for x in (start_positions, end_positions, cls_index, is_impossible):
                if x is not None and x.dim() > 1:
                    x.squeeze_(-1)
            # during training, compute the end logits based on the ground truth of the start position
            end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)
            loss_fct = CrossEntropyLoss()
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            if cls_index is not None and is_impossible is not None:
                # Predict answerability from the representation of CLS and START
                cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
                loss_fct_cls = nn.BCEWithLogitsLoss()
                cls_loss = loss_fct_cls(cls_logits, is_impossible)
                # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
                total_loss += cls_loss * 0.5
            outputs = (total_loss,) + outputs
        else:
            # during inference, compute the end logits based on beam search
            bsz, slen, hsz = hidden_states.size()
            # NOTE(review): named *_log_probs but computed with softmax, not
            # log_softmax — matches the original XLNet code; confirm intended.
            start_log_probs = F.softmax(start_logits, dim=-1)  # shape (bsz, slen)
            # Keep the start_n_top best start candidates, then gather and
            # broadcast their hidden states over the full sequence so the end
            # head can score every (start candidate, end position) pair at once.
            start_top_log_probs, start_top_index = torch.topk(start_log_probs, self.start_n_top, dim=-1)  # shape (bsz, start_n_top)
            start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz)  # shape (bsz, start_n_top, hsz)
            start_states = torch.gather(hidden_states, -2, start_top_index_exp)  # shape (bsz, start_n_top, hsz)
            start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1)  # shape (bsz, slen, start_n_top, hsz)
            hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(start_states)  # shape (bsz, slen, start_n_top, hsz)
            p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
            end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
            # Softmax over sequence positions (dim 1), per start candidate.
            end_log_probs = F.softmax(end_logits, dim=1)  # shape (bsz, slen, start_n_top)
            end_top_log_probs, end_top_index = torch.topk(end_log_probs, self.end_n_top, dim=1)  # shape (bsz, end_n_top, start_n_top)
            # Flatten the (end_n_top, start_n_top) beam grid into one axis.
            end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
            end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
            # Probability-weighted average of hidden states as the "start"
            # representation fed to the answerability classifier.
            start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)
            cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)
            outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + outputs
        # return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits
        # or (if labels are provided) (total_loss,)
        return outputs
class SequenceSummary(nn.Module):
    r""" Compute a single vector summary of a sequence hidden states according to various possibilities:
        Args of the config class:
            summary_type:
                - 'last' => [default] take the last token hidden state (like XLNet)
                - 'first' => take the first token hidden state (like Bert)
                - 'mean' => take the mean of all tokens hidden states
                - 'token_ids' => supply a Tensor of classification token indices (GPT/GPT-2)
                - 'attn' => Not implemented now, use multi-head attention
            summary_use_proj: Add a projection after the vector extraction
            summary_proj_to_labels: If True, the projection outputs to config.num_labels classes (otherwise to hidden_size). Default: False.
            summary_activation: 'tanh' => add a tanh activation to the output, Other => no activation. Default
            summary_first_dropout: Add a dropout before the projection and activation
            summary_last_dropout: Add a dropout after the projection and activation
    """
    def __init__(self, config):
        super(SequenceSummary, self).__init__()
        # Bug fix: the original tested hasattr(config, 'summary_use_proj') here,
        # so a config that defined summary_type without summary_use_proj was
        # silently ignored — and a config without summary_type then crashed on
        # the unconditional `config.summary_type == 'attn'` check below.
        self.summary_type = config.summary_type if hasattr(config, 'summary_type') else 'last'
        if self.summary_type == 'attn':
            # We should use a standard multi-head attention module with absolute positional embedding for that.
            # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
            # We can probably just use the multi-head attention module of PyTorch >=1.1.0
            raise NotImplementedError

        # Optional projection after the vector extraction (Identity when unused).
        self.summary = Identity()
        if hasattr(config, 'summary_use_proj') and config.summary_use_proj:
            if hasattr(config, 'summary_proj_to_labels') and config.summary_proj_to_labels and config.num_labels > 0:
                num_classes = config.num_labels
            else:
                num_classes = config.hidden_size
            self.summary = nn.Linear(config.hidden_size, num_classes)

        # Optional activation and dropouts around the projection.
        self.activation = Identity()
        if hasattr(config, 'summary_activation') and config.summary_activation == 'tanh':
            self.activation = nn.Tanh()
        self.first_dropout = Identity()
        if hasattr(config, 'summary_first_dropout') and config.summary_first_dropout > 0:
            self.first_dropout = nn.Dropout(config.summary_first_dropout)
        self.last_dropout = Identity()
        if hasattr(config, 'summary_last_dropout') and config.summary_last_dropout > 0:
            self.last_dropout = nn.Dropout(config.summary_last_dropout)

    def forward(self, hidden_states, token_ids=None):
        """ hidden_states: float Tensor in shape [bsz, seq_len, hidden_size], the hidden-states of the last layer.
            token_ids: [optional] index of the classification token if summary_type == 'token_ids',
                shape (bsz,) or more generally (bsz, ...) where ... are optional leading dimensions of hidden_states.
                if summary_type == 'token_ids' and token_ids is None:
                    we take the last token of the sequence as classification token
        """
        if self.summary_type == 'last':
            output = hidden_states[:, -1]
        elif self.summary_type == 'first':
            output = hidden_states[:, 0]
        elif self.summary_type == 'mean':
            output = hidden_states.mean(dim=1)
        elif self.summary_type == 'token_ids':
            if token_ids is None:
                # Default to the last sequence position for every sample.
                token_ids = torch.full_like(hidden_states[..., :1, :], hidden_states.shape[-2]-1, dtype=torch.long)
            else:
                token_ids = token_ids.unsqueeze(-1).unsqueeze(-1)
                token_ids = token_ids.expand((-1,) * (token_ids.dim()-1) + (hidden_states.size(-1),))
            # shape of token_ids: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
            output = hidden_states.gather(-2, token_ids).squeeze(-2)  # shape (bsz, XX, hidden_size)
        elif self.summary_type == 'attn':
            raise NotImplementedError

        output = self.first_dropout(output)
        output = self.summary(output)
        output = self.activation(output)
        output = self.last_dropout(output)
        return output
def prune_linear_layer(layer, index, dim=0):
    """Prune an ``nn.Linear`` layer to keep only the entries selected by
    ``index`` along ``dim`` (0 = output features, 1 = input features).

    Returns the pruned layer as a new layer, on the same device, with
    ``requires_grad=True``. Used to remove heads.
    """
    index = index.to(layer.weight.device)
    kept_weight = layer.weight.index_select(dim, index).clone().detach()
    kept_bias = None
    if layer.bias is not None:
        # Pruning input features (dim == 1) leaves the bias untouched.
        kept_bias = layer.bias.clone().detach() if dim == 1 else layer.bias[index].clone().detach()
    out_size = list(layer.weight.size())
    out_size[dim] = len(index)
    pruned = nn.Linear(out_size[1], out_size[0], bias=layer.bias is not None).to(layer.weight.device)
    # Copy the surviving parameters in without tracking gradients, then
    # re-enable them for training.
    for param, value in ((pruned.weight, kept_weight), (pruned.bias, kept_bias)):
        if value is None:
            continue
        param.requires_grad = False
        param.copy_(value.contiguous())
        param.requires_grad = True
    return pruned
def prune_conv1d_layer(layer, index, dim=1):
    """Prune a ``Conv1D`` layer to keep only the entries selected by ``index``
    along ``dim``. A Conv1D works like a Linear layer (see e.g. BERT) but its
    weights are transposed, hence the default pruning axis of 1.

    Returns the pruned layer as a new layer, on the same device, with
    ``requires_grad=True``. Used to remove heads.
    """
    index = index.to(layer.weight.device)
    kept_weight = layer.weight.index_select(dim, index).clone().detach()
    # Pruning along dim 0 (input features) leaves the bias untouched.
    if dim == 0:
        kept_bias = layer.bias.clone().detach()
    else:
        kept_bias = layer.bias[index].clone().detach()
    out_size = list(layer.weight.size())
    out_size[dim] = len(index)
    pruned = Conv1D(out_size[1], out_size[0]).to(layer.weight.device)
    # Copy the surviving parameters in without tracking gradients, then
    # re-enable them for training.
    for param, value in ((pruned.weight, kept_weight), (pruned.bias, kept_bias)):
        param.requires_grad = False
        param.copy_(value.contiguous())
        param.requires_grad = True
    return pruned
def prune_layer(layer, index, dim=None):
    """Prune a ``Conv1D`` or ``nn.Linear`` layer to keep only entries in
    ``index``, dispatching to the type-specific helper.

    ``dim`` defaults to each layer type's natural pruning axis (0 for Linear,
    1 for the transposed Conv1D). Returns the pruned layer as a new layer with
    ``requires_grad=True``. Used to remove heads.
    """
    if isinstance(layer, nn.Linear):
        return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
    if isinstance(layer, Conv1D):
        return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
    raise ValueError("Can't prune layer of class {}".format(layer.__class__))
| 49.46792 | 157 | 0.643843 |
18d832114f024bc66d52c7382062f8dc2afba1f4
| 1,325 |
py
|
Python
|
egs/librispeech_adv/asr1/local/create_erep_feats_json.py
|
brijmohan/espnet
|
971e43ab429b287517406fa1be3cafd55b992793
|
[
"Apache-2.0"
] | null | null | null |
egs/librispeech_adv/asr1/local/create_erep_feats_json.py
|
brijmohan/espnet
|
971e43ab429b287517406fa1be3cafd55b992793
|
[
"Apache-2.0"
] | null | null | null |
egs/librispeech_adv/asr1/local/create_erep_feats_json.py
|
brijmohan/espnet
|
971e43ab429b287517406fa1be3cafd55b992793
|
[
"Apache-2.0"
] | null | null | null |
'''
Copies filterbank json and replaces feats and shape with new erep feats
'''
import os
from os.path import exists, join
import json
# for kaldi io
import kaldi_io_py

# Source jsons (filterbank features) and the replacement erep features.
old_json_dir = 'dump/train_100/deltafalse/split_utt_spk'
new_feats_scp = 'data/erep_train_100/feats.scp'
new_json_dir = 'data/erep_train_100/json'

if not exists(new_json_dir):
    os.makedirs(new_json_dir)

print('Create feats dictionary...')
# Map utterance-id -> {path, shape} for every entry of the new feats.scp;
# the shape comes from reading each matrix once with kaldi_io_py.
feats_dict = {}
with open(new_feats_scp) as scp_file:
    for scp_line in scp_file.read().splitlines():
        fields = scp_line.split()
        feats_dict[fields[0]] = {
            u'path': fields[1],
            u'shape': kaldi_io_py.read_mat(fields[1]).shape
        }
print('Done reading features!')

print('Reading data jsons...')
for json_name in (name for name in os.listdir(old_json_dir) if name.endswith('.json')):
    print('Reading ' + json_name)
    with open(join(old_json_dir, json_name)) as src, \
            open(join(new_json_dir, json_name), 'w') as dst:
        data = json.load(src)
        # Rewrite each utterance's first input stream to point at the erep feats.
        for utt_id in data[u'utts'].keys():
            entry = data[u'utts'][utt_id][u'input'][0]
            entry[u'feat'] = feats_dict[utt_id][u'path']
            entry[u'shape'] = feats_dict[utt_id][u'shape']
        json.dump(data, dst, indent=4)
print('Done!')
| 27.604167 | 79 | 0.592453 |
e4d706eb78658407c729e0374a3d30a79642646d
| 5,472 |
py
|
Python
|
src/sage/categories/examples/posets.py
|
bopopescu/sage
|
2d495be78e0bdc7a0a635454290b27bb4f5f70f0
|
[
"BSL-1.0"
] | 4 |
2020-07-17T04:49:44.000Z
|
2020-07-29T06:33:51.000Z
|
src/sage/categories/examples/posets.py
|
Ivo-Maffei/sage
|
467fbc70a08b552b3de33d9065204ee9cbfb02c7
|
[
"BSL-1.0"
] | 2 |
2018-10-30T13:40:20.000Z
|
2020-07-23T12:13:30.000Z
|
src/sage/categories/examples/posets.py
|
dimpase/sage
|
468f23815ade42a2192b0a9cd378de8fdc594dcd
|
[
"BSL-1.0"
] | 7 |
2021-11-08T10:01:59.000Z
|
2022-03-03T11:25:52.000Z
|
"""
Examples of posets
"""
#*****************************************************************************
# Copyright (C) 2011 Nicolas M. Thiery <nthiery at users.sf.net>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.structure.parent import Parent
from sage.structure.unique_representation import UniqueRepresentation
from sage.categories.all import Posets
from sage.structure.element_wrapper import ElementWrapper
from sage.sets.set import Set, Set_object_enumerated
from sage.sets.positive_integers import PositiveIntegers
class FiniteSetsOrderedByInclusion(UniqueRepresentation, Parent):
    r"""
    An example of a poset: finite sets ordered by inclusion

    This class provides a minimal implementation of a poset

    EXAMPLES::

        sage: P = Posets().example(); P
        An example of a poset: sets ordered by inclusion

    We conclude by running systematic tests on this poset::

        sage: TestSuite(P).run(verbose = True)
        running ._test_an_element() . . . pass
        running ._test_cardinality() . . . pass
        running ._test_category() . . . pass
        running ._test_elements() . . .
          Running the test suite of self.an_element()
          running ._test_category() . . . pass
          running ._test_eq() . . . pass
          running ._test_new() . . . pass
          running ._test_not_implemented_methods() . . . pass
          running ._test_pickling() . . . pass
          pass
        running ._test_elements_eq_reflexive() . . . pass
        running ._test_elements_eq_symmetric() . . . pass
        running ._test_elements_eq_transitive() . . . pass
        running ._test_elements_neq() . . . pass
        running ._test_eq() . . . pass
        running ._test_new() . . . pass
        running ._test_not_implemented_methods() . . . pass
        running ._test_pickling() . . . pass
        running ._test_some_elements() . . . pass
    """

    def __init__(self):
        r"""
        EXAMPLES::

            sage: P = Posets().example(); P
            An example of a poset: sets ordered by inclusion
            sage: P.category()
            Category of posets
            sage: type(P)
            <class 'sage.categories.examples.posets.FiniteSetsOrderedByInclusion_with_category'>
            sage: TestSuite(P).run()
        """
        # Register this parent in the Posets category; elements and the
        # partial order are supplied by ``Element`` and ``le`` below.
        Parent.__init__(self, category = Posets())

    def _repr_(self):
        r"""
        TESTS::

            sage: S = Posets().example()
            sage: S._repr_()
            'An example of a poset: sets ordered by inclusion'
        """
        return "An example of a poset: sets ordered by inclusion"

    def le(self, x, y):
        r"""
        Returns whether `x` is a subset of `y`

        EXAMPLES::

            sage: P = Posets().example()
            sage: P.le( P(Set([1,3])), P(Set([1,2,3])) )
            True
            sage: P.le( P(Set([1,3])), P(Set([1,3])) )
            True
            sage: P.le( P(Set([1,2])), P(Set([1,3])) )
            False
        """
        # Elements wrap Sage enumerated sets; the poset order is set inclusion.
        return x.value.issubset(y.value)

    def an_element(self):
        r"""
        Returns an element of this poset

        EXAMPLES::

            sage: B = Posets().example()
            sage: B.an_element()
            {1, 4, 6}
        """
        return self(Set([1,4,6]))

    # Elements are plain wrappers (``x.value``) around enumerated Sage sets.
    class Element(ElementWrapper):
        wrapped_class = Set_object_enumerated
class PositiveIntegersOrderedByDivisibilityFacade(UniqueRepresentation, Parent):
    r"""
    An example of a facade poset: the positive integers ordered by divisibility

    This class provides a minimal implementation of a facade poset

    EXAMPLES::

        sage: P = Posets().example("facade"); P
        An example of a facade poset: the positive integers ordered by divisibility
        sage: P(5)
        5
        sage: P(0)
        Traceback (most recent call last):
        ...
        ValueError: Can't coerce `0` in any parent `An example of a facade poset: the positive integers ordered by divisibility` is a facade for
        sage: 3 in P
        True
        sage: 0 in P
        False
    """

    # NOTE(review): this is the Sage ``Set`` type, while the parent is a
    # facade for PositiveIntegers — looks copy-pasted from the example class
    # above; confirm this is intentional.
    element_class = type(Set([]))

    def __init__(self):
        r"""
        EXAMPLES::

            sage: P = Posets().example("facade"); P
            An example of a facade poset: the positive integers ordered by divisibility
            sage: P.category()
            Category of facade posets
            sage: type(P)
            <class 'sage.categories.examples.posets.PositiveIntegersOrderedByDivisibilityFacade_with_category'>
            sage: TestSuite(P).run()
        """
        # Facade parent: elements are genuine positive integers, not wrappers;
        # only the divisibility order is provided here.
        Parent.__init__(self, facade = (PositiveIntegers(),), category = Posets())

    def _repr_(self):
        r"""
        TESTS::

            sage: S = Posets().example("facade")
            sage: S._repr_()
            'An example of a facade poset: the positive integers ordered by divisibility'
        """
        return "An example of a facade poset: the positive integers ordered by divisibility"

    def le(self, x, y):
        r"""
        Returns whether `x` divides `y`

        EXAMPLES::

            sage: P = Posets().example("facade")
            sage: P.le(3, 6)
            True
            sage: P.le(3, 3)
            True
            sage: P.le(3, 7)
            False
        """
        return x.divides(y)
| 31.268571 | 144 | 0.561038 |
39dcc313d4291404f441cc2b1be3e85a7d0687a4
| 2,494 |
py
|
Python
|
align_faces.py
|
contail/Face-Alignment-with-OpenCV-and-Python
|
5d0439aa854b559fa396ce7f3e97748be42c15c9
|
[
"MIT"
] | 13 |
2018-10-05T17:05:20.000Z
|
2022-01-27T00:54:57.000Z
|
align_faces.py
|
contail/Face-Alignment-with-OpenCV-and-Python
|
5d0439aa854b559fa396ce7f3e97748be42c15c9
|
[
"MIT"
] | null | null | null |
align_faces.py
|
contail/Face-Alignment-with-OpenCV-and-Python
|
5d0439aa854b559fa396ce7f3e97748be42c15c9
|
[
"MIT"
] | 6 |
2019-03-18T06:03:41.000Z
|
2022-03-02T18:15:01.000Z
|
from imutils.face_utils import FaceAligner
from imutils.face_utils import rect_to_bb
import argparse
import imutils
import dlib
import cv2
import os
# ---------------------------------------------------------------------------
# Batch face alignment: walk --root_dir recursively, detect faces in every
# image, align each face with dlib's 68-point landmarks, and write 128px
# aligned crops into a mirrored directory tree derived from --des_dir.
# ---------------------------------------------------------------------------
ap = argparse.ArgumentParser()
ap.add_argument("-r", "--root_dir", required=True,
                help="path to root directories of input images")
ap.add_argument("-d", "--des_dir", required=True,
                help="path to destination directories of output images")
args = vars(ap.parse_args())

# dlib HOG face detector + landmark predictor (the .dat model file must be
# present in the working directory) feeding the imutils aligner.
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
fa = FaceAligner(predictor, desiredFaceWidth=128)

root_dir = args["root_dir"]
des_dir = args["des_dir"].split("/")[-1]
if not os.path.exists(des_dir):
    os.mkdir(des_dir)

# every file under root_dir, recursively
input_files = [os.path.join(dp, f) for dp, dn, fn in os.walk(os.path.expanduser(root_dir)) for f in fn]

# loop over the input images and their face detections
for input_file in input_files:
    try:
        print(input_file)
        image = cv2.imread(input_file)
        image = imutils.resize(image, width=800)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # mirror the source sub-directory structure under the destination root
        des_path = '/'.join(['..', des_dir] + input_file.split("/")[2:-1])
        des_list = des_path.split("/")
        for i in range(len(des_list)):
            elem = des_list[:i + 1]
            if elem and not os.path.exists("/".join(elem)):
                os.mkdir("/".join(elem))
        file_name = input_file.split("/")[-1]
        out_file = os.path.join(des_path, file_name)
        # detect faces in the grayscale image (upsample twice for small faces)
        rects = detector(gray, 2)
        for rect in rects:
            # extract the ROI of the *original* face, then align the face
            # using facial landmarks
            try:
                (x, y, w, h) = rect_to_bb(rect)
                faceOrig = imutils.resize(image[y:y + h, x:x + w], width=128)
                faceAligned = fa.align(image, gray, rect)
                if os.path.isfile(out_file):
                    # BUGFIX: the original split on "\\", which on POSIX
                    # returns the whole path and broke duplicate-name handling;
                    # use basename instead.
                    file_name = os.path.basename(out_file)
                    out_file = os.path.join(des_path, file_name.split(".")[0] + "(0)." + file_name.split(".")[-1])
                cv2.imwrite(out_file, faceAligned)
                cv2.waitKey(0)
                print(out_file)
            except Exception as exc:  # narrowed from bare except: keep going per-face
                print("CANNOT SAVE: {0}".format(exc))
                continue
    except Exception as exc:  # narrowed from bare except: pass — report, then skip image
        print("SKIPPING {0}: {1}".format(input_file, exc))
| 37.223881 | 110 | 0.595028 |
0716c555ebe53d1dc694a5dc9cafb8f9679ce67a
| 10,424 |
py
|
Python
|
audio/augmentation.py
|
girishdhegde/augmentation
|
02d5859e365d959da368dbf8e8a9e0800faea032
|
[
"MIT"
] | 1 |
2021-08-24T17:57:26.000Z
|
2021-08-24T17:57:26.000Z
|
audio/augmentation.py
|
girishdhegde/augmentation
|
02d5859e365d959da368dbf8e8a9e0800faea032
|
[
"MIT"
] | null | null | null |
audio/augmentation.py
|
girishdhegde/augmentation
|
02d5859e365d959da368dbf8e8a9e0800faea032
|
[
"MIT"
] | 1 |
2021-08-25T08:16:16.000Z
|
2021-08-25T08:16:16.000Z
|
import random
import glob
import torch
import torch.nn as nn
import torchaudio
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
import numpy as np
import sounddevice as sd
# Author: Girish Hegde
def rms(samples):
    """Return the Root Mean Square (RMS) of *samples* as a scalar tensor."""
    mean_square = (samples ** 2).mean()
    return torch.sqrt(mean_square)
def calculate_desired_noise_rms(signal_rms, snr):
    """
    Compute the RMS a noise signal must have so that mixing it into a clean
    signal of RMS ``signal_rms`` yields the desired signal-to-noise ratio.

    Derivation: snr_db = 20*log10(signal_rms / noise_rms), hence
    noise_rms = signal_rms / 10**(snr_db / 20).

    Args:
        signal_rms: RMS of the clean signal, typically in [0.0, 1.0].
        snr: desired SNR in dB, commonly between -20 and 60.

    Based on:
        - https://github.com/Sato-Kunihiko/audio-SNR/blob/8d2c933b6c0afe6f1203251f4877e7a1068a6130/create_mixed_audio_file.py#L20
        - https://github.com/iver56/audiomentations/blob/master/audiomentations/core/utils.py
    """
    snr_linear = 10 ** (snr / 20)
    return signal_rms / snr_linear
def rms_normalize(samples):
    """Power-normalize samples so every channel has (approximately) unit RMS.

    Taken from:
    https://github.com/asteroid-team/torch-audiomentations/blob/master/torch_audiomentations/utils/io.py

    Parameters
    ----------
    samples : (channel, time) Tensor
        Single or multichannel samples

    Returns
    -------
    samples: (channel, time) Tensor
        Power-normalized samples
    """
    # keepdim lets the per-channel RMS broadcast directly over the time axis;
    # the epsilon guards against division by zero on silent channels.
    per_channel_rms = torch.sqrt((samples ** 2).mean(dim=1, keepdim=True))
    return samples / (per_channel_rms + 1e-8)
def db_to_amplitude(db):
    """Convert a decibel value to a linear amplitude factor (db = 20*log10(amplitude))."""
    exponent = db / 20
    return 10 ** exponent
class Shift(nn.Module):
    """Randomly shift the audio along the time axis.

    The shift amount is now drawn uniformly from [min_shift, max_shift] on
    every call. (The original implementation froze the random draw at
    construction time for the 'seconds' and 'samples' units, so every forward
    pass reused one fixed offset — defeating the purpose of the augmentation.)
    """

    def __init__(
        self,
        min_shift = -0.5,
        max_shift = 0.5,
        shift_unit = "fraction",
        rollover = True,
        p = 0.5,
        sample_rate = None,
    ):
        """
        Args:
            min_shift (float, optional): Defaults to -0.5.
            max_shift (float, optional): Defaults to 0.5.
            shift_unit (str, optional): unit of min_shift and max_shift.
                "fraction": Fraction of the total sound length
                "samples": Number of audio samples
                "seconds": Number of seconds (requires sample_rate)
            rollover (bool, optional): wrap shifted samples around instead of
                zero-filling the vacated region. Defaults to True.
            p (float, optional): probability of applying the transform.
                Defaults to 0.5.
            sample_rate (int, optional): required when shift_unit="seconds".

        Raises:
            ValueError: for an unknown shift_unit, or "seconds" without a
                sample_rate (previously a TypeError deep inside __init__).
        """
        super().__init__()
        self.min_shift = min_shift
        self.max_shift = max_shift
        self.shift_unit = shift_unit
        self.rollover = rollover
        self.p = p
        self.sample_rate = sample_rate
        if shift_unit == 'fraction':
            self.get_shift = self._fraction_shift
        elif shift_unit == 'seconds':
            if sample_rate is None:
                raise ValueError("sample_rate is required when shift_unit='seconds'")
            self.get_shift = self._seconds_shift
        elif shift_unit == 'samples':
            self.get_shift = self._samples_shift
        else:
            # previously an unknown unit silently left get_shift unset and
            # failed later with an AttributeError in forward()
            raise ValueError("unknown shift_unit: {0}".format(shift_unit))

    def _fraction_shift(self, x):
        # shift by a random fraction of the waveform length
        n = x.shape[-1]
        return int(random.uniform(self.min_shift * n, self.max_shift * n))

    def _seconds_shift(self, x):
        # shift by a random number of seconds, converted to samples
        return int(random.uniform(
            self.min_shift * self.sample_rate, self.max_shift * self.sample_rate
        ))

    def _samples_shift(self, x):
        # shift by a random number of raw samples
        return int(random.uniform(self.min_shift, self.max_shift))

    def forward(self, x):
        """
        Args:
            x (Tensor): Waveform of shape [ch, n] or [n]

        Returns:
            Tensor: shifted waveform of the same shape.
        """
        if random.random() < self.p:
            shifts = self.get_shift(x)
            x = torch.roll(x, shifts, dims=-1)
            if not self.rollover:
                # zero out the samples that wrapped around instead of rolling
                if shifts > 0:
                    x[..., :shifts] = 0.
                elif shifts < 0:
                    x[..., -shifts:] = 0.
        return x
class Gain(nn.Module):
    """Scale the waveform by a random gain factor drawn in decibels.

    Multiplying by a random amplitude factor makes a model less sensitive to
    the overall volume of its input. The output is clamped to [-1, 1].
    """

    def __init__(
        self,
        min_gain_db = -18.0,
        max_gain_db = 6.0,
        p = 0.5,
    ):
        """
        Args:
            min_gain_db (float, optional): lower gain bound in dB. Defaults to -18.0.
            max_gain_db (float, optional): upper gain bound in dB. Defaults to 6.0.
            p (float, optional): probability of applying the gain. Defaults to 0.5.
        """
        super().__init__()
        self.min_gain_db = min_gain_db
        self.max_gain_db = max_gain_db
        self.p = p

    def forward(self, x):
        """
        Args:
            x (Tensor): Waveform of shape [ch, n] or [n]
        """
        if random.random() < self.p:
            gain_db = random.uniform(self.min_gain_db, self.max_gain_db)
            x = x * db_to_amplitude(gain_db)
        # clamp unconditionally, matching the original contract
        return x.clamp(-1., 1.)
class GaussianNoise(nn.Module):
    """Add white Gaussian noise at a random SNR drawn from [min_snr, max_snr]."""

    def __init__(
        self,
        min_snr,
        max_snr,
        p=0.5
    ):
        """
        Args:
            min_snr (float/int): minimum signal-to-noise ratio in dB.
            max_snr (float/int): maximum signal-to-noise ratio in dB.
            p (float, optional): probability of applying the noise. Defaults to 0.5.
        """
        super().__init__()
        self.min_snr = min_snr
        self.max_snr = max_snr
        self.p = p

    def forward(self, x):
        """
        Args:
            x (Tensor): Waveform of shape [ch, n] or [n]
        """
        if random.random() < self.p:
            # scale unit Gaussian noise so the mix hits the drawn SNR
            snr_db = random.uniform(self.min_snr, self.max_snr)
            target_noise_rms = calculate_desired_noise_rms(rms(x), snr_db)
            x = x + target_noise_rms * torch.randn_like(x)
        return x.clamp(-1., 1.)
class BackgroundNoise(nn.Module):
    """Mix a randomly chosen background noise into the signal at a random SNR."""

    def __init__(
        self,
        min_snr,
        max_snr,
        noise,
        p=0.5
    ):
        """
        Args:
            min_snr ([float/int]): minimum signal to noise ratio
            max_snr ([float/int]): maximum signal to noise ratio
            noise (Path/List/Tensor, optional): Noise audio
                - Path: Directory path to .wav noise files
                - List: List noise files path
                - Tensor: [total, ch, n] Audio Tensor
            p (float, optional): probability of applying the transform. Defaults to 0.5.

        Based on:
            https://github.com/asteroid-team/torch-audiomentations/blob/master/torch_audiomentations/utils/io.py
        """
        super().__init__()
        self.min_snr = min_snr
        self.max_snr = max_snr
        self.p = p
        # NOTE(review): independent `if`s (not elif); if `noise` matches none
        # of the three forms, self.noise/self.is_file are never set and
        # random_background will fail later with AttributeError — presumably
        # callers always pass one of the documented forms.
        if isinstance(noise, str):
            # directory of .wav files, loaded lazily from disk on each draw
            self.noise = glob.glob(f'{noise}/*.wav')
            self.is_file = True
        if isinstance(noise, (list, tuple, set)):
            # explicit collection of file paths
            self.noise = noise
            self.is_file = True
        if isinstance(noise, torch.Tensor):
            # pre-loaded audio; random.choice picks a clip along dim 0
            self.noise = noise
            self.is_file = False

    def random_background(self, audio):
        """Assemble a unit-RMS background the same length as *audio*.

        Randomly drawn noise clips are concatenated (cropping the last one at
        a random offset) until the target length is reached.
        """
        pieces = []
        ch, missing_num_samples = audio.shape
        while missing_num_samples > 0:
            if self.is_file:
                noise = random.choice(self.noise)
                noise, sr = torchaudio.load(noise)
            else:
                noise = random.choice(self.noise)
            background_ch, background_num_samples = noise.shape
            if background_ch < ch:
                # broadcast a mono noise clip to all channels
                # (NOTE(review): assumes the clip has exactly 1 channel)
                noise = repeat(noise, '1 n -> b n', b=ch)
            if background_num_samples > missing_num_samples:
                # clip is longer than what is still needed: crop a random window
                sample_offset = random.randint(
                    0, background_num_samples - missing_num_samples
                )
                background_samples = noise[..., sample_offset: sample_offset+missing_num_samples]
                missing_num_samples = 0
            else:
                background_samples = noise
                missing_num_samples -= background_num_samples

            pieces.append(background_samples)

        # the inner call to rms_normalize ensures concatenated pieces share the same RMS (1)
        # the outer call to rms_normalize ensures that the resulting background has an RMS of 1
        # (this simplifies "apply_transform" logic)
        return rms_normalize(
            torch.cat([rms_normalize(piece) for piece in pieces], dim=1)
        )

    def forward(self, x):
        """
        Args:
            x ([Tensor]): Waveform of shape [ch, n]
        """
        if random.random() < self.p:
            # scale the unit-RMS background so the mix reaches the drawn SNR
            xrms = rms(x)
            noiserms = calculate_desired_noise_rms(
                xrms,
                random.uniform(self.min_snr, self.max_snr)
            )
            noise = self.random_background(x)
            x = x + noiserms*noise
        return x.clamp(-1., 1.)
if __name__ == '__main__':
    # Demo/smoke test: load a sample clip, print stats, plot and play it.
    # The commented-out sections below exercise each augmentation in turn;
    # uncomment one at a time to inspect its effect.
    from utils import play_audio, plot_waveform, print_stats

    audio_path = '../data/voice.wav'
    audio, sample_rate = torchaudio.load(audio_path)
    # keep only the first channel, shaped [1, n] as the transforms expect
    audio = audio[0].view(1, -1)

    print_stats(audio, sample_rate=sample_rate)
    plot_waveform(audio, sample_rate, title='Original Waveform', save='../output/original_audio.png')
    play_audio(audio, sample_rate, )

    # shift = Shift(
    #     min_shift = -0.5,
    #     max_shift = 0.5,
    #     shift_unit = "fraction",
    #     rollover = True,
    #     p = 1.0,
    #     sample_rate = None,
    # )
    # shifted = shift(audio)
    # plot_waveform(shifted, sample_rate, title='Shift', save='../output/shifted_audio.png')
    # play_audio(shifted, sample_rate, '../output/shifted_audio.wav')

    # gain = Gain(min_gain_db=-18.0, max_gain_db=6.0, p=1.0)
    # boosted = gain(audio)
    # print_stats(boosted, sample_rate=sample_rate)
    # plot_waveform(boosted, sample_rate, title='Gain', save='../output/gain_audio.png')
    # play_audio(boosted, sample_rate, '../output/gain_audio.wav')

    # gnoise = GaussianNoise(.5, .7, p=1.0)
    # noisy = gnoise(audio)
    # print_stats(noisy, sample_rate=sample_rate)
    # plot_waveform(noisy, sample_rate, title='Gausian noise', save='../output/gain_audio.png')
    # play_audio(noisy, sample_rate, '../output/gnoise_audio.wav')

    # bnoise = BackgroundNoise(0.5, 0.7, noise=['../data/train.wav', ], p=1.0)
    # noisy = bnoise(audio)
    # print_stats(noisy, sample_rate=sample_rate)
    # plot_waveform(noisy, sample_rate, title='Backgroud noise', save='../output/gain_audio.png')
    # play_audio(noisy, sample_rate, '../output/bnoise_audio.wav')
444f1a6e8549a27e356acc04a3029ee841eb1d50
| 1,023 |
py
|
Python
|
pylurch/contract/client/context/training.py
|
tingiskhan/pylurch
|
f94581f7e31fb32fd41d74e55d731147bf64e4ac
|
[
"MIT"
] | null | null | null |
pylurch/contract/client/context/training.py
|
tingiskhan/pylurch
|
f94581f7e31fb32fd41d74e55d731147bf64e4ac
|
[
"MIT"
] | null | null | null |
pylurch/contract/client/context/training.py
|
tingiskhan/pylurch
|
f94581f7e31fb32fd41d74e55d731147bf64e4ac
|
[
"MIT"
] | null | null | null |
from typing import List, Dict, Any
from ...database import Label, Score, Package
from .update import UpdateContext
class TrainingContext(UpdateContext):
    """Update context that records labels, scores and package versions for a
    training session; queued records are committed by the parent context."""

    def __init__(self, client, training_session):
        super().__init__(client, training_session, None)

    def add_label(self, label: str):
        """Queue one label for the current session."""
        self._to_commit.put(Label(session_id=self._session.id, label=label))

    def add_labels(self, labels: List[str]):
        """Queue every label in *labels*."""
        for lbl in labels:
            self.add_label(lbl)

    def add_score(self, key: str, value: float):
        """Queue one named score for the current session."""
        self._to_commit.put(Score(session_id=self._session.id, key=key, value=value))

    def add_scores(self, scores: Dict[str, Any]):
        """Queue every (key, value) pair in *scores*."""
        for score_key, score_value in scores.items():
            self.add_score(score_key, score_value)

    def add_package(self, package: str, version: str):
        """Queue one package/version pair for the current session."""
        self._to_commit.put(Package(session_id=self._session.id, name=package, version=version))

    def add_packages(self, packages: Dict[str, str]):
        """Queue every package -> version entry in *packages*."""
        for pkg_name, pkg_version in packages.items():
            self.add_package(pkg_name, pkg_version)
| 34.1 | 96 | 0.673509 |
a708d1728ce200ccd6edb80f2dadf9e8c9cc90f3
| 38,350 |
py
|
Python
|
salt/states/smartos.py
|
ev0rtex/salt
|
769cfe1a7de85a35083423af6abb12220f48a07e
|
[
"Apache-2.0"
] | null | null | null |
salt/states/smartos.py
|
ev0rtex/salt
|
769cfe1a7de85a35083423af6abb12220f48a07e
|
[
"Apache-2.0"
] | null | null | null |
salt/states/smartos.py
|
ev0rtex/salt
|
769cfe1a7de85a35083423af6abb12220f48a07e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Management of SmartOS Standalone Compute Nodes
:maintainer: Jorge Schrauwen <[email protected]>
:maturity: new
:depends: vmadm, imgadm
:platform: smartos
.. versionadded:: 2016.3.0
.. code-block:: yaml
vmtest.example.org:
smartos.vm_present:
- config:
reprovision: true
- vmconfig:
image_uuid: c02a2044-c1bd-11e4-bd8c-dfc1db8b0182
brand: joyent
alias: vmtest
quota: 5
max_physical_memory: 512
tags:
label: 'test vm'
owner: 'sjorge'
nics:
"82:1b:8e:49:e9:12":
nic_tag: trunk
mtu: 1500
ips:
- 172.16.1.123/16
- 192.168.2.123/24
vlan_id: 10
"82:1b:8e:49:e9:13":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
filesystems:
"/bigdata":
source: "/bulk/data"
type: lofs
options:
- ro
- nodevices
kvmtest.example.org:
smartos.vm_present:
- vmconfig:
brand: kvm
alias: kvmtest
cpu_type: host
ram: 512
vnc_port: 9
tags:
label: 'test kvm'
owner: 'sjorge'
disks:
disk0
size: 2048
model: virtio
compression: lz4
boot: true
nics:
"82:1b:8e:49:e9:15":
nic_tag: trunk
mtu: 1500
ips:
- dhcp
vlan_id: 30
cleanup_images:
smartos.image_vacuum
.. note::
Keep in mind that when removing properties from vmconfig they will not get
removed from the vm's current configuration, except for nics, disk, tags, ...
they get removed via add_*, set_*, update_*, and remove_*. Properties must
be manually reset to their default value.
The same behavior as when using 'vmadm update'.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import logging
import os
# Import Salt libs
import salt.utils.atomicfile
import salt.utils.data
import salt.utils.files
# Import 3rd party libs
from salt.ext import six
# module-level logger, named after this module
log = logging.getLogger(__name__)

# Define the state's virtual name
__virtualname__ = 'smartos'
def __virtual__():
    '''
    Provides smartos state provided for SmartOS
    '''
    # Guard clause: both execution module functions must be loadable on this
    # minion, otherwise report why the state module is unavailable.
    have_modules = 'vmadm.create' in __salt__ and 'imgadm.list' in __salt__
    if not have_modules:
        return (
            False,
            '{0} state module can only be loaded on SmartOS compute nodes'.format(
                __virtualname__
            )
        )
    return True
def _split_docker_uuid(uuid):
'''
Split a smartos docker uuid into repo and tag
'''
if uuid:
uuid = uuid.split(':')
if len(uuid) == 2:
tag = uuid[1]
repo = uuid[0]
if len(repo.split('/')) == 2:
return repo, tag
return None, None
def _is_uuid(uuid):
'''
Check if uuid is a valid smartos uuid
Example: e69a0918-055d-11e5-8912-e3ceb6df4cf8
'''
if uuid and list((len(x) for x in uuid.split('-'))) == [8, 4, 4, 4, 12]:
return True
return False
def _is_docker_uuid(uuid):
    '''
    Check if uuid is a valid smartos docker uuid

    Example plexinc/pms-docker:plexpass
    '''
    repo, tag = _split_docker_uuid(uuid)
    # valid exactly when the split produced a repo/tag pair
    return bool(repo or tag)
def _load_config():
    '''
    Load and parse /usbkey/config into a dict.

    Keys are lower-cased; surrounding double quotes are stripped from values.
    Comment lines (starting with ``#``) and lines without ``=`` are skipped.

    Returns:
        dict: configuration properties; empty when the file does not exist.
    '''
    config = {}

    if os.path.isfile('/usbkey/config'):
        with salt.utils.files.fopen('/usbkey/config', 'r') as config_file:
            for optval in config_file:
                # NOTE(review): relies on salt.utils.stringutils being
                # importable as an attribute of the already-imported
                # salt.utils package — confirm against the salt version in use
                optval = salt.utils.stringutils.to_unicode(optval)
                if optval[0] == '#':
                    continue
                if '=' not in optval:
                    continue
                # BUGFIX: split on the first '=' only, so values that
                # themselves contain '=' are no longer truncated
                optval = optval.split('=', 1)
                config[optval[0].lower()] = optval[1].strip().strip('"')
    log.debug('smartos.config - read /usbkey/config: %s', config)
    return config
def _write_config(config):
    '''
    Atomically write ``config`` back to /usbkey/config.

    Keys are written in sorted order under a "generated by salt" header;
    values containing spaces are wrapped in double quotes.

    Returns:
        bool: True on success, False when the file could not be written.
    '''
    try:
        # atomic_open writes to a temp file and renames it into place, so a
        # failed write never leaves a half-written /usbkey/config behind
        with salt.utils.atomicfile.atomic_open('/usbkey/config', 'w') as config_file:
            config_file.write("#\n# This file was generated by salt\n#\n")
            for prop in salt.utils.odict.OrderedDict(sorted(config.items())):
                if ' ' in six.text_type(config[prop]):
                    # quote values with embedded spaces unless already quoted
                    if not config[prop].startswith('"') or not config[prop].endswith('"'):
                        config[prop] = '"{0}"'.format(config[prop])
                config_file.write(
                    salt.utils.stringutils.to_str(
                        "{0}={1}\n".format(prop, config[prop])
                    )
                )
        log.debug('smartos.config - wrote /usbkey/config: %s', config)
    except IOError:
        return False

    return True
def _parse_vmconfig(config, instances):
    '''
    Normalize a vm_present state config into vmadm-shaped data.

    ``instances`` maps an instance property name (e.g. 'nics') to the key that
    uniquely identifies each instance (e.g. 'mac'). Instance collections given
    as {id: {...}} mappings are flattened into lists of dicts with the id
    folded back in under its key name.

    Returns:
        OrderedDict on success, or None when ``config`` is not an OrderedDict.
    '''
    vmconfig = None

    if isinstance(config, (salt.utils.odict.OrderedDict)):
        vmconfig = salt.utils.odict.OrderedDict()
        for prop in config:
            if prop not in instances:
                # plain property: copy through untouched
                vmconfig[prop] = config[prop]
            else:
                # instance property: flatten {id: cfg} into [cfg + {id_key: id}]
                if not isinstance(config[prop], (salt.utils.odict.OrderedDict)):
                    continue
                vmconfig[prop] = []
                for instance in config[prop]:
                    # NOTE: mutates the caller's nested dict in place
                    instance_config = config[prop][instance]
                    instance_config[instances[prop]] = instance
                    ## some property are lowercase
                    if 'mac' in instance_config:
                        instance_config['mac'] = instance_config['mac'].lower()
                    vmconfig[prop].append(instance_config)
    else:
        log.error('smartos.vm_present::parse_vmconfig - failed to parse')

    return vmconfig
def _get_instance_changes(current, state):
    '''
    Compare the current and desired config of one instance (nic, disk,
    filesystem) and return the modified properties.

    Changes whose old or new value is the empty string are dropped from the
    result, mirroring how vmadm reports unset values.

    Returns:
        dict: {prop: {'old': ..., 'new': ...}} for each real change.
    '''
    # compare configs once (the original built two unused key sets and called
    # compare_dicts a second time just to iterate it)
    changed = salt.utils.data.compare_dicts(current, state)
    for change in list(changed):
        if changed[change]['old'] == "" or changed[change]['new'] == "":
            del changed[change]

    return changed
def config_present(name, value):
    '''
    Ensure configuration property is set to value in /usbkey/config

    name : string
        name of property
    value : string
        value of property
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # read the current configuration
    config = _load_config()

    # normalize booleans and empty values before comparing/storing
    if isinstance(value, (bool)):
        value = 'true' if value else 'false'
    if not value:
        value = ""

    if name in config and six.text_type(config[name]) == six.text_type(value):
        # nothing to do
        ret['result'] = True
        ret['comment'] = 'property {0} already has value "{1}"'.format(name, value)
    else:
        # add or update the property
        verb = 'updated' if name in config else 'added'
        ret['result'] = True
        ret['comment'] = '{0} property {1} with value "{2}"'.format(verb, name, value)
        ret['changes'][name] = value
        config[name] = value

    # persist, unless this is a test run or nothing changed
    if not __opts__['test'] and ret['changes']:
        ret['result'] = _write_config(config)

    return ret
def config_absent(name):
    '''
    Ensure configuration property is absent in /usbkey/config

    name : string
        name of property
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # read the current configuration
    config = _load_config()

    ret['result'] = True
    if name not in config:
        # already absent, nothing to do
        ret['comment'] = 'property {0} is absent'.format(name)
    else:
        # drop the property
        ret['comment'] = 'property {0} deleted'.format(name)
        ret['changes'][name] = None
        del config[name]

    # persist, unless this is a test run or nothing changed
    if not __opts__['test'] and ret['changes']:
        ret['result'] = _write_config(config)

    return ret
def source_present(name, source_type='imgapi'):
    '''
    Ensure an image source is present on the computenode

    name : string
        source url
    source_type : string
        source type (imgapi or docker)
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if name in __salt__['imgadm.sources']():
        # already configured, nothing to do
        ret['result'] = True
        ret['comment'] = 'image source {0} is present'.format(name)
        return ret

    # add the new source (skipped when running in test mode)
    if __opts__['test']:
        res = {}
        ret['result'] = True
    else:
        res = __salt__['imgadm.source_add'](name, source_type)
        ret['result'] = (name in res)

    if ret['result']:
        ret['comment'] = 'image source {0} added'.format(name)
        ret['changes'][name] = 'added'
    else:
        ret['comment'] = 'image source {0} not added'.format(name)
        if 'Error' in res:
            ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])

    return ret
def source_absent(name):
    '''
    Ensure an image source is absent on the computenode

    name : string
        source url
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if name not in __salt__['imgadm.sources']():
        # already gone, nothing to do
        ret['result'] = True
        ret['comment'] = 'image source {0} is absent'.format(name)
        return ret

    # remove the source (skipped when running in test mode)
    if __opts__['test']:
        res = {}
        ret['result'] = True
    else:
        res = __salt__['imgadm.source_delete'](name)
        ret['result'] = (name not in res)

    if ret['result']:
        ret['comment'] = 'image source {0} deleted'.format(name)
        ret['changes'][name] = 'deleted'
    else:
        ret['comment'] = 'image source {0} not deleted'.format(name)
        if 'Error' in res:
            ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])

    return ret
def image_present(name):
    '''
    Ensure image is present on the computenode

    name : string
        uuid of image (either an imgadm uuid or a docker "repo/name:tag" uuid)
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if _is_docker_uuid(name) and __salt__['imgadm.docker_to_uuid'](name):
        # docker image was imported
        ret['result'] = True
        ret['comment'] = 'image {0} ({1}) is present'.format(
            name,
            __salt__['imgadm.docker_to_uuid'](name),
        )
    elif name in __salt__['imgadm.list']():
        # image was already imported
        ret['result'] = True
        ret['comment'] = 'image {0} is present'.format(name)
    else:
        # add image
        if _is_docker_uuid(name):
            # NOTE: we cannot query available docker images
            available_images = [name]
        else:
            available_images = __salt__['imgadm.avail']()

        if name in available_images:
            if __opts__['test']:
                # test mode: report success with a synthetic result dict
                ret['result'] = True
                res = {}
                if _is_docker_uuid(name):
                    # placeholder uuid; the real uuid is only known post-import
                    res['00000000-0000-0000-0000-000000000000'] = name
                else:
                    res[name] = available_images[name]
            else:
                res = __salt__['imgadm.import'](name)
                if _is_uuid(name):
                    ret['result'] = (name in res)
                elif _is_docker_uuid(name):
                    # docker imports are verified via the uuid mapping
                    ret['result'] = __salt__['imgadm.docker_to_uuid'](name) is not None
            if ret['result']:
                ret['comment'] = 'image {0} imported'.format(name)
                ret['changes'] = res
            else:
                ret['comment'] = 'image {0} was unable to be imported'.format(name)
        else:
            ret['result'] = False
            ret['comment'] = 'image {0} does not exists'.format(name)

    return ret
def image_absent(name):
    '''
    Ensure image is absent on the computenode

    name : string
        uuid of image

    .. note::
        computenode.image_absent will only remove the image if it is not used
        by a vm.
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # resolve docker uuids to the underlying imgadm uuid
    uuid = None
    if _is_uuid(name):
        uuid = name
    if _is_docker_uuid(name):
        uuid = __salt__['imgadm.docker_to_uuid'](name)

    if not uuid or uuid not in __salt__['imgadm.list']():
        # image not imported
        ret['result'] = True
        ret['comment'] = 'image {0} is absent'.format(name)
    else:
        # check if image in use by vm
        if uuid in __salt__['vmadm.list'](order='image_uuid'):
            ret['result'] = False
            ret['comment'] = 'image {0} currently in use by a vm'.format(name)
        else:
            # delete image
            # BUGFIX: initialize image_count before the test-mode branch so
            # the comment selection below cannot hit a NameError when
            # __opts__['test'] is set (previously it was only assigned in
            # the non-test branch).
            image_count = 0
            if __opts__['test']:
                ret['result'] = True
            else:
                image = __salt__['imgadm.get'](uuid)
                if image['manifest']['name'] == 'docker-layer':
                    # NOTE: docker images are made of multiple layers, loop over them
                    while image:
                        image_count += 1
                        __salt__['imgadm.delete'](image['manifest']['uuid'])
                        if 'origin' in image['manifest']:
                            image = __salt__['imgadm.get'](image['manifest']['origin'])
                        else:
                            image = None
                else:
                    # NOTE: normal images can just be delete
                    __salt__['imgadm.delete'](uuid)

                ret['result'] = uuid not in __salt__['imgadm.list']()

            if image_count:
                ret['comment'] = 'image {0} and {1} children deleted'.format(name, image_count)
            else:
                ret['comment'] = 'image {0} deleted'.format(name)
            ret['changes'][name] = None

    return ret
def image_vacuum(name):
    '''
    Delete images not in use or installed via image_present

    An image is kept when it is referenced by an ``image_present`` state in
    the rendered lowstate, or when it backs an existing vm; every other
    imported image is deleted.

    .. warning::
        Only image_present states that are included via the
        top file will be detected.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # list of images to keep
    images = []

    # retrieve image_present state data for host
    for state in __salt__['state.show_lowstate']():
        # don't throw exceptions when not highstate run
        if 'state' not in state:
            continue

        # skip if not from this state module
        if state['state'] != __virtualname__:
            continue
        # skip if not image_present
        if state['fun'] not in ['image_present']:
            continue
        # keep images installed via image_present
        if 'name' in state:
            if _is_uuid(state['name']):
                images.append(state['name'])
            elif _is_docker_uuid(state['name']):
                # docker uuids are kept under their resolved imgadm uuid
                state['name'] = __salt__['imgadm.docker_to_uuid'](state['name'])
                if not state['name']:
                    continue
                images.append(state['name'])

    # retrieve images in use by vms
    for image_uuid in __salt__['vmadm.list'](order='image_uuid'):
        if image_uuid not in images:
            images.append(image_uuid)

    # purge unused images
    ret['result'] = True
    for image_uuid in __salt__['imgadm.list']():
        if image_uuid in images:
            continue
        image = __salt__['imgadm.get'](image_uuid)
        if image['manifest']['name'] == 'docker-layer':
            # NOTE: docker images are made of multiple layers, loop over them
            # (each layer and its 'origin' ancestors are deleted in turn)
            while image:
                image_uuid = image['manifest']['uuid']
                if image_uuid in __salt__['imgadm.delete'](image_uuid):
                    ret['changes'][image_uuid] = None
                else:
                    ret['result'] = False
                    ret['comment'] = 'failed to delete images'
                if 'origin' in image['manifest']:
                    image = __salt__['imgadm.get'](image['manifest']['origin'])
                else:
                    image = None
        else:
            # NOTE: normal images can just be delete
            if image_uuid in __salt__['imgadm.delete'](image_uuid):
                ret['changes'][image_uuid] = None
            else:
                ret['result'] = False
                ret['comment'] = 'failed to delete images'

    if ret['result'] and not ret['changes']:
        ret['comment'] = 'no images deleted'
    elif ret['result'] and ret['changes']:
        ret['comment'] = 'images deleted'

    return ret
def vm_present(name, vmconfig, config=None):
'''
Ensure vm is present on the computenode
name : string
hostname of vm
vmconfig : dict
options to set for the vm
config : dict
fine grain control over vm_present
.. note::
The following configuration properties can be toggled in the config parameter.
- kvm_reboot (true) - reboots of kvm zones if needed for a config update
- auto_import (false) - automatic importing of missing images
- reprovision (false) - reprovision on image_uuid changes
- enforce_customer_metadata (true) - false = add metadata only, true = add, update, and remove metadata
- enforce_tags (true) - false = add tags only, true = add, update, and remove tags
.. note::
State ID is used as hostname. Hostnames must be unique.
.. note::
        If hostname is provided in vmconfig this will take precedence over the State ID.
This allows multiple states to be applied to the same vm.
.. note::
The following instances should have a unique ID.
- nic : mac
- filesystem: target
- disk : path or diskN for zvols
e.g. disk0 will be the first disk added, disk1 the 2nd,...
'''
name = name.lower()
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
# config defaults
state_config = config if config else {}
config = {
'kvm_reboot': True,
'auto_import': False,
'reprovision': False,
}
config.update(state_config)
log.debug('smartos.vm_present::%s::config - %s', name, config)
# map special vmconfig parameters
# collections have set/remove handlers
# instances have add/update/remove handlers and a unique id
vmconfig_type = {
'collection': [
'tags',
'customer_metadata',
'internal_metadata',
'routes'
],
'instance': {
'nics': 'mac',
'disks': 'path',
'filesystems': 'target'
},
'create_only': [
'filesystems'
]
}
# parse vmconfig
vmconfig = _parse_vmconfig(vmconfig, vmconfig_type['instance'])
log.debug('smartos.vm_present::%s::vmconfig - %s', name, vmconfig)
# set hostname if needed
if 'hostname' not in vmconfig:
vmconfig['hostname'] = name
# check if vm exists
if vmconfig['hostname'] in __salt__['vmadm.list'](order='hostname'):
# update vm
ret['result'] = True
# expand vmconfig
vmconfig = {
'state': vmconfig,
'current': __salt__['vmadm.get'](vmconfig['hostname'], key='hostname'),
'changed': {},
'reprovision_uuid': None
}
# prepare reprovision
if 'image_uuid' in vmconfig['state']:
vmconfig['reprovision_uuid'] = vmconfig['state']['image_uuid']
vmconfig['state']['image_uuid'] = vmconfig['current']['image_uuid']
# disks need some special care
if 'disks' in vmconfig['state']:
new_disks = []
for disk in vmconfig['state']['disks']:
path = False
if 'disks' in vmconfig['current']:
for cdisk in vmconfig['current']['disks']:
if cdisk['path'].endswith(disk['path']):
path = cdisk['path']
break
if not path:
del disk['path']
else:
disk['path'] = path
new_disks.append(disk)
vmconfig['state']['disks'] = new_disks
# process properties
for prop in vmconfig['state']:
# skip special vmconfig_types
if prop in vmconfig_type['instance'] or \
prop in vmconfig_type['collection'] or \
prop in vmconfig_type['create_only']:
continue
# skip unchanged properties
if prop in vmconfig['current']:
if isinstance(vmconfig['current'][prop], (list)) or isinstance(vmconfig['current'][prop], (dict)):
if vmconfig['current'][prop] == vmconfig['state'][prop]:
continue
else:
if "{0}".format(vmconfig['current'][prop]) == "{0}".format(vmconfig['state'][prop]):
continue
# add property to changeset
vmconfig['changed'][prop] = vmconfig['state'][prop]
# process collections
for collection in vmconfig_type['collection']:
# skip create only collections
if collection in vmconfig_type['create_only']:
continue
# enforcement
enforce = True
if 'enforce_{0}'.format(collection) in config:
enforce = config['enforce_{0}'.format(collection)]
log.debug('smartos.vm_present::enforce_%s = %s', collection, enforce)
# process add and update for collection
if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
for prop in vmconfig['state'][collection]:
# skip unchanged properties
if prop in vmconfig['current'][collection] and \
vmconfig['current'][collection][prop] == vmconfig['state'][collection][prop]:
continue
# skip update if not enforcing
if not enforce and prop in vmconfig['current'][collection]:
continue
# create set_ dict
if 'set_{0}'.format(collection) not in vmconfig['changed']:
vmconfig['changed']['set_{0}'.format(collection)] = {}
# add property to changeset
vmconfig['changed']['set_{0}'.format(collection)][prop] = vmconfig['state'][collection][prop]
# process remove for collection
if enforce and collection in vmconfig['current'] and vmconfig['current'][collection] is not None:
for prop in vmconfig['current'][collection]:
# skip if exists in state
if collection in vmconfig['state'] and vmconfig['state'][collection] is not None:
if prop in vmconfig['state'][collection]:
continue
# create remove_ array
if 'remove_{0}'.format(collection) not in vmconfig['changed']:
vmconfig['changed']['remove_{0}'.format(collection)] = []
# remove property
vmconfig['changed']['remove_{0}'.format(collection)].append(prop)
# process instances
for instance in vmconfig_type['instance']:
# skip create only instances
if instance in vmconfig_type['create_only']:
continue
# add or update instances
if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
for state_cfg in vmconfig['state'][instance]:
add_instance = True
# find instance with matching ids
for current_cfg in vmconfig['current'][instance]:
if vmconfig_type['instance'][instance] not in state_cfg:
continue
if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
# ids have matched, disable add instance
add_instance = False
changed = _get_instance_changes(current_cfg, state_cfg)
update_cfg = {}
# handle changes
for prop in changed:
update_cfg[prop] = state_cfg[prop]
# handle new properties
for prop in state_cfg:
# skip empty props like ips, options,..
if isinstance(state_cfg[prop], (list)) and not state_cfg[prop]:
continue
if prop not in current_cfg:
update_cfg[prop] = state_cfg[prop]
# update instance
if update_cfg:
# create update_ array
if 'update_{0}'.format(instance) not in vmconfig['changed']:
vmconfig['changed']['update_{0}'.format(instance)] = []
update_cfg[vmconfig_type['instance'][instance]] = state_cfg[vmconfig_type['instance'][instance]]
vmconfig['changed']['update_{0}'.format(instance)].append(update_cfg)
if add_instance:
# create add_ array
if 'add_{0}'.format(instance) not in vmconfig['changed']:
vmconfig['changed']['add_{0}'.format(instance)] = []
# add instance
vmconfig['changed']['add_{0}'.format(instance)].append(state_cfg)
# remove instances
if instance in vmconfig['current'] and vmconfig['current'][instance] is not None:
for current_cfg in vmconfig['current'][instance]:
remove_instance = True
# find instance with matching ids
if instance in vmconfig['state'] and vmconfig['state'][instance] is not None:
for state_cfg in vmconfig['state'][instance]:
if vmconfig_type['instance'][instance] not in state_cfg:
continue
if state_cfg[vmconfig_type['instance'][instance]] == current_cfg[vmconfig_type['instance'][instance]]:
# keep instance if matched
remove_instance = False
if remove_instance:
# create remove_ array
if 'remove_{0}'.format(instance) not in vmconfig['changed']:
vmconfig['changed']['remove_{0}'.format(instance)] = []
# remove instance
vmconfig['changed']['remove_{0}'.format(instance)].append(
current_cfg[vmconfig_type['instance'][instance]]
)
# update vm if we have pending changes
kvm_needs_start = False
if not __opts__['test'] and vmconfig['changed']:
# stop kvm if disk updates and kvm_reboot
if vmconfig['current']['brand'] == 'kvm' and config['kvm_reboot']:
if 'add_disks' in vmconfig['changed'] or \
'update_disks' in vmconfig['changed'] or \
'remove_disks' in vmconfig['changed']:
if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
kvm_needs_start = True
__salt__['vmadm.stop'](vm=vmconfig['state']['hostname'], key='hostname')
# do update
rret = __salt__['vmadm.update'](vm=vmconfig['state']['hostname'], key='hostname', **vmconfig['changed'])
if not isinstance(rret, (bool)) and 'Error' in rret:
ret['result'] = False
ret['comment'] = "{0}".format(rret['Error'])
else:
ret['result'] = True
ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']
if ret['result']:
if __opts__['test']:
ret['changes'][vmconfig['state']['hostname']] = vmconfig['changed']
if vmconfig['state']['hostname'] in ret['changes'] and ret['changes'][vmconfig['state']['hostname']]:
ret['comment'] = 'vm {0} updated'.format(vmconfig['state']['hostname'])
if config['kvm_reboot'] and vmconfig['current']['brand'] == 'kvm' and not __opts__['test']:
if vmconfig['state']['hostname'] in __salt__['vmadm.list'](order='hostname', search='state=running'):
__salt__['vmadm.reboot'](vm=vmconfig['state']['hostname'], key='hostname')
if kvm_needs_start:
__salt__['vmadm.start'](vm=vmconfig['state']['hostname'], key='hostname')
else:
ret['changes'] = {}
ret['comment'] = 'vm {0} is up to date'.format(vmconfig['state']['hostname'])
if 'image_uuid' in vmconfig['current'] and vmconfig['reprovision_uuid'] != vmconfig['current']['image_uuid']:
if config['reprovision']:
# check required image installed
if vmconfig['reprovision_uuid'] not in __salt__['imgadm.list']():
if config['auto_import']:
# check if image is available
available_images = __salt__['imgadm.avail']()
if vmconfig['reprovision_uuid'] in available_images and not __opts__['test']:
# import image
__salt__['imgadm.import'](vmconfig['reprovision_uuid'])
# reprovision
rret = __salt__['vmadm.reprovision'](
vm=vmconfig['state']['hostname'],
key='hostname',
image=vmconfig['reprovision_uuid']
)
if not isinstance(rret, (bool)) and 'Error' in rret:
ret['result'] = False
if vmconfig['reprovision_uuid'] not in __salt__['imgadm.list']():
ret['comment'] = 'vm {0} updated, reprovision failed because images {1} not installed'.format(
vmconfig['state']['hostname'],
vmconfig['reprovision_uuid']
)
else:
ret['comment'] = 'vm {0} updated, reprovision failed'.format(
vmconfig['state']['hostname']
)
else:
ret['comment'] = 'vm {0} updated and reprovisioned'.format(vmconfig['state']['hostname'])
if vmconfig['state']['hostname'] not in ret['changes']:
ret['changes'][vmconfig['state']['hostname']] = {}
ret['changes'][vmconfig['state']['hostname']]['image_uuid'] = vmconfig['reprovision_uuid']
else:
log.warning('smartos.vm_present::%s::reprovision - '
'image_uuid in state does not match current, '
'reprovision not allowed',
name)
else:
ret['comment'] = 'vm {0} failed to be updated'.format(vmconfig['state']['hostname'])
if not isinstance(rret, (bool)) and 'Error' in rret:
ret['comment'] = "{0}".format(rret['Error'])
else:
# check required image installed
ret['result'] = True
if 'image_uuid' in vmconfig and vmconfig['image_uuid'] not in __salt__['imgadm.list']():
if config['auto_import']:
# check if image is available
available_images = __salt__['imgadm.avail']()
if vmconfig['image_uuid'] not in available_images:
ret['result'] = False
ret['comment'] = 'image {0} not available'.format(vmconfig['image_uuid'])
elif not __opts__['test']:
if vmconfig['image_uuid'] not in __salt__['imgadm.import'](vmconfig['image_uuid']):
ret['result'] = False
ret['comment'] = 'failed to import image {0}'.format(vmconfig['image_uuid'])
else:
ret['result'] = False
ret['comment'] = 'image {0} not installed'.format(vmconfig['image_uuid'])
# disks need some special care
if 'disks' in vmconfig:
new_disks = []
for disk in vmconfig['disks']:
if 'path' in disk:
del disk['path']
new_disks.append(disk)
vmconfig['disks'] = new_disks
# create vm
if ret['result']:
uuid = __salt__['vmadm.create'](**vmconfig) if not __opts__['test'] else True
if not isinstance(uuid, (bool)) and 'Error' in uuid:
ret['result'] = False
ret['comment'] = "{0}".format(uuid['Error'])
else:
ret['result'] = True
ret['changes'][vmconfig['hostname']] = vmconfig
ret['comment'] = 'vm {0} created'.format(vmconfig['hostname'])
return ret
def vm_absent(name, archive=False):
    '''
    Ensure vm is absent on the computenode

    name : string
        hostname of vm
    archive : boolean
        toggle archiving of vm on removal

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # Nothing to do when the vm does not exist on this node.
    if name not in __salt__['vmadm.list'](order='hostname'):
        ret['result'] = True
        ret['comment'] = 'vm {0} is absent'.format(name)
        return ret

    if __opts__['test']:
        ret['result'] = True
    else:
        # Honour the archive flag before the actual deletion.
        if archive:
            __salt__['vmadm.update'](vm=name, key='hostname', archive_on_delete=True)
        ret['result'] = __salt__['vmadm.delete'](name, key='hostname')

    # vmadm.delete returns a dict with an 'Error' key on failure.
    delete_failed = not isinstance(ret['result'], bool) and ret['result'].get('Error')
    if delete_failed:
        ret['result'] = False
        ret['comment'] = 'failed to delete vm {0}'.format(name)
    else:
        ret['comment'] = 'vm {0} deleted'.format(name)
        ret['changes'][name] = None
    return ret
def vm_running(name):
    '''
    Ensure vm is in the running state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    running_vms = __salt__['vmadm.list'](order='hostname', search='state=running')
    if name in running_vms:
        # Already in the desired state.
        ret['result'] = True
        ret['comment'] = 'vm {0} already running'.format(name)
        return ret

    # Start the vm (skipped in test mode).
    if __opts__['test']:
        ret['result'] = True
    else:
        ret['result'] = __salt__['vmadm.start'](name, key='hostname')

    # vmadm.start returns a dict with an 'Error' key on failure.
    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to start {0}'.format(name)
    else:
        ret['changes'][name] = 'running'
        ret['comment'] = 'vm {0} started'.format(name)
    return ret
def vm_stopped(name):
    '''
    Ensure vm is in the stopped state on the computenode

    name : string
        hostname of vm

    .. note::
        State ID is used as hostname. Hostnames must be unique.
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    stopped_vms = __salt__['vmadm.list'](order='hostname', search='state=stopped')
    if name in stopped_vms:
        # Already in the desired state.
        ret['result'] = True
        ret['comment'] = 'vm {0} already stopped'.format(name)
        return ret

    # Stop the vm (skipped in test mode).
    if __opts__['test']:
        ret['result'] = True
    else:
        ret['result'] = __salt__['vmadm.stop'](name, key='hostname')

    # vmadm.stop returns a dict with an 'Error' key on failure.
    if not isinstance(ret['result'], bool) and ret['result'].get('Error'):
        ret['result'] = False
        ret['comment'] = 'failed to stop {0}'.format(name)
    else:
        ret['changes'][name] = 'stopped'
        ret['comment'] = 'vm {0} stopped'.format(name)
    return ret
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
| 34.863636 | 130 | 0.515854 |
cbd378e2d53fa2aa75ba5f05bd1dea49259b57b4
| 32,540 |
py
|
Python
|
ledger/accounts/models.py
|
mohdbakhrayba/ledger
|
0180d6b35bce4d028e9f6081c86feffd7a5dfae3
|
[
"Apache-2.0"
] | null | null | null |
ledger/accounts/models.py
|
mohdbakhrayba/ledger
|
0180d6b35bce4d028e9f6081c86feffd7a5dfae3
|
[
"Apache-2.0"
] | 11 |
2019-03-19T02:03:11.000Z
|
2019-05-31T07:20:59.000Z
|
ledger/accounts/models.py
|
dbca-dragon/ledger
|
6f71699e21c8e502ee805cadc82ee0ec4c004e79
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
import os
import zlib
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser, PermissionsMixin
from django.contrib.postgres.fields import JSONField
from django.db import models, IntegrityError, transaction
from django.utils.encoding import python_2_unicode_compatible
from django.utils import timezone
from django.dispatch import receiver
from django.db.models import Q
from django.db.models.signals import post_delete, pre_save, post_save
from django.core.exceptions import ValidationError
from reversion import revisions
from reversion.models import Version
from django_countries.fields import CountryField
from social_django.models import UserSocialAuth
from datetime import datetime, date
from ledger.accounts.signals import name_changed, post_clean
from ledger.accounts.utils import get_department_user_compact, in_dbca_domain
from ledger.address.models import UserAddress, Country
class EmailUserManager(BaseUserManager):
    """Custom model manager for :class:`EmailUser`.

    Users are keyed by a lower-cased email address; an address already
    present on any account, profile or email identity is rejected.
    """
    use_in_migrations = True

    def _create_user(self, email, password, is_staff, is_superuser, **extra_fields):
        """Create and persist an EmailUser with the given email and password."""
        if not email:
            raise ValueError('Email must be set')
        email = self.normalize_email(email).lower()
        # Reject addresses already claimed anywhere in the accounts system.
        email_taken = (
            EmailUser.objects.filter(email__iexact=email) or
            Profile.objects.filter(email__iexact=email) or
            EmailIdentity.objects.filter(email__iexact=email)
        )
        if email_taken:
            raise ValueError('This email is already in use')
        user = self.model(
            email=email, is_staff=is_staff, is_superuser=is_superuser)
        user.extra_data = extra_fields
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_user(self, email=None, password=None, **extra_fields):
        """Create a regular (non-staff, non-superuser) user."""
        return self._create_user(email, password, False, False, **extra_fields)

    def create_superuser(self, email, password, **extra_fields):
        """Create a staff superuser."""
        return self._create_user(email, password, True, True, **extra_fields)
@python_2_unicode_compatible
class Document(models.Model):
    """A generic uploaded document with an optional display name/description."""
    name = models.CharField(max_length=100, blank=True,
                            verbose_name='name', help_text='')
    description = models.TextField(blank=True,
                                   verbose_name='description', help_text='')
    file = models.FileField(upload_to='%Y/%m/%d')
    uploaded_date = models.DateTimeField(auto_now_add=True)

    @property
    def path(self):
        # Absolute filesystem path of the stored file.
        return self.file.path

    @property
    def filename(self):
        # Basename of the stored file (no directory component).
        return os.path.basename(self.path)

    def __str__(self):
        # Prefer the user-supplied name; fall back to the filename.
        return self.name or self.filename
class DocumentListener(object):
    """
    Event listener for Document.

    Keeps file storage in sync with the model: removes the stored file
    when a Document row is deleted, and removes the previous file when a
    Document is saved pointing at a new file.
    """
    @staticmethod
    @receiver(post_delete, sender=Document)
    def _post_delete(sender, instance, **kwargs):
        """Delete the stored file after the Document row is deleted."""
        # Pass false so FileField doesn't save the model.
        try:
            instance.file.delete(False)
        except Exception:
            # Best effort only: a missing/undeletable file must not
            # break row deletion.
            pass

    @staticmethod
    @receiver(pre_save, sender=Document)
    def _pre_save(sender, instance, **kwargs):
        """Stash the persisted state so _post_save can detect a file change."""
        if instance.pk:
            original_instance = Document.objects.get(pk=instance.pk)
            setattr(instance, "_original_instance", original_instance)
        elif hasattr(instance, "_original_instance"):
            delattr(instance, "_original_instance")

    @staticmethod
    @receiver(post_save, sender=Document)
    def _post_save(sender, instance, **kwargs):
        """Delete the previously stored file when it has been replaced."""
        original_instance = getattr(instance, "_original_instance", None)
        if original_instance and original_instance.file and instance.file != original_instance.file:
            # File changed: delete the original file.
            # BUG FIX: previous code referenced the undefined name
            # 'original_file' and the bare except swallowed the NameError,
            # so replaced files were never removed from storage.
            try:
                original_instance.file.delete(False)
            except Exception:
                # Best effort only; ignore storage errors.
                pass
            delattr(instance, "_original_instance")
@python_2_unicode_compatible
class BaseAddress(models.Model):
    """Generic address model, intended to provide billing and shipping
    addresses.
    Taken from django-oscar address AbstrastAddress class.
    """
    STATE_CHOICES = (
        ('ACT', 'ACT'),
        ('NSW', 'NSW'),
        ('NT', 'NT'),
        ('QLD', 'QLD'),
        ('SA', 'SA'),
        ('TAS', 'TAS'),
        ('VIC', 'VIC'),
        ('WA', 'WA')
    )

    # Addresses consist of 1+ lines, only the first of which is
    # required.
    line1 = models.CharField('Line 1', max_length=255)
    line2 = models.CharField('Line 2', max_length=255, blank=True)
    line3 = models.CharField('Line 3', max_length=255, blank=True)
    locality = models.CharField('Suburb / Town', max_length=255)
    # NOTE(review): STATE_CHOICES is declared above but not passed as
    # ``choices`` here, so free-text state values are accepted.
    state = models.CharField(max_length=255, default='WA', blank=True)
    country = CountryField(default='AU')
    postcode = models.CharField(max_length=10)
    # A field only used for searching addresses.
    search_text = models.TextField(editable=False)
    # Derived CRC32 of the normalised summary; used to de-duplicate addresses.
    hash = models.CharField(max_length=255, db_index=True, editable=False)

    def __str__(self):
        return self.summary

    class Meta:
        abstract = True

    def clean(self):
        # Strip all whitespace
        for field in ['line1', 'line2', 'line3',
                      'locality', 'state']:
            if self.__dict__[field]:
                self.__dict__[field] = self.__dict__[field].strip()

    def save(self, *args, **kwargs):
        # Keep the derived search_text and hash columns in sync on every save.
        self._update_search_text()
        self.hash = self.generate_hash()
        super(BaseAddress, self).save(*args, **kwargs)

    def _update_search_text(self):
        # Space-joined concatenation of all non-empty address components.
        search_fields = filter(
            bool, [self.line1, self.line2, self.line3, self.locality,
                   self.state, str(self.country.name), self.postcode])
        self.search_text = ' '.join(search_fields)

    @property
    def summary(self):
        """Returns a single string summary of the address, separating fields
        using commas.
        """
        return u', '.join(self.active_address_fields())

    # Helper methods
    def active_address_fields(self):
        """Return the non-empty components of the address.
        """
        fields = [self.line1, self.line2, self.line3,
                  self.locality, self.state, self.country, self.postcode]
        fields = [str(f).strip() for f in fields if f]
        return fields

    def join_fields(self, fields, separator=u', '):
        """Join a sequence of fields using the specified separator.
        """
        field_values = []
        for field in fields:
            value = getattr(self, field)
            field_values.append(value)
        return separator.join(filter(bool, field_values))

    def generate_hash(self):
        """
        Returns a hash of the address summary
        """
        # CRC32 over the stripped, upper-cased summary so trivially
        # different spellings of the same address collide.
        return zlib.crc32(self.summary.strip().upper().encode('UTF8'))
class Address(BaseAddress):
    """Profile-level address, mirrored into django-oscar's UserAddress."""
    user = models.ForeignKey('EmailUser', related_name='profile_addresses')
    oscar_address = models.ForeignKey(UserAddress, related_name='profile_addresses')

    class Meta:
        verbose_name_plural = 'addresses'
        # A user cannot hold two addresses with the same content hash.
        unique_together = ('user', 'hash')
@python_2_unicode_compatible
class EmailIdentity(models.Model):
    """Table used for matching access email address with EmailUser.
    """
    # Owning user; nullable so an identity row can exist unattached.
    user = models.ForeignKey('EmailUser', null=True)
    email = models.EmailField(unique=True)

    def __str__(self):
        return self.email
@python_2_unicode_compatible
class EmailUser(AbstractBaseUser, PermissionsMixin):
    """Custom authentication model for the ledger project.
    Password and email are required. Other fields are optional.
    """
    email = models.EmailField(unique=True, blank=False)
    first_name = models.CharField(max_length=128, blank=False, verbose_name='Given name(s)')
    last_name = models.CharField(max_length=128, blank=False)
    is_staff = models.BooleanField(
        default=False,
        help_text='Designates whether the user can log into the admin site.',
    )
    is_active = models.BooleanField(
        default=True,
        help_text='Designates whether this user should be treated as active.'
        'Unselect this instead of deleting ledger.accounts.',
    )
    date_joined = models.DateTimeField(default=timezone.now)
    TITLE_CHOICES = (
        ('Mr', 'Mr'),
        ('Miss', 'Miss'),
        ('Mrs', 'Mrs'),
        ('Ms', 'Ms'),
        ('Dr', 'Dr')
    )
    title = models.CharField(max_length=100, choices=TITLE_CHOICES, null=True, blank=True,
                             verbose_name='title', help_text='')
    dob = models.DateField(auto_now=False, auto_now_add=False, null=True, blank=False,
                           verbose_name="date of birth", help_text='')
    phone_number = models.CharField(max_length=50, null=True, blank=True,
                                    verbose_name="phone number", help_text='')
    mobile_number = models.CharField(max_length=50, null=True, blank=True,
                                     verbose_name="mobile number", help_text='')
    fax_number = models.CharField(max_length=50, null=True, blank=True,
                                  verbose_name="fax number", help_text='')
    organisation = models.CharField(max_length=300, null=True, blank=True,
                                    verbose_name="organisation", help_text='organisation, institution or company')
    residential_address = models.ForeignKey(Address, null=True, blank=False, related_name='+')
    postal_address = models.ForeignKey(Address, null=True, blank=True, related_name='+')
    billing_address = models.ForeignKey(Address, null=True, blank=True, related_name='+')
    identification = models.ForeignKey(Document, null=True, blank=True, on_delete=models.SET_NULL, related_name='identification_document')
    senior_card = models.ForeignKey(Document, null=True, blank=True, on_delete=models.SET_NULL, related_name='senior_card')
    character_flagged = models.BooleanField(default=False)
    character_comments = models.TextField(blank=True)
    documents = models.ManyToManyField(Document)
    extra_data = JSONField(default=dict)

    objects = EmailUserManager()
    USERNAME_FIELD = 'email'

    def __str__(self):
        # Dummy users (no real email) are displayed by name instead.
        if self.is_dummy_user:
            if self.organisation:
                return '{} {} ({})'.format(self.first_name, self.last_name, self.organisation)
            return '{} {}'.format(self.first_name, self.last_name)
        else:
            if self.organisation:
                return '{} ({})'.format(self.email, self.organisation)
            return '{}'.format(self.email)

    def clean(self):
        """Normalise the email to lower case and fire the post_clean signal
        (listeners validate email uniqueness)."""
        super(EmailUser, self).clean()
        self.email = self.email.lower() if self.email else self.email
        post_clean.send(sender=self.__class__, instance=self)

    def save(self, *args, **kwargs):
        """Persist the user, generating a dummy email when none is set and
        refreshing contact details from the address book for DBCA users."""
        if not self.email:
            self.email = self.get_dummy_email()
        elif in_dbca_domain(self):
            # checks and updates department user details from address book after every login
            user_details = get_department_user_compact(self.email)
            if user_details:
                self.phone_number = user_details.get('telephone')
                self.mobile_number = user_details.get('mobile_phone')
                self.title = user_details.get('title')
                self.fax_number = user_details.get('org_unit__location__fax')
        super(EmailUser, self).save(*args, **kwargs)

    def get_full_name(self):
        """Return '<first> <last>' with surrounding whitespace stripped."""
        full_name = '{} {}'.format(self.first_name, self.last_name)
        return full_name.strip()

    def get_full_name_dob(self):
        """Return '<first> <last> (dd/mm/YYYY)'.

        NOTE(review): raises AttributeError when ``dob`` is None (the field
        is nullable) — callers must ensure a dob is set.
        """
        full_name_dob = '{} {} ({})'.format(self.first_name, self.last_name, self.dob.strftime('%d/%m/%Y'))
        return full_name_dob.strip()

    def get_short_name(self):
        """Return the first given name, falling back to the email address."""
        if self.first_name:
            return self.first_name.split(' ')[0]
        return self.email

    def upload_identification(self, request):
        """Store an uploaded identification document atomically."""
        with transaction.atomic():
            document = Document(file=request.data.dict()['identification'])
            document.save()
            self.identification = document
            self.save()

    # Marker suffix used to recognise placeholder accounts with no real email.
    dummy_email_suffix = "[email protected]"
    dummy_email_suffix_len = len(dummy_email_suffix)

    @property
    def is_dummy_user(self):
        # A user is "dummy" when it has no email or a generated placeholder one.
        return not self.email or self.email[-1 * self.dummy_email_suffix_len:] == self.dummy_email_suffix

    @property
    def dummy_email(self):
        if self.is_dummy_user:
            return self.email
        else:
            return None

    def get_dummy_email(self):
        # use timestamp plus first name, last name to generate a unique id.
        uid = datetime.now().strftime("%Y%m%d%H%M%S%f")
        return "{}.{}.{}{}".format(self.first_name, self.last_name, uid, self.dummy_email_suffix)

    @property
    def username(self):
        return self.email

    @property
    def is_senior(self):
        """
        Test if the the user is a senior according to the rules of WA senior
        dob is before 1 July 1955; or
        dob is between 1 July 1955 and 30 June 1956 and age is 61 or older; or
        dob is between 1 July 1956 and 30 June 1957 and age is 62 or older; or
        dob is between 1 July 1957 and 30 June 1958 and age is 63 or older; or
        dob is between 1 July 1958 and 30 June 1959 and age is 64 or older; or
        dob is after 30 June 1959 and age is 65 or older
        :return:
        """
        # BUG FIX: previously raised TypeError when dob was unset (the field
        # is nullable); no dob means the user cannot qualify as a senior.
        if not self.dob:
            return False
        # BUG FIX: the last clause used date(1959, 6, 1) where the rule says
        # "after 30 June 1959"; corrected to match the documented boundary.
        return \
            self.dob < date(1955, 7, 1) or \
            ((date(1955, 7, 1) <= self.dob <= date(1956, 6, 30)) and self.age() >= 61) or \
            ((date(1956, 7, 1) <= self.dob <= date(1957, 6, 30)) and self.age() >= 62) or \
            ((date(1957, 7, 1) <= self.dob <= date(1958, 6, 30)) and self.age() >= 63) or \
            ((date(1958, 7, 1) <= self.dob <= date(1959, 6, 30)) and self.age() >= 64) or \
            (self.dob > date(1959, 6, 30) and self.age() >= 65)

    def age(self):
        """Return the user's age in whole years, or -1 when dob is unset."""
        if self.dob:
            today = date.today()
            # calculate age with the help of trick int(True) = 1 and int(False) = 0
            return today.year - self.dob.year - ((today.month, today.day) < (self.dob.month, self.dob.day))
        else:
            return -1

    def log_user_action(self, action, request=None):
        """Record an audit-log entry attributing *action* to the request user.

        Returns the created EmailUserAction, or None when no request is given.
        """
        if request:
            return EmailUserAction.log_action(self, action, request.user)
        else:
            pass
def query_emailuser_by_args(**kwargs):
    """
    Server-side helper for a DataTables listing of EmailUser records.

    Expects DataTables-style parameters where each value is a
    single-element list (as produced by ``QueryDict.lists()``):
    ``draw``, ``start``, ``length``, ``search[value]``,
    ``order[0][column]`` and ``order[0][dir]``.

    Returns a dict with ``items`` (the requested page of EmailUser rows),
    ``count`` (filtered row count), ``total`` (unfiltered row count) and
    ``draw`` (echoed back for DataTables).
    """
    ORDER_COLUMN_CHOICES = [
        'title',
        'first_name',
        'last_name',
        'dob',
        'email',
        'phone_number',
        'mobile_number',
        'fax_number',
        'character_flagged',
        'character_comments'
    ]

    def _first(key, default):
        # Each DataTables parameter arrives as a single-element list.
        # BUG FIX: the previous code did kwargs.get(key, None)[0], which
        # raised TypeError whenever a parameter was missing; fall back to
        # a sane default instead.
        values = kwargs.get(key)
        return values[0] if values else default

    draw = int(_first('draw', 1))
    length = int(_first('length', 10))
    start = int(_first('start', 0))
    search_value = _first('search[value]', '')
    order_column = ORDER_COLUMN_CHOICES[int(_first('order[0][column]', 0))]
    order = _first('order[0][dir]', 'asc')

    # django orm '-' -> desc
    if order == 'desc':
        order_column = '-' + order_column

    queryset = EmailUser.objects.all()
    total = queryset.count()

    if search_value:
        queryset = queryset.filter(Q(first_name__icontains=search_value) |
                                   Q(last_name__icontains=search_value) |
                                   Q(email__icontains=search_value) |
                                   Q(phone_number__icontains=search_value) |
                                   Q(mobile_number__icontains=search_value) |
                                   Q(fax_number__icontains=search_value))

    count = queryset.count()
    queryset = queryset.order_by(order_column)[start:start + length]
    return {
        'items': queryset,
        'count': count,
        'total': total,
        'draw': draw
    }
@python_2_unicode_compatible
class UserAction(models.Model):
    """Abstract audit-log entry: who did what, and when."""
    who = models.ForeignKey(EmailUser, null=False, blank=False)
    when = models.DateTimeField(null=False, blank=False, auto_now_add=True)
    what = models.TextField(blank=False)

    def __str__(self):
        return "{what} ({who} at {when})".format(
            what=self.what,
            who=self.who,
            when=self.when
        )

    class Meta:
        abstract = True
        app_label = 'accounts'
class EmailUserAction(UserAction):
    """Audit-log entries recorded against an EmailUser."""
    # Message templates; callers interpolate an identifier via .format()
    # before passing the result to log_action().
    ACTION_PERSONAL_DETAILS_UPDATE = "User {} Personal Details Updated"
    ACTION_CONTACT_DETAILS_UPDATE = "User {} Contact Details Updated"
    ACTION_POSTAL_ADDRESS_UPDATE = "User {} Postal Address Updated"
    ACTION_ID_UPDATE = "User {} Identification Updated"

    emailuser = models.ForeignKey(EmailUser, related_name='action_logs')

    class Meta:
        app_label = 'accounts'
        # Newest entries first.
        ordering = ['-when']

    @classmethod
    def log_action(cls, emailuser, action, user):
        # Create and return a log row: *user* performed *action* on *emailuser*.
        return cls.objects.create(
            emailuser=emailuser,
            who=user,
            what=str(action)
        )
class EmailUserListener(object):
    """
    Event listener for EmailUser

    Keeps EmailIdentity and UserSocialAuth rows in sync with the
    account's email address across create/update/delete.
    """
    @staticmethod
    @receiver(post_delete, sender=EmailUser)
    def _post_delete(sender, instance, **kwargs):
        # delete the profile's email from email identity and social auth
        if not instance.is_dummy_user:
            EmailIdentity.objects.filter(email=instance.email, user=instance).delete()
            UserSocialAuth.objects.filter(provider="email", uid=instance.email, user=instance).delete()

    @staticmethod
    @receiver(pre_save, sender=EmailUser)
    def _pre_save(sender, instance, **kwargs):
        # Snapshot the persisted row so _post_save can detect email/name changes.
        if instance.pk:
            original_instance = EmailUser.objects.get(pk=instance.pk)
            setattr(instance, "_original_instance", original_instance)
        elif hasattr(instance, "_original_instance"):
            delattr(instance, "_original_instance")

    @staticmethod
    @receiver(post_save, sender=EmailUser)
    def _post_save(sender, instance, **kwargs):
        original_instance = getattr(instance, "_original_instance") if hasattr(instance, "_original_instance") else None
        # add user's email to email identity and social auth if not exist
        if not instance.is_dummy_user:
            EmailIdentity.objects.get_or_create(email=instance.email, user=instance)
            if not UserSocialAuth.objects.filter(user=instance, provider="email", uid=instance.email).exists():
                user_social_auth = UserSocialAuth.create_social_auth(instance, instance.email, 'email')
                user_social_auth.extra_data = {'email': [instance.email]}
                user_social_auth.save()
        if original_instance and original_instance.email != instance.email:
            # Email changed: retire identities keyed on the old address and
            # repoint profiles that mirrored the account email.
            if not original_instance.is_dummy_user:
                # delete the user's email from email identity and social auth
                EmailIdentity.objects.filter(email=original_instance.email, user=original_instance).delete()
                UserSocialAuth.objects.filter(provider="email", uid=original_instance.email, user=original_instance).delete()
            # update profile's email if profile's email is original email
            Profile.objects.filter(email=original_instance.email, user=instance).update(email=instance.email)
        if original_instance and any([original_instance.first_name != instance.first_name, original_instance.last_name != instance.last_name]):
            # user changed first name or last name, send a named_changed signal.
            name_changed.send(sender=instance.__class__, user=instance)
class RevisionedMixin(models.Model):
    """
    A model tracked by reversion through the save method.

    save() accepts three extra keyword arguments, all consumed here:
      no_revision      -- skip revision tracking entirely
      version_user     -- user to attribute the revision to
      version_comment  -- free-text comment stored on the revision
    """
    def save(self, **kwargs):
        # 'no_revision' must be popped unconditionally so it never reaches
        # Model.save().
        if kwargs.pop('no_revision', False):
            super(RevisionedMixin, self).save(**kwargs)
        else:
            with revisions.create_revision():
                revisions.set_user(kwargs.pop('version_user', None))
                revisions.set_comment(kwargs.pop('version_comment', ''))
                super(RevisionedMixin, self).save(**kwargs)

    @property
    def created_date(self):
        # Oldest revision for this object marks its creation.
        #return revisions.get_for_object(self).last().revision.date_created
        return Version.objects.get_for_object(self).last().revision.date_created

    @property
    def modified_date(self):
        # Newest revision for this object marks its last modification.
        #return revisions.get_for_object(self).first().revision.date_created
        return Version.objects.get_for_object(self).first().revision.date_created

    class Meta:
        abstract = True
@python_2_unicode_compatible
class Profile(RevisionedMixin):
    """A named contact profile (email + postal address) belonging to a user."""
    user = models.ForeignKey(EmailUser, verbose_name='User', related_name='profiles')
    name = models.CharField('Display Name', max_length=100, help_text='e.g Personal, Work, University, etc')
    email = models.EmailField('Email')
    postal_address = models.ForeignKey(Address, verbose_name='Postal Address', on_delete=models.PROTECT, related_name='profiles')
    institution = models.CharField('Institution', max_length=200, blank=True, default='', help_text='e.g. Company Name, Tertiary Institution, Government Department, etc')

    @property
    def is_auth_identity(self):
        """
        Return True if the email is an email identity; otherwise return False.
        """
        if not self.email:
            return False

        if not hasattr(self, "_auth_identity"):
            # Cache the DB lookup for the lifetime of this instance.
            self._auth_identity = EmailIdentity.objects.filter(user=self.user, email=self.email).exists()

        return self._auth_identity

    def clean(self):
        super(Profile, self).clean()
        # Emails are stored lower-case; post_clean listeners validate uniqueness.
        self.email = self.email.lower() if self.email else self.email
        post_clean.send(sender=self.__class__, instance=self)

    def __str__(self):
        if len(self.name) > 0:
            return '{} ({})'.format(self.name, self.email)
        else:
            return '{}'.format(self.email)
@python_2_unicode_compatible
class Organisation(models.Model):
    """This model represents the details of a company or other organisation.
    Management of these objects will be delegated to 0+ EmailUsers.
    """
    name = models.CharField(max_length=128, unique=True)
    abn = models.CharField(max_length=50, null=True, blank=True, verbose_name='ABN')
    # TODO: business logic related to identification file upload/changes.
    identification = models.FileField(upload_to='%Y/%m/%d', null=True, blank=True)
    postal_address = models.ForeignKey('OrganisationAddress', related_name='org_postal_address', blank=True, null=True, on_delete=models.SET_NULL)
    billing_address = models.ForeignKey('OrganisationAddress', related_name='org_billing_address', blank=True, null=True, on_delete=models.SET_NULL)

    def upload_identification(self, request):
        # Store the uploaded identification file atomically with the row update.
        with transaction.atomic():
            self.identification = request.data.dict()['identification']
            self.save()

    def __str__(self):
        return self.name
class OrganisationAddress(BaseAddress):
    """Postal/billing address belonging to an Organisation."""
    # NOTE(review): related_name 'adresses' is misspelled, but it is part of
    # the public ORM API (and existing migrations), so it is left unchanged.
    organisation = models.ForeignKey(Organisation, null=True, blank=True, related_name='adresses')

    class Meta:
        verbose_name_plural = 'organisation addresses'
        # An organisation cannot hold two addresses with the same content hash.
        unique_together = ('organisation', 'hash')
class ProfileListener(object):
    """
    Event listener for Profile

    Mirrors the EmailUserListener behaviour for profile emails and, on
    profile creation, links the profile's postal address to a matching
    (or newly created) django-oscar UserAddress.
    """
    @staticmethod
    @receiver(post_delete, sender=Profile)
    def _post_delete(sender, instance, **kwargs):
        # delete from email identity, and social auth
        if instance.user.email == instance.email:
            # profile's email is user's email, return
            return

        # delete the profile's email from email identity and social auth
        EmailIdentity.objects.filter(email=instance.email, user=instance.user).delete()
        UserSocialAuth.objects.filter(provider="email", uid=instance.email, user=instance.user).delete()

    @staticmethod
    @receiver(pre_save, sender=Profile)
    def _pre_save(sender, instance, **kwargs):
        # "auth_identity" is only set when the save was initiated by the user
        # flow; other code paths skip this listener entirely.
        if not hasattr(instance, "auth_identity"):
            # not triggered by user.
            return
        if instance.pk:
            original_instance = Profile.objects.get(pk=instance.pk)
            setattr(instance, "_original_instance", original_instance)
        elif hasattr(instance, "_original_instance"):
            delattr(instance, "_original_instance")

    @staticmethod
    @receiver(post_save, sender=Profile)
    def _post_save(sender, instance, **kwargs):
        if not hasattr(instance, "auth_identity"):
            # not triggered by user.
            return
        original_instance = getattr(instance, "_original_instance") if hasattr(instance, "_original_instance") else None
        auth_identity = getattr(instance, "auth_identity")
        if auth_identity:
            # add email to email identity and social auth if not exist
            EmailIdentity.objects.get_or_create(email=instance.email, user=instance.user)
            if not UserSocialAuth.objects.filter(user=instance.user, provider="email", uid=instance.email).exists():
                user_social_auth = UserSocialAuth.create_social_auth(instance.user, instance.email, 'email')
                user_social_auth.extra_data = {'email': [instance.email]}
                user_social_auth.save()
        if original_instance and (original_instance.email != instance.email or not auth_identity):
            # delete the profile's email from email identity and social auth
            EmailIdentity.objects.filter(email=original_instance.email, user=original_instance.user).delete()
            UserSocialAuth.objects.filter(provider="email", uid=original_instance.email, user=original_instance.user).delete()

        if not original_instance:
            # New profile: ensure an equivalent oscar UserAddress exists and
            # is linked to the profile's postal address.
            address = instance.postal_address
            try:
                # Check if the user has the same profile address
                # Check if there is a user address
                oscar_add = UserAddress.objects.get(
                    line1 = address.line1,
                    line2 = address.line2,
                    line3 = address.line3,
                    line4 = address.locality,
                    state = address.state,
                    postcode = address.postcode,
                    country = Country.objects.get(iso_3166_1_a2=address.country),
                    user = instance.user
                )
                if not address.oscar_address:
                    address.oscar_address = oscar_add
                    address.save()
                elif address.oscar_address.id != oscar_add.id:
                    address.oscar_address = oscar_add
                    address.save()
            except UserAddress.DoesNotExist:
                # No matching oscar address yet: create one and link it.
                oscar_address = UserAddress.objects.create(
                    line1 = address.line1,
                    line2 = address.line2,
                    line3 = address.line3,
                    line4 = address.locality,
                    state = address.state,
                    postcode = address.postcode,
                    country = Country.objects.get(iso_3166_1_a2=address.country),
                    user = instance.user
                )
                address.oscar_address = oscar_address
                address.save()

        # Clear out unused addresses
        # EmailUser can have address that is not linked with profile, hence the exclude
        ''' This functionality no longer in use due to more than just
            profile objects using the UserAddresses
        user = instance.user
        user_addr = Address.objects.filter(user=user)
        for u in user_addr:
            if not u.profiles.all() \
            and not u in (user.postal_address, user.residential_address, user.billing_address):
                u.oscar_address.delete()
                u.delete()'''
class EmailIdentityListener(object):
    """
    Event listener for EmailIdentity

    Enforces, at clean() time, that an email address can only ever be
    linked to a single user across EmailUser accounts and Profiles.
    """
    @staticmethod
    def _validate_email_unused(email, user):
        """Raise ValidationError if *email* is claimed by a different user.

        Shared by both post_clean receivers below (previously duplicated).
        """
        if not email:
            return
        if EmailIdentity.objects.filter(email=email).exclude(user=user).exists():
            # Email already used by other user in email identity.
            raise ValidationError("This email address is already associated with an existing account or profile; if this email address belongs to you, please contact the system administrator to request for the email address to be added to your account.")

    @staticmethod
    @receiver(post_clean, sender=Profile)
    def _profile_post_clean(sender, instance, **kwargs):
        # A profile's email must not belong to a different user.
        EmailIdentityListener._validate_email_unused(instance.email, instance.user)

    @staticmethod
    @receiver(post_clean, sender=EmailUser)
    def _emailuser_post_clean(sender, instance, **kwargs):
        # An account's email must not belong to a different user.
        EmailIdentityListener._validate_email_unused(instance.email, instance)
class AddressListener(object):
    """
    Event listener for Address

    Keeps project ``Address`` rows linked to Oscar ``UserAddress`` rows:
    before save, an existing ``UserAddress`` with the same content hash is
    looked up (or a new one is created) and linked via ``oscar_address``;
    after save, field changes are pushed through to the linked
    ``UserAddress`` when a pre-save snapshot was kept.
    """
    @staticmethod
    @receiver(pre_save, sender=Address)
    def _pre_save(sender, instance, **kwargs):
        # Build an unsaved UserAddress mirroring this Address.  It is used to
        # compute the content hash for the duplicate lookup below, and only
        # saved when no matching UserAddress exists yet.
        check_address = UserAddress(
            line1 = instance.line1,
            line2 = instance.line2,
            line3 = instance.line3,
            line4 = instance.locality,
            state = instance.state,
            postcode = instance.postcode,
            country = Country.objects.get(iso_3166_1_a2=instance.country),
            user = instance.user
        )
        if instance.pk:
            # Updating an existing Address: stash the pre-save DB state so
            # _post_save can sync the linked Oscar address.
            original_instance = Address.objects.get(pk=instance.pk)
            setattr(instance, "_original_instance", original_instance)
            if original_instance.oscar_address is None:
                # Not linked yet: reuse a UserAddress with the same hash for
                # this user, or persist the freshly built one.
                try:
                    check_address = UserAddress.objects.get(hash=check_address.generate_hash(),user=check_address.user)
                except UserAddress.DoesNotExist:
                    check_address.save()
                instance.oscar_address = check_address
            elif hasattr(instance, "_original_instance"):
                # NOTE(review): "_original_instance" was set unconditionally a
                # few lines above, so this branch runs whenever an
                # oscar_address is already linked; deleting the attribute
                # means _post_save will NOT propagate field changes in that
                # case — confirm this is intentional.
                delattr(instance, "_original_instance")
        else:
            # Brand-new Address: link to a matching existing UserAddress or
            # create one.
            try:
                check_address = UserAddress.objects.get(hash=check_address.generate_hash(),user=check_address.user)
            except UserAddress.DoesNotExist:
                check_address.save()
            instance.oscar_address = check_address
    @staticmethod
    @receiver(post_save, sender=Address)
    def _post_save(sender, instance, **kwargs):
        # Only syncs when _pre_save left the pre-save snapshot on the instance.
        original_instance = getattr(instance, "_original_instance") if hasattr(instance, "_original_instance") else None
        if original_instance:
            oscar_address = original_instance.oscar_address
            try:
                if oscar_address is not None:
                    # Copy the (possibly changed) fields onto the linked Oscar
                    # address; note "locality" maps onto Oscar's line4.
                    oscar_address.line1 = instance.line1
                    oscar_address.line2 = instance.line2
                    oscar_address.line3 = instance.line3
                    oscar_address.line4 = instance.locality
                    oscar_address.state = instance.state
                    oscar_address.postcode = instance.postcode
                    oscar_address.country = Country.objects.get(iso_3166_1_a2=instance.country)
                    oscar_address.save()
            except IntegrityError as e:
                # NOTE(review): e.message is Python 2 only — on Python 3 this
                # access raises AttributeError; confirm the target runtime.
                if 'unique constraint' in e.message:
                    raise ValidationError('Multiple profiles cannot have the same address.')
                else:
                    raise
@python_2_unicode_compatible
class EmailUserReport(models.Model):
    """Read-only model mapped onto the database view accounts_emailuser_report_v."""
    # Primary key of the view row.
    hash = models.TextField(primary_key=True)
    # Count of matching rows for this name/dob combination.  Spelling
    # ("occurence") kept as-is: it must match the view's column name.
    occurence = models.IntegerField()
    first_name = models.CharField(max_length=128, blank=False, verbose_name='Given name(s)')
    last_name = models.CharField(max_length=128, blank=False)
    dob = models.DateField(auto_now=False, auto_now_add=False, null=True, blank=False,verbose_name="date of birth", help_text='')
    def __str__(self):
        return 'Given Name(s): {}, Last Name: {}, DOB: {}, Occurence: {}'.format(self.first_name,self.last_name,self.dob,self.occurence)
    class Meta:
        # Backed by a database view, so Django must not manage the table.
        managed = False
        db_table = 'accounts_emailuser_report_v'
| 40.828105 | 258 | 0.642778 |
1b93c0d83a276f5976ea230ba69f64461914fb3d
| 1,320 |
py
|
Python
|
2019/12 December/dp12062019.py
|
vishrutkmr7/DailyPracticeProblemsDIP
|
d1bfbc75f2024736c22c05385f753a90ddcfa0f5
|
[
"MIT"
] | 5 |
2019-08-06T02:34:41.000Z
|
2022-01-08T03:03:16.000Z
|
2019/12 December/dp12062019.py
|
ourangzeb/DailyPracticeProblemsDIP
|
66c07af88754e5d59b243e3ee9f02db69f7c0a77
|
[
"MIT"
] | 15 |
2021-06-01T14:04:16.000Z
|
2022-03-08T21:17:22.000Z
|
2019/12 December/dp12062019.py
|
ourangzeb/DailyPracticeProblemsDIP
|
66c07af88754e5d59b243e3ee9f02db69f7c0a77
|
[
"MIT"
] | 4 |
2019-09-19T20:00:05.000Z
|
2021-08-16T11:31:51.000Z
|
# This problem was recently asked by Microsoft:
# A maze is a matrix where each cell can either be a 0 or 1. A 0 represents that the cell is empty, and a 1 represents a wall
# that cannot be walked through. You can also only travel either right or down.
# Given a nxm matrix, find the number of ways someone can go from the top left corner to the bottom right corner.
# You can assume the two corners will always be 0.
# Maze dimensions of the sample problem below.  Kept for backward
# compatibility; the function itself derives the dimensions from its input.
R = 3
C = 3


def paths_through_maze(maze):
    """Count the monotone paths through a maze, moving only right or down.

    Args:
        maze: Rectangular matrix where 0 marks an open cell and 1 a wall.
            The input is NOT modified (the original implementation clobbered
            it with negative markers).

    Returns:
        The number of paths from the top-left to the bottom-right corner,
        or 0 if either corner is blocked, the maze is empty, or no path
        exists.  (The original returned None for a blocked start cell.)
    """
    if not maze or not maze[0]:
        return 0
    rows, cols = len(maze), len(maze[0])
    if maze[0][0] == 1 or maze[rows - 1][cols - 1] == 1:
        return 0
    # counts[i][j] = number of right/down paths from (0, 0) to (i, j).
    counts = [[0] * cols for _ in range(rows)]
    counts[0][0] = 1
    for i in range(rows):
        for j in range(cols):
            if maze[i][j] == 1:
                counts[i][j] = 0  # walls are unreachable
                continue
            if i > 0:
                counts[i][j] += counts[i - 1][j]
            if j > 0:
                counts[i][j] += counts[i][j - 1]
    return counts[rows - 1][cols - 1]


print(paths_through_maze([[0, 1, 0], [0, 0, 1], [0, 0, 0]]))
# 2
| 25.882353 | 125 | 0.522727 |
aa559ca9f62d2bdb97e45733b0e4107d144a0ef1
| 2,061 |
py
|
Python
|
var/spack/repos/builtin/packages/tar/package.py
|
varioustoxins/spack
|
cab0e4cb240f34891a6d753f3393e512f9a99e9a
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/tar/package.py
|
varioustoxins/spack
|
cab0e4cb240f34891a6d753f3393e512f9a99e9a
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 6 |
2022-01-08T08:41:11.000Z
|
2022-03-14T19:28:07.000Z
|
var/spack/repos/builtin/packages/tar/package.py
|
foeroyingur/spack
|
5300cbbb2e569190015c72d0970d25425ea38647
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import re
from spack import *
class Tar(AutotoolsPackage, GNUMirrorPackage):
    """GNU Tar provides the ability to create tar archives, as well as various
    other kinds of manipulation."""
    homepage = "https://www.gnu.org/software/tar/"
    # Path of the release tarball relative to a GNU mirror root.
    gnu_mirror_path = "tar/tar-1.32.tar.gz"
    # Regexes matched against binaries on PATH by `spack external find`.
    executables = [r'^tar$']
    version('1.34', sha256='03d908cf5768cfe6b7ad588c921c6ed21acabfb2b79b788d1330453507647aed')
    version('1.32', sha256='b59549594d91d84ee00c99cf2541a3330fed3a42c440503326dab767f2fbb96c')
    version('1.31', sha256='b471be6cb68fd13c4878297d856aebd50551646f4e3074906b1a74549c40d5a2')
    version('1.30', sha256='4725cc2c2f5a274b12b39d1f78b3545ec9ebb06a6e48e8845e1995ac8513b088')
    version('1.29', sha256='cae466e6e58c7292355e7080248f244db3a4cf755f33f4fa25ca7f9a7ed09af0')
    version('1.28', sha256='6a6b65bac00a127a508533c604d5bf1a3d40f82707d56f20cefd38a05e8237de')
    # iconv is needed for character-set conversion (see configure_args below).
    depends_on('iconv')
    # Build fixes for older releases, mostly for the PGI toolchain.
    patch('tar-pgi.patch', when='@1.29')
    patch('config-pgi.patch', when='@:1.29')
    patch('se-selinux.patch', when='@:1.29')
    patch('argp-pgi.patch', when='@:1.29')
    patch('gnutar-configure-xattrs.patch', when='@1.28')
    # The NVIDIA compilers do not currently support some GNU builtins.
    # Detect this case and use the fallback path.
    patch('nvhpc-1.30.patch', when='@1.30:1.32 %nvhpc')
    patch('nvhpc-1.34.patch', when='@1.34 %nvhpc')
    # Workaround bug where __LONG_WIDTH__ is not defined
    patch('nvhpc-long-width.patch', when='@1.34 %nvhpc')
    @classmethod
    def determine_version(cls, exe):
        # Parse the version out of `tar --version` output, e.g.
        # "tar (GNU tar) 1.34"; returns None if the output is unrecognized.
        output = Executable(exe)('--version', output=str, error=str)
        match = re.search(r'tar \(GNU tar\) (\S+)', output)
        return match.group(1) if match else None
    def configure_args(self):
        # Point configure at the iconv installation chosen by the concretizer.
        return [
            '--with-libiconv-prefix={0}'.format(self.spec['iconv'].prefix),
        ]
| 40.411765 | 94 | 0.704998 |
857f3dca205c3d6550b0f9a26a161ebdb01b88a4
| 377 |
py
|
Python
|
openpype/hosts/standalonepublisher/plugins/publish/collect_app_name.py
|
Tilix4/OpenPype
|
8909bd890170880aa7ec8b673abaa25a9bdf40f2
|
[
"MIT"
] | 1 |
2022-02-08T15:40:41.000Z
|
2022-02-08T15:40:41.000Z
|
openpype/hosts/standalonepublisher/plugins/publish/collect_app_name.py
|
zafrs/OpenPype
|
4b8e7e1ed002fc55b31307efdea70b0feaed474f
|
[
"MIT"
] | 2 |
2022-03-18T01:46:03.000Z
|
2022-03-18T01:46:16.000Z
|
openpype/hosts/standalonepublisher/plugins/publish/collect_app_name.py
|
zafrs/OpenPype
|
4b8e7e1ed002fc55b31307efdea70b0feaed474f
|
[
"MIT"
] | null | null | null |
import pyblish.api
class CollectSAAppName(pyblish.api.ContextPlugin):
    """Record the publishing host's app name and label on the context.

    Later plugins can read ``context.data["appName"]`` and
    ``context.data["appLabel"]`` to identify the host.
    """

    label = "Collect App Name/Label"
    order = pyblish.api.CollectorOrder - 0.5
    hosts = ["standalonepublisher"]

    def process(self, context):
        # Fixed values: this collector only ever runs in the standalone
        # publisher host.
        for key, value in (("appName", "standalone publisher"),
                           ("appLabel", "Standalone publisher")):
            context.data[key] = value
39ebc559707b4d8eaed23c8f4d780d504e8fed8d
| 11,231 |
py
|
Python
|
qiskit/optimization/algorithms/minimum_eigen_optimizer.py
|
MetcalfeTom/qiskit-aqua
|
1718e1612dba38a3f15309c860fe7875914fe8cc
|
[
"Apache-2.0"
] | null | null | null |
qiskit/optimization/algorithms/minimum_eigen_optimizer.py
|
MetcalfeTom/qiskit-aqua
|
1718e1612dba38a3f15309c860fe7875914fe8cc
|
[
"Apache-2.0"
] | null | null | null |
qiskit/optimization/algorithms/minimum_eigen_optimizer.py
|
MetcalfeTom/qiskit-aqua
|
1718e1612dba38a3f15309c860fe7875914fe8cc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""A wrapper for minimum eigen solvers from Aqua to be used within the optimization module."""
from typing import Optional, Any, Union, Tuple, List
import numpy as np
from qiskit import QuantumCircuit, BasicAer, execute
from qiskit.aqua.algorithms import MinimumEigensolver
from qiskit.aqua.operators import WeightedPauliOperator, MatrixOperator, StateFn, DictStateFn
from .optimization_algorithm import OptimizationAlgorithm, OptimizationResult
from ..problems.quadratic_program import QuadraticProgram
from ..converters.quadratic_program_to_ising import QuadraticProgramToIsing
from ..converters.quadratic_program_to_qubo import QuadraticProgramToQubo
from ..exceptions import QiskitOptimizationError
class MinimumEigenOptimizerResult(OptimizationResult):
    """ Minimum Eigen Optimizer Result."""

    def __init__(self, x: Optional[Any] = None, fval: Optional[Any] = None,
                 samples: Optional[Any] = None, results: Optional[Any] = None) -> None:
        # x, fval and results are handled by the base class; samples holds
        # the (bitstring, objective value, probability) triples.
        super().__init__(x, fval, results)
        self._samples = samples

    @property
    def samples(self) -> Any:
        """ returns samples """
        return self._samples

    @samples.setter
    def samples(self, samples: Any) -> None:
        """ set samples """
        self._samples = samples

    def get_correlations(self):
        """ get <Zi x Zj> correlation matrix from samples """
        bitstrings = [sample[0] for sample in self.samples]
        probabilities = [sample[2] for sample in self.samples]
        num_vars = len(bitstrings[0])
        correlations = np.zeros((num_vars, num_vars))
        # For every pair (i, j) with j < i, accumulate the probability mass of
        # agreement minus disagreement between bits i and j.  Only the lower
        # triangle is filled, matching the original behaviour.
        for bits, prob in zip(bitstrings, probabilities):
            for i in range(num_vars):
                for j in range(i):
                    correlations[i, j] += prob if bits[i] == bits[j] else -prob
        return correlations
class MinimumEigenOptimizer(OptimizationAlgorithm):
    """A wrapper for minimum eigen solvers from Qiskit Aqua.

    Makes a minimum eigen solver from Qiskit usable as an optimizer within
    the optimization module.

    The problem may only contain binary or integer variables and linear
    equality constraints thereof.  It is converted to a Quadratic
    Unconstrained Binary Optimization (QUBO) problem by expanding integer
    variables into binary variables and folding the linear equality
    constraints into the objective as weighted penalty terms.  The QUBO is
    then translated into an Ising Hamiltonian whose minimal eigenvector and
    corresponding eigenstate correspond to the optimal solution of the
    original optimization problem, and the provided minimum eigen solver is
    used to approximate that ground state.

    Examples:
        Outline of how to use this class:

    .. code-block::

        from qiskit.aqua.algorithms import QAOA
        from qiskit.optimization.problems import QuadraticProgram
        from qiskit.optimization.algorithms import MinimumEigenOptimizer
        problem = QuadraticProgram()
        # specify problem here
        # specify minimum eigen solver to be used, e.g., QAOA
        qaoa = QAOA(...)
        optimizer = MinimumEigenOptimizer(qaoa)
        result = optimizer.solve(problem)
    """

    def __init__(self, min_eigen_solver: MinimumEigensolver, penalty: Optional[float] = None
                 ) -> None:
        """
        Args:
            min_eigen_solver: The eigen solver to find the ground state of the Hamiltonian.
            penalty: The penalty factor used to scale the penalty terms that
                represent linear equality constraints, or ``None`` for
                applying a default logic (computed during the algorithm).
        """
        self._min_eigen_solver = min_eigen_solver
        self._penalty = penalty

    def get_compatibility_msg(self, problem: QuadraticProgram) -> str:
        """Check whether the given problem can be converted to a QUBO.

        Args:
            problem: The optimization problem to check compatibility.

        Returns:
            An empty string if compatible, otherwise a message explaining the
            incompatibility.
        """
        return QuadraticProgramToQubo.get_compatibility_msg(problem)

    def solve(self, problem: QuadraticProgram) -> MinimumEigenOptimizerResult:
        """Run the wrapped minimum eigen solver on the given problem.

        Args:
            problem: The problem to be solved.

        Returns:
            The result of the optimizer applied to the problem.

        Raises:
            QiskitOptimizationError: If problem not compatible.
        """
        incompatibility = self.get_compatibility_msg(problem)
        if incompatibility:
            raise QiskitOptimizationError('Incompatible problem: {}'.format(incompatibility))

        # Convert to a QUBO; the converter is kept so the solution can be
        # mapped back to the original variables at the end.
        qubo_converter = QuadraticProgramToQubo()
        qubo = qubo_converter.encode(problem)

        # Map the QUBO onto an Ising Hamiltonian plus a constant offset.
        operator, offset = QuadraticProgramToIsing().encode(qubo)

        sense = qubo.objective.sense.value
        if operator.num_qubits > 0:
            # Approximate the ground state and turn every sampled eigenstate
            # into a (bitstring, objective value, probability) triple.
            eigen_result = self._min_eigen_solver.compute_minimum_eigenvalue(operator)
            samples = [(bitstr, sense * (energy + offset), probability)
                       for bitstr, energy, probability
                       in eigenvector_to_solutions(eigen_result.eigenstate, operator)]
            samples.sort(key=lambda sample: sense * sample[1])
            x = samples[0][0]
            fval = samples[0][1]
        else:
            # Empty Hamiltonian: the objective function is the constant offset.
            num_vars = qubo.get_num_binary_vars()
            x = [0] * num_vars
            fval = offset
            samples = [('0' * num_vars, offset, 1.0)]

        # Translate the QUBO-level result back to the original problem.
        result = MinimumEigenOptimizerResult(x, fval, samples, qubo_converter)
        return qubo_converter.decode(result)
def eigenvector_to_solutions(eigenvector: Union[dict, np.ndarray, StateFn],
                             operator: Union[WeightedPauliOperator, MatrixOperator],
                             min_probability: float = 1e-6) -> List[Tuple[str, float, float]]:
    """Convert the eigenvector to the bitstrings and corresponding eigenvalues.

    Args:
        eigenvector: The eigenvector, given as a dict (bitstring -> count or
            probability), a statevector array, or an Aqua ``StateFn``.
        operator: The operator evaluated at each sampled bitstring.
        min_probability: Bitstrings whose sampling probability is below this
            threshold are omitted from the result.

    Examples:
        >>> op = MatrixOperator(numpy.array([[1, 1], [1, -1]]) / numpy.sqrt(2))
        >>> eigenvectors = {'0': 12, '1': 1}
        >>> print(eigenvector_to_solutions(eigenvectors, op))
        [('0', 0.7071067811865475, 0.9230769230769231), ('1', -0.7071067811865475, 0.07692307692307693)]

        >>> op = MatrixOperator(numpy.array([[1, 1], [1, -1]]) / numpy.sqrt(2))
        >>> eigenvectors = numpy.array([1, 1] / numpy.sqrt(2), dtype=complex)
        >>> print(eigenvector_to_solutions(eigenvectors, op))
        [('0', 0.7071067811865475, 0.4999999999999999), ('1', -0.7071067811865475, 0.4999999999999999)]

    Returns:
        For each computational basis state contained in the eigenvector, return the basis
        state as bitstring along with the operator evaluated at that bitstring and the
        probability of sampling this bitstring from the eigenvector.

    Raises:
        TypeError: Invalid Argument
    """
    if isinstance(eigenvector, DictStateFn):
        # The primitive maps bitstrings to amplitudes, which may be complex;
        # the sampling probability is |amplitude|**2.  (The previous val**2
        # yields a complex value for non-real amplitudes, which then breaks
        # the probability comparisons below.)
        eigenvector = {bitstr: abs(val) ** 2 for (bitstr, val) in eigenvector.primitive.items()}
    elif isinstance(eigenvector, StateFn):
        # Any other state function: fall back to its dense statevector.
        eigenvector = eigenvector.to_matrix()

    solutions = []
    if isinstance(eigenvector, dict):
        # Normalize the dict values (counts or unnormalized probabilities).
        all_counts = sum(eigenvector.values())
        # iterate over all samples
        for bitstr, count in eigenvector.items():
            sampling_probability = count / all_counts
            # add the bitstring, if the sampling probability exceeds the threshold
            if sampling_probability > 0:
                if sampling_probability >= min_probability:
                    value = eval_operator_at_bitstring(operator, bitstr)
                    solutions += [(bitstr, value, sampling_probability)]

    elif isinstance(eigenvector, np.ndarray):
        num_qubits = int(np.log2(eigenvector.size))
        probabilities = np.abs(eigenvector * eigenvector.conj())

        # iterate over all states and their sampling probabilities
        for i, sampling_probability in enumerate(probabilities):

            # add the i-th state if the sampling probability exceeds the threshold
            if sampling_probability > 0:
                if sampling_probability >= min_probability:
                    # NOTE(review): the bitstring is reversed, presumably to
                    # match Qiskit's little-endian qubit ordering — confirm.
                    bitstr = '{:b}'.format(i).rjust(num_qubits, '0')[::-1]
                    value = eval_operator_at_bitstring(operator, bitstr)
                    solutions += [(bitstr, value, sampling_probability)]

    else:
        raise TypeError('Unsupported format of eigenvector. Provide a dict or numpy.ndarray.')

    return solutions
def eval_operator_at_bitstring(operator: Union[WeightedPauliOperator, MatrixOperator],
                               bitstr: str) -> float:
    """Evaluate an Aqua operator at a given bitstring.

    This simulates a circuit representing the bitstring. Note that this will not be needed
    with the Operator logic introduced in 0.7.0.

    Args:
        operator: The operator which is evaluated.
        bitstr: The bitstring at which the operator is evaluated.

    Returns:
        The operator evaluated with the quantum state the bitstring describes.
    """
    # TODO check that operator size and bitstr are compatible
    # Prepare the basis state: flip every qubit whose bit is '1'.
    circuit = QuantumCircuit(len(bitstr))
    for qubit, bit in enumerate(bitstr):
        if bit == '1':
            circuit.x(qubit)

    # Obtain the statevector of the prepared basis state ...
    backend = BasicAer.get_backend('statevector_simulator')
    statevector = execute(circuit, backend).result().get_statevector()

    # ... and take the real part of the operator's expectation value there.
    return np.real(operator.evaluate_with_statevector(statevector)[0])
| 41.139194 | 100 | 0.670733 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.