max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
---|---|---|---|---|
love/src/jni/detect_love.py | MikuAuahDark/love2d-android | 116 | 12655553 |
import sys
from os import path
def main(argv):
    if len(argv) > 0:
        if path.isdir(argv[0] + "/love") and path.isfile(argv[0] + "/love/Android.mk"):
            print("yes")
        else:
            print("no")
    else:
        print("no")


if __name__ == "__main__":
    main(sys.argv[1:])
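# Added usage note (not part of the original script). The path below is a
# placeholder; the script prints "yes" only when <arg>/love/Android.mk exists:
#
#     $ python detect_love.py /path/to/love2d-android/love/src/jni
#     yes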
|
intro/matplotlib/examples/plot_gridspec.py | junghun73/Learning | 419 | 12655562 |
"""
GridSpec
=========
An example demoing gridspec
"""
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
plt.figure(figsize=(6, 4))
G = gridspec.GridSpec(3, 3)
axes_1 = plt.subplot(G[0, :])
plt.xticks(())
plt.yticks(())
plt.text(0.5, 0.5, 'Axes 1', ha='center', va='center', size=24, alpha=.5)
axes_2 = plt.subplot(G[1, :-1])
plt.xticks(())
plt.yticks(())
plt.text(0.5, 0.5, 'Axes 2', ha='center', va='center', size=24, alpha=.5)
axes_3 = plt.subplot(G[1:, -1])
plt.xticks(())
plt.yticks(())
plt.text(0.5, 0.5, 'Axes 3', ha='center', va='center', size=24, alpha=.5)
axes_4 = plt.subplot(G[-1, 0])
plt.xticks(())
plt.yticks(())
plt.text(0.5, 0.5, 'Axes 4', ha='center', va='center', size=24, alpha=.5)
axes_5 = plt.subplot(G[-1, -2])
plt.xticks(())
plt.yticks(())
plt.text(0.5, 0.5, 'Axes 5', ha='center', va='center', size=24, alpha=.5)
plt.tight_layout()
plt.show()
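# Added note (not part of the original example): on the 3x3 GridSpec above,
# G[0, :] spans the whole top row, G[1, :-1] covers row 1 minus its last
# column, G[1:, -1] covers the last column of rows 1-2, and G[-1, 0] and
# G[-1, -2] are single cells in the bottom row.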
|
supersqlite/third_party/_apsw/example-code.py | plasticity-admin/supersqlite | 1,520 | 12655644 |
from __future__ import print_function
# some Python 2 and 3 compatibility tweaks
import sys
py3=sys.version_info >= (3, 0)
def inext(v): # next value from iterator
return next(v) if py3 else v.next()
import os
import time
import apsw
###
### Check we have the expected version of apsw and sqlite
###
#@@CAPTURE
print (" Using APSW file",apsw.__file__) # from the extension module
print (" APSW version",apsw.apswversion()) # from the extension module
print (" SQLite lib version",apsw.sqlitelibversion()) # from the sqlite library code
print ("SQLite header version",apsw.SQLITE_VERSION_NUMBER) # from the sqlite header file at compile time
#@@ENDCAPTURE
###
### Opening/creating database
###
connection=apsw.Connection("dbfile")
cursor=connection.cursor()
###
### simple statement @@ example-cursor
###
cursor.execute("create table foo(x,y,z)")
###
### using different types
###
cursor.execute("insert into foo values(?,?,?)", (1, 1.1, None)) # integer, float/real, Null
cursor.execute("insert into foo(x) values(?)", ("abc", )) # string (note trailing comma to ensure tuple!)
cursor.execute("insert into foo(x) values(?)", # a blob (binary data)
(b"abc\xff\xfe" if py3 else buffer("abc\xff\xfe"), ))
###
### multiple statements
###
cursor.execute("delete from foo; insert into foo values(1,2,3); create table bar(a,b,c) ; insert into foo values(4, 'five', 6.0)")
###
### iterator
###
for x,y,z in cursor.execute("select x,y,z from foo"):
print (cursor.getdescription()) # shows column names and declared types
print (x,y,z)
###
### iterator - multiple statements
###
for m,n,o in cursor.execute("select x,y,z from foo ; select a,b,c from bar"):
print (m,n,o)
###
### bindings - sequence
###
cursor.execute("insert into foo values(?,?,?)", (7, 'eight', False))
cursor.execute("insert into foo values(?,?,?1)", ('one', 'two')) # nb sqlite does the numbers from 1
###
### bindings - dictionary
###
cursor.execute("insert into foo values(:alpha, :beta, :gamma)", {'alpha': 1, 'beta': 2, 'gamma': 'three'})
###
### tracing execution @@ example-exectrace
###
def mytrace(cursor, statement, bindings):
"Called just before executing each statement"
print ("SQL:",statement)
if bindings:
print ("Bindings:",bindings)
return True # if you return False then execution is aborted
#@@CAPTURE
cursor.setexectrace(mytrace)
cursor.execute("drop table bar ; create table bar(x,y,z); select * from foo where x=?", (3,))
#@@ENDCAPTURE
###
### tracing results @@ example-rowtrace
###
def rowtrace(cursor, row):
"""Called with each row of results before they are handed off. You can return None to
cause the row to be skipped or a different set of values to return"""
print ("Row:", row)
return row
#@@CAPTURE
cursor.setrowtrace(rowtrace)
for row in cursor.execute("select x,y from foo where x>3"):
pass
#@@ENDCAPTURE
# Clear tracers
cursor.setrowtrace(None)
cursor.setexectrace(None)
###
### executemany
###
# (This will work correctly with multiple statements, as well as statements that
# return data. The second argument can be anything that is iterable.)
cursor.executemany("insert into foo (x) values(?)", ( [1], [2], [3] ) )
# You can also use it for statements that return data
for row in cursor.executemany("select * from foo where x=?", ( [1], [2], [3] ) ):
print (row)
###
### defining your own functions @@ scalar-example
###
def ilove7(*args):
"a scalar function"
print ("ilove7 got",args,"but I love 7")
return 7
connection.createscalarfunction("seven", ilove7)
#@@CAPTURE
for row in cursor.execute("select seven(x,y) from foo"):
print (row)
#@@ENDCAPTURE
###
### aggregate functions are more complex @@ aggregate-example
###
# Here we return the longest item when represented as a string.
class longest:
def __init__(self):
self.longest=""
def step(self, *args):
for arg in args:
if len( str(arg) ) > len (self.longest):
self.longest=str(arg)
def final(self):
return self.longest
# Under Python 2.3 remove the following line and add
# factory=classmethod(factory) at the end
@classmethod
def factory(cls):
return cls(), cls.step, cls.final
#@@CAPTURE
connection.createaggregatefunction("longest", longest.factory)
for row in cursor.execute("select longest(x,y) from foo"):
print (row)
#@@ENDCAPTURE
###
### Defining collations. @@ collation-example
###
# The default sorting mechanisms don't understand numbers at the end of strings
# so here we define a collation that does
cursor.execute("create table s(str)")
cursor.executemany("insert into s values(?)",
( ["file1"], ["file7"], ["file17"], ["file20"], ["file3"] ) )
#@@CAPTURE
for row in cursor.execute("select * from s order by str"):
print (row)
#@@ENDCAPTURE
def strnumcollate(s1, s2):
# return -1 if s1<s2, +1 if s1>s2 else 0
# split values into two parts - the head and the numeric tail
values=[s1, s2]
for vn,v in enumerate(values):
for i in range(len(v), 0, -1):
if v[i-1] not in "01234567890":
break
try:
v=( v[:i], int(v[i:]) )
except ValueError:
v=( v[:i], None )
values[vn]=v
# compare
if values[0]<values[1]:
return -1
if values[0]>values[1]:
return 1
return 0
connection.createcollation("strnum", strnumcollate)
#@@CAPTURE
for row in cursor.execute("select * from s order by str collate strnum"):
print (row)
#@@ENDCAPTURE
###
### Authorizer (eg if you want to control what user supplied SQL can do) @@ authorizer-example
###
def authorizer(operation, paramone, paramtwo, databasename, triggerorview):
"""Called when each operation is prepared. We can return SQLITE_OK, SQLITE_DENY or
SQLITE_IGNORE"""
# find the operation name
print (apsw.mapping_authorizer_function[operation], paramone, paramtwo, databasename, triggerorview)
if operation==apsw.SQLITE_CREATE_TABLE and paramone.startswith("private"):
return apsw.SQLITE_DENY # not allowed to create tables whose names start with private
return apsw.SQLITE_OK # always allow
connection.setauthorizer(authorizer)
#@@CAPTURE
cursor.execute("insert into s values('foo')")
cursor.execute("select str from s limit 1")
#@@ENDCAPTURE
# Cancel authorizer
connection.setauthorizer(None)
###
### progress handler (SQLite 3 experimental feature) @@ example-progress-handler
###
# something to give us large numbers of random numbers
import random
def randomintegers(howmany):
for i in range(howmany):
yield (random.randint(0,9999999999),)
# create a table with 100 random numbers
cursor.execute("begin ; create table bigone(x)")
cursor.executemany("insert into bigone values(?)", randomintegers(100))
cursor.execute("commit")
# display an ascii spinner
_phcount=0
_phspinner="|/-\\"
def progresshandler():
global _phcount
sys.stdout.write(_phspinner[_phcount%len(_phspinner)]+chr(8)) # chr(8) is backspace
sys.stdout.flush()
_phcount+=1
time.sleep(0.1) # deliberate delay so we can see the spinner (SQLite is too fast otherwise!)
return 0 # returning non-zero aborts
# register progresshandler every 20 instructions
connection.setprogresshandler(progresshandler, 20)
# see it in action - sorting 100 numbers to find the biggest takes a while
print ("spinny thing -> ", end="")
for i in cursor.execute("select max(x) from bigone"):
print("\n", i, sep="", end="")
sys.stdout.flush()
connection.setprogresshandler(None)
###
### commit hook (SQLite3 experimental feature) @@ example-commithook
###
def mycommithook():
print ("in commit hook")
hour=time.localtime()[3]
if hour<8 or hour>17:
print ("no commits out of hours")
return 1 # abort commits outside of 8am through 6pm
print ("commits okay at this time")
return 0 # let commit go ahead
#@@CAPTURE
connection.setcommithook(mycommithook)
try:
cursor.execute("begin; create table example(x,y,z); insert into example values (3,4,5) ; commit")
except apsw.ConstraintError:
print ("commit was not allowed")
connection.setcommithook(None)
#@@ENDCAPTURE
###
### update hook @@ example-updatehook
###
def myupdatehook(type, databasename, tablename, rowid):
print ("Updated: %s database %s, table %s, row %d" % (
apsw.mapping_authorizer_function[type], databasename, tablename, rowid))
#@@CAPTURE
connection.setupdatehook(myupdatehook)
cursor.execute("insert into s values(?)", ("file93",))
cursor.execute("update s set str=? where str=?", ("file94", "file93"))
cursor.execute("delete from s where str=?", ("file94",))
connection.setupdatehook(None)
#@@ENDCAPTURE
###
### Blob I/O @@ example-blobio
###
cursor.execute("create table blobby(x,y)")
# Add a blob we will fill in later
cursor.execute("insert into blobby values(1,zeroblob(10000))")
# Or as a binding
cursor.execute("insert into blobby values(2,?)", (apsw.zeroblob(20000),))
# Open a blob for writing. We need to know the rowid
rowid=inext(cursor.execute("select ROWID from blobby where x=1"))[0]
blob=connection.blobopen("main", "blobby", "y", rowid, 1) # 1 is for read/write
blob.write(b"hello world")
blob.seek(2000)
blob.write(b"hello world, again")
blob.close()
###
### Virtual tables @@ example-vtable
###
# This virtual table stores information about files in a set of
# directories so you can execute SQL queries
def getfiledata(directories):
columns=None
data=[]
counter=1
for directory in directories:
for f in os.listdir(directory):
if not os.path.isfile(os.path.join(directory,f)):
continue
counter+=1
st=os.stat(os.path.join(directory,f))
if columns is None:
columns=["rowid", "name", "directory"]+[x for x in dir(st) if x.startswith("st_")]
data.append( [counter, f, directory] + [getattr(st,x) for x in columns[3:]] )
return columns, data
# This gets registered with the Connection
class Source:
def Create(self, db, modulename, dbname, tablename, *args):
columns,data=getfiledata([eval(a.replace("\\", "\\\\")) for a in args]) # eval strips off layer of quotes
schema="create table foo("+','.join(["'%s'" % (x,) for x in columns[1:]])+")"
return schema,Table(columns,data)
Connect=Create
# Represents a table
class Table:
def __init__(self, columns, data):
self.columns=columns
self.data=data
def BestIndex(self, *args):
return None
def Open(self):
return Cursor(self)
def Disconnect(self):
pass
Destroy=Disconnect
# Represents a cursor
class Cursor:
def __init__(self, table):
self.table=table
def Filter(self, *args):
self.pos=0
def Eof(self):
return self.pos>=len(self.table.data)
def Rowid(self):
return self.table.data[self.pos][0]
def Column(self, col):
return self.table.data[self.pos][1+col]
def Next(self):
self.pos+=1
def Close(self):
pass
# Register the module as filesource
connection.createmodule("filesource", Source())
# Arguments to module - all directories in sys.path
sysdirs=",".join(["'%s'" % (x,) for x in sys.path[1:] if len(x) and os.path.isdir(x)])
cursor.execute("create virtual table sysfiles using filesource("+sysdirs+")")
#@@CAPTURE
# Which 3 files are the biggest?
for size,directory,file in cursor.execute("select st_size,directory,name from sysfiles order by st_size desc limit 3"):
print (size,file,directory)
#@@ENDCAPTURE
# Which 3 files are the oldest?
#@@CAPTURE
for ctime,directory,file in cursor.execute("select st_ctime,directory,name from sysfiles order by st_ctime limit 3"):
print (ctime,file,directory)
#@@ENDCAPTURE
### @@ example-vfs
### A VFS that "obfuscates" the database file contents. The scheme
### used is to xor all bytes with 0xa5. This scheme honours that used
### for MAPI and SQL Server.
###
def encryptme(data):
if not data: return data
if py3:
return bytes([x^0xa5 for x in data])
return "".join([chr(ord(x)^0xa5) for x in data])
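# Added note (not in the original example): XOR-ing a byte twice with the same
# value (0xa5 here) is the identity, so encryptme() is its own inverse:
#     encryptme(encryptme(b"hello")) == b"hello"   # Python 3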
# Inheriting from a base of "" means the default vfs
class ObfuscatedVFS(apsw.VFS):
def __init__(self, vfsname="obfu", basevfs=""):
self.vfsname=vfsname
self.basevfs=basevfs
apsw.VFS.__init__(self, self.vfsname, self.basevfs)
# We want to return our own file implementation, but also
# want it to inherit
def xOpen(self, name, flags):
# We can look at uri parameters
if isinstance(name, apsw.URIFilename):
#@@CAPTURE
print ("fast is", name.uri_parameter("fast"))
print ("level is", name.uri_int("level", 3))
print ("warp is", name.uri_boolean("warp", False))
print ("notpresent is", name.uri_parameter("notpresent"))
#@@ENDCAPTURE
return ObfuscatedVFSFile(self.basevfs, name, flags)
# The file implementation where we override xRead and xWrite to call our
# encryption routine
class ObfuscatedVFSFile(apsw.VFSFile):
def __init__(self, inheritfromvfsname, filename, flags):
apsw.VFSFile.__init__(self, inheritfromvfsname, filename, flags)
def xRead(self, amount, offset):
return encryptme(super(ObfuscatedVFSFile, self).xRead(amount, offset))
def xWrite(self, data, offset):
super(ObfuscatedVFSFile, self).xWrite(encryptme(data), offset)
# To register the VFS we just instantiate it
obfuvfs=ObfuscatedVFS()
# Lets see what vfs are now available?
#@@CAPTURE
print (apsw.vfsnames())
#@@ENDCAPTURE
# Make an obfuscated db, passing in some URI parameters
obfudb=apsw.Connection("file:myobfudb?fast=speed&level=7&warp=on",
flags=apsw.SQLITE_OPEN_READWRITE | apsw.SQLITE_OPEN_CREATE | apsw.SQLITE_OPEN_URI,
vfs=obfuvfs.vfsname)
# Check it works
obfudb.cursor().execute("create table foo(x,y); insert into foo values(1,2)")
# Check it really is obfuscated on disk
#@@CAPTURE
print (open("myobfudb", "rb").read()[:20])
#@@ENDCAPTURE
# And unobfuscating it
#@@CAPTURE
print (encryptme(open("myobfudb", "rb").read()[:20]))
#@@ENDCAPTURE
# Tidy up
obfudb.close()
os.remove("myobfudb")
###
### Limits @@example-limit
###
#@@CAPTURE
# Print some limits
for limit in ("LENGTH", "COLUMN", "ATTACHED"):
name="SQLITE_LIMIT_"+limit
maxname="SQLITE_MAX_"+limit # compile time
orig=connection.limit(getattr(apsw, name))
print (name, orig)
# To get the maximum, set to 0x7fffffff and then read value back
connection.limit(getattr(apsw, name), 0x7fffffff)
max=connection.limit(getattr(apsw, name))
print (maxname, max)
# Set limit for size of a string
cursor.execute("create table testlimit(s)")
cursor.execute("insert into testlimit values(?)", ( "x"*1024, )) # 1024 char string
connection.limit(apsw.SQLITE_LIMIT_LENGTH, 1023) # limit is now 1023
try:
cursor.execute("insert into testlimit values(?)", ( "y"*1024, ))
print ("string exceeding limit was inserted")
except apsw.TooBigError:
print ("Caught toobig exception")
connection.limit(apsw.SQLITE_LIMIT_LENGTH, 0x7fffffff)
#@@ENDCAPTURE
###
### Backup to memory @@example-backup
###
# We will copy the disk database into a memory database
memcon=apsw.Connection(":memory:")
# Copy into memory
with memcon.backup("main", connection, "main") as backup:
backup.step() # copy whole database in one go
# There will be no disk accesses for this query
for row in memcon.cursor().execute("select * from s"):
pass
###
### Shell @@ example-shell
###
# Here we use the shell to do a csv export providing the existing db
# connection
# Export to a StringIO
if py3:
import io
else:
import StringIO as io
output=io.StringIO()
shell=apsw.Shell(stdout=output, db=connection)
# How to execute a dot command
shell.process_command(".mode csv")
shell.process_command(".headers on")
# How to execute SQL
shell.process_sql("create table csvtest(col1,col2); insert into csvtest values(3,4); insert into csvtest values('a b', NULL)")
# Let the shell figure out SQL vs dot command
shell.process_complete_line("select * from csvtest")
# Verify output
#@@CAPTURE
print (output.getvalue())
#@@ENDCAPTURE
###
### Statistics @@example-status
###
#@@CAPTURE
print ("SQLite memory usage current %d max %d" % apsw.status(apsw.SQLITE_STATUS_MEMORY_USED))
#@@ENDCAPTURE
###
### Cleanup
###
# We can close connections manually (useful if you want to catch exceptions)
# but you don't have to
connection.close(True) # force it since we want to exit
# Delete database - we don't need it any more
os.remove("dbfile")
|
tests/docker_autostop_test.py | AnjanaSuraj/docker-custodian | 377 | 12655659 |
try:
from unittest import mock
except ImportError:
import mock
from docker_custodian.docker_autostop import (
build_container_matcher,
get_opts,
has_been_running_since,
main,
stop_container,
stop_containers,
)
def test_stop_containers(mock_client, container, now):
matcher = mock.Mock()
mock_client.containers.return_value = [container]
mock_client.inspect_container.return_value = container
stop_containers(mock_client, now, matcher, False)
matcher.assert_called_once_with('container_name')
mock_client.stop.assert_called_once_with(container['Id'])
def test_stop_container(mock_client):
id = 'asdb'
stop_container(mock_client, id)
mock_client.stop.assert_called_once_with(id)
def test_build_container_matcher():
prefixes = ['one_', 'two_']
matcher = build_container_matcher(prefixes)
assert matcher('one_container')
assert matcher('two_container')
assert not matcher('three_container')
assert not matcher('one')
def test_has_been_running_since_true(container, later_time):
assert has_been_running_since(container, later_time)
def test_has_been_running_since_false(container, earlier_time):
assert not has_been_running_since(container, earlier_time)
@mock.patch('docker_custodian.docker_autostop.build_container_matcher',
autospec=True)
@mock.patch('docker_custodian.docker_autostop.stop_containers',
autospec=True)
@mock.patch('docker_custodian.docker_autostop.get_opts',
autospec=True)
@mock.patch('docker_custodian.docker_autostop.docker', autospec=True)
def test_main(
mock_docker,
mock_get_opts,
mock_stop_containers,
mock_build_matcher
):
mock_get_opts.return_value.timeout = 30
main()
mock_get_opts.assert_called_once_with()
mock_build_matcher.assert_called_once_with(
mock_get_opts.return_value.prefix)
mock_stop_containers.assert_called_once_with(
mock.ANY,
mock_get_opts.return_value.max_run_time,
mock_build_matcher.return_value,
mock_get_opts.return_value.dry_run)
def test_get_opts_with_defaults():
opts = get_opts(args=['--prefix', 'one', '--prefix', 'two'])
assert opts.timeout == 60
assert opts.dry_run is False
assert opts.prefix == ['one', 'two']
assert opts.max_run_time is None
def test_get_opts_with_args(now):
with mock.patch(
'docker_custodian.docker_autostop.timedelta_type',
autospec=True
) as mock_timedelta_type:
opts = get_opts(args=['--prefix', 'one', '--max-run-time', '24h'])
assert opts.max_run_time == mock_timedelta_type.return_value
mock_timedelta_type.assert_called_once_with('24h')
|
ranger/plugins/patch/shutil_generatorized.py | bechampion/rnvimr | 495 | 12655667 |
"""
Patch ranger.ext.shutil_generatorized
"""
import os
from shutil import _basename
from ranger.ext import shutil_generatorized
from ranger.ext.safe_path import get_safe_path
def wrap_move(client):
"""
CopyLoader with do_cut parameter will invoke this method.
Wrap low-level move method to save information in loaded buffers.
:param client object: Object of attached neovim session
"""
def move(src, dst, overwrite, make_safe_path=get_safe_path):
real_dst = os.path.join(dst, _basename(src))
if not overwrite:
real_dst = make_safe_path(real_dst)
yield from raw_move(src, dst, overwrite, make_safe_path)
client.move_buf(src, real_dst)
raw_move = shutil_generatorized.move
shutil_generatorized.move = move
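# Added sketch (not part of the plugin): the same wrap-and-replace pattern used
# above, applied to a plain module attribute so the monkey-patching is easier to
# follow. The math example is illustrative only.
if __name__ == "__main__":
    import math
    raw_sqrt = math.sqrt              # keep a handle on the original

    def noisy_sqrt(x):
        print("sqrt(%r)" % x)         # extra behaviour added by the wrapper
        return raw_sqrt(x)            # delegate to the original

    math.sqrt = noisy_sqrt            # the module attribute now points at the wrapper
    math.sqrt(2)                      # prints "sqrt(2)" and returns ~1.4142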
|
OpenDataCatalog/api/rest.py | runonthespot/Open-Data-Catalog | 105 | 12655687 |
from django.http import HttpResponse
from django.contrib.auth import authenticate
import re
import base64
def http_unauth():
res = HttpResponse("Unauthorized")
res.status_code = 401
res['WWW-Authenticate'] = 'Basic realm="Secure Area"'
return res
def match_first(regx, strg):
m = re.match(regx, strg)
if (m == None):
return None
else:
return m.group(1)
def decode_auth(strg):
if (strg == None):
return None
else:
m = re.match(r'([^:]*)\:(.*)', base64.decodestring(strg))
if (m != None):
return (m.group(1), m.group(2))
else:
return None
def parse_auth_string(authstr):
auth = decode_auth(match_first('Basic (.*)', authstr))
if (auth == None):
return None
else:
return authenticate(username = auth[0], password = auth[1])
def login_required(view_f):
def wrapperf(request, *args, **kwargs):
if (request.META.has_key('HTTP_AUTHORIZATION')):
auth = request.META['HTTP_AUTHORIZATION']
user = parse_auth_string(auth)
if (user != None):
request.user = user
return view_f(request, *args, **kwargs)
return http_unauth()
return wrapperf
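# Added sketch (not part of the original module): the header value that
# decode_auth()/parse_auth_string() expect. The credentials are placeholders.
if __name__ == "__main__":
    import base64
    creds = base64.b64encode(b"alice:secret").decode("ascii")
    print("Basic " + creds)   # what a client sends in the HTTP_AUTHORIZATION header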
|
source/models/dssm.py | richardbaihe/conversation | 299 | 12655714 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
################################################################################
#
# Copyright (c) 2019 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
File: source/models/dssm.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import clip_grad_norm_
from source.models.base_model import BaseModel
from source.modules.embedder import Embedder
from source.modules.encoders.rnn_encoder import RNNEncoder
from source.utils.misc import Pack
class DSSM(BaseModel):
"""
DSSM
"""
def __init__(self,
src_vocab_size,
tgt_vocab_size,
embed_size,
hidden_size,
padding_idx=None,
num_layers=1,
bidirectional=True,
tie_embedding=False,
margin=None,
with_project=False,
dropout=0.0,
use_gpu=False):
super(DSSM, self).__init__()
self.src_vocab_size = src_vocab_size
self.tgt_vocab_size = tgt_vocab_size
self.embed_size = embed_size
self.hidden_size = hidden_size
self.padding_idx = padding_idx
self.num_layers = num_layers
self.bidirectional = bidirectional
self.tie_embedding = tie_embedding
self.dropout = dropout
self.use_gpu = use_gpu
self.margin = margin
self.with_project = with_project
src_embedder = Embedder(num_embeddings=self.src_vocab_size,
embedding_dim=self.embed_size,
padding_idx=self.padding_idx)
self.src_encoder = RNNEncoder(input_size=self.embed_size,
hidden_size=self.hidden_size,
embedder=src_embedder,
num_layers=self.num_layers,
bidirectional=self.bidirectional,
dropout=self.dropout)
if self.with_project:
self.project = nn.Linear(in_features=self.hidden_size,
out_features=self.hidden_size,
bias=False)
if self.tie_embedding:
assert self.src_vocab_size == self.tgt_vocab_size
tgt_embedder = src_embedder
else:
tgt_embedder = Embedder(num_embeddings=self.tgt_vocab_size,
embedding_dim=self.embed_size,
padding_idx=self.padding_idx)
self.tgt_encoder = RNNEncoder(input_size=self.embed_size,
hidden_size=self.hidden_size,
embedder=tgt_embedder,
num_layers=self.num_layers,
bidirectional=self.bidirectional,
dropout=self.dropout)
if self.use_gpu:
self.cuda()
def score(self, inputs):
"""
score
"""
src_inputs = inputs.src[0][:, 1:-1], inputs.src[1]-2
tgt_inputs = inputs.tgt[0][:, 1:-1], inputs.tgt[1]-2
src_hidden = self.src_encoder(src_inputs)[1][-1]
if self.with_project:
src_hidden = self.project(src_hidden)
tgt_hidden = self.tgt_encoder(tgt_inputs)[1][-1]
logits = (src_hidden * tgt_hidden).sum(dim=-1)
scores = torch.sigmoid(logits)
return scores
def forward(self, src_inputs, pos_tgt_inputs, neg_tgt_inputs,
src_hidden=None, tgt_hidden=None):
"""
forward
"""
outputs = Pack()
src_hidden = self.src_encoder(src_inputs, src_hidden)[1][-1]
if self.with_project:
src_hidden = self.project(src_hidden)
pos_tgt_hidden = self.tgt_encoder(pos_tgt_inputs, tgt_hidden)[1][-1]
neg_tgt_hidden = self.tgt_encoder(neg_tgt_inputs, tgt_hidden)[1][-1]
pos_logits = (src_hidden * pos_tgt_hidden).sum(dim=-1)
neg_logits = (src_hidden * neg_tgt_hidden).sum(dim=-1)
outputs.add(pos_logits=pos_logits, neg_logits=neg_logits)
return outputs
def collect_metrics(self, outputs):
"""
collect_metrics
"""
pos_logits = outputs.pos_logits
pos_target = torch.ones_like(pos_logits)
neg_logits = outputs.neg_logits
neg_target = torch.zeros_like(neg_logits)
pos_loss = F.binary_cross_entropy_with_logits(
pos_logits, pos_target, reduction='none')
neg_loss = F.binary_cross_entropy_with_logits(
neg_logits, neg_target, reduction='none')
loss = (pos_loss + neg_loss).mean()
pos_acc = torch.sigmoid(pos_logits).gt(0.5).float().mean()
neg_acc = torch.sigmoid(neg_logits).lt(0.5).float().mean()
margin = (torch.sigmoid(pos_logits) - torch.sigmoid(neg_logits)).mean()
metrics = Pack(loss=loss, pos_acc=pos_acc,
neg_acc=neg_acc, margin=margin)
num_samples = pos_target.size(0)
metrics.add(num_samples=num_samples)
return metrics
def iterate(self, inputs, optimizer=None, grad_clip=None, is_training=True, epoch=-1):
"""
iterate
"""
src_inputs = inputs.src[0][:, 1:-1], inputs.src[1]-2
pos_tgt_inputs = inputs.tgt[0][:, 1:-1], inputs.tgt[1]-2
neg_idx = torch.arange(src_inputs[1].size(0)).type_as(src_inputs[1])
neg_idx = (neg_idx + 1) % neg_idx.size(0)
neg_tgt_inputs = pos_tgt_inputs[0][neg_idx], pos_tgt_inputs[1][neg_idx]
outputs = self.forward(src_inputs, pos_tgt_inputs, neg_tgt_inputs)
metrics = self.collect_metrics(outputs)
if is_training:
assert optimizer is not None
optimizer.zero_grad()
loss = metrics.loss
loss.backward()
if grad_clip is not None and grad_clip > 0:
clip_grad_norm_(parameters=self.parameters(),
max_norm=grad_clip)
optimizer.step()
return metrics
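# Added sketch (not part of the original model): iterate() builds its negatives
# in-batch by pairing each source with the next example's target, via the index
# shift shown here. Batch size 4 is illustrative.
if __name__ == "__main__":
    import torch
    neg_idx = (torch.arange(4) + 1) % 4
    print(neg_idx)   # tensor([1, 2, 3, 0]) -- example i uses example i+1 as its negative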
|
lib/mirror/retrieval/compute_overlap_benchmark.py | Yzhbuaa/DAGSfM | 255 | 12655726 |
#!/usr/bin/env python
# Copyright 2017, <NAME>, HKUST.
# compute mAP score for overlap benchmark
import sys, os
sys.path.append('..')
from tools.common import read_list
def compute_overlap_ap(gt_list, res_list, d):
gt_size = len(gt_list)
old_recall = 0.0
old_precision = 1.0
ap = 0.0
intersect_size = 0
for i in xrange(len(res_list)):
if res_list[i] in gt_list:
intersect_size = intersect_size + 1
recall = float(intersect_size) / gt_size
precision = float(intersect_size) / (i + 1.0)
ap += (recall - old_recall) * (old_precision + precision) / 2.0
old_precision = precision
old_recall = recall
return ap
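# Added worked example (not in the original file): with gt_list = [1, 2] and
# res_list = [1, 3, 2], hits occur at ranks 1 and 3, so
#     ap = (0.5 - 0.0) * (1.0 + 1.0) / 2 + (1.0 - 0.5) * (1.0 + 2/3) / 2 ~= 0.917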
def compute_GL3D_map(image_list, overlap_result, ground_truth_file):
image_filenames = read_list(image_list)
overlap_pair_lines = read_list(overlap_result)
gt_lines = read_list(ground_truth_file)
gt_vec = [[] for i in image_filenames]
for line in gt_lines:
split_items = line.split()
index1 = int(split_items[0])
index2 = int(split_items[1])
if index1 == index2: # don't count (i, i) pair
continue
gt_vec[index1].append(index2)
res_vec = [[] for i in image_filenames]
for line in overlap_pair_lines:
split_items = line.split()
index1 = int(split_items[0])
index2 = int(split_items[1])
if index1 == index2: # don't count (i, i) pair
continue
res_vec[int(index1)].append(int(index2))
num_test = 0
mAP = 0
for i in xrange(len(image_filenames)):
if len(gt_vec[i]) != 0:
num_test = num_test + 1
ap = compute_overlap_ap(gt_vec[i], res_vec[i], i)
#print(i, image_filenames[i], ap, i)
mAP = mAP + ap
mAP = mAP / num_test
print(mAP, num_test)
return mAP
if __name__ == '__main__':
if len(sys.argv) < 4:
print(sys.argv[0], '<test_image_list> <overlap_result> <ground_truth_pairs>')
exit()
image_list = sys.argv[1]
overlap_result = sys.argv[2]
ground_truth_file = sys.argv[3]
compute_GL3D_map(image_list, overlap_result, ground_truth_file)
|
recipes/Python/546526_Page_through_iterable_N_items_at/recipe-546526.py | tdiprima/code | 2,023 | 12655728 |
class groupcount(object):
"""Accept a (possibly infinite) iterable and yield a succession
of sub-iterators from it, each of which will yield N values.
>>> gc = groupcount('abcdefghij', 3)
>>> for subgroup in gc:
... for item in subgroup:
... print item,
... print
...
a b c
d e f
g h i
j
"""
def __init__(self, iterable, n=10):
self.it = iter(iterable)
self.n = n
def __iter__(self):
return self
def next(self):
return self._group(self.it.next())
def _group(self, ondeck):
yield ondeck
for i in xrange(1, self.n):
yield self.it.next()
|
inselect/tests/gui/test_app.py | NaturalHistoryMuseum/inselect | 128 | 12655730 |
import locale
import unittest
from mock import patch
from pathlib import Path
from PyQt5.QtCore import QLocale
from PyQt5.QtWidgets import QApplication
from inselect.gui.app import main
from inselect.gui.main_window import MainWindow
TESTDATA = Path(__file__).parent.parent / 'test_data'
class TestApp(unittest.TestCase):
"""Start and exit the application
"""
@patch.object(QApplication, 'exec_', return_value=0)
def test_app(self, mock_exec_):
"User starts the application"
self.assertRaises(SystemExit, main, [])
self.assertTrue(mock_exec_.called)
@patch.object(QApplication, 'exec_', return_value=0)
@patch.object(MainWindow, 'open_file')
def test_app_load_file(self, mock_open_file, mock_exec_):
"User starts the application with a file"
path = str(TESTDATA / 'shapes.inselect')
self.assertRaises(SystemExit, main, [path])
self.assertTrue(mock_exec_.called)
mock_open_file.assert_called_once_with(Path(path))
@patch.object(QApplication, 'exec_', return_value=0)
@patch.object(QLocale, 'setDefault')
@patch.object(locale, 'setlocale')
def test_app_set_locale(self, mock_setlocale, mock_set_default, mock_exec_):
"User starts the application with a non-default locale"
# Python's locale.setlocale raises an exception if the locale is
# unrecognised, so it is mocked.
loc = 'ja_JP'
self.assertRaises(SystemExit, main, ['-l', loc])
self.assertTrue(mock_exec_.called)
mock_set_default.assert_called_once_with(QLocale(loc))
# Other actions inside main might cause setlocale to be called so
# should not assert number of calls.
mock_setlocale.assert_any_call(locale.LC_ALL, loc)
if __name__ == '__main__':
unittest.main()
|
selim_sef/tools/config.py | ktncktnc/SpaceNet_Off_Nadir_Solutions | 164 | 12655738 |
import json
DEFAULTS = {
"arch": "fpn_resnext",
"segnetwork": {
"backbone_arch": "resnext101",
"seg_classes": 2,
"ignore_index": 255,
},
"network": {
},
"optimizer": {
"batch_size": 256,
"freeze_first_epoch": False,
"type": "SGD", # supported: SGD, Adam
"momentum": 0.9,
"weight_decay": 0,
"clip": 1.,
"learning_rate": 0.1,
"classifier_lr": -1., # If -1 use same learning rate as the rest of the network
"nesterov": True,
"schedule": {
"type": "constant", # supported: constant, step, multistep, exponential, linear, poly
"mode": "epoch", # supported: epoch, step
"epochs": 10,
"params": {}
}
},
"input": {
"scale_train": -1, # If -1 do not scale
"random_vh_shift": 0,
"crop_train": 224,
"color_jitter_train": False,
"lighting_train": False,
"random_crop": [202, 202],
"crop_size_range": [1., 1.],
"rescale_prob": 0.0,
"mask_downscale_factor": 1,
"padding_block": 0,
"padding_mode": 'reflect',
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225]
}
}
def _merge(src, dst):
    for k, v in src.items():
        if k in dst:
            if isinstance(v, dict):
                _merge(src[k], dst[k])
        else:
            dst[k] = v


def load_config(config_file, defaults=DEFAULTS):
    with open(config_file, "r") as fd:
        config = json.load(fd)
    _merge(defaults, config)
    return config
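# Added usage sketch (not part of the original file): _merge() fills in keys that
# are missing from the user config while leaving user-set values untouched.
if __name__ == "__main__":
    user_cfg = {"optimizer": {"type": "Adam"}}
    _merge({"optimizer": {"type": "SGD", "momentum": 0.9}}, user_cfg)
    print(user_cfg)   # {'optimizer': {'type': 'Adam', 'momentum': 0.9}}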
|
web/py-collaborator/Logger.py | H1d3r/Penetration-Testing-Tools-1 | 1,139 | 12655764 |
import sys
class Logger:
    @staticmethod
    def _out(x):
        sys.stderr.write(str(x) + u'\n')

    @staticmethod
    def dbg(x):
        sys.stderr.write(u'[dbg] ' + str(x) + u'\n')

    @staticmethod
    def out(x):
        Logger._out(u'[.] ' + str(x))

    @staticmethod
    def info(x):
        Logger._out(u'[?] ' + str(x))

    @staticmethod
    def err(x):
        sys.stderr.write(u'[!] ' + str(x) + u'\n')

    @staticmethod
    def warn(x):
        Logger._out(u'[-] ' + str(x))

    @staticmethod
    def ok(x):
        Logger._out(u'[+] ' + str(x))
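# Added usage sketch (not part of the original module): every helper prefixes the
# message and writes it to stderr.
if __name__ == "__main__":
    Logger.ok("connected")     # -> "[+] connected"
    Logger.err("timed out")    # -> "[!] timed out"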
|
sanic/application/logo.py | Varriount/sanic | 1,883 | 12655791 |
import re
import sys
from os import environ
BASE_LOGO = """
Sanic
Build Fast. Run Fast.
"""
COFFEE_LOGO = """\033[48;2;255;13;104m \033[0m
\033[38;2;255;255;255;48;2;255;13;104m ββββββββββ \033[0m
\033[38;2;255;255;255;48;2;255;13;104m ββ βββββ \033[0m
\033[38;2;255;255;255;48;2;255;13;104m βββββββββββ β \033[0m
\033[38;2;255;255;255;48;2;255;13;104m ββββββββββββββ \033[0m
\033[38;2;255;255;255;48;2;255;13;104m βββββββββ \033[0m
\033[48;2;255;13;104m \033[0m
Dark roast. No sugar."""
COLOR_LOGO = """\033[48;2;255;13;104m \033[0m
\033[38;2;255;255;255;48;2;255;13;104m ββββ βββββ ββ \033[0m
\033[38;2;255;255;255;48;2;255;13;104m ββ \033[0m
\033[38;2;255;255;255;48;2;255;13;104m ββββββββ ββββ \033[0m
\033[38;2;255;255;255;48;2;255;13;104m ββ \033[0m
\033[38;2;255;255;255;48;2;255;13;104m ββββ βββββββββ \033[0m
\033[48;2;255;13;104m \033[0m
Build Fast. Run Fast."""
FULL_COLOR_LOGO = """
\033[38;2;255;13;104m ββββ βββββ ββ \033[0m βββ ββ β β βββββββββββ
\033[38;2;255;13;104m ββ \033[0m β β β ββ β β ββ
\033[38;2;255;13;104m ββββββββ ββββ \033[0m β β β ββ β β ββ
\033[38;2;255;13;104m ββ\033[0m βββββββββ β ββ β β ββ
\033[38;2;255;13;104m ββββ βββββββββ \033[0m β β β ββ β βββ βββββββ
""" # noqa
ansi_pattern = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
def get_logo(full=False, coffee=False):
logo = (
(FULL_COLOR_LOGO if full else (COFFEE_LOGO if coffee else COLOR_LOGO))
if sys.stdout.isatty()
else BASE_LOGO
)
if (
sys.platform == "darwin"
and environ.get("TERM_PROGRAM") == "Apple_Terminal"
):
logo = ansi_pattern.sub("", logo)
return logo
|
Stephanie/Modules/evernote_module.py | JeremyARussell/stephanie-va | 866 | 12655792 |
import evernote.edam.type.ttypes as NoteType
from evernote.api.client import EvernoteClient
from Stephanie.Modules.base_module import BaseModule
# Written by <NAME> - <EMAIL>
class EvernoteModule(BaseModule):
def __init__(self, *args):
super(EvernoteModule, self).__init__(*args)
self.auth_token = self.get_configuration('evernote_auth_token')
if self.auth_token:
self.client = EvernoteClient(token=self.auth_token, sandbox=False)
self.user_store = self.client.get_user_store()
self.note_store = self.client.get_note_store()
else:
return False
def write_note(self):
note = NoteType.Note() # Creates a new note
note.title = "Stephanie Note"
self.assistant.say("What would you like me to write down?")
the_note = self.assistant.listen().decipher() # Listens to the input and stores it
note.content = '<?xml version="1.0" encoding="UTF-8"?>'
note.content += '<!DOCTYPE en-note SYSTEM ' \
'"http://xml.evernote.com/pub/enml2.dtd">'
note.content += '<en-note>Note:<br/>'
note.content += ('%s' % the_note)
note.content += '</en-note>'
try:
created_note = self.note_store.createNote(note) # Stores the new note in Evernote
except:
response = ("Note wasn't created successfully, you probably didn't spelled anything or spelled really "
"bad, Not my fault okay? It's never a program's fault.")
print(response)
return response
if created_note:
return "I successfully wrote down your note."
else:
response = ("Note wasn't created successfully, you probably didn't spelled anything or spelled really "
"bad, Not my fault okay? It's never a program's fault. /s Refer back to docs.")
print(response)
return response
|
settings_screenbird.py | annevandalfsen/screenbird | 121 | 12655794 |
# Django settings for pastevid project.
import datetime
import os.path
import profanity_list
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Use to determine what robots.txt to serve. To allow all crawlers set this to True.
PRODUCTION = False
ADMINS = (
('adam', '<EMAIL>'),
('caleb', '<EMAIL>'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(PROJECT_ROOT, 'pastevid.db'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media/')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static/')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '<django-secret-key>'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'context_processors.current_site',
'django.contrib.auth.context_processors.auth',
"django.core.context_processors.media",
'django.core.context_processors.request',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'templates/'),
)
FIXTURE_DIRS = (
os.path.join(PROJECT_ROOT, 'accounts/fixtures/'),
os.path.join(PROJECT_ROOT, 'videos/fixtures/'),
)
AUTHENTICATION_BACKENDS = (
'auth_login.backends.EmailBackend',
'social_auth.backends.facebook.FacebookBackend',
'social_auth.backends.google.GoogleOAuthBackend',
'social_auth.backends.google.GoogleOAuth2Backend',
'social_auth.backends.google.GoogleBackend',
'social_auth.backends.OpenIDBackend',
'django.contrib.auth.backends.ModelBackend',
)
HITCOUNT_KEEP_HIT_ACTIVE = { 'days': 45 }
HITCOUNT_HITS_PER_IP_LIMIT = 0
HITCOUNT_EXCLUDE_USER_GROUP = ( 'Editor', )
AUTH_PROFILE_MODULE='accounts.UserProfile'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.flatpages',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
#External Apps
'social_auth',
'south',
'hitcount',
'paypal.standard.ipn',
'chronograph',
'django_extensions',
'oembed',
'django_jenkins',
'widget_tweaks',
#Local apps
'emailnotify',
'about',
'videos',
'accounts',
'metatags',
'amazon',
'thumbsup',
'flatpages',
'authorize_net',
)
# For Jenkins testing
PROJECT_APPS = (
'accounts',
'videos',
'about',
'amazon',
'emailnotify',
'metatags',
'thumbsup',
'api',
)
JENKINS_TASKS = (
'django_jenkins.tasks.with_coverage',
'django_jenkins.tasks.django_tests', # select one django or
#'django_jenkins.tasks.dir_tests' # directory tests discovery
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': os.path.join(PROJECT_ROOT, 'amazon.log'),
'formatter': 'verbose',
},
'videos': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': os.path.join(PROJECT_ROOT, 'videos.log'),
'formatter': 'verbose'
},
'encode': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': os.path.join(PROJECT_ROOT, 'encode.log'),
'formatter': 'verbose'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'amazon.utils': {
'handlers': ['file',],
'level': 'DEBUG',
'propagate': True,
},
'videos.forms': {
'handlers': ['file',],
'level': 'WARN',
'propagate': True,
},
'videos.views': {
'handlers': ['videos',],
'level': 'DEBUG',
'propagate': True,
},
'encode': {
'handlers': ['encode',],
'level': 'DEBUG',
'propagate': True
}
}
}
# Paypal Account
PAYPAL_RECEIVER_EMAIL = "<your-paypal-reciever-email>"
PROFANITY_LIST = profanity_list.CENSORED_LIST
############# EMAIL SETTINGS ###############
# override these in your local_settings.py
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = ''
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_PORT = 465
HOST = ""
DEFAULT_FROM_EMAIL = ''
EMAIL_SUBJECT_PREFIX = '[Screenbird]'
EMAIL_FAIL_SILENTLY = False
# AMAZON
PUSH_TO_S3 = True
AWS_ACCESS_KEY_ID = ""
AWS_SECRET_ACCESS_KEY = ""
AWS_VIDEO_BUCKET_NAME = "%s-%s" % (AWS_ACCESS_KEY_ID.lower(), "videos")
UPLOAD_DELAY = datetime.timedelta(hours=12)
UPLOAD_CHECKING = datetime.timedelta(minutes=30)
EC2_KEY_NAME = ''
QUEUE_NAME = "video_queue"
COCREATE_QUEUE_NAME = 'cocreate_queue'
PEM_PATH = os.path.join(PROJECT_ROOT, "amazon", "ec2_files")
# Use to determine what robots.txt to serve. To allow all crawlers set this to True.
PRODUCTION = False
#Facebook OAuth Keys
#Production's APP ID; Override on local_settings for test site
FACEBOOK_APP_ID = ''
FACEBOOK_API_SECRET = ''
FACEBOOK_EXTENDED_PERMISSIONS = ['offline_access','publish_stream','email']
#Twitter OAuth Keys
#Production's APP ID; Override on local_settings for test site
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
#Social_Auth Parameters
SOCIAL_AUTH_CREATE_USERS = True
SOCIAL_AUTH_FORCE_RANDOM_USERNAME = False
SOCIAL_AUTH_DEFAULT_USERNAME = 'socialauth_user'
SOCIAL_AUTH_COMPLETE_URL_NAME = 'complete'
SOCIAL_AUTH_ASSOCIATE_BY_MAIL = True
#Youtube
YOUTUBE_DEV_KEY = ''
#Login Parameters
LOGIN_ERROR_URL = '/login/error/'
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/login_auth/'
#Sites
SITE_ID = 2 #Screen Bird Site ID
# If you are using secure AuthSub, be sure to set your RSA private key so a SecureAuthSubToken is created
# http://code.google.com/apis/gdata/docs/auth/authsub.html#No-Library
# SECURE_KEY the location of the RSA private key(For production). None if AuthSub is not secured.
SECURE_KEY = None #os.path.join(PROJECT_ROOT, '')
ENABLE_VIDEO_APPROVAL = True
# Default Authorize.net credentials
LOGIN_ID = u''
TRANS_KEY = u''
IS_TEST = True
DELIMITER = u','
ENCAPSULATOR = u''
#settings for upload test
FILE_LOCATION = os.path.join(MEDIA_ROOT, 'tmp/sample_video.mp4')
FILE_KEY = 'SAMPLE'
try:
from local_settings import *
except ImportError:
pass
|
Vecihi/Backend/vecihi/posts/migrations/0003_viewedposttracking.py | developertqw2017/migrationDjango | 220 | 12655806 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-02-24 18:46
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('posts', '0002_auto_20171217_0008'),
]
operations = [
migrations.CreateModel(
name='ViewedPostTracking',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.DateTimeField()),
('actor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='who_visit_post', to=settings.AUTH_USER_MODEL)),
('visited_post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posts.Post')),
],
),
]
|
pysnmp/entity/rfc3413/cmdrsp.py | RKinsey/pysnmp | 492 | 12655834 |
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, <NAME> <<EMAIL>>
# License: http://snmplabs.com/pysnmp/license.html
#
import pysnmp.smi.error
from pysnmp import debug
from pysnmp.proto import errind
from pysnmp.proto import error
from pysnmp.proto import rfc1902
from pysnmp.proto import rfc1905
from pysnmp.proto import rfc3411
from pysnmp.proto.api import v2c # backend is always SMIv2 compliant
from pysnmp.proto.proxy import rfc2576
from pysnmp.smi import exval
# 3.2
class CommandResponderBase(object):
ACM_ID = 3 # default MIB access control method to use
SUPPORTED_PDU_TYPES = ()
SMI_ERROR_MAP = {
pysnmp.smi.error.TooBigError: 'tooBig',
# this should never bubble up, SNMP exception objects should be passed as values
pysnmp.smi.error.NoSuchNameError: 'noSuchName',
pysnmp.smi.error.BadValueError: 'badValue',
pysnmp.smi.error.ReadOnlyError: 'readOnly',
pysnmp.smi.error.GenError: 'genErr',
pysnmp.smi.error.NoAccessError: 'noAccess',
pysnmp.smi.error.WrongTypeError: 'wrongType',
pysnmp.smi.error.WrongLengthError: 'wrongLength',
pysnmp.smi.error.WrongEncodingError: 'wrongEncoding',
pysnmp.smi.error.WrongValueError: 'wrongValue',
pysnmp.smi.error.NoCreationError: 'noCreation',
pysnmp.smi.error.InconsistentValueError: 'inconsistentValue',
pysnmp.smi.error.ResourceUnavailableError: 'resourceUnavailable',
pysnmp.smi.error.CommitFailedError: 'commitFailed',
pysnmp.smi.error.UndoFailedError: 'undoFailed',
pysnmp.smi.error.AuthorizationError: 'authorizationError',
pysnmp.smi.error.NotWritableError: 'notWritable',
pysnmp.smi.error.InconsistentNameError: 'inconsistentName'
}
def __init__(self, snmpEngine, snmpContext, cbCtx=None):
snmpEngine.msgAndPduDsp.registerContextEngineId(
snmpContext.contextEngineId, self.SUPPORTED_PDU_TYPES, self.processPdu
)
self.snmpContext = snmpContext
self.cbCtx = cbCtx
self.__pendingReqs = {}
def close(self, snmpEngine):
snmpEngine.msgAndPduDsp.unregisterContextEngineId(
self.snmpContext.contextEngineId, self.SUPPORTED_PDU_TYPES
)
self.snmpContext = self.__pendingReqs = None
def releaseStateInformation(self, stateReference):
if stateReference in self.__pendingReqs:
del self.__pendingReqs[stateReference]
def sendVarBinds(self, snmpEngine, stateReference,
errorStatus, errorIndex, varBinds):
(messageProcessingModel,
securityModel,
securityName,
securityLevel,
contextEngineId,
contextName,
pduVersion,
PDU,
origPdu,
maxSizeResponseScopedPDU,
statusInformation) = self.__pendingReqs[stateReference]
v2c.apiPDU.setErrorStatus(PDU, errorStatus)
v2c.apiPDU.setErrorIndex(PDU, errorIndex)
v2c.apiPDU.setVarBinds(PDU, varBinds)
debug.logger & debug.FLAG_APP and debug.logger(
'sendVarBinds: stateReference %s, errorStatus %s, errorIndex %s, '
'varBinds %s' % (stateReference, errorStatus, errorIndex, varBinds))
self.sendPdu(snmpEngine, stateReference, PDU)
def sendPdu(self, snmpEngine, stateReference, PDU):
(messageProcessingModel,
securityModel,
securityName,
securityLevel,
contextEngineId,
contextName,
pduVersion,
_,
origPdu,
maxSizeResponseScopedPDU,
statusInformation) = self.__pendingReqs[stateReference]
# Agent-side API complies with SMIv2
if messageProcessingModel == 0:
PDU = rfc2576.v2ToV1(PDU, origPdu)
# 3.2.6
try:
snmpEngine.msgAndPduDsp.returnResponsePdu(
snmpEngine,
messageProcessingModel,
securityModel,
securityName,
securityLevel,
contextEngineId,
contextName,
pduVersion,
PDU,
maxSizeResponseScopedPDU,
stateReference,
statusInformation
)
except error.StatusInformation as exc:
debug.logger & debug.FLAG_APP and debug.logger(
'sendPdu: stateReference %s, statusInformation '
'%s' % (stateReference, exc))
snmpSilentDrops, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols(
'__SNMPv2-MIB', 'snmpSilentDrops')
snmpSilentDrops.syntax += 1
_getRequestType = rfc1905.GetRequestPDU.tagSet
_getNextRequestType = rfc1905.GetNextRequestPDU.tagSet
_setRequestType = rfc1905.SetRequestPDU.tagSet
_counter64Type = rfc1902.Counter64.tagSet
def processPdu(self, snmpEngine, messageProcessingModel, securityModel,
securityName, securityLevel, contextEngineId, contextName,
pduVersion, PDU, maxSizeResponseScopedPDU, stateReference):
# Agent-side API complies with SMIv2
if messageProcessingModel == 0:
origPdu = PDU
PDU = rfc2576.v1ToV2(PDU)
else:
origPdu = None
# 3.2.1
if (PDU.tagSet not in rfc3411.READ_CLASS_PDUS and
PDU.tagSet not in rfc3411.WRITE_CLASS_PDUS):
raise error.ProtocolError('Unexpected PDU class %s' % PDU.tagSet)
# 3.2.2 --> no-op
# 3.2.4
rspPDU = v2c.apiPDU.getResponse(PDU)
statusInformation = {}
self.__pendingReqs[stateReference] = (
messageProcessingModel, securityModel, securityName,
securityLevel, contextEngineId, contextName, pduVersion,
rspPDU, origPdu, maxSizeResponseScopedPDU, statusInformation
)
# 3.2.5
varBinds = v2c.apiPDU.getVarBinds(PDU)
debug.logger & debug.FLAG_APP and debug.logger(
'processPdu: stateReference %s, varBinds %s' % (stateReference, varBinds))
self.initiateMgmtOperation(snmpEngine, stateReference, contextName, PDU)
@staticmethod
def _storeAccessContext(snmpEngine):
"""Copy received message metadata while it lasts"""
execCtx = snmpEngine.observer.getExecutionContext('rfc3412.receiveMessage:request')
return {
'securityModel': execCtx['securityModel'],
'securityName': execCtx['securityName'],
'securityLevel': execCtx['securityLevel'],
'contextName': execCtx['contextName'],
'pduType': execCtx['pdu'].getTagSet()
}
@classmethod
def verifyAccess(cls, viewType, varBind, **context):
name, val = varBind
snmpEngine = context['snmpEngine']
(securityModel,
securityName,
securityLevel,
contextName,
pduType) = (context['securityModel'],
context['securityName'],
context['securityLevel'],
context['contextName'],
context['pduType'])
try:
snmpEngine.accessControlModel[cls.ACM_ID].isAccessAllowed(
snmpEngine, securityModel, securityName,
securityLevel, viewType, contextName, name
)
# Map ACM errors onto SMI ones
except error.StatusInformation as exc:
statusInformation = exc
debug.logger & debug.FLAG_APP and debug.logger(
'__verifyAccess: name %s, statusInformation '
'%s' % (name, statusInformation))
errorIndication = statusInformation['errorIndication']
# 3.2.5...
if (errorIndication == errind.noSuchView or
errorIndication == errind.noAccessEntry or
errorIndication == errind.noGroupName):
raise pysnmp.smi.error.AuthorizationError(
name=name, idx=context.get('idx'))
elif errorIndication == errind.otherError:
raise pysnmp.smi.error.GenError(name=name, idx=context.get('idx'))
elif errorIndication == errind.noSuchContext:
snmpUnknownContexts, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols(
'__SNMP-TARGET-MIB', 'snmpUnknownContexts')
snmpUnknownContexts.syntax += 1
# Request REPORT generation
raise pysnmp.smi.error.GenError(
name=name, idx=context.get('idx'), oid=snmpUnknownContexts.name,
val=snmpUnknownContexts.syntax)
elif errorIndication == errind.notInView:
return True
else:
raise error.ProtocolError('Unknown ACM error %s' % errorIndication)
else:
# rfc2576: 4.1.2.1
if (securityModel == 1 and val is not None and
cls._counter64Type == val.getTagSet() and
cls._getNextRequestType == pduType):
# This will cause MibTree to skip this OID-value
raise pysnmp.smi.error.NoAccessError(name=name, idx=context.get('idx'))
def _getMgmtFun(self, contextName):
return lambda *args, **kwargs: None
def _mapSmiErrors(self, varBinds, **context):
errorIndication = None
errorStatus = errorIndex = 0
errors = context.get('errors')
if not errors:
return errorIndication, errorStatus, errorIndex, varBinds
# Take the latest exception
err = errors[-1]
if isinstance(err, pysnmp.smi.error.GenError):
errorIndication = str(err)
elif isinstance(err, pysnmp.smi.error.SmiError):
if isinstance(err, pysnmp.smi.error.TooBigError):
# rfc1905: 4.2.1.3
varBinds = []
errorStatus = self.SMI_ERROR_MAP.get(err.__class__, 'genErr')
try:
errorIndex = err['idx'] + 1
except IndexError:
errorIndex = len(varBinds) and 1 or 0
return errorIndication, errorStatus, errorIndex, varBinds
def completeMgmtOperation(self, varBinds, **context):
(errorIndication,
errorStatus, errorIndex,
varBinds) = self._mapSmiErrors(varBinds, **context)
stateReference = context['stateReference']
if errorIndication:
statusInformation = self.__pendingReqs[stateReference]['statusInformation']
try:
# Request REPORT generation
statusInformation['oid'] = errorIndication['oid']
statusInformation['val'] = errorIndication['val']
except KeyError:
pass
self.sendVarBinds(context['snmpEngine'], stateReference,
errorStatus, errorIndex, varBinds)
self.releaseStateInformation(stateReference)
def initiateMgmtOperation(self, snmpEngine, stateReference, contextName, PDU):
varBinds = v2c.apiPDU.getVarBinds(PDU)
mgmtFun = self._getMgmtFun(contextName)
context = dict(snmpEngine=snmpEngine,
stateReference=stateReference,
acFun=self.verifyAccess,
cbFun=self.completeMgmtOperation,
cbCtx=self.cbCtx)
context.update(self._storeAccessContext(snmpEngine))
mgmtFun(*varBinds, **context)
class GetCommandResponder(CommandResponderBase):
SUPPORTED_PDU_TYPES = (rfc1905.GetRequestPDU.tagSet,)
# rfc1905: 4.2.1
def _getMgmtFun(self, contextName):
return self.snmpContext.getMibInstrum(contextName).readMibObjects
class NextCommandResponder(CommandResponderBase):
SUPPORTED_PDU_TYPES = (rfc1905.GetNextRequestPDU.tagSet,)
# rfc1905: 4.2.2
def _getMgmtFun(self, contextName):
return self.snmpContext.getMibInstrum(contextName).readNextMibObjects
def _getManagedObjectsInstances(self, varBinds, **context):
"""Iterate over Managed Objects fulfilling SNMP query.
Returns
-------
:py:class:`list` - List of Managed Objects Instances to respond with or
`None` to indicate that not all objects have been gathered
so far.
"""
rspVarBinds = context['rspVarBinds']
varBindsMap = context['varBindsMap']
rtrVarBinds = []
for idx, varBind in enumerate(varBinds):
name, val = varBind
if (exval.noSuchObject.isSameTypeWith(val) or
exval.noSuchInstance.isSameTypeWith(val)):
varBindsMap[len(rtrVarBinds)] = varBindsMap.pop(idx, idx)
rtrVarBinds.append(varBind)
else:
rspVarBinds[varBindsMap.pop(idx, idx)] = varBind
if rtrVarBinds:
snmpEngine = context['snmpEngine']
# Need to unwind stack, can't recurse any more
def callLater(*args):
snmpEngine.transportDispatcher.unregisterTimerCbFun(callLater)
mgmtFun = context['mgmtFun']
mgmtFun(*varBinds, **context)
snmpEngine.transportDispatcher.registerTimerCbFun(callLater, 0.01)
else:
return rspVarBinds
def completeMgmtOperation(self, varBinds, **context):
rspVarBinds = self._getManagedObjectsInstances(varBinds, **context)
if rspVarBinds:
CommandResponderBase.completeMgmtOperation(self, rspVarBinds, **context)
def initiateMgmtOperation(self, snmpEngine, stateReference, contextName, PDU):
varBinds = v2c.apiPDU.getVarBinds(PDU)
mgmtFun = self._getMgmtFun(contextName)
context = dict(snmpEngine=snmpEngine,
stateReference=stateReference,
acFun=self.verifyAccess,
cbFun=self.completeMgmtOperation,
cbCtx=self.cbCtx,
rspVarBinds=varBinds[:],
varBindsMap={},
mgmtFun=mgmtFun)
context.update(self._storeAccessContext(snmpEngine))
mgmtFun(*varBinds, **context)
class BulkCommandResponder(NextCommandResponder):
SUPPORTED_PDU_TYPES = (rfc1905.GetBulkRequestPDU.tagSet,)
MAX_VAR_BINDS = 64
def _completeNonRepeaters(self, varBinds, **context):
mgmtFun = context['mgmtFun']
if not varBinds:
# No non-repeaters requested, proceed with repeaters
mgmtFun(*context['reqVarBinds'],
**dict(context, cbFun=self.completeMgmtOperation,
varBinds=context['reqVarBinds'][:]))
return
rspVarBinds = self._getManagedObjectsInstances(varBinds, **context)
if rspVarBinds:
context['allVarBinds'].extend(rspVarBinds)
if context['counters']['M'] and context['counters']['R']:
rspVarBinds = self._getManagedObjectsInstances(varBinds, **context)
if rspVarBinds:
# Done with non-repeaters, proceed with repeaters
mgmtFun(*context['reqVarBinds'],
**dict(context,
cbFun=self.completeMgmtOperation,
varBindsMap={},
rspVarBinds=context['reqVarBinds'][:]))
return
else:
CommandResponderBase.completeMgmtOperation(self, context['allVarBinds'], **context)
def completeMgmtOperation(self, varBinds, **context):
rspVarBinds = self._getManagedObjectsInstances(varBinds, **context)
if rspVarBinds:
context['counters']['M'] -= 1
context['allVarBinds'].extend(rspVarBinds)
eom = all(exval.endOfMibView.isSameTypeWith(value) for name, value in rspVarBinds)
if not eom and context['counters']['M'] and context['counters']['R']:
snmpEngine = context['snmpEngine']
# Need to unwind stack, can't recurse any more
def callLater(*args):
snmpEngine.transportDispatcher.unregisterTimerCbFun(callLater)
mgmtFun = context['mgmtFun']
reqVarBinds = varBinds[-context['counters']['R']:]
mgmtFun(*reqVarBinds,
**dict(context, cbFun=self.completeMgmtOperation,
varBindsMap={}, rspVarBinds=reqVarBinds[:]))
snmpEngine.transportDispatcher.registerTimerCbFun(callLater, 0.01)
else:
CommandResponderBase.completeMgmtOperation(self, context['allVarBinds'], **context)
# rfc1905: 4.2.3
def initiateMgmtOperation(self, snmpEngine, stateReference, contextName, PDU):
nonRepeaters = v2c.apiBulkPDU.getNonRepeaters(PDU)
if nonRepeaters < 0:
nonRepeaters = 0
maxRepetitions = v2c.apiBulkPDU.getMaxRepetitions(PDU)
if maxRepetitions < 0:
maxRepetitions = 0
varBinds = v2c.apiPDU.getVarBinds(PDU)
N = min(int(nonRepeaters), len(varBinds))
M = int(maxRepetitions)
R = max(len(varBinds) - N, 0)
if R:
M = min(M, self.MAX_VAR_BINDS // R)
debug.logger & debug.FLAG_APP and debug.logger(
'initiateMgmtOperation: N %d, M %d, R %d' % (N, M, R))
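        # Illustrative sizing example (comment only, not from the original
        # source): a request with 5 var-binds, non-repeaters=2 and
        # max-repetitions=10 gives N=2, R=3 and M = min(10, 64 // 3) = 10,
        # so the response carries at most N + M * R = 32 var-binds.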
mgmtFun = self._getMgmtFun(contextName)
context = dict(snmpEngine=snmpEngine,
stateReference=stateReference,
contextName=contextName,
acFun=self.verifyAccess,
cbFun=self._completeNonRepeaters,
cbCtx=self.cbCtx,
reqVarBinds=varBinds[N:],
counters={'M': M, 'R': R},
rspVarBinds=varBinds[N:],
allVarBinds=[],
varBindsMap={},
mgmtFun=mgmtFun)
context.update(self._storeAccessContext(snmpEngine))
mgmtFun(*varBinds[:N], **context)
class SetCommandResponder(CommandResponderBase):
SUPPORTED_PDU_TYPES = (rfc1905.SetRequestPDU.tagSet,)
SMI_ERROR_MAP = CommandResponderBase.SMI_ERROR_MAP.copy()
# turn missing OIDs into access denial
SMI_ERROR_MAP[pysnmp.smi.error.NoSuchObjectError] = 'notWritable'
SMI_ERROR_MAP[pysnmp.smi.error.NoSuchInstanceError] = 'notWritable'
# rfc1905: 4.2.5.1-13
def _getMgmtFun(self, contextName):
return self.snmpContext.getMibInstrum(contextName).writeMibObjects
|
common/ops/loss_ops.py | vahidk/TensorflowFramework | 129 | 12655876 | """Loss ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from common.ops import activation_ops
def softmax_entropy(logits, dims=-1):
"""Softmax entropy from logits."""
plogp = activation_ops.softmax(logits, dims) * activation_ops.log_softmax(logits, dims)
return -tf.reduce_sum(plogp, dims)
def softmax_cross_entropy(labels, logits, dims=-1):
"""Softmax entropy from logits."""
plogp = labels * activation_ops.log_softmax(logits, dims)
return -tf.reduce_sum(plogp, dims)
def softmax_kl_divergence(labels, logits, dims=-1, epsilon=1e-6):
"""Softmax entropy from logits."""
plogp = labels * (tf.log(labels) - activation_ops.log_softmax(logits, dims))
return tf.reduce_sum(plogp, dims)
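# Reference note (not part of the original module): for a proper probability
# distribution `labels`, the helpers above satisfy the standard identity
# H(p, q) = H(p) + KL(p || q), i.e.
#   softmax_cross_entropy(labels, logits)
#     ~= -tf.reduce_sum(labels * tf.log(labels), -1)
#        + softmax_kl_divergence(labels, logits)
# up to numerical precision, which is a handy sanity check in tests.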
def gaussian_kl(q, p=(0., 0.)):
"""Computes the KL divergence between two isotropic Gaussian distributions.
Args:
    q: A tuple (mu, log_sigma_sq) representing a multivariate Gaussian.
    p: A tuple (mu, log_sigma_sq) representing a multivariate Gaussian.
Returns:
A tensor representing KL(q, p).
"""
mu1, log_sigma1_sq = q
mu2, log_sigma2_sq = p
return tf.reduce_sum(
0.5 * (log_sigma2_sq - log_sigma1_sq +
tf.exp(log_sigma1_sq - log_sigma2_sq) +
tf.square(mu1 - mu2) / tf.exp(log_sigma2_sq) -
1), axis=-1)
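# For reference (comment only): the expression above is the closed-form KL
# between diagonal Gaussians, summed over the last axis:
#   KL(N(mu1, s1^2) || N(mu2, s2^2))
#     = 0.5 * (log s2^2 - log s1^2 + s1^2 / s2^2 + (mu1 - mu2)^2 / s2^2 - 1)
# With the default p = (0., 0.) it reduces to the usual VAE prior term
# 0.5 * (exp(log_sigma_sq) + mu^2 - 1 - log_sigma_sq) summed over dimensions.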
def gan_loss(x, gz, discriminator):
"""Original GAN loss.
Args:
x: Batch of real samples.
gz: Batch of generated samples.
discriminator: Discriminator function.
Returns:
d_loss: Discriminator loss.
g_loss: Generator loss.
"""
dx = discriminator(x)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
dgz = discriminator(gz)
d_loss = -tf.reduce_mean(tf.log_sigmoid(dx) + tf.log_sigmoid(1 - dgz))
g_loss = -tf.reduce_mean(tf.log_sigmoid(dgz))
return d_loss, g_loss
def lsgan_loss(x, gz, discriminator):
"""LS-GAN loss.
Args:
x: Batch of real samples.
gz: Batch of generated samples.
discriminator: Discriminator function.
Returns:
d_loss: Discriminator loss.
g_loss: Generator loss.
"""
dx = discriminator(x)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
dgz = discriminator(gz)
d_loss = tf.reduce_mean(tf.square(dx - 1.0) + tf.square(dgz))
g_loss = tf.reduce_mean(tf.square(dgz - 1.0))
return d_loss, g_loss
def wgan_loss(x, gz, discriminator, beta=10.0):
"""Improved Wasserstein GAN loss.
Args:
x: Batch of real samples.
gz: Batch of generated samples.
discriminator: Discriminator function.
    beta: Regularizer factor.
Returns:
d_loss: Discriminator loss.
g_loss: Generator loss.
"""
dx = discriminator(x)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
dgz = discriminator(gz)
batch_size = tf.shape(x)[0]
alpha = tf.random_uniform([batch_size])
xhat = x * alpha + gz * (1 - alpha)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
dxhat = discriminator(xhat)
gnorm = tf.norm(tf.gradients(dxhat, xhat)[0])
d_loss = -tf.reduce_mean(dx - dgz - beta * tf.square(gnorm - 1))
g_loss = -tf.reduce_mean(dgz)
return d_loss, g_loss
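# Minimal usage sketch (illustrative only; `discriminator_fn`, `real_images`,
# `generated_images`, `d_vars` and `g_vars` are hypothetical names, not part
# of this module):
#
#   d_loss, g_loss = gan_loss(real_images, generated_images, discriminator_fn)
#   d_opt = tf.train.AdamOptimizer(1e-4).minimize(d_loss, var_list=d_vars)
#   g_opt = tf.train.AdamOptimizer(1e-4).minimize(g_loss, var_list=g_vars)
#
# lsgan_loss and wgan_loss share the same (x, gz, discriminator) signature;
# wgan_loss additionally takes the gradient-penalty weight `beta`.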
|
apps/yolo/detect_api_yolov3.py | yarenty/ml-suite | 334 | 12655916 | <reponame>yarenty/ml-suite<filename>apps/yolo/detect_api_yolov3.py
import numpy as np
import nms
import time
from yolo_utils import process_all_yolo_layers, apply_nms
from xfdnn.rt import xdnn_io
def correct_region_boxes(boxes_array, x_idx, y_idx, w_idx, h_idx, w, h, net_w, net_h):
new_w = 0;
new_h = 0;
#print "x_idx, y_idx, w_idx, h_idx, w, h, net_w, net_h", x_idx, y_idx, w_idx, h_idx, w, h, net_w, net_h
if ((float(net_w) / float(w)) < (float(net_h) / float(h))) :
new_w = net_w
new_h = (h * net_w) / w
else:
new_w = (w * net_h) / h;
new_h = net_h
boxes_array[:,x_idx] = (boxes_array[:,x_idx] - (net_w - new_w) / 2.0 / net_w) / (float(new_w) / net_w);
boxes_array[:,y_idx] = (boxes_array[:,y_idx] - (net_h - new_h) / 2.0 / net_h) / (float(new_h) / net_h);
boxes_array[:,w_idx] *= float(net_w) / float(new_w);
boxes_array[:,h_idx] *= float(net_h) / float(new_h);
return boxes_array
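# Worked example (comment only): for a 640x480 source image and a 416x416
# network input, net_w/w = 0.65 < net_h/h ~= 0.867, so new_w = 416 and
# new_h = 480 * 416 / 640 = 312; the box centres are then shifted and rescaled
# to undo the (416 - 312) / 2 rows of letterbox padding on each side.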
# simple HWC->CHW and mean subtraction/scaling
# returns tensor ready for fpga execute
def det_preprocess(image, dest, net_h, net_w):
#print "in image for preprosessing:", image.shape, image
dummy_dest, s = xdnn_io.loadYoloImageBlobFromFile(image, net_h, net_w)
dest[...] = dummy_dest
#print " prep image:", dest.shape, dest
# takes dict of two outputs from XDNN, pixel-conv and bb-output
# returns bounding boxes
def det_postprocess(fpgaOutput, config, image_shape):
#print fpgaOutput[0].shape , fpgaOutput[1].shape, config['classes'], config['anchorCnt'], config['net_w'], config['net_h']
out_yolo_layers = process_all_yolo_layers(fpgaOutput, config['classes'], config['anchorCnt'], config['net_w'], config['net_h'])
anchorCnt = config['anchorCnt']
classes = config['classes']
num_proposals_layer=[0]
total_proposals = 0
for layr_idx in range (len(out_yolo_layers)):
yolo_layer_shape = out_yolo_layers[layr_idx].shape
#print "layr_idx , yolo_layer_shape", layr_idx , yolo_layer_shape
out_yolo_layers[layr_idx] = out_yolo_layers[layr_idx].reshape(yolo_layer_shape[0], anchorCnt, (5+classes), yolo_layer_shape[2]*yolo_layer_shape[3])
out_yolo_layers[layr_idx] = out_yolo_layers[layr_idx].transpose(0,3,1,2)
out_yolo_layers[layr_idx] = out_yolo_layers[layr_idx].reshape(yolo_layer_shape[0],yolo_layer_shape[2]*yolo_layer_shape[3] * anchorCnt, (5+classes))
#print "layr_idx, final in layer sape, outlayer shape", layr_idx, yolo_layer_shape, out_yolo_layers[layr_idx].shape
total_proposals += yolo_layer_shape[2]*yolo_layer_shape[3] * anchorCnt
num_proposals_layer.append(total_proposals)
boxes_array = np.empty([config['batch_sz'], total_proposals, (5+classes)])
for layr_idx in range (len(out_yolo_layers)):
proposal_st = num_proposals_layer[layr_idx]
proposal_ed = num_proposals_layer[layr_idx + 1]
#print "proposal_st proposal_ed", proposal_st, proposal_ed
boxes_array[:,proposal_st:proposal_ed,:] = out_yolo_layers[layr_idx][...]
bboxlist_for_images = []
for i in range(config['batch_sz']):
boxes_array[i,:,:] = correct_region_boxes(boxes_array[i,:,:], 0, 1, 2, 3, float(image_shape[i][1]), float(image_shape[i][0]), float(config['net_w']), float(config['net_h']))
detected_boxes = apply_nms(boxes_array[i,:,:], classes, config['scorethresh'], config['iouthresh'])
bboxlist=[]
for det_idx in range(len(detected_boxes)):
#print detected_boxes[det_idx][0], detected_boxes[det_idx][1], detected_boxes[det_idx][2], detected_boxes[det_idx][3], config['names'][detected_boxes[det_idx][4]], detected_boxes[det_idx][5]
bboxlist.append({'classid' : detected_boxes[det_idx][4],
'prob' : detected_boxes[det_idx][5],
'll' : {'x' : int((detected_boxes[det_idx][0] - 0.5 *detected_boxes[det_idx][2]) * image_shape[i][1]),
'y' : int((detected_boxes[det_idx][1] + 0.5 *detected_boxes[det_idx][3]) * image_shape[i][0])},
'ur' : {'x' : int((detected_boxes[det_idx][0] + 0.5 *detected_boxes[det_idx][2]) * image_shape[i][1]),
'y' : int((detected_boxes[det_idx][1] - 0.5 *detected_boxes[det_idx][3]) * image_shape[i][0])}})
bboxlist_for_images.append(bboxlist)
return bboxlist_for_images
|
DIE/Plugins/DataParsers/StringParser/StringParser.py | requizm/DIE | 496 | 12655935 |
from DIE.Lib.DataPluginBase import DataPluginBase
import idc
import idaapi
# TODO: Add more string types.
ASCII_STR = 0 # ASCII String
UNICODE_STR = 1 # Unicode String
class StringParser(DataPluginBase):
"""
A generic string value parser
"""
def __init__(self):
super(StringParser, self).__init__()
def registerSupportedTypes(self):
"""
Register string types
@return:
"""
self.addSuportedType("LPCSTR", ASCII_STR)
self.addSuportedType("CHAR *", ASCII_STR)
self.addSuportedType("CONST CHAR *", ASCII_STR)
self.addSuportedType("LPSTR", ASCII_STR)
self.addSuportedType("LPCWSTR", UNICODE_STR)
self.addSuportedType("LPWSTR", UNICODE_STR)
self.setPluginType("String")
def guessValues(self, rawValue):
"""
Guess string values
"""
minLength = 5 # The minimal string length
value = idc.GetString(rawValue, strtype=idc.ASCSTR_C)
if value and len(value) >= minLength:
value, raw_value = self.normalize_raw_value(value)
self.addParsedvalue(value, 1, "ASCII C-String", raw_value)
value = idc.GetString(rawValue, strtype=idc.ASCSTR_UNICODE)
if value and len(value) >= minLength:
value, raw_value = self.normalize_raw_value(value)
self.addParsedvalue(value, 1, "Ascii Unicode String", raw_value)
value = idc.GetString(rawValue, strtype=idaapi.ASCSTR_PASCAL)
if value and len(value) >= minLength:
value, raw_value = self.normalize_raw_value(value)
self.addParsedvalue(value, 1, "Ascii Pascal string", raw_value)
value = idc.GetString(rawValue, strtype=idaapi.ASCSTR_LEN2)
if value and len(value) >= minLength:
value, raw_value = self.normalize_raw_value(value)
self.addParsedvalue(value, 1, "Ascii String (Len2)", raw_value)
value = idc.GetString(rawValue, strtype=idaapi.ASCSTR_LEN4)
if value and len(value) >= minLength:
value, raw_value = self.normalize_raw_value(value)
self.addParsedvalue(value, 1, "Ascii String (Len4)", raw_value)
value = idc.GetString(rawValue, strtype=idaapi.ASCSTR_ULEN2)
if value and len(value) >= minLength:
value, raw_value = self.normalize_raw_value(value)
self.addParsedvalue(value, 1, "Ascii String (ULen2)", raw_value)
value = idc.GetString(rawValue, strtype=idaapi.ASCSTR_ULEN4)
if value and len(value) >= minLength:
value, raw_value = self.normalize_raw_value(value)
self.addParsedvalue(value, 1, "Ascii String (ULen4)", raw_value)
def matchType(self, type):
"""
Check if given type is of a string type
@param type: IDA type_info_t object
@return: True if given type is a string type otherwise False
"""
return self.checkSupportedType(type)
def parseValue(self, rawValue):
"""
Parse the string value
@return:
"""
if self.type_params == ASCII_STR:
value = idc.GetString(rawValue, strtype=idc.ASCSTR_C)
description = "ASCII C-String"
elif self.type_params == UNICODE_STR:
value = idc.GetString(rawValue, strtype=idc.ASCSTR_UNICODE)
description = "Unicode String"
else:
return
value, raw_value = self.normalize_raw_value(value)
self.addParsedvalue(value, 0, description, raw_value)
def normalize_raw_value(self, value):
"""
Normalize value.
@param value: value to normalize
        @return: a tuple (Normalized_Value, Raw_value)
"""
if value is not None:
raw_value = "0x%s" % value.encode("hex")
value = repr(value)
return (value, raw_value)
return (None, None)
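    # Illustrative example (comment only): for the (Python 2) string "abc" the
    # method above returns ("'abc'", "0x616263"), i.e. the repr() of the value
    # plus its hex-encoded raw bytes.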
|
src/fingerflow/extractor/__init__.py | jakubarendac/fingerflow | 327 | 12655950 | <filename>src/fingerflow/extractor/__init__.py
from .extractor import Extractor
|
devices/tests/test_models.py | ggidofalvy-tc/peering-manager | 173 | 12655959 | <filename>devices/tests/test_models.py<gh_stars>100-1000
from django.conf import settings
from django.test import TestCase
from devices.enums import PasswordAlgorithm
from devices.models import Platform
class PlatformTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.platforms = [
Platform(
name="Mercuros",
slug="mercuros",
password_algorithm=PasswordAlgorithm.JUNIPER_TYPE9,
),
Platform(
name="Test OS",
slug="test-os",
password_algorithm=PasswordAlgorithm.CISCO_TYPE7,
),
Platform(name="Wrong OS", slug="wrong-os"),
]
Platform.objects.bulk_create(cls.platforms)
def test_password_encryption_decryption(self):
clear_text_password = "<PASSWORD>"
junos = Platform.objects.filter(
password_algorithm=PasswordAlgorithm.JUNIPER_TYPE9
).first()
encrypted_password = junos.encrypt_password(clear_text_password)
self.assertNotEqual(clear_text_password, encrypted_password)
self.assertEqual(
clear_text_password, junos.decrypt_password(encrypted_password)
)
cisco = Platform.objects.filter(
password_algorithm=PasswordAlgorithm.CISCO_TYPE7
).first()
encrypted_password = cisco.encrypt_password(clear_text_password)
self.assertNotEqual(clear_text_password, encrypted_password)
self.assertEqual(
clear_text_password, cisco.decrypt_password(encrypted_password)
)
wrong = Platform.objects.filter(password_algorithm="").first()
encrypted_password = wrong.encrypt_password(clear_text_password)
self.assertEqual(clear_text_password, encrypted_password)
self.assertEqual(
clear_text_password, wrong.decrypt_password(encrypted_password)
)
|
deep-rl/lib/python2.7/site-packages/OpenGL/raw/GLES2/OES/texture_storage_multisample_2d_array.py | ShujaKhalid/deep-rl | 210 | 12655966 | <gh_stars>100-1000
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES2_OES_texture_storage_multisample_2d_array'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_OES_texture_storage_multisample_2d_array',error_checker=_errors._error_checker)
GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY_OES=_C('GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY_OES',0x910C)
GL_SAMPLER_2D_MULTISAMPLE_ARRAY_OES=_C('GL_SAMPLER_2D_MULTISAMPLE_ARRAY_OES',0x910B)
GL_TEXTURE_2D_MULTISAMPLE_ARRAY_OES=_C('GL_TEXTURE_2D_MULTISAMPLE_ARRAY_OES',0x9102)
GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY_OES=_C('GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY_OES',0x9105)
GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY_OES=_C('GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY_OES',0x910D)
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,_cs.GLenum,_cs.GLsizei,_cs.GLsizei,_cs.GLsizei,_cs.GLboolean)
def glTexStorage3DMultisampleOES(target,samples,internalformat,width,height,depth,fixedsamplelocations):pass
|
h2o-py/tests/testdir_munging/pyunit_whichmaxmin.py | ahmedengu/h2o-3 | 6,098 | 12655972 | from __future__ import print_function
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
import pandas as pd
def whichmaxmin():
#Make H2O frame
f1 = h2o.create_frame(rows = 10000, cols = 100, categorical_fraction = 0, missing_fraction = 0,seed=1234)
#Make comparable pandas frame
f2 = f1.as_data_frame(use_pandas=True)
#############################################################
#Col wise max
which_max_col = f1.idxmax()
which_max_col = which_max_col.transpose()
which_max_col_pd = f2.idxmax(axis=0)
which_max_col_pd = h2o.H2OFrame(pd.DataFrame(which_max_col_pd,columns=["C1"]))
diff_max_col_idx = which_max_col - which_max_col_pd
assert diff_max_col_idx.sum() == 0
#Col wise min
which_min_col = f1.idxmin()
which_min_col = which_min_col.transpose()
which_min_col_pd = f2.idxmin(axis=0)
which_min_col_pd = h2o.H2OFrame(pd.DataFrame(which_min_col_pd,columns=["C1"]))
diff_min_col_idx = which_min_col - which_min_col_pd
assert diff_min_col_idx.sum() == 0
#############################################################
#Row wise max
which_max_row = f1.idxmax(axis=1)
which_max_row_pd = f2.idxmax(axis=1)
which_max_row_pd = h2o.H2OFrame(pd.DataFrame(which_max_row_pd,columns=["C1"]))
which_max_row_pd = which_max_row_pd.ascharacter().lstrip("C").asnumeric() - 1 #Had to clean up before comparison (indexing was +1)
diff_max_row_idx = which_max_row - which_max_row_pd
assert diff_max_row_idx.sum() == 0
#Row wise min
which_min_row = f1.idxmin(axis=1)
which_min_row_pd = f2.idxmin(axis=1)
which_min_row_pd = h2o.H2OFrame(pd.DataFrame(which_min_row_pd,columns=["C1"]))
which_min_row_pd = which_min_row_pd.ascharacter().lstrip("C").asnumeric() - 1 #Had to clean up before comparison (indexing was +1)
diff_min_row_idx = which_min_row - which_min_row_pd
assert diff_min_row_idx.sum() == 0
if __name__ == "__main__":
pyunit_utils.standalone_test(whichmaxmin)
else:
whichmaxmin() |
Tests/test_execfile.py | cwensley/ironpython2 | 1,078 | 12655984 | # Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import os
import unittest
from iptest import IronPythonTestCase, run_test
class ExecFileTest(IronPythonTestCase):
def test_sanity(self):
root = self.test_dir
execfile(os.path.join(root, "Inc", "toexec.py"))
execfile(os.path.join(root, "Inc", "toexec.py"))
#execfile(root + "/doc.py")
execfile(os.path.join(root, "Inc", "toexec.py"))
def test_negative(self):
self.assertRaises(TypeError, execfile, None) # arg must be string
self.assertRaises(TypeError, execfile, [])
self.assertRaises(TypeError, execfile, 1)
self.assertRaises(TypeError, execfile, "somefile", "")
def test_scope(self):
root = self.test_dir
z = 10
execfile(os.path.join(root, "Inc", "execfile_scope.py"))
run_test(__name__)
|
Python/Swap-two-numbers.py | Nikhil-Sharma-1/DS-Algo-Point | 1,148 | 12655993 | <filename>Python/Swap-two-numbers.py
# Problem statement
# write a program to swap two numbers without using third variable
x = input()
y = input()
print ("Before swapping: ")
print("Value of x : ", x, " and y : ", y)
x, y = y, x
print ("After swapping: ")
print("Value of x : ", x, " and y : ", y)
# sample input
# 10
# 20
# sample output
# Before swapping:
# Value of x : 10 and y : 20
# After swapping:
# Value of x : 20 and y : 10
# Time complexity : O(1)
# space complexity : O(1) |
test/unit/test_interface.py | Zhiyuan-w/DeepReg | 379 | 12656023 | <gh_stars>100-1000
# coding=utf-8
"""
Tests for deepreg/dataset/loader/interface.py
"""
from test.unit.util import is_equal_np
from typing import Optional, Tuple
import numpy as np
import pytest
from deepreg.dataset.loader.interface import (
AbstractPairedDataLoader,
AbstractUnpairedDataLoader,
DataLoader,
FileLoader,
GeneratorDataLoader,
)
from deepreg.dataset.loader.nifti_loader import NiftiFileLoader
from deepreg.dataset.loader.paired_loader import PairedDataLoader
from deepreg.dataset.loader.util import normalize_array
class TestDataLoader:
@pytest.mark.parametrize(
"labeled,num_indices,sample_label,seed",
[
(True, 1, "all", 0),
(False, 1, "all", 0),
(None, 1, "all", 0),
(True, 1, "sample", 0),
(True, 1, "all", 0),
(True, 1, None, 0),
(True, 1, "sample", None),
],
)
def test_init(self, labeled, num_indices, sample_label, seed):
"""
Test init function of DataLoader class
:param labeled: bool
:param num_indices: int
:param sample_label: str
:param seed: float/int/None
:return:
"""
DataLoader(
labeled=labeled,
num_indices=num_indices,
sample_label=sample_label,
seed=seed,
)
data_loader = DataLoader(
labeled=labeled,
num_indices=num_indices,
sample_label=sample_label,
seed=seed,
)
with pytest.raises(NotImplementedError):
data_loader.moving_image_shape
with pytest.raises(NotImplementedError):
data_loader.fixed_image_shape
with pytest.raises(NotImplementedError):
data_loader.num_samples
with pytest.raises(NotImplementedError):
data_loader.get_dataset()
data_loader.close()
@pytest.mark.parametrize(
"labeled,moving_shape,fixed_shape,batch_size,data_augmentation",
[
(True, (9, 9, 9), (9, 9, 9), 1, {}),
(
True,
(9, 9, 9),
(15, 15, 15),
1,
{"data_augmentation": {"name": "affine"}},
),
(
True,
(9, 9, 9),
(15, 15, 15),
1,
{
"data_augmentation": [
{"name": "affine"},
{
"name": "ddf",
"field_strength": 1,
"low_res_size": (3, 3, 3),
},
],
},
),
],
)
def test_get_dataset_and_preprocess(
self, labeled, moving_shape, fixed_shape, batch_size, data_augmentation
):
"""
        Test the get_dataset_and_preprocess() function. For that, a paired data loader
        is created only to set the moving and fixed shapes that are used during
        preprocessing. Here we test that get_dataset_and_preprocess() returns a dataset
        and check the shapes of its outputs. See test_preprocess.py for more testing
        regarding the concrete params.
:param labeled: bool
:param moving_shape: tuple
:param fixed_shape: tuple
:param batch_size: total number of samples consumed per step, over all devices.
:param data_augmentation: dict
:return:
"""
data_dir_path = [
"data/test/nifti/paired/train",
"data/test/nifti/paired/test",
]
common_args = dict(
file_loader=NiftiFileLoader, labeled=True, sample_label="all", seed=None
)
data_loader = PairedDataLoader(
data_dir_paths=data_dir_path,
fixed_image_shape=fixed_shape,
moving_image_shape=moving_shape,
**common_args,
)
dataset = data_loader.get_dataset_and_preprocess(
training=True,
batch_size=batch_size,
repeat=True,
shuffle_buffer_num_batch=1,
**data_augmentation,
)
for outputs in dataset.take(1):
assert (
outputs["moving_image"].shape
== (batch_size,) + data_loader.moving_image_shape
)
assert (
outputs["fixed_image"].shape
== (batch_size,) + data_loader.fixed_image_shape
)
assert (
outputs["moving_label"].shape
== (batch_size,) + data_loader.moving_image_shape
)
assert (
outputs["fixed_label"].shape
== (batch_size,) + data_loader.fixed_image_shape
)
def test_abstract_paired_data_loader():
"""
Test the functions in AbstractPairedDataLoader
"""
moving_image_shape = (8, 8, 4)
fixed_image_shape = (6, 6, 4)
# test init invalid shape
with pytest.raises(ValueError) as err_info:
AbstractPairedDataLoader(
moving_image_shape=(2, 2),
fixed_image_shape=(3, 3),
labeled=True,
sample_label="sample",
)
assert "moving_image_shape and fixed_image_shape have length of three" in str(
err_info.value
)
# test init valid shapes
data_loader = AbstractPairedDataLoader(
moving_image_shape=moving_image_shape,
fixed_image_shape=fixed_image_shape,
labeled=True,
sample_label="sample",
)
# test properties
assert data_loader.num_indices == 2
assert data_loader.moving_image_shape == moving_image_shape
assert data_loader.fixed_image_shape == fixed_image_shape
assert data_loader.num_samples is None
def test_abstract_unpaired_data_loader():
"""
Test the functions in AbstractUnpairedDataLoader
"""
image_shape = (8, 8, 4)
# test init invalid shape
with pytest.raises(ValueError) as err_info:
AbstractUnpairedDataLoader(
image_shape=(2, 2), labeled=True, sample_label="sample"
)
assert "image_shape has to be length of three" in str(err_info.value)
# test init valid shapes
data_loader = AbstractUnpairedDataLoader(
image_shape=image_shape, labeled=True, sample_label="sample"
)
# test properties
assert data_loader.num_indices == 3
assert data_loader.moving_image_shape == image_shape
assert data_loader.fixed_image_shape == image_shape
assert data_loader.num_samples is None
def get_arr(shape: Tuple = (2, 3, 4), seed: Optional[int] = None) -> np.ndarray:
"""
Return a random array.
:param shape: shape of array.
:param seed: random seed.
:return: random array.
"""
np.random.seed(seed)
return np.random.random(size=shape).astype(np.float32)
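# Note (not part of the original tests): because get_arr seeds NumPy before
# sampling, two calls with the same `seed` return identical arrays, e.g.
#   is_equal_np(get_arr(seed=2), get_arr(seed=2))  # -> True
# which is how the expected values in the tests below are reconstructed.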
class TestGeneratorDataLoader:
@pytest.mark.parametrize("labeled", [True, False])
def test_get_labeled_dataset(self, labeled: bool):
"""
Test get_dataset with data loader.
:param labeled: labeled data or not.
"""
sample = {
"moving_image": get_arr(),
"fixed_image": get_arr(),
"indices": [1],
}
if labeled:
sample = {
"moving_label": get_arr(),
"fixed_label": get_arr(),
**sample,
}
def mock_gen():
"""Toy data generator."""
for _ in range(3):
yield sample
loader = GeneratorDataLoader(labeled=labeled, num_indices=1, sample_label="all")
loader.__setattr__("data_generator", mock_gen)
dataset = loader.get_dataset()
for got in dataset.as_numpy_iterator():
assert all(is_equal_np(got[key], sample[key]) for key in sample.keys())
@pytest.mark.parametrize("labeled", [True, False])
def test_data_generator(self, labeled: bool):
"""
Test data_generator()
:param labeled: labeled data or not.
"""
class MockDataLoader:
"""Toy data loader."""
def __init__(self, seed: int):
"""
Init.
:param seed: random seed for numpy.
:param kwargs: additional arguments.
"""
self.seed = seed
def get_data(self, index: int) -> np.ndarray:
"""
Return the dummy array despite of the index.
:param index: not used
:return: dummy array.
"""
assert isinstance(index, int)
return get_arr(seed=self.seed)
def mock_sample_index_generator():
"""Toy sample index generator."""
return [[1, 1, [1]]]
loader = GeneratorDataLoader(labeled=labeled, num_indices=1, sample_label="all")
loader.__setattr__("sample_index_generator", mock_sample_index_generator)
loader.loader_moving_image = MockDataLoader(seed=0)
loader.loader_fixed_image = MockDataLoader(seed=1)
if labeled:
loader.loader_moving_label = MockDataLoader(seed=2)
loader.loader_fixed_label = MockDataLoader(seed=3)
# check data loader output
got = next(loader.data_generator())
expected = {
"moving_image": normalize_array(get_arr(seed=0)),
"fixed_image": normalize_array(get_arr(seed=1)),
# 0 or -1 is the label index
"indices": np.array([1, 0] if labeled else [1, -1], dtype=np.float32),
}
if labeled:
expected = {
"moving_label": get_arr(seed=2),
"fixed_label": get_arr(seed=3),
**expected,
}
assert all(is_equal_np(got[key], expected[key]) for key in expected.keys())
def test_sample_index_generator(self):
loader = GeneratorDataLoader(labeled=True, num_indices=1, sample_label="all")
with pytest.raises(NotImplementedError):
loader.sample_index_generator()
@pytest.mark.parametrize(
(
"moving_image_shape",
"fixed_image_shape",
"moving_label_shape",
"fixed_label_shape",
"err_msg",
),
[
(
None,
(10, 10, 10),
(10, 10, 10),
(10, 10, 10),
"moving image and fixed image must not be None",
),
(
(10, 10, 10),
None,
(10, 10, 10),
(10, 10, 10),
"moving image and fixed image must not be None",
),
(
(10, 10, 10),
(10, 10, 10),
None,
(10, 10, 10),
"moving label and fixed label must be both None or non-None",
),
(
(10, 10, 10),
(10, 10, 10),
(10, 10, 10),
None,
"moving label and fixed label must be both None or non-None",
),
(
(10, 10),
(10, 10, 10),
(10, 10, 10),
(10, 10, 10),
"Sample [1]'s moving_image's shape should be 3D",
),
(
(10, 10, 10),
(10, 10),
(10, 10, 10),
(10, 10, 10),
"Sample [1]'s fixed_image's shape should be 3D",
),
(
(10, 10, 10),
(10, 10, 10),
(10, 10),
(10, 10, 10),
"Sample [1]'s moving_label's shape should be 3D or 4D.",
),
(
(10, 10, 10),
(10, 10, 10),
(10, 10, 10),
(10, 10),
"Sample [1]'s fixed_label's shape should be 3D or 4D.",
),
(
(10, 10, 10),
(10, 10, 10),
(10, 10, 10, 2),
(10, 10, 10, 3),
"Sample [1]'s moving image and fixed image "
"have different numbers of labels.",
),
],
)
def test_validate_images_and_labels(
self,
moving_image_shape: Optional[Tuple],
fixed_image_shape: Optional[Tuple],
moving_label_shape: Optional[Tuple],
fixed_label_shape: Optional[Tuple],
err_msg: str,
):
"""
Test error messages.
:param moving_image_shape: None or tuple.
:param fixed_image_shape: None or tuple.
:param moving_label_shape: None or tuple.
:param fixed_label_shape: None or tuple.
:param err_msg: message.
"""
moving_image = None
fixed_image = None
moving_label = None
fixed_label = None
if moving_image_shape:
moving_image = get_arr(shape=moving_image_shape)
if fixed_image_shape:
fixed_image = get_arr(shape=fixed_image_shape)
if moving_label_shape:
moving_label = get_arr(shape=moving_label_shape)
if fixed_label_shape:
fixed_label = get_arr(shape=fixed_label_shape)
loader = GeneratorDataLoader(labeled=True, num_indices=1, sample_label="all")
with pytest.raises(ValueError) as err_info:
loader.validate_images_and_labels(
moving_image=moving_image,
fixed_image=fixed_image,
moving_label=moving_label,
fixed_label=fixed_label,
image_indices=[1],
)
assert err_msg in str(err_info.value)
@pytest.mark.parametrize("option", [0, 1, 2, 3])
def test_validate_images_and_labels_range(self, option: int):
"""
Test error messages related to input range.
:param option: control which image to modify
"""
option_to_name = {
0: "moving_image",
1: "fixed_image",
2: "moving_label",
3: "fixed_label",
}
input = {
"moving_image": get_arr(),
"fixed_image": get_arr(),
"moving_label": get_arr(),
"fixed_label": get_arr(),
}
name = option_to_name[option]
input[name] += 1
err_msg = f"Sample [1]'s {name}'s values are not between [0, 1]"
loader = GeneratorDataLoader(labeled=True, num_indices=1, sample_label="all")
with pytest.raises(ValueError) as err_info:
loader.validate_images_and_labels(
image_indices=[1],
**input,
)
assert err_msg in str(err_info.value)
def test_sample_image_label_unlabeled(self):
"""Test sample_image_label in unlabeled case."""
loader = GeneratorDataLoader(labeled=False, num_indices=1, sample_label="all")
got = next(
loader.sample_image_label(
moving_image=get_arr(seed=0),
fixed_image=get_arr(seed=1),
moving_label=None,
fixed_label=None,
image_indices=[1],
)
)
expected = dict(
moving_image=get_arr(seed=0),
fixed_image=get_arr(seed=1),
indices=np.asarray([1, -1], dtype=np.float32),
)
assert all(is_equal_np(got[key], expected[key]) for key in expected.keys())
@pytest.mark.parametrize("shape", [(2, 3, 4), (2, 3, 4, 1)])
def test_sample_image_label_one_label(self, shape: Tuple):
"""
Test sample_image_label in labeled case with one label.
:param shape: shape of the label.
"""
loader = GeneratorDataLoader(labeled=True, num_indices=1, sample_label="all")
got = next(
loader.sample_image_label(
moving_image=get_arr(shape=shape[:3], seed=0),
fixed_image=get_arr(shape=shape[:3], seed=1),
moving_label=get_arr(shape=shape, seed=2),
fixed_label=get_arr(shape=shape, seed=3),
image_indices=[1],
)
)
expected = dict(
moving_image=get_arr(shape=shape[:3], seed=0),
fixed_image=get_arr(shape=shape[:3], seed=1),
moving_label=get_arr(shape=shape[:3], seed=2),
fixed_label=get_arr(shape=shape[:3], seed=3),
indices=np.asarray([1, 0], dtype=np.float32),
)
assert all(is_equal_np(got[key], expected[key]) for key in expected.keys())
def test_sample_image_label_multiple_labels(self):
"""Test sample_image_label in labeled case with multiple labels."""
loader = GeneratorDataLoader(labeled=True, num_indices=1, sample_label="all")
shape = (2, 3, 4, 5)
got_iter = loader.sample_image_label(
moving_image=get_arr(shape=shape[:3], seed=0),
fixed_image=get_arr(shape=shape[:3], seed=1),
moving_label=get_arr(shape=shape, seed=2),
fixed_label=get_arr(shape=shape, seed=3),
image_indices=[1],
)
moving_label = get_arr(shape=shape, seed=2)
fixed_label = get_arr(shape=shape, seed=3)
for i in range(shape[-1]):
got = next(got_iter)
expected = dict(
moving_image=get_arr(shape=shape[:3], seed=0),
fixed_image=get_arr(shape=shape[:3], seed=1),
moving_label=moving_label[:, :, :, i],
fixed_label=fixed_label[:, :, :, i],
indices=np.asarray([1, i], dtype=np.float32),
)
assert all(is_equal_np(got[key], expected[key]) for key in expected.keys())
def test_file_loader():
"""
Test the functions in FileLoader
"""
# init, no error means passed
loader_grouped = FileLoader(
dir_paths=["/path/grouped_loader/"], name="grouped_loader", grouped=True
)
loader_ungrouped = FileLoader(
dir_paths=["/path/ungrouped_loader/"], name="ungrouped_loader", grouped=False
)
# init fails with repeated paths
with pytest.raises(ValueError) as err_info:
FileLoader(
dir_paths=["/path/ungrouped_loader/", "/path/ungrouped_loader/"],
name="ungrouped_loader",
grouped=False,
)
assert "dir_paths have repeated elements" in str(err_info.value)
# not implemented properties / functions
with pytest.raises(NotImplementedError):
loader_grouped.set_data_structure()
with pytest.raises(NotImplementedError):
loader_grouped.set_group_structure()
with pytest.raises(NotImplementedError):
loader_grouped.get_data(1)
with pytest.raises(NotImplementedError):
loader_grouped.get_data_ids()
with pytest.raises(NotImplementedError):
loader_grouped.get_num_images()
with pytest.raises(NotImplementedError):
loader_grouped.close()
# test grouped file loader functions
assert loader_grouped.group_struct is None
# create mock group structure with nested list
loader_grouped.group_struct = [[1, 2], [3, 4], [5, 6]]
assert loader_grouped.get_num_groups() == 3
assert loader_grouped.get_num_images_per_group() == [2, 2, 2]
with pytest.raises(ValueError) as err_info:
loader_grouped.group_struct = [[], [3, 4], [5, 6]]
loader_grouped.get_num_images_per_group()
assert "Groups of ID [0, 2, 2] are empty." in str(err_info.value)
# test ungrouped file loader
assert loader_ungrouped.group_struct is None
with pytest.raises(AssertionError):
loader_ungrouped.get_num_groups()
with pytest.raises(AssertionError):
loader_ungrouped.get_num_images_per_group()
|
3rdParty/iresearch/scripts/Prometheus/PythonBenchmark.py | rajeev02101987/arangodb | 12,278 | 12656027 | #!/usr/bin/env python
import re
from prometheus_client import CollectorRegistry, Gauge, push_to_gateway
import subprocess
import platform
import os
import subprocess
import shutil
import sys
import csv
#Base dictionary labels
baseLabels = ["repeat", "threads", "random", "scorer", "scorer-arg"]
class MetricValue:
"""Generic class for storing single metric result"""
def __init__(self, value, labels):
self.labels = labels
self.value = value
class RunFiles:
"""Generic class for storing benchmark results for all runs on same workset size"""
def __init__(self, mySize):
self.size = mySize
self.timingFiles = []
self.memoryFiles = []
self.cpuFiles = []
self.wallClockFiles = []
self.pageMinorFaultsFiles = []
self.pageMajorFaultsFiles = []
self.voluntaryContextSwitchesFiles = []
self.involuntaryContextSwitchesFiles = []
self.indexSizeFiles = []
self.labels = {"size": mySize}
def processIndexSizeFile(self, file, run):
self.indexSizeFiles.append(self.parseIndexSizeStats(file, run))
def processTimingFile(self, file, run):
self.timingFiles.append(self.parseQueriesStats(file, run))
def processMemoryFile(self, file, run):
self.memoryFiles.append(self.parseMemoryStats(file, run))
self.cpuFiles.append(self.parseCPUStats(file, run))
self.wallClockFiles.append(self.parseWallClockStats(file, run))
self.pageMinorFaultsFiles.append(self.parseMinorPageFaultStats(file, run))
self.pageMajorFaultsFiles.append(self.parseMajorPageFaultStats(file, run))
self.voluntaryContextSwitchesFiles.append(self.parseVoluntaryContextSwitchesStats(file, run))
self.involuntaryContextSwitchesFiles.append(self.parseInvoluntaryContextSwitchesStats(file, run))
def parseIndexSizeStats(self, filename, run):
metrics = []
datafile = open(filename, 'r')
sizeString = datafile.read();
result = {"run": int(run)}
metrics.append(MetricValue(int(sizeString), result))
return metrics
def parseWallClockStats(self, filename, run):
metrics = []
with open(filename, newline='') as datafile:
for row in datafile:
result = {"run": int(run)}
m = re.search("Elapsed \(wall clock\) time \(h:mm:ss or m:ss\): ((([0-9]*):)?([0-9]*):)?(([0-9]*).([0-9]*))", row)
if m is not None:
seconds = 0
if m.group(3) is not None:
seconds += int(m.group(3)) * 60 * 60
if m.group(4) is not None:
seconds += int(m.group(4)) * 60
if m.group(6) is not None:
seconds += int(m.group(6))
metrics.append(MetricValue(float(seconds), result))
break
return metrics
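    # Worked example (comment only): a GNU time line such as
    # "Elapsed (wall clock) time (h:mm:ss or m:ss): 1:23:45.67" yields
    # 1 * 3600 + 23 * 60 + 45 = 5025 seconds; the fractional part is dropped
    # by the integer conversion above.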
def parseMinorPageFaultStats(self, filename, run):
metrics = []
with open(filename, newline='') as datafile:
for row in datafile:
result = {"run": int(run)}
m = re.search("Minor \(reclaiming a frame\) page faults: ([0-9]*)", row)
if m is not None:
metrics.append(MetricValue(float(m.group(1)), result))
break
return metrics
def parseMajorPageFaultStats(self, filename, run):
metrics = []
with open(filename, newline='') as datafile:
for row in datafile:
result = {"run": int(run)}
m = re.search("Major \(requiring I/O\) page faults: ([0-9]*)", row)
if m is not None:
metrics.append(MetricValue(float(m.group(1)), result))
break
return metrics
def parseInvoluntaryContextSwitchesStats(self, filename, run):
metrics = []
with open(filename, newline='') as datafile:
for row in datafile:
result = {"run": int(run)}
m = re.search("Involuntary context switches: ([0-9]*)", row)
if m is not None:
metrics.append(MetricValue(float(m.group(1)), result))
break
return metrics
def parseVoluntaryContextSwitchesStats(self, filename, run):
metrics = []
with open(filename, newline='') as datafile:
for row in datafile:
result = {"run": int(run)}
m = re.search("Voluntary context switches: ([0-9]*)", row)
if m is not None:
metrics.append(MetricValue(float(m.group(1)), result))
break
return metrics
def parseMemoryStats(self, filename, run):
metrics = []
with open(filename, newline='') as datafile:
for row in datafile:
result = {"run": int(run)}
m = re.search("Maximum resident set size \(kbytes\): ([0-9]*)", row)
if m is not None:
metrics.append(MetricValue(float(m.group(1)), result))
break
return metrics
def parseCPUStats(self, filename, run):
metrics = []
with open(filename, newline='') as datafile:
for row in datafile:
result = {"run": int(run)}
m = re.search("Percent of CPU this job got: ([0-9]*)%", row)
if m is not None:
metrics.append(MetricValue(float(m.group(1)), result))
break
return metrics
def parseQueriesStats(self, filename, run):
metrics = []
with open(filename, newline='') as datafile:
for row in datafile:
result = {"run": int(run)}
m = re.search("Query execution \(([a-zA-Z0-9]*)\) time calls:([0-9]*), time: ([0-9\.\+e]*) us, avg call: ([0-9\.\+e]*) us", row)
if m is not None:
result["stage"] = "Executing"
else:
m = re.search("Query building \(([a-zA-Z0-9]*)\) time calls:([0-9]*), time: ([0-9\.\+e]*) us, avg call: ([0-9\.\+e]*) us", row)
if m is not None:
result["stage"] = "Building"
if m is not None:
result["category"] = m.group(1)
result["calls"] = int(m.group(2))
metrics.append(MetricValue(float(m.group(4)), result))
else:
result["stage"] = "General"
# Could be Index reading or Total time
m = re.search("Index read time calls:([0-9]*), time: ([0-9\.\+e]*) us, avg call: ([0-9\.\+e]*) us", row)
if m is not None:
result["category"] = "IndexRead"
result["calls"] = int(m.group(1))
metrics.append(MetricValue(float(m.group(3)), result))
else:
m = re.search("Total Time calls:([0-9]*), time: ([0-9\.\+e]*) us, avg call: ([0-9\.\+e]*) us", row)
if m is not None:
result["category"] = "Query"
result["calls"] = int(m.group(1))
metrics.append(MetricValue(float(m.group(3)), result))
return metrics
class IResearchIndexRunFiles(RunFiles):
""" IResearch indexing specific benchmark resulst parsing """
def __init__(self, mySize):
super().__init__(mySize)
self.baseParametersExtracted = False
self.indexPath = None
def parseBaseIResearchParameters(self, filename):
result = {}
with open(filename, newline='') as datafile:
for row in datafile:
m = re.search("Command being timed: \".*iresearch-benchmarks.* -m put --in .* --index-dir (.*) --max-lines=[0-9]* --commit-period=[0-9]* --batch-size=[0-9]* --threads=([0-9]*)", row)
if m is not None:
self.indexPath = m.group(1)
result["threads"] = int(m.group(2))
break
return result
def processMemoryFile(self, file, run):
if not self.baseParametersExtracted:
self.labels.update(self.parseBaseIResearchParameters(file))
self.baseParametersExtracted = True
super().processMemoryFile(file, run)
class IResearchRunFiles(RunFiles):
""" IResearch search specific benchmark resulst parsing """
def __init__(self, mySize):
super().__init__(mySize)
self.baseParametersExtracted = False
def processTimingFile(self, file, run):
if not self.baseParametersExtracted:
self.labels.update(self.parseBaseIResearchParameters(file))
self.baseParametersExtracted = True
super().processTimingFile(file, run)
def parseBaseIResearchParameters(self, filename):
result = {}
with open(filename, newline='') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
param = row[0].split("=")
if len(param) == 2 and param[0] in baseLabels:
arg = param[0]
if param[0] == "scorer-arg": # original name is invalid for prometheus
arg = "scorerarg"
result[arg] = param[1]
if len(result) == len(baseLabels):
break
return result
class LuceneIndexRunFiles(RunFiles):
""" Lucene index specific benchmark resulst parsing """
def __init__(self, mySize):
super().__init__(mySize)
self.baseParametersExtracted = False
self.indexPath = None
def parseBaseLuceneParameters(self, filename):
result = {}
with open(filename, newline='') as datafile:
for row in datafile:
m = re.search("Command being timed: \"java -jar .* -dirImpl MMapDirectory -analyzer StandardAnalyzer -lineDocsFile .* -maxConcurrentMerges [0-9]* -ramBufferMB -1 -postingsFormat Lucene50 -waitForMerges -mergePolicy LogDocMergePolicy -idFieldPostingsFormat Lucene50 -grouping -waitForCommit -indexPath (.*) -docCountLimit [0-9]* -maxBufferedDocs [0-9]* -threadCount ([0-9]*)\"", row)
if m is not None:
self.indexPath = m.group(1)
result["threads"] = int(m.group(2))
break
return result
def processMemoryFile(self, file, run):
if not self.baseParametersExtracted:
self.labels.update(self.parseBaseLuceneParameters(file))
self.baseParametersExtracted = True
super().processMemoryFile(file, run)
class LuceneRunFiles(RunFiles):
""" Lucene search specific benchmark resulst parsing """
def __init__(self, mySize):
super().__init__(mySize)
self.baseParametersExtracted = False
def parseBaseLuceneParameters(self, filename):
result = {}
with open(filename, newline='') as datafile:
for row in datafile:
m = re.search("Command being timed: \"java -server -Xms2g -Xmx40g -XX:-TieredCompilation -XX:\+HeapDumpOnOutOfMemoryError -Xbatch -jar .* -dirImpl MMapDirectory -indexPath .* -analyzer StandardAnalyzer -taskSource .* -searchThreadCount ([0-9]*) -taskRepeatCount ([0-9]*) -field body", row)
if m is not None:
result["threads"] = int(m.group(1))
result["repeat"] = int(m.group(2))
break
return result
def processMemoryFile(self, file, run):
if not self.baseParametersExtracted:
self.labels.update(self.parseBaseLuceneParameters(file))
self.baseParametersExtracted = True
super().processMemoryFile(file, run)
# Helper methods
def fillGauge(files, gauge, labelsToSendTemplate):
localTemplate = labelsToSendTemplate.copy()
for s in files:
for l in s:
labelsToSend = localTemplate.copy()
labelsToSend.update(l.labels)
gauge.labels(**labelsToSend).set(l.value)
def sendStatsToPrometheus(time, memory, cpu, wallClock, pageMinFaults, pageMajFaults, volContextSwitches, involContextSwitches, indexSize, parsedFiles, engine, default_category="<None>"):
# Label must be all present! For start all will be placeholders
labelsToSendTemplate = {"engine" : engine, "size": "<None>", "category": default_category,\
"repeat": "<None>", "threads": "<None>", "random": "<None>",\
"scorer": "<None>", "scorerarg": "<None>", "run": "<None>", "calls": "<None>",\
"branch": sys.argv[3], "platform": sys.argv[2], "stage": "<None>"}
for size, stats in parsedFiles.items():
labelsToSendTemplate.update({"size":size})
labelsToSendTemplate.update(stats.labels);
fillGauge(stats.timingFiles, time, labelsToSendTemplate)
fillGauge(stats.memoryFiles, memory, labelsToSendTemplate)
fillGauge(stats.cpuFiles, cpu, labelsToSendTemplate)
fillGauge(stats.wallClockFiles, wallClock, labelsToSendTemplate)
fillGauge(stats.pageMinorFaultsFiles, pageMinFaults, labelsToSendTemplate)
fillGauge(stats.pageMajorFaultsFiles, pageMajFaults, labelsToSendTemplate)
fillGauge(stats.voluntaryContextSwitchesFiles, volContextSwitches, labelsToSendTemplate)
fillGauge(stats.involuntaryContextSwitchesFiles, involContextSwitches, labelsToSendTemplate)
if indexSize is not None:
fillGauge(stats.indexSizeFiles, indexSize, labelsToSendTemplate)
def main():
## Parsing Search results
iresearchRunFiles = {}
luceneRunFiles = {}
iresearchIndexRunFiles = {}
luceneIndexRunFiles = {}
for f in os.listdir(sys.argv[1]):
m = re.match('(lucene|iresearch)\.(stdout|stdlog|stderr)\.([0-9]*)\.(search|index)\.log\.([0-9])', f)
if m is not None:
size = int(m.group(3))
if m.group(1) == "iresearch":
if (m.group(4) == "search"):
if size not in iresearchRunFiles.keys():
iresearchRunFiles[size] = IResearchRunFiles(size)
if m.group(2) == "stdout":
iresearchRunFiles[size].processTimingFile(os.path.join(sys.argv[1],f), m.group(5))
else:
iresearchRunFiles[size].processMemoryFile(os.path.join(sys.argv[1],f), m.group(5))
else:
if size not in iresearchIndexRunFiles.keys():
iresearchIndexRunFiles[size] = IResearchIndexRunFiles(size)
if m.group(2) == "stderr":
iresearchIndexRunFiles[size].processMemoryFile(os.path.join(sys.argv[1],f), m.group(5))
else:
if (m.group(4) == "search"):
if size not in luceneRunFiles.keys():
luceneRunFiles[size] = LuceneRunFiles(size)
if m.group(2) == "stdlog":
luceneRunFiles[size].processTimingFile(os.path.join(sys.argv[1],f), m.group(5))
elif m.group(2) == "stderr":
luceneRunFiles[size].processMemoryFile(os.path.join(sys.argv[1],f), m.group(5))
else:
if size not in luceneIndexRunFiles.keys():
luceneIndexRunFiles[size] = LuceneIndexRunFiles(size)
if m.group(2) == "stderr":
luceneIndexRunFiles[size].processMemoryFile(os.path.join(sys.argv[1],f), m.group(5))
else:
m = re.match('(lucene|iresearch)\.([0-9]*)\.indexSize\.log\.([0-9])', f)
if m is not None:
size = int(m.group(2))
if m.group(1) == "iresearch":
if size not in iresearchRunFiles.keys():
iresearchRunFiles[size] = IResearchRunFiles(size)
iresearchRunFiles[size].processIndexSizeFile(os.path.join(sys.argv[1],f), m.group(3))
else:
if size not in luceneRunFiles.keys():
luceneRunFiles[size] = LuceneRunFiles(size)
luceneRunFiles[size].processIndexSizeFile(os.path.join(sys.argv[1],f), m.group(3))
registry = CollectorRegistry()
defaultLabelNames = ["engine", "size", "category", "repeat", "threads",\
"random", "scorer", "scorerarg", "run", "calls",\
"branch", "platform", "stage"]
time = Gauge('Time', 'Execution time (microseconds)', registry=registry, labelnames=defaultLabelNames)
memory = Gauge('Memory', 'Consumed memory (kbytes)', registry=registry, labelnames=defaultLabelNames)
cpu = Gauge('CPU', 'CPU utilization %', registry=registry, labelnames=defaultLabelNames)
wallClock = Gauge('Wall_Clock', 'Elapsed wall clock (seconds)', registry=registry, labelnames=defaultLabelNames)
pageMinFaults = Gauge('MinorPageFaults', 'Minor (reclaiming a frame) page faults', registry=registry, labelnames=defaultLabelNames)
pageMajFaults = Gauge('MajorPageFaults', 'Major (requiring I/O) page faults', registry=registry, labelnames=defaultLabelNames)
volContextSwitches = Gauge('VolContextSwitches', 'Voluntary context switches', registry=registry, labelnames=defaultLabelNames)
involContextSwitches = Gauge('InvolContextSwitches', 'Involuntary context switches', registry=registry, labelnames=defaultLabelNames)
indexSize = Gauge('IndexSize', 'Index directory size (kbytes)', registry=registry, labelnames=defaultLabelNames)
sendStatsToPrometheus(time, memory, cpu, wallClock, pageMinFaults, pageMajFaults, volContextSwitches, involContextSwitches, indexSize, iresearchRunFiles, "IResearch", "Query")
sendStatsToPrometheus(time, memory, cpu, wallClock, pageMinFaults, pageMajFaults, volContextSwitches, involContextSwitches, indexSize, luceneRunFiles, "Lucene", "Query")
sendStatsToPrometheus(time, memory, cpu, wallClock, pageMinFaults, pageMajFaults, volContextSwitches, involContextSwitches, None, iresearchIndexRunFiles, "IResearch", "Index")
sendStatsToPrometheus(time, memory, cpu, wallClock, pageMinFaults, pageMajFaults, volContextSwitches, involContextSwitches, None, luceneIndexRunFiles, "Lucene", "Index")
push_to_gateway(sys.argv[4], job=sys.argv[5], registry=registry)
if __name__== "__main__":
main()
|
examples/Legacy_Rest/modify_ilo_user_account.py | andreaslangnevyjel/python-ilorest-library | 214 | 12656071 | # Copyright 2020 Hewlett Packard Enterprise Development, LP.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from redfish import LegacyRestClient
from get_resource_directory import get_resource_directory
def modify_ilo_user_account(restobj, ilo_login_name_to_modify, \
new_ilo_loginname, new_ilo_username, new_ilo_password, \
irc=None, cfg=None, virtual_media=None, usercfg=None, vpr=None):
resource_instances = get_resource_directory(restobj)
if resource_instances:
#Get URI from resource directory
for instance in resource_instances:
if "Collection." in instance.Type:
if "ManagerAccount." in instance.MemberType:
accounts_path = instance.href
break
accounts = restobj.get(accounts_path)
    for account in accounts.dict["Items"]:
if account["UserName"] == ilo_login_name_to_modify:
body = {}
body_oemhp = {}
body_oemhp_privs = {}
# if new loginname or password specified
if new_ilo_password:
body["Password"] = <PASSWORD>
if new_ilo_loginname:
body["UserName"] = new_ilo_loginname
# if different username specified
if new_ilo_username:
body_oemhp["LoginName"] = new_ilo_username
# if different privileges were requested (None = no change)
if irc != None:
body_oemhp_privs["RemoteConsolePriv"] = irc
if virtual_media != None:
body_oemhp_privs["VirtualMediaPriv"] = virtual_media
if cfg != None:
body_oemhp_privs["iLOConfigPriv"] = cfg
if usercfg != None:
body_oemhp_privs["UserConfigPriv"] = usercfg
if vpr != None:
body_oemhp_privs["VirtualPowerAndResetPriv"] = vpr
# component assembly
if len(body_oemhp_privs):
body_oemhp["Privileges"] = body_oemhp_privs
if len(body_oemhp):
body["Oem"] = {"Hp": body_oemhp}
newrsp = restobj.patch(account["links"]["self"]["href"], body)
sys.stdout.write("%s" % newrsp)
return
sys.stderr.write("Account not found\n")
if __name__ == "__main__":
# When running on the server locally use the following commented values
# SYSTEM_URL = None
# LOGIN_ACCOUNT = None
# LOGIN_PASSWORD = <PASSWORD>
# When running remotely connect using the iLO secured (https://) address,
# iLO account name, and password to send https requests
# SYSTEM_URL acceptable examples:
# "https://10.0.0.100"
# "https://ilo.hostname"
SYSTEM_URL = "https://10.0.0.100"
LOGIN_ACCOUNT = "admin"
LOGIN_PASSWORD = "password"
#Create a REST object
REST_OBJ = LegacyRestClient(base_url=SYSTEM_URL, username=LOGIN_ACCOUNT, password=<PASSWORD>_PASSWORD)
REST_OBJ.login()
sys.stdout.write("\nEXAMPLE 11: Modify an iLO user account\n")
modify_ilo_user_account(REST_OBJ, "name", "newname", "newusername", "newpassword")
REST_OBJ.logout()
|
benchmarks/fast.py | ktanishqk/py-earth | 360 | 12656097 | import numpy as np
from pyearth import Earth
from timeit import Timer
# The robot arm example, as defined in:
# Fast MARS, <NAME>, Technical Report No.110, May 1993, section 6.2.
np.random.seed(2)
nb_examples = 400
theta1 = np.random.uniform(0, 2 * np.pi, size=nb_examples)
theta2 = np.random.uniform(0, 2 * np.pi, size=nb_examples)
phi = np.random.uniform(-np.pi/2, np.pi/2, size=nb_examples)
l1 = np.random.uniform(0, 1, size=nb_examples)
l2 = np.random.uniform(0, 1, size=nb_examples)
x = l1 * np.cos(theta1) - l2 * np.cos(theta1 + theta2) * np.cos(phi)
y = l1 * np.sin(theta1) - l2 * np.sin(theta1 + theta2) * np.cos(phi)
z = l2 * np.sin(theta2) * np.sin(phi)
d = np.sqrt(x**2 + y**2 + z**2)
inputs = np.concatenate([theta1[:, np.newaxis],
theta2[:, np.newaxis],
phi[:, np.newaxis],
l1[:, np.newaxis],
l2[:, np.newaxis]], axis=1)
outputs = d
hp = dict(
max_degree=5,
minspan=1,
endspan=1,
max_terms=100,
allow_linear=False,
)
model_normal = Earth(**hp)
t = Timer(lambda: model_normal.fit(inputs, outputs))
duration_normal = t.timeit(number=1)
print("Normal : MSE={0:.5f}, duration={1:.2f}s".
format(model_normal.mse_, duration_normal))
model_fast = Earth(use_fast=True,
fast_K=5,
fast_h=1,
**hp)
t = Timer(lambda: model_fast.fit(inputs, outputs))
duration_fast = t.timeit(number=1)
print("Fast: MSE={0:.5f}, duration={1:.2f}s".
format(model_fast.mse_, duration_fast))
speedup = duration_normal / duration_fast
print("diagnostic : MSE goes from {0:.5f} to {1:.5f} but it "
"is {2:.2f}x faster".
format(model_normal.mse_, model_fast.mse_, speedup))
|
code/models/unsupervised_part.py | ricklentz/2dimageto3dmodel | 150 | 12656143 | <reponame>ricklentz/2dimageto3dmodel
# author: <NAME>
import torch
import torch.nn as nn
from encoder import Encoder
from decoder import Decoder
from pose_decoder import PoseDecoder
from ..utils.dropout import PointCloudDropOut
from ..utils.effective_loss_function import EffectiveLossFunction
from ..utils.batch_repetition import repeat_tensor_for_each_element_in_batch
import torch.nn.functional as F
from ..quaternions.operations import QuaternionOperations
class UnsupervisedPart(nn.Module):
# Unsupervised model that uses ensemble of pose predictors and effective loss function
def __init__(self, image_size=128, voxel_size=64, z_dimension=1024, pose_dimensions=128,
number_of_point_cloud_points=8000, number_of_pose_predictor_candidates=4, number_of_views=5):
"""
:param image_size: image size
:param voxel_size: voxel size (after tri-linear interpolation)
:param z_dimension: dimension used for encoder-decoders
:param pose_dimensions: dimension used for pose decoder
:param number_of_point_cloud_points: number of point cloud points used when decoding it
        :param number_of_pose_predictor_candidates: number of candidates from which we'll use the best one
:param number_of_views: number of image views
"""
super().__init__()
self.encoder = Encoder(image_size=image_size)
self.decoder = Decoder(number_of_point_cloud_points=number_of_point_cloud_points, hidden_dimensions=z_dimension,
scale=True)
self.point_cloud_drop_out = PointCloudDropOut(p=0.07)
self.effective_loss_function = EffectiveLossFunction(voxel_size=voxel_size)
self.pose_decoder = PoseDecoder(input_dimensions=z_dimension, hidden_dimensions=pose_dimensions,
number_of_pose_candidates=number_of_pose_predictor_candidates)
self.number_of_views = number_of_views
self.number_of_pose_predictor_candidates = number_of_pose_predictor_candidates
self.encoder.apply(self.kaiming_initialization)
self.decoder.apply(self.kaiming_initialization)
self.pose_decoder.apply(self.kaiming_initialization)
@staticmethod
def kaiming_initialization(architecture):
# Kaiming initialization for encoder, decoder and pose decoder
if isinstance(architecture, (nn.Conv2d, nn.Linear)):
nn.init.kaiming_normal_(architecture.weight.data, a=0)
def forward(self, images, poses):
"""
:param images: all images in a batch
:param poses: given poses attached to images
:return: new projection views, ensemble and student poses
"""
encoder_image_features = self.encoder.forward(images)
encoder_pose_features = self.encoder.forward(poses)
point_cloud, scaling = self.decoder.forward(hidden_vector=encoder_image_features)
poses = self.pose_decoder.forward(hidden_vector=encoder_pose_features)
# do not create ensemble of pose predictors if we aren't training
if not self.training:
point_clouds = repeat_tensor_for_each_element_in_batch(torch_tensor=self.point_cloud_drop_out.
forward(point_cloud=point_cloud),
n=self.number_of_views)
scalings = repeat_tensor_for_each_element_in_batch(torch_tensor=scaling, n=self.number_of_views)
projection = self.effective_loss_function.forward(point_cloud=point_clouds, rotation=poses, scale=scalings)
return projection, poses
batch_size = images.size(0) * self.number_of_views
ensemble_poses, student_poses = poses[:-batch_size], poses[-batch_size:]
point_clouds = repeat_tensor_for_each_element_in_batch(torch_tensor=self.point_cloud_drop_out.
forward(point_cloud=point_cloud),
n=self.number_of_pose_predictor_candidates * self.
number_of_views)
scalings = repeat_tensor_for_each_element_in_batch(torch_tensor=scaling, n=self.
number_of_pose_predictor_candidates * self.number_of_views)
projection = self.effective_loss_function.forward(point_cloud=point_clouds, rotation=poses, scale=scalings)
return projection, ensemble_poses, student_poses
class UnsupervisedLoss(nn.Module):
# Combines projection effective losses for ensemble and student loss
def __init__(self, number_of_pose_predictor_candidates=4, student_weight=20.00):
super().__init__()
self.student_weight = student_weight
self.number_of_pose_predictor_candidates = number_of_pose_predictor_candidates
self.minimum_indexes = None
def forward(self, predictions, masks, training):
projection, *poses = predictions
"""
Down/up samples the input to either the given size or the given scale_factor. The algorithm used for
interpolation is determined by mode. Currently temporal, spatial and volumetric sampling are supported, i.e.
expected inputs are 3-D, 4-D or 5-D in shape. The input dimensions are interpreted in the form:
mini-batch x channels x [optional depth] x [optional height] x width. The modes available for resizing are:
nearest, linear (3D-only), bi-linear, bicubic (4D-only), tri-linear (5D-only), area.
"""
masks = F.interpolate(input=masks.unsqueeze(0), scale_factor=1/2, mode="bilinear", align_corners=True).squeeze()
if not training:
return dict(projection_loss=F.mse_loss(projection, masks, reduction="sum") / projection.size(0))
ensemble_poses, student_poses = poses
masks = repeat_tensor_for_each_element_in_batch(torch_tensor=masks, n=self.number_of_pose_predictor_candidates)
projection_loss = F.mse_loss(projection, masks, reduction="none")
        projection_loss = projection_loss.sum((1, 2)).view(-1, self.number_of_pose_predictor_candidates)
minimum_indexes = projection_loss.argmin(dim=-1).detach()
batch_indexes = torch.arange(minimum_indexes.size(0), device=minimum_indexes.device)
# student loss
minimum_projection_loss = projection_loss[batch_indexes, minimum_indexes].sum() / minimum_indexes.size(0)
ensemble_poses = ensemble_poses.view(-1, self.number_of_pose_predictor_candidates, 4)
best_poses = ensemble_poses[batch_indexes, minimum_indexes, :].detach()
quaternion_operations = QuaternionOperations()
poses_difference = F.normalize(
quaternion_operations.quaternion_multiplication(q1=best_poses, q2=quaternion_operations.
quaternion_conjugate(q=student_poses)), dim=-1)
angle_difference = poses_difference[:, 0]
student_loss = (1 - angle_difference ** 2).sum() / minimum_indexes.size(0)
# save to print histogram
self.minimum_indexes = minimum_indexes.detach()
total_loss = minimum_projection_loss + self.student_weight * student_loss
return dict(projection_loss=minimum_projection_loss, student_loss=student_loss, total_loss=total_loss)
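

# Usage sketch (not part of the original module; the tensors and training loop
# are assumptions for illustration only):
#
#   model = UnsupervisedPart(image_size=128, voxel_size=64)
#   criterion = UnsupervisedLoss(number_of_pose_predictor_candidates=4)
#   model.train()
#   predictions = model(images, poses)   # (projection, ensemble_poses, student_poses)
#   losses = criterion(predictions, masks, training=True)
#   losses["total_loss"].backward()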
|
insights/parsers/tests/test_rhn_charsets.py | lhuett/insights-core | 121 | 12656152 | from insights.tests import context_wrap
from insights.parsers.rhn_charsets import RHNCharSets
emb_charsets_content = """
server_encoding
-----------------
UTF~
(1 row)
client_encoding
-----------------
UTF8
(1 row)
"""
ora_charsets_content = """
PARAMETER VALUE
---------------------------------
NLS_CHARACTERSET UTF8
NLS_NCHAR_CHARACTERSET UTF8
"""
def test_embedded_db():
result = RHNCharSets(context_wrap(emb_charsets_content))
assert result.get('server_encoding') == 'UTF~'
assert result.get('client_encoding') == 'UTF8'
def test_oracle_db():
result = RHNCharSets(context_wrap(ora_charsets_content))
assert result.get('NLS_CHARACTERSET') == 'UTF8'
assert result.get('NLS_NCHAR_CHARACTERSET') == 'UTF8'
|
cctbx/omz/cod_refine.py | dperl-sol/cctbx_project | 155 | 12656194 | from __future__ import absolute_import, division, print_function
from cctbx import omz
import cctbx.omz.dev
from cctbx.array_family import flex
from libtbx.test_utils import approx_equal
from libtbx import easy_run
from libtbx import easy_pickle
from libtbx.utils import date_and_time, user_plus_sys_time
import libtbx.load_env
from libtbx import Auto
from six.moves import cStringIO as StringIO
import traceback
import sys, os
from six.moves import range
from six.moves import zip
op = os.path
def get_master_phil(
max_atoms=99,
f_calc_options_algorithm="*direct fft",
bulk_solvent_correction=False):
return omz.dev.get_master_phil(
iteration_limit=100,
show_distances_threshold=0.5,
bulk_solvent_correction=bulk_solvent_correction,
grads_mean_sq_threshold=1e-6,
f_calc_options_algorithm=f_calc_options_algorithm,
additional_phil_string="""\
max_atoms = %(max_atoms)s
.type = int
f_obs_f_calc_fan_outliers = *remove keep
.type = choice
.optional = False
use_f_calc_as_f_obs = False
.type = bool
reset_u_iso = 0.05
.type = float
sites_mod_short = True
.type = bool
optimizers = *dev ls_simple ls_lm shelxl_fm shelxl_cg shelx76
.type = choice(multi=True)
ls_simple_iterations = 12
.type = int
shelxl_wght = None
.type = str
.help = '''
SHELX-97 Manual 7-31:
Refinement against F2 requires different weights to refinement
against F; in particular, making all the weights equal ('unit
weights'), although useful in the initial stages of refinement
against F, is NEVER a sensible option for F2.'''
shelxl_reset_sigmas = None
.type = float
shelxl_fm_iterations = 12
.type = int
shelxl_cg_iterations = 12
.type = int
shelx76_iterations = 12
.type = int
apply_iteration_limit_to_all = False
.type = bool
keep_tmp_files = False
.type = bool
export_refined = False
.type = bool
pickle_refined_dir = None
.type = str
wdir_root = None
.type = str
sorting_of_pickle_files = *down up
.type = choice
.optional = True
random_subset {
size = None
.type = int
seed = 0
.type = int
}
tardy_samples {
iq = None
.type = int
qmin = -180
.type = float
qmax = 180
.type = float
qstep = 3
.type = float
}
""" % vars())
def shelxl_weights_a_b(fo_sq, sigmas, fc_sq, osf_sq, a, b):
assert sigmas.size() == fo_sq.size()
assert fc_sq.size() == fo_sq.size()
from cctbx.xray.targets.tst_shelxl_wght_ls import calc_w
assert sigmas.all_ge(0.01)
return calc_w(
wa=a, wb=b, i_obs=fo_sq, i_sig=sigmas, i_calc=fc_sq, k=osf_sq**0.5)
def shelxl_weights(fo_sq, sigmas, fc_sq, osf_sq, shelxl_wght):
if (shelxl_wght is None):
shelxl_wght = ""
vals = [float(s) for s in shelxl_wght.split()]
assert len(vals) <= 6
a, b, c, d, e, f = vals + [0.1, 0, 0, 0, 0, 0.33333][len(vals):]
assert c == 0
assert d == 0
assert e == 0
assert f == 0.33333
return shelxl_weights_a_b(fo_sq, sigmas, fc_sq, osf_sq, a, b)
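# Reference note (added here, not in the original source): in show_cc_r1()
# below, cc is the linear correlation coefficient between |Fo| and |Fc| and r1
# is the standard crystallographic residual
#   R1 = sum(| |Fo| - k*|Fc| |) / sum(|Fo|)
# with k the scale factor passed in (or determined automatically).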
def show_cc_r1(
params,
label,
f_obs,
xray_structure=None,
fc_abs=None,
scale_factor=Auto):
assert [xray_structure, fc_abs].count(None) == 1
if (fc_abs is None):
p = params.f_calc_options
fc_abs = f_obs.structure_factors_from_scatterers(
xray_structure=xray_structure,
algorithm=p.algorithm,
cos_sin_table=p.cos_sin_table).f_calc().amplitudes()
corr = flex.linear_correlation(x=f_obs.data(), y=fc_abs.data())
assert corr.is_well_defined()
cc = corr.coefficient()
r1 = f_obs.r1_factor(
other=fc_abs, scale_factor=scale_factor, assume_index_matching=True)
print("%-12s cc, r1: %.4f %.4f" % (label, cc, r1))
sys.stdout.flush()
return fc_abs, cc, r1
def run_smtbx_ls(mode, cod_id, i_obs, f_obs, xray_structure, params):
import smtbx.refinement
fo_sq = i_obs
assert fo_sq.sigmas() is not None
sel = (fo_sq.data() == 0) & (fo_sq.sigmas() == 0)
fo_sq = fo_sq.select(~sel)
fo_sq.select(fo_sq.sigmas() <= 0).show_array()
assert fo_sq.sigmas().all_gt(0)
if (1): # work around bug currently in smtbx weighting scheme implementation
fo_sq = fo_sq.customized_copy(sigmas=flex.double(fo_sq.data().size(), 1))
xobs = fo_sq.as_xray_observations()
tm = user_plus_sys_time()
rm = smtbx.refinement.model(
fo_sq=xobs,
xray_structure=xray_structure,
constraints=[],
restraints_manager=smtbx.refinement.restraints.manager(),
weighting_scheme=smtbx.refinement.least_squares.unit_weighting())
ls = rm.least_squares()
if (mode == "simple"):
for i_cycle in range(params.ls_simple_iterations):
ls.build_up()
try:
ls.solve_and_step_forward()
except RuntimeError as e:
if (str(e).find("cholesky.failure") <= 0): raise
print('Aborting run_smtbx_ls("simple"): cholesky.failure: %s' \
% cod_id)
break
for sc in xray_structure.scatterers():
if (sc.u_iso <= 0 or sc.u_iso > 1):
sc.u_iso = 0.05
show_cc_r1(params, "ls%02d" % (i_cycle+1), f_obs, xray_structure)
tm.show_elapsed(prefix="time smtbx_ls_simple_iterations: ")
elif (mode == "lm"):
from scitbx.lstbx import normal_eqns_solving
thresh = 1e-6
try:
cycles = normal_eqns_solving.levenberg_marquardt_iterations(
ls,
gradient_threshold=thresh,
step_threshold=thresh,
tau=1e-7)
except RuntimeError as e:
if (not str(e).startswith(
"cctbx::adptbx::debye_waller_factor_exp: arg_limit exceeded")):
raise
print('Aborting run_smtbx_ls("lm"):' \
' debye_waller_factor_exp failure: %s' % cod_id)
show_cc_r1(params, "smtbx_lm", f_obs, xray_structure)
tm.show_elapsed(prefix="time levenberg_marquardt_iterations: ")
else:
raise RuntimeError('Unknown run_smtbx_ls(mode="%s")' % mode)
def remove_tmp_files(file_names):
for fn in file_names:
if (op.isfile(fn)):
os.remove(fn)
assert not op.exists(fn)
def run_shelxl(
mode,
cod_id,
i_obs,
f_obs,
xray_structure,
params,
reference_structure,
expected_n_refinable_parameters):
if (mode == "fm"):
if (params.apply_iteration_limit_to_all):
fm_cycles = params.iteration_limit
else:
fm_cycles = params.shelxl_fm_iterations
cg_cycles = None
elif (mode == "cg"):
fm_cycles = None
if (params.apply_iteration_limit_to_all):
cg_cycles = params.iteration_limit
else:
cg_cycles = params.shelxl_cg_iterations
else:
raise RuntimeError("Unknown mode: " + mode)
cwd_orig = os.getcwd()
wdir = "wdir_%s_shelxl_%s_%s" % (cod_id, mode, os.getpid())
if (params.wdir_root is not None):
wdir = op.join(params.wdir_root, wdir)
wdir_is_new = False
if (not op.isdir(wdir)):
os.mkdir(wdir)
wdir_is_new = True
remove_wdir = False
try:
os.chdir(wdir)
tmp_file_names = ["tmp.ins", "tmp.hkl", "tmp.res", "tmp.lst"]
remove_tmp_files(tmp_file_names)
fo_sq = i_obs
if (params.shelxl_reset_sigmas):
fo_sq = fo_sq.customized_copy(
sigmas=flex.double(fo_sq.indices().size(), params.shelxl_reset_sigmas))
import iotbx.shelx
open("tmp.ins", "w").writelines(iotbx.shelx.writer.generator(
xray_structure=xray_structure,
data_are_intensities=True,
title="cod_id=%s mode=%s" % (cod_id, mode),
wavelength=fo_sq.minimum_wavelength_based_on_d_min(),
full_matrix_least_squares_cycles=fm_cycles,
conjugate_gradient_least_squares_cycles=cg_cycles,
weighting_scheme_params=params.shelxl_wght,
sort_scatterers=False))
fo_sq.export_as_shelx_hklf(file_object=open("tmp.hkl", "w"))
import iotbx.shelx.hklf
fo_sq = iotbx.shelx.hklf.reader(file_name="tmp.hkl") \
.as_miller_arrays(crystal_symmetry=fo_sq)[0]
buffers = easy_run.fully_buffered("shelxl tmp")
buffers.raise_if_errors()
refinement_unstable = False
for line in buffers.stdout_lines:
if (line.find("** REFINEMENT UNSTABLE **") >= 0):
refinement_unstable = True
print("Aborted: shelxl %s refinement unstable: %s" % (mode, cod_id))
break
res = open("tmp.res").read()
try:
refined = xray_structure.from_shelx(
file=StringIO(res),
min_distance_sym_equiv=0,
strictly_shelxl=False)
except iotbx.shelx.error as e:
if (str(e).find("scatterer parameter") < 0):
raise
print("Aborted: shelxl %s refinement apparently unstable: %s" % (
mode, cod_id))
refined = None
if (refined is not None):
assert refined.crystal_symmetry().is_similar_symmetry(
xray_structure)
for sc,rsc in zip(xray_structure.scatterers(), refined.scatterers()):
assert rsc.label == sc.label
assert approx_equal(rsc.occupancy, sc.weight(), 1e-4)
rsc.occupancy = sc.occupancy # XXX bug in res file reader
def check_special_positions():
result = True
uc = xray_structure.unit_cell()
sstab = xray_structure.site_symmetry_table()
for i_sc in xray_structure.special_position_indices():
sc = refined.scatterers()[i_sc]
site_symmetry = sstab.get(i_sc)
assert not site_symmetry.is_point_group_1()
site_special = site_symmetry.special_op() * sc.site
d = uc.mod_short_distance(sc.site, site_special)
if (d > 1e-3):
print("site moved off special position:")
print(" %s" % sc.label)
print(" shelxl res: %11.6f %11.6f %11.6f" % sc.site)
print(" special_op: %11.6f %11.6f %11.6f" % site_special)
print(" distance moved: %.3f" % d)
result = False
return result
assert check_special_positions()
xray_structure.replace_scatterers(refined.scatterers())
res_osf = None
res_hkl_count = None
res_r1 = None
res_n_parameters = None
res_n_restraints = None
for line in res.splitlines():
if (line.startswith("FVAR ")):
flds = line.split()
assert len(flds) == 2
res_osf = float(flds[1])
continue
if (not line.startswith("REM ")): continue
assert not refinement_unstable
if (line.startswith("REM R1 =")):
flds = line.split()
assert len(flds) == 15
res_hkl_count = int(flds[13])
res_r1 = float(flds[10])
elif (line.find(" parameters refined ") >= 0):
assert line.endswith(" restraints")
flds = line.split()
assert len(flds) == 7
res_n_parameters = int(flds[1])
res_n_restraints = int(flds[-2])
if (not refinement_unstable):
assert res_osf is not None
assert res_hkl_count is not None
assert res_r1 is not None
assert res_n_parameters is not None
assert res_n_restraints is not None
#
assert res_hkl_count == fo_sq.indices().size()
def raise_unexpected_restraints(n_expected):
raise RuntimeError(
"Unexpected number of SHELXL restraints: %d (vs. %d expected)" % (
res_n_restraints, n_expected))
if (mode == "fm"):
n_caos = fo_sq.space_group_info() \
.number_of_continuous_allowed_origin_shifts()
if (res_n_restraints != n_caos):
sg_symbol = str(fo_sq.space_group_info())
if (sg_symbol in ["P 63 m c", "P 63 c m"]):
assert n_caos == 1
assert res_n_restraints == 0
print("INFO: SHELXL restraint count incorrect? code_code:", \
cod_id)
else:
raise_unexpected_restraints(n_caos)
elif (mode == "cg"):
if (res_n_restraints != 0):
raise_unexpected_restraints(0)
else:
raise RuntimeError("Unknown mode: " + mode)
assert res_n_parameters == expected_n_refinable_parameters + 1
fc_abs, _, r1_fvar = show_cc_r1(
params, "fvar_"+mode, f_obs, xray_structure, scale_factor=res_osf)
r1_diff = r1_fvar - res_r1
print("R1 recomputed - shelxl_%s.res: %.4f - %.4f = %.4f %s" % (
mode, r1_fvar, res_r1, r1_diff, cod_id))
if (abs(r1_diff) > 0.01):
raise RuntimeError("R1 MISMATCH %s" % cod_id)
_, _, r1_auto = show_cc_r1(
params, "shelxl_"+mode, f_obs, fc_abs=fc_abs)
print("R1 FVAR-Auto %s: %.4f" % (cod_id, r1_fvar - r1_auto))
#
lst_r1 = None
lst_wr2 = None
for line in open("tmp.lst").read().splitlines():
l = line.strip()
if (l.startswith("R1 = ")):
lst_r1 = float(l.split()[9])
elif (l.startswith("wR2 = ") and l.endswith(" for all data")):
lst_wr2 = float(l.replace(","," ").split()[2])
assert lst_r1 is not None
assert lst_wr2 is not None
assert lst_r1 == res_r1
#
fc_sq = fc_abs.f_as_f_sq()
weights = shelxl_weights(
fo_sq=fo_sq.data(),
sigmas=fo_sq.sigmas(),
fc_sq=fc_sq.data(),
osf_sq=res_osf**2,
shelxl_wght=params.shelxl_wght)
num = flex.sum(
weights * flex.pow2(fo_sq.data() / res_osf**2 - fc_sq.data()))
den = flex.sum(
weights * flex.pow2(fo_sq.data() / res_osf**2))
assert den != 0
wr2 = (num / den)**0.5
wr2_diff = wr2 - lst_wr2
if (abs(wr2_diff) > 0.01):
info = " significantly different"
else:
info = ""
print("wR2 recomputed - shelxl_%s.lst: %.4f - %.4f = %.4f %s%s" % (
mode, wr2, lst_wr2, wr2_diff, cod_id, info))
if (abs(wr2_diff) / max(lst_wr2, wr2) > 0.2):
raise RuntimeError("wR2 MISMATCH %s" % cod_id)
if (not params.keep_tmp_files):
remove_tmp_files(tmp_file_names)
remove_wdir = wdir_is_new
finally:
os.chdir(cwd_orig)
if (remove_wdir):
try: os.rmdir(wdir)
except Exception: pass
def run_shelx76(
cod_id,
f_obs,
xray_structure,
fvars,
encoded_sites,
params,
reference_structure):
if (params.apply_iteration_limit_to_all):
ls_cycles = params.iteration_limit
else:
ls_cycles = params.shelx76_iterations
cwd_orig = os.getcwd()
wdir = "wdir_%s_shelx76_%s" % (cod_id, os.getpid())
if (params.wdir_root is not None):
wdir = op.join(params.wdir_root, wdir)
wdir_is_new = False
if (not op.isdir(wdir)):
os.mkdir(wdir)
wdir_is_new = True
remove_wdir = False
try:
os.chdir(wdir)
tmp_file_names = [
"tmp.ins", "tmp.lst", "fort.2", "fort.3", "fort.4", "fort.7"]
remove_tmp_files(tmp_file_names)
assert not op.exists("tmp.ins")
from cctbx.development import run_shelx76
run_shelx76.write_shelx76_ls(
f_obs=f_obs,
xray_structure=xray_structure,
fvars=fvars,
encoded_sites=encoded_sites,
l_s_parameters=str(ls_cycles))
assert op.exists("tmp.ins")
buffers = easy_run.fully_buffered("shelx76 < tmp.ins > tmp.lst")
buffers.raise_if_errors_or_output()
lst = open("tmp.lst").read().splitlines()
r_from_lst = None
for line in lst:
l = line.lstrip()
if (l.startswith("R = ")):
print(l)
flds = l.split()
assert len(flds) == 12
if (flds[2].lower() == "nan"):
print("Aborted: shelx76 refinement apparently unstable: %s" % (
cod_id))
r_from_lst = "nan"
break
r_from_lst = float(flds[2])
assert r_from_lst is not None
if (r_from_lst != "nan"):
print("%-12s cc, r1: None %.4f" % ("shelx76", r_from_lst))
if (not params.keep_tmp_files):
remove_tmp_files(tmp_file_names)
remove_wdir = wdir_is_new
finally:
os.chdir(cwd_orig)
if (remove_wdir):
try: os.rmdir(wdir)
except Exception: pass
def process(params, pickle_file_name):
cod_id = op.basename(pickle_file_name).split(".",1)[0]
print("cod_id:", cod_id)
c_obs, structure_prep, edge_list = easy_pickle.load(
file_name=pickle_file_name)
changes = structure_prep.make_scatterer_labels_shelx_compatible_in_place()
if (params.sites_mod_short):
structure_prep = structure_prep.sites_mod_short()
from iotbx.shelx import fvar_encoding
structure_prep = \
fvar_encoding.move_sites_if_necessary_for_shelx_fvar_encoding(
xray_structure=structure_prep)
structure_prep.show_summary().show_scatterers()
if (len(changes) != 0):
from libtbx.utils import plural_s
print("INFO: %d atom name%s changed for compatibility with SHELXL:" \
% plural_s(len(changes)))
for change in changes:
print(' changed: "%s" -> "%s"' % change)
structure_prep.scattering_type_registry(table="it1992").show()
fvar_encoding.dev_build_shelx76_fvars(structure_prep) # only an exercise
print("."*79)
#
if (len(params.optimizers) == 0):
return
#
assert c_obs.is_xray_intensity_array() or c_obs.is_xray_amplitude_array()
if (c_obs.is_xray_intensity_array()):
i_obs = c_obs
f_obs = c_obs.f_sq_as_f(algorithm="xtal_3_7")
else:
f_obs = c_obs
i_obs = c_obs.f_as_f_sq(algorithm="shelxl")
process_continue(
params=params,
cod_id=cod_id,
c_obs=c_obs, i_obs=i_obs, f_obs=f_obs,
structure_prep=structure_prep)
def process_continue(params, cod_id, c_obs, i_obs, f_obs, structure_prep):
p = params.f_calc_options
f_calc = f_obs.structure_factors_from_scatterers(
xray_structure=structure_prep,
algorithm=p.algorithm,
cos_sin_table=p.cos_sin_table).f_calc()
sel = f_obs.f_obs_f_calc_fan_outlier_selection(f_calc=f_calc)
assert sel is not None
n_outliers = sel.count(True)
if (n_outliers != 0):
action = params.f_obs_f_calc_fan_outliers
print("INFO: f_obs_f_calc_fan_outliers = %s: %d" % (action, n_outliers))
if (action == "remove"):
i_obs = i_obs.select(~sel)
f_obs = f_obs.select(~sel)
if (f_obs.anomalous_flag()):
print("INFO: converting anomalous i+f_obs to non-anomalous.")
i_obs = i_obs.average_bijvoet_mates()
f_obs = f_obs.average_bijvoet_mates()
sel = ((i_obs.data() == 0) & (i_obs.sigmas() == 0)) \
| ((f_obs.data() == 0) & (f_obs.sigmas() == 0))
n_zero_d_and_s = sel.count(True)
if (n_zero_d_and_s != 0):
print("INFO: removing reflections with i+f_obs=0 and sigma=0:", \
n_zero_d_and_s)
i_obs = i_obs.select(~sel)
f_obs = f_obs.select(~sel)
p = params.f_calc_options
f_calc = f_obs.structure_factors_from_scatterers(
xray_structure=structure_prep,
algorithm=p.algorithm,
cos_sin_table=p.cos_sin_table).f_calc()
if (params.use_f_calc_as_f_obs):
print("INFO: using f_calc as i+f_obs")
i_obs = f_calc.intensities().customized_copy(
sigmas=flex.double(f_calc.indices().size(), 0.01))
f_obs = f_calc.amplitudes().customized_copy(
sigmas=flex.double(f_calc.indices().size(), 0.01))
else:
# scaling applied so that the data written in shelx hklf format
# have sufficient significant digits, and FVAR is 1 (shelx76 seems
# to be especially sensitive to FVAR >> 1)
k = f_obs.scale_factor(f_calc=f_calc)
assert k != 0
s = 1/k**2
print("INFO: scaling i_obs to f_calc by multiplying i_obs with: %.6g" % s)
i_obs = i_obs.apply_scaling(factor=s)
s = 1/k
print("INFO: scaling f_obs to f_calc by multiplying f_obs with: %.6g" % s)
f_obs = f_obs.apply_scaling(factor=s)
def show(obs):
obs.show_comprehensive_summary()
from cctbx.omz.cif_refine import \
report_fraction_of_negative_observations_if_any as _
_(cod_id, obs)
if (c_obs.is_xray_intensity_array()):
show(i_obs)
else:
show(f_obs)
print("."*79)
#
structure_work = structure_prep.deep_copy_scatterers()
sel = structure_work.hd_selection()
print("Removing hydrogen atoms:", sel.count(True))
structure_work = structure_work.select(selection=~sel)
sdt = params.show_distances_threshold
if (sdt > 0):
print("Distances smaller than %.6g A:" % sdt)
structure_work.show_distances(distance_cutoff=sdt)
print("."*79)
#
if (params.tardy_samples.iq is not None):
from cctbx.omz import tardy_adaptor
print()
tardy_adaptor.sample_e_pot(
id_code=cod_id,
f_obs=f_obs,
xray_structure=structure_prep,
edge_list=edge_list,
params=params.tardy_samples)
print()
return
#
from iotbx.shelx import fvar_encoding
fvars, encoded_sites = fvar_encoding.dev_build_shelx76_fvars(structure_work)
print("Number of FVARs for special position constraints:", len(fvars)-1)
print("."*79)
#
show_cc_r1(params, "prep", f_obs, structure_prep)
def cc_r1(label):
show_cc_r1(params, label, f_obs, structure_work)
cc_r1("no_h")
structure_work.convert_to_isotropic()
cc_r1("iso")
structure_iso = structure_work.deep_copy_scatterers()
#
if (params.reset_u_iso is not None):
structure_work.set_u_iso(value=params.reset_u_iso)
cc_r1("setu")
if (params.shake_sites_rmsd is not None):
mt = flex.mersenne_twister(seed=0)
structure_work.shift_sites_in_place(
shift_length=params.shake_sites_rmsd,
mersenne_twister=mt)
print("rms difference after shift_sites_in_place: %.3f" \
% structure_iso.rms_difference(structure_work))
cc_r1("shift_xyz")
#
if (params.max_atoms is not None):
n = structure_work.scatterers().size()
if (n > params.max_atoms):
print("Skipping refinement of large model: %d atoms COD %s" % (
n, cod_id))
return
#
structure_work.scatterers().flags_set_grads(state=False)
for sc in structure_work.scatterers():
sc.flags.set_grad_site(True)
assert sc.flags.use_u_iso_only()
sc.flags.set_grad_u_iso(True)
n_refinable_parameters = structure_work.n_parameters(
considering_site_symmetry_constraints=True)
print("Number of refinable parameters:", n_refinable_parameters)
#
if (params.iteration_limit < 1):
return
#
if ("dev" not in params.optimizers):
structure_dev = None
else:
structure_dev = structure_work.deep_copy_scatterers()
omz.dev.refinement(
i_obs=i_obs,
f_obs=f_obs,
xray_structure=structure_dev,
params=params,
reference_structure=structure_iso,
expected_n_refinable_parameters=n_refinable_parameters,
plot_samples_id=cod_id)
show_cc_r1(params, "dev", f_obs, structure_dev)
if (params.export_refined):
file_name = "dev_%s_%s_%s.pdb" % (
params.target_type, params.target_obs_type.lower(), cod_id)
open(file_name, "w").write(structure_dev.as_pdb_file(
remarks=[file_name]))
if (params.pickle_refined_dir is not None):
easy_pickle.dump(
file_name=op.join(params.pickle_refined_dir, cod_id+".pickle"),
obj=(c_obs, structure_dev, None))
print((
structure_dev.scatterers().size(),
c_obs.space_group().order_p(),
c_obs.indices().size(),
c_obs.d_min()), file=open("%s/qi_%s" % (params.pickle_refined_dir, cod_id), "w"))
#
def use_smtbx_ls(mode):
if ("ls_"+mode not in params.optimizers):
return None
if (not libtbx.env.has_module(name="smtbx")):
print("INFO: smtbx not available: refinement skipped.")
return None
result = structure_work.deep_copy_scatterers()
run_smtbx_ls(
mode=mode,
cod_id=cod_id,
i_obs=i_obs,
f_obs=f_obs,
xray_structure=result,
params=params)
show_cc_r1(params, "ls_"+mode, f_obs, result)
return result
structure_ls_simple = use_smtbx_ls("simple")
structure_ls_lm = use_smtbx_ls("lm")
#
def use_shelxl(mode):
if ("shelxl_"+mode not in params.optimizers):
return None
result = structure_work.deep_copy_scatterers()
run_shelxl(
mode=mode,
cod_id=cod_id,
i_obs=i_obs,
f_obs=f_obs,
xray_structure=result,
params=params,
reference_structure=structure_iso,
expected_n_refinable_parameters=n_refinable_parameters)
if (params.export_refined):
file_name = "shelxl_%s_%s.pdb" % (mode, cod_id)
open(file_name, "w").write(result.as_pdb_file(
remarks=[file_name]))
return result
structure_shelxl_fm = use_shelxl("fm")
structure_shelxl_cg = use_shelxl("cg")
#
if ("shelx76" not in params.optimizers):
structure_shelx76 = None
else:
structure_shelx76 = structure_work.deep_copy_scatterers()
run_shelx76(
cod_id=cod_id,
f_obs=f_obs,
xray_structure=structure_shelx76,
fvars=fvars,
encoded_sites=encoded_sites,
params=params,
reference_structure=structure_iso)
if (params.export_refined):
file_name = "shelx76_%s.pdb" % cod_id
open(file_name, "w").write(structure_shelx76.as_pdb_file(
remarks=[file_name]))
def run(args):
from iotbx.option_parser import option_parser as iotbx_option_parser
import libtbx.utils
show_times = libtbx.utils.show_times(time_start="now")
command_call = ["iotbx.python", __file__]
command_line = (iotbx_option_parser(
usage=" ".join(command_call) + " [options] directory|file...")
.enable_chunk(easy_all=True)
.enable_multiprocessing()
).process(args=args, min_nargs=1)
if (command_line.run_multiprocessing_chunks_if_applicable(
command_call=command_call)):
show_times()
return
co = command_line.options
#
print("TIME BEGIN cod_refine:", date_and_time())
print()
#
master_phil = get_master_phil()
argument_interpreter = master_phil.command_line_argument_interpreter()
phil_objects = []
remaining_args = []
for arg in command_line.args:
if (arg.find("=") >= 0):
phil_objects.append(argument_interpreter.process(arg=arg))
else:
remaining_args.append(arg)
work_phil = master_phil.fetch(sources=phil_objects)
work_phil.show()
print()
params = work_phil.extract()
#
qi_dict = {}
all_pickles = []
for arg in remaining_args:
if (op.isdir(arg)):
for node in sorted(os.listdir(arg)):
if (node.endswith(".pickle")):
all_pickles.append(op.join(arg, node))
elif (node.startswith("qi_") and len(node) == 10):
qi = open(op.join(arg, node)).read().splitlines()
if (len(qi) == 1):
cod_id = node[3:]
quick_info = eval(qi[0])
assert cod_id not in qi_dict
qi_dict[cod_id] = quick_info
elif (op.isfile(arg)):
all_pickles.append(arg)
else:
raise RuntimeError("Not a file or directory: %s" % arg)
print("Number of pickle files:", len(all_pickles))
print("Number of quick_infos:", len(qi_dict))
sort_choice = params.sorting_of_pickle_files
if (len(qi_dict) != 0 and sort_choice is not None):
print("Sorting pickle files by n_atoms * n_refl:", sort_choice)
assert sort_choice in ["down", "up"]
def sort_pickle_files():
if (sort_choice == "down"): i_sign = -1
else: i_sign = 1
buffer = []
for i,path in enumerate(all_pickles):
cod_id = op.basename(path).split(".",1)[0]
qi = qi_dict.get(cod_id)
if (qi is None): nn = 2**31
else: nn = qi[0] * qi[1] * qi[2]
buffer.append((nn, i_sign*i, path))
buffer.sort()
if (i_sign < 0):
buffer.reverse()
result = []
for elem in buffer:
result.append(elem[-1])
return result
all_pickles = sort_pickle_files()
print()
#
rss = params.random_subset.size
if (rss is not None and rss > 0):
seed = params.random_subset.seed
print("Selecting subset of %d pickle files using random seed %d" % (
rss, seed))
mt = flex.mersenne_twister(seed=seed)
perm = mt.random_permutation(size=len(all_pickles))[:rss]
flags = flex.bool(len(all_pickles), False).set_selected(perm, True)
all_pickles = flex.select(all_pickles, permutation=flags.iselection())
print()
#
from libtbx.path import makedirs_race
if (params.wdir_root is not None):
makedirs_race(path=params.wdir_root)
if (params.pickle_refined_dir is not None):
makedirs_race(path=params.pickle_refined_dir)
#
n_caught = 0
for i_pickle,pickle_file_name in enumerate(all_pickles):
if (i_pickle % command_line.chunk.n != command_line.chunk.i): continue
tm = user_plus_sys_time()
try:
process(params, pickle_file_name)
except KeyboardInterrupt:
print("CAUGHT EXCEPTION: KeyboardInterrupt", file=sys.stderr)
traceback.print_exc()
print(file=sys.stderr)
sys.stderr.flush()
return
except Exception:
sys.stdout.flush()
print("CAUGHT EXCEPTION: %s" % pickle_file_name, file=sys.stderr)
traceback.print_exc()
print(file=sys.stderr)
sys.stderr.flush()
n_caught += 1
else:
print("done_with: %s (%.2f seconds)" % (pickle_file_name, tm.elapsed()))
print()
sys.stdout.flush()
print()
print("Number of exceptions caught:", n_caught)
#
show_times()
print()
print("TIME END cod_refine:", date_and_time())
if (__name__ == "__main__"):
run(args=sys.argv[1:])
|
mozillians/groups/managers.py | divyamoncy/mozillians | 202 | 12656201 | from django.db.models import Case, Count, IntegerField, Manager, Sum, When
from django.db.models.query import QuerySet
class GroupBaseManager(Manager):
use_for_related_fields = True
def get_queryset(self):
"""Annotate count of group members."""
qs = super(GroupBaseManager, self).get_queryset()
qs = qs.annotate(member_count=Count('members'))
return qs
class GroupManager(Manager):
use_for_related_fields = True
def get_queryset(self):
"""Annotate count of memberships of type MEMBER."""
from mozillians.groups.models import GroupMembership
qs = super(GroupManager, self).get_queryset()
annotation = Sum(
Case(
When(
groupmembership__status=GroupMembership.MEMBER,
then=1
),
default=0, output_field=IntegerField()
)
)
qs = qs.annotate(member_count=annotation)
return qs
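
    # Note (not in the original): on Django 2.0+ a near-equivalent annotation
    # can be written with a filtered aggregate, e.g.
    #   from django.db.models import Count, Q
    #   qs.annotate(member_count=Count(
    #       'groupmembership',
    #       filter=Q(groupmembership__status=GroupMembership.MEMBER)))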
class GroupQuerySet(QuerySet):
def visible(self):
return self.filter(visible=True)
|
tests/clpy_tests/opencl_tests/ultima_tests/test_cindexer.py | fixstars/clpy | 142 | 12656202 | # flake8: noqa
# TODO(vorj): When we will meet flake8 3.7.0+,
# we should ignore only W291 for whole file
# using --per-file-ignores .
import clpy
import unittest
class TestUltimaCIndexer(unittest.TestCase):
def test_cindexer_argument_mutation(self):
x = clpy.backend.ultima.exec_ultima('', '#include <cupy/carray.hpp>') + '''
void f(CIndexer_2 ind)
{
}
'''[1:]
y = clpy.backend.ultima.exec_ultima(
'''
void f(CIndexer<2> ind){}
''',
'#include <cupy/carray.hpp>')
self.maxDiff = None
self.assertEqual(x, y)
def test_cindexer_member_function(self):
x = clpy.backend.ultima.exec_ultima('', '#include <cupy/carray.hpp>') + '''
void f(CIndexer_2 ind)
{
ind_size;
}
'''[1:]
y = clpy.backend.ultima.exec_ultima(
'''
void f(CIndexer<2> ind){
ind.size();
}
''',
'#include <cupy/carray.hpp>')
self.maxDiff = None
self.assertEqual(x, y)
if __name__ == "__main__":
unittest.main()
|
server/intrinsic/algorithm/gehler2011/lib/mpi_kmeans-1.6/mpi_kmeans.py | paulu/opensurfaces | 137 | 12656270 | #!/usr/bin/python
# Wrapper for the MPI-Kmeans library by <NAME>
from ctypes import c_int, c_double, c_uint
from numpy.ctypeslib import ndpointer
import numpy as N
from numpy import empty,array,reshape,arange
def kmeans(X, nclst, maxiter=0, numruns=1):
"""Wrapper for <NAME>ers accelerated MPI-Kmeans routine."""
mpikmeanslib = N.ctypeslib.load_library("libmpikmeans.so", ".")
mpikmeanslib.kmeans.restype = c_double
mpikmeanslib.kmeans.argtypes = [ndpointer(dtype=c_double, ndim=1, flags='C_CONTIGUOUS'), \
ndpointer(dtype=c_double, ndim=1, flags='C_CONTIGUOUS'), \
ndpointer(dtype=c_uint, ndim=1, flags='C_CONTIGUOUS'), \
c_uint, c_uint, c_uint, c_uint, c_uint ]
npts,dim = X.shape
assignments=empty( (npts), c_uint )
bestSSE=N.Inf
bestassignments=empty( (npts), c_uint)
Xvec = array( reshape( X, (-1,) ), c_double )
permutation = N.random.permutation( range(npts) ) # randomize order of points
CX = array(X[permutation[:nclst],:], c_double).flatten()
SSE = mpikmeanslib.kmeans( CX, Xvec, assignments, dim, npts, min(nclst, npts), maxiter, numruns)
return reshape(CX, (nclst,dim)), SSE, (assignments+1)
if __name__ == "__main__":
from numpy import array
from numpy.random import rand
X = array( rand(12), c_double )
X.shape = (4,3)
clst,dist,labels = kmeans(X, 2)
print "cluster centers=\n",clst
print "dist=",dist
print "cluster labels",labels
|
onnxruntime/python/tools/quantization/__init__.py | dennyac/onnxruntime | 6,036 | 12656290 | from .quantize import quantize, quantize_static, quantize_dynamic, quantize_qat
from .quantize import QuantizationMode
from .calibrate import CalibrationDataReader, CalibraterBase, MinMaxCalibrater, create_calibrator, CalibrationMethod
from .quant_utils import QuantType, QuantFormat, write_calibration_table
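
# Example usage (for illustration only; the model paths are placeholders):
#   from onnxruntime.quantization import quantize_dynamic, QuantType
#   quantize_dynamic("model.onnx", "model.quant.onnx", weight_type=QuantType.QUInt8)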
|
Acceleration/memcached/regressionSims/testgen/memtest_txt_extended_regressions.py | pooyaww/Vivado_HLS_Samples | 326 | 12656329 | #!/usr/bin/python
import sys

import memlib
## EDIT HERE ###################################################################
keySizes = range(1,28)
#keySizes.append(128)
valueSizes = keySizes[:]
#valueSizes.append(1015)
keyChars = map(chr, range(97, 126))
valueChars = map(chr, range(65, 94))
asciiVals = [1, 12, 123, 1234, 12345, 123456, 1234567, 12345678, 123456789, 2234567891]
################################################################################
def pair2kvpair(pair):
return memlib.kv_pair(pair[0], pair[1], "%08x" % pair[2], 42)
def seq1(keys, values, repeat):
kv_pairs = []
key = keys[0]
value = values[0]
for flags in asciiVals:
kv_pairs.append( memlib.kv_pair(key, value, "%08x" % flags, 42) )
for key in keys:
for value in values:
flags = asciiVals[0]
kv_pairs.append( memlib.kv_pair(key, value, "%08x" % flags, 42) )
testset = memlib.newTestset()
for kv_pair in kv_pairs:
memlib.setSuccess(kv_pair, testset)
for _ in range(repeat):
memlib.getSuccess(kv_pair, testset)
return testset
def seq2(keys, values):
testset = memlib.newTestset()
for _ in range(len(values)):
# for more keys than values, duplicate use of values
values_used = values
if len(keys) > len(values):
while(len(keys) > len(values_used)):
values_used = values_used + values
values_used = values_used[0:len(keys)]
# requests
kv_pairs = map(pair2kvpair, zip(keys, values_used, asciiVals*50))
for kv_pair in kv_pairs:
memlib.setSuccess(kv_pair, testset)
for kv_pair in kv_pairs:
memlib.getSuccess(kv_pair, testset)
# rotation
values = values[1:] + values[0:1]
return testset
################################################################################
if len(keySizes) > len(keyChars):
sys.exit("Error: Not enough key characters.")
if len(valueSizes) > len(valueChars):
sys.exit("Error: Not enough value characters.")
keyPairs = zip(keySizes, keyChars)
valuePairs = zip(valueSizes, valueChars)
keys = map(lambda (size, char): char * size, keyPairs)
values = map(lambda (size, char): char * size, valuePairs)
memlib.generate("SEQ1-EXTENDED", seq1(keys, values, 5))
memlib.generate_hls("SEQ1-EXTENDED", seq1(keys, values, 5))
memlib.generate("SEQ2-EXTENDED", seq2(keys, values))
memlib.generate_hls("SEQ2-EXTENDED", seq2(keys, values))
memlib.generate("SEQ3-EXTENDED", seq1(keys, values, 1))
memlib.generate_hls("SEQ3-EXTENDED", seq1(keys, values, 1))
|
tests/Unit/Evolution/Systems/GrMhd/ValenciaDivClean/Fluxes.py | nilsvu/spectre | 117 | 12656353 | <gh_stars>100-1000
# Distributed under the MIT License.
# See LICENSE.txt for details.
import numpy as np
def b_dot_v(magnetic_field, spatial_velocity, spatial_metric):
return np.einsum("ab, ab", spatial_metric,
np.outer(magnetic_field, spatial_velocity))
def b_squared(magnetic_field, spatial_metric):
return np.einsum("ab, ab", spatial_metric,
np.outer(magnetic_field, magnetic_field))
def magnetic_field_one_form(magnetic_field, spatial_metric):
return np.einsum("a, ia", magnetic_field, spatial_metric)
def p_star(pressure, b_dot_v, b_squared, lorentz_factor):
return pressure + 0.5 * (b_dot_v**2 + b_squared / lorentz_factor**2)
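
# Annotation (not in the original): p_star above is the total pressure
# p* = p + b^2 / 2, where the comoving magnetic field magnitude is
#   b^2 = (B^i v_i)^2 + B^i B_i / W^2,
# which is exactly the sum inside the parentheses.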
def spatial_velocity_one_form(spatial_velocity, spatial_metric):
return np.einsum("a, ia", spatial_velocity, spatial_metric)
def tilde_d_flux(tilde_d, tilde_tau, tilde_s, tilde_b, tilde_phi, lapse, shift,
sqrt_det_spatial_metric, spatial_metric, inv_spatial_metric,
pressure, spatial_velocity, lorentz_factor, magnetic_field):
return tilde_d * (lapse * spatial_velocity - shift)
def tilde_tau_flux(tilde_d, tilde_tau, tilde_s, tilde_b, tilde_phi, lapse,
shift, sqrt_det_spatial_metric, spatial_metric,
inv_spatial_metric, pressure, spatial_velocity,
lorentz_factor, magnetic_field):
b_dot_v_ = b_dot_v(magnetic_field, spatial_velocity, spatial_metric)
return (
sqrt_det_spatial_metric * lapse *
p_star(pressure, b_dot_v_, b_squared(magnetic_field, spatial_metric),
lorentz_factor) * spatial_velocity + tilde_tau *
(lapse * spatial_velocity - shift) - lapse * b_dot_v_ * tilde_b)
def tilde_s_flux(tilde_d, tilde_tau, tilde_s, tilde_b, tilde_phi, lapse, shift,
sqrt_det_spatial_metric, spatial_metric, inv_spatial_metric,
pressure, spatial_velocity, lorentz_factor, magnetic_field):
b_dot_v_ = b_dot_v(magnetic_field, spatial_velocity, spatial_metric)
b_i = (magnetic_field_one_form(magnetic_field, spatial_metric) /
lorentz_factor +
spatial_velocity_one_form(spatial_velocity, spatial_metric) *
lorentz_factor * b_dot_v_)
result = np.outer(lapse * spatial_velocity - shift, tilde_s)
result -= lapse / lorentz_factor * np.outer(tilde_b, b_i)
result += (
sqrt_det_spatial_metric * lapse *
p_star(pressure, b_dot_v_, b_squared(magnetic_field, spatial_metric),
lorentz_factor) * np.identity(shift.size))
return result
def tilde_b_flux(tilde_d, tilde_tau, tilde_s, tilde_b, tilde_phi, lapse, shift,
sqrt_det_spatial_metric, spatial_metric, inv_spatial_metric,
pressure, spatial_velocity, lorentz_factor, magnetic_field):
result = np.outer(lapse * spatial_velocity - shift, tilde_b)
result += lapse * inv_spatial_metric * tilde_phi
result -= lapse * np.outer(tilde_b, spatial_velocity)
return result
def tilde_phi_flux(tilde_d, tilde_tau, tilde_s, tilde_b, tilde_phi, lapse,
shift, sqrt_det_spatial_metric, spatial_metric,
inv_spatial_metric, pressure, spatial_velocity,
lorentz_factor, magnetic_field):
return lapse * tilde_b - tilde_phi * shift
|
fastrunner/migrations/0018_auto_20210410_1950.py | FuxiongYang/faster | 227 | 12656372 | # Generated by Django 2.2 on 2021-04-10 19:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fastrunner', '0017_visit_project'),
]
operations = [
migrations.AddField(
model_name='api',
name='yapi_catid',
            field=models.IntegerField(default=0, null=True, verbose_name='yapi category id'),
),
migrations.AddField(
model_name='api',
name='yapi_id',
            field=models.IntegerField(default=0, null=True, verbose_name='yapi id'),
),
migrations.AddField(
model_name='api',
name='ypai_add_time',
            field=models.CharField(default='', max_length=10, null=True, verbose_name='yapi creation time'),
),
migrations.AddField(
model_name='api',
name='ypai_up_time',
            field=models.CharField(default='', max_length=10, null=True, verbose_name='yapi update time'),
),
migrations.AddField(
model_name='api',
name='ypai_username',
            field=models.CharField(default='', max_length=30, null=True, verbose_name='yapi author'),
),
]
|
scss/tests/from_ruby/test_extend.py | kochelmonster/pyScss | 152 | 12656393 | from __future__ import absolute_import
from __future__ import unicode_literals
from scss import Scss
import pytest
# py.test bug: unicode literals not allowed here, so cast to native str type
pytestmark = pytest.mark.skipif(str("not config.getoption('include_ruby')"))
# TODO undupe
def assert_rendering(input, expected, **kwargs):
compiler = Scss(scss_opts=dict(compress=False), **kwargs)
css = compiler.compile(input)
# TODO chumptastic hack; sass and pyscss have slightly different
# "non-compressed" output
import re
css = re.sub(r'(?m)\n *[}]$', ' }\n', css).rstrip("\n") + "\n"
#css = re.sub(r'; [}]', ';\n }', css)
#css = re.sub(r'\n *[}]$', ' }', css)
assert expected == css
def test_basic():
assert_rendering('''\
.foo {a: b}
.bar {@extend .foo}
''', '''\
.foo, .bar {
a: b; }
''')
assert_rendering('''\
.bar {@extend .foo}
.foo {a: b}
''', '''\
.foo, .bar {
a: b; }
''')
assert_rendering('''\
.foo {a: b}
.bar {c: d; @extend .foo}
''', '''\
.foo, .bar {
a: b; }
.bar {
c: d; }
''')
assert_rendering('''\
.foo {a: b}
.bar {@extend .foo; c: d}
''', '''\
.foo, .bar {
a: b; }
.bar {
c: d; }
''')
def test_multiple_targets():
assert_rendering('''\
.foo {a: b}
.bar {@extend .foo}
.blip .foo {c: d}
''', '''\
.foo, .bar {
a: b; }
.blip .foo, .blip .bar {
c: d; }
''')
def test_multiple_extendees():
assert_rendering('''\
.foo {a: b}
.bar {c: d}
.baz {@extend .foo; @extend .bar}
''', '''\
.foo, .baz {
a: b; }
.bar, .baz {
c: d; }
''')
def test_multiple_extends_with_single_extender_and_single_target():
assert_extends(
'.foo .bar',
'.baz {@extend .foo; @extend .bar}',
'.foo .bar, .baz .bar, .foo .baz, .baz .baz')
assert_extends(
'.foo.bar',
'.baz {@extend .foo; @extend .bar}',
'.foo.bar, .baz')
def test_multiple_extends_with_multiple_extenders_and_single_target():
assert_rendering('''\
.foo .bar {a: b}
.baz {@extend .foo}
.bang {@extend .bar}
''', '''\
.foo .bar, .baz .bar, .foo .bang, .baz .bang {
a: b; }
''')
assert_rendering('''\
.foo.bar {a: b}
.baz {@extend .foo}
.bang {@extend .bar}
''', '''\
.foo.bar, .bar.baz, .baz.bang, .foo.bang {
a: b; }
''')
def test_chained_extends():
assert_rendering('''\
.foo {a: b}
.bar {@extend .foo}
.baz {@extend .bar}
.bip {@extend .bar}
''', '''\
.foo, .bar, .baz, .bip {
a: b; }
''')
def test_dynamic_extendee():
assert_extends(
'.foo',
'.bar {@extend #{".foo"}}',
'.foo, .bar')
assert_extends(
'[baz^="blip12px"]',
'.bar {@extend [baz^="blip#{12px}"]}',
'[baz^="blip12px"], .bar')
def test_nested_target():
assert_extends(
'.foo .bar',
'.baz {@extend .bar}',
'.foo .bar, .foo .baz')
def test_target_with_child():
assert_extends(
'.foo .bar',
'.baz {@extend .foo}',
'.foo .bar, .baz .bar')
def test_class_unification():
assert_unification(
'.foo.bar',
'.baz {@extend .foo}',
'.foo.bar, .bar.baz')
assert_unification(
'.foo.baz',
'.baz {@extend .foo}',
'.baz')
def test_id_unification():
assert_unification(
'.foo.bar',
'#baz {@extend .foo}',
'.foo.bar, .bar#baz')
assert_unification(
'.foo#baz',
'#baz {@extend .foo}',
'#baz')
# XXX assert_extend_doesnt_match('#bar', '.foo', :failed_to_unify, 2) do
assert_unification(
'.foo#baz',
'#bar {@extend .foo}',
'.foo#baz')
def test_universal_unification_with_simple_target():
assert_unification(
'.foo',
'* {@extend .foo}',
'.foo, *')
assert_unification(
'.foo',
'*|* {@extend .foo}',
'.foo, *|*')
assert_unification(
'.foo.bar',
'* {@extend .foo}',
'.bar')
assert_unification(
'.foo.bar',
'*|* {@extend .foo}',
'.bar')
assert_unification(
'.foo.bar',
'ns|* {@extend .foo}',
'.foo.bar, ns|*.bar')
def test_universal_unification_with_namespaceless_universal_target():
assert_unification(
'*.foo',
'* {@extend .foo}',
'*')
assert_unification(
'*.foo',
'*|* {@extend .foo}',
'*')
assert_unification(
'*|*.foo',
'* {@extend .foo}',
'*|*.foo, *')
assert_unification(
'*|*.foo',
'*|* {@extend .foo}',
'*|*')
assert_unification(
'*.foo',
'ns|* {@extend .foo}',
'*.foo, ns|*')
assert_unification(
'*|*.foo',
'ns|* {@extend .foo}',
'*|*.foo, ns|*')
def test_universal_unification_with_namespaced_universal_target():
assert_unification(
'ns|*.foo',
'* {@extend .foo}',
'ns|*')
assert_unification(
'ns|*.foo',
'*|* {@extend .foo}',
'ns|*')
# XXX assert_extend_doesnt_match('ns2|*', '.foo', :failed_to_unify, 2) do
assert_unification(
'ns1|*.foo',
'ns2|* {@extend .foo}',
'ns1|*.foo')
assert_unification(
'ns|*.foo',
'ns|* {@extend .foo}',
'ns|*')
def test_universal_unification_with_namespaceless_element_target():
assert_unification(
'a.foo',
'* {@extend .foo}',
'a')
assert_unification(
'a.foo',
'*|* {@extend .foo}',
'a')
assert_unification(
'*|a.foo',
'* {@extend .foo}',
'*|a.foo, a')
assert_unification(
'*|a.foo',
'*|* {@extend .foo}',
'*|a')
assert_unification(
'a.foo',
'ns|* {@extend .foo}',
'a.foo, ns|a')
assert_unification(
'*|a.foo',
'ns|* {@extend .foo}',
'*|a.foo, ns|a')
def test_universal_unification_with_namespaced_element_target():
assert_unification(
'ns|a.foo',
'* {@extend .foo}',
'ns|a')
assert_unification(
'ns|a.foo',
'*|* {@extend .foo}',
'ns|a')
# XXX assert_extend_doesnt_match('ns2|*', '.foo', :failed_to_unify, 2) do
assert_unification(
'ns1|a.foo',
'ns2|* {@extend .foo}',
'ns1|a.foo')
assert_unification(
'ns|a.foo',
'ns|* {@extend .foo}',
'ns|a')
def test_element_unification_with_simple_target():
assert_unification(
'.foo',
'a {@extend .foo}',
'.foo, a')
assert_unification(
'.foo.bar',
'a {@extend .foo}',
'.foo.bar, a.bar')
assert_unification(
'.foo.bar',
'*|a {@extend .foo}',
'.foo.bar, *|a.bar')
assert_unification(
'.foo.bar',
'ns|a {@extend .foo}',
'.foo.bar, ns|a.bar')
def test_element_unification_with_namespaceless_universal_target():
assert_unification(
'*.foo',
'a {@extend .foo}',
'*.foo, a')
assert_unification(
'*.foo',
'*|a {@extend .foo}',
'*.foo, a')
assert_unification(
'*|*.foo',
'a {@extend .foo}',
'*|*.foo, a')
assert_unification(
'*|*.foo',
'*|a {@extend .foo}',
'*|*.foo, *|a')
assert_unification(
'*.foo',
'ns|a {@extend .foo}',
'*.foo, ns|a')
assert_unification(
'*|*.foo',
'ns|a {@extend .foo}',
'*|*.foo, ns|a')
def test_element_unification_with_namespaced_universal_target():
assert_unification(
'ns|*.foo',
'a {@extend .foo}',
'ns|*.foo, ns|a')
assert_unification(
'ns|*.foo',
'*|a {@extend .foo}',
'ns|*.foo, ns|a')
# XXX assert_extend_doesnt_match('ns2|a', '.foo', :failed_to_unify, 2) do
assert_unification(
'ns1|*.foo',
'ns2|a {@extend .foo}',
'ns1|*.foo')
assert_unification(
'ns|*.foo',
'ns|a {@extend .foo}',
'ns|*.foo, ns|a')
def test_element_unification_with_namespaceless_element_target():
assert_unification(
'a.foo',
'a {@extend .foo}',
'a')
assert_unification(
'a.foo',
'*|a {@extend .foo}',
'a')
assert_unification(
'*|a.foo',
'a {@extend .foo}',
'*|a.foo, a')
assert_unification(
'*|a.foo',
'*|a {@extend .foo}',
'*|a')
assert_unification(
'a.foo',
'ns|a {@extend .foo}',
'a.foo, ns|a')
assert_unification(
'*|a.foo',
'ns|a {@extend .foo}',
'*|a.foo, ns|a')
# XXX assert_extend_doesnt_match('h1', '.foo', :failed_to_unify, 2) do
assert_unification(
'a.foo',
'h1 {@extend .foo}',
'a.foo')
def test_element_unification_with_namespaced_element_target():
assert_unification(
'ns|a.foo',
'a {@extend .foo}',
'ns|a')
assert_unification(
'ns|a.foo',
'*|a {@extend .foo}',
'ns|a')
# XXX assert_extend_doesnt_match('ns2|a', '.foo', :failed_to_unify, 2) do
assert_unification(
'ns1|a.foo',
'ns2|a {@extend .foo}',
'ns1|a.foo')
assert_unification(
'ns|a.foo',
'ns|a {@extend .foo}',
'ns|a')
def test_attribute_unification():
assert_unification(
'[foo=bar].baz',
'[foo=baz] {@extend .baz}',
'[foo=bar].baz, [foo=bar][foo=baz]')
assert_unification(
'[foo=bar].baz',
'[foo^=bar] {@extend .baz}',
'[foo=bar].baz, [foo=bar][foo^=bar]')
assert_unification(
'[foo=bar].baz',
'[foot=bar] {@extend .baz}',
'[foo=bar].baz, [foo=bar][foot=bar]')
assert_unification(
'[foo=bar].baz',
'[ns|foo=bar] {@extend .baz}',
'[foo=bar].baz, [foo=bar][ns|foo=bar]')
assert_unification(
'%-a [foo=bar].bar',
'[foo=bar] {@extend .bar}',
'-a [foo=bar]')
def test_pseudo_unification():
assert_unification(
':foo.baz',
':foo(2n+1) {@extend .baz}',
':foo.baz, :foo:foo(2n+1)')
assert_unification(
':foo.baz',
'::foo {@extend .baz}',
':foo.baz, :foo::foo')
# XXX assert_extend_doesnt_match('::bar', '.baz', :failed_to_unify, 2) do
assert_unification(
'::foo.baz',
'::bar {@extend .baz}',
'::foo.baz')
# XXX assert_extend_doesnt_match('::foo(2n+1)', '.baz', :failed_to_unify, 2) do
assert_unification(
'::foo.baz',
'::foo(2n+1) {@extend .baz}',
'::foo.baz')
assert_unification(
'::foo.baz',
'::foo {@extend .baz}',
'::foo')
assert_unification(
'::foo(2n+1).baz',
'::foo(2n+1) {@extend .baz}',
'::foo(2n+1)')
assert_unification(
':foo.baz',
':bar {@extend .baz}',
':foo.baz, :foo:bar')
assert_unification(
'.baz:foo',
':after {@extend .baz}',
'.baz:foo, :foo:after')
assert_unification(
'.baz:after',
':foo {@extend .baz}',
'.baz:after, :foo:after')
assert_unification(
':foo.baz',
':foo {@extend .baz}',
':foo')
def test_pseudoelement_remains_at_end_of_selector():
assert_extends(
'.foo::bar',
'.baz {@extend .foo}',
'.foo::bar, .baz::bar')
assert_extends(
'a.foo::bar',
'.baz {@extend .foo}',
'a.foo::bar, a.baz::bar')
def test_pseudoclass_remains_at_end_of_selector():
assert_extends(
'.foo:bar',
'.baz {@extend .foo}',
'.foo:bar, .baz:bar')
assert_extends(
'a.foo:bar',
'.baz {@extend .foo}',
'a.foo:bar, a.baz:bar')
def test_not_remains_at_end_of_selector():
assert_extends(
'.foo:not(.bar)',
'.baz {@extend .foo}',
'.foo:not(.bar), .baz:not(.bar)')
def test_pseudoelement_goes_lefter_than_pseudoclass():
assert_extends(
'.foo::bar',
'.baz:bang {@extend .foo}',
'.foo::bar, .baz:bang::bar')
assert_extends(
'.foo:bar',
'.baz::bang {@extend .foo}',
'.foo:bar, .baz:bar::bang')
def test_pseudoelement_goes_lefter_than_not():
assert_extends(
'.foo::bar',
'.baz:not(.bang) {@extend .foo}',
'.foo::bar, .baz:not(.bang)::bar')
assert_extends(
'.foo:not(.bang)',
'.baz::bar {@extend .foo}',
'.foo:not(.bang), .baz:not(.bang)::bar')
def test_negation_unification():
assert_unification(
':not(.foo).baz',
':not(.bar) {@extend .baz}',
':not(.foo).baz, :not(.foo):not(.bar)')
assert_unification(
':not(.foo).baz',
':not(.foo) {@extend .baz}',
':not(.foo)')
assert_unification(
':not([a=b]).baz',
':not([a = b]) {@extend .baz}',
':not([a=b])')
def test_comma_extendee():
assert_rendering('''\
.foo {a: b}
.bar {c: d}
.baz {@extend .foo, .bar}
''', '''\
.foo, .baz {
a: b; }
.bar, .baz {
c: d; }
''')
def test_redundant_selector_elimination():
assert_rendering('''\
.foo.bar {a: b}
.x {@extend .foo, .bar}
.y {@extend .foo, .bar}
''', '''\
.foo.bar, .x, .y {
a: b; }
''')
## Long Extendees
def test_long_extendee():
assert_extends(
'.foo.bar',
'.baz {@extend .foo.bar}',
'.foo.bar, .baz')
def test_long_extendee_requires_all_selectors():
# XXX assert_extend_doesnt_match('.baz', '.foo.bar', :not_found, 2) do
assert_extends(
'.foo',
'.baz {@extend .foo.bar}',
'.foo')
def test_long_extendee_matches_supersets():
assert_extends(
'.foo.bar.bap',
'.baz {@extend .foo.bar}',
'.foo.bar.bap, .bap.baz')
def test_long_extendee_runs_unification():
assert_extends(
'ns|*.foo.bar',
'a.baz {@extend .foo.bar}',
'ns|*.foo.bar, ns|a.baz')
## Long Extenders
def test_long_extender():
assert_extends(
'.foo.bar',
'.baz.bang {@extend .foo}',
'.foo.bar, .bar.baz.bang')
def test_long_extender_runs_unification():
assert_extends(
'ns|*.foo.bar',
'a.baz {@extend .foo}',
'ns|*.foo.bar, ns|a.bar.baz')
def test_long_extender_aborts_unification():
# XXX assert_extend_doesnt_match('h1.baz', '.foo', :failed_to_unify, 2) do
assert_extends(
'a.foo#bar',
'h1.baz {@extend .foo}',
'a.foo#bar')
# XXX assert_extend_doesnt_match('.bang#baz', '.foo', :failed_to_unify, 2) do
assert_extends(
'a.foo#bar',
'.bang#baz {@extend .foo}',
'a.foo#bar')
## Nested Extenders
def test_nested_extender():
assert_extends(
'.foo',
'foo bar {@extend .foo}',
'.foo, foo bar')
def test_nested_extender_runs_unification():
assert_extends(
'.foo.bar',
'foo bar {@extend .foo}',
'.foo.bar, foo bar.bar')
def test_nested_extender_aborts_unification():
# XXX assert_extend_doesnt_match('foo bar', '.foo', :failed_to_unify, 2) do
assert_extends(
'baz.foo',
'foo bar {@extend .foo}',
'baz.foo')
def test_nested_extender_alternates_parents():
assert_extends('.baz .bip .foo', 'foo .grank bar {@extend .foo}',
'.baz .bip .foo, .baz .bip foo .grank bar, foo .grank .baz .bip bar')
def test_nested_extender_unifies_identical_parents():
assert_extends('.baz .bip .foo', '.baz .bip bar {@extend .foo}',
'.baz .bip .foo, .baz .bip bar')
def test_nested_extender_unifies_common_substring():
assert_extends('.baz .bip .bap .bink .foo', '.brat .bip .bap bar {@extend .foo}',
'.baz .bip .bap .bink .foo, .baz .brat .bip .bap .bink bar, .brat .baz .bip .bap .bink bar')
def test_nested_extender_unifies_common_subseq():
assert_extends('.a .x .b .y .foo', '.a .n .b .m bar {@extend .foo}',
'.a .x .b .y .foo, .a .x .n .b .y .m bar, .a .n .x .b .y .m bar, .a .x .n .b .m .y bar, .a .n .x .b .m .y bar')
def test_nested_extender_chooses_first_subseq():
assert_extends('.a .b .c .d .foo', '.c .d .a .b .bar {@extend .foo}',
'.a .b .c .d .foo, .a .b .c .d .a .b .bar')
def test_nested_extender_counts_extended_subselectors():
assert_extends('.a .bip.bop .foo', '.b .bip .bar {@extend .foo}',
'.a .bip.bop .foo, .a .b .bip.bop .bar, .b .a .bip.bop .bar')
def test_nested_extender_counts_extended_superselectors():
assert_extends('.a .bip .foo', '.b .bip.bop .bar {@extend .foo}',
'.a .bip .foo, .a .b .bip.bop .bar, .b .a .bip.bop .bar')
def test_nested_extender_with_child_selector():
assert_extends(
'.baz .foo',
'foo > bar {@extend .foo}',
'.baz .foo, .baz foo > bar')
def test_nested_extender_finds_common_selectors_around_child_selector():
assert_extends(
'a > b c .c1',
'a c .c2 {@extend .c1}',
'a > b c .c1, a > b c .c2')
assert_extends(
'a > b c .c1',
'b c .c2 {@extend .c1}',
'a > b c .c1, a > b c .c2')
def test_nested_extender_doesnt_find_common_selectors_around_adjacent_sibling_selector():
assert_extends(
'a + b c .c1',
'a c .c2 {@extend .c1}',
'a + b c .c1, a + b a c .c2, a a + b c .c2')
assert_extends(
'a + b c .c1',
'a b .c2 {@extend .c1}',
'a + b c .c1, a a + b c .c2')
assert_extends(
'a + b c .c1',
'b c .c2 {@extend .c1}',
'a + b c .c1, a + b c .c2')
def test_nested_extender_doesnt_find_common_selectors_around_sibling_selector():
assert_extends(
'a ~ b c .c1',
'a c .c2 {@extend .c1}',
'a ~ b c .c1, a ~ b a c .c2, a a ~ b c .c2')
assert_extends(
'a ~ b c .c1',
'a b .c2 {@extend .c1}',
'a ~ b c .c1, a a ~ b c .c2')
assert_extends(
'a ~ b c .c1',
'b c .c2 {@extend .c1}',
'a ~ b c .c1, a ~ b c .c2')
def test_nested_extender_doesnt_find_common_selectors_around_reference_selector():
assert_extends(
'a /for/ b c .c1',
'a c .c2 {@extend .c1}',
'a /for/ b c .c1, a /for/ b a c .c2, a a /for/ b c .c2')
assert_extends(
'a /for/ b c .c1',
'a b .c2 {@extend .c1}',
'a /for/ b c .c1, a a /for/ b c .c2')
assert_extends(
'a /for/ b c .c1',
'b c .c2 {@extend .c1}',
'a /for/ b c .c1, a /for/ b c .c2')
def test_nested_extender_with_early_child_selectors_doesnt_subseq_them():
assert_extends('.bip > .bap .foo', '.grip > .bap .bar {@extend .foo}',
'.bip > .bap .foo, .bip > .bap .grip > .bap .bar, .grip > .bap .bip > .bap .bar')
assert_extends('.bap > .bip .foo', '.bap > .grip .bar {@extend .foo}',
'.bap > .bip .foo, .bap > .bip .bap > .grip .bar, .bap > .grip .bap > .bip .bar')
def test_nested_extender_with_child_selector_unifies():
assert_extends(
'.baz.foo',
'foo > bar {@extend .foo}',
'.baz.foo, foo > bar.baz')
assert_rendering('''\
.baz > {
.foo {a: b}
.bar {@extend .foo}
}
''', '''\
.baz > .foo, .baz > .bar {
a: b; }
''')
assert_rendering('''\
.foo {
.bar {a: b}
> .baz {@extend .bar}
}
''', '''\
.foo .bar, .foo > .baz {
a: b; }
''')
def test_nested_extender_with_early_child_selectors_doesnt_subseq_them():
assert_rendering('''\
.foo {
.bar {a: b}
.bip > .baz {@extend .bar}
}
''', '''\
.foo .bar, .foo .bip > .baz {
a: b; }
''')
assert_rendering('''\
.foo {
.bip .bar {a: b}
> .baz {@extend .bar}
}
''', '''\
.foo .bip .bar, .foo .bip .foo > .baz {
a: b; }
''')
assert_extends(
'.foo > .bar',
'.bip + .baz {@extend .bar}',
'.foo > .bar, .foo > .bip + .baz')
assert_extends(
'.foo + .bar',
'.bip > .baz {@extend .bar}',
'.foo + .bar, .bip > .foo + .baz')
assert_extends(
'.foo > .bar',
'.bip > .baz {@extend .bar}',
'.foo > .bar, .bip.foo > .baz')
def test_nested_extender_with_trailing_child_selector():
with pytest.raises(SyntaxError):
# "bar > can't extend: invalid selector"
render("bar > {@extend .baz}")
def test_nested_extender_with_sibling_selector():
assert_extends(
'.baz .foo',
'foo + bar {@extend .foo}',
'.baz .foo, .baz foo + bar')
def test_nested_extender_with_hacky_selector():
assert_extends('.baz .foo', 'foo + > > + bar {@extend .foo}',
'.baz .foo, .baz foo + > > + bar, foo .baz + > > + bar')
assert_extends(
'.baz .foo',
'> > bar {@extend .foo}',
'.baz .foo, > > .baz bar')
def test_nested_extender_merges_with_same_selector():
assert_rendering('''\
.foo {
.bar {a: b}
.baz {@extend .bar} }
''', '''\
.foo .bar, .foo .baz {
a: b; }
''')
def test_nested_extender_with_child_selector_merges_with_same_selector():
assert_extends('.foo > .bar .baz', '.foo > .bar .bang {@extend .baz}',
'.foo > .bar .baz, .foo > .bar .bang')
# Combinator Unification
def test_combinator_unification_for_hacky_combinators():
assert_extends(
'.a > + x',
'.b y {@extend x}',
'.a > + x, .a .b > + y, .b .a > + y')
assert_extends(
'.a x',
'.b > + y {@extend x}',
'.a x, .a .b > + y, .b .a > + y')
assert_extends(
'.a > + x',
'.b > + y {@extend x}',
'.a > + x, .a .b > + y, .b .a > + y')
assert_extends(
'.a ~ > + x',
'.b > + y {@extend x}',
'.a ~ > + x, .a .b ~ > + y, .b .a ~ > + y')
assert_extends(
'.a + > x',
'.b > + y {@extend x}',
'.a + > x')
assert_extends(
'.a + > x',
'.b > + y {@extend x}',
'.a + > x')
assert_extends(
'.a ~ > + .b > x',
'.c > + .d > y {@extend x}',
'.a ~ > + .b > x, .a .c ~ > + .d.b > y, .c .a ~ > + .d.b > y')
def test_combinator_unification_double_tilde():
assert_extends(
'.a.b ~ x',
'.a ~ y {@extend x}',
'.a.b ~ x, .a.b ~ y')
assert_extends(
'.a ~ x',
'.a.b ~ y {@extend x}',
'.a ~ x, .a.b ~ y')
assert_extends(
'.a ~ x',
'.b ~ y {@extend x}',
'.a ~ x, .a ~ .b ~ y, .b ~ .a ~ y, .b.a ~ y')
assert_extends(
'a.a ~ x',
'b.b ~ y {@extend x}',
'a.a ~ x, a.a ~ b.b ~ y, b.b ~ a.a ~ y')
def test_combinator_unification_tilde_plus():
assert_extends(
'.a.b + x',
'.a ~ y {@extend x}',
'.a.b + x, .a.b + y')
assert_extends(
'.a + x',
'.a.b ~ y {@extend x}',
'.a + x, .a.b ~ .a + y, .a.b + y')
assert_extends(
'.a + x',
'.b ~ y {@extend x}',
'.a + x, .b ~ .a + y, .b.a + y')
assert_extends(
'a.a + x',
'b.b ~ y {@extend x}',
'a.a + x, b.b ~ a.a + y')
assert_extends(
'.a.b ~ x',
'.a + y {@extend x}',
'.a.b ~ x, .a.b ~ .a + y, .a.b + y')
assert_extends(
'.a ~ x',
'.a.b + y {@extend x}',
'.a ~ x, .a.b + y')
assert_extends(
'.a ~ x',
'.b + y {@extend x}',
'.a ~ x, .a ~ .b + y, .a.b + y')
assert_extends(
'a.a ~ x',
'b.b + y {@extend x}',
'a.a ~ x, a.a ~ b.b + y')
def test_combinator_unification_angle_sibling():
assert_extends(
'.a > x',
'.b ~ y {@extend x}',
'.a > x, .a > .b ~ y')
assert_extends(
'.a > x',
'.b + y {@extend x}',
'.a > x, .a > .b + y')
assert_extends(
'.a ~ x',
'.b > y {@extend x}',
'.a ~ x, .b > .a ~ y')
assert_extends(
'.a + x',
'.b > y {@extend x}',
'.a + x, .b > .a + y')
def test_combinator_unification_double_angle():
assert_extends(
'.a.b > x',
'.b > y {@extend x}',
'.a.b > x, .b.a > y')
assert_extends(
'.a > x',
'.a.b > y {@extend x}',
'.a > x, .a.b > y')
assert_extends(
'.a > x',
'.b > y {@extend x}',
'.a > x, .b.a > y')
assert_extends(
'a.a > x',
'b.b > y {@extend x}',
'a.a > x')
def test_combinator_unification_double_plus():
assert_extends(
'.a.b + x',
'.b + y {@extend x}',
'.a.b + x, .b.a + y')
assert_extends(
'.a + x',
'.a.b + y {@extend x}',
'.a + x, .a.b + y')
assert_extends(
'.a + x',
'.b + y {@extend x}',
'.a + x, .b.a + y')
assert_extends(
'a.a + x',
'b.b + y {@extend x}',
'a.a + x')
def test_combinator_unification_angle_space():
assert_extends(
'.a.b > x',
'.a y {@extend x}',
'.a.b > x, .a.b > y')
assert_extends(
'.a > x',
'.a.b y {@extend x}',
'.a > x, .a.b .a > y')
assert_extends(
'.a > x',
'.b y {@extend x}',
'.a > x, .b .a > y')
assert_extends(
'.a.b x',
'.a > y {@extend x}',
'.a.b x, .a.b .a > y')
assert_extends(
'.a x',
'.a.b > y {@extend x}',
'.a x, .a.b > y')
assert_extends(
'.a x',
'.b > y {@extend x}',
'.a x, .a .b > y')
def test_combinator_unification_plus_space():
assert_extends(
'.a.b + x',
'.a y {@extend x}',
'.a.b + x, .a .a.b + y')
assert_extends(
'.a + x',
'.a.b y {@extend x}',
'.a + x, .a.b .a + y')
assert_extends(
'.a + x',
'.b y {@extend x}',
'.a + x, .b .a + y')
assert_extends(
'.a.b x',
'.a + y {@extend x}',
'.a.b x, .a.b .a + y')
assert_extends(
'.a x',
'.a.b + y {@extend x}',
'.a x, .a .a.b + y')
assert_extends(
'.a x',
'.b + y {@extend x}',
'.a x, .a .b + y')
def test_combinator_unification_nested():
assert_extends(
'.a > .b + x',
'.c > .d + y {@extend x}',
'.a > .b + x, .c.a > .d.b + y')
assert_extends(
'.a > .b + x',
'.c > y {@extend x}',
'.a > .b + x, .c.a > .b + y')
def test_combinator_unification_with_newlines():
assert_rendering('''\
.a >
.b
+ x {a: b}
.c
> .d +
y {@extend x}
''', '''\
.a >
.b
+ x, .c.a > .d.b + y {
a: b; }
''')
# Loops
def test_extend_self_loop():
assert_rendering('''\
.foo {a: b; @extend .foo}
''', '''\
.foo {
a: b; }
''')
def test_basic_extend_loop():
assert_rendering('''\
.foo {a: b; @extend .bar}
.bar {c: d; @extend .foo}
''', '''\
.bar, .foo {
a: b; }
.foo, .bar {
c: d; }
''')
def test_three_level_extend_loop():
assert_rendering('''\
.foo {a: b; @extend .bar}
.bar {c: d; @extend .baz}
.baz {e: f; @extend .foo}
''', '''\
.baz, .bar, .foo {
a: b; }
.foo, .baz, .bar {
c: d; }
.bar, .foo, .baz {
e: f; }
''')
def test_nested_extend_loop():
assert_rendering('''\
.bar {
a: b;
.foo {c: d; @extend .bar}
}
''', '''\
.bar, .bar .foo {
a: b; }
.bar .foo {
c: d; }
''')
def test_multiple_extender_merges_with_superset_selector():
assert_rendering('''\
.foo {@extend .bar; @extend .baz}
a.bar.baz {a: b}
''', '''\
a.bar.baz, a.foo {
a: b; }
''')
def test_control_flow_if():
assert_rendering('''\
.true { color: green; }
.false { color: red; }
.also-true {
@if true { @extend .true; }
@else { @extend .false; }
}
.also-false {
@if false { @extend .true; }
@else { @extend .false; }
}
''', '''\
.true, .also-true {
color: green; }
.false, .also-false {
color: red; }
''')
def test_control_flow_for():
assert_rendering('''\
.base-0 { color: green; }
.base-1 { display: block; }
.base-2 { border: 1px solid blue; }
.added {
@for $i from 0 to 3 {
@extend .base-\#{$i};
}
}
''', '''\
.base-0, .added {
color: green; }
.base-1, .added {
display: block; }
.base-2, .added {
border: 1px solid blue; }
''')
def test_control_flow_while():
assert_rendering('''\
.base-0 { color: green; }
.base-1 { display: block; }
.base-2 { border: 1px solid blue; }
.added {
$i : 0;
@while $i < 3 {
@extend .base-\#{$i};
$i : $i + 1;
}
}
''', '''\
.base-0, .added {
color: green; }
.base-1, .added {
display: block; }
.base-2, .added {
border: 1px solid blue; }
''')
def test_basic_placeholder_selector():
assert_extends(
'%foo',
'.bar {@extend %foo}',
'.bar')
def test_unused_placeholder_selector():
assert_rendering('''\
%foo {color: blue}
%bar {color: red}
.baz {@extend %foo}
''', '''\
.baz {
color: blue; }
''')
def test_placeholder_descendant_selector():
assert_extends(
'#context %foo a',
'.bar {@extend %foo}',
'#context .bar a')
def test_semi_placeholder_selector():
assert_rendering('''\
#context %foo, .bar .baz {color: blue}
''', '''\
.bar .baz {
color: blue; }
''')
def test_placeholder_selector_with_multiple_extenders():
assert_rendering('''\
%foo {color: blue}
.bar {@extend %foo}
.baz {@extend %foo}
''', '''\
.bar, .baz {
color: blue; }
''')
def test_placeholder_selector_as_modifier():
# XXX assert_extend_doesnt_match('div', '%foo', :failed_to_unify, 3) do
assert_rendering('''\
a%foo.baz {color: blue}
.bar {@extend %foo}
div {@extend %foo}
''', '''\
a.baz.bar {
color: blue; }
''')
def test_placeholder_interpolation():
assert_rendering('''\
$foo: foo;
%\#{$foo} {color: blue}
.bar {@extend %foo}
''', '''\
.bar {
color: blue; }
''')
def test_media_in_placeholder_selector():
assert_rendering('''\
%foo {bar {@media screen {a: b}}}
.baz {c: d}
''', '''\
.baz {
c: d; }
''')
"""
def test_extend_out_of_media():
assert_warning(<<WARN) {assert_equal(<<CSS, render(<<SCSS))}
DEPRECATION WARNING on line 3 of test_extend_out_of_media_inline.scss:
@extending an outer selector from within @media is deprecated.
You may only @extend selectors within the same directive.
This will be an error in Sass 3.3.
It can only work once @extend is supported natively in the browser.
WARN
.foo {
a: b; }
CSS
.foo {a: b}
@media screen {
.bar {@extend .foo}
}
SCSS
def test_extend_out_of_unknown_directive():
assert_warning(<<WARN) {assert_equal(<<CSS, render(<<SCSS))}
DEPRECATION WARNING on line 3 of test_extend_out_of_unknown_directive_inline.scss:
@extending an outer selector from within @flooblehoof is deprecated.
You may only @extend selectors within the same directive.
This will be an error in Sass 3.3.
It can only work once @extend is supported natively in the browser.
WARN
.foo {
a: b; }
@flooblehoof {}
CSS
.foo {a: b}
@flooblehoof {
.bar {@extend .foo}
}
SCSS
def test_extend_out_of_nested_directives():
assert_warning(<<WARN) {assert_equal(<<CSS, render(<<SCSS))}
DEPRECATION WARNING on line 4 of test_extend_out_of_nested_directives_inline.scss:
@extending an outer selector from within @flooblehoof is deprecated.
You may only @extend selectors within the same directive.
This will be an error in Sass 3.3.
It can only work once @extend is supported natively in the browser.
WARN
@media screen {
.foo {
a: b; }
@flooblehoof {} }
CSS
@media screen {
.foo {a: b}
@flooblehoof {
.bar {@extend .foo}
}
}
SCSS
"""
def test_extend_within_media():
assert_rendering('''\
@media screen {
.foo {a: b}
.bar {@extend .foo}
}
''', '''\
@media screen {
.foo, .bar {
a: b; } }
''')
def test_extend_within_unknown_directive():
assert_rendering('''\
@flooblehoof {
.foo {a: b}
.bar {@extend .foo}
}
''', '''\
@flooblehoof {
.foo, .bar {
a: b; } }
''')
def test_extend_within_nested_directives():
assert_rendering('''\
@media screen {
@flooblehoof {
.foo {a: b}
.bar {@extend .foo}
}
}
''', '''\
@media screen {
@flooblehoof {
.foo, .bar {
a: b; } } }
''')
def test_extend_within_disparate_media():
assert_rendering('''\
@media screen {.foo {a: b}}
@media screen {.bar {@extend .foo}}
''', '''\
@media screen {
.foo, .bar {
a: b; } }
''')
def test_extend_within_disparate_unknown_directive():
assert_rendering('''\
@flooblehoof {.foo {a: b}}
@flooblehoof {.bar {@extend .foo}}
''', '''\
@flooblehoof {
.foo, .bar {
a: b; } }
@flooblehoof {}
''')
def test_extend_within_disparate_nested_directives():
assert_rendering('''\
@media screen {@flooblehoof {.foo {a: b}}}
@media screen {@flooblehoof {.bar {@extend .foo}}}
''', '''\
@media screen {
@flooblehoof {
.foo, .bar {
a: b; } } }
@media screen {
@flooblehoof {} }
''')
"""
def test_extend_within_and_without_media():
assert_warning(<<WARN) {assert_equal(<<CSS, render(<<SCSS))}
DEPRECATION WARNING on line 4 of test_extend_within_and_without_media_inline.scss:
@extending an outer selector from within @media is deprecated.
You may only @extend selectors within the same directive.
This will be an error in Sass 3.3.
It can only work once @extend is supported natively in the browser.
WARN
.foo {
a: b; }
@media screen {
.foo, .bar {
c: d; } }
CSS
.foo {a: b}
@media screen {
.foo {c: d}
.bar {@extend .foo}
}
SCSS
def test_extend_within_and_without_unknown_directive():
assert_warning(<<WARN) {assert_equal(<<CSS, render(<<SCSS))}
DEPRECATION WARNING on line 4 of test_extend_within_and_without_unknown_directive_inline.scss:
@extending an outer selector from within @flooblehoof is deprecated.
You may only @extend selectors within the same directive.
This will be an error in Sass 3.3.
It can only work once @extend is supported natively in the browser.
WARN
.foo {
a: b; }
@flooblehoof {
.foo, .bar {
c: d; } }
CSS
.foo {a: b}
@flooblehoof {
.foo {c: d}
.bar {@extend .foo}
}
SCSS
def test_extend_within_and_without_nested_directives():
assert_warning(<<WARN) {assert_equal(<<CSS, render(<<SCSS))}
DEPRECATION WARNING on line 5 of test_extend_within_and_without_nested_directives_inline.scss:
@extending an outer selector from within @flooblehoof is deprecated.
You may only @extend selectors within the same directive.
This will be an error in Sass 3.3.
It can only work once @extend is supported natively in the browser.
WARN
@media screen {
.foo {
a: b; }
@flooblehoof {
.foo, .bar {
c: d; } } }
CSS
@media screen {
.foo {a: b}
@flooblehoof {
.foo {c: d}
.bar {@extend .foo}
}
}
SCSS
"""
def test_extend_with_subject_transfers_subject_to_extender():
assert_rendering('''\
foo bar! baz {a: b}
.bip .bap {@extend bar}
''', '''\
foo bar! baz, foo .bip .bap! baz, .bip foo .bap! baz {
a: b; }
''')
assert_rendering('''\
foo.x bar.y! baz.z {a: b}
.bip .bap {@extend .y}
''', '''\
foo.x bar.y! baz.z, foo.x .bip bar.bap! baz.z, .bip foo.x bar.bap! baz.z {
a: b; }
''')
def test_extend_with_subject_retains_subject_on_target():
assert_rendering('''\
.foo! .bar {a: b}
.bip .bap {@extend .bar}
''', '''\
.foo! .bar, .foo! .bip .bap, .bip .foo! .bap {
a: b; }
''')
def test_extend_with_subject_transfers_subject_to_target():
assert_rendering('''\
a.foo .bar {a: b}
.bip .bap! {@extend .foo}
''', '''\
a.foo .bar, .bip a.bap! .bar {
a: b; }
''')
def test_extend_with_subject_retains_subject_on_extender():
assert_rendering('''\
.foo .bar {a: b}
.bip! .bap {@extend .bar}
''', '''\
.foo .bar, .foo .bip! .bap, .bip! .foo .bap {
a: b; }
''')
def test_extend_with_subject_fails_with_conflicting_subject():
assert_rendering('''\
x! .bar {a: b}
y! .bap {@extend .bar}
''', '''\
x! .bar {
a: b; }
''')
"""
def test_extend_warns_when_extendee_doesnt_exist():
assert_warning(<<WARN) {assert_equal("", render(<<SCSS))}
WARNING on line 1 of test_extend_warns_when_extendee_doesnt_exist_inline.scss: ".foo" failed to @extend ".bar".
The selector ".bar" was not found.
This will be an error in future releases of Sass.
Use "@extend .bar !optional" if the extend should be able to fail.
WARN
.foo {@extend .bar}
SCSS
def test_extend_warns_when_extension_fails():
assert_warning(<<WARN) {assert_equal(<<CSS, render(<<SCSS))}
WARNING on line 2 of test_extend_warns_when_extension_fails_inline.scss: "b.foo" failed to @extend ".bar".
No selectors matching ".bar" could be unified with "b.foo".
This will be an error in future releases of Sass.
Use "@extend .bar !optional" if the extend should be able to fail.
WARN
a.bar {
a: b; }
CSS
a.bar {a: b}
b.foo {@extend .bar}
SCSS
def test_extend_does_not_warn_when_one_extension_fails_but_others_dont():
assert_no_warning {assert_equal(<<CSS, render(<<SCSS))}
a.bar {
a: b; }
.bar, b.foo {
c: d; }
CSS
a.bar {a: b}
.bar {c: d}
b.foo {@extend .bar}
SCSS
def test_optional_extend_does_not_warn_when_extendee_doesnt_exist():
assert_no_warning {assert_equal("", render(<<SCSS))}
.foo {@extend .bar !optional}
SCSS
def test_optional_extend_does_not_warn_when_extension_fails():
assert_no_warning {assert_equal(<<CSS, render(<<SCSS))}
a.bar {
a: b; }
CSS
a.bar {a: b}
b.foo {@extend .bar !optional}
SCSS
"""
### Regression Tests
def test_nested_extend_specificity():
assert_rendering('''\
%foo {a: b}
a {
:b {@extend %foo}
:b:c {@extend %foo}
}
''', '''\
a :b, a :b:c {
a: b; }
''')
def test_nested_double_extend_optimization():
assert_rendering('''\
%foo %bar {
a: b;
}
.parent1 {
@extend %foo;
.child {
@extend %bar;
}
}
.parent2 {
@extend %foo;
}
''', '''\
.parent1 .child {
a: b; }
''')
def test_extend_in_double_nested_media_query():
assert_rendering('''\
@media all {
@media (orientation: landscape) {
%foo {color: blue}
.bar {@extend %foo}
}
}
''', '''\
@media all and (orientation: landscape) {
.bar {
color: blue; } }
''')
"""
def test_partially_failed_extend():
assert_no_warning {assert_equal(<<CSS, render(<<SCSS))}
.rc, test {
color: white; }
.prices span.pill span.rc {
color: red; }
CSS
test { @extend .rc; }
.rc {color: white;}
.prices span.pill span.rc {color: red;}
SCSS
"""
def test_newline_near_combinator():
assert_rendering('''\
.a +
.b x {a: b}
.c y {@extend x}
''', '''\
.a +
.b x, .a +
.b .c y, .c .a +
.b y {
a: b; }
''')
def test_duplicated_selector_with_newlines():
assert_rendering('''\
.example-1-1,
.example-1-2,
.example-1-3 {
a: b;
}
.my-page-1 .my-module-1-1 {@extend .example-1-2}
''', '''\
.example-1-1,
.example-1-2,
.my-page-1 .my-module-1-1,
.example-1-3 {
a: b; }
''')
def test_nested_selector_with_child_selector_hack_extendee():
assert_extends(
'> .foo',
'foo bar {@extend .foo}',
'> .foo, > foo bar')
def test_nested_selector_with_child_selector_hack_extender():
assert_extends(
'.foo .bar',
'> foo bar {@extend .bar}',
'.foo .bar, > .foo foo bar, > foo .foo bar')
def test_nested_selector_with_child_selector_hack_extender_and_extendee():
assert_extends(
'> .foo',
'> foo bar {@extend .foo}',
'> .foo, > foo bar')
def test_nested_selector_with_child_selector_hack_extender_and_sibling_selector_extendee():
assert_extends(
'~ .foo',
'> foo bar {@extend .foo}',
'~ .foo')
def test_nested_selector_with_child_selector_hack_extender_and_extendee_and_newline():
assert_rendering('''\
> .foo {a: b}
flip,
> foo bar {@extend .foo}
''', '''\
> .foo, > flip,
> foo bar {
a: b; }
''')
def test_extended_parent_and_child_redundancy_elimination():
assert_rendering('''\
a {
b {a: b}
c {@extend b}
}
d {@extend a}
''', '''\
a b, d b, a c, d c {
a: b; }
''')
def test_extend_redundancy_elimination_when_it_would_reduce_specificity():
assert_extends(
'a',
'a.foo {@extend a}',
'a, a.foo')
def test_extend_redundancy_elimination_when_it_would_preserve_specificity():
assert_extends(
'.bar a',
'a.foo {@extend a}',
'.bar a')
def test_extend_redundancy_elimination_never_eliminates_base_selector():
assert_extends(
'a.foo',
'.foo {@extend a}',
'a.foo, .foo')
def test_extend_cross_branch_redundancy_elimination():
assert_rendering('''\
%x c %y {a: b}
a, b {@extend %x}
a d {@extend %y}
''', '''\
a c d, b c a d {
a: b; }
''')
assert_rendering('''\
e %z {a: b}
%x c %y {@extend %z}
a, b {@extend %x}
a d {@extend %y}
''', '''\
e a c d, a c e d, e b c a d, b c a e d {
a: b; }
''')
"""
def assert_extend_doesnt_match(extender, target, reason, line, syntax = :scss):
warn = "\"#{extender}\" failed to @extend \"#{target}\"."
reason =
if reason == :not_found
"The selector \"#{target}\" was not found."
else
"No selectors matching \"#{target}\" could be unified with \"#{extender}\"."
assert_warning(<<WARNING) {yield}
WARNING on line #{line} of #{filename_for_test syntax}: #{warn}
#{reason}
This will be an error in future releases of Sass.
Use "@extend #{target} !optional" if the extend should be able to fail.
WARNING
"""
def assert_unification(selector, extension, unified):
# Do some trickery so the first law of extend doesn't get in our way.
assert_extends(
"%-a {0}".format(selector),
extension + " -a {@extend %-a}",
', '.join('-a ' + s for s in unified.split(', ')))
def assert_extends(selector, extension, result):
assert_rendering(
"{0} {{a: b}}\n{1}\n".format(selector, extension),
"{0} {{\n a: b; }}\n".format(result))
|
lettersmith/doc.py | ericmjl/lettersmith_py | 103 | 12656398 | """
Tools for working with Doc type.
Docs are namedtuples that represent a file to be transformed.
The `content` field of a doc contains the file contents, read as a
Python string with UTF-8 encoding.
Most lettersmith plugins transform Docs or iterables of Docs.
For working with non-text files, images, binary files, or text files
with other encodings, see `lettersmith.file` which stores the raw bytes
instead of reading them into a Python string.
"""
from pathlib import PurePath, Path
import json
from collections import namedtuple
from functools import wraps
import frontmatter
import yaml
from lettersmith.util import mix
from lettersmith.date import read_file_times, EPOCH, to_datetime
from lettersmith import path as pathtools
from lettersmith import lens
from lettersmith.lens import (
Lens, lens_compose, get, put, key, over_with, update
)
from lettersmith.func import compose
Doc = namedtuple("Doc", (
"id_path", "output_path", "input_path", "created", "modified",
"title", "content", "meta", "template"
))
Doc.__doc__ = """
Docs are namedtuples that represent a document to be transformed,
and eventually written to disk.
Docs contain a content field. This is a string that typically contains the
contents of the file.
"""
def create(id_path, output_path,
input_path=None, created=EPOCH, modified=EPOCH,
title="", content="", meta=None, template=""):
"""
Create a Doc tuple, populating it with sensible defaults
"""
return Doc(
id_path=str(id_path),
output_path=str(output_path),
input_path=str(input_path) if input_path is not None else None,
created=to_datetime(created),
modified=to_datetime(modified),
title=str(title),
content=str(content),
meta=meta if meta is not None else {},
template=str(template)
)
def load(pathlike):
"""
Loads a doc namedtuple from a file path.
`content` field will contain contents of file.
Typically, you decorate the doc later with meta and other fields.
Returns a doc.
"""
file_created, file_modified = read_file_times(pathlike)
with open(pathlike, 'r') as f:
content = f.read()
title = pathtools.to_title(pathlike)
return create(
id_path=pathlike,
output_path=pathlike,
input_path=pathlike,
created=file_created,
modified=file_modified,
title=title,
meta={},
content=content
)
def writeable(doc):
"""
Return a writeable tuple for doc.
    A writeable tuple is any 2-tuple of (`output_path`, `bytes`).
`lettersmith.write` knows how to write these tuples to disk.
"""
return doc.output_path, doc.content.encode()
id_path = Lens(
lambda doc: doc.id_path,
lambda doc, id_path: doc._replace(id_path=id_path)
)
output_path = Lens(
lambda doc: doc.output_path,
lambda doc, output_path: doc._replace(output_path=output_path)
)
ext = lens_compose(output_path, pathtools.ext)
title = Lens(
lambda doc: doc.title,
lambda doc, title: doc._replace(title=title)
)
content = Lens(
lambda doc: doc.content,
lambda doc, content: doc._replace(content=content)
)
created = Lens(
lambda doc: doc.created,
lambda doc, created: doc._replace(created=created)
)
modified = Lens(
lambda doc: doc.modified,
lambda doc, modified: doc._replace(modified=modified)
)
meta = Lens(
lambda doc: doc.meta,
lambda doc, meta: doc._replace(meta=meta)
)
template = Lens(
lambda doc: doc.template,
lambda doc, template: doc._replace(template=template)
)
meta_summary = lens_compose(meta, key("summary", ""))
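# Minimal usage sketch for the lenses above (assumes the get/put semantics of
# lettersmith.lens: get reads a field, put returns a new Doc with it replaced):
#
#   doc = create(id_path="post.md", output_path="post.html", title="Hello")
#   get(title, doc)               # -> "Hello"
#   doc2 = put(title, doc, "Hi")  # new Doc; the original is unchanged
#   get(meta_summary, doc2)       # -> "" (the default when meta has no summary)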
def update_meta(doc, patch):
"""
Mix keys from `patch` into `doc.meta`.
"""
return update(meta, mix, doc, patch)
def with_ext_html(doc):
"""
Set doc extension to ".html"
"""
return put(ext, doc, ".html")
output_tld = compose(pathtools.tld, output_path.get)
id_tld = compose(pathtools.tld, id_path.get)
_infer_template = compose(
pathtools.ext_html,
pathtools.to_slug,
id_tld
)
def autotemplate(doc):
"""
Set template based on top-level directory in doc's id_path.
E.g. if top-level-directory is "posts", template gets set to "posts.html".
"""
if get(template, doc) != "":
return doc
else:
return put(template, doc, _infer_template(doc))
def with_template(t):
"""
Set template `t`, but only if doc doesn't have one already.
"""
def with_template_on_doc(doc):
if get(template, doc) != "":
return doc
else:
return put(template, doc, t)
return with_template_on_doc
def to_json(doc):
"""
Serialize a doc as JSON-serializable data
"""
return {
"@type": "doc",
"id_path": doc.id_path,
"output_path": doc.output_path,
"input_path": doc.input_path,
"created": doc.created.timestamp(),
"modified": doc.modified.timestamp(),
"title": doc.title,
"content": doc.content,
"meta": doc.meta,
"template": doc.template
}
def uplift_meta(doc):
"""
Reads "magic" fields in the meta and uplifts their values to doc
properties.
We use this to uplift...
- title
- created
- modified
- permalink
- template
...in the frontmatter, overriding original or default values on doc.
"""
return doc._replace(
title=doc.meta.get("title", doc.title),
created=to_datetime(doc.meta.get("created", doc.created)),
modified=to_datetime(doc.meta.get("modified", doc.modified)),
output_path=doc.meta.get("permalink", doc.output_path),
template=doc.meta.get("template", "")
)
class DocException(Exception):
pass
def annotate_exceptions(func):
"""
Decorates a mapping function for docs, giving it a more useful
exception message.
"""
@wraps(func)
def func_with_annotated_exceptions(doc):
try:
return func(doc)
except Exception as e:
msg = (
'Error encountered while mapping doc '
'"{id_path}" with {module}.{func}.'
).format(
id_path=doc.id_path,
func=func.__qualname__,
module=func.__module__
)
raise DocException(msg) from e
return func_with_annotated_exceptions
@annotate_exceptions
def parse_frontmatter(doc):
"""
Parse frontmatter as YAML. Set frontmatter on meta field, and
remaining content on content field.
If there is no frontmatter, will set an empty object on meta field,
and leave content as-is.
"""
meta, content = frontmatter.parse(doc.content)
return doc._replace(
meta=meta,
content=content
)
uplift_frontmatter = compose(uplift_meta, parse_frontmatter)
def renderer(render):
"""
Create a renderer for doc content using a string rendering function.
Will also annotate any exceptions that happen during rendering,
transforming them into DocExceptions that will record the doc's
id_path and the render function where exception occurred.
Can be used as a decorator.
"""
return annotate_exceptions(over_with(content, render)) |
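# Usage sketch (hypothetical `shout` function; any str -> str callable works,
# assuming over_with applies it to the doc through the content lens):
#
#   def shout(text):
#       return text.upper()
#
#   render_doc = renderer(shout)
#   render_doc(doc)  # -> new Doc whose content is upper-cased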
Algo and DSA/LeetCode-Solutions-master/Python/play-with-chips.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 12656467 |
# Time: O(n)
# Space: O(1)
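# Moving a chip by two positions is free and moving it by one costs 1, so every
# chip can reach any square of the same parity at zero cost. Gathering all the
# chips therefore costs the number of chips on the opposite parity, and the
# best target is whichever parity holds more chips: answer = min(count).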
class Solution(object):
def minCostToMoveChips(self, chips):
"""
:type chips: List[int]
:rtype: int
"""
count = [0]*2
for p in chips:
count[p%2] += 1
return min(count)
|
src/beanmachine/ppl/compiler/fix_vectorized_models.py | facebookresearch/beanmachine | 177 | 12656480 |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Dict, List, Type
import beanmachine.ppl.compiler.bmg_nodes as bn
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.error_report import ErrorReport
from beanmachine.ppl.compiler.fix_matrix_scale import matrix_scale_fixer
from beanmachine.ppl.compiler.fix_problem import (
ancestors_first_graph_fixer,
fixpoint_graph_fixer,
GraphFixer,
GraphFixerResult,
Inapplicable,
node_fixer_first_match,
NodeFixer,
NodeFixerResult,
sequential_graph_fixer,
)
from beanmachine.ppl.compiler.sizer import is_scalar, Sizer
# TODO Move this to a utils module
from beanmachine.ppl.compiler.support import _prod
from torch import Size, tensor
# These graph fixers turn vectorized models into unvectorized models.
# For example, the model
#
# @rv def flip():
# return Bernoulli(tensor([0.25, 0.75]))
#
# which we cannot represent in BMG is rewritten into the model:
#
# p = tensor([0.25, 0.75])
# @rv def f0:
# return Bernoulli(p[0])
# @rv def f1:
# return Bernoulli(p[1])
# @functional def flip():
# return tensor([f0()), f1())])
#
# which we can represent in BMG.
def _is_fixable_size(s: Size) -> bool:
dim = len(s)
if dim == 1:
return s[0] > 1
if dim == 2:
return s[0] > 1 or s[1] > 1
return False
def _is_indexable_node(sizer: Sizer, n: bn.BMGNode) -> bool:
if type(n) not in _indexable_node_types:
return False
return _is_fixable_size(sizer[n])
def _inputs_are_devectorizable(sizer: Sizer, node: bn.BMGNode) -> bool:
# For a node to be devectorizable:
# * All its inputs must be either indexable or scalars.
# * At least one input must be indexable.
return all(
_is_indexable_node(sizer, i) or is_scalar(sizer[i]) for i in node.inputs
) and any(_is_indexable_node(sizer, i) for i in node.inputs)
def _node_to_index_list(
bmg: BMGraphBuilder, sizer: Sizer, n: bn.BMGNode
) -> List[bn.BMGNode]:
size = sizer[n]
dim = len(size)
index_list = []
# This code is a little confusing because BMG uses column-major matrices
# and torch uses row-major tensors. The Sizer always gives the size
# that a graph node would be in *torch*, so if we have a Size([2, 3])
# matrix node, that has two rows and three columns in torch, and would
# be indexed first by row and then by column. But in BMG, that would
# be two columns, three rows, and indexed by column first, then row.
#
# The practical upshot is: if we have, say, Size([3]) OR Size([1, 3])
# then either way, we will have a one-column, three row BMG node, and
# therefore we only need a single level of indexing.
if dim == 0:
# If we have just a single value then there's no indexing required.
index_list.append(n)
elif dim == 1:
for i in range(0, size[0]):
ci = bmg.add_constant(i)
ni = bmg.add_index(n, ci)
index_list.append(ni)
elif size[0] == 1:
assert dim == 2
for i in range(0, size[1]):
ci = bmg.add_constant(i)
ni = bmg.add_index(n, ci)
index_list.append(ni)
else:
# We need two levels of indexing.
assert dim == 2
for i in range(0, size[0]):
ci = bmg.add_constant(i)
ni = bmg.add_index(n, ci)
for j in range(0, size[1]):
cj = bmg.add_constant(j)
nij = bmg.add_index(ni, cj)
index_list.append(nij)
return index_list
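# Illustrative output: for a node whose torch size is Size([2, 3]) the list has
# six entries, Index(Index(n, 0), 0), Index(Index(n, 0), 1), ...,
# Index(Index(n, 1), 2) -- one scalar-producing node per tensor element.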
def _generate_arglists(bmg: BMGraphBuilder, sizer: Sizer, node: bn.BMGNode):
# This code is a bit tricky to understand so lets work an example.
# Suppose node has two inputs, call them X and Y. X has size [3], Y has
# size [2, 3], and node has size [2, 3].
final_size = sizer[node] # Size([2, 3])
final_length = _prod(final_size) # 2 x 3 = 6
input_nodes = [_node_to_index_list(bmg, sizer, n) for n in node.inputs]
# input_nodes is [
# [ Index(X, 0), Index(X, 1), Index(X, 2)],
# [ Index(Index(Y, 0), 0), Index(Index(Y, 0), 1), ...]
# ]
index_lists = []
# Let's now look at what happens on the FIRST loop iteration:
for i in range(len(input_nodes)):
input_node = input_nodes[i]
# First time through the loop input_node is [Index(X, 0), Index(X, 1), Index(X, 2)]
input_length = len(input_node) # 3
input_size = sizer[node.inputs[i]] # Size([3])
t = (
tensor(range(input_length)) # tensor([0, 1, 2])
.reshape(input_size) # tensor([0, 1, 2])
.broadcast_to(final_size) # tensor([[0, 1, 2], [0, 1, 2]])
.reshape(final_length) # tensor([0, 1, 2, 0, 1, 2])
.tolist() # [0, 1, 2, 0, 1, 2]
)
index_lists.append(t)
# When we're done both iterations we have two lists of the same length:
# [0, 1, 2, 0, 1, 2]
# [0, 1, 2, 3, 4, 5]
#
# Now make tuples out of each column.
#
# [(0, 0), (1, 1), (2, 2), (0, 3), (1, 4), (2, 5)]
index_tuples = list(zip(*index_lists))
# These pairs give the elements of X and Y needed to build devectorized nodes.
# Now make actual argument lists for each tuple.
return [
[input_nodes[i][index_tuple[i]] for i in range(len(index_tuple))]
for index_tuple in index_tuples
]
def _distribution_factories(bmg: BMGraphBuilder) -> Dict[Type, Callable]:
# These are all the distributions that we know how to devectorize,
# and the factory methods we need to use to generate a new node
# of the appropriate type.
# TODO: categorical
# TODO: categorical logit
# TODO: dirichlet
return {
bn.BernoulliLogitNode: bmg.add_bernoulli_logit,
bn.BernoulliNode: bmg.add_bernoulli,
bn.BetaNode: bmg.add_beta,
bn.BinomialNode: bmg.add_binomial,
bn.BinomialLogitNode: bmg.add_binomial_logit,
bn.Chi2Node: bmg.add_chi2,
bn.GammaNode: bmg.add_gamma,
bn.HalfCauchyNode: bmg.add_halfcauchy,
bn.HalfNormalNode: bmg.add_halfnormal,
bn.NormalNode: bmg.add_normal,
bn.PoissonNode: bmg.add_poisson,
bn.StudentTNode: bmg.add_studentt,
bn.UniformNode: bmg.add_uniform,
}
_distribution_types = list(_distribution_factories(BMGraphBuilder()).keys())
def _is_fixable_sample(sizer: Sizer, n: bn.BMGNode) -> bool:
if not isinstance(n, bn.SampleNode):
return False
dist = n.operand
if type(dist) not in _distribution_types:
return False
if not _is_fixable_size(sizer[dist]):
return False
# Every input must be either a scalar or indexable,
# and at least one input must be indexable.
if not _inputs_are_devectorizable(sizer, dist):
return False
return True
_indexable_node_types = [
bn.ColumnIndexNode,
bn.ConstantTensorNode,
bn.IndexNode,
bn.MatrixMultiplicationNode,
bn.MatrixScaleNode,
bn.SampleNode,
bn.TensorNode,
bn.ToMatrixNode,
bn.UntypedConstantNode,
]
def _vectorized_distribution_node_fixer(bmg: BMGraphBuilder, sizer: Sizer) -> NodeFixer:
distribution_factories = _distribution_factories(bmg)
def vect_dist_fixer(node: bn.BMGNode) -> NodeFixerResult:
# The graph transformation we're doing here takes graphs of the form:
#
# indexable --> dist --> sample --> consumer
#
# where the "indexable" produces a matrix, the consumer takes a matrix,
# but the distribution requires scalar inputs and produces a scalar
# output.
#
# We transform it into the graph:
#
# --> index[0] --> dist --> sample -->
# indexable to_matrix --> consumer
# --> index[1] --> dist --> sample -->
# ...
#
# And now everyone is happy; the operators get scalars and the
# consumer gets a matrix.
#
#
# TODO: Consider optimizing distributions where the tensor elements are all
# the same; if we have Bernoulli([[0.5, 0.5], [0.5, 0.5]]) then that can be
# represented in BMG as an IID_SAMPLE(2,2) from Bernoulli(0.5). We could
# write another fixer which makes this transformation, or we could modify
# this fixer. NOTE that not all inference algorithms might support
# IID_SAMPLE nodes; look into this before attempting the optimization.
if not _is_fixable_sample(sizer, node):
return Inapplicable
assert isinstance(node, bn.SampleNode)
dist = node.operand
# We need to generate n new distribution and sample nodes, each of
# which takes some scalar indexed from its inputs. The factory method that
# builds the distribution is in the distribution factories list.
# _generate_arglists constructs the arguments to that factory method.
arglists = _generate_arglists(bmg, sizer, dist)
samples = []
factory = distribution_factories[type(dist)]
for arglist in arglists:
b = factory(*arglist)
s = bmg.add_sample(b)
samples.append(s)
size = sizer[dist]
# We now have n new operator nodes; stick them into a tensor. We then
# return that tensor. The caller will retarget the input edge of the
# consumer from the original operator to the tensor, and the graph is
# rewritten.
t = bmg.add_tensor(size, *samples)
return t
return vect_dist_fixer
def _operator_factories(bmg: BMGraphBuilder) -> Dict[Type, Callable]:
return {
# Note that we expect devectorization to run *before* multiary
# addition/multiplication rewriting, so we can assume that
# all additions and multiplications are binary.
bn.AdditionNode: bmg.add_addition,
bn.DivisionNode: bmg.add_division,
bn.Exp2Node: bmg.add_exp2,
bn.ExpNode: bmg.add_exp,
bn.ExpM1Node: bmg.add_expm1,
bn.LogisticNode: bmg.add_logistic,
bn.Log10Node: bmg.add_log10,
bn.Log1pNode: bmg.add_log1p,
bn.Log2Node: bmg.add_log2,
bn.Log1mexpNode: bmg.add_log1mexp,
bn.LogNode: bmg.add_log,
bn.MultiplicationNode: bmg.add_multiplication,
bn.NegateNode: bmg.add_negate,
bn.PhiNode: bmg.add_phi,
bn.PowerNode: bmg.add_power,
bn.SquareRootNode: bmg.add_squareroot,
}
# TODO: LogSumExp, all comparisons, all bitwise, floordiv,
# shifts, mod, invert. Should we devectorize "not"?
def _vectorized_operator_node_fixer(bmg: BMGraphBuilder, sizer: Sizer) -> NodeFixer:
operator_factories = _operator_factories(bmg)
def _is_fixable_operator(sizer: Sizer, operator: bn.BMGNode) -> bool:
# * The operator must be on the list of devectorizable operators
# in operator_factories above.
# * The sizer must judge that the operator in its current
# place in the graph produces a 1-d or 2-d tensor, not a scalar.
# * Every input must be either a scalar or indexable,
# * At least one input must be indexable.
# * All inputs of a multiplication must be non-scalars.
# (We rewrite scalar-matrix multiplications in a different fixer.)
if type(operator) not in operator_factories:
return False
if not _is_fixable_size(sizer[operator]):
return False
if not _inputs_are_devectorizable(sizer, operator):
return False
if isinstance(operator, bn.MultiplicationNode) and not all(
_is_indexable_node(sizer, i) for i in operator.inputs
):
return False
return True
def vect_op_node_fixer(operator: bn.BMGNode) -> NodeFixerResult:
# The graph transformation we're doing here takes graphs of the form:
#
# indexable --> operator --> consumer
#
# where the "indexable" produces a matrix, the consumer takes a matrix,
# but the BMG operator only operates on scalars.
#
# We transform it into the graph:
#
# --> index[0] --> operator -->
# indexable to_matrix --> consumer
# --> index[1] --> operator -->
# ...
#
# And now everyone is happy; the operators get scalars and the
# consumer gets a matrix.
#
# Obviously this increases the number of nodes in the graph by O(n) in
# the size of the indexible matrix but until we have more vectorized BMG
# operators we cannot do much better. (Also, we can often optimize away
# some of the indexing operations in the arithmetic graph rewriter.)
#
if not _is_fixable_operator(sizer, operator):
return Inapplicable
# We need to generate n new operator nodes, each of which takes
# some scalar indexed from its operands. The factory method that
# builds those operator nodes is in the operator factories list;
# _generate_arglists constructs the arguments to that factory method.
arglists = _generate_arglists(bmg, sizer, operator)
results = []
factory = operator_factories[type(operator)]
for arglist in arglists:
r = factory(*arglist)
results.append(r)
size = sizer[operator]
# We now have n new operator nodes; stick them into a tensor. We then
# return that tensor. The caller will retarget the input edge of the
# consumer from the original operator to the tensor, and the graph is
# rewritten.
t = bmg.add_tensor(size, *results)
return t
return vect_op_node_fixer
def vectorized_operator_fixer(bmg: BMGraphBuilder) -> GraphFixer:
def vop_fixer() -> GraphFixerResult:
sizer = Sizer()
dist_fixer = _vectorized_distribution_node_fixer(bmg, sizer)
oper_fixer = _vectorized_operator_node_fixer(bmg, sizer)
scale_fixer = matrix_scale_fixer(bmg, sizer)
node_fixer = node_fixer_first_match([dist_fixer, oper_fixer, scale_fixer])
vof = ancestors_first_graph_fixer(bmg, sizer, node_fixer)
made_progress, errors = vof()
# If we changed something then we might have a leaf sample node;
# we can remove it.
if made_progress:
for n in bmg.all_nodes():
if _is_fixable_sample(sizer, n):
assert n.is_leaf
bmg.remove_leaf(n)
return made_progress, errors
return vop_fixer
def vectorized_observation_fixer(bmg: BMGraphBuilder) -> GraphFixer:
def vobs_fixer() -> GraphFixerResult:
made_change = False
# We might have an illegal observation. Fix it.
for o in bmg.all_observations():
observed = o.observed
if not isinstance(observed, bn.TensorNode):
continue
if not _is_fixable_size(observed._size):
continue
# TODO: What if the observation is of a different size than the
# tensor node we've just generated? That should be an error, but instead
# we just crash here. Figure out where to put an error detection pass
# which prevents this crash and reports the error.
dim = len(observed._size)
if dim == 1:
for i in range(0, observed._size[0]):
s = observed.inputs[i]
assert isinstance(s, bn.SampleNode)
bmg.add_observation(s, o.value[i])
else:
assert dim == 2
for i in range(0, observed._size[0]):
for j in range(0, observed._size[1]):
s = observed.inputs[i * observed._size[1] + j]
assert isinstance(s, bn.SampleNode)
bmg.add_observation(s, o.value[i][j])
bmg.remove_leaf(o)
made_change = True
return made_change, ErrorReport()
return vobs_fixer
def vectorized_model_fixer(bmg: BMGraphBuilder) -> GraphFixer:
vector_ops = vectorized_operator_fixer(bmg)
vector_obs = vectorized_observation_fixer(bmg)
return fixpoint_graph_fixer(sequential_graph_fixer([vector_ops, vector_obs]))
|
lib/evaluation.py | pichetzh/jama16-retina-replication | 108 | 12656494 |
import tensorflow as tf
import numpy as np
def _get_operations_by_names(graph, names):
return [graph.get_operation_by_name(name) for name in names]
def _get_tensors_by_names(graph, names):
return [graph.get_tensor_by_name(name) for name in names]
def perform_test(sess, init_op, summary_writer=None, epoch=None,
feed_dict_fn=None, feed_dict_args={}, custom_tensors=[]):
tf.keras.backend.set_learning_phase(False)
sess.run(init_op)
if len(custom_tensors) == 0:
# Retrieve all default tensors and operations.
graph = tf.get_default_graph()
reset_tp, reset_fp, reset_fn, reset_tn, reset_brier, reset_auc = \
_get_operations_by_names(
graph, ['tp/reset', 'fp/reset', 'fn/reset', 'tn/reset',
'brier/reset', 'auc/reset'])
update_tp, update_fp, update_fn, update_tn, update_brier, update_auc, \
brier, auc, confusion_matrix, summaries_op = \
_get_tensors_by_names(
graph, ['tp/true_positives/AssignAdd:0',
'fp/false_positives/AssignAdd:0',
'fn/false_negatives/AssignAdd:0',
'tn/true_negatives/AssignAdd:0',
'brier/mean_squared_error/update_op:0',
'auc/auc/update_op:0',
'brier/mean_squared_error/value:0',
'auc/auc/value:0',
'confusion_matrix/Cast:0',
'Merge/MergeSummary:0'])
# Reset all streaming variables.
sess.run([reset_tp, reset_fp, reset_fn, reset_tn, reset_brier, reset_auc])
# Create an array with tensors to run for each batch.
tensors = [update_tp, update_fp, update_fn,
update_tn, update_brier, update_auc]
else:
tensors = custom_tensors
try:
batch_results = []
while True:
if feed_dict_fn is not None:
feed_dict = feed_dict_fn(**feed_dict_args)
else:
feed_dict = None
# Retrieve the validation set confusion metrics.
batch_results.append(sess.run(tensors, feed_dict))
except tf.errors.OutOfRangeError:
pass
# Yield the result if custom tensors were defined.
if len(custom_tensors) > 0:
return [np.vstack(x) for x in zip(*batch_results)]
# Retrieve confusion matrix and estimated roc auc score.
test_conf_matrix, test_brier, test_auc, summaries = sess.run(
[confusion_matrix, brier, auc, summaries_op])
# Write summary.
if summary_writer is not None:
summary_writer.add_summary(summaries, epoch)
# Print total roc auc score for validation.
print(f"Brier score: {test_brier:6.4}, AUC: {test_auc:10.8}")
# Print confusion matrix.
print(f"Confusion matrix:")
print(test_conf_matrix[0])
return test_auc
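# Illustrative call (assumes a tf.Session `sess`, a dataset initializer
# `val_init_op`, and the metric/summary ops named above already built in the
# default graph):
#
#   val_auc = perform_test(sess, val_init_op, summary_writer=writer, epoch=epoch)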
|
istio/tests/cases/11_zipkin_spec.py | pxzero/nginmesh | 402 | 12656538 |
import requests
import time
import configuration
from mamba import description, context, it
from expects import expect, be_true, have_length, equal, be_a, have_property, be_none
headers = {'content-type': 'application/json','accept': 'application/json'}
with description('Zipkin tracing functionality'):
with before.all:
#Read Config file
configuration.setenv(self)
with context('Deploy Zipkin and make sure port forwarded to localhost'):
with it('Bookinfo Zipkin tracing feature'):
for _ in range(10):
r = requests.get(self.url)
r.status_code
expect(r.status_code).to(equal(200))
r1=requests.get(self.zipkin)
r1.status_code
expect(r1.status_code).to(equal(200))
if 'productpage' in r1.text:
expect(0).to(equal(0))
else:
expect(0).not_to(equal(0))
configuration.generate_request(self)
|
mono/datasets/splits/kitti_shot_sequence/gen_split.py | Jenaer/FeatDepth | 179 | 12656565 |
if __name__ == "__main__":
f = open('val_files.txt', 'w')
for i in range(108):
f.writelines(['2011_09_26/2011_09_26_drive_0001_sync ', str(i).zfill(10), ' l\n'])
f.close()
print('done') |
igibson/examples/behavior/behavior_robot_related_states_demo.py | suresh-guttikonda/iGibson | 360 | 12656576 | import os
import bddl
import igibson
from igibson import object_states
from igibson.examples.behavior import behavior_demo_replay
bddl.set_backend("iGibson")
def robot_states_callback(igbhvr_act_inst, _):
window1 = (igbhvr_act_inst.object_scope["window.n.01_1"], "kitchen")
window2 = (igbhvr_act_inst.object_scope["window.n.01_2"], "living room")
windows = [window1, window2]
for window, roomname in windows:
print(
"%s window is inFOV: %r, inSameRoom: %r, inReach: %r"
% (
roomname,
window.states[object_states.InFOVOfRobot].get_value(),
window.states[object_states.InSameRoomAsRobot].get_value(),
window.states[object_states.InReachOfRobot].get_value(),
)
)
rag = igbhvr_act_inst.object_scope["rag.n.01_1"]
print("Rag is in hand: %r" % rag.states[object_states.InHandOfRobot].get_value())
agent = igbhvr_act_inst.object_scope["agent.n.01_1"]
print(
"Agent is in kitchen: %r, living room: %r, bedroom: %r."
% (
agent.states[object_states.IsInKitchen].get_value(),
agent.states[object_states.IsInLivingRoom].get_value(),
agent.states[object_states.IsInBedroom].get_value(),
)
)
def main():
DEMO_FILE = os.path.join(igibson.ig_dataset_path, "tests", "cleaning_windows_0_Rs_int_2021-05-23_23-11-46.hdf5")
behavior_demo_replay.replay_demo(
DEMO_FILE, disable_save=True, step_callbacks=[robot_states_callback], mode="headless"
)
if __name__ == "__main__":
main()
|
pkgs/conda-env-2.5.2-py27_0/lib/python2.7/site-packages/conda_env/yaml.py | wangyum/anaconda | 107 | 12656585 |
"""
Wrapper around yaml to ensure that everything is ordered correctly.
This is based on the answer at http://stackoverflow.com/a/16782282
"""
from __future__ import absolute_import, print_function
from collections import OrderedDict
import yaml
def represent_ordereddict(dumper, data):
value = []
for item_key, item_value in data.items():
node_key = dumper.represent_data(item_key)
node_value = dumper.represent_data(item_value)
value.append((node_key, node_value))
return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value)
yaml.add_representer(OrderedDict, represent_ordereddict)
dump = yaml.dump
load = yaml.load
dict = OrderedDict
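# Illustrative behaviour (assumes default PyYAML block-style output): dumping
# an OrderedDict keeps insertion order instead of sorting keys.
#
#   env = dict([("name", "demo"), ("channels", ["defaults"]), ("dependencies", ["python"])])
#   dump(env)  # "name: demo\nchannels:\n- defaults\ndependencies:\n- python\n"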
|
citeomatic/tasks.py | IntheGrass/citeomatic_learning | 162 | 12656614 | #!/usr/bin/env python
"""
Luigi pipeline for Citeomatic.
This includes tasks for fetching the dataset, building a vocabulary and
training features, and training/evaluating the model.
"""
import logging
import os
import zipfile
from os import path
import luigi
from citeomatic import file_util, features, training, corpus
from citeomatic.features import Featurizer
from citeomatic.models.options import ModelOptions
from citeomatic.serialization import import_from
from luigi.util import inherits
logger = logging.getLogger('citeomatic.tasks')
import faulthandler
faulthandler.enable()
class SharedParameters(luigi.Task):
base_dir = luigi.Parameter(default=path.expanduser('~/citeomatic-data/'))
@property
def data_dir(self):
return self.base_dir + '/data'
@property
def model_dir(self):
return self.base_dir + '/model'
def log(self, msg, *args):
logger.info(msg, *args)
class DownloadCorpus(SharedParameters):
corpus_url = luigi.Parameter(
default=
'https://s3-us-west-2.amazonaws.com/ai2-s2-research-public/2017-02-21/papers-2017-02-21.zip'
)
def output(self):
json_name = self.corpus_url.split('/')[-1]
json_name = json_name.replace('.zip', '.json.gz')
return luigi.LocalTarget(path.join(self.data_dir, json_name))
def run(self):
self.output().makedirs()
output_dir = path.dirname(self.output().path)
output_filename = self.output().path
assert os.system(
'curl "%s" > "%s/papers.zip.tmp"' % (self.corpus_url, output_dir)
) == 0
with zipfile.ZipFile('%s/papers.zip.tmp' % output_dir) as zf:
for name in zf.namelist():
if name.endswith('.json.gz'):
zf.extract(name, output_dir)
break
#assert os.unlink('%s/papers.zip.tmp' % output_dir) == 0
class BuildCorpus(SharedParameters):
def requires(self):
return {'corpus': DownloadCorpus()}
def output(self):
corpus_suffix = self.requires()['corpus'].corpus_url.split('/')[-1]
corpus_name = corpus_suffix.replace('.zip', '.sqlite')
return luigi.LocalTarget(path.join(self.data_dir, corpus_name))
def run(self):
try:
corpus.build_corpus(self.output().path + '.tmp', self.input()['corpus'].path)
os.rename(self.output().path + '.tmp', self.output().path)
except:
os.system("rm -rf '%s'" % self.output().path + '.tmp')
raise
class CreateFeaturizer(SharedParameters):
training_fraction = luigi.FloatParameter(default=0.8)
max_features = luigi.IntParameter(default=100000000)
name = luigi.Parameter('default')
def requires(self):
return {'corpus': BuildCorpus()}
def output(self):
return luigi.LocalTarget(
path.join(self.model_dir, 'featurizer-%s.pickle' % self.name)
)
def run(self):
logger.info(
"Loading corpus from file %s " % self.input()['corpus'].path
)
c = corpus.Corpus.load(self.input()['corpus'].path, self.training_fraction)
logger.info("Fitting featurizer and making cache...")
featurizer = Featurizer(max_features=self.max_features)
featurizer.fit(c)
self.output().makedirs()
file_util.write_pickle(self.output().path, featurizer)
class TrainModel(SharedParameters):
model_config = luigi.Parameter()
experiment_name = luigi.Parameter(default='v0')
def requires(self):
return {'featurizer': CreateFeaturizer(), 'corpus': BuildCorpus()}
def output(self):
return luigi.LocalTarget(
path.join(self.model_dir, self.experiment_name, 'weights.h5')
)
def run(self):
featurizer = file_util.read_pickle(self.input()['featurizer'].path)
        # Avoid shadowing the imported `corpus` module with the loaded corpus.
        training_corpus = corpus.Corpus.load(self.input()['corpus'].path)
        model_options = ModelOptions.load(self.model_config)
        model_options.n_authors = featurizer.n_authors
        model_options.n_features = featurizer.n_features
        citeomatic_model, embedding_model = training.train_text_model(
            training_corpus,
featurizer,
model_options,
embedding_model_for_ann=None,
debug=False,
tensorboard_dir=None
)
self.output().makedirs()
citeomatic_model.save_weights(
path.join(self.output().path, 'weights.h5'), overwrite=True
)
embedding_model.save_weights(
path.join(self.output().path, 'embedding.h5'), overwrite=True
)
file_util.write_json(
model_options.to_json(),
path.join(self.output().path, 'options.json')
)
class TestModel(SharedParameters):
def requires(self):
return {
'featurizer': CreateFeaturizer(),
'corpus': DownloadCorpus(),
'model': TrainModel(),
}
def run(self):
from citeomatic.scripts.evaluate_citeomatic_model import \
TestCiteomatic
test_app = TestCiteomatic(
model_dir=self.output_dir(),
test_samples=self.test_samples,
min_citation_count=10,
corpus_path=self._corpus_path('corpus.msgpack'),
filter_method='es',
)
test_app.main([])
if __name__ == '__main__':
from luigi.cmdline import luigi_run
luigi_run()
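# Illustrative invocation (assumes luigi is installed and this module is
# importable; flag names follow luigi's parameter naming convention):
#
#   python -m citeomatic.tasks TrainModel \
#       --model-config path/to/model_options.json \
#       --experiment-name v0 --local-scheduler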
|
tests/test_hyper.py | Vermeille/Torchelie | 117 | 12656625 | import os
from contextlib import suppress
from torchelie.hyper import HyperparamSearch, UniformSampler
def beale(x, y):
return (1.5 - x + x * y)**2 + (2.25 - x + x * y**2)**2 + (2.625 - x +
x * y**3)**2
def sphere(x, y):
return x**2 + y**2
def rosen(x, y):
return 100 * (y - x**2)**2 + (1 - x)**2
hpsearch = HyperparamSearch(x=UniformSampler(-4.5, 4.5),
y=UniformSampler(-4.5, 4.5))
with suppress(FileNotFoundError):
os.remove('hpsearch.json')
print(beale(3, 0.5))
for _ in range(30):
hps = hpsearch.sample(algorithm='gp', target='out')
out = -beale(**hps.params)
print(hps, '\t', out)
hpsearch.log_result(hps, {'out': out})
|
rllib/utils/schedules/linear_schedule.py | firebolt55439/ray | 21,382 | 12656626 |
from ray.rllib.utils.schedules.polynomial_schedule import PolynomialSchedule
class LinearSchedule(PolynomialSchedule):
"""
Linear interpolation between `initial_p` and `final_p`. Simply
uses Polynomial with power=1.0.
final_p + (initial_p - final_p) * (1 - `t`/t_max)
"""
def __init__(self, **kwargs):
super().__init__(power=1.0, **kwargs)
|
connectorx-python/connectorx/tests/benchmarks.py | ives9638/connector-x | 565 | 12656634 | """
This file is skipped during normal test because the file name is not started with benchmarks
"""
import os
from .. import read_sql
def read_sql_impl(conn: str, table: str):
read_sql(
conn,
f"""SELECT * FROM {table}""",
partition_on="L_ORDERKEY",
partition_num=10,
)
def bench_mysql(benchmark):
benchmark(read_sql_impl, os.environ["MYSQL_URL"], os.environ["TPCH_TABLE"])
def bench_postgres(benchmark):
benchmark(read_sql_impl,
os.environ["POSTGRES_URL"], os.environ["TPCH_TABLE"])
|
homeassistant/components/horizon/__init__.py | domwillcode/home-assistant | 30,023 | 12656666 | """The horizon component."""
|
DeepCpG_DNA/template/prepare_model_yaml.py | Luma-1994/lama | 137 | 12656688 | import yaml
import keras
import json
import shutil
import os
from deepcpg.utils import make_dir, to_list
from deepcpg.models.utils import decode_replicate_names, encode_replicate_names, get_sample_weights
##### This function is needed to extract info on model architecture so that the output can be generated correctly.
def data_reader_config_from_model(model, config_out_fpath = None, replicate_names=None):
"""Return :class:`DataReader` from `model`.
Builds a :class:`DataReader` for reading data for `model`.
Parameters
----------
model: :class:`Model`.
:class:`Model`.
outputs: bool
If `True`, return output labels.
replicate_names: list
Name of input cells of `model`.
Returns
-------
:class:`DataReader`
Instance of :class:`DataReader`.
"""
use_dna = False
dna_wlen = None
cpg_wlen = None
output_names = None
encode_replicates = False
#
input_shapes = to_list(model.input_shape)
for input_name, input_shape in zip(model.input_names, input_shapes):
if input_name == 'dna':
# Read DNA sequences.
use_dna = True
dna_wlen = input_shape[1]
elif input_name.startswith('cpg/state/'):
# DEPRECATED: legacy model. Decode replicate names from input name.
replicate_names = decode_replicate_names(input_name.replace('cpg/state/', ''))
assert len(replicate_names) == input_shape[1]
cpg_wlen = input_shape[2]
encode_replicates = True
elif input_name == 'cpg/state':
# Read neighboring CpG sites.
if not replicate_names:
raise ValueError('Replicate names required!')
if len(replicate_names) != input_shape[1]:
tmp = '{r} replicates found but CpG model was trained with' \
' {s} replicates. Use `--nb_replicate {s}` or ' \
' `--replicate_names` option to select {s} replicates!'
tmp = tmp.format(r=len(replicate_names), s=input_shape[1])
raise ValueError(tmp)
cpg_wlen = input_shape[2]
output_names = model.output_names
config = {"output_names":output_names,
"use_dna":use_dna,
"dna_wlen":dna_wlen,
"cpg_wlen":cpg_wlen,
"replicate_names":replicate_names,
"encode_replicates":encode_replicates}
if config_out_fpath is not None:
with open(config_out_fpath, "w") as ofh:
json.dump(config, ofh)
return config
def make_model_yaml(template_yaml, model_json, output_yaml_path):
#
with open(template_yaml, 'r') as f:
model_yaml = yaml.load(f)
#
# get the model config:
json_file = open(model_json, 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = keras.models.model_from_json(loaded_model_json)
#
model_yaml["schema"]["targets"] = []
for oname, oshape in zip(loaded_model.output_names, loaded_model.output_shape):
append_el ={"name":oname , "shape":str(oshape)#replace("None,", "")
, "doc":"Methylation probability for %s"%oname}
model_yaml["schema"]["targets"].append(append_el)
#
with open(output_yaml_path, 'w') as f:
yaml.dump(model_yaml, f, default_flow_style=False)
def make_secondary_dl_yaml(template_yaml, model_json, output_yaml_path):
with open(template_yaml, 'r') as f:
model_yaml = yaml.load(f)
#
# get the model config:
json_file = open(model_json, 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = keras.models.model_from_json(loaded_model_json)
#
model_yaml["output_schema"]["targets"] = []
for oname, oshape in zip(loaded_model.output_names, loaded_model.output_shape):
append_el ={"name":oname , "shape":str(oshape)#replace("None,", "")
, "doc":"Methylation probability for %s"%oname}
model_yaml["output_schema"]["targets"].append(append_el)
#
with open(output_yaml_path, 'w') as f:
yaml.dump(model_yaml, f, default_flow_style=False)
import errno
def symlink_force(target, link_name):
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
def softlink_files(bpath, model_name):
print("Softlinking: {0}".format(model_name))
symlink_force(bpath+"template/dataloader.yaml",bpath+"{0}/dataloader.yaml".format(model_name))
#symlink_force("../template/model.yaml","{0}/model.yaml".format(model_name))
symlink_force(bpath+"template/dataloader.py",bpath+"{0}/dataloader.py".format(model_name))
symlink_force(bpath+"template/example_files",bpath+"{0}/example_files".format(model_name))
# prepare DeepCpG
deepcpg_bdir = "/nfs/research2/stegle/users/rkreuzhu/deepcpg/deepcpg-1.0.4/scripts/"
output_dir = "/nfs/research2/stegle/users/rkreuzhu/kipoi_models_fork/models/DeepCpG"
models = ["Hou2016_HepG2_dna", "Hou2016_HCC_dna", "Hou2016_mESC_dna", "Smallwood2014_serum_dna", "Smallwood2014_2i_dna"]
for model in models:
in_dir = os.path.join(deepcpg_bdir, model)
out_dir = os.path.join(output_dir, model)
model_files = os.path.join(out_dir, "model_files")
if not os.path.exists(out_dir):
os.makedirs(out_dir)
if not os.path.exists(model_files):
os.makedirs(model_files)
shutil.copy(os.path.join(in_dir, "model.json"), model_files)
shutil.copy(os.path.join(in_dir, "model_weights.h5"), model_files)
make_model_yaml(os.path.join(output_dir, "template", 'model_template.yaml'), os.path.join(model_files, "model.json"), os.path.join(out_dir, 'model.yaml'))
make_secondary_dl_yaml(os.path.join(output_dir, "template", 'dataloader_m_template.yaml'), os.path.join(model_files, "model.json"), os.path.join(out_dir, 'dataloader_m.yaml'))
    try:
        os.unlink(output_dir + "/" + model + "/dataloader_m.py")
    except OSError:
        # the file may not exist on the first run
        pass
shutil.copy(output_dir+ "/"+"template/dataloader_m.py",output_dir+ "/"+model)
softlink_files(output_dir+ "/", model)
#
# generate the model config file:
json_file = open(os.path.join(model_files, "model.json"), 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = keras.models.model_from_json(loaded_model_json)
data_reader_config_from_model(loaded_model, os.path.join(out_dir, 'model_config.json'), replicate_names=None)
for model in models:
out_dir = os.path.join(output_dir, model)
if os.path.isdir(out_dir):
command = "python /nfs/research2/stegle/users/rkreuzhu/opt/model-zoo/kipoi/__main__.py test %s"%out_dir
ret =os.system(command)
assert(ret==0)
## test with custom dataloader:
import kipoi
model = kipoi.get_model(out_dir, source="dir")
Dl = kipoi.get_dataloader_factory(out_dir + "/dataloader_m.yaml", source="dir") # fails
import os
os.chdir(out_dir)
import keras
import kipoi
from dataloader import *
from keras.models import load_model
from dataloader_m import Dataloader
samples = ["example_files/BS27_1_SER.tsv", "example_files/BS27_3_SER.tsv", "example_files/BS27_5_SER.tsv", "example_files/BS27_6_SER.tsv", "example_files/BS27_8_SER.tsv"]
ref = "example_files/mm10"
model = kipoi.get_model("./", source="dir")
data_loader = Dataloader(samples, ref, outputs = True)
# the inputs, outputs, weights can then be returned from the dataloader...
ret = data_loader.__next__()
for inputs, outputs, weights in data_loader:
preds = to_list(model.model.predict(inputs))
|
terrascript/provider/dme.py | hugovk/python-terrascript | 507 | 12656706 | <filename>terrascript/provider/dme.py
# terrascript/provider/dme.py
import terrascript
class dme(terrascript.Provider):
pass
__all__ = ["dme"]
|
tests/test_fetch.py | aldslvda/django-echarts | 181 | 12656715 | # coding=utf8
import unittest
from django_echarts.datasets.fetch import fetch, fetch_single, ifetch_multiple
DICT_LIST_DATA = [
{'id': 282, 'name': 'Alice', 'age': 30, 'sex': 'female'},
{'id': 217, 'name': 'Bob', 'age': 56},
{'id': 328, 'name': 'Charlie', 'age': 56, 'sex': 'male'},
]
class FetchTestCase(unittest.TestCase):
def test_fetch_single(self):
names = fetch_single(DICT_LIST_DATA, 'name')
self.assertListEqual(names, ['Alice', 'Bob', 'Charlie'])
sexs = fetch_single(DICT_LIST_DATA, 'sex', default='male')
self.assertListEqual(sexs, ['female', 'male', 'male'])
def test_ifetch_multiple(self):
names, ages = map(list, ifetch_multiple(DICT_LIST_DATA, 'name', 'age'))
self.assertListEqual(names, ['Alice', 'Bob', 'Charlie'])
self.assertListEqual(ages, [30, 56, 56])
def test_fetch(self):
names = fetch(DICT_LIST_DATA, 'name')
self.assertListEqual(names, ['Alice', 'Bob', 'Charlie'])
sexs = fetch(DICT_LIST_DATA, 'sex', default='male')
self.assertListEqual(sexs, ['female', 'male', 'male'])
names, ages = fetch(DICT_LIST_DATA, 'name', 'age')
self.assertListEqual(names, ['Alice', 'Bob', 'Charlie'])
self.assertListEqual(ages, [30, 56, 56])
names, ages, sexs = fetch(DICT_LIST_DATA, 'name', 'age', 'sex', defaults={'sex': 'male'})
self.assertListEqual(names, ['Alice', 'Bob', 'Charlie'])
self.assertListEqual(ages, [30, 56, 56])
self.assertListEqual(sexs, ['female', 'male', 'male'])
class MockItem:
def __init__(self, x, y, z):
self._data = {'x': x, 'y': y, 'z': z}
def get(self, key):
return self._data.get(key)
class FetchCustomGetterTestCase(unittest.TestCase):
def test_custom_getter(self):
data_list = [MockItem(1, 2, 3), MockItem(4, 5, 6), MockItem(7, 8, 9)]
xs, ys, zs = fetch(data_list, 'x', 'y', 'z', getter=lambda item, key: item.get(key))
self.assertListEqual([1, 4, 7], xs)
def test_with_dict(self):
"""
Use dict.get(key) to pick item.
"""
names, ages = fetch(DICT_LIST_DATA, 'name', 'age', getter=lambda item, key: item.get(key))
self.assertListEqual(names, ['Alice', 'Bob', 'Charlie'])
self.assertListEqual(ages, [30, 56, 56])
|
recipe_scrapers/kingarthur.py | mathiazom/recipe-scrapers | 811 | 12656735 | from bs4 import BeautifulSoup
from ._abstract import AbstractScraper
from ._utils import normalize_string
class KingArthur(AbstractScraper):
@classmethod
def host(cls):
return "kingarthurbaking.com"
def title(self):
return self.schema.title()
def total_time(self):
return self.schema.total_time()
def yields(self):
return self.schema.yields()
def image(self):
return self.schema.image()
def ingredients(self):
return self.schema.ingredients()
def instructions(self):
"""
        King Arthur updated their instruction format so that the schema `recipeInstructions`
        contains HTML (each step wrapped in a <p>). Parse the instructions assuming that
        markup first, and fall back to returning the raw schema instructions in case the
        format changes again.
"""
schema_instructions = self.schema.instructions()
soup = BeautifulSoup(schema_instructions, "html.parser")
instruction_elms = soup.findAll("p")
if instruction_elms:
return "\n".join(
[normalize_string(elm.get_text()) for elm in instruction_elms]
)
return schema_instructions
def ratings(self):
return self.schema.ratings()
|
code/final_face_slider_tool.py | waterjump/alignedCelebFaces | 190 | 12656762 | <reponame>waterjump/alignedCelebFaces
import html
import math
import pygame
import numpy as np
import tensorflow as tf
from pygame.locals import *
from scipy import misc
eigenvalues = np.load("eigenvalues.npy")
eigenvectors = np.load("eigenvectors.npy")
eigenvectorInverses = np.linalg.pinv(eigenvectors)
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
IMAGE_WIDTH = 64
IMAGE_HEIGHT = 64
IMAGE_COUNT = 13016
DENSE_SIZE = 300
learning_rate = 0.0002 # Used to be 0.001
settings = np.zeros((DENSE_SIZE,))
approach_settings = np.zeros((2,))
approach_settings.fill(1)
denseData = np.load("denseArray27K.npy")
shouldICalculateImage = True
f = open('names/allNames.txt', 'r+')
allNames = f.read()
f.close()
allPeople = html.unescape(allNames).split('\n')
f = open('eigenvalueNames.txt', 'r+')
eigenvalueNames = f.read().split('\n')
f.close()
nearestPerson = 0
meanData = denseData.mean(axis=0)
inputs_ = tf.placeholder(tf.float32, (None, IMAGE_HEIGHT, IMAGE_WIDTH, 3), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, IMAGE_HEIGHT, IMAGE_WIDTH, 3), name='targets')
""" Encoder """
conv0 = tf.layers.conv2d(inputs=inputs_, filters=120, kernel_size=(3, 3), padding='same', activation=tf.nn.relu)
# Now 64x64x25
maxpool0 = tf.layers.max_pooling2d(conv0, pool_size=(2, 2), strides=(2, 2), padding='same')
# Now 32x32x25
conv1 = tf.layers.conv2d(inputs=maxpool0, filters=160, kernel_size=(3, 3), padding='same', activation=tf.nn.relu)
# Now 32x32x40
maxpool1 = tf.layers.max_pooling2d(conv1, pool_size=(2, 2), strides=(2, 2), padding='same')
# Now 16x16x40
conv2 = tf.layers.conv2d(inputs=maxpool1, filters=200, kernel_size=(3, 3), padding='same', activation=tf.nn.relu)
# Now 16x16x60
maxpool2 = tf.layers.max_pooling2d(conv2, pool_size=(2, 2), strides=(2, 2), padding='same')
# Now 8x8x60
conv3 = tf.layers.conv2d(inputs=maxpool2, filters=240, kernel_size=(3, 3), padding='same', activation=tf.nn.relu)
# Now 8x8x80
maxpool3 = tf.layers.max_pooling2d(conv3, pool_size=(2, 2), strides=(2, 2), padding='same')
# Now 4x4x80
maxpool3_flat = tf.reshape(maxpool3, [-1, 4 * 4 * 240])
W_fc1 = weight_variable([4 * 4 * 240, 300])
b_fc1 = bias_variable([300])
tesy = tf.matmul(maxpool3_flat, W_fc1)  # redundant; the same product is recomputed inside the relu below
encoded = tf.nn.relu(tf.matmul(maxpool3_flat, W_fc1) + b_fc1)
W_fc2 = weight_variable([300, 4 * 4 * 240])
b_fc2 = bias_variable([4 * 4 * 240])
predecoded_flat = tf.nn.relu(tf.matmul(encoded, W_fc2) + b_fc2)
predecoded = tf.reshape(predecoded_flat, [-1, 4, 4, 240])
""" Decoder """
upsample1 = tf.image.resize_images(predecoded, size=(8, 8), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# Now 8x8x80
conv4 = tf.layers.conv2d(inputs=upsample1, filters=200, kernel_size=(3, 3), padding='same', activation=tf.nn.relu)
# Now 8x8x60
upsample2 = tf.image.resize_images(conv4, size=(16, 16), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# Now 16x16x60
conv5 = tf.layers.conv2d(inputs=upsample2, filters=160, kernel_size=(3, 3), padding='same', activation=tf.nn.relu)
# Now 16x16x40
upsample3 = tf.image.resize_images(conv5, size=(32, 32), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# Now 32x32x40
conv6 = tf.layers.conv2d(inputs=upsample3, filters=120, kernel_size=(3, 3), padding='same', activation=tf.nn.relu)
# Now 32x32x25
upsample4 = tf.image.resize_images(conv6, size=(64, 64), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
# Now 64x64x25
conv7 = tf.layers.conv2d(inputs=upsample4, filters=15, kernel_size=(3, 3), padding='same', activation=tf.nn.relu)
# Now 64x64x10
logits = tf.layers.conv2d(inputs=conv7, filters=3, kernel_size=(3, 3), padding='same', activation=None)
# Now 64x64x1
# Pass logits through sigmoid to get reconstructed image
decoded = tf.nn.sigmoid(logits)
# Pass logits through sigmoid and calculate the cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
# Get cost and define the optimizer
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(learning_rate).minimize(cost)
sess = tf.Session()
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
saver.restore(sess, "models/model27674.ckpt")
def get_celeb_sliders(i):
traits = denseData[i] - meanData
return np.matmul(traits, eigenvectorInverses) / eigenvalues
celebSliders = np.zeros(denseData.shape)
for i in range(denseData.shape[0]):
celebSliders[i] = get_celeb_sliders(i)
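# Sanity-check sketch (an assumption for illustration only, not executed by the tool):
# celebSliders is just denseData expressed in the PCA basis, so the mapping should
# round-trip back to the original dense vector, e.g.
#
#   i = 0
#   reconstructed = meanData + np.matmul(celebSliders[i] * eigenvalues, eigenvectors)
#   assert np.allclose(reconstructed, denseData[i], atol=1e-3)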
def calculate_image(settings):
real_settings = meanData.copy()
for i in range(DENSE_SIZE):
real_settings += settings[i] * eigenvalues[i] * eigenvectors[i]
real_settings = real_settings.reshape((1, DENSE_SIZE))
reconstructed_image = sess.run([decoded], feed_dict={encoded: real_settings})
ri_np = np.array(reconstructed_image).reshape((64, 64, 3))
ri_np = np.swapaxes(ri_np, 0, 1)
closest_celeb = np.argmin(np.linalg.norm((settings - celebSliders) * eigenvalues, axis=1))
return ri_np * 255, closest_celeb
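# Note (illustrative equivalence, not part of the original UI): the per-component
# loop in calculate_image is the same as a single vectorized expression,
#
#   real_settings = meanData + np.matmul(settings * eigenvalues, eigenvectors)
#
# i.e. the sliders are PCA coordinates that get rescaled by the eigenvalues and
# rotated back into the 300-d dense vector the decoder expects.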
def create_special_children(index, parent_count, file_name):
total_image = np.zeros((240 + 264 * parent_count, 300, 3))
total_image.fill(255)
parents = [-1] * parent_count
    child_settings = np.zeros(celebSliders[0].shape)
for i in range(0, parent_count):
        while parents[i] == -1 or int(allPeople[parents[i]].split(",")[1]) > 10:  # fame rank must be 10 or better
            parents[i] = np.random.randint(IMAGE_COUNT)
        parents[i] = 13015  # NOTE: hard-coded override pins every parent to celeb index 13015 (appears to be a leftover one-off setting)
child_settings += celebSliders[parents[i]]
child_settings /= parent_count
for i in range(0, parent_count + 1):
if i == parent_count:
img, _ = calculate_image(child_settings)
else:
img, _ = calculate_image(celebSliders[parents[i]])
# img = np.swapaxes(misc.imread("data/dataFace"+str(parents[i])+".png"),0,1)
total_image[24 + i * 264:216 + i * 264, 24:216] = misc.imresize(img, size=[192, 192], interp='nearest')
blah = pygame.surfarray.make_surface(total_image)
for i in range(0, parent_count + 1):
name = "CHILD"
if i < parent_count:
name = allPeople[parents[i]].split(",")[0]
font = pygame.font.SysFont("Helvetica", 22)
text_surface = font.render(name, 1, (0, 0, 0))
blah.blit(text_surface, [24 + 264 * i, 220])
if i < parent_count - 1:
font = pygame.font.SysFont("Helvetica", 48)
blah.blit(font.render('^', 1, (0, 0, 0)), [240 + 264 * i, 100])
font = pygame.font.SysFont("Helvetica", 48)
blah.blit(font.render('=', 1, (0, 0, 0)), [240 + 264 * (parent_count - 1), 100])
pygame.image.save(blah, "spesh/" + file_name + "{:03d}".format(index) + ".png")
def create_child_grid(index, parent_count, file_name):
total_image = np.zeros((264 + 264 * parent_count, 264 + 264 * parent_count, 3))
total_image[264:, 264:, :] = 255
parents = [-1] * parent_count
for i in range(0, parent_count):
parents[i] = np.random.randint(IMAGE_COUNT)
img, _ = calculate_image(celebSliders[parents[i]])
# img = np.swapaxes(misc.imread("data/dataFace"+str(parents[i])+".png"),0,1)
big_img = misc.imresize(img, size=[192, 192], interp='nearest')
total_image[24 + (i + 1) * 264:216 + (i + 1) * 264, 24:216] = big_img
total_image[24:216, 24 + (i + 1) * 264:216 + (i + 1) * 264] = big_img
total_image[264 * (i + 1):264 * (i + 2), 264 * (i + 1):264 * (i + 2)] = [0, 255, 0]
for i in range(0, parent_count):
for j in range(0, parent_count):
child_settings = (celebSliders[parents[i]] + celebSliders[parents[j]]) / 2
img, _ = calculate_image(child_settings)
total_image[24 + (i + 1) * 264:216 + (i + 1) * 264, 24 + (j + 1) * 264:216 + (j + 1) * 264] = misc.imresize(
img, size=[192, 192], interp='nearest')
blah = pygame.surfarray.make_surface(total_image)
for i in range(0, parent_count):
name = allPeople[parents[i]].split(",")[0]
font = pygame.font.SysFont("Helvetica", 22)
text_surface = font.render(name, 1, (255, 255, 255))
blah.blit(text_surface, [24 + 264 * (i + 1), 220])
blah.blit(text_surface, [24, 220 + 264 * (i + 1)])
pygame.image.save(blah, "spesh/{}{:03d}.png".format(file_name, index))
def create_family_tree(index, parent_count, file_name):
total_image = np.zeros((264 * parent_count, 264 * parent_count, 3))
total_image.fill(255)
parents = [-1] * parent_count
allSettings = np.zeros((parent_count, parent_count, celebSliders[0].shape[0]))
for i in range(0, parent_count):
parents[i] = np.random.randint(IMAGE_COUNT)
allSettings[0, i] = celebSliders[parents[i]]
img, _ = calculate_image(celebSliders[parents[i]])
# img = np.swapaxes(misc.imread("data/dataFace"+str(parents[i])+".png"),0,1)
big_img = misc.imresize(img, size=[192, 192], interp='nearest')
total_image[24 + i * 264:216 + i * 264, 40:232] = big_img
for level in range(1, parent_count):
for i in range(0, parent_count - level):
allSettings[level, i] = (allSettings[level - 1, i] + allSettings[level - 1, i + 1]) * 0.5
img, _ = calculate_image(allSettings[level, i])
x_start = 24 + i * 264 + level * 132
y_start = 40 + level * 264
total_image[x_start:x_start + 192, y_start:y_start + 192] = misc.imresize(img, size=[192, 192],
interp='nearest')
total_image[x_start + 92:x_start + 100, y_start - 32:y_start] = 0
total_image[x_start:x_start + 192, y_start - 40:y_start - 32] = 0
total_image[x_start:x_start + 8, y_start - 72:y_start - 40] = 0
total_image[x_start + 184:x_start + 192, y_start - 72:y_start - 40] = 0
blah = pygame.surfarray.make_surface(total_image)
for i in range(0, parent_count):
name = allPeople[parents[i]].split(",")[0]
font = pygame.font.SysFont("Helvetica", 22)
text_surface = font.render(name, 1, (0, 0, 0))
blah.blit(text_surface, [20 + 264 * i, 14])
pygame.image.save(blah, "spesh/{}{:03d}.png".format(file_name, index))
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 50, 50)
YELLOW = (255, 255, 0)
GREEN = (0, 255, 50)
BLUE = (50, 50, 255)
GREY = (200, 200, 200)
ORANGE = (200, 100, 50)
CYAN = (0, 255, 255)
MAGENTA = (255, 0, 255)
TRANS = (1, 1, 1)
VISIBLE_COMPONENTS = 20
enteringName = False
isShiftPressed = False
enteredName = ""
frameTimer = 0
misspelledTimer = 0
scrollPosition = 0
stringToNameDict = {}
transitionTimes = np.zeros((2,))
transitionKeyFrames = np.zeros((2, DENSE_SIZE))
for i in range(len(allPeople)):
line = allPeople[i]
pieces = line.split(",")
name = pieces[0]
alpha_only_name = ''.join(x for x in name if (x.isalpha() or x == ' '))
lower_name = alpha_only_name.lower()
if len(lower_name) >= 1:
stringToNameDict[lower_name] = i
def string_to_celeb(st):
alpha_only_name = ''.join(x for x in st if (x.isalpha() or x == ' '))
lower_name = alpha_only_name.lower()
if lower_name not in stringToNameDict:
return -1
return stringToNameDict[lower_name]
oops_image = pygame.image.load("oops.png")
imagerect = oops_image.get_rect()
calculatedImage, nearestPerson = calculate_image(settings)
class Slider():
def __init__(self, i, maxi, mini, x, y, w, h):
self.maxi = maxi
self.mini = mini
self.x = x
self.y = y
self.w = w
self.h = h
self.surf = pygame.surface.Surface((w, h))
self.hit = False
self.i = i
self.font = pygame.font.SysFont("Helvetica", 16)
def true_i(self):
return self.i + scrollPosition
def draw(self):
j = self.true_i()
eigen = "%.4f" % eigenvalues[j]
name = "PCA #" + str(self.true_i() + 1) + " (" + eigen + ")"
if j < len(eigenvalueNames) - 1:
name = eigenvalueNames[j]
txt_surf = self.font.render(name, 1, WHITE)
txt_rect = txt_surf.get_rect(center=(self.w / 2, 13))
s = 70
if self.i % 2 + (self.i // 2) % 2 == 1:
s = 100
self.surf.fill((s, s, s))
pygame.draw.rect(self.surf, (220, 220, 220), [10, 30, self.w - 20, 5], 0)
for g in range(7):
pygame.draw.rect(self.surf, (s + 50, s + 50, s + 50), [9 + (self.w - 20) / 6 * g, 40, 2, 5], 0)
self.surf.blit(txt_surf, txt_rect)
button_surf = pygame.surface.Surface((10, 20))
button_surf.fill(TRANS)
button_surf.set_colorkey(TRANS)
pygame.draw.rect(button_surf, WHITE, [0, 0, 10, 20])
surf = self.surf.copy()
v = min(max(settings[j], -9999), 9999)
pos = (10 + int((v - self.mini) / (self.maxi - self.mini) * (self.w - 20)), 33)
button_rect = button_surf.get_rect(center=pos)
surf.blit(button_surf, button_rect)
button_rect.move_ip(self.x, self.y)
screen.blit(surf, (self.x, self.y))
def move(self):
j = self.true_i()
settings[j] = (pygame.mouse.get_pos()[0] - self.x - 10) / 130 * (self.maxi - self.mini) + self.mini
if settings[j] < self.mini:
settings[j] = self.mini
if settings[j] > self.maxi:
settings[j] = self.maxi
class ApproachSlider():
def __init__(self, i, maxi, mini, x, y, w, h):
self.maxi = maxi
self.mini = mini
self.x = x
self.y = y
self.w = w
self.h = h
self.surf = pygame.surface.Surface((w, h))
self.hit = False
self.i = i
self.font = pygame.font.SysFont("Helvetica", 16)
def draw(self):
if self.i == 0:
st = "Go " + "%.1f" % (100 * approach_settings[self.i]) + "% the way to this celeb."
else:
st = "Speed of travel: " + "%.2f" % (100 * (1 - approach_settings[self.i])) + " frames"
txt_surf = self.font.render(st, 1, WHITE)
txt_rect = txt_surf.get_rect(center=(self.w / 2, 13))
s = 70 + 30 * self.i
self.surf.fill((s, s, s))
pygame.draw.rect(self.surf, (220, 220, 220), [10, 35, self.w - 20, 5], 0)
self.surf.blit(txt_surf, txt_rect)
button_surf = pygame.surface.Surface((10, 30))
button_surf.fill(TRANS)
button_surf.set_colorkey(TRANS)
pygame.draw.rect(button_surf, WHITE, [0, 0, 10, 30])
surf = self.surf.copy()
v = min(max(approach_settings[self.i], -9999), 9999)
pos = (10 + int((v - self.mini) / (self.maxi - self.mini) * (self.w - 20)), 38)
button_rect = button_surf.get_rect(center=pos)
surf.blit(button_surf, button_rect)
button_rect.move_ip(self.x, self.y)
screen.blit(surf, (self.x, self.y))
def move(self):
approach_settings[self.i] = (pygame.mouse.get_pos()[0] - self.x - 10) / (self.w - 20) * (
self.maxi - self.mini) + self.mini
if approach_settings[self.i] < self.mini:
approach_settings[self.i] = self.mini
if approach_settings[self.i] > self.maxi:
approach_settings[self.i] = self.maxi
def draw_buttons():
enb_shade = 200
if enteringName:
enb_shade = math.sin(frameTimer * 0.06) * 40 + 200
enter_name_button = pygame.surface.Surface((300, 120))
pygame.draw.rect(enter_name_button, (enb_shade, enb_shade, enb_shade), [5, 5, 290, 110], 0)
st = "[Enter celeb name]"
if len(enteredName) >= 1:
st = enteredName
button_data = [
[(0, 0), (300, 60), (230, 30, 30), 44, "RANDOMIZE", WHITE],
[(0, 540), (300, 60), (30, 30, 230), 44, "GO TO MEAN", WHITE],
[(800, 0), (300, 120), (230, 170, 30), 44, "INVERT", WHITE],
[(300, 500), (500, 100), (0, 0, 0), 24, "Hey! You look like " + allPeople[nearestPerson].split(",")[0] + ".",
WHITE],
[(800, 120), (300, 120), (enb_shade, enb_shade, enb_shade), 30, st, BLACK],
[(800, 360), (300, 120), (30, 170, 30), 44, "GO TO THEM", WHITE],
[(800, 480), (300, 120), (30, 170, 30), 24, "GO TO RANDOM CELEB", WHITE]]
for button in button_data:
button_surface = pygame.surface.Surface(button[1])
pygame.draw.rect(button_surface, button[2], [5, 5, button[1][0] - 10, button[1][1] - 10], 0)
font = pygame.font.SysFont("Helvetica", button[3])
b_text = font.render(button[4], 1, button[5])
b_text_rect = b_text.get_rect(center=(button[1][0] / 2, button[1][1] / 2))
button_surface.blit(b_text, b_text_rect)
screen.blit(button_surface, button[0])
if transitionTimes[0] >= 0:
w = 290 * (frameTimer - transitionTimes[0]) / (transitionTimes[1] - transitionTimes[0])
progress_bar_surface = pygame.surface.Surface((w, 25))
progress_bar_surface.fill((0, 150, 0))
screen.blit(progress_bar_surface, (805, 125))
image_surface = pygame.surfarray.make_surface(calculatedImage)
bigger = pygame.transform.scale(image_surface, (500, 500))
screen.blit(bigger, (300, 0))
if misspelledTimer >= 1:
y = 5
if misspelledTimer < 60:
y = -115 + 120 * (0.5 + math.cos((misspelledTimer - 60) / 60.0 * math.pi) * 0.5)
screen.blit(oops_image, (805, y))
# go_to_celeb returns how many frames the "misspelled name" warning should stay on screen (0 if the name was found).
def go_to_celeb(c):
celeb_choice = string_to_celeb(enteredName)
if c >= 0:
celeb_choice = c
if celeb_choice == -1:
return 800
else:
slider_settings = celebSliders[celeb_choice]
if approach_settings[1] == 1:
for i in range(DENSE_SIZE):
                settings[i] += (slider_settings[i] - settings[i]) * approach_settings[0]  # move the chosen fraction of the way toward the celeb
else:
transitionKeyFrames[0] = settings.copy()
transitionKeyFrames[1] = settings.copy()
for i in range(DENSE_SIZE):
                transitionKeyFrames[1, i] += (slider_settings[i] - settings[i]) * approach_settings[0]
transitionTimes[0] = frameTimer - 1
transitionTimes[1] = frameTimer - 1 + 100 * (1 - approach_settings[1]) # really bad magic numbers oh well
return 0
pygame.init()
slides = []
for i in range(VISIBLE_COMPONENTS):
eigen = "%.4f" % eigenvalues[i]
slides.append(Slider(i, 3, -3, (i % 2) * 150, (i // 2) * 48 + 60, 150, 48))
approachSlides = []
for i in range(2):
approachSlides.append(ApproachSlider(i, 1, 0, 800, 240 + 60 * i, 300, 60))
screen = pygame.display.set_mode((1100, 600))
running = True
# OPTIONAL SPECIAL CHILD CREATION
# create_special_children(0,1,"speshCaryD")
# for i in range(0,2):
# create_special_children(i,2,"speshTwo")
# create_child_grid(0,6,"speshGrid")
# create_family_tree(0,12,"speshFamilyHuge")
# END OF OPTIONAL SPECIAL CHILD CREATION
while running:
shouldICalculateImage = False
frameTimer += 1
misspelledTimer = max(0, misspelledTimer - 1)
for event in pygame.event.get():
# Check for KEYDOWN event; KEYDOWN is a constant defined in pygame.locals, which we imported earlier
if event.type == KEYDOWN:
# If the Esc key has been pressed set running to false to exit the main loop
if event.key == K_LSHIFT or event.key == K_RSHIFT:
isShiftPressed = True
elif event.key == K_ESCAPE:
running = False
elif enteringName:
k = event.key
isLetter = ord('a') <= k <= ord('z')
if isLetter or k == ord('-') or k == ord(' ') or k == ord('\''):
ch = event.unicode
if isShiftPressed and isLetter:
ch = ch.upper()
enteredName = enteredName + ch
if len(enteredName) >= 1 and (k == K_BACKSPACE or k == K_DELETE):
enteredName = enteredName[0:-1]
if k == K_RETURN:
enteringName = False
misspelledTimer = go_to_celeb(-1)
shouldICalculateImage = True
# Check for QUIT event; if QUIT, set running to false
elif event.type == KEYUP:
if event.key == K_LSHIFT or event.key == K_RSHIFT:
isShiftPressed = False
elif event.type == QUIT:
running = False
elif event.type == pygame.MOUSEBUTTONDOWN:
mouse_loc = pygame.mouse.get_pos()
if event.button == 4 or event.button == 5:
dire = (event.button - 4.5) * 2
if mouse_loc[0] < 300 and 60 <= mouse_loc[1] < 540:
i = (mouse_loc[0] // 150) + ((mouse_loc[1] - 60) // 48) * 2 + scrollPosition
settings[i] -= 0.2 * dire
shouldICalculateImage = True
else:
scrollPosition = min(max(scrollPosition + 2 * int(dire), 0),
denseData.shape[1] - VISIBLE_COMPONENTS)
for i in range(VISIBLE_COMPONENTS):
slides[i].val = settings[i + scrollPosition]
else:
enteringName = False
if mouse_loc[0] < 300:
if mouse_loc[1] < 60:
for i in range(DENSE_SIZE):
settings[i] = np.random.normal(0, 1, 1)
shouldICalculateImage = True
enteredName = ""
elif mouse_loc[1] >= 540:
for i in range(DENSE_SIZE):
settings[i] = 0
shouldICalculateImage = True
enteredName = ""
else:
i = (mouse_loc[0] // 150) + ((mouse_loc[1] - 60) // 48) * 2
slides[i].hit = True
elif mouse_loc[0] >= 800:
if mouse_loc[1] < 120:
for i in range(DENSE_SIZE):
settings[i] *= -1
shouldICalculateImage = True
misspelledTimer = 0
enteredName = ""
elif mouse_loc[1] < 240:
enteringName = True
misspelledTimer = 0
enteredName = ""
elif 240 <= mouse_loc[1] < 360:
i = ((mouse_loc[1] - 240) // 60)
approachSlides[i].hit = True
elif mouse_loc[1] >= 480:
c = np.random.randint(denseData.shape[0])
go_to_celeb(c)
shouldICalculateImage = True
enteredName = allPeople[c].split(",")[0]
elif mouse_loc[1] >= 360:
misspelledTimer = go_to_celeb(-1)
shouldICalculateImage = True
elif event.type == pygame.MOUSEBUTTONUP:
for s in slides:
s.hit = False
for a_s in approachSlides:
a_s.hit = False
if transitionTimes[0] >= 0:
proportion_through = min(max((frameTimer - transitionTimes[0]) / (transitionTimes[1] - transitionTimes[0]), 0),
1)
if frameTimer >= transitionTimes[1]:
proportion_through = 1
transitionTimes[:] = -1
settings = transitionKeyFrames[0] + proportion_through * (transitionKeyFrames[1] - transitionKeyFrames[0])
shouldICalculateImage = True
else:
for s in slides:
if s.hit:
s.move()
shouldICalculateImage = True
for a_s in approachSlides:
if a_s.hit:
a_s.move()
if shouldICalculateImage:
calculatedImage, nearestPerson = calculate_image(settings)
screen.fill(BLACK)
for s in slides:
s.draw()
for a_s in approachSlides:
a_s.draw()
draw_buttons()
pygame.display.flip()
|
examples/srl_example_setup.py | david-abel/mdps | 230 | 12656769 | '''Set up the import path so the example scripts run (even without simple_rl fully installed).'''
import os
import sys
parent_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
sys.path.insert(0, parent_dir) |
lightnlp/utils/score_func.py | CNLPT/lightNLP | 889 | 12656802 | <filename>lightnlp/utils/score_func.py
import torch
import torch.nn as nn
import torch.nn.functional as F
p1 = torch.nn.PairwiseDistance(p=1)
p2 = torch.nn.PairwiseDistance(p=2)
def l1_score(vec1, vec2):
return p1(vec1, vec2)
def l2_score(vec1, vec2):
return p2(vec1, vec2)
def cos_score(vec1, vec2):
return F.cosine_similarity(vec1, vec2)
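if __name__ == '__main__':
    # Minimal usage sketch (illustrative values only). The pairwise distances
    # expect 2-D (batch, dim) tensors; cosine_similarity works along dim=1.
    a = torch.tensor([[1.0, 0.0, 0.0]])
    b = torch.tensor([[0.0, 1.0, 0.0]])
    print(l1_score(a, b))   # ~tensor([2.0000])
    print(l2_score(a, b))   # ~tensor([1.4142])
    print(cos_score(a, b))  # ~tensor([0.])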
|
ddos/child.py | security-geeks/hacking-tools | 198 | 12656842 | import asyncio
import aiohttp
import asyncio_redis
from config import LOG_INTERVAL,REQ_KEY,KILL_KEY
class Child(object):
"""
    Wraps all of the child-process features (request hammering, stats reporting,
    kill-signal listening) in one class.
"""
def __init__(self,id,url):
self.id = id+"-"+REQ_KEY
# print(self.id)
# self.connection = redis.Connection()
self.url = url
self.loop = asyncio.get_event_loop()
self.client = aiohttp.ClientSession(loop=self.loop)
self.count = 0 # total request/s
async def init(self):
self.redis_connection = await asyncio_redis.Pool.create(poolsize=2)
async def hammer(self):
while True:
connection = await self.client.get(self.url)
self.count+=1
await connection.release()
async def send_stats(self):
while True:
await self.redis_connection.set(self.id,str(self.count//LOG_INTERVAL))
self.count=0
await asyncio.sleep(LOG_INTERVAL)
def clean_up(self):
pass
async def listen_for_close(self):
subscriber = await self.redis_connection.start_subscribe()
await subscriber.subscribe([KILL_KEY])
reply =await subscriber.next_published()
# above will block until something is published
self.clean_up()
def start(self,*funcs):
self.loop.run_until_complete(self.init())
asyncio.ensure_future(self.send_stats())
asyncio.ensure_future(self.hammer())
asyncio.ensure_future(self.listen_for_close())
if funcs:
for func in funcs:
asyncio.ensure_future(func)
self.loop.run_forever()
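if __name__ == "__main__":
    # Hypothetical usage sketch: the real entry point is the parent process that
    # spawns these workers, and the id/URL below are placeholders.
    Child("worker-1", "http://localhost:8000/").start()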
|
tests/test_visitors/test_tokenize/test_primitives/test_numbers/test_wrong_hex_case.py | cdhiraj40/wemake-python-styleguide | 1,931 | 12656858 | <reponame>cdhiraj40/wemake-python-styleguide<gh_stars>1000+
import pytest
from wemake_python_styleguide.violations.consistency import (
BadNumberSuffixViolation,
WrongHexNumberCaseViolation,
)
from wemake_python_styleguide.visitors.tokenize.primitives import (
WrongNumberTokenVisitor,
)
hex_number_templates = [
'0x{0}',
'0xA{0}',
'0x{0}2',
'0xB{0}1',
]
@pytest.mark.parametrize('hex_char', ['a', 'b', 'c', 'd', 'e', 'f'])
@pytest.mark.parametrize('number', hex_number_templates)
def test_hex_wrong_case(
parse_tokens,
assert_errors,
assert_error_text,
default_options,
hex_char,
number,
number_sign,
):
"""Ensures that numbers with suffix not in lowercase raise a warning."""
real_number = number.format(hex_char)
file_tokens = parse_tokens(number_sign(real_number))
visitor = WrongNumberTokenVisitor(default_options, file_tokens=file_tokens)
visitor.run()
assert_errors(visitor, [WrongHexNumberCaseViolation])
assert_error_text(visitor, real_number)
@pytest.mark.parametrize('hex_char', ['A', 'B', 'C', 'D', 'E', 'F'])
@pytest.mark.parametrize('number', hex_number_templates)
def test_hex_correct_case(
parse_tokens,
assert_errors,
default_options,
hex_char,
number,
number_sign,
):
"""Ensures that numbers with correct numbers do not raise a warning."""
file_tokens = parse_tokens(number_sign(number.format(hex_char)))
visitor = WrongNumberTokenVisitor(default_options, file_tokens=file_tokens)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('hex_char', ['a', 'b', 'c', 'd', 'e', 'f'])
@pytest.mark.parametrize('number', hex_number_templates)
def test_hex_double_wrong_case(
parse_tokens,
assert_errors,
default_options,
hex_char,
number,
number_sign,
):
"""Ensures that numbers with suffix not in lowercase raise a warning."""
real_number = number.format(hex_char).replace('x', 'X')
file_tokens = parse_tokens(number_sign(real_number))
visitor = WrongNumberTokenVisitor(default_options, file_tokens=file_tokens)
visitor.run()
assert_errors(visitor, [
BadNumberSuffixViolation,
WrongHexNumberCaseViolation,
])
|
src/vendor/github.com/docker/notary/buildscripts/dockertest.py | shaneutt/harbor | 1,729 | 12656874 | <gh_stars>1000+
"""
Script that automates trusted pull/pushes on different docker versions.
Usage: python buildscripts/dockertest.py
- assumes that this is run from the root notary directory
- assumes that bin/client already exists
- assumes you are logged in with docker
- environment variables to provide:
- DEBUG=true - produce debug output
- DOCKER_CONTENT_TRUST_SERVER=<notary server url> test against a non-local
notary server
- NOTARY_SERVER_USERNAME=<username> login creds username to notary server
- NOTARY_SERVER_PASSPHRASE=<passwd> login creds password to notary server
- DOCKER_USERNAME=<username> docker hub login username
"""
from __future__ import print_function
from collections import OrderedDict
import atexit
import json
import os
import platform
import pwd
import re
import shutil
import subprocess
import tarfile
from tempfile import mkdtemp
from time import sleep, time
import urllib
from urlparse import urljoin
# Configuration for testing
# please give the full path to the binary (or if it's on your path, just the
# binary name) for these if you do not want them downloaded, otherwise these
# can be ignored. Up to you to make sure you are running the correct daemon
# version.
DOCKERS = {}
# delete any of these if you want to specify the docker binaries yourself
DOWNLOAD_DOCKERS = {
"1.10": ("https://get.docker.com", "docker-1.10.3"),
"1.11": ("https://get.docker.com", "docker-1.11.2"),
"1.12": ("https://get.docker.com", "docker-1.12.1"),
}
NOTARY_VERSION = "0.4.1" # only version that will work with docker < 1.13
NOTARY_BINARY = "bin/notary"
# please replace with private registry if you want to test against a private
# registry
REGISTRY = "docker.io"
# please enter your username if it does not match your shell username, or set the
# environment variable DOCKER_USERNAME
REGISTRY_USERNAME = os.getenv("DOCKER_USERNAME", pwd.getpwuid(os.getuid())[0])
# what you want the testing repo names to be prefixed with
REPO_PREFIX = "docker_test"
# Assumes default docker config dir
DEFAULT_DOCKER_CONFIG = os.path.expanduser("~/.docker")
# Assumes the trust server will be run using compose if
# DOCKER_CONTENT_TRUST_SERVER is not specified
DEFAULT_NOTARY_SERVER = "https://notary-server:4443"
# please enter a custom trust server location if you do not wish to use a local
# docker-compose instantiation. If testing against Docker Hub's notary server
# or another trust server, please also ensure that this script does not pick up
# incorrect TLS certificates from ~/.notary/config.json by default
TRUST_SERVER = os.getenv('DOCKER_CONTENT_TRUST_SERVER', DEFAULT_NOTARY_SERVER)
# Assumes the test will be run with `python misc/dockertest.py` from
# the root of the notary repo after binaries are built
# also overrides the notary server location if need be
if TRUST_SERVER != DEFAULT_NOTARY_SERVER:
NOTARY_CLIENT = "{client} -s {server}".format(
client=NOTARY_BINARY, server=TRUST_SERVER)
else:
NOTARY_CLIENT = "{client} -c cmd/notary/config.json".format(
client=NOTARY_BINARY)
DEBUG = " -D" if os.getenv('DEBUG') else ""
# ---- setup ----
def download_docker(download_dir="/tmp"):
"""
Downloads the relevant docker binaries and sets the docker values
"""
system = platform.system()
architecture = "x86_64"
if platform.architecture()[0] != "64bit":
architecture = "i386"
downloadfile = urllib.URLopener()
for version in DOWNLOAD_DOCKERS:
domain, binary = DOWNLOAD_DOCKERS[version]
tarfilename = os.path.join(download_dir, binary+".tgz")
extractdir = os.path.join(download_dir, binary)
DOCKERS[version] = os.path.join(extractdir, "docker")
# we already have that version
if os.path.isfile(os.path.join(extractdir, "docker")):
continue
if not os.path.isdir(extractdir):
os.makedirs(extractdir)
if not os.path.isfile(tarfilename):
url = urljoin(
# as of 1.10 docker downloads are tar-ed due to potentially
# containing containerd etc.
# note that for windows (which we don't currently support),
# it's a .zip file
domain, "/".join(
["builds", system, architecture, binary+".tgz"]))
print("Downloading", url)
downloadfile.retrieve(url, tarfilename)
with tarfile.open(tarfilename, 'r:gz') as tf:
for member in tf.getmembers():
if not member.isfile():
continue
archfile = tf.extractfile(member)
fname = os.path.join(extractdir, os.path.basename(member.name))
with open(fname, 'wb') as writefile:
writefile.write(archfile.read())
os.chmod(fname, 0755)
if not os.path.isfile(DOCKERS[version]):
raise Exception(
"Extracted {tar} to {loc} but could not find {docker}".format(
tar=tarfilename, loc=extractdir, docker=DOCKERS[version]))
def verify_notary():
"""
Check that notary is the right version
"""
if not os.path.isfile(NOTARY_BINARY):
raise Exception("notary client does not exist: " + NOTARY_BINARY)
output = subprocess.check_output([NOTARY_BINARY, "version"]).strip()
lines = output.split("\n")
if len(lines) != 3:
print(output)
raise Exception("notary version output invalid")
if lines[1].split()[-1] > NOTARY_VERSION:
print(output)
raise Exception("notary version too high: must be <= " + NOTARY_VERSION)
def setup():
"""
Ensure we are set up to run the test
"""
download_docker()
verify_notary()
# ensure that we have the alpine image
subprocess.call("docker pull alpine".split())
# copy the docker config dir over so we don't break anything in real docker
# config directory
os.mkdir(_TEMP_DOCKER_CONFIG_DIR)
# copy any docker creds over so we can push
configfile = os.path.join(_TEMP_DOCKER_CONFIG_DIR, "config.json")
shutil.copyfile(
os.path.join(DEFAULT_DOCKER_CONFIG, "config.json"), configfile)
# always clean up the config file so creds aren't left in this temp directory
atexit.register(os.remove, configfile)
defaulttlsdir = os.path.join(DEFAULT_DOCKER_CONFIG, "tls")
tlsdir = os.path.join(_TEMP_DOCKER_CONFIG_DIR, "tls")
if os.path.exists(tlsdir):
shutil.copytree(defaulttlsdir, tlsdir)
# make sure that the cert is in the right place for local notary
if TRUST_SERVER == DEFAULT_NOTARY_SERVER:
tlsdir = os.path.join(tlsdir, "notary-server:4443")
if not os.path.isdir(tlsdir):
try:
shutil.rmtree(tlsdir) # in case it's not a directory
except OSError as ex:
if "No such file or directory" not in str(ex):
raise
os.makedirs(tlsdir)
cert = os.path.join(tlsdir, "root-ca.crt")
if not os.path.isfile(cert):
shutil.copyfile("fixtures/root-ca.crt", cert)
# ---- tests ----
_TEMPDIR = mkdtemp(prefix="docker-version-test")
_TEMP_DOCKER_CONFIG_DIR = os.path.join(_TEMPDIR, "docker-config-dir")
_TRUST_DIR = os.path.join(_TEMP_DOCKER_CONFIG_DIR, "trust")
_ENV = os.environ.copy()
_ENV.update({
# enable content trust and use our own server
"DOCKER_CONTENT_TRUST_SERVER": TRUST_SERVER,
"DOCKER_CONTENT_TRUST": "1",
# environment variables that notary uses
"NOTARY_ROOT_PASSPHRASE": "randompass",
"NOTARY_TARGETS_PASSPHRASE": "randompass",
"NOTARY_SNAPSHOT_PASSPHRASE": "randompass",
# environment variables used by current version of docker
"DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE": "randompass",
"DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE": "randompass",
# environment variables used by docker 1.8
"DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE": "randompass",
"DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE": "randompass",
# do not use the default docker config directory
"DOCKER_CONFIG": _TEMP_DOCKER_CONFIG_DIR
})
_DIGEST_REGEX = re.compile(r"\b[dD]igest: sha256:([0-9a-fA-F]+)\b")
_SIZE_REGEX = re.compile(r"\bsize: ([0-9]+)\b")
_PULL_A_REGEX = re.compile(
r"Pull \(\d+ of \d+\): .+:(.+)@sha256:([0-9a-fA-F]+)")
_BUILD_REGEX = re.compile(r"Successfully built ([0-9a-fA-F]+)")
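# Illustrative only: these regexes pull fields out of `docker push`/`pull` output
# lines shaped roughly like "latest: digest: sha256:<hex> size: 1528" (the exact
# surrounding text varies between docker versions), e.g.
#
#   _DIGEST_REGEX.search("latest: digest: sha256:ab12cd size: 1528").group(1)
#   # -> 'ab12cd'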
def clear_tuf():
"""
Removes the trusted certificates and TUF metadata in ~/.docker/trust
"""
try:
shutil.rmtree(os.path.join(_TRUST_DIR, "trusted_certificates"))
shutil.rmtree(os.path.join(_TRUST_DIR, "tuf"))
except OSError as ex:
if "No such file or directory" not in str(ex):
raise
def clear_keys():
"""
Removes the TUF keys in trust directory, since the key format changed
between versions and can cause problems if testing newer docker versions
before testing older docker versions.
"""
try:
shutil.rmtree(os.path.join(_TRUST_DIR, "private"))
except OSError as ex:
if "No such file or directory" not in str(ex):
raise
def run_cmd(cmd, fileoutput, input=None):
"""
Takes a string command, runs it, and returns the output even if it fails.
"""
print("$ " + cmd)
fileoutput.write("$ {cmd}\n".format(cmd=cmd))
if input is not None:
process = subprocess.Popen(
cmd.split(), env=_ENV, stderr=subprocess.STDOUT,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
process.stdin.write(input)
process.stdin.close()
else:
process = subprocess.Popen(cmd.split(), env=_ENV, stderr=subprocess.STDOUT,
stdout=subprocess.PIPE)
output = ""
while process.poll() is None:
line = process.stdout.readline()
print(line.strip("\n"))
fileoutput.write(line)
if "level=debug" not in line:
output += line
retcode = process.poll()
print()
fileoutput.write("\n")
if retcode:
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def rmi(fout, docker_version, image, tag):
"""
Ensures that an image is no longer available locally to docker.
"""
try:
run_cmd(
"{docker} rmi {image}:{tag}".format(
docker=DOCKERS[docker_version], image=image, tag=tag),
fout)
except subprocess.CalledProcessError as ex:
if "could not find image" not in str(ex):
raise
def assert_equality(actual, expected):
"""
Assert equality, print nice message
"""
assert actual == expected, "\nGot : {0}\nExpected: {1}".format(
repr(actual), repr(expected))
def pull(fout, docker_version, image, tag, expected_sha):
"""
Pulls an image using docker, and asserts that the sha is correct. Make
sure it is untagged first.
"""
clear_tuf()
rmi(fout, docker_version, image, tag)
output = run_cmd(
"{docker}{debug} pull {image}:{tag}".format(
docker=DOCKERS[docker_version], image=image, tag=tag, debug=DEBUG),
fout)
sha = _DIGEST_REGEX.search(output).group(1)
assert_equality(sha, expected_sha)
def push(fout, docker_version, image, tag):
"""
Tags an image with the docker version and pushes it. Returns the sha and
expected size.
"""
clear_tuf()
# tag image with the docker version
run_cmd(
"{docker} tag alpine {image}:{tag}".format(
docker=DOCKERS[docker_version], image=image, tag=tag),
fout)
# push!
output = run_cmd(
"{docker}{debug} push {image}:{tag}".format(
docker=DOCKERS[docker_version], image=image, tag=tag, debug=DEBUG),
fout)
sha = _DIGEST_REGEX.search(output).group(1)
size = _SIZE_REGEX.search(output).group(1)
# sleep for 1s after pushing, just to let things propagate :)
sleep(1)
# list
targets = notary_list(fout, image)
for target in targets:
if target[0] == tag:
assert_equality(target, [tag, sha, size, "targets"])
return sha, size
def get_notary_usernamepass():
"""
Gets the username password for the notary server
"""
username = os.getenv("NOTARY_SERVER_USERNAME")
passwd = os.getenv("NOTARY_SERVER_PASSPHRASE")
if username and passwd:
return username + "\n" + passwd + "\n"
return None
def notary_list(fout, repo):
"""
Calls notary list on the repo and returns a list of lists of tags, shas,
sizes, and roles.
"""
clear_tuf()
output = run_cmd(
"{notary}{debug} -d {trustdir} list {gun}".format(
notary=NOTARY_CLIENT, trustdir=_TRUST_DIR, gun=repo, debug=DEBUG),
fout, input=get_notary_usernamepass())
lines = output.strip().split("\n")
assert len(lines) >= 3, "not enough targets"
return [line.strip().split() for line in lines[2:]]
def test_build(fout, image, docker_version):
"""
Build from a simple Dockerfile and ensure it works with DCT enabled
"""
clear_tuf()
# build
# simple dockerfile to test building with trust
dockerfile = "FROM {image}:{tag}\nRUN sh\n".format(
image=image, tag=docker_version)
tempdir_dockerfile = os.path.join(_TEMPDIR, "Dockerfile")
with open(tempdir_dockerfile, 'wb') as ftemp:
ftemp.write(dockerfile)
output = run_cmd(
"{docker}{debug} build {context}".format(
docker=DOCKERS[docker_version], context=_TEMPDIR, debug=DEBUG),
fout)
build_result = _BUILD_REGEX.findall(output)
assert len(build_result) >= 0, "build did not succeed"
def test_pull_a(fout, docker_version, image, expected_tags):
"""
Pull -A on an image and ensure that all the expected tags are present
"""
clear_tuf()
# remove every image possible
for tag in expected_tags:
rmi(fout, docker_version, image, tag)
# pull -a
output = run_cmd(
"{docker}{debug} pull -a {image}".format(
docker=DOCKERS[docker_version], image=image, debug=DEBUG), fout)
pulled_tags = _PULL_A_REGEX.findall(output)
assert_equality(len(pulled_tags), len(expected_tags))
for tag, info in expected_tags.iteritems():
found = [pulled for pulled in pulled_tags if pulled[0] == tag]
assert found
assert_equality(found[0][1], info["sha"])
def test_push(tempdir, docker_version, image, tag="", allow_push_failure=False,
do_after_first_push=None):
"""
Tests a push of an image by pushing with this docker version, and asserting
that all the other docker versions can pull it.
"""
if not tag:
tag = docker_version
filename = os.path.join(
tempdir, "{0}_{1}_push_{2}").format(time(), docker_version, tag)
with open(filename, 'wb') as fout:
try:
sha, size = push(fout, docker_version, image, tag=tag)
except subprocess.CalledProcessError:
if allow_push_failure:
return {"push": "failed, but that was expected"}
raise
return_val = {
"push": {
"sha": sha,
"size": size
}
}
if do_after_first_push is not None:
do_after_first_push(fout, image)
for ver in DOCKERS:
try:
pull(fout, ver, image, tag, sha)
except subprocess.CalledProcessError:
print("pulling {0}:{1} with {2} (expected hash {3}) failed".format(
image, tag, ver, sha))
raise
else:
return_val["push"][ver] = "pull succeeded"
return return_val
def test_run(fout, image, docker_version):
"""
Runs a simple alpine container to ensure it works with DCT enabled
"""
clear_tuf()
# run
output = run_cmd(
"{docker}{debug} run -it --rm {image}:{tag} echo SUCCESS".format(
docker=DOCKERS[docker_version], image=image, tag=docker_version,
debug=DEBUG), fout)
assert "SUCCESS" in output, "run did not succeed"
def test_docker_version(docker_version, repo_name="", do_after_first_push=None):
"""
Initialize a repo with one docker version. Test that all other docker
versions against that repo (both pulling and pushing).
"""
if not repo_name:
repo_name = "repo_by_{0}".format(docker_version)
tempdir = os.path.join(_TEMPDIR, repo_name)
os.makedirs(tempdir)
image = "{0}/{1}/{2}_{3}-{4}".format(
REGISTRY, REGISTRY_USERNAME, REPO_PREFIX, repo_name, time())
result = OrderedDict([
(docker_version, test_push(tempdir, docker_version, image,
do_after_first_push=do_after_first_push))
])
# push again if we did something after the first push
if do_after_first_push:
tag = docker_version + "_push_again"
result[tag] = test_push(
tempdir, docker_version, image, tag=tag,
# 1.8.x and 1.9.x might fail to push again after snapshot rotation
# or delegation manipulation
allow_push_failure=re.compile(r"1\.[0-9](\.\d+)?$").search(docker_version))
for ver in DOCKERS:
if ver != docker_version:
# 1.8.x and 1.9.x will fail to push if the repo was created by
# a more recent docker, since the key format has changed, or if a
# snapshot rotation or delegation has occurred
can_fail = (
(do_after_first_push or
re.compile(r"1\.[1-9][0-9](\.\d+)?$").search(docker_version)) and
re.compile(r"1\.[0-9](\.\d+)?$").search(ver))
result[ver] = test_push(tempdir, ver, image, allow_push_failure=can_fail)
# find all the successfully pushed tags
expected_tags = {}
for ver in result:
if isinstance(result[ver]["push"], dict):
expected_tags[ver] = result[ver]["push"]
with open(os.path.join(tempdir, "pull_a"), 'wb') as fout:
for ver in DOCKERS:
try:
test_pull_a(fout, ver, image, expected_tags)
except subprocess.CalledProcessError:
result[ver]["pull-a"] = "failed"
else:
result[ver]["pull-a"] = "success"
with open(os.path.join(tempdir, "notary_list"), 'wb') as fout:
targets = notary_list(fout, image)
assert_equality(len(targets), len(expected_tags))
for tag, info in expected_tags.iteritems():
found = [target for target in targets if target[0] == tag]
assert found
assert_equality(
found[0][1:],
[info["sha"], info["size"], "targets"])
result["list"] = "listed expected targets successfully"
with open(os.path.join(tempdir, "build"), 'wb') as fout:
try:
test_build(fout, image, docker_version)
except subprocess.CalledProcessError:
result[docker_version]["build"] = "failed"
else:
result[docker_version]["build"] = "success"
with open(os.path.join(tempdir, "run"), 'wb') as fout:
try:
test_run(fout, image, docker_version)
except subprocess.CalledProcessError:
result[docker_version]["run"] = "failed"
else:
result[docker_version]["run"] = "success"
with open(os.path.join(tempdir, "result.json"), 'wb') as fout:
json.dump(result, fout, indent=2)
return result
def rotate_to_server_snapshot(fout, image):
"""
Uses the notary client to rotate the snapshot key to be server-managed.
"""
run_cmd(
"{notary}{debug} -d {trustdir} key rotate {gun} snapshot -r".format(
notary=NOTARY_CLIENT, trustdir=_TRUST_DIR, gun=image, debug=DEBUG),
fout, input=get_notary_usernamepass())
run_cmd(
"{notary}{debug} -d {trustdir} publish {gun}".format(
notary=NOTARY_CLIENT, trustdir=_TRUST_DIR, gun=image, debug=DEBUG),
fout, input=get_notary_usernamepass())
def test_all_docker_versions():
"""
Initialize a repo with each docker version, and test that other docker
versions can read/write to it.
"""
print("Output files at", _TEMPDIR)
results = OrderedDict()
for docker_version in DOCKERS:
clear_keys()
# test with just creating a regular repo
result = test_docker_version(docker_version)
print("\nRepo created with docker {0}:".format(docker_version))
print(json.dumps(result, indent=2))
results[docker_version] = result
# do snapshot rotation after creating the repo, and see if it's still ok
repo_name = "repo_by_{0}_snapshot_rotation".format(docker_version)
result = test_docker_version(
docker_version, repo_name=repo_name,
do_after_first_push=rotate_to_server_snapshot)
print("\nRepo created with docker {0} and snapshot key rotated:"
.format(docker_version))
print(json.dumps(result, indent=2))
results[docker_version + "_snapshot_rotation"] = result
with open(os.path.join(_TEMPDIR, "total_results.json"), 'wb') as fout:
json.dump(results, fout, indent=2)
print("\n\nFinal results:")
results["output_dir"] = _TEMPDIR
print(json.dumps(results, indent=2))
if __name__ == "__main__":
setup()
test_all_docker_versions()
|
pysph/examples/spheric/moving_square.py | nauaneed/pysph | 293 | 12656876 | """SPHERIC benchmark case 6. (2 hours)
See http://spheric-sph.org/tests/test-6 for more details.
"""
# math
from math import exp
# PySPH imports
from pysph.base.utils import get_particle_array
from pysph.base.kernels import QuinticSpline
from pysph.solver.solver import Solver
from pysph.solver.application import Application
from pysph.sph.integrator_step import TwoStageRigidBodyStep, TransportVelocityStep
from pysph.sph.integrator import Integrator
from pysph.tools import uniform_distribution
# SPH equations for this problem
from pysph.sph.equation import Group, Equation
from pysph.sph.wc.transport_velocity import SummationDensity,\
StateEquation, MomentumEquationPressureGradient, MomentumEquationViscosity,\
MomentumEquationArtificialStress, SolidWallPressureBC, SolidWallNoSlipBC,\
SetWallVelocity
# domain and reference values
Lx = 10.0
Ly = 5.0
Umax = 1.0
c0 = 25.0 * Umax
rho0 = 1.0
p0 = c0 * c0 * rho0
# obstacle dimensions
obstacle_width = 1.0
obstacle_height = 1.0
# Reynolds number and kinematic viscosity
Re = 150
nu = Umax * obstacle_width / Re
# Numerical setup
nx = 50
dx = 0.20 * Lx / nx
nghost_layers = 4
ghost_extent = nghost_layers * dx
hdx = 1.2
# adaptive time steps
h0 = hdx * dx
dt_cfl = 0.25 * h0 / (c0 + Umax)
dt_viscous = 0.125 * h0**2 / nu
dt_force = 1.0
tf = 8.0
dt = 0.8 * min(dt_cfl, dt_viscous, dt_force)
class SPHERICBenchmarkAcceleration(Equation):
r"""Equation to set the acceleration for the moving square
benchmark problem.
We use scipy.optimize to fit the Gaussian:
.. math::
a \exp( -\frac{(t-b)^2}{2c^2} ) + d
to the SPHERIC Motion.dat file. The values for the parameters are
a = 2.8209512
b = 0.525652151
c = 0.14142151
d = -2.55580905e-08
Notes:
This equation must be instantiated with no sources
"""
def loop(self, d_idx, d_au, t=0.0):
a = 2.8209512
b = 0.525652151
c = 0.14142151
d = -2.55580905e-08
# compute the acceleration and set it for the destination
d_au[d_idx] = a * exp(-(t - b) * (t - b) / (2.0 * c * c)) + d
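# Illustrative helper (an assumption for offline inspection only; the solver does
# not call it): evaluate the fitted Gaussian above, e.g. to compare against the
# SPHERIC Motion.dat data before running the simulation.
def _benchmark_acceleration(t, a=2.8209512, b=0.525652151,
                            c=0.14142151, d=-2.55580905e-08):
    """Return the prescribed x-acceleration of the moving square at time ``t``."""
    return a * exp(-(t - b) * (t - b) / (2.0 * c * c)) + d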
def _get_interior(x, y):
indices = []
for i in range(x.size):
if ((x[i] > 0.0) and (x[i] < Lx)):
if ((y[i] > 0.0) and (y[i] < Ly)):
indices.append(i)
return indices
def _get_obstacle(x, y):
indices = []
for i in range(x.size):
if ((1.0 <= x[i] <= 2.0) and (2.0 <= y[i] <= 3.0)):
indices.append(i)
return indices
class MovingSquare(Application):
def _setup_particle_properties(self, particles, volume):
fluid, solid, obstacle = particles
#### ADD PROPS FOR THE PARTICLES ###
# volume from number density
fluid.add_property('V')
solid.add_property('V')
obstacle.add_property('V')
# extrapolated velocities for the fluid
for name in ['uf', 'vf', 'wf']:
solid.add_property(name)
obstacle.add_property(name)
# dummy velocities for the solid and obstacle
# required for the no-slip BC
for name in ['ug', 'vg', 'wg']:
solid.add_property(name)
obstacle.add_property(name)
# advection velocities and accelerations for fluid
for name in ('uhat', 'vhat', 'what', 'auhat',
'avhat', 'awhat', 'au', 'av', 'aw'):
fluid.add_property(name)
# kernel summation correction for solids
solid.add_property('wij')
obstacle.add_property('wij')
# initial velocities and positions needed for the obstacle for
# rigid-body integration
        obstacle.add_property('u0')
        obstacle.u0[:] = 0.
        obstacle.add_property('v0')
        obstacle.v0[:] = 0.
        obstacle.add_property('w0')
        obstacle.w0[:] = 0.
obstacle.add_property('x0')
obstacle.add_property('y0')
obstacle.add_property('z0')
# imposed accelerations on the solid and obstacle
solid.add_property('ax')
solid.add_property('ay')
solid.add_property('az')
obstacle.add_property('ax')
obstacle.add_property('ay')
obstacle.add_property('az')
# magnitude of velocity squared
fluid.add_property('vmag2')
#### SETUP PARTICLE PROPERTIES ###
# mass is set to get the reference density of rho0
fluid.m[:] = volume * rho0
solid.m[:] = volume * rho0
obstacle.m[:] = volume * rho0
# volume is set as dx^2
fluid.V[:] = 1. / volume
solid.V[:] = 1. / volume
obstacle.V[:] = 1. / volume
# smoothing lengths
fluid.h[:] = h0
solid.h[:] = h0
obstacle.h[:] = h0
# set the output arrays
fluid.set_output_arrays(['x', 'y', 'u', 'v', 'vmag2', 'rho', 'p',
'V', 'm', 'h'])
solid.set_output_arrays(['x', 'y', 'rho', 'p'])
obstacle.set_output_arrays(['x', 'y', 'u0', 'rho', 'p', 'u'])
particles = [fluid, solid, obstacle]
return particles
def add_user_options(self, group):
group.add_argument(
"--hcp", action="store_true", dest="hcp", default=False,
help="Use hexagonal close packing of particles."
)
def create_particles(self):
hcp = self.options.hcp
# Initial distribution using Hexagonal close packing of particles
# create all particles
global dx
if hcp:
x, y, dx, dy, xmin, xmax, ymin, ymax = uniform_distribution.uniform_distribution_hcp2D(
dx=dx, xmin=-ghost_extent, xmax=Lx + ghost_extent,
ymin=-ghost_extent, ymax=Ly + ghost_extent)
else:
x, y, dx, dy, xmin, xmax, ymin, ymax = uniform_distribution.uniform_distribution_cubic2D(
dx=dx, xmin=-ghost_extent, xmax=Lx + ghost_extent,
ymin=-ghost_extent, ymax=Ly + ghost_extent)
x = x.ravel()
y = y.ravel()
# create the basic particle array
solid = get_particle_array(name='solid', x=x, y=y)
# now sort out the interior from all particles
indices = _get_interior(solid.x, solid.y)
fluid = solid.extract_particles(indices)
fluid.set_name('fluid')
solid.remove_particles(indices)
# sort out the obstacle from the interior
indices = _get_obstacle(fluid.x, fluid.y)
obstacle = fluid.extract_particles(indices)
obstacle.set_name('obstacle')
fluid.remove_particles(indices)
print("SPHERIC benchmark 6 :: Re = %d, nfluid = %d, nsolid=%d, nobstacle = %d, dt = %g" % (
Re, fluid.get_number_of_particles(),
solid.get_number_of_particles(),
obstacle.get_number_of_particles(), dt))
# setup requisite particle properties and initial conditions
        if hcp:
            # a kernel instance is needed here for the number-density estimate;
            # the solver builds its own QuinticSpline later in create_solver()
            kernel = QuinticSpline(dim=2)
            wij_sum = uniform_distribution.get_number_density_hcp(
                dx, dy, kernel, h0)
            volume = 1. / wij_sum
else:
volume = dx * dy
particles = self._setup_particle_properties(
[fluid, solid, obstacle], volume=volume
)
return particles
def create_solver(self):
kernel = QuinticSpline(dim=2)
integrator = Integrator(fluid=TransportVelocityStep(),
obstacle=TwoStageRigidBodyStep())
solver = Solver(kernel=kernel, dim=2, integrator=integrator,
tf=tf, dt=dt, adaptive_timestep=False,
output_at_times=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
return solver
def create_equations(self):
equations = [
# set the acceleration for the obstacle using the special function
# mimicking the accelerations provided in the test.
Group(
equations=[
SPHERICBenchmarkAcceleration(
dest='obstacle', sources=None),
], real=False
),
# Summation density along with volume summation for the fluid
# phase. This is done for all local and remote particles. At the
# end of this group, the fluid phase has the correct density
# taking into consideration the fluid and solid
# particles.
Group(
equations=[
SummationDensity(
dest='fluid', sources=[
'fluid', 'solid', 'obstacle']),
], real=False
),
# Once the fluid density is computed, we can use the EOS to set
# the fluid pressure. Additionally, the dummy velocity for the
# channel is set, which is later used in the no-slip wall BC.
Group(
equations=[
StateEquation(
dest='fluid',
sources=None,
p0=p0,
rho0=rho0,
b=1.0),
SetWallVelocity(dest='solid', sources=['fluid']),
SetWallVelocity(dest='obstacle', sources=['fluid']),
], real=False
),
# Once the pressure for the fluid phase has been updated, we can
# extrapolate the pressure to the ghost particles. After this
# group, the fluid density, pressure and the boundary pressure has
# been updated and can be used in the integration equations.
Group(
equations=[
SolidWallPressureBC(
dest='obstacle',
sources=['fluid'],
b=1.0,
rho0=rho0,
p0=p0),
SolidWallPressureBC(
dest='solid', sources=['fluid'], b=1.0, rho0=rho0, p0=p0),
], real=False
),
# The main accelerations block. The acceleration arrays for the
            # fluid phase are updated in this stage for all local particles.
Group(
equations=[
# Pressure gradient terms
MomentumEquationPressureGradient(
dest='fluid', sources=['fluid', 'solid', 'obstacle'], pb=p0),
# fluid viscosity
MomentumEquationViscosity(
dest='fluid', sources=['fluid'], nu=nu),
# No-slip boundary condition. This is effectively a
# viscous interaction of the fluid with the ghost
# particles.
SolidWallNoSlipBC(
dest='fluid', sources=['solid', 'obstacle'], nu=nu),
# Artificial stress for the fluid phase
MomentumEquationArtificialStress(
dest='fluid', sources=['fluid']),
], real=True
),
]
return equations
if __name__ == '__main__':
app = MovingSquare()
app.run()
|
warp/__init__.py | NVIDIA/warp | 306 | 12656877 | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# for autocomplete on builtins
#from warp.stubs import *
from warp.types import array, array2d, array3d, array4d, constant
from warp.types import int8, uint8, int16, uint16, int32, uint32, int64, uint64, float32, float64
from warp.types import vec2, vec3, vec4, mat22, mat33, mat44, quat, transform, spatial_vector, spatial_matrix
from warp.types import Mesh, HashGrid, Volume
from warp.context import init, func, kernel, runtime
from warp.context import is_cpu_available, is_cuda_available, is_device_available
from warp.context import get_devices, get_preferred_device
from warp.context import zeros, zeros_like, clone, empty, empty_like, copy, from_numpy, launch, synchronize, force_load
from warp.context import set_module_options, get_module_options, get_module
from warp.context import capture_begin, capture_end, capture_launch
from warp.context import print_builtins, export_builtins, export_stubs
from warp.context import Kernel, Function
import warp.builtins
from warp.tape import Tape
from warp.utils import ScopedTimer, ScopedCudaGuard
from warp.utils import transform_expand
# optional on PyTorch being installed
try:
from warp.torch import from_torch
from warp.torch import to_torch
except ModuleNotFoundError:
pass
# optional on USD being installed
try:
import warp.render
except ModuleNotFoundError:
pass
|
tests/components/luftdaten/conftest.py | MrDelik/core | 30,023 | 12656898 | """Fixtures for Luftdaten tests."""
from __future__ import annotations
from collections.abc import Generator
from unittest.mock import MagicMock, patch
import pytest
from homeassistant.components.luftdaten.const import CONF_SENSOR_ID, DOMAIN
from homeassistant.const import CONF_SHOW_ON_MAP
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry
@pytest.fixture
def mock_config_entry() -> MockConfigEntry:
"""Return the default mocked config entry."""
return MockConfigEntry(
title="12345",
domain=DOMAIN,
data={CONF_SENSOR_ID: 12345, CONF_SHOW_ON_MAP: True},
unique_id="12345",
)
@pytest.fixture
def mock_setup_entry() -> Generator[None, None, None]:
"""Mock setting up a config entry."""
with patch(
"homeassistant.components.luftdaten.async_setup_entry", return_value=True
):
yield
@pytest.fixture
def mock_luftdaten_config_flow() -> Generator[None, MagicMock, None]:
"""Return a mocked Luftdaten client."""
with patch(
"homeassistant.components.luftdaten.config_flow.Luftdaten", autospec=True
) as luftdaten_mock:
luftdaten = luftdaten_mock.return_value
luftdaten.validate_sensor.return_value = True
yield luftdaten
@pytest.fixture
def mock_luftdaten() -> Generator[None, MagicMock, None]:
"""Return a mocked Luftdaten client."""
with patch(
"homeassistant.components.luftdaten.Luftdaten", autospec=True
) as luftdaten_mock:
luftdaten = luftdaten_mock.return_value
luftdaten.sensor_id = 12345
luftdaten.meta = {
"altitude": 123.456,
"latitude": 56.789,
"longitude": 12.345,
"sensor_id": 12345,
}
luftdaten.values = {
"humidity": 34.70,
"P1": 8.5,
"P2": 4.07,
"pressure_at_sealevel": 103102.13,
"pressure": 98545.00,
"temperature": 22.30,
}
yield luftdaten
@pytest.fixture
async def init_integration(
hass: HomeAssistant, mock_config_entry: MockConfigEntry, mock_luftdaten: MagicMock
) -> MockConfigEntry:
"""Set up the Luftdaten integration for testing."""
mock_config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_config_entry.entry_id)
await hass.async_block_till_done()
return mock_config_entry
|
backend/MaskFormer/mask_former/data/datasets/__init__.py | rune-l/coco-annotator | 143 | 12656902 | <filename>backend/MaskFormer/mask_former/data/datasets/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
from . import (
register_ade20k_full,
register_ade20k_panoptic,
register_coco_stuff_10k,
register_mapillary_vistas,
)
|
data_structures/binary_trees/check_full_binary_tree.py | ruler30cm/python-ds | 1,723 | 12656906 | """
A full binary tree is a tree which has either 0 children or 2 children
"""
class Node:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def check(root):
if not root:
return True
if not root.left and not root.right:
return True
    if root.left and root.right:
        return check(root.left) and check(root.right)
    # a node with exactly one child means the tree is not full
    return False
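# Example (illustrative): a node with only one child makes the tree not full
#   lopsided = Node(0)
#   lopsided.left = Node(1)
#   check(lopsided)  # False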
root = Node(0)
root.left = Node(1)
root.right = Node(2)
if check(root):
print('True')
else:
print("False") |
tests/serializers.py | MojixCoder/django-jalali | 191 | 12656928 | <reponame>MojixCoder/django-jalali<filename>tests/serializers.py
from rest_framework.serializers import ModelSerializer
from django_jalali.serializers.serializerfield import (
JDateField, JDateTimeField,
)
from .models import Bar, BarTime
class JDateFieldSerializer(ModelSerializer):
date = JDateField()
class Meta:
model = Bar
exclude = []
class JDateTimeFieldSerializer(ModelSerializer):
datetime = JDateTimeField()
class Meta:
model = BarTime
exclude = []
|
test/conftest.py | oss-transfer/neomodel | 242 | 12656950 | <filename>test/conftest.py
from __future__ import print_function
import warnings
import os
import sys
import pytest
from neomodel import config, db, clear_neo4j_database, change_neo4j_password
from neo4j.exceptions import ClientError as CypherError
from neobolt.exceptions import ClientError
def pytest_addoption(parser):
"""
Adds the command line option --resetdb.
:param parser: The parser object. Please see <https://docs.pytest.org/en/latest/reference.html#_pytest.hookspec.pytest_addoption>`_
:type Parser object: For more information please see <https://docs.pytest.org/en/latest/reference.html#_pytest.config.Parser>`_
"""
parser.addoption("--resetdb", action="store_true", help = "Ensures that the database is clear prior to running tests for neomodel", default=False)
def pytest_sessionstart(session):
"""
Provides initial connection to the database and sets up the rest of the test suite
:param session: The session object. Please see <https://docs.pytest.org/en/latest/reference.html#_pytest.hookspec.pytest_sessionstart>`_
:type Session object: For more information please see <https://docs.pytest.org/en/latest/reference.html#session>`_
"""
warnings.simplefilter('default')
config.DATABASE_URL = os.environ.get('NEO4J_BOLT_URL', 'bolt://neo4j:foobar@localhost:7687')
config.AUTO_INSTALL_LABELS = True
try:
# Clear the database if required
database_is_populated, _ = db.cypher_query("MATCH (a) return count(a)>0 as database_is_populated")
if database_is_populated[0][0] and not session.config.getoption("resetdb"):
raise SystemError("Please note: The database seems to be populated.\n\tEither delete all nodes and edges manually, or set the --resetdb parameter when calling pytest\n\n\tpytest --resetdb.")
else:
clear_neo4j_database(db, clear_constraints=True, clear_indexes=True)
except (CypherError, ClientError) as ce:
# Handle instance without password being changed
if 'The credentials you provided were valid, but must be changed before you can use this instance' in str(ce):
            warnings.warn("New database with no password set, setting password to 'test'")
try:
change_neo4j_password(db, 'test')
# Ensures that multiprocessing tests can use the new password
config.DATABASE_URL = 'bolt://neo4j:test@localhost:7687'
db.set_connection('bolt://neo4j:test@localhost:7687')
warnings.warn("Please 'export NEO4J_BOLT_URL=bolt://neo4j:test@localhost:7687' for subsequent test runs")
except (CypherError, ClientError) as e:
if 'The credentials you provided were valid, but must be changed before you can use this instance' in str(e):
warnings.warn("You appear to be running on version 4.0+ of Neo4j, without having changed the password."
"Please manually log in, change your password, then update the config.DATABASE_URL call at line 32 in this file")
else:
raise e
else:
raise ce
def version_to_dec(a_version_string):
"""
Converts a version string to a number to allow for quick checks on the versions of specific components.
:param a_version_string: The version string under test (e.g. '3.4.0')
:type a_version_string: str
:return: An integer representation of the string version, e.g. '3.4.0' --> 340
"""
components = a_version_string.split('.')
while len(components) < 3:
components.append('0')
num = 0
for a_component in enumerate(components):
num += (10 ** ((len(components) - 1) - a_component[0])) * int(a_component[1])
return num
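# Illustrative examples: version_to_dec('3.4.0') -> 340 and version_to_dec('4.1') -> 410,
# so a plain integer comparison is enough to order component versions.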
def check_and_skip_neo4j_least_version(required_least_neo4j_version, message):
"""
Checks if the NEO4J_VERSION is at least `required_least_neo4j_version` and skips a test if not.
WARNING: If the NEO4J_VERSION variable is not set, this function returns True, allowing the test to go ahead.
:param required_least_neo4j_version: The least version to check. This must be the numberic representation of the
version. That is: '3.4.0' would be passed as 340.
:type required_least_neo4j_version: int
:param message: An informative message as to why the calling test had to be skipped.
:type message: str
:return: A boolean value of True if the version reported is at least `required_least_neo4j_version`
"""
if 'NEO4J_VERSION' in os.environ:
if version_to_dec(os.environ['NEO4J_VERSION']) < required_least_neo4j_version:
pytest.skip('Neo4j version: {}. {}.'
'Skipping test.'.format(os.environ['NEO4J_VERSION'], message))
@pytest.fixture
def skip_neo4j_before_330():
check_and_skip_neo4j_least_version(330, 'Neo4J version does not support this test')
|
tools/lock_labs.py | WenzelArifiandi/applied-machine-learning-intensive | 126 | 12656956 | <gh_stars>100-1000
"""lock_labs.py zips and password-protects labs.
$ tools/lock_labs.py
"""
from absl import app
from pathlib import Path
import getpass
import os
import pyminizip
def main(argv):
password = None
home = str(Path.home())
password_file = os.path.join(home, '.amli')
if os.path.exists(password_file):
with open (password_file) as f:
password = f.read().replace('\n', '')
else:
password = getpass.getpass("Password: ")
for root, _, files in os.walk('content'):
for name in files:
if name.endswith('-key.ipynb'):
lname = os.path.join(root, name)
zname = lname.replace('.ipynb', '.zip')
if os.path.exists(zname):
os.remove(zname)
pyminizip.compress(lname, root, zname, password, 0)
os.remove(lname)
if __name__ == '__main__':
app.run(main)
|
orator/utils/command_formatter.py | wjzero/orator | 1,484 | 12656978 | <reponame>wjzero/orator
# -*- coding: utf-8 -*-
from pygments.formatter import Formatter
from pygments.token import (
Keyword,
Name,
Comment,
String,
Error,
Number,
Operator,
Generic,
Token,
Whitespace,
)
from pygments.util import get_choice_opt
COMMAND_COLORS = {
Token: ("", ""),
Whitespace: ("fg=white", "fg=black;options=bold"),
Comment: ("fg=white", "fg=black;options=bold"),
Comment.Preproc: ("fg=cyan", "fg=cyan;options=bold"),
Keyword: ("fg=blue", "fg=blue;options=bold"),
Keyword.Type: ("fg=cyan", "fg=cyan;options=bold"),
Operator.Word: ("fg=magenta", "fg=magenta;options=bold"),
Name.Builtin: ("fg=cyan", "fg=cyan;options=bold"),
Name.Function: ("fg=green", "fg=green;option=bold"),
Name.Namespace: ("fg=cyan;options=underline", "fg=cyan;options=bold,underline"),
Name.Class: ("fg=green;options=underline", "fg=green;options=bold,underline"),
Name.Exception: ("fg=cyan", "fg=cyan;options=bold"),
Name.Decorator: ("fg=black;options=bold", "fg=white"),
Name.Variable: ("fg=red", "fg=red;options=bold"),
Name.Constant: ("fg=red", "fg=red;options=bold"),
Name.Attribute: ("fg=cyan", "fg=cyan;options=bold"),
Name.Tag: ("fg=blue;options=bold", "fg=blue;options=bold"),
String: ("fg=yellow", "fg=yellow"),
Number: ("fg=blue", "fg=blue;options=bold"),
Generic.Deleted: ("fg=red;options=bold", "fg=red;options=bold"),
Generic.Inserted: ("fg=green", "fg=green;options=bold"),
Generic.Heading: ("options=bold", "option=bold"),
Generic.Subheading: ("fg=magenta;options=bold", "fg=magenta;options=bold"),
Generic.Prompt: ("options=bold", "options=bold"),
Generic.Error: ("fg=red;options=bold", "fg=red;options=bold"),
Error: ("fg=red;options=bold,underline", "fg=red;options=bold,underline"),
}
class CommandFormatter(Formatter):
r"""
Format tokens with Cleo color sequences, for output in a text console.
Color sequences are terminated at newlines, so that paging the output
works correctly.
The `get_style_defs()` method doesn't do anything special since there is
no support for common styles.
Options accepted:
`bg`
Set to ``"light"`` or ``"dark"`` depending on the terminal's background
(default: ``"light"``).
`colorscheme`
A dictionary mapping token types to (lightbg, darkbg) color names or
``None`` (default: ``None`` = use builtin colorscheme).
`linenos`
Set to ``True`` to have line numbers on the terminal output as well
(default: ``False`` = no line numbers).
"""
name = "Command"
aliases = ["command"]
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self.darkbg = (
get_choice_opt(options, "bg", ["light", "dark"], "light") == "dark"
)
self.colorscheme = options.get("colorscheme", None) or COMMAND_COLORS
self.linenos = options.get("linenos", False)
self._lineno = 0
def format(self, tokensource, outfile):
return Formatter.format(self, tokensource, outfile)
def _write_lineno(self, outfile):
self._lineno += 1
outfile.write("%s%04d: " % (self._lineno != 1 and "\n" or "", self._lineno))
def _get_color(self, ttype):
# self.colorscheme is a dict containing usually generic types, so we
# have to walk the tree of dots. The base Token type must be a key,
# even if it's empty string, as in the default above.
colors = self.colorscheme.get(ttype)
while colors is None:
ttype = ttype.parent
colors = self.colorscheme.get(ttype)
return colors[self.darkbg]
def format_unencoded(self, tokensource, outfile):
if self.linenos:
self._write_lineno(outfile)
for ttype, value in tokensource:
color = self._get_color(ttype)
for line in value.splitlines(True):
if color:
outfile.write("<%s>%s</>" % (color, line.rstrip("\n")))
else:
outfile.write(line.rstrip("\n"))
if line.endswith("\n"):
if self.linenos:
self._write_lineno(outfile)
else:
outfile.write("\n")
if self.linenos:
outfile.write("\n")
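# Example usage (illustrative sketch; output is aimed at a Cleo-style console):
#
#     from pygments import highlight
#     from pygments.lexers import SqlLexer
#     colored = highlight("SELECT * FROM users", SqlLexer(), CommandFormatter())
#
# `colored` holds the SQL wrapped in <fg=...> tags that the console renders as colors.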
|
fpn/test.py | chi3x10/RepMet | 103 | 12657010 | # --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by <NAME>
# --------------------------------------------------------
import _init_paths
#import cv2
import argparse
import os
import sys
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'
from config.config import config, update_config
def parse_args():
parser = argparse.ArgumentParser(description='Test a Faster R-CNN network')
# general
parser.add_argument('--cfg', help='experiment configure file name', required=True, type=str)
args, rest = parser.parse_known_args()
update_config(args.cfg)
# rcnn
parser.add_argument('--vis', help='turn on visualization', action='store_true')
parser.add_argument('--ignore_cache', help='ignore cached results boxes', action='store_true')
parser.add_argument('--thresh', help='valid detection threshold', default=1e-3, type=float)
parser.add_argument('--shuffle', help='shuffle data on visualization', action='store_true')
parser.add_argument('--debug', default=0, help='experiment configure file name', required=False, type=int)
parser.add_argument('--is_docker', help='test in docker mode', action='store_true')
args = parser.parse_args()
return args
args = parse_args()
curr_path = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(curr_path, '../external/mxnet', config.MXNET_VERSION))
import mxnet as mx
from function.test_rcnn import test_rcnn
from utils.create_logger import create_logger
from nms.nms import gpu_nms_wrapper
def main():
ctx = [mx.gpu(int(i)) for i in config.gpus.split(',')]
print args
gpu_nums = [int(i) for i in config.gpus.split(',')]
nms_dets = gpu_nms_wrapper(config.TEST.NMS, gpu_nums[0])
logger, final_output_path = create_logger(config.output_path, args.cfg, config.dataset.test_image_set)
output_path = os.path.join(final_output_path, '..', '+'.join([iset for iset in config.dataset.image_set.split('+')]), config.TRAIN.model_prefix)
test_rcnn(config, config.dataset.dataset, config.dataset.test_image_set, config.dataset.root_path, config.dataset.dataset_path,
ctx, output_path, config.TEST.test_epoch, args.vis, args.ignore_cache, args.shuffle, config.TEST.HAS_RPN, config.dataset.proposal,
args.thresh, logger=logger, output_path=final_output_path, nms_dets=nms_dets, is_docker=args.is_docker)
if __name__ == '__main__':
main()
|
examples/tutorials/advanced/configuration.py | jbusecke/pygmt | 326 | 12657044 | <reponame>jbusecke/pygmt
"""
Configuring PyGMT defaults
==========================
Default GMT parameters can be set globally or locally using
:class:`pygmt.config`.
"""
# sphinx_gallery_thumbnail_number = 3
import pygmt
###############################################################################
# Configuring default GMT parameters
# ----------------------------------
#
# Users can override default parameters either temporarily (locally) or
# permanently (globally) using :meth:`pygmt.config`. The full list of default
# parameters that can be changed can be found at :gmt-docs:`gmt.conf.html`.
#
# We demonstrate the usage of :meth:`pygmt.config` by configuring a map plot.
# Start with a basic figure with the default style
fig = pygmt.Figure()
fig.basemap(region=[115, 119.5, 4, 7.5], projection="M10c", frame=True)
fig.coast(land="black", water="skyblue")
fig.show()
###############################################################################
# Globally overriding defaults
# ----------------------------
#
# The ``MAP_FRAME_TYPE`` parameter specifies the style of map frame to use, of
# which there are 5 options: ``fancy`` (default, seen above), ``fancy+``,
# ``plain``, ``graph`` (which does not apply to geographical maps) and
# ``inside``.
#
# The ``FORMAT_GEO_MAP`` parameter controls the format of geographical tick
# annotations. The default uses degrees and minutes. Here we specify the ticks
# to be a decimal number of degrees.
fig = pygmt.Figure()
# Configuration for the 'current figure'.
pygmt.config(MAP_FRAME_TYPE="plain")
pygmt.config(FORMAT_GEO_MAP="ddd.xx")
fig.basemap(region=[115, 119.5, 4, 7.5], projection="M10c", frame=True)
fig.coast(land="black", water="skyblue")
fig.show()
###############################################################################
# Locally overriding defaults
# ---------------------------
#
# It is also possible to temporarily override the default parameters, which is
# very useful for limiting the scope of changes to a particular plot.
# :class:`pygmt.config` is implemented as a context manager, which handles the
# setup and teardown of a GMT session. Python users are likely familiar with
# the ``with open(...) as file:`` snippet, which returns a ``file`` context
# manager. In this way, it can be used to override a parameter for a single
# command, or a sequence of commands. An application of :class:`pygmt.config`
# as a context manager is shown below:
fig = pygmt.Figure()
# This will have a fancy+ frame
with pygmt.config(MAP_FRAME_TYPE="fancy+"):
fig.basemap(region=[115, 119.5, 4, 7.5], projection="M10c", frame=True)
fig.coast(land="black", water="skyblue")
# Shift plot origin down by 10cm to plot another map
fig.shift_origin(yshift="-10c")
# This figure retains the default "fancy" frame
fig.basemap(region=[115, 119.5, 4, 7.5], projection="M10c", frame=True)
fig.coast(land="black", water="skyblue")
fig.show()
|
recipes/Python/425044_Splitting_up_a_sequence/recipe-425044.py | tdiprima/code | 2,023 | 12657081 | <filename>recipes/Python/425044_Splitting_up_a_sequence/recipe-425044.py<gh_stars>1000+
# <NAME>
# http://gumuz.looze.net/
def split_seq(seq,size):
""" Split up seq in pieces of size """
return [seq[i:i+size] for i in range(0, len(seq), size)]
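# Example usage (illustrative):
print(split_seq(list(range(10)), 4))  # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]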
|
reconstruction-scripts/triangulation_pipeline.py | kudo1026/local-feature-refinement | 180 | 12657083 | <filename>reconstruction-scripts/triangulation_pipeline.py
import argparse
import os
import shutil
import types
from colmap_utils import generate_empty_reconstruction, import_features, triangulate
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--dataset_path', type=str, required=True,
help='path to the dataset'
)
parser.add_argument(
'--colmap_path', type=str, required=True,
help='path to the COLMAP executable folder'
)
parser.add_argument(
'--method_name', type=str, required=True,
help='name of the method'
)
parser.add_argument(
'--matches_file', type=str, required=True,
help='path to the matches file'
)
parser.add_argument(
'--solution_file', type=str, default=None,
help='path to the multi-view optimization solution file (leave None for no refinement)'
)
args = parser.parse_args()
return args
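# Example invocation (illustrative; paths and names below are placeholders):
#   python triangulation_pipeline.py \
#       --dataset_path /path/to/scene \
#       --colmap_path /usr/local/bin \
#       --method_name my-features \
#       --matches_file matches.h5 \
#       --solution_file solution.h5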
if __name__ == "__main__":
args = parse_args()
refine = (args.solution_file is not None)
# Create the extra paths.
paths = types.SimpleNamespace()
paths.database_path = os.path.join(
args.dataset_path, '%s-%s.db' % (args.method_name, 'ref' if refine else 'raw')
)
paths.image_path = os.path.join(
args.dataset_path, 'images'
)
paths.reference_model_path = os.path.join(
args.dataset_path, 'dslr_calibration_undistorted'
)
paths.match_list_path = os.path.join(
args.dataset_path, 'match-list.txt'
)
paths.empty_model_path = os.path.join(
args.dataset_path, 'sparse-%s-%s-empty' % (args.method_name, 'ref' if refine else 'raw')
)
paths.model_path = os.path.join(
args.dataset_path, 'sparse-%s-%s' % (args.method_name, 'ref' if refine else 'raw')
)
paths.ply_model_path = os.path.join(
args.dataset_path, 'sparse-%s-%s.ply' % (args.method_name, 'ref' if refine else 'raw')
)
# Create a copy of the dummy database.
if os.path.exists(paths.database_path):
raise FileExistsError(
'The database file already exists.'
)
shutil.copyfile(
os.path.join(args.dataset_path, 'database.db'),
paths.database_path
)
# Reconstruction pipeline.
_ = generate_empty_reconstruction(
paths.reference_model_path,
paths.empty_model_path
)
import_features(
args.colmap_path, args.method_name,
paths.database_path, paths.image_path, paths.match_list_path,
args.matches_file, args.solution_file
)
triangulate(
args.colmap_path,
paths.database_path, paths.image_path,
paths.empty_model_path,
paths.model_path, paths.ply_model_path
)
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/i/implicit/implicit_str_concat.py | ciskoinch8/vimrc | 463 | 12657099 | #pylint: disable=invalid-name,missing-docstring
# Basic test with a list
TEST_LIST1 = ['a' 'b'] # [implicit-str-concat]
# Testing with unicode strings in a tuple, with a comma AFTER concatenation
TEST_LIST2 = (u"a" u"b", u"c") # [implicit-str-concat]
# Testing with raw strings in a set, with a comma BEFORE concatenation
TEST_LIST3 = {r'''a''', r'''b''' r'''c'''} # [implicit-str-concat]
# Testing that only ONE warning is generated when string concatenation happens
# in the middle of a list
TEST_LIST4 = ["""a""", """b""" """c""", """d"""] # [implicit-str-concat]
# The following shouldn't raise a warning because it is a function call
print('a', 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'ccc')
# The following shouldn't raise a warning because string literals are
# on different lines
TEST_LIST5 = ('a', 'b'
'c')
# The following shouldn't raise a warning because of the escaped newline
TEST_LIST6 = ('bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb \
ccc')
# But we should emit when there is an actual juxtaposition
# +1: [implicit-str-concat]
TEST_LIST7 = ('a' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb \
ccc')
# No warning for bytes
TEST_LIST8 = [b'A' b'B']
|
fuxictr/datasets/data_utils.py | xue-pai/FuxiCTR | 144 | 12657102 | <filename>fuxictr/datasets/data_utils.py
# =========================================================================
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
import h5py
import os
import logging
import numpy as np
import gc
import glob
def save_hdf5(data_array, data_path, key="data"):
logging.info("Saving data to h5: " + data_path)
if not os.path.exists(os.path.dirname(data_path)):
os.makedirs(os.path.dirname(data_path))
with h5py.File(data_path, 'w') as hf:
hf.create_dataset(key, data=data_array)
def load_hdf5(data_path, key=None, verbose=True):
if verbose:
logging.info('Loading data from h5: ' + data_path)
with h5py.File(data_path, 'r') as hf:
if key is not None:
data_array = hf[key][:]
else:
data_array = hf[list(hf.keys())[0]][:]
return data_array
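# Illustrative round trip (file path is a placeholder):
#   save_hdf5(np.zeros((100, 10)), "../data/demo/train.h5")
#   arr = load_hdf5("../data/demo/train.h5")   # arr.shape == (100, 10)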
def split_train_test(train_ddf=None, valid_ddf=None, test_ddf=None, valid_size=0,
test_size=0, split_type="sequential"):
num_samples = len(train_ddf)
train_size = num_samples
instance_IDs = np.arange(num_samples)
if split_type == "random":
np.random.shuffle(instance_IDs)
if test_size > 0:
if test_size < 1:
test_size = int(num_samples * test_size)
train_size = train_size - test_size
test_ddf = train_ddf.loc[instance_IDs[train_size:], :].reset_index()
instance_IDs = instance_IDs[0:train_size]
if valid_size > 0:
if valid_size < 1:
valid_size = int(num_samples * valid_size)
train_size = train_size - valid_size
valid_ddf = train_ddf.loc[instance_IDs[train_size:], :].reset_index()
instance_IDs = instance_IDs[0:train_size]
if valid_size > 0 or test_size > 0:
train_ddf = train_ddf.loc[instance_IDs, :].reset_index()
return train_ddf, valid_ddf, test_ddf
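# Illustrative example of the sequential split above: with 1000 rows,
# test_size=0.2 and valid_size=0.1, the last 200 rows become the test set,
# the preceding 100 rows the validation set, and the first 700 rows stay
# in the training set. Note that fractional sizes are taken relative to the
# full number of samples, not to the remainder.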
def build_dataset(feature_encoder, train_data=None, valid_data=None, test_data=None, valid_size=0,
test_size=0, split_type="sequential", **kwargs):
""" Build feature_map and transform h5 data """
# Load csv data
train_ddf = feature_encoder.read_csv(train_data)
valid_ddf = feature_encoder.read_csv(valid_data) if valid_data else None
test_ddf = feature_encoder.read_csv(test_data) if test_data else None
# Split data for train/validation/test
if valid_size > 0 or test_size > 0:
train_ddf, valid_ddf, test_ddf = split_train_test(train_ddf, valid_ddf, test_ddf,
valid_size, test_size, split_type)
# fit and transform train_ddf
train_ddf = feature_encoder.preprocess(train_ddf)
train_array = feature_encoder.fit_transform(train_ddf, **kwargs)
block_size = int(kwargs.get("data_block_size", 0))
if block_size > 0:
block_id = 0
for idx in range(0, len(train_array), block_size):
save_hdf5(train_array[idx:(idx + block_size), :], os.path.join(feature_encoder.data_dir, 'train_part_{}.h5'.format(block_id)))
block_id += 1
else:
save_hdf5(train_array, os.path.join(feature_encoder.data_dir, 'train.h5'))
del train_array, train_ddf
gc.collect()
    # Transform valid_ddf
if valid_ddf is not None:
valid_ddf = feature_encoder.preprocess(valid_ddf)
valid_array = feature_encoder.transform(valid_ddf)
if block_size > 0:
block_id = 0
for idx in range(0, len(valid_array), block_size):
save_hdf5(valid_array[idx:(idx + block_size), :], os.path.join(feature_encoder.data_dir, 'valid_part_{}.h5'.format(block_id)))
block_id += 1
else:
save_hdf5(valid_array, os.path.join(feature_encoder.data_dir, 'valid.h5'))
del valid_array, valid_ddf
gc.collect()
    # Transform test_ddf
if test_ddf is not None:
test_ddf = feature_encoder.preprocess(test_ddf)
test_array = feature_encoder.transform(test_ddf)
if block_size > 0:
block_id = 0
for idx in range(0, len(test_array), block_size):
save_hdf5(test_array[idx:(idx + block_size), :], os.path.join(feature_encoder.data_dir, 'test_part_{}.h5'.format(block_id)))
block_id += 1
else:
save_hdf5(test_array, os.path.join(feature_encoder.data_dir, 'test.h5'))
del test_array, test_ddf
gc.collect()
logging.info("Transform csv data to h5 done.")
def h5_generator(feature_map, stage="both", train_data=None, valid_data=None, test_data=None,
batch_size=32, shuffle=True, **kwargs):
logging.info("Loading data...")
if kwargs.get("data_block_size", 0) > 0:
from ..pytorch.data_generator import DataBlockGenerator as DataGenerator
else:
from ..pytorch.data_generator import DataGenerator
train_gen = None
valid_gen = None
test_gen = None
if stage in ["both", "train"]:
train_blocks = glob.glob(train_data)
valid_blocks = glob.glob(valid_data)
assert len(train_blocks) > 0 and len(valid_blocks) > 0, "invalid data files or paths."
if len(train_blocks) > 1:
train_blocks.sort(key=lambda x: int(x.split("_")[-1].split(".")[0]))
if len(valid_blocks) > 1:
valid_blocks.sort(key=lambda x: int(x.split("_")[-1].split(".")[0]))
train_gen = DataGenerator(train_blocks, batch_size=batch_size, shuffle=shuffle, **kwargs)
valid_gen = DataGenerator(valid_blocks, batch_size=batch_size, shuffle=False, **kwargs)
logging.info("Train samples: total/{:d}, pos/{:.0f}, neg/{:.0f}, ratio/{:.2f}%, blocks/{:.0f}" \
.format(train_gen.num_samples, train_gen.num_positives, train_gen.num_negatives,
100. * train_gen.num_positives / train_gen.num_samples, train_gen.num_blocks))
logging.info("Validation samples: total/{:d}, pos/{:.0f}, neg/{:.0f}, ratio/{:.2f}%, blocks/{:.0f}" \
.format(valid_gen.num_samples, valid_gen.num_positives, valid_gen.num_negatives,
100. * valid_gen.num_positives / valid_gen.num_samples, valid_gen.num_blocks))
if stage == "train":
logging.info("Loading train data done.")
return train_gen, valid_gen
if stage in ["both", "test"]:
test_blocks = glob.glob(test_data)
if len(test_blocks) > 0:
if len(test_blocks) > 1:
test_blocks.sort(key=lambda x: int(x.split("_")[-1].split(".")[0]))
test_gen = DataGenerator(test_blocks, batch_size=batch_size, shuffle=False, **kwargs)
logging.info("Test samples: total/{:d}, pos/{:.0f}, neg/{:.0f}, ratio/{:.2f}%, blocks/{:.0f}" \
.format(test_gen.num_samples, test_gen.num_positives, test_gen.num_negatives,
100. * test_gen.num_positives / test_gen.num_samples, test_gen.num_blocks))
if stage == "test":
logging.info("Loading test data done.")
return test_gen
logging.info("Loading data done.")
return train_gen, valid_gen, test_gen
def tfrecord_generator():
raise NotImplementedError()
|
tests/test_code_generation_options.py | patrick-gxg/mashumaro | 394 | 12657122 | <reponame>patrick-gxg/mashumaro
from dataclasses import dataclass, field
from typing import Optional
from mashumaro import DataClassDictMixin, field_options
from mashumaro.config import (
TO_DICT_ADD_BY_ALIAS_FLAG,
TO_DICT_ADD_OMIT_NONE_FLAG,
BaseConfig,
)
@dataclass
class A(DataClassDictMixin):
x: Optional[int] = None
class Config(BaseConfig):
aliases = {"x": "x_alias"}
code_generation_options = [
TO_DICT_ADD_OMIT_NONE_FLAG,
TO_DICT_ADD_BY_ALIAS_FLAG,
]
@dataclass
class B(DataClassDictMixin):
a: Optional[A] = None
class Config(BaseConfig):
aliases = {"a": "a_alias"}
code_generation_options = [
TO_DICT_ADD_OMIT_NONE_FLAG,
TO_DICT_ADD_BY_ALIAS_FLAG,
]
def test_passing_flags_if_parent_has_them():
@dataclass
class WithFlags(DataClassDictMixin):
b: B
class Config(BaseConfig):
code_generation_options = [
TO_DICT_ADD_OMIT_NONE_FLAG,
TO_DICT_ADD_BY_ALIAS_FLAG,
]
assert WithFlags.from_dict({"b": {"a": {"x": None}}}) == WithFlags(
b=B(a=None)
)
obj = WithFlags.from_dict({"b": {"a_alias": {"x": None}}})
assert obj == WithFlags(b=B(a=A(x=None)))
assert obj.to_dict() == {"b": {"a": {"x": None}}}
assert obj.to_dict(by_alias=True) == {"b": {"a_alias": {"x_alias": None}}}
assert obj.to_dict(by_alias=True, omit_none=True) == {"b": {"a_alias": {}}}
def test_passing_flags_if_parent_does_not_have_them():
@dataclass
class WithoutFlags(DataClassDictMixin):
b: B
assert WithoutFlags.from_dict({"b": {"a": {"x": None}}}) == WithoutFlags(
b=B(a=None)
)
obj = WithoutFlags.from_dict({"b": {"a_alias": {"x": None}}})
assert obj == WithoutFlags(b=B(a=A(x=None)))
assert obj.to_dict() == {"b": {"a": {"x": None}}}
|
cvnets/models/classification/config/mobilevit.py | apple/ml-cvnets | 209 | 12657139 | <filename>cvnets/models/classification/config/mobilevit.py
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2022 Apple Inc. All Rights Reserved.
#
from typing import Dict
from utils import logger
def get_configuration(opts) -> Dict:
mode = getattr(opts, "model.classification.mit.mode", "small")
if mode is None:
logger.error("Please specify mode")
head_dim = getattr(opts, "model.classification.mit.head_dim", None)
num_heads = getattr(opts, "model.classification.mit.number_heads", 4)
if head_dim is not None:
if num_heads is not None:
logger.error(
"--model.classification.mit.head-dim and --model.classification.mit.number-heads "
"are mutually exclusive."
)
elif num_heads is not None:
if head_dim is not None:
logger.error(
"--model.classification.mit.head-dim and --model.classification.mit.number-heads "
"are mutually exclusive."
)
mode = mode.lower()
if mode == "xx_small":
mv2_exp_mult = 2
config = {
"layer1": {
"out_channels": 16,
"expand_ratio": mv2_exp_mult,
"num_blocks": 1,
"stride": 1,
"block_type": "mv2",
},
"layer2": {
"out_channels": 24,
"expand_ratio": mv2_exp_mult,
"num_blocks": 3,
"stride": 2,
"block_type": "mv2",
},
"layer3": { # 28x28
"out_channels": 48,
"transformer_channels": 64,
"ffn_dim": 128,
"transformer_blocks": 2,
"patch_h": 2, # 8,
"patch_w": 2, # 8,
"stride": 2,
"mv_expand_ratio": mv2_exp_mult,
"head_dim": head_dim,
"num_heads": num_heads,
"block_type": "mobilevit",
},
"layer4": { # 14x14
"out_channels": 64,
"transformer_channels": 80,
"ffn_dim": 160,
"transformer_blocks": 4,
"patch_h": 2, # 4,
"patch_w": 2, # 4,
"stride": 2,
"mv_expand_ratio": mv2_exp_mult,
"head_dim": head_dim,
"num_heads": num_heads,
"block_type": "mobilevit",
},
"layer5": { # 7x7
"out_channels": 80,
"transformer_channels": 96,
"ffn_dim": 192,
"transformer_blocks": 3,
"patch_h": 2,
"patch_w": 2,
"stride": 2,
"mv_expand_ratio": mv2_exp_mult,
"head_dim": head_dim,
"num_heads": num_heads,
"block_type": "mobilevit",
},
"last_layer_exp_factor": 4,
}
elif mode == "x_small":
mv2_exp_mult = 4
config = {
"layer1": {
"out_channels": 32,
"expand_ratio": mv2_exp_mult,
"num_blocks": 1,
"stride": 1,
"block_type": "mv2",
},
"layer2": {
"out_channels": 48,
"expand_ratio": mv2_exp_mult,
"num_blocks": 3,
"stride": 2,
"block_type": "mv2",
},
"layer3": { # 28x28
"out_channels": 64,
"transformer_channels": 96,
"ffn_dim": 192,
"transformer_blocks": 2,
"patch_h": 2,
"patch_w": 2,
"stride": 2,
"mv_expand_ratio": mv2_exp_mult,
"head_dim": head_dim,
"num_heads": num_heads,
"block_type": "mobilevit",
},
"layer4": { # 14x14
"out_channels": 80,
"transformer_channels": 120,
"ffn_dim": 240,
"transformer_blocks": 4,
"patch_h": 2,
"patch_w": 2,
"stride": 2,
"mv_expand_ratio": mv2_exp_mult,
"head_dim": head_dim,
"num_heads": num_heads,
"block_type": "mobilevit",
},
"layer5": { # 7x7
"out_channels": 96,
"transformer_channels": 144,
"ffn_dim": 288,
"transformer_blocks": 3,
"patch_h": 2,
"patch_w": 2,
"stride": 2,
"mv_expand_ratio": mv2_exp_mult,
"head_dim": head_dim,
"num_heads": num_heads,
"block_type": "mobilevit",
},
"last_layer_exp_factor": 4,
}
elif mode == "small":
mv2_exp_mult = 4
config = {
"layer1": {
"out_channels": 32,
"expand_ratio": mv2_exp_mult,
"num_blocks": 1,
"stride": 1,
"block_type": "mv2",
},
"layer2": {
"out_channels": 64,
"expand_ratio": mv2_exp_mult,
"num_blocks": 3,
"stride": 2,
"block_type": "mv2",
},
"layer3": { # 28x28
"out_channels": 96,
"transformer_channels": 144,
"ffn_dim": 288,
"transformer_blocks": 2,
"patch_h": 2,
"patch_w": 2,
"stride": 2,
"mv_expand_ratio": mv2_exp_mult,
"head_dim": head_dim,
"num_heads": num_heads,
"block_type": "mobilevit",
},
"layer4": { # 14x14
"out_channels": 128,
"transformer_channels": 192,
"ffn_dim": 384,
"transformer_blocks": 4,
"patch_h": 2,
"patch_w": 2,
"stride": 2,
"mv_expand_ratio": mv2_exp_mult,
"head_dim": head_dim,
"num_heads": num_heads,
"block_type": "mobilevit",
},
"layer5": { # 7x7
"out_channels": 160,
"transformer_channels": 240,
"ffn_dim": 480,
"transformer_blocks": 3,
"patch_h": 2,
"patch_w": 2,
"stride": 2,
"mv_expand_ratio": mv2_exp_mult,
"head_dim": head_dim,
"num_heads": num_heads,
"block_type": "mobilevit",
},
"last_layer_exp_factor": 4,
}
else:
raise NotImplementedError
return config
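# Illustrative usage sketch (how `opts` is built here is an assumption; in
# cvnets it normally comes from the framework's own option parser):
#
#     import argparse
#     opts = argparse.Namespace()
#     setattr(opts, "model.classification.mit.mode", "small")
#     setattr(opts, "model.classification.mit.head_dim", None)
#     setattr(opts, "model.classification.mit.number_heads", 4)
#     config = get_configuration(opts)  # config["layer5"]["out_channels"] == 160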
|
libraries/botbuilder-dialogs/tests/test_prompt_validator_context.py | Fl4v/botbuilder-python | 388 | 12657194 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import aiounittest
from botbuilder.dialogs import DialogSet
from botbuilder.core import MemoryStorage, ConversationState
class PromptValidatorContextTests(aiounittest.AsyncTestCase):
async def test_prompt_validator_context_end(self):
storage = MemoryStorage()
conv = ConversationState(storage)
accessor = conv.create_property("dialogstate")
dialog_set = DialogSet(accessor)
self.assertNotEqual(dialog_set, None)
# TODO: Add TestFlow
def test_prompt_validator_context_retry_end(self):
storage = MemoryStorage()
conv = ConversationState(storage)
accessor = conv.create_property("dialogstate")
dialog_set = DialogSet(accessor)
self.assertNotEqual(dialog_set, None)
# TODO: Add TestFlow
# All require Testflow!
|
conftest.py | kratz00/irc | 362 | 12657221 | <filename>conftest.py
import sys
import fnmatch
import os
collect_ignore = ["setup.py"]
if sys.version_info < (3, 5):
for root, dirnames, filenames in os.walk('.'):
for filename in fnmatch.filter(filenames, '*aio.py'):
collect_ignore.append(os.path.join(root, filename))
|
evennia/contrib/menu_login.py | Jaykingamez/evennia | 1,544 | 12657225 | <reponame>Jaykingamez/evennia
"""
A login menu using EvMenu.
Contribution - Vincent-lg 2016, Griatch 2019 (rework for modern EvMenu)
This changes the Evennia login to ask for the account name and password in
sequence instead of requiring you to enter both at once.
To install, add this line to the settings file (`mygame/server/conf/settings.py`):
CMDSET_UNLOGGEDIN = "evennia.contrib.menu_login.UnloggedinCmdSet"
Reload the server and the new connection method will be active. Note that you must
independently change the connection screen to match this login style, by editing
`mygame/server/conf/connection_screens.py`.
This uses Evennia's menu system EvMenu and is triggered by a command that is
called automatically when a new user connects.
"""
from django.conf import settings
from evennia import Command, CmdSet
from evennia import syscmdkeys
from evennia.utils.evmenu import EvMenu
from evennia.utils.utils import random_string_from_module, class_from_module, callables_from_module
_CONNECTION_SCREEN_MODULE = settings.CONNECTION_SCREEN_MODULE
_GUEST_ENABLED = settings.GUEST_ENABLED
_ACCOUNT = class_from_module(settings.BASE_ACCOUNT_TYPECLASS)
_GUEST = class_from_module(settings.BASE_GUEST_TYPECLASS)
_ACCOUNT_HELP = (
"Enter the name you used to log into the game before, " "or a new account-name if you are new."
)
_PASSWORD_HELP = (
"Password should be a minimum of 8 characters (preferably longer) and "
"can contain a mix of letters, spaces, digits and @/./+/-/_/'/, only."
)
# Menu nodes
def _show_help(caller, raw_string, **kwargs):
"""Echo help message, then re-run node that triggered it"""
help_entry = kwargs["help_entry"]
caller.msg(help_entry)
return None # re-run calling node
def node_enter_username(caller, raw_text, **kwargs):
"""
Start node of menu
Start login by displaying the connection screen and ask for a user name.
"""
def _check_input(caller, username, **kwargs):
"""
'Goto-callable', set up to be called from the _default option below.
Called when user enters a username string. Check if this username already exists and set the flag
'new_user' if not. Will also directly login if the username is 'guest'
and GUEST_ENABLED is True.
The return from this goto-callable determines which node we go to next
and what kwarg it will be called with.
"""
username = username.rstrip("\n")
if username == "guest" and _GUEST_ENABLED:
# do an immediate guest login
session = caller
address = session.address
account, errors = _GUEST.authenticate(ip=address)
if account:
return "node_quit_or_login", {"login": True, "account": account}
else:
session.msg("|R{}|n".format("\n".join(errors)))
return None # re-run the username node
try:
_ACCOUNT.objects.get(username__iexact=username)
except _ACCOUNT.DoesNotExist:
new_user = True
else:
new_user = False
# pass username/new_user into next node as kwargs
return "node_enter_password", {"new_user": new_user, "username": username}
callables = callables_from_module(_CONNECTION_SCREEN_MODULE)
if "connection_screen" in callables:
connection_screen = callables["connection_screen"]()
else:
connection_screen = random_string_from_module(_CONNECTION_SCREEN_MODULE)
if _GUEST_ENABLED:
text = "Enter a new or existing user name to login (write 'guest' for a guest login):"
else:
text = "Enter a new or existing user name to login:"
text = "{}\n\n{}".format(connection_screen, text)
options = (
{"key": "", "goto": "node_enter_username"},
{"key": ("quit", "q"), "goto": "node_quit_or_login"},
{"key": ("help", "h"), "goto": (_show_help, {"help_entry": _ACCOUNT_HELP, **kwargs})},
{"key": "_default", "goto": _check_input},
)
return text, options
def node_enter_password(caller, raw_string, **kwargs):
"""
Handle password input.
"""
def _check_input(caller, password, **kwargs):
"""
'Goto-callable', set up to be called from the _default option below.
Called when user enters a password string. Check username + password
viability. If it passes, the account will have been created and login
will be initiated.
The return from this goto-callable determines which node we go to next
and what kwarg it will be called with.
"""
# these flags were set by the goto-callable
username = kwargs["username"]
new_user = kwargs["new_user"]
        password = password.rstrip("\n")
session = caller
address = session.address
if new_user:
# create a new account
account, errors = _ACCOUNT.create(
username=username, password=password, ip=address, session=session
)
else:
# check password against existing account
account, errors = _ACCOUNT.authenticate(
username=username, password=password, ip=address, session=session
)
if account:
if new_user:
session.msg("|gA new account |c{}|g was created. Welcome!|n".format(username))
# pass login info to login node
return "node_quit_or_login", {"login": True, "account": account}
else:
# restart due to errors
session.msg("|R{}".format("\n".join(errors)))
kwargs["retry_password"] = True
return "node_enter_password", kwargs
def _restart_login(caller, *args, **kwargs):
caller.msg("|yCancelled login.|n")
return "node_enter_username"
username = kwargs["username"]
if kwargs["new_user"]:
if kwargs.get("retry_password"):
# Attempting to fix password
text = "Enter a new password:"
else:
text = "Creating a new account |c{}|n. " "Enter a password (empty to abort):".format(
username
)
else:
text = "Enter the password for account |c{}|n (empty to abort):".format(username)
options = (
{"key": "", "goto": _restart_login},
{"key": ("quit", "q"), "goto": "node_quit_or_login"},
{"key": ("help", "h"), "goto": (_show_help, {"help_entry": _PASSWORD_HELP, **kwargs})},
{"key": "_default", "goto": (_check_input, kwargs)},
)
return text, options
def node_quit_or_login(caller, raw_text, **kwargs):
"""
Exit menu, either by disconnecting or logging in.
"""
session = caller
if kwargs.get("login"):
account = kwargs.get("account")
session.msg("|gLogging in ...|n")
session.sessionhandler.login(session, account)
else:
session.sessionhandler.disconnect(session, "Goodbye! Logging off.")
return "", {}
# EvMenu helper function
def _node_formatter(nodetext, optionstext, caller=None):
"""Do not display the options, only the text.
This function is used by EvMenu to format the text of nodes. The menu login
is just a series of prompts so we disable all automatic display decoration
and let the nodes handle everything on their own.
"""
return nodetext
# Commands and CmdSets
class UnloggedinCmdSet(CmdSet):
"Cmdset for the unloggedin state"
key = "DefaultUnloggedin"
priority = 0
def at_cmdset_creation(self):
"Called when cmdset is first created."
self.add(CmdUnloggedinLook())
class CmdUnloggedinLook(Command):
"""
An unloggedin version of the look command. This is called by the server
when the account first connects. It sets up the menu before handing off
to the menu's own look command.
"""
    key = syscmdkeys.CMD_LOGINSTART
locks = "cmd:all()"
arg_regex = r"^$"
def func(self):
"""
Run the menu using the nodes in this module.
"""
EvMenu(
self.caller,
"evennia.contrib.menu_login",
startnode="node_enter_username",
auto_look=False,
auto_quit=False,
cmd_on_exit=None,
node_formatter=_node_formatter,
)
|
hummingbot/core/web_assistant/connections/data_types.py | BGTCapital/hummingbot | 3,027 | 12657227 | from abc import abstractmethod, ABC
from dataclasses import dataclass
from enum import Enum
from typing import Any, Mapping, Optional
import aiohttp
import ujson
class RESTMethod(Enum):
GET = "GET"
POST = "POST"
PUT = "PUT"
DELETE = "DELETE"
def __str__(self):
obj_str = repr(self)
return obj_str
def __repr__(self):
return self.value
@dataclass
class RESTRequest:
method: RESTMethod
url: Optional[str] = None
params: Optional[Mapping[str, str]] = None
data: Any = None
headers: Optional[Mapping[str, str]] = None
is_auth_required: bool = False
throttler_limit_id: Optional[str] = None
@dataclass
class EndpointRESTRequest(RESTRequest, ABC):
    """This request class enables the user to provide either a complete URL or simply an endpoint.
The endpoint is concatenated with the return value of `base_url`. It can handle endpoints supplied both as
`"endpoint"` and `"/endpoint"`. It also provides the necessary checks to ensure a valid URL can be constructed.
"""
endpoint: Optional[str] = None
def __post_init__(self):
self._ensure_url()
self._ensure_params()
self._ensure_data()
@property
@abstractmethod
def base_url(self) -> str:
...
def _ensure_url(self):
if self.url is None and self.endpoint is None:
raise ValueError("Either the full url or the endpoint must be specified.")
if self.url is None:
if self.endpoint.startswith("/"):
self.url = f"{self.base_url}{self.endpoint}"
else:
self.url = f"{self.base_url}/{self.endpoint}"
def _ensure_params(self):
if self.method == RESTMethod.POST:
if self.params is not None:
raise ValueError("POST requests should not use `params`. Use `data` instead.")
def _ensure_data(self):
if self.method == RESTMethod.POST:
if self.data is not None:
self.data = ujson.dumps(self.data)
elif self.data is not None:
raise ValueError(
"The `data` field should be used only for POST requests. Use `params` instead."
)
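# Illustrative sketch (not part of the library): a concrete subclass only has
# to provide `base_url`; the endpoint/params/data validation above is inherited.
#
#     @dataclass
#     class ExampleRESTRequest(EndpointRESTRequest):
#         @property
#         def base_url(self) -> str:
#             return "https://api.example.com/v1"
#
#     request = ExampleRESTRequest(method=RESTMethod.GET, endpoint="time")
#     # request.url == "https://api.example.com/v1/time"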
@dataclass(init=False)
class RESTResponse:
url: str
method: RESTMethod
status: int
headers: Optional[Mapping[str, str]]
def __init__(self, aiohttp_response: aiohttp.ClientResponse):
self._aiohttp_response = aiohttp_response
@property
def url(self) -> str:
url_str = str(self._aiohttp_response.url)
return url_str
@property
def method(self) -> RESTMethod:
method_ = RESTMethod[self._aiohttp_response.method.upper()]
return method_
@property
def status(self) -> int:
status_ = int(self._aiohttp_response.status)
return status_
@property
def headers(self) -> Optional[Mapping[str, str]]:
headers_ = self._aiohttp_response.headers
return headers_
async def json(self) -> Any:
json_ = await self._aiohttp_response.json()
return json_
async def text(self) -> str:
text_ = await self._aiohttp_response.text()
return text_
@dataclass
class WSRequest:
payload: Mapping[str, Any]
throttler_limit_id: Optional[str] = None
is_auth_required: bool = False
@dataclass
class WSResponse:
data: Any
|
OpenCLGA/utils.py | czarnobylu/OpenCLGA | 112 | 12657238 | #!/usr/bin/python3
import random
from math import pi, sqrt, asin, cos, sin, pow
def get_local_IP():
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 1))
ip = s.getsockname()[0]
s.close()
return ip
def get_testing_params():
return 20, 200, 5000
def init_testing_rand_seed():
random.seed(119)
def calc_linear_distance(x1, y1, x2, y2):
return sqrt((x2 - x1)**2 + (y2 - y1)**2)
def calc_spherical_distance(x1, y1, x2, y2):
def rad(deg):
return deg * pi / 180.0
rad_x1 = rad(x1)
rad_x2 = rad(x2)
a = rad_x1 - rad_x2
b = rad(y1) - rad(y2)
s = 2 * asin(sqrt(pow(sin(a/2),2)+cos(rad_x1)*cos(rad_x2)*pow(sin(b/2),2)))
s = s * 6378.137
s = round( s * 10000 ) / 10000
return s
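# Illustrative examples:
#   calc_linear_distance(0, 0, 3, 4) == 5.0
#   calc_spherical_distance(lat1, lon1, lat2, lon2) gives the great-circle
#   distance in kilometres (6378.137 is the Earth's equatorial radius in km).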
def plot_tsp_result(city_info, city_ids):
import matplotlib.pyplot as plt
x = []
y = []
for c_id in city_ids:
x.append(city_info[c_id][0])
y.append(city_info[c_id][1])
x.append(x[0])
y.append(y[0])
plt.plot(x, y, 'ro-')
plt.ylabel('y')
plt.xlabel('x')
plt.grid(True)
plt.show()
def plot_grouping_result(group_id_set, group_ids, city_info):
assert len(group_id_set) != 0
import matplotlib.pyplot as plt
markers = ['p', '*', '+', 'x', 'd', 'o', 'v', 's', 'h']
colors = [(random.random(), random.random(), random.random()) for x in range(len(group_id_set))]
while len(group_id_set) > 0:
group_id = group_id_set.pop()
clr = colors.pop()
makr = markers[random.randint(0, len(markers)-1)]
x = []
y = []
for idx, gid in enumerate(group_ids):
if gid == group_id:
x.append(city_info[idx][0])
y.append(city_info[idx][1])
plt.plot(x, y, color=clr, marker=makr)
plt.ylabel('y')
plt.xlabel('x')
plt.grid(True)
plt.show()
def plot_ga_result(statistics):
import matplotlib.pyplot as plt
gen = []
bests = []
worsts = []
avgs = []
avg_time_per_gen = 0
for key, value in statistics.items():
if key != 'avg_time_per_gen':
gen.append(key)
bests.append(value['best'])
worsts.append(value['worst'])
avgs.append(value['avg'])
elif key == 'avg_time_per_gen':
avg_time_per_gen = value
arrow_idx = int(len(gen) * 0.7)
arrow_x = gen[arrow_idx]
arrow_y = bests[arrow_idx]
plt.plot(gen, bests, 'g-')
plt.annotate('best', xy=(arrow_x, arrow_y,))
arrow_y = worsts[arrow_idx]
plt.plot(gen, worsts, 'r-')
plt.annotate('worst', xy=(arrow_x, arrow_y))
arrow_y = avgs[arrow_idx]
plt.plot(gen, avgs, 'b-')
plt.annotate('avg', xy=(arrow_x, arrow_y))
plt.ylabel('Fitness')
plt.xlabel('Generation')
xmin, xmax, ymin, ymax = plt.axis()
textX = abs(xmax - xmin) * 0.1
textY = abs(ymax) * 0.95
plt.text(textX, textY, 'avg time per gen: %f (sec.)'%(avg_time_per_gen))
plt.grid(True)
plt.show()
def calculate_estimated_kernel_usage(prog, ctx, kernel_name):
try:
import pyopencl as cl
from pyopencl import context_info as ci
from pyopencl import kernel_work_group_info as kwgi
devices = ctx.get_info(ci.DEVICES)
assert len(devices) == 1, 'Should only one device is used !'
device = devices[0]
# for name in kernel_names:
kernel = cl.Kernel(prog, kernel_name)
# gws = kernel.get_work_group_info(kwgi.GLOBAL_WORK_SIZE, device)
lm = kernel.get_work_group_info(kwgi.LOCAL_MEM_SIZE, device)
pm = kernel.get_work_group_info(kwgi.PRIVATE_MEM_SIZE, device)
cwgs = kernel.get_work_group_info(kwgi.COMPILE_WORK_GROUP_SIZE, device)
pwgsm = kernel.get_work_group_info(kwgi.PREFERRED_WORK_GROUP_SIZE_MULTIPLE, device)
print('For kernel "{}" running on device {}:'.format(kernel.function_name, device.name))
# print('\t Max work size: {}'.format(gws))
print('\t Max work-group size: {}'.format(cwgs))
print('\t Recommended work-group multiple: {}'.format(pwgsm))
print('\t Local mem used: {} of {}'.format(lm, device.local_mem_size))
print('\t Private mem used: {}'.format(pm))
return cwgs, pwgsm, lm, pm
except:
import traceback
traceback.print_exc()
return None, None, None, None
|
recipes/Python/577882_Convert_nested_Pythdatstructure/recipe-577882.py | tdiprima/code | 2,023 | 12657297 | <reponame>tdiprima/code
import lxml.etree as et
def data2xml(d, name='data'):
r = et.Element(name)
return et.tostring(buildxml(r, d))
def buildxml(r, d):
if isinstance(d, dict):
for k, v in d.iteritems():
s = et.SubElement(r, k)
buildxml(s, v)
elif isinstance(d, tuple) or isinstance(d, list):
for v in d:
s = et.SubElement(r, 'i')
buildxml(s, v)
elif isinstance(d, basestring):
r.text = d
else:
r.text = str(d)
return r
print data2xml({'a':[1,2,('c',{'d':'e'})],'f':'g'})
# <data><a><i>1</i><i>2</i><i><i>c</i><i><d>e</d></i></i></a><f>g</f></data>
|
src/ralph/operations/migrations/0008_auto_20170331_0952.py | DoNnMyTh/ralph | 1,668 | 12657319 | <reponame>DoNnMyTh/ralph
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('operations', '0007_auto_20170328_1728'),
]
operations = [
migrations.AlterField(
model_name='operation',
name='assignee',
field=models.ForeignKey(verbose_name='assignee', blank=True, to=settings.AUTH_USER_MODEL, null=True, related_name='operations', on_delete=django.db.models.deletion.PROTECT),
),
]
|
lldb/test/API/lang/objc/conflicting-class-list-function-from-user/TestObjCClassListFunctionFromUser.py | mkinsner/llvm | 2,338 | 12657324 | <gh_stars>1000+
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipUnlessDarwin
# LLDB ends up calling the user-defined function (but at least doesn't
# crash).
@expectedFailureDarwin
def test(self):
"""
Tests LLDB's behaviour if the user defines their own conflicting
objc_copyRealizedClassList_nolock function.
"""
self.build()
lldbutil.run_to_source_breakpoint(self, "// break here", lldb.SBFileSpec("main.m"))
# Get the (dynamic) type of our 'id' variable so that our Objective-C
# runtime information is updated.
str_val = self.expect_expr("custom_class")
dyn_val = str_val.GetDynamicValue(lldb.eDynamicCanRunTarget)
# We should have retrieved the proper class list even in presence of
# the user-defined function.
self.assertEqual(dyn_val.GetTypeName(), "CustomClass *")
|
linkedrw/linkedw/__init__.py | CypherAk007/LinkedRW | 114 | 12657380 | <gh_stars>100-1000
from .website import make_website_files
|
tests/test_colors_config.py | rohankumardubey/chartify | 3,111 | 12657388 | <reponame>rohankumardubey/chartify<gh_stars>1000+
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import os
COLORS_CONFIG = '''\
? !!python/tuple
- 0
- 100
- 80
: Foo
? !!python/tuple
- 25
- 20
- 20
: Bar
? !!python/tuple
- 25
- 230
- 140
: Baz
'''
EXPECTED_COLORS = {
'style.color_palette_categorical': 'My Palette',
'style.color_palette_sequential': 'Midnight Orange Sequential',
'style.color_palette_diverging': 'Midnight Orange Diverging',
'style.color_palette_accent': 'My Palette',
'style.color_palette_accent_default_color': 'light grey',
}
def test_colors_config(monkeypatch, tmpdir):
f = tmpdir.join('colors_config.yaml')
f.write(COLORS_CONFIG)
# XXX (dano): CHARTIFY_CONFIG_DIR must end with /
monkeypatch.setenv('CHARTIFY_CONFIG_DIR', os.path.join(str(tmpdir), ''))
# reload modules to reload configuration
import chartify._core.options
import chartify._core.colors
import chartify._core.style
importlib.reload(chartify._core.options)
importlib.reload(chartify._core.colors)
import chartify._core.colour as colour
assert colour.COLOR_NAME_TO_RGB['foo'] == (0, 100, 80)
assert colour.COLOR_NAME_TO_RGB['bar'] == (25, 20, 20)
assert colour.COLOR_NAME_TO_RGB['baz'] == (25, 230, 140)
|
reddit2telegram/channels/r_indiaa/app.py | mainyordle/reddit2telegram | 187 | 12657399 | #encoding:utf-8
subreddit = 'india'
t_channel = '@r_indiaa'
def send_post(submission, r2t):
return r2t.send_simple(submission)
|
packages/engine/src/worker/runner/python/fbs/Package.py | mschrader15/hash | 219 | 12657404 | <gh_stars>100-1000
# automatically generated by the FlatBuffers compiler, do not modify
# namespace:
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class Package(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Package()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsPackage(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
# Package
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Package
def Type(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# Package
def Name(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Package
def Sid(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# Package
def InitPayload(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from Serialized import Serialized
obj = Serialized()
obj.Init(self._tab.Bytes, x)
return obj
return None
def Start(builder): builder.StartObject(4)
def PackageStart(builder):
"""This method is deprecated. Please switch to Start."""
return Start(builder)
def AddType(builder, type): builder.PrependInt8Slot(0, type, 0)
def PackageAddType(builder, type):
"""This method is deprecated. Please switch to AddType."""
return AddType(builder, type)
def AddName(builder, name): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def PackageAddName(builder, name):
"""This method is deprecated. Please switch to AddName."""
return AddName(builder, name)
def AddSid(builder, sid): builder.PrependUint64Slot(2, sid, 0)
def PackageAddSid(builder, sid):
"""This method is deprecated. Please switch to AddSid."""
return AddSid(builder, sid)
def AddInitPayload(builder, initPayload): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(initPayload), 0)
def PackageAddInitPayload(builder, initPayload):
"""This method is deprecated. Please switch to AddInitPayload."""
return AddInitPayload(builder, initPayload)
def End(builder): return builder.EndObject()
def PackageEnd(builder):
"""This method is deprecated. Please switch to End."""
return End(builder) |
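# A minimal usage sketch for the generated reader/writer above; it assumes the
# generated Package.py (and Serialized.py) are importable as top-level modules,
# and the name, type code and sid values are made-up placeholders.
import flatbuffers
import Package

builder = flatbuffers.Builder(0)
name = builder.CreateString("state")      # strings must be created before Start()
Package.Start(builder)
Package.AddType(builder, 1)               # placeholder type code
Package.AddName(builder, name)
Package.AddSid(builder, 42)               # placeholder package sid
pkg = Package.End(builder)
builder.Finish(pkg)

buf = builder.Output()
root = Package.Package.GetRootAs(buf, 0)
print(root.Name(), root.Sid())            # b'state' 42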
chapter14/write_mindrecord.py | mindspore-ai/book | 165 | 12657431 | <reponame>mindspore-ai/book<gh_stars>100-1000
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import uuid
from mindspore.mindrecord import FileWriter, FileReader
from utils import get_data
MINDRECORD_FILE_NAME = "./imagenet.mindrecord"
def write_mindrecord_tutorial():
writer = FileWriter(MINDRECORD_FILE_NAME)
data = get_data("./ImageNetDataSimulation")
schema_json = {"file_name": {"type": "string"},
"label": {"type": "int64"},
"data": {"type": "bytes"}}
writer.add_schema(schema_json, "img_schema")
writer.add_index(["file_name", "label"])
writer.write_raw_data(data)
writer.commit()
reader = FileReader(MINDRECORD_FILE_NAME)
count = 0
for index, x in enumerate(reader.get_next()):
assert len(x) == 3
count = count + 1
# print("#item {}: {}".format(index, x))
assert count == 20
reader.close()
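# get_data() comes from a local utils module that is not shown here; for the
# writer above it only needs to return a list of dicts matching the declared
# schema. A hypothetical stand-in (the directory argument is ignored, and the
# file names and bytes are made up):
def get_data_stub(image_dir):
    samples = []
    for label, file_name in enumerate(["00001.jpg", "00002.jpg"]):
        samples.append({"file_name": file_name,
                        "label": label,                            # int64 field
                        "data": b"\xff\xd8\xff fake jpeg bytes"})  # bytes field
    return samples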
if __name__ == '__main__':
write_mindrecord_tutorial()
os.remove(MINDRECORD_FILE_NAME)
os.remove(MINDRECORD_FILE_NAME + ".db")
|
examples/05_utc_and_timezone.py | Saketh-Chandra/blynk-library-python | 219 | 12657446 | <reponame>Saketh-Chandra/blynk-library-python<filename>examples/05_utc_and_timezone.py
"""
Blynk is a platform with iOS and Android apps to control
Arduino, Raspberry Pi and the likes over the Internet.
You can easily build graphic interfaces for all your
projects by simply dragging and dropping widgets.
Downloads, docs, tutorials: http://www.blynk.cc
Sketch generator: http://examples.blynk.cc
Blynk community: http://community.blynk.cc
Social networks: http://www.fb.com/blynkapp
http://twitter.com/blynk_app
This example shows how to get UTC time and Timezone info
"""
import BlynkLib
import time
BLYNK_AUTH = '<PASSWORD>AuthToken'
# Initialize Blynk
blynk = BlynkLib.Blynk(BLYNK_AUTH)
@blynk.on("connected")
def blynk_connected(ping):
print('Blynk ready. Ping:', ping, 'ms')
blynk.send_internal("utc", "time")
blynk.send_internal("utc", "tz_name")
@blynk.on("disconnected")
def blynk_disconnected():
print('Blynk disconnected')
@blynk.on("internal:utc")
def on_utc(value):
if value[0] == "time":
ts = int(value[1])//1000
# on embedded systems, you may need to subtract time difference between 1970 and 2000
#ts -= 946684800
tm = time.gmtime(ts)
print("UTC time: ", time.asctime(tm))
elif value[0] == "tz_name":
print("Timezone: ", value[1])
while True:
blynk.run()
|
Python/Tests/fullanalysistest.py | techkey/PTVS | 404 | 12657449 | <filename>Python/Tests/fullanalysistest.py
from __future__ import print_function
import os
import re
import subprocess
import sys
SCRIPT_NAME = 'script{sys.version_info[0]}{sys.version_info[1]}.rsp'
TEMPLATE = r'''python {sys.version_info[0]}.{sys.version_info[1]} {sys.executable}
logs logs
module * {sys.prefix}\Lib\site-packages\{module}\**\*.py
enqueue *
analyze
'''
VERSION = '.'.join(str(i) for i in sys.version_info[:2])
if len(sys.argv) < 3:
print('Usage:', sys.argv[0], '<path to AnalysisMemoryTester.exe> <output path>', file=sys.stderr)
sys.exit(1)
if sys.version_info[0] == 2:
import threading
def wait(p, timeout):
t = threading.Timer(timeout, p.kill)
t.daemon = True
t.start()
p.wait()
t.cancel()
else:
def wait(p, timeout):
p.wait(timeout)
TOOL = os.path.abspath(sys.argv[1])
OUTDIR = os.path.abspath(sys.argv[2] if len(sys.argv) > 2 else '.')
for module in os.listdir(os.path.join(sys.prefix, 'Lib', 'site-packages')):
if module == '__pycache__':
continue
if not re.match(r'[a-z0-9_]+$', module, re.I):
continue
outdir = os.path.join(OUTDIR, module)
try:
os.makedirs(outdir)
except OSError:
if not os.path.isdir(outdir):
raise
script = os.path.join(outdir, SCRIPT_NAME.format(sys=sys, module=module))
with open(script, 'w') as f:
print(TEMPLATE.format(sys=sys, module=module), file=f)
print("Testing", module)
p = subprocess.Popen([TOOL, script])
try:
wait(p, 3600)
except KeyboardInterrupt:
p.kill()
sys.exit(0)
except:
p.kill()
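# The templates above lean on str.format's attribute access and integer indexing
# inside replacement fields ({sys.version_info[0]}, {sys.prefix}) rather than
# f-strings, so one template string serves every module. A quick preview of the
# expansion ("example_pkg" is a made-up module name):
print(SCRIPT_NAME.format(sys=sys))                      # e.g. script37.rsp
print(TEMPLATE.format(sys=sys, module="example_pkg"))   # full response-file text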
|
xfel/merging/application/statistics/experiment_resolution_statistics.py | dperl-sol/cctbx_project | 155 | 12657467 | <gh_stars>100-1000
from __future__ import absolute_import, division, print_function
from six.moves import range
from dials.array_family import flex
from libtbx import table_utils
from xfel.merging.application.worker import worker
from xfel.merging.application.reflection_table_utils import reflection_table_utils
class experiment_resolution_statistics(worker):
'''Calculates experiments accepted vs resolution bins'''
def __init__(self, params, mpi_helper=None, mpi_logger=None):
super(experiment_resolution_statistics, self).__init__(params=params, mpi_helper=mpi_helper, mpi_logger=mpi_logger)
def __repr__(self):
return 'Lattices resolution'
def run(self, experiments, reflections):
self.logger.log_step_time("EXPERIMENT_RESOLUTION_STATS")
# Get pre-created resolution binning objects from the parameters
self.resolution_binner = self.params.statistics.resolution_binner
self.hkl_resolution_bins = self.params.statistics.hkl_resolution_bins
# How many bins do we have?
self.n_bins = self.resolution_binner.n_bins_all() # (self.params.statistics.n_bins + 2), 2 - to account for the hkls outside of the binner resolution range
# To enable MPI all-rank reduction, every rank must initialize statistics array(s), even if the rank doesn't have any reflections.
self.experiment_count_per_resolution_bins = flex.int(self.n_bins, 0)
# Calculate, format and output statistics for each rank
if reflections.size() > 0:
self.count_experiments_per_resolution_bins(reflections)
Experiment_Table_text = self.get_formatted_table(self.experiment_count_per_resolution_bins, len(experiments))
self.logger.log(Experiment_Table_text)
# Accumulate statistics from all ranks
all_ranks_experiment_count_per_resolution_bins = self.mpi_helper.cumulative_flex(self.experiment_count_per_resolution_bins, flex.int)
all_ranks_total_experiment_count = self.mpi_helper.sum(len(experiments))
# Format and output all-rank total statistics
if self.mpi_helper.rank == 0:
Experiment_Table_text = self.get_formatted_table(all_ranks_experiment_count_per_resolution_bins, all_ranks_total_experiment_count)
self.logger.main_log(Experiment_Table_text)
self.logger.log_step_time("EXPERIMENT_RESOLUTION_STATS", True)
return experiments, reflections
def get_formatted_table(self, experiment_count_per_bin, total_experiment_count):
'''Produce a table with experiment count over resolution bins'''
table_data = [["Bin", "Resolution Range", "Lattices", "Accepted (%)"]]
for i_bin in self.resolution_binner.range_used():
col_legend = '%-13s' % self.resolution_binner.bin_legend(
i_bin=i_bin,
show_bin_number=False,
show_bin_range=False,
show_d_range=True,
show_counts=False)
exp_count_abs = '%8d' % experiment_count_per_bin[i_bin]
exp_count_percent = '%5.2f'% (100. * experiment_count_per_bin[i_bin] / total_experiment_count)
table_data.append(['%3d' % i_bin, col_legend, exp_count_abs, exp_count_percent])
table_data.append([""] * len(table_data[0]))
table_data.append(["All", "", '%8d' % total_experiment_count])
return "\n Image Statistics\n" + table_utils.format(table_data, has_header=1, justify='center', delim=' ')
def count_experiments_per_resolution_bins(self, reflections):
'''For each resolution bin, count experiments that contributed reflections to that bin'''
# Sort all reflections on asu hkls
self.logger.log_step_time("SORT")
self.logger.log("Sorting reflection table...")
reflections.sort('miller_index_asymmetric')
self.logger.log_step_time("SORT", True)
# Initialize a dictionary to store unique experiment ids in resolution bins
experiments_per_resolution_bins = {}
for i_bin in range(self.n_bins):
experiments_per_resolution_bins[i_bin] = set()
# Accumulate experiment ids in the resolution bins where those experiments contributed reflections
for refls in reflection_table_utils.get_next_hkl_reflection_table(reflections=reflections):
if refls.size() == 0:
break # unless the input "reflections" list is empty, generated "refls" lists cannot be empty
hkl = refls[0]['miller_index_asymmetric']
if hkl in self.hkl_resolution_bins:
i_bin = self.hkl_resolution_bins[hkl]
for refl in refls.rows():
experiments_per_resolution_bins[i_bin].add(refl['exp_id'])
# For each bin, reduce the sets of unique experiment ids to their count
for i_bin in range(self.resolution_binner.n_bins_all()):
self.experiment_count_per_resolution_bins[i_bin] = len(experiments_per_resolution_bins[i_bin])
if __name__ == '__main__':
from xfel.merging.application.worker import exercise_worker
exercise_worker(experiment_resolution_statistics)
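  # The core of count_experiments_per_resolution_bins() boils down to: for each
  # resolution bin, collect the set of experiment ids that contributed at least
  # one reflection whose asu hkl falls in that bin, then take the set sizes.
  # A standalone sketch with plain Python containers (the toy data is made up):
  from collections import defaultdict
  toy_reflections = [  # (asu hkl, experiment id)
    ((1, 0, 0), "exp_a"), ((1, 0, 0), "exp_b"),
    ((2, 1, 1), "exp_a"), ((3, 2, 1), "exp_c")]
  toy_hkl_resolution_bins = {(1, 0, 0): 0, (2, 1, 1): 0, (3, 2, 1): 1}
  experiments_per_bin = defaultdict(set)
  for hkl, exp_id in toy_reflections:
    if hkl in toy_hkl_resolution_bins:
      experiments_per_bin[toy_hkl_resolution_bins[hkl]].add(exp_id)
  print({i_bin: len(ids) for i_bin, ids in experiments_per_bin.items()})  # {0: 2, 1: 1}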
|
venv/Lib/site-packages/IPython/lib/__init__.py | ajayiagbebaku/NFL-Model | 6,989 | 12657486 | <filename>venv/Lib/site-packages/IPython/lib/__init__.py
# encoding: utf-8
"""
Extra capabilities for IPython
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from IPython.lib.security import passwd
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
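# A short usage sketch for the re-exported helper; the printed value is
# illustrative only, since the salt makes every run differ.
if __name__ == "__main__":
    hashed = passwd("s3cret")   # hash a plaintext passphrase for a config file
    print(hashed)               # e.g. 'sha1:<salt>:<hexdigest>'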
|
webui/main.py | hydgladiator/Themis | 356 | 12657503 | <gh_stars>100-1000
# -*-coding:utf-8-*-
import os
import tornado.ioloop
import tornado.autoreload
from webui import view
def run_server(config, host=None, port=7000):
handlers = [
(r"/", view.SqlReRuleSetIndex),
(r"/sqlreview/rule/simple/addition", view.RuleSimpleAdditoin),
(r"/sqlreview/rule/complex/addition", view.RuleComplexAddition),
(r"/sqlreview/rule/addition", view.RuleAddition),
(r"/new/version/sql/review/rule/info/index", view.SqlReRuleSetInfoIndex),
(r"/sqlreview/rule/upload", view.RuleUpload),
(r"/sqlreview/rule/info", view.SqlReviewRuleInfo),
(r"/new/version/sql/review/get/struct", view.SqlReviewGetStruct),
(r"/new/version/sql/review/task/index", view.SqlReviewTaskIndex),
(r"/new/version/sql/review/job/data", view.SqlReviewJobData),
(r"/new/version/sql/review/task/rule/info", view.SqlReviewTaskRuleInfo),
(r"/new/version/sql/review/task/rule/detail/info", view.SqlReviewTaskRuleDetailInfo),
(r"/new/version/sql/review/task/rule/plan/info", view.SqlReviewTaskRulePlanInfo),
(r"/new/version/sql/review/task/rule/text/info", view.SqlReviewTaskRuleTextInfo),
# (r"/new/version/sql/review/prevent/object/index", view.SqlReviewPreventObjectIndex),
# (r"/new/version/sql/review/prevent/object", view.SqlReviewPreventObject),
(r"/new/version/sql/review/get/db/user/list", view.SqlReviewGetDbUserList),
(r"/new/version/sql/review/get/db/port", view.SqlReviewGetDbPort),
(r"/new/version/sql/review/task/publish", view.SqlReviewTaskPublish),
(r"/new/version/sql/review/task/rule/export", view.SqlReviewRuleExport)
]
application = tornado.web.Application(handlers,
template_path=os.path.join(os.path.dirname(__file__), "template"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
debug=True,
config=config
)
application.listen(port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
    # "application" and "server_port" only exist inside run_server(), so start the
    # server through it; the empty config dict here is just a placeholder.
    run_server(config={}, port=7000)
|
packages/syft/tests/syft/core/common/serde/deserialize_test.py | vishalbelsare/PySyft | 8,428 | 12657530 | <gh_stars>1000+
# third party
from pytest import raises
# syft absolute
from syft.core.common.serde.deserialize import _deserialize
def test_fail_deserialize_no_format() -> None:
with raises(TypeError):
_deserialize(blob="to deserialize", from_proto=False)
def test_fail_deserialize_wrong_format() -> None:
with raises(TypeError, match="You tried to deserialize an unsupported type."):
_deserialize(blob="to deserialize")
|