id | content
---|---
12708
|
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
import speechpro.cloud.speech.synthesis.rest.cloud_client.api.session_api
import speechpro.cloud.speech.synthesis.rest.cloud_client.api.synthesize_api
|
12714
|
import socket
import requests
import json
import xml.etree.ElementTree as ET
class Camera(object):
def __init__(self):
"""
create camera object
"""
self.xml_url = self.discover()
self.name, self.api_version, self.services = self.connect(self.xml_url)
self.camera_endpoint_url = self.services["camera"] + "/camera"
self.available_apis = self.do("getAvailableApiList")["result"]
# prepare camera for rec mode
if "startRecMode" in self.available_apis[0]:
self.do("startRecMode")
self.available_apis = self.do("getAvailableApiList")["result"]
self.connected = False
def discover(self):
"""
discover camera using upnp ssdp method, return url for device xml
"""
msg = (
"M-SEARCH * HTTP/1.1\r\n"
"HOST: 172.16.58.3:1900\r\n"
'MAN: "ssdp:discover" \r\n'
"MX: 2\r\n"
"ST: urn:schemas-sony-com:service:ScalarWebAPI:1\r\n"
"\r\n"
).encode()
# Set up UDP socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
s.settimeout(2)
s.sendto(msg, ("172.16.58.3", 1900))
try:
while True:
data, addr = s.recvfrom(65507)
decoded_data = data.decode()
# get xml url from ssdp response
for item in decoded_data.split("\n"):
if "LOCATION" in item:
return item.strip().split(" ")[
1
] # get location url from ssdp response
self.connected = True
except socket.timeout:
raise ConnectionError("you are not connected to the camera's wifi")
def connect(self, xml_url):
"""
returns name, api_version, api_service_urls on success
"""
device_xml_request = requests.get(xml_url)
xml_file = str(device_xml_request.content.decode())
xml = ET.fromstring(xml_file)
name = xml.find(
"{urn:schemas-upnp-org:device-1-0}device/{urn:schemas-upnp-org:device-1-0}friendlyName"
).text
api_version = xml.find(
"{urn:schemas-upnp-org:device-1-0}device/{urn:schemas-sony-com:av}X_ScalarWebAPI_DeviceInfo/{urn:schemas-sony-com:av}X_ScalarWebAPI_Version"
).text
service_list = xml.find(
"{urn:schemas-upnp-org:device-1-0}device/{urn:schemas-sony-com:av}X_ScalarWebAPI_DeviceInfo/{urn:schemas-sony-com:av}X_ScalarWebAPI_ServiceList"
)
api_service_urls = {}
for service in service_list:
service_type = service.find(
"{urn:schemas-sony-com:av}X_ScalarWebAPI_ServiceType"
).text
action_url = service.find(
"{urn:schemas-sony-com:av}X_ScalarWebAPI_ActionList_URL"
).text
api_service_urls[service_type] = action_url
return name, api_version, api_service_urls
def info(self):
"""
returns camera info(name, api version, supported services, available apis) in a dictionary
"""
return {
"name": self.name,
"api version": self.api_version,
"supported services": list(self.services.keys()),
"available apis": self.available_apis,
}
def post_request(self, url, method, param=[]):
"""
sends post request to url with method and param as json
"""
if type(param) is not list:
param = [param]
json_request = {"method": method, "params": param, "id": 1, "version": "1.0"}
request = requests.post(url, json.dumps(json_request))
response = json.loads(request.content)
if "error" in list(response.keys()):
print("Error: ")
print(response)
else:
return response
def do(self, method, param=[]):
"""
        calls the camera service API; requires a method name and optional params
"""
# TODO: response handler, return result of do, etc
response = self.post_request(self.camera_endpoint_url, method, param)
return response
class ConnectionError(Exception):
pass
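# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal way to drive the Camera class above. It assumes you are connected
# to the camera's Wi-Fi and that the model supports the "actTakePicture" API.
if __name__ == "__main__":
    cam = Camera()                 # SSDP discovery + connect
    print(cam.info())              # name, API version, services, available APIs
    if "actTakePicture" in cam.available_apis[0]:
        print(cam.do("actTakePicture"))   # response typically includes a postview image URL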
|
12724
|
import pandas as pd
import smartplots3_setup
def createSetup(name,expansion_factor,percapita_factor,plot_size,settings):
plt_setup_smart={
'name': name,
'expansion_factor':expansion_factor,
'percapita_factor':percapita_factor,
'scenarios_itr': [],
'scenarios_id':[],
'scenarios_year':[],
'plot_size': plot_size,
'bottom_labels': [],
'top_labels': [],
'plots_folder': "makeplots3"
}
for (scenarios_year,scenarios_id,scenarios_itr,bottom_label,top_label) in settings:
plt_setup_smart['scenarios_year'].append(scenarios_year)
plt_setup_smart['scenarios_id'].append(scenarios_id)
plt_setup_smart['scenarios_itr'].append(scenarios_itr)
plt_setup_smart['top_labels'].append(top_label)
plt_setup_smart['bottom_labels'].append(bottom_label)
return plt_setup_smart
def createSettingRow(scenarios_year,scenarios_id,scenarios_itr,bottom_label,top_label):
return (scenarios_year,scenarios_id,scenarios_itr,bottom_label,top_label)
scenarios_lables = {
"Base_CL_CT": "Base0",
"Base_STL_STT_BAU": "Base2",
"Base_STL_STT_VTO": "Base3",
"Base_LTL_LTT_BAU": "Base5",
"Base_LTL_LTT_VTO": "Base6",
"A_STL_STT_BAU": "A2",
"A_STL_STT_VTO": "A3",
"B_LTL_LTT_BAU": "B5",
"B_LTL_LTT_VTO": "B6",
"C_LTL_LTT_BAU": "C5",
"C_LTL_LTT_VTO": "C6"
}
output_folder = "/home/ubuntu/git/jupyter/data/28thOct2019"
# Base_CL_CT
# A_STL_STT_BAU
settings=[]
settings.append(createSettingRow(2010,1,15,scenarios_lables["Base_CL_CT"], ""))
settings.append(createSettingRow(2025,6,15,scenarios_lables["A_STL_STT_BAU"], ""))
settings.append(createSettingRow(2025,7,15,scenarios_lables["A_STL_STT_VTO"], ""))
settings.append(createSettingRow(2040,8,15,scenarios_lables["B_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,9,15,scenarios_lables["B_LTL_LTT_VTO"], ""))
settings.append(createSettingRow(2040,10,15,scenarios_lables["C_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,11,15,scenarios_lables["C_LTL_LTT_VTO"], ""))
plt_setup_smart3 = createSetup('7scenarios', (7.75/0.315) * 27.0 / 21.3, 27.0/21.3, (8, 4.5), settings)
#smartplots3_setup.pltRealizedModeSplitByTrips(plt_setup_smart3, output_folder)
#smartplots3_setup.pltModeSplitInPMTPerCapita(plt_setup_smart3, output_folder)
#smartplots3_setup.pltAveragePersonSpeed_allModes(plt_setup_smart3, output_folder)
#smartplots3_setup.pltAveragePersonSpeed_car(plt_setup_smart3, output_folder)
#smartplots3_setup.pltModeSplitInVMT(plt_setup_smart3, output_folder)
#smartplots3_setup.pltRHEmptyPooled(plt_setup_smart3, output_folder)
#smartplots3_setup.pltRHWaitTime(plt_setup_smart3, output_folder)
#smartplots3_setup.pltLdvTechnologySplitInVMT(plt_setup_smart3, output_folder)
settings=[]
settings.append(createSettingRow(2010,1,15,scenarios_lables["Base_CL_CT"], ""))
settings.append(createSettingRow(2025,2,15,scenarios_lables["Base_STL_STT_BAU"], ""))
settings.append(createSettingRow(2025,3,15,scenarios_lables["Base_STL_STT_VTO"], ""))
settings.append(createSettingRow(2040,4,15,scenarios_lables["Base_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,5,15,scenarios_lables["Base_LTL_LTT_VTO"], ""))
settings.append(createSettingRow(2025,6,15,scenarios_lables["A_STL_STT_BAU"], ""))
settings.append(createSettingRow(2025,7,15,scenarios_lables["A_STL_STT_VTO"], ""))
settings.append(createSettingRow(2040,8,15,scenarios_lables["B_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,9,15,scenarios_lables["B_LTL_LTT_VTO"], ""))
settings.append(createSettingRow(2040,10,15,scenarios_lables["C_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,11,15,scenarios_lables["C_LTL_LTT_VTO"], ""))
plt_setup_smart3_base = createSetup('11scenarios', (7.75/0.315) * 27.0 / 21.3, 27.0/21.3, (10, 4.5), settings)
smartplots3_setup.pltEnergyPerCapita(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltRealizedModeSplitByTrips(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltModeSplitInPMTPerCapita(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltAveragePersonSpeed_allModes(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltAveragePersonSpeed_car(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltModeSplitInVMT(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltRHEmptyPooled(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltRHWaitTime(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltLdvTechnologySplitInVMT(plt_setup_smart3_base, output_folder)
#smartplots3_setup.pltMEP(plt_setup_smart3, output_folder, [15071,21151,22872,29014,27541,36325,45267])
smartplots3_setup.tableSummary(plt_setup_smart3_base, output_folder)
|
12726
|
import os
import sys
def main():
    print('const char* ya_get_symbolizer_gen() {')
    print(' return "{}";'.format(os.path.join(os.path.dirname(sys.argv[1]), 'llvm-symbolizer')))
    print('}')
if __name__ == '__main__':
main()
|
12735
|
from django.contrib.auth import SESSION_KEY
from django.core.cache import cache
from django.conf import settings
from django.http import HttpResponse, HttpResponseServerError
from proxy_server.response import AJAX_REQUEST
import httplib, json, proxy_server
def invoke_backend_service(method, function_path, json_data=dict(), request=None, response_token=True, public=False, secure=False):
error_message = None
try:
if public is False and request is None:
error_message = 'A private web service must receive Django\'s request'
raise Exception
if response_token is True and request is None:
error_message = 'A web service cannot expect a response token and not receive Django\'s request'
raise Exception
if not hasattr(settings, 'BACKEND_HOST'):
error_message = 'No backend host and/or port specified'
raise Exception
if secure:
if hasattr(settings, 'BACKEND_PORT'):
conn = httplib.HTTPSConnection(settings.BACKEND_HOST, settings.BACKEND_PORT)
else:
conn = httplib.HTTPSConnection(settings.BACKEND_HOST)
else:
if hasattr(settings, 'BACKEND_PORT'):
conn = httplib.HTTPConnection(settings.BACKEND_HOST, settings.BACKEND_PORT)
else:
conn = httplib.HTTPConnection(settings.BACKEND_HOST)
        headers = proxy_server.RESTFUL_HEADER.copy()  # copy so the shared header template is not mutated
headers[proxy_server.API_KEY] = settings.SECRET_KEY
if request is not None:
pk = cache.get(AJAX_REQUEST, None)
if pk:
request.user.pk = pk
cache.delete(AJAX_REQUEST)
headers[proxy_server.USER_TOKEN] = request.user.pk
headers[proxy_server.CLIENT_IP] = request.META.get(proxy_server.HTTP_FROM)
try:
conn.request(method, function_path, json.dumps(json_data), headers)
except:
error_message = 'Could not connect to service'
raise Exception
response = conn.getresponse()
response_data = response.read()
conn.close()
if response.status == 403:
return 403, None
if response.status == 204:
if response_token is True:
error_message = 'Backend server didn\'t respond with a token'
raise Exception
return 204, None
else:
try:
response_json = json.loads(response_data)
except:
error_message = 'Unknown response format'
raise Exception
if response_token is True:
user_dict = None
if SESSION_KEY in request.session:
user_dict = cache.get(request.session[SESSION_KEY])
cache.delete(request.session[SESSION_KEY])
request.session[SESSION_KEY] = response_json[proxy_server.USER_TOKEN]
request.user.pk = response_json[proxy_server.USER_TOKEN]
request.session[proxy_server.EXPIRATION_DATE] = response_json[proxy_server.EXPIRATION_DATE]
if user_dict:
user_dict['pk'] = request.user.pk
cache.set(request.session[SESSION_KEY], user_dict)
if response.status == 200:
if response_token is True and proxy_server.USER_TOKEN not in response_json:
error_message = 'Server expected user token in response'
raise Exception
result = None
if proxy_server.RESPONSE in response_json:
result = response_json[proxy_server.RESPONSE]
return 200, result
else:
code = response.status
if proxy_server.ERROR in response_json:
error_message = response_json[proxy_server.ERROR][proxy_server.MESSAGE]
raise Exception(code)
else:
error_message = response.reason
raise Exception(code)
except Exception as e:
if error_message is None:
error_message = 'Unknown error in service invocation'
        code = int(str(e)) if str(e).isdigit() else 500  # use the HTTP status carried by the exception, if any
error = {
'error': {
'code': code,
'type': 'ProxyServerError',
'message': error_message
}
}
return code, error
def invoke_backend_service_as_proxy(request, method, function_path, json_data=dict(), response_token=True, secure=False):
error_message = None
try:
if not hasattr(settings, 'BACKEND_HOST'):
error_message = 'No backend host and/or port specified'
raise Exception
if secure:
if hasattr(settings, 'BACKEND_PORT'):
conn = httplib.HTTPSConnection(settings.BACKEND_HOST, settings.BACKEND_PORT)
else:
conn = httplib.HTTPSConnection(settings.BACKEND_HOST)
else:
if hasattr(settings, 'BACKEND_PORT'):
conn = httplib.HTTPConnection(settings.BACKEND_HOST, settings.BACKEND_PORT)
else:
conn = httplib.HTTPConnection(settings.BACKEND_HOST)
        headers = proxy_server.RESTFUL_HEADER.copy()  # copy so the shared header template is not mutated
headers[proxy_server.USER_TOKEN] = request.META.get(proxy_server.HTTP_USER_TOKEN)
headers[proxy_server.CLIENT_IP] = request.META.get(proxy_server.HTTP_FROM)
headers[proxy_server.API_KEY] = request.META.get(proxy_server.HTTP_API_KEY)
try:
conn.request(method, function_path, json.dumps(json_data), headers)
except:
error_message = 'Could not connect to service'
raise Exception
response = conn.getresponse()
response_data = response.read()
conn.close()
if response.status == 403:
resp = HttpResponse(status=response.status, reason=response.reason)
for header, value in response.getheaders():
resp[header] = value
for header in proxy_server.HOP_BY_HOP:
del resp[header]
resp[proxy_server.HEADER_SERVER] = proxy_server.VALUE_SERVER
return resp
if response.status == 204:
if response_token is True:
error_message = 'Backend server didn\'t respond with a token'
raise Exception
resp = HttpResponse(status=response.status, content_type='application/json', reason=response.reason)
for header, value in response.getheaders():
resp[header] = value
for header in proxy_server.HOP_BY_HOP:
del resp[header]
resp[proxy_server.HEADER_SERVER] = proxy_server.VALUE_SERVER
return resp
else:
try:
response_json = json.loads(response_data)
except:
error_message = 'Unknown response format'
raise Exception
if response.status == 200:
if response_token is True and proxy_server.USER_TOKEN not in response_json:
error_message = 'Server expected user token in response'
raise Exception
resp = HttpResponse(response_data, status=response.status, content_type='application/json', reason=response.reason)
for header, value in response.getheaders():
resp[header] = value
for header in proxy_server.HOP_BY_HOP:
del resp[header]
resp[proxy_server.HEADER_SERVER] = proxy_server.VALUE_SERVER
return resp
except Exception as e:
if error_message is None:
error_message = 'Unknown error in service invocation'
        code = int(str(e)) if str(e).isdigit() else 500  # use the HTTP status carried by the exception, if any
error = {
'error': {
'code': code,
'type': 'ProxyServerError',
'message': error_message
}
}
return HttpResponseServerError(json.dumps(error), content_type='application/json')
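# --- Usage sketch (illustrative, not part of the original module) ---
# A hypothetical Django view that forwards a login request to the backend.
# The '/api/login' path and payload keys are invented for the example; only
# the invoke_backend_service signature comes from the code above.
def example_login_view(request):
    status, data = invoke_backend_service(
        'POST', '/api/login',
        json_data={'username': request.POST.get('username'),
                   'password': request.POST.get('password')},
        request=request, response_token=True)
    if status != 200:
        return HttpResponseServerError(json.dumps(data), content_type='application/json')
    return HttpResponse(json.dumps(data), content_type='application/json')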
|
12787
|
from mysql.connector.pooling import MySQLConnectionPool
from ._connect import _parse_kwargs, _patch_MySQLConnection
class MySQLConnectionPool(MySQLConnectionPool):
def set_config(self, **kwargs):
kwargs = _parse_kwargs(kwargs)
super(MySQLConnectionPool, self).set_config(**kwargs)
def add_connection(self, cnx=None):
with _patch_MySQLConnection(include_pooling=True):
super().add_connection(cnx)
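# --- Usage sketch (illustrative; needs a reachable MySQL server) ---
# The subclass is meant to be used like mysql.connector's own pool; the
# connection parameters below are placeholders, and any extra keyword
# arguments accepted by _parse_kwargs depend on the surrounding package.
if __name__ == "__main__":
    pool = MySQLConnectionPool(pool_name="example", pool_size=5,
                               host="localhost", user="app",
                               password="secret", database="app_db")
    cnx = pool.get_connection()    # borrow a pooled connection
    try:
        cur = cnx.cursor()
        cur.execute("SELECT 1")
        print(cur.fetchall())
    finally:
        cnx.close()                # hand the connection back to the pool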
|
12812
|
from django.db import models
class SoftDeleteManager(models.Manager):
def save_soft_delete(self):
self.is_deleted = True
self.save()
return True
def get_soft_delete(self):
return self.filter(is_deleted=True)
def get_unsoft_delete(self):
return self.filter(is_deleted=False)
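# --- Usage sketch (illustrative) ---
# A hypothetical model wired to the manager; it assumes the model defines the
# is_deleted flag that the querysets above filter on.
#
# class Article(models.Model):
#     title = models.CharField(max_length=200)
#     is_deleted = models.BooleanField(default=False)
#     objects = SoftDeleteManager()
#
# Article.objects.get_unsoft_delete()   # rows with is_deleted=False
# Article.objects.get_soft_delete()     # soft-deleted rows only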
|
12849
|
from core.terraform.resources import BaseTerraformVariable
class TerraformVariable(BaseTerraformVariable):
"""
Base resource class for Terraform tfvar variable
Attributes:
        variable_dict_input (dict/None): variable values when the variable holds a dict
        available_args (dict): instance configuration arguments
        variable_type (str): type of the Terraform variable, e.g. a list or a dict variable
"""
variable_dict_input = None
variable_type = None
available_args = {
'variable_name': {'required': True},
'variable_type': {'required': False},
'default_value': {'required': False}
}
|
12871
|
import os
scrapy_project_path = '/Users/kingname/book/chapter_12/DeploySpider'
os.chdir(scrapy_project_path)  # switch to the spider project root before running the deploy command
os.system('scrapyd-deploy')
import json
import time
import requests
start_url = 'http://45.76.110.210:6800/schedule.json'
start_data = {'project': 'DeploySpider',
'spider': 'Example'}
end_url = 'http://172.16.31.10:6800/cancel.json'
end_data = {'project': 'DeploySpider'}
result = requests.post(start_url, data=start_data, auth=('kingname', 'genius')).text
result = requests.post(end_url, data=end_data, auth=('kingname', 'genius')).text
# result_dict = json.loads(result)
# job_id = result_dict['jobid']
# print(f'Started spider, jobid: {job_id}')
#
# time.sleep(5)
# end_data['job'] = job_id
# result = requests.post(end_url, data=end_data).text
# print(result)
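# --- Optional status check (illustrative) ---
# scrapyd also exposes a listjobs.json endpoint; polling it with the same
# credentials shows pending/running/finished jobs for the project.
# status = requests.get('http://45.76.110.210:6800/listjobs.json',
#                       params={'project': 'DeploySpider'},
#                       auth=('kingname', 'genius')).json()
# print(status['running'], status['finished'])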
|
12891
|
import traceback
import re
import sys
import logging
"""
**********
Note by wvmarle:
This file contains the complete code from chained_exception.py plus the
error handling code from GlacierWrapper.py, allowing it to be used in other
modules like glaciercorecalls as well.
**********
"""
class GlacierException(Exception):
"""
An extension of the built-in Exception class, this handles
an additional cause keyword argument, adding it as cause
attribute to the exception message.
It logs the error message (amount of information depends on the log
level) and passes it on to a higher level to handle.
Furthermore it allows for the upstream handler to call for a
complete stack trace or just a simple error and cause message.
TODO: describe usage.
"""
ERRORCODE = {'InternalError': 127, # Library internal error.
'UndefinedErrorCode': 126, # Undefined code.
'NoResults': 125, # Operation yielded no results.
'GlacierConnectionError': 1, # Can not connect to Glacier.
'SdbConnectionError': 2, # Can not connect to SimpleDB.
'CommandError': 3, # Command line is invalid.
'VaultNameError': 4, # Invalid vault name.
'DescriptionError': 5, # Invalid archive description.
'IdError': 6, # Invalid upload/archive/job ID given.
'RegionError': 7, # Invalid region given.
'FileError': 8, # Error related to reading/writing a file.
'ResumeError': 9, # Problem resuming a multipart upload.
'NotReady': 10, # Requested download is not ready yet.
'BookkeepingError': 11, # Bookkeeping not available.
'SdbCommunicationError': 12, # Problem reading/writing SimpleDB data.
'ResourceNotFoundException': 13, # Glacier can not find the requested resource.
'InvalidParameterValueException': 14, # Parameter not accepted.
'DownloadError': 15, # Downloading an archive failed.
'SNSConnectionError': 126, # Can not connect to SNS
'SNSConfigurationError': 127, # Problem with configuration file
'SNSParameterError':128, # Problem with arguments passed to SNS
}
def __init__(self, message, code=None, cause=None):
"""
Constructor. Logs the error.
:param message: the error message.
:type message: str
:param code: the error code.
:type code: str
:param cause: explanation on what caused the error.
:type cause: str
"""
self.logger = logging.getLogger(self.__class__.__name__)
self.exitcode = self.ERRORCODE[code] if code in self.ERRORCODE else 254
self.code = code
if cause:
self.logger.error('ERROR: %s'% cause)
self.cause = cause if isinstance(cause, tuple) else (cause,)
self.stack = traceback.format_stack()[:-2]
else:
self.logger.error('An error occurred, exiting.')
self.cause = ()
# Just wrap up a cause-less exception.
# Get the stack trace for this exception.
self.stack = (
traceback.format_stack()[:-2] +
traceback.format_tb(sys.exc_info()[2]))
# ^^^ let's hope the information is still there; caller must take
# care of this.
self.message = message
self.logger.info(self.fetch(message=True))
self.logger.debug(self.fetch(stack=True))
if self.exitcode == 254:
self.logger.debug('Unknown error code: %s.'% code)
# Works as a generator to help get the stack trace and the cause
# written out.
def causeTree(self, indentation=' ', alreadyMentionedTree=[], stack=False, message=False):
"""
Returns a complete stack tree, an error message, or both.
        Returns a warning if neither stack nor message is True.
"""
if stack:
yield "Traceback (most recent call last):\n"
ellipsed = 0
for i, line in enumerate(self.stack):
if (ellipsed is not False
and i < len(alreadyMentionedTree)
and line == alreadyMentionedTree[i]):
ellipsed += 1
else:
if ellipsed:
yield " ... (%d frame%s repeated)\n" % (
ellipsed,
"" if ellipsed == 1 else "s")
ellipsed = False # marker for "given out"
yield line
if message:
exc = self if self.message is None else self.message
for line in traceback.format_exception_only(exc.__class__, exc):
yield line
if self.cause:
yield ("Caused by: %d exception%s\n" %
(len(self.cause), "" if len(self.cause) == 1 else "s"))
for causePart in self.cause:
if hasattr(causePart,"causeTree"):
for line in causePart.causeTree(indentation, self.stack):
yield re.sub(r'([^\n]*\n)', indentation + r'\1', line)
else:
for line in traceback.format_exception_only(causePart.__class__, causePart):
yield re.sub(r'([^\n]*\n)', indentation + r'\1', line)
if not message and not stack:
yield ('No output. Specify message=True and/or stack=True \
to get output when calling this function.\n')
def write(self, stream=None, indentation=' ', message=False, stack=False):
"""
Writes the error details to sys.stderr or a stream.
"""
stream = sys.stderr if stream is None else stream
for line in self.causeTree(indentation, message=message, stack=stack):
stream.write(line)
def fetch(self, indentation=' ', message=False, stack=False):
"""
Fetches the error details and returns them as string.
"""
out = ''
for line in self.causeTree(indentation, message=message, stack=stack):
out += line
return out
class InputException(GlacierException):
"""
    Exception that is raised when there is something wrong with the
user input.
"""
VaultNameError = 1
VaultDescriptionError = 2
def __init__(self, message, code=None, cause=None):
""" Handles the exception.
:param message: the error message.
:type message: str
:param code: the error code.
:type code:
:param cause: explanation on what caused the error.
:type cause: str
"""
GlacierException.__init__(self, message, code=code, cause=cause)
class ConnectionException(GlacierException):
"""
Exception that is raised when there is something wrong with
the connection.
"""
GlacierConnectionError = 1
SdbConnectionError = 2
def __init__(self, message, code=None, cause=None):
""" Handles the exception.
:param message: the error message.
:type message: str
:param code: the error code.
:type code:
:param cause: explanation on what caused the error.
:type cause: str
"""
GlacierException.__init__(self, message, code=code, cause=cause)
class CommunicationException(GlacierException):
"""
Exception that is raised when there is something wrong in
the communication with an external library like boto.
"""
def __init__(self, message, code=None, cause=None):
""" Handles the exception.
:param message: the error message.
:type message: str
:param code: the error code.
:type code:
:param cause: explanation on what caused the error.
:type cause: str
"""
GlacierException.__init__(self, message, code=code, cause=cause)
class ResponseException(GlacierException):
"""
Exception that is raised when there is an http response error.
"""
def __init__(self, message, code=None, cause=None):
GlacierException.__init__(self, message, code=code, cause=cause)
if __name__ == '__main__':
class ChildrenException(Exception):
def __init__(self, message):
Exception.__init__(self, message)
class ParentException(GlacierException):
def __init__(self, message, cause=None):
if cause:
GlacierException.__init__(self, message, cause=cause)
else:
GlacierException.__init__(self, message)
try:
try:
raise ChildrenException("parent")
        except ChildrenException as e:
raise ParentException("children", cause=e)
    except ParentException as e:
e.write(indentation='|| ')
|
12898
|
from ..alert_code import AlertCode
class EVMNodeAlertCode(AlertCode):
NoChangeInBlockHeight = 'evm_node_alert_1'
BlockHeightUpdatedAlert = 'evm_node_alert_2'
BlockHeightDifferenceIncreasedAboveThresholdAlert = 'evm_node_alert_3'
BlockHeightDifferenceDecreasedBelowThresholdAlert = 'evm_node_alert_4'
InvalidUrlAlert = 'evm_node_alert_5'
ValidUrlAlert = 'evm_node_alert_6'
NodeWentDownAtAlert = 'evm_node_alert_7'
NodeBackUpAgainAlert = 'evm_node_alert_8'
NodeStillDownAlert = 'evm_node_alert_9'
|
12910
|
import discord
def check(ctx):
def inner(m):
return m.author == ctx.author
return inner
def Membercheck(ctx):
def inner(m):
return m.author == ctx.guild.me
return inner
def warn_permission(ctx, Member):
if isinstance(ctx.channel, discord.TextChannel):
return ctx.author.guild_permissions.manage_messages and ctx.author.top_role > Member.top_role and ctx.author.guild_permissions >= Member.guild_permissions
        # NOTE: there may be an edge case when both users have identical permissions;
        # it seems fixed for now, leaving this note just in case.
if isinstance(ctx.channel, discord.DMChannel):
return True
def cleanup_permission(ctx):
if isinstance(ctx.channel, discord.TextChannel):
return ctx.author.guild_permissions.manage_messages
if isinstance(ctx.channel, discord.DMChannel):
return True
def mutual_guild_check(ctx, user):
mutual_guilds = set(ctx.author.mutual_guilds)
mutual_guilds2 = set(user.mutual_guilds)
return bool(mutual_guilds.intersection(mutual_guilds2))
async def filter_commands(ctx, command_list):
async def check(cmd, ctx):
try:
return await cmd.can_run(ctx)
except:
return False
return [cmd for cmd in command_list if await check(cmd, ctx)]
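# --- Usage sketch (illustrative) ---
# Typical use of check() inside a command: wait for the next message from the
# same author. Assumes a commands.Bot instance named "bot".
#
# @bot.command()
# async def ask(ctx):
#     await ctx.send("Say something:")
#     msg = await bot.wait_for("message", check=check(ctx), timeout=30)
#     await ctx.send(f"You said: {msg.content}")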
|
12921
|
import numpy as np
from mpi4py import MPI
from src.imagine.goal_generator.simple_sentence_generator import SentenceGeneratorHeuristic
from src import logger
class GoalSampler:
def __init__(self,
policy_language_model,
reward_language_model,
goal_dim,
one_hot_encoder,
params):
self.policy_language_model = policy_language_model
self.reward_language_model = reward_language_model
self.goal_dim = goal_dim
self.params = params
self.nb_feedbacks = 0
self.nb_positive_feedbacks = 0
self.nb_negative_feedbacks = 0
self.feedback2id = dict()
self.id2feedback = dict()
self.id2oracleid = dict()
self.feedback2one_hot = dict()
self.id2one_hot = dict()
self.feedback_memory = dict(memory_id=[],
string=[],
iter_discovery=[],
target_counter=[],
reached_counter=[],
oracle_id=[],
f1_score=[],
policy_encoding=[],
reward_encoding=[],
imagined=[],
)
self.imagined_goals = dict(string=[],
competence=[],
lp=[])
self.one_hot_encoder = one_hot_encoder
self.goal_generator = SentenceGeneratorHeuristic(params['train_descriptions'],
params['test_descriptions'],
sentences=None,
method=params['conditions']['imagination_method'])
self.nb_discovered_goals = 0
self.score_target_goals = None
self.perceived_learning_progress = None
self.perceived_competence = None
self.feedback_stats = None
self.rank = MPI.COMM_WORLD.Get_rank()
self.num_cpus = params['experiment_params']['n_cpus']
self.rollout_batch_size = params['experiment_params']['rollout_batch_size']
self.not_imagined_goal_ids = np.array([])
self.imagined_goal_ids = np.array([])
def store_reward_function(self, reward_function):
self.reward_function = reward_function
def update_embeddings(self):
        # embeddings must be updated when the language model is updated
for i, goal_str in enumerate(self.feedback_memory['string']):
if self.reward_language_model is not None:
reward_encoding = self.reward_language_model.encode(goal_str)
self.feedback_memory['reward_encoding'][i] = reward_encoding.copy()
policy_encoding = self.policy_language_model.encode(goal_str)
self.feedback_memory['policy_encoding'][i] = policy_encoding.copy()
def add_entries_to_feedback_memory(self, str_list, episode_count, imagined):
for goal_str in str_list:
if goal_str not in self.feedback2id.keys():
memory_id = self.nb_discovered_goals
if goal_str in self.params['train_descriptions']:
oracle_id = self.params['train_descriptions'].index(goal_str)
else:
oracle_id = None
one_hot = self.one_hot_encoder.encode(goal_str.lower().split(" "))
self.feedback2one_hot[goal_str] = one_hot
self.id2one_hot[memory_id] = one_hot
if self.reward_language_model is not None:
reward_encoding = self.reward_language_model.encode(goal_str)
self.feedback_memory['reward_encoding'].append(reward_encoding.copy())
policy_encoding = self.policy_language_model.encode(goal_str)
self.feedback2id[goal_str] = memory_id
self.id2oracleid[memory_id] = oracle_id
self.id2feedback[memory_id] = goal_str
self.feedback_memory['memory_id'].append(memory_id)
self.feedback_memory['oracle_id'].append(oracle_id)
self.feedback_memory['string'].append(goal_str)
self.feedback_memory['target_counter'].append(0)
self.feedback_memory['reached_counter'].append(0)
self.feedback_memory['iter_discovery'].append(episode_count)
self.feedback_memory['f1_score'].append(0)
self.feedback_memory['policy_encoding'].append(policy_encoding.copy())
self.feedback_memory['imagined'].append(imagined)
self.nb_discovered_goals += 1
elif goal_str in self.feedback2id.keys() and not imagined: # if goal previously imagined is discovered later, change its status
ind = self.feedback_memory['string'].index(goal_str)
if self.feedback_memory['imagined'][ind] == 1:
self.feedback_memory['imagined'][ind] = 0
logger.info('Goal already imagined:', goal_str)
def update_discovered_goals(self,
new_goals_str,
episode_count,
epoch):
# only done in cpu 0
self.add_entries_to_feedback_memory(str_list=new_goals_str,
episode_count=episode_count,
imagined=0)
# Decide whether to generate new goals
goal_invention = self.params['conditions']['goal_invention']
imagined = False
if 'from_epoch' in goal_invention:
from_epoch = int(goal_invention.split('_')[-1])
if epoch > from_epoch:
imagined = True
if len(new_goals_str) > 0 and imagined:
new_imagined_goals = []
inds_not_imagined = np.argwhere(np.array(self.feedback_memory['imagined']) == 0).flatten()
self.goal_generator.update_model(np.array(self.feedback_memory['string'])[inds_not_imagined])
generated_goals = self.goal_generator.generate_sentences(n='all')
for gen_g in generated_goals:
if gen_g not in self.imagined_goals['string']:
self.imagined_goals['string'].append(gen_g)
self.imagined_goals['competence'].append(0)
self.imagined_goals['lp'].append(0)
new_imagined_goals.append(gen_g)
self.add_entries_to_feedback_memory(str_list=new_imagined_goals,
episode_count=episode_count,
imagined=1)
def update(self,
current_episode,
all_episodes,
partner_available,
goals_reached_str,
goals_not_reached_str):
imagined_inds = np.argwhere(np.array(self.feedback_memory['imagined']) == 1).flatten()
not_imagined_inds = np.argwhere(np.array(self.feedback_memory['imagined']) == 0).flatten()
self.not_imagined_goal_ids = np.array(self.feedback_memory['memory_id'])[not_imagined_inds]
self.imagined_goal_ids = np.array(self.feedback_memory['memory_id'])[imagined_inds]
# only done in cpu 0
n_episodes = len(all_episodes)
attempted_goals_ids = []
exploit = []
for ep in all_episodes:
exploit.append(ep['exploit'])
attempted_goals_ids.append(ep['g_id'])
if partner_available:
# if partner is available, simply encodes what it said
assert n_episodes == len(goals_reached_str) == len(goals_not_reached_str) == len(exploit) == len(attempted_goals_ids)
# Get indexes in the order of discovery of the attempted goals, reached_goals, not reached_goals
goals_reached_ids = []
goals_not_reached_ids = []
for i in range(n_episodes):
goals_reached_ids.append([])
goals_not_reached_ids.append([])
for goal_str in goals_reached_str[i]:
goals_reached_ids[-1].append(self.feedback2id[goal_str])
for goal_str in goals_not_reached_str[i]:
goals_not_reached_ids[-1].append(self.feedback2id[goal_str])
else:
goals_reached_ids = []
goals_not_reached_ids = []
final_obs = np.array([ep['obs'][-1] for ep in all_episodes])
# test 50 goals for each episode
discovered_goal_ids = np.array(self.feedback_memory['memory_id'])
not_imagined_ind = np.argwhere(np.array(self.feedback_memory['imagined']) == 0).flatten()
discovered_goal_ids = discovered_goal_ids[not_imagined_ind]
n_attempts = min(50, len(discovered_goal_ids))
goals_to_try = np.random.choice(discovered_goal_ids, size=n_attempts, replace=False)
obs = np.repeat(final_obs, n_attempts, axis=0)
goals = np.tile(goals_to_try, final_obs.shape[0])
rewards = self.reward_function.predict(state=obs, goal_ids=goals)[0]
for i in range(len(all_episodes)):
pos_goals = goals_to_try[np.where(rewards[i * n_attempts: (i + 1) * n_attempts] == 0)].tolist()
goals_reached_ids.append(pos_goals)
neg_goals = goals_to_try[np.where(rewards[i * n_attempts: (i + 1) * n_attempts] == -1)].tolist()
goals_not_reached_ids.append(neg_goals)
return goals_reached_ids, goals_not_reached_ids
def share_info_to_all_cpus(self):
# share data across cpus
self.feedback_memory = MPI.COMM_WORLD.bcast(self.feedback_memory, root=0)
self.feedback2id = MPI.COMM_WORLD.bcast(self.feedback2id, root=0)
self.id2oracleid = MPI.COMM_WORLD.bcast(self.id2oracleid, root=0)
self.id2feedback = MPI.COMM_WORLD.bcast(self.id2feedback, root=0)
self.feedback2one_hot = MPI.COMM_WORLD.bcast(self.feedback2one_hot, root=0)
self.nb_discovered_goals = MPI.COMM_WORLD.bcast(self.nb_discovered_goals, root=0)
self.imagined_goals = MPI.COMM_WORLD.bcast(self.imagined_goals, root=0)
self.one_hot_encoder = MPI.COMM_WORLD.bcast(self.one_hot_encoder, root=0)
def sample_targets(self, epoch):
"""
Sample targets for all cpus and all batch, then scatter to the different cpus
"""
# Decide whether to exploit or not
        exploit = np.random.random() < 0.1  # exploit with 10% probability
strategy = 'random'
goal_invention = self.params['conditions']['goal_invention']
imagined = False
if 'from_epoch' in goal_invention:
from_epoch = int(goal_invention.split('_')[-1])
if epoch > from_epoch:
imagined = np.random.random() < self.params['conditions']['p_imagined']
if self.rank == 0:
all_goals_str = []
all_goals_encodings = []
all_goals_ids = []
for i in range(self.num_cpus):
goals_str = []
goals_encodings = []
goals_ids = []
for j in range(self.rollout_batch_size):
# when there is no goal in memory, sample random goal from standard normal distribution
if len(self.feedback_memory['memory_id']) == 0:
goals_encodings.append(np.random.normal(size=self.goal_dim))
goals_str.append('Random Goal')
goals_ids.append(-1)
else:
if strategy == 'random':
if imagined and self.imagined_goal_ids.size > 0:
ind = np.random.choice(self.imagined_goal_ids)
else:
ind = np.random.choice(self.not_imagined_goal_ids)
else:
raise NotImplementedError
goals_encodings.append(self.feedback_memory['policy_encoding'][ind])
goals_str.append(self.id2feedback[ind])
goals_ids.append(ind)
all_goals_str.append(goals_str)
all_goals_encodings.append(goals_encodings)
all_goals_ids.append(goals_ids)
else:
all_goals_str = []
all_goals_encodings = []
all_goals_ids = []
goals_str = MPI.COMM_WORLD.scatter(all_goals_str, root=0)
goals_encodings = MPI.COMM_WORLD.scatter(all_goals_encodings, root=0)
goals_ids = MPI.COMM_WORLD.scatter(all_goals_ids, root=0)
return exploit, goals_str, goals_encodings, goals_ids, imagined
class EvalGoalSampler:
def __init__(self, policy_language_model, one_hot_encoder, params):
self.descriptions = params['train_descriptions']
self.nb_descriptions = len(self.descriptions)
self.count = 0
self.policy_language_model = policy_language_model
self.rollout_batch_size = params['evaluation_rollout_params']['rollout_batch_size']
self.params = params
def reset(self):
self.count = 0
def sample(self, method='robin'):
# print(self.descriptions[self.count])
goals_str = []
goals_encodings = []
goals_ids = []
if method == 'robin':
ind = self.count
elif method == 'random':
ind = np.random.randint(self.nb_descriptions)
else:
raise NotImplementedError
for _ in range(self.rollout_batch_size):
g_str = self.descriptions[ind]
goals_str.append(g_str)
policy_encoding = self.policy_language_model.encode(g_str).flatten()
goals_encodings.append(policy_encoding)
goals_ids.append(ind)
self.count += 1
return True, goals_str, goals_encodings, goals_ids
|
12993
|
class Config:
# helps to store settings for an experiment.
def __init__(self, _experiments_folder_path='experiments', _dataset_name='dataset', _column_names='unknown',
_types={1:'integer', 2:'string', 3:'float', 4:'boolean', 5:'gender', 6:'unknown', 7:'date-iso-8601', 8:'date-eu', 9:'date-non-std-subtype', 10:'date-non-std',
11:'positive integer', 12:'positive float'}):
self.main_experiments_folder = _experiments_folder_path
self.dataset_name = _dataset_name
self.column_names = _column_names
self.types = _types
self.types_as_list = list(_types.values())
columns = ['missing', 'catch-all',]
for key in _types:
columns.append(_types[key])
self.columns = columns
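# --- Usage sketch (illustrative) ---
# Building a Config with the default type dictionary; the dataset name and
# column names are placeholders.
if __name__ == "__main__":
    config = Config(_dataset_name="adult", _column_names=["age", "workclass"])
    print(config.types_as_list[:3])   # ['integer', 'string', 'float']
    print(config.columns[:4])         # ['missing', 'catch-all', 'integer', 'string']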
|
13008
|
import random
import cv2
import numpy as np
from augraphy.base.augmentation import Augmentation
class NoiseTexturize(Augmentation):
"""Creates a random noise based texture pattern to emulate paper textures.
Consequently applies noise patterns to the original image from big to small.
:param sigma_range: Defines bounds of noise fluctuations.
:type sigma_range: tuple, optional
:param turbulence_range: Defines how quickly big patterns will be
replaced with the small ones. The lower value -
the more iterations will be performed during texture generation.
:type turbulence_range: tuple, optional
:param p: The probability this Augmentation will be applied.
:type p: float, optional
"""
def __init__(
self,
sigma_range=(3, 10),
turbulence_range=(2, 5),
p=1,
):
"""Constructor method"""
super().__init__(p=p)
self.sigma_range = sigma_range
self.turbulence_range = turbulence_range
# Constructs a string representation of this Augmentation.
def __repr__(self):
return f"NoiseTexturize(sigma_range={self.sigma_range}, turbulence_range={self.turbulence_range}, p={self.p})"
# Applies the Augmentation to input data.
def __call__(self, image, layer=None, force=False):
if force or self.should_run():
image = image.copy()
sigma = random.randint(self.sigma_range[0], self.sigma_range[1])
turbulence = random.randint(
self.turbulence_range[0],
self.turbulence_range[1],
)
result = image.astype(float)
rows, cols = image.shape[:2]
if len(image.shape) > 2:
channel = image.shape[2]
else:
channel = 0
ratio = cols
while not ratio == 1:
result += self.noise(cols, rows, channel, ratio, sigma=sigma)
ratio = (ratio // turbulence) or 1
cut = np.clip(result, 0, 255)
cut = cut.astype(np.uint8)
return cut
def noise(self, width, height, channel, ratio, sigma):
"""The function generates an image, filled with gaussian nose. If ratio
parameter is specified, noise will be generated for a lesser image and
then it will be upscaled to the original size. In that case noise will
generate larger square patterns. To avoid multiple lines, the upscale
uses interpolation.
:param ratio: the size of generated noise "pixels"
:param sigma: defines bounds of noise fluctuations
"""
mean = 0
# assert width % ratio == 0, "Can't scale image with of size {} and ratio {}".format(width, ratio)
# assert height % ratio == 0, "Can't scale image with of size {} and ratio {}".format(height, ratio)
h = int(height / ratio)
w = int(width / ratio)
if h == 0:
h = 1
if w == 0:
w = 1
gaussian = np.vectorize(lambda x: random.gauss(mean, sigma))
        result = gaussian(np.zeros((h, w)))  # sample an (h, w) field of Gaussian noise, element-wise
result = cv2.resize(
result,
dsize=(width, height),
interpolation=cv2.INTER_LINEAR,
)
# for multiple channels input, convert result to multiple channels
if channel:
result = np.stack([result, result, result], axis=2)
return result
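# --- Usage sketch (illustrative; requires the augraphy base class imported above) ---
# Applying the augmentation to a plain grayscale "page" and checking the output.
if __name__ == "__main__":
    page = np.full((200, 300), 250, dtype=np.uint8)   # near-white test image
    augmented = NoiseTexturize()(page)
    print(augmented.shape, augmented.dtype)           # (200, 300) uint8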
|
13053
|
from abc import abstractproperty
from ..backend_config.bucket_config import S3BucketConfig
from ..storage.helper import StorageHelper
class SetupUploadMixin(object):
log = abstractproperty()
storage_uri = abstractproperty()
def setup_upload(
self, bucket_name, host=None, access_key=None, secret_key=None, region=None, multipart=True, https=True, verify=True):
"""
Setup upload options (currently only S3 is supported)
:param bucket_name: AWS bucket name
:type bucket_name: str
        :param host: Hostname. Only required when a non-AWS S3 solution such as a local Minio server is used
        :type host: str
        :param access_key: AWS access key. If not provided, we'll attempt to obtain the key from the
            configuration file (bucket-specific, then global)
        :type access_key: str
        :param secret_key: AWS secret key. If not provided, we'll attempt to obtain the secret from the
            configuration file (bucket-specific, then global)
:type secret_key: str
:param multipart: Server supports multipart. Only required when using a Non-AWS S3 solution that doesn't support
multipart.
:type multipart: bool
:param https: Server supports HTTPS. Only required when using a Non-AWS S3 solution that only supports HTTPS.
:type https: bool
:param region: Bucket region. Required if the bucket doesn't reside in the default region (us-east-1)
:type region: str
:param verify: Whether or not to verify SSL certificates. Only required when using a Non-AWS S3 solution that only supports HTTPS with self-signed certificate.
:type verify: bool
"""
self._bucket_config = S3BucketConfig(
bucket=bucket_name,
host=host,
key=access_key,
secret=secret_key,
multipart=multipart,
secure=https,
region=region,
verify=verify
)
self.storage_uri = ('s3://%(host)s/%(bucket_name)s' if host else 's3://%(bucket_name)s') % locals()
StorageHelper.add_configuration(self._bucket_config, log=self.log)
|
13057
|
import pickle
import pandas as pd
# cat aa ab ac > dataset.pkl from https://github.com/zhougr1993/DeepInterestNetwork
with open('dataset.pkl', 'rb') as f:
train_set = pickle.load(f, encoding='bytes')
test_set = pickle.load(f, encoding='bytes')
cate_list = pickle.load(f, encoding='bytes')
user_count, item_count, cate_count = pickle.load(f, encoding='bytes')
train_data = []
for sample in train_set:
user_id = sample[0]
item_id = sample[2]
item_history = "^".join([str(i) for i in sample[1]])
label = sample[3]
cate_id = cate_list[item_id]
cate_history = "^".join([str(i) for i in cate_list[sample[1]]])
train_data.append([label, user_id, item_id, cate_id, item_history, cate_history])
train_df = pd.DataFrame(train_data, columns=['label', 'user_id', 'item_id', 'cate_id', 'item_history', 'cate_history'])
train_df.to_csv("train.csv", index=False)
test_data = []
for sample in test_set:
user_id = sample[0]
item_pair = sample[2]
item_history = "^".join([str(i) for i in sample[1]])
cate_history = "^".join([str(i) for i in cate_list[sample[1]]])
test_data.append([1, user_id, item_pair[0], cate_list[item_pair[0]], item_history, cate_history])
test_data.append([0, user_id, item_pair[1], cate_list[item_pair[1]], item_history, cate_history])
test_df = pd.DataFrame(test_data, columns=['label', 'user_id', 'item_id', 'cate_id', 'item_history', 'cate_history'])
test_df.to_csv("test.csv", index=False)
|
13061
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_blobs
from sklearn.mixture import GaussianMixture
from sklearn.cluster import KMeans
from matplotlib.patches import Ellipse
# For reproducibility
np.random.seed(1000)
nb_samples = 300
nb_centers = 2
if __name__ == '__main__':
# Create the dataset
X, Y = make_blobs(n_samples=nb_samples, n_features=2, center_box=[-1, 1], centers=nb_centers,
cluster_std=[1.0, 0.6], random_state=1000)
# Show the dataset
sns.set()
fig, ax = plt.subplots(figsize=(15, 9))
ax.scatter(X[:, 0], X[:, 1], s=120)
ax.set_xlabel(r'$x_0$', fontsize=14)
ax.set_ylabel(r'$x_1$', fontsize=14)
plt.show()
# Train the model
gm = GaussianMixture(n_components=2, random_state=1000)
gm.fit(X)
Y_pred = gm.fit_predict(X)
print('Means: \n{}'.format(gm.means_))
print('Covariance matrices: \n{}'.format(gm.covariances_))
print('Weights: \n{}'.format(gm.weights_))
m1 = gm.means_[0]
m2 = gm.means_[1]
c1 = gm.covariances_[0]
c2 = gm.covariances_[1]
we1 = 1 + gm.weights_[0]
we2 = 1 + gm.weights_[1]
# Eigendecompose the covariances
w1, v1 = np.linalg.eigh(c1)
w2, v2 = np.linalg.eigh(c2)
nv1 = v1 / np.linalg.norm(v1)
nv2 = v2 / np.linalg.norm(v2)
print('Eigenvalues 1: \n{}'.format(w1))
print('Eigenvectors 1: \n{}'.format(nv1))
print('Eigenvalues 2: \n{}'.format(w2))
print('Eigenvectors 2: \n{}'.format(nv2))
a1 = np.arccos(np.dot(nv1[:, 1], [1.0, 0.0]) / np.linalg.norm(nv1[:, 1])) * 180.0 / np.pi
a2 = np.arccos(np.dot(nv2[:, 1], [1.0, 0.0]) / np.linalg.norm(nv2[:, 1])) * 180.0 / np.pi
# Perform K-Means clustering
km = KMeans(n_clusters=2, random_state=1000)
km.fit(X)
Y_pred_km = km.predict(X)
# Show the comparison of the results
fig, ax = plt.subplots(1, 2, figsize=(22, 9), sharey=True)
ax[0].scatter(X[Y_pred == 0, 0], X[Y_pred == 0, 1], s=80, marker='o', label='Gaussian 1')
ax[0].scatter(X[Y_pred == 1, 0], X[Y_pred == 1, 1], s=80, marker='d', label='Gaussian 2')
g1 = Ellipse(xy=m1, width=w1[1] * 3, height=w1[0] * 3, fill=False, linestyle='dashed', angle=a1, color='black',
linewidth=1)
g1_1 = Ellipse(xy=m1, width=w1[1] * 2, height=w1[0] * 2, fill=False, linestyle='dashed', angle=a1, color='black',
linewidth=2)
g1_2 = Ellipse(xy=m1, width=w1[1] * 1.4, height=w1[0] * 1.4, fill=False, linestyle='dashed', angle=a1,
color='black', linewidth=3)
g2 = Ellipse(xy=m2, width=w2[1] * 3, height=w2[0] * 3, fill=False, linestyle='dashed', angle=a2, color='black',
linewidth=1)
g2_1 = Ellipse(xy=m2, width=w2[1] * 2, height=w2[0] * 2, fill=False, linestyle='dashed', angle=a2, color='black',
linewidth=2)
g2_2 = Ellipse(xy=m2, width=w2[1] * 1.4, height=w2[0] * 1.4, fill=False, linestyle='dashed', angle=a2,
color='black', linewidth=3)
ax[0].set_xlabel(r'$x_0$', fontsize=16)
ax[0].set_ylabel(r'$x_1$', fontsize=16)
ax[0].add_artist(g1)
ax[0].add_artist(g1_1)
ax[0].add_artist(g1_2)
ax[0].add_artist(g2)
ax[0].add_artist(g2_1)
ax[0].add_artist(g2_2)
ax[0].set_title('Gaussian Mixture', fontsize=16)
ax[0].legend(fontsize=16)
ax[1].scatter(X[Y_pred_km == 0, 0], X[Y_pred_km == 0, 1], s=80, marker='o', label='Cluster 1')
ax[1].scatter(X[Y_pred_km == 1, 0], X[Y_pred_km == 1, 1], s=80, marker='d', label='Cluster 2')
ax[1].set_xlabel(r'$x_0$', fontsize=16)
ax[1].set_title('K-Means', fontsize=16)
ax[1].legend(fontsize=16)
# Predict the probability of some sample points
print('P([0, -2]=G1) = {:.3f} and P([0, -2]=G2) = {:.3f}'.format(*list(gm.predict_proba([[0.0, -2.0]]).squeeze())))
print('P([1, -1]=G1) = {:.3f} and P([1, -1]=G2) = {:.3f}'.format(*list(gm.predict_proba([[1.0, -1.0]]).squeeze())))
print('P([1, 0]=G1) = {:.3f} and P([1, 0]=G2) = {:.3f}'.format(*list(gm.predict_proba([[1.0, 0.0]]).squeeze())))
plt.show()
# Compute AICs, BICs, and log-likelihood
n_max_components = 20
aics = []
bics = []
log_likelihoods = []
for n in range(1, n_max_components + 1):
gm = GaussianMixture(n_components=n, random_state=1000)
gm.fit(X)
aics.append(gm.aic(X))
bics.append(gm.bic(X))
log_likelihoods.append(gm.score(X) * nb_samples)
# Show the results
fig, ax = plt.subplots(1, 3, figsize=(20, 6))
ax[0].plot(range(1, n_max_components + 1), aics)
ax[0].set_xticks(range(1, n_max_components + 1))
ax[0].set_xlabel('Number of Gaussians', fontsize=14)
ax[0].set_title('AIC', fontsize=14)
ax[1].plot(range(1, n_max_components + 1), bics)
ax[1].set_xticks(range(1, n_max_components + 1))
ax[1].set_xlabel('Number of Gaussians', fontsize=14)
ax[1].set_title('BIC', fontsize=14)
ax[2].plot(range(1, n_max_components + 1), log_likelihoods)
ax[2].set_xticks(range(1, n_max_components + 1))
ax[2].set_xlabel('Number of Gaussians', fontsize=14)
ax[2].set_title('Log-likelihood', fontsize=14)
plt.show()
|
13091
|
import torch
import torch.nn as nn
import neuron.ops as ops
from neuron.config import registry
@registry.register_module
class ReID_Metric(nn.Module):
def __init__(self, metric_cls, metric_rank):
super(ReID_Metric, self).__init__()
self.metric_cls = metric_cls
self.metric_rank = metric_rank
def forward(self, *args):
if len(args) == 2:
scores = None
feats, labels = args
elif len(args) == 3:
scores, feats, labels = args
else:
            raise ValueError('Expected to have 2 or 3 inputs, '
                             'but got {}'.format(len(args)))
metrics = self.metric_rank(feats, labels)
if scores is not None:
metrics.update(self.metric_cls(scores, labels))
return metrics
|
13118
|
import math
# Modify the parameters here
UNROLL_FACTOR = 32
DATA_T = 'unsigned short'
# Generate the code
data_type = DATA_T
level = int(math.log2(UNROLL_FACTOR))
for layer in range(level - 1, -1, -1):
pair = int(math.pow(2, layer))
for i in range(pair):
# data_t tmp_[layer]_[pair] = tmp_[layer+1]_[pair*2]_[pair*2+1]
if layer == level - 1:
print(f'{data_type} mul_{layer}_{i}_0 = local_A[0][{i*2}] * local_B[0][{i*2}];')
print(f'{data_type} add_{layer}_{i} = mul_{layer}_{i}_0 + local_A[0][{i*2+1}] * local_B[0][{i*2+1}];')
else:
print(f'{data_type} add_{layer}_{i} = add_{layer+1}_{i*2} + add_{layer+1}_{i*2+1};')
print('local_C[c7][c6] += add_0_0;')
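# For reference, with UNROLL_FACTOR = 4 the script above prints the following
# two-level multiply/add tree (reproduced output, not extra generator code):
#
#   unsigned short mul_1_0_0 = local_A[0][0] * local_B[0][0];
#   unsigned short add_1_0 = mul_1_0_0 + local_A[0][1] * local_B[0][1];
#   unsigned short mul_1_1_0 = local_A[0][2] * local_B[0][2];
#   unsigned short add_1_1 = mul_1_1_0 + local_A[0][3] * local_B[0][3];
#   unsigned short add_0_0 = add_1_0 + add_1_1;
#   local_C[c7][c6] += add_0_0;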
|
13173
|
from .utils import (get_prescription, get_attributes, get_group)
from .models import Disease, Result, Score, Question, SurveyResponse
from .analysis import cardio_risk_group, diabetes_risk_group, stroke_risk_group
from statistics import mean
from celery import shared_task
@shared_task
def worker(session_id):
df, attributes = get_attributes(session_id)
diseases = list(Disease.objects.all())
supported_methods = {
'cardiovascular disease': cardio_risk_group,
'diabetes': diabetes_risk_group,
'stroke': stroke_risk_group
}
question_region = Question.objects.get(label='region')
session_region = (list(SurveyResponse.objects.filter(
session_id=session_id,
question_id=question_region.id))[0]).answer
results = []
for disease in diseases:
illness = disease.illness
result_kwargs = {
'session_id': session_id,
'disease': disease,
'region': session_region
}
if illness not in supported_methods:
result_kwargs['risk_factor'] = 0
result_kwargs['prescription'] = 'Method is currently not supported'
else:
method = supported_methods[illness]
score = method(df, attributes[illness])
result_kwargs['risk_factor'] = float(score)
result_kwargs['label'] = get_group(score)
result_kwargs['prescription'] = get_prescription(score)
result_obj = Result.objects.update_or_create(
session_id=session_id, disease=disease,
defaults=result_kwargs
)
results.append(result_obj[0])
score = (1 - mean([res.risk_factor for res in results])) * 100
Score.objects.create(session_id=session_id, score=score)
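# --- Usage sketch (illustrative) ---
# The task is typically enqueued after a survey session is stored; .delay() is
# Celery's standard shortcut for apply_async(). The session id is a placeholder.
# worker.delay("0f8fad5b-d9cb-469f-a165-70867728950e")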
|
13177
|
from functools import wraps
from django.conf import settings
from django.shortcuts import render
from django.core.exceptions import PermissionDenied
from django.utils.decorators import available_attrs
from django.utils.encoding import force_str
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.six import string_types
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.shortcuts import resolve_url
from waliki.utils import is_authenticated
from .models import ACLRule
from .settings import (WALIKI_ANONYMOUS_USER_PERMISSIONS,
WALIKI_LOGGED_USER_PERMISSIONS,
WALIKI_RENDER_403)
def check_perms(perms, user, slug, raise_exception=False):
"""a helper user to check if a user has the permissions
for a given slug"""
if isinstance(perms, string_types):
perms = {perms}
else:
perms = set(perms)
allowed_users = ACLRule.get_users_for(perms, slug)
if allowed_users:
return user in allowed_users
if perms.issubset(set(WALIKI_ANONYMOUS_USER_PERMISSIONS)):
return True
if is_authenticated(user) and perms.issubset(set(WALIKI_LOGGED_USER_PERMISSIONS)):
return True
# First check if the user has the permission (even anon users)
if user.has_perms(['waliki.%s' % p for p in perms]):
return True
# In case the 403 handler should be called raise the exception
if raise_exception:
raise PermissionDenied
# As the last resort, show the login form
return False
def permission_required(perms, login_url=None, raise_exception=False, redirect_field_name=REDIRECT_FIELD_NAME):
"""
this is analog to django's builtin ``permission_required`` decorator, but
improved to check per slug ACLRules and default permissions for
anonymous and logged in users
if there is a rule affecting a slug, the user needs to be part of the
rule's allowed users. If there isn't a matching rule, defaults permissions
apply.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if check_perms(perms, request.user, kwargs['slug'], raise_exception=raise_exception):
return view_func(request, *args, **kwargs)
if is_authenticated(request.user):
if WALIKI_RENDER_403:
return render(request, 'waliki/403.html', kwargs, status=403)
else:
raise PermissionDenied
path = request.build_absolute_uri()
# urlparse chokes on lazy objects in Python 3, force to str
resolved_login_url = force_str(
resolve_url(login_url or settings.LOGIN_URL))
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
current_scheme, current_netloc = urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = request.get_full_path()
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
path, resolved_login_url, redirect_field_name)
return _wrapped_view
return decorator
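# --- Usage sketch (illustrative) ---
# Decorating a wiki view; the permission string is an example, and the view
# must receive the page slug as a keyword argument (check_perms reads
# kwargs['slug']).
#
# @permission_required('change_page')
# def edit(request, slug):
#     ...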
|
13211
|
from discord.ext.commands import BadArgument
class InvalidLength(BadArgument):
"""This exception is thrown whenever a invalid length was provided"""
def __init__(self, my_name, _min=None, _max=None, *args: object) -> None:
if _min is not None and _max is not None:
err = "Length of '" + my_name + "' must be between " + str(_min) + " and " + str(_max)
elif _min is None and _max is not None:
err = "Length of '" + my_name + "' must be less than " + str(_max)
elif _min is not None and _max is None:
err = "Lenght of '" + my_name + "' must be more than " + str(_min)
super().__init__(err)
class OutOfValidRange(BadArgument):
"""This exception is thrown whenever a value was ot of its valid range"""
def __init__(self, name, _min, _max, *args: object) -> None:
super().__init__("'" + name + "' must be in range " + str(_min) + " and " + str(_max))
class WrongType(BadArgument):
"""This exception is thrown whenever a value is of the wrong type"""
def __init__(self, name, me, valid_type, *args: object) -> None:
super().__init__("'" + name + "' must be of type " + (str(valid_type) if not isinstance(valid_type, list) else ' or '.join(valid_type)) + ", not " + str(type(me)))
class InvalidEvent(BadArgument):
"""This exception is thrown whenever a invalid eventname was passed"""
def __init__(self, name, events, *args: object) -> None:
super().__init__("Invalid event name, event must be " + " or ".join(events) + ", not " + str(name))
class MissingListenedComponentParameters(BadArgument):
"""This exception is thrown whenever a callback for a listening component is missing parameters"""
def __init__(self, *args: object) -> None:
super().__init__("Callback function for listening components needs to accept one parameter (the used component)", *args)
class CouldNotParse(BadArgument):
"""This exception is thrown whenever the libary was unable to parse the data with the given method"""
def __init__(self, data, type, method, *args: object) -> None:
super().__init__("Could not parse '" + str(data) + " [" + str(type) + "]' with method " + str(method), *args)
|
13218
|
from flask import Flask
from flask_restful_swagger.swagger import SwaggerRegistry
try:
from unittest.mock import patch
except ImportError:
from mock import patch
@patch("flask_restful_swagger.swagger._get_current_registry")
@patch("flask_restful_swagger.swagger.render_homepage")
def test_get_swagger_registry(homepage, registry):
mock_registry = {
"apiVersion": "mock_version",
"swaggerVersion": "mock_swagger_version",
"basePath": "mock_path",
"spec_endpoint_path": "mock_spec_endpoint_path",
"description": "mock_description",
}
registry.return_value = mock_registry
app = Flask(__name__)
resource = SwaggerRegistry()
bases = [base.__name__ for base in SwaggerRegistry.__mro__]
assert sorted(bases) == [
"MethodView",
"Resource",
"SwaggerRegistry",
"View",
"object",
]
with app.test_request_context(path="/some_path.html"):
_ = resource.get()
assert homepage.called
homepage.assert_called_once_with(
"mock_pathmock_spec_endpoint_path/_/resource_list.json"
)
with app.test_request_context(path="/some_path"):
homepage.reset_mock()
response = resource.get()
assert not homepage.called
assert response == mock_registry
|
13265
|
from netCDF4 import Dataset
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import matplotlib.cm as cm
import numpy as np
#-------------------------------------------------------------
def plot_subfigure(axis, array, nCells, nEdgesOnCell, verticesOnCell, xCell, yCell, zCell, xVertex, yVertex, zVertex, cmin, cmax, cmap):
xMin = 1.0e30
xMax = -1.0e30
yMin = 1.0e30
yMax = -1.0e30
cmap = plt.get_cmap(cmap)
patches = []
colors = []
for iCell in range(0,nCells):
if (yCell[iCell] > 0.0):
vertices = []
for iVertexOnCell in range(0,nEdgesOnCell[iCell]):
iVertex = verticesOnCell[iCell,iVertexOnCell]
vertices.append((xVertex[iVertex],zVertex[iVertex]))
colors.append(array[iCell])
patches.append(Polygon(vertices))
xMin = min(xMin,xVertex[iVertex])
xMax = max(xMax,xVertex[iVertex])
yMin = min(yMin,zVertex[iVertex])
yMax = max(yMax,zVertex[iVertex])
pc = PatchCollection(patches, cmap=cmap)
pc.set_array(np.array(colors))
pc.set_clim(cmin, cmax)
axis.add_collection(pc)
axis.set_xlim(xMin,xMax)
axis.set_ylim(yMin,yMax)
axis.set_aspect("equal")
axis.ticklabel_format(style='plain')
axis.tick_params(axis='x', \
which='both', \
bottom=False, \
top=False, \
labelbottom=False)
axis.tick_params(axis='y', \
which='both', \
left=False, \
right=False, \
labelleft=False)
#-------------------------------------------------------------
def plot_testcase():
nGrids = [2562,10242,40962,163842]
testTypes = ["cosine_bell","slotted_cylinder"]
methods = ["IR","IR","upwind"]
iTimes = [0,-1,-1]
for nGrid in nGrids:
print("nGrid: ", nGrid)
fig, axes = plt.subplots(3,4)
iTestType = -1
for testType in testTypes:
iTestType += 1
print(" Test type: ", testType)
iMethod = -1
for method, iTime in zip(methods,iTimes):
iMethod += 1
print(" Method: ", method, ", iTime: ", iTime)
filenamein = "./output_%s_%s_%i/output.2000.nc" %(method,testType,nGrid)
filein = Dataset(filenamein,"r")
nCells = len(filein.dimensions["nCells"])
nEdgesOnCell = filein.variables["nEdgesOnCell"][:]
verticesOnCell = filein.variables["verticesOnCell"][:]
xCell = filein.variables["xCell"][:]
yCell = filein.variables["yCell"][:]
zCell = filein.variables["zCell"][:]
xVertex = filein.variables["xVertex"][:]
yVertex = filein.variables["yVertex"][:]
zVertex = filein.variables["zVertex"][:]
verticesOnCell[:] = verticesOnCell[:] - 1
iceAreaCategory = filein.variables["iceAreaCategory"][:]
filein.close()
iceAreaCell = np.sum(iceAreaCategory,axis=(2,3))
plot_subfigure(axes[iMethod,iTestType*2], iceAreaCell[iTime], nCells, nEdgesOnCell, verticesOnCell, xCell, yCell, zCell, xVertex, yVertex, zVertex, 0.0, 1.0, "viridis")
iceAreaCellDiff = iceAreaCell[iTime] - iceAreaCell[0]
if (iMethod != 0):
plot_subfigure(axes[iMethod,iTestType*2+1], iceAreaCellDiff, nCells, nEdgesOnCell, verticesOnCell, xCell, yCell, zCell, xVertex, yVertex, zVertex, -1.0, 1.0, "bwr")
else:
axes[iMethod,iTestType*2+1].axis('off')
plt.savefig("advection_%6.6i.png" %(nGrid),dpi=300)
plt.cla()
plt.close(fig)
#-------------------------------------------------------------------------------
if __name__ == "__main__":
plot_testcase()
|
13275
|
import engine
print("Python: Script 2")
class Rotation(metaclass=engine.MetaComponent):
def __init__(self):
self.trans = 5
result = engine.query(Color)
print("Python: Query colors from Script 2")
for c in result:
c.string()
print("--------------------")
|
13335
|
import os
import angr
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests')
def test_vtable_extraction_x86_64():
p = angr.Project(os.path.join(test_location, "x86_64", "cpp_classes"), auto_load_libs=False)
vtables_sizes = {0x403cb0: 24, 0x403cd8: 16, 0x403cf8: 16, 0x403d18: 16}
vtable_analysis = p.analyses.VtableFinder()
vtables = vtable_analysis.vtables_list
assert len(vtables) == 4
for vtable in vtables:
assert vtable.vaddr in [0x403cb0, 0x403cd8, 0x403cf8, 0x403d18]
assert vtables_sizes[vtable.vaddr] == vtable.size
if __name__ == "__main__":
test_vtable_extraction_x86_64()
|
13346
|
from threading import Lock
from time import time
from ui import Menu
from ui.utils import clamp, check_value_lock, to_be_foreground
class NumberedMenu(Menu):
"""
This Menu allows the user to jump to entries using the numpad. If the menu is 10 entries or less
    the navigation is instant. Otherwise, it lets the user type multiple digits to navigate to entries beyond the 10th.
The `input_delay` parameter controls how long, and if, the menu waits before considering an input as definitive.
    If `input_delay` is 0, then only the first 10 entries can be navigated to using the keypad.
    The `prepend_numbers` parameter controls whether the entries should be prefixed by their number.
(default: `True`)
"""
def __init__(self, *args, **kwargs):
self.prepend_numbers = kwargs.pop('prepend_numbers', True)
self.input_delay = kwargs.pop('input_delay', 1)
Menu.__init__(self, *args, **kwargs)
self.__locked_name__ = None
self.value_lock = Lock()
self.numeric_keymap = {"KEY_{}".format(i): i for i in range(10)}
self.last_input_time = 0
self.current_input = None
@property
def entry_count(self):
return len(self.contents)
def before_activate(self):
Menu.before_activate(self)
self.last_input_time = -self.input_delay
def idle_loop(self):
Menu.idle_loop(self)
self.check_character_state()
def set_keymap(self):
Menu.set_keymap(self)
self.i.set_streaming(self.on_key_pressed)
def deactivate(self):
Menu.deactivate(self)
self.i.remove_streaming()
@to_be_foreground
def on_key_pressed(self, key):
if key == "KEY_RIGHT" and self.is_multi_digit():
self.confirm_current_input()
if key not in self.numeric_keymap:
return
if self.is_multi_digit():
self.process_multi_digit_input(key)
else:
self.process_single_digit_input(key)
self.view.refresh()
def process_single_digit_input(self, key):
self.move_to_entry(self.numeric_keymap[key])
def process_multi_digit_input(self, key):
self.last_input_time = time()
if not self.current_input:
self.current_input = str(self.numeric_keymap[key])
else:
self.current_input += str(self.numeric_keymap[key])
def move_to_entry(self, index):
if self.pointer == index:
# Moving to the same item that's already selected
# let's interpret this as KEY_ENTER
self.current_input = None
self.select_entry()
return
self.pointer = clamp(index, 0, len(self.contents) - 1)
self.current_input = None
self.view.refresh()
def process_contents(self):
Menu.process_contents(self)
if self.prepend_numbers:
self.prepend_entry_text()
def prepend_entry_text(self):
# prepend numbers to each entry name
if self.is_multi_digit():
self.contents = [["{} {}".format(i, entry[0]), entry[1]]
for i, entry in enumerate(self.contents)]
else:
for i, entry in enumerate(self.contents[:10]):
entry[0] = "{} {}".format(i, entry[0])
@check_value_lock
def check_character_state(self):
if self.is_current_input_finished():
self.move_to_entry(int(self.current_input))
def is_multi_digit(self):
return self.input_delay > 0
def is_current_input_finished(self):
# nothing in the buffer
if not self.current_input:
return False
# no need to let the user input '100' if we have 20 entries
if len(str(self.current_input)) == len(str(self.entry_count)):
return True
# user typed 2 and we have 19 entries, going to the most likely option
if int(self.current_input) * 10 > self.entry_count:
return True
# user typed 17 and we have 12 entries
if int(self.current_input) >= self.entry_count:
return True
now = time()
elapsed = now - self.last_input_time
if self.is_multi_digit() and elapsed >= self.input_delay: # delay wait is over
return True
return False
def confirm_current_input(self):
if self.current_input is None:
return
self.move_to_entry(int(self.current_input))
|
13383
|
disable_gtk_binaries = True
def gtk_dependent_cc_library(**kwargs):
if not disable_gtk_binaries:
native.cc_library(**kwargs)
def gtk_dependent_cc_binary(**kwargs):
if not disable_gtk_binaries:
native.cc_binary(**kwargs)
|
13390
|
from typing import Callable, Optional, Sequence, Tuple, Union
import numpy
from dexp.processing.utils.nd_slice import nd_split_slices, remove_margin_slice
from dexp.processing.utils.normalise import Normalise
from dexp.utils import xpArray
from dexp.utils.backends import Backend
def scatter_gather_i2i(
function: Callable,
image: xpArray,
tiles: Union[int, Tuple[int, ...]],
margins: Optional[Union[int, Tuple[int, ...]]] = None,
normalise: bool = False,
clip: bool = False,
to_numpy: bool = True,
internal_dtype: Optional[numpy.dtype] = None,
) -> xpArray:
"""
Image-2-image scatter-gather.
'Scatters' computation of a given unary function by splitting the input array into tiles,
computing using a given backend, and reassembling the tiles into a single array of same
    shape as the input, either backed by the same backend as the input image,
    or backed by numpy -- useful when the compute backend cannot hold the whole input and output
images in memory.
Parameters
----------
function : unary function
    image : input image (can be backed by any backend, e.g. numpy)
tiles : tile sizes to cut input image into, can be a single integer or a tuple of integers.
margins : margins to add to each tile, can be a single integer or a tuple of integers.
if None, no margins are added.
normalise : normalises the input image.
clip : clip after normalisation/denormalisation
    to_numpy : should the result be a numpy array? Very useful when the compute backend
cannot hold the whole input and output images in memory.
internal_dtype : internal dtype for computation
Returns
-------
    Result of applying the unary function to the input image; if to_numpy==True the result is a numpy array.
"""
if internal_dtype is None:
internal_dtype = image.dtype
if type(tiles) == int:
tiles = (tiles,) * image.ndim
    # If None is passed for a tile, that means we don't tile along that axis; we also clip the tile size:
tiles = tuple((length if tile is None else min(length, tile)) for tile, length in zip(tiles, image.shape))
if margins is None:
margins = (0,) * image.ndim
if type(margins) == int:
margins = (margins,) * image.ndim
if to_numpy:
result = numpy.empty(shape=image.shape, dtype=internal_dtype)
else:
result = Backend.get_xp_module(image).empty_like(image, dtype=internal_dtype)
# Normalise:
norm = Normalise(Backend.to_backend(image), do_normalise=normalise, clip=clip, quantile=0.005)
# image shape:
shape = image.shape
# We compute the slices objects to cut the input and target images into batches:
tile_slices = list(nd_split_slices(shape, chunks=tiles, margins=margins))
tile_slices_no_margins = list(nd_split_slices(shape, chunks=tiles))
# Zipping together slices with and without margins:
slices = zip(tile_slices, tile_slices_no_margins)
# Number of tiles:
number_of_tiles = len(tile_slices)
if number_of_tiles == 1:
# If there is only one tile, let's not be complicated about it:
result = norm.backward(function(norm.forward(image)))
if to_numpy:
result = Backend.to_numpy(result, dtype=internal_dtype)
else:
result = Backend.to_backend(result, dtype=internal_dtype)
else:
_scatter_gather_loop(
norm.backward, function, image, internal_dtype, norm.forward, result, shape, slices, to_numpy
)
return result
def _scatter_gather_loop(
denorm_fun: Callable,
function: Callable,
image: xpArray,
internal_dtype: numpy.dtype,
norm_fun: Callable,
    result: xpArray,
shape: Tuple[int, ...],
slices: Sequence[Tuple[slice, ...]],
to_numpy: bool,
) -> None:
for tile_slice, tile_slice_no_margins in slices:
image_tile = image[tile_slice]
image_tile = Backend.to_backend(image_tile, dtype=internal_dtype)
image_tile = denorm_fun(function(norm_fun(image_tile)))
if to_numpy:
image_tile = Backend.to_numpy(image_tile, dtype=internal_dtype)
else:
image_tile = Backend.to_backend(image_tile, dtype=internal_dtype)
remove_margin_slice_tuple = remove_margin_slice(shape, tile_slice, tile_slice_no_margins)
image_tile = image_tile[remove_margin_slice_tuple]
result[tile_slice_no_margins] = image_tile
# Dask turned out not to work great here: HUGE overhead compared to the light approach above.
# def scatter_gather_dask(backend: Backend,
# function,
# image,
# chunks,
# margins=None):
# boundary=None
# trim=True
# align_arrays=True
#
# image_d = from_array(image, chunks=chunks, asarray=False)
#
# def function_numpy(_image):
# print(_image.shape)
# return backend.to_numpy(function(_image))
#
# #func, *args, depth=None, boundary=None, trim=True, align_arrays=True, **kwargs
# computation= map_overlap(function_numpy,
# image_d,
# depth=margins,
# boundary=boundary,
# trim=trim,
# align_arrays=align_arrays,
# dtype=image.dtype
# )
#
# #computation.visualize(filename='transpose.png')
# result = computation.compute()
#
# return result
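# A minimal usage sketch (hypothetical toy function; assumes dexp exposes a
# NumpyBackend context manager in dexp.utils.backends):
if __name__ == "__main__":
    from dexp.utils.backends import NumpyBackend
    with NumpyBackend():
        toy_image = numpy.random.rand(512, 512).astype(numpy.float32)
        # apply a trivial unary function tile-by-tile with 32-pixel margins
        doubled = scatter_gather_i2i(lambda x: 2 * x, toy_image, tiles=256, margins=32)
        assert doubled.shape == toy_image.shape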
|
13407
|
from .config import add_panopticfcn_config
from .panoptic_seg import PanopticFCN
from .build_solver import build_lr_scheduler
|
13468
|
import os
import scipy
import numpy as np
import pandas as pd
import torch
from torch.autograd import Variable
def predict_batch(net, inputs):
v = Variable(inputs.cuda(), volatile=True)
return net(v).data.cpu().numpy()
def get_probabilities(model, loader):
model.eval()
    return np.vstack([predict_batch(model, data[0]) for data in loader])
def get_predictions(probs, thresholds):
preds = np.copy(probs)
preds[preds >= thresholds] = 1
preds[preds < thresholds] = 0
return preds.astype('uint8')
def get_argmax(output):
val,idx = torch.max(output, dim=1)
return idx.data.cpu().view(-1).numpy()
def get_targets(loader):
targets = None
for data in loader:
if targets is None:
shape = list(data[1].size())
shape[0] = 0
targets = np.empty(shape)
target = data[1]
if len(target.size()) == 1:
target = target.view(-1,1)
target = target.numpy()
targets = np.vstack([targets, target])
return targets
def ensemble_with_method(arr, method):
if method == c.MEAN:
return np.mean(arr, axis=0)
elif method == c.GMEAN:
return scipy.stats.mstats.gmean(arr, axis=0)
elif method == c.VOTE:
return scipy.stats.mode(arr, axis=0)[0][0]
raise Exception("Operation not found")
|
13483
|
import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
import numpy as np
import math
from functools import wraps
def clip(img, dtype, maxval):
return np.clip(img, 0, maxval).astype(dtype)
def clipped(func):
"""
wrapper to clip results of transform to image dtype value range
"""
@wraps(func)
def wrapped_function(img, *args, **kwargs):
dtype, maxval = img.dtype, np.max(img)
return clip(func(img, *args, **kwargs), dtype, maxval)
return wrapped_function
def fix_shift_values(img, *args):
"""
shift values are normally specified in uint, but if your data is float - you need to remap values
"""
if img.dtype == np.float32:
return list(map(lambda x: x / 255, args))
return args
def vflip(img):
return cv2.flip(img, 0)
def hflip(img):
return cv2.flip(img, 1)
def flip(img, code):
return cv2.flip(img, code)
def transpose(img):
return img.transpose(1, 0, 2) if len(img.shape) > 2 else img.transpose(1, 0)
def rot90(img, times):
img = np.rot90(img, times)
return np.ascontiguousarray(img)
def rotate(img, angle):
"""
rotate image on specified angle
:param angle: angle in degrees
"""
height, width = img.shape[0:2]
mat = cv2.getRotationMatrix2D((width/2, height/2), angle, 1.0)
img = cv2.warpAffine(img, mat, (width, height),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_REFLECT_101)
return img
def shift_scale_rotate(img, angle, scale, dx, dy):
"""
:param angle: in degrees
:param scale: relative scale
"""
height, width = img.shape[:2]
cc = math.cos(angle/180*math.pi) * scale
ss = math.sin(angle/180*math.pi) * scale
rotate_matrix = np.array([[cc, -ss], [ss, cc]])
box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
box1 = box0 - np.array([width/2, height/2])
box1 = np.dot(box1, rotate_matrix.T) + np.array([width/2+dx*width, height/2+dy*height])
box0 = box0.astype(np.float32)
box1 = box1.astype(np.float32)
mat = cv2.getPerspectiveTransform(box0, box1)
img = cv2.warpPerspective(img, mat, (width, height),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_REFLECT_101)
return img
def center_crop(img, height, width):
h, w, c = img.shape
dy = (h-height)//2
dx = (w-width)//2
y1 = dy
y2 = y1 + height
x1 = dx
x2 = x1 + width
img = img[y1:y2, x1:x2, :]
return img
def shift_hsv(img, hue_shift, sat_shift, val_shift):
dtype = img.dtype
maxval = np.max(img)
img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(np.int32)
h, s, v = cv2.split(img)
h = cv2.add(h, hue_shift)
h = np.where(h < 0, maxval - h, h)
h = np.where(h > maxval, h - maxval, h)
h = h.astype(dtype)
s = clip(cv2.add(s, sat_shift), dtype, maxval)
v = clip(cv2.add(v, val_shift), dtype, maxval)
img = cv2.merge((h, s, v)).astype(dtype)
img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
return img
def shift_channels(img, r_shift, g_shift, b_shift):
img[...,0] = clip(img[...,0] + r_shift, np.uint8, 255)
img[...,1] = clip(img[...,1] + g_shift, np.uint8, 255)
img[...,2] = clip(img[...,2] + b_shift, np.uint8, 255)
return img
def clahe(img, clipLimit=2.0, tileGridSize=(8,8)):
img_yuv = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
img_yuv[:, :, 0] = clahe.apply(img_yuv[:, :, 0])
img_output = cv2.cvtColor(img_yuv, cv2.COLOR_LAB2RGB)
return img_output
def blur(img, ksize):
return cv2.blur(img, (ksize, ksize))
def invert(img):
return 255 - img
def channel_shuffle(img):
ch_arr = [0, 1, 2]
np.random.shuffle(ch_arr)
img = img[..., ch_arr]
return img
def img_to_tensor(im, verbose=False):
'''AVE edit'''
im_out = np.moveaxis(im / (255. if im.dtype == np.uint8 else 1), -1, 0).astype(np.float32)
if verbose:
print ("augmentations.functiona.py.img_to_tensor(): im_out.shape:", im_out.shape)
print ("im_out.unique:", np.unique(im_out))
return im_out
def mask_to_tensor(mask, num_classes, verbose=False):
'''AVE edit'''
if num_classes > 1:
mask = img_to_tensor(mask)
else:
mask = np.expand_dims(mask / (255. if mask.dtype == np.uint8 else 1), 0).astype(np.float32)
if verbose:
print ("augmentations.functiona.py.img_to_tensor(): mask.shape:", mask.shape)
print ("mask.unique:", np.unique(mask))
return mask
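# A minimal usage sketch: rotate a random uint8 RGB image, shift its HSV channels,
# and convert it to a CHW float tensor (the shift values are illustrative):
if __name__ == "__main__":
    demo = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
    demo = rotate(demo, angle=15)
    demo = shift_hsv(demo, hue_shift=5, sat_shift=10, val_shift=-10)
    tensor = img_to_tensor(demo)
    print(tensor.shape, tensor.dtype)  # (3, 64, 64) float32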
|
13491
|
test = int(input())
while test > 0 :
n,k = map(int,input().split())
p = list(map(int,input().split()))
original = 0
later = 0
for i in p :
if i > k :
later += k
original += i
else :
later += i
original += i
print(original-later)
test -= 1
|
13503
|
from .test_tensorboard_rest_api import TestTensorboardRestAPI
from .test_tensorboard_server import TestTensorboardServer
from .test_tensorboard_endpoints import TestTensorboardEndpoint
|
13511
|
from scapy.fields import ByteField, ShortField
from scapy.packet import Packet
class TPKT(Packet):
name = "TPKT"
fields_desc = [ByteField("version", 3),
ByteField("reserved", 0),
ShortField("length", 0x0000)]
|
13547
|
import telnetlib
import time
def send_command_telnetlib(ipaddress, username, password, enable_pass, command):
    t = telnetlib.Telnet(ipaddress)
t.read_until(b"Username:")
t.write(username.encode("ascii") + b"\n")
t.read_until(b"Password:")
t.write(password.encode("ascii") + b"\n")
t.write(b"enable\n")
t.read_until(b"Password:")
t.write(enable_pass.encode("ascii") + b"\n")
t.read_until(b"#")
t.write(b"terminal length 0\n")
t.write(command + b"\n")
time.sleep(1)
result = t.read_until(b"#").decode("utf-8")
return result
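# A minimal usage sketch (hypothetical address, credentials and command);
# note that the command must be passed as bytes, matching the function body above:
if __name__ == "__main__":
    output = send_command_telnetlib(
        "192.168.100.1", "cisco", "cisco", "cisco", b"show ip interface brief"
    )
    print(output)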
|
13563
|
import io
import pytest
import pytorch_pfn_extras as ppe
from pytorch_pfn_extras.training.extensions import _ipython_module_available
from pytorch_pfn_extras.training.extensions.log_report import _pandas_available
@pytest.mark.skipif(
not _ipython_module_available or not _pandas_available,
reason="print report notebook import failed, "
"maybe ipython is not installed"
)
def test_run_print_report_notebook():
max_epochs = 5
iters_per_epoch = 5
manager = ppe.training.ExtensionsManager(
{}, {}, max_epochs, iters_per_epoch=iters_per_epoch)
out = io.StringIO()
log_report = ppe.training.extensions.LogReport()
manager.extend(log_report)
extension = ppe.training.extensions.PrintReportNotebook(out=out)
manager.extend(extension)
for _ in range(max_epochs):
for _ in range(iters_per_epoch):
with manager.run_iteration():
# Only test it runs without fail
# The value is not tested now...
pass
if __name__ == '__main__':
pytest.main([__file__, '-v', '-s'])
|
13567
|
import numpy as np
def get_conf_thresholded(conf, thresh_log_conf, dtype_np):
"""Normalizes a confidence score to (0..1).
Args:
conf (float):
Unnormalized confidence.
dtype_np (type):
Desired return type.
Returns:
confidence (np.float32):
Normalized joint confidence.
"""
# 1. / (1. + np.exp(-5000. * conf + 5))
# https://www.desmos.com/calculator/olqbvoffua
# + 9.5: 0.0019 => 0.5
# + 5 : 0.0010 => 0.5
# + 6.5: 0.0013 => 0.5
return np.where(
conf < dtype_np(0.),
dtype_np(0.),
dtype_np(1.) /
(dtype_np(1.) + np.exp(dtype_np(-5000.) * conf + dtype_np(9.5)))
).astype(dtype_np)
def get_confs(query_2d_full, frame_id, thresh_log_conf, mx_conf, dtype_np):
"""
Args:
query_2d_full (stealth.logic.skeleton.Skeleton):
Skeleton with confidences.
frame_id (int):
Frame id.
Returns:
        confs (np.ndarray):
            Confidences at frame_id; returned as a tuple (confs, mx_conf) when mx_conf is not None.
"""
confs = np.zeros(query_2d_full.poses.shape[-1],
dtype=dtype_np)
is_normalized = query_2d_full.is_confidence_normalized()
if query_2d_full.has_confidence(frame_id):
for joint, conf in query_2d_full.confidence[frame_id].items():
cnf = dtype_np(conf) \
if is_normalized \
else get_conf_thresholded(conf, thresh_log_conf, dtype_np)
if mx_conf is not None and mx_conf < cnf:
mx_conf = dtype_np(cnf)
confs[joint] = dtype_np(cnf)
if mx_conf is None:
return confs
else:
assert isinstance(mx_conf, dtype_np)
return confs, mx_conf
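# A quick numeric check of the thresholding curve described in the comments above:
# with the +9.5 offset, a raw confidence of 0.0019 maps to ~0.5.
if __name__ == "__main__":
    print(get_conf_thresholded(np.float32(0.0019), None, np.float32))  # ~0.5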
|
13585
|
import string
from ...errors import SimValueError
from . import MemoryMixin
class HexDumperMixin(MemoryMixin):
def hex_dump(self, start, size, word_size=4, words_per_row=4, endianness="Iend_BE",
symbolic_char='?', unprintable_char='.', solve=False, extra_constraints=None,
inspect=False, disable_actions=True):
"""
Returns a hex dump as a string. The solver, if enabled, is called once for every byte
potentially making this function very slow. It is meant to be used mainly as a
"visualization" for debugging.
Warning: May read and display more bytes than `size` due to rounding. Particularly,
        if size is less than, or not a multiple of, word_size*words_per_row.
:param start: starting address from which to print
:param size: number of bytes to display
:param word_size: number of bytes to group together as one space-delimited unit
:param words_per_row: number of words to display per row of output
:param endianness: endianness to use when displaying each word (ASCII representation is unchanged)
:param symbolic_char: the character to display when a byte is symbolic and has multiple solutions
:param unprintable_char: the character to display when a byte is not printable
:param solve: whether or not to attempt to solve (warning: can be very slow)
        :param extra_constraints: extra constraints to pass to the solver if solve is True
:param inspect: whether or not to trigger SimInspect breakpoints for the memory load
:param disable_actions: whether or not to disable SimActions for the memory load
:return: hex dump as a string
"""
if endianness == "Iend_BE":
end = 1
else:
end = -1
if extra_constraints is None:
extra_constraints = []
# round up size so that chop() works
line_size = word_size * words_per_row
size = size if size % line_size == 0 else size + line_size - size % line_size
raw_mem = super().load(start, size=size, inspect=inspect, disable_actions=disable_actions)
i = start
dump_str = ""
for line in raw_mem.chop(line_size * self.state.arch.byte_width):
dump = "%x:" % i
group_str = ""
for word in line.chop(word_size * self.state.arch.byte_width):
word_bytes = ""
word_str = ""
for byte_ in word.chop(self.state.arch.byte_width)[::end]:
byte_value = None
if not self.state.solver.symbolic(byte_) or solve:
try:
byte_value = self.state.solver.eval_one(
byte_,
extra_constraints=extra_constraints
)
except SimValueError:
pass
if byte_value is not None:
word_bytes += "%02x" % byte_value
if chr(byte_value) in string.printable[:-5]:
word_str += chr(byte_value)
else:
word_str += unprintable_char
else:
word_bytes += symbolic_char*2
word_str += symbolic_char
dump += ' ' + word_bytes
group_str += word_str[::end] # always print ASCII representation in little-endian
dump += ' ' + group_str
i += line_size
dump_str += dump + '\n'
return dump_str
|
13592
|
import idaapi
from idaapi import *
inifinite_loops = [
b"\x00\xbf\xfd\xe7", # loop: nop; b loop
b"\xfe\xe7", # loop: b loop
]
whitelist = [
"Reset_Handler",
"main"
]
def detect_noret_funcs():
exit_locs_name_pairs = []
for func_addr in Functions():
if get_func_flags(func_addr) & idaapi.FUNC_NORET:
name = get_func_name(func_addr)
if name not in whitelist:
print("noret function: '{}' at 0x{:x}".format(name, func_addr))
exit_locs_name_pairs.append((func_addr, name))
return exit_locs_name_pairs
def detect_exit_ats(add_noret_functions=False):
# 0. find BKPTs
exit_locs = []
# 1. find noret functions if requested
if add_noret_functions:
exit_locs += detect_noret_funcs()
cnt = 0
# 2. find infinite loops and BKPT instructions
for segea in Segments():
for funcea in Functions(segea, get_segm_end(segea)):
functionName = get_func_name(funcea)
for (startea, endea) in Chunks(funcea):
for head in Heads(startea, endea):
# print(functionName, ":", "0x%08x"%(head), ":", GetDisasm(head))
for loop_code in inifinite_loops:
if get_bytes(head, len(loop_code)) == loop_code:
print("Found endless loop: 0x{:x} (function {})".format(head, functionName))
exit_locs.append((head, "endless_loop_{:02d}_{}".format(cnt, functionName)))
cnt += 1
if print_insn_mnem(head) == 'BKPT':
print("Found bkpt: 0x{:x} (function {})".format(head, functionName))
exit_locs.append((head, "bkpt_{:02d}_{}".format(cnt, functionName)))
cnt += 1
return exit_locs
def print_exit_ats(add_noret_functions=False):
exit_locs = detect_exit_ats(add_noret_functions=add_noret_functions)
print("exit_at:")
for addr, name in exit_locs:
print(" {}: 0x{:08x}".format(name, addr))
def dump_exit_ats(filename="exit_ats.yml"):
exit_locs = detect_exit_ats()
with open(filename, "w") as f:
f.write("exit_at:\n")
for addr, name in exit_locs:
f.write(" {}: 0x{:08x}\n".format(name, addr))
dump_exit_ats()
|
13606
|
class Acl(object):
def __init__(self, read_acl):
self.read_acl = read_acl
@staticmethod
def from_acl_response(acl_response):
'''Takes JSON response from API and converts to ACL object'''
if 'read' in acl_response:
read_acl = AclType.from_acl_response(acl_response['read'])
return Acl(read_acl)
else:
raise ValueError('Response does not contain read ACL')
def to_api_param(self):
read_acl_string = self.read_acl.acl_string
if read_acl_string is None:
return {'read':[]}
return {'read':[read_acl_string]}
class AclInner(object):
def __init__(self, pseudonym, acl_string):
self.pseudonym = pseudonym
self.acl_string = acl_string
def __repr__(self):
return 'AclType(pseudonym=%s,acl_string=%s)' % (self.pseudonym, self.acl_string)
class AclType(object):
public = AclInner('public','user://*')
my_algos = AclInner('my_algos','algo://.my/*')
private = AclInner('private',None) # Really is an empty list
default = my_algos
types = (public, my_algos, private)
@staticmethod
def from_acl_response(acl_list):
if len(acl_list) == 0:
return AclType.private
else:
acl_string = acl_list[0]
for t in AclType.types:
if t.acl_string == acl_string:
return t
else:
raise ValueError('Invalid acl string %s' % (acl_list[0]))
class ReadAcl(object):
public = Acl(AclType.public)
private = Acl(AclType.private)
my_algos = Acl(AclType.my_algos)
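# A minimal usage sketch (hypothetical API response shape): parse a read ACL
# from a response dict and serialize it back to the API parameter format.
if __name__ == "__main__":
    acl = Acl.from_acl_response({'read': ['user://*']})
    print(acl.to_api_param())  # {'read': ['user://*']}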
|
13622
|
import os
from pathlib import Path
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription, SetEnvironmentVariable, Shutdown
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch_ros.actions import Node
def generate_launch_description():
bringup_dir = Path(get_package_share_directory('rj_robocup'))
launch_dir = bringup_dir / 'launch'
stdout_linebuf_envvar = SetEnvironmentVariable(
'RCUTILS_CONSOLE_STDOUT_LINE_BUFFERED', '1')
grsim = Node(package='rj_robocup', executable='grSim', arguments=[])
radio = Node(package='rj_robocup',
executable='sim_radio_node',
output='screen',
on_exit=Shutdown())
control = Node(package='rj_robocup',
executable='control_node',
output='screen',
on_exit=Shutdown())
config_server = Node(package='rj_robocup',
executable='config_server',
output='screen',
on_exit=Shutdown())
vision_receiver_launch_path = str(launch_dir / "vision_receiver.launch.py")
vision_receiver = IncludeLaunchDescription(
PythonLaunchDescriptionSource(vision_receiver_launch_path))
ref_receiver = Node(package='rj_robocup',
executable='internal_referee_node',
output='screen',
on_exit=Shutdown())
vision_filter_launch_path = str(launch_dir / "vision_filter.launch.py")
vision_filter = IncludeLaunchDescription(
PythonLaunchDescriptionSource(vision_filter_launch_path))
return LaunchDescription([
grsim, stdout_linebuf_envvar, config_server, radio, control,
vision_receiver, vision_filter, ref_receiver
])
|
13668
|
from trainer.normal import NormalTrainer
from config import cfg
def get_trainer():
pair = {
'normal': NormalTrainer
}
assert (cfg.train.trainer in pair)
return pair[cfg.train.trainer]()
|
13724
|
import _plotly_utils.basevalidators
class ConnectorValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="connector", parent_name="waterfall", **kwargs):
super(ConnectorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Connector"),
data_docs=kwargs.pop(
"data_docs",
"""
line
:class:`plotly.graph_objects.waterfall.connecto
r.Line` instance or dict with compatible
properties
mode
Sets the shape of connector lines.
visible
Determines if connector lines are drawn.
""",
),
**kwargs
)
|
13738
|
import json
import regex
import nltk.data
from nltk.tokenize import word_tokenize
import sys
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
def tokenize(string):
return word_tokenize(string)
def split_paragraphs(text):
"""
remove urls, lowercase all words and separate paragraphs
"""
splits = regex.split(r'\n+', text)
paras = []
for split in splits[1:]: # skip the titles
split = split.strip()
if len(split) == 0:
continue
if 'Section::' in split:
continue
paras.append(split)
paras = " ".join(paras)
return sent_detector.tokenize(paras)
def split_sent(sent):
strings = regex.split('<a |</a>', sent)
new_strings = []
count = 0
for s in strings:
s = s.strip()
if s:
if 'href=' in s:
                s = s.split('href="', 1)[-1]
href, text = s.split('">')
new_strings.append((text, href))
count += 1
else:
ss = tokenize(s)
new_strings.extend([(_, None) for _ in ss])
return new_strings, count / len(new_strings), count
fw = open('out-more.json', 'w')
with open('en.json', 'r') as f:
for i, line in enumerate(f):
data = json.loads(line)
entry = {"id": data['id'], "url": data['url'], 'title': data['title']}
outputs = []
if len(data['text']) > 50:
try:
sents = split_paragraphs(data['text'])
for sent in sents:
if len(sent) < 400:
output, ratio, count = split_sent(sent)
if count > 1 and ratio >= 0.10 and len(output) >= 8 and output[0][0][0].isupper():
text = [_[0] for _ in output]
hyperlink = [_[1] for _ in output]
outputs.append((text, hyperlink))
except Exception:
pass
if len(outputs) > 0:
entry['text'] = outputs
fw.write(json.dumps(entry) + '\n')
sys.stdout.write('finished {}/{} \r'.format(i, 5989879))
fw.close()
|
13744
|
from zzcore import StdAns, mysakuya
import requests
class Ans(StdAns):
def GETMSG(self):
msg=''
try:
msg += xs()
except:
            msg += '可能是机器人笑死了!'  # "The bot probably laughed itself to death!"
return msg
def xs():
url = "http://api-x.aya1.xyz:6/"
text = requests.get(url=url).text
return text
|
13751
|
import json
import math
from dataclasses import dataclass
from datetime import timedelta
from enum import Enum
from pathlib import Path
from typing import List, Optional
import numpy as np
from vad.util.time_utils import (
format_timedelta_to_milliseconds,
format_timedelta_to_timecode,
parse_timecode_to_timedelta,
)
class VoiceActivityVersion(Enum):
v01 = "v0.1"
v02 = "v0.2"
v03 = "v0.3"
class VoiceActivityMillisecondsVersion(Enum):
v01 = "v0.1"
v02 = "v0.2"
v03 = "v0.3"
@dataclass
class Activity:
start: timedelta
end: timedelta
@dataclass
class VoiceActivity:
duration: timedelta
activities: List[Activity]
probs_sample_rate: Optional[int]
probs: Optional[List[float]]
@classmethod
def load(cls, path: Path):
with path.open() as file:
voice_activity_data = json.load(file)
return VoiceActivity.from_json(voice_activity_data)
@classmethod
def from_json(cls, voice_activity_data: dict):
version = voice_activity_data["version"]
if version == VoiceActivityVersion.v01.value:
voice_activity = cls(
duration=parse_timecode_to_timedelta(voice_activity_data["duration"]),
activities=[
Activity(
start=parse_timecode_to_timedelta(speech_block["start_time"]),
end=parse_timecode_to_timedelta(speech_block["end_time"]),
)
for speech_block in voice_activity_data["voice_activity"]
],
probs_sample_rate=voice_activity_data.get("probs_sample_rate"),
probs=voice_activity_data.get("probs"),
)
elif version == VoiceActivityVersion.v02.value:
if voice_activity_data["time_format"] == "timecode":
voice_activity = cls(
duration=parse_timecode_to_timedelta(voice_activity_data["duration"]),
activities=[
Activity(
start=parse_timecode_to_timedelta(speech_block["start_time"]),
end=parse_timecode_to_timedelta(speech_block["end_time"]),
)
for speech_block in voice_activity_data["voice_activity"]
],
probs_sample_rate=voice_activity_data.get("probs_sample_rate"),
probs=voice_activity_data.get("probs"),
)
elif voice_activity_data["time_format"] == "millisecond":
voice_activity = cls(
duration=timedelta(milliseconds=voice_activity_data["duration"]),
activities=[
Activity(
start=timedelta(milliseconds=speech_block["start_time"]),
end=timedelta(milliseconds=speech_block["end_time"]),
)
for speech_block in voice_activity_data["voice_activity"]
],
probs_sample_rate=voice_activity_data.get("probs_sample_rate"),
probs=voice_activity_data.get("probs"),
)
else:
raise NotImplementedError
elif version == VoiceActivityVersion.v03.value:
voice_activity = cls(
duration=parse_timecode_to_timedelta(voice_activity_data["duration"]),
activities=[
Activity(
start=parse_timecode_to_timedelta(activity["start"]),
end=parse_timecode_to_timedelta(activity["end"]),
)
for activity in voice_activity_data["activities"]
],
probs_sample_rate=voice_activity_data.get("probs_sample_rate"),
probs=voice_activity_data.get("probs"),
)
else:
raise NotImplementedError
return voice_activity
def save(self, path: Path, version: VoiceActivityVersion = VoiceActivityVersion.v03):
voice_activity_data = self.to_json(version)
with path.open("w") as file:
json.dump(voice_activity_data, file, ensure_ascii=False, indent=4)
def to_json(self, version: VoiceActivityVersion = VoiceActivityVersion.v03):
if version == VoiceActivityVersion.v01:
voice_activity_formatted = {
"version": VoiceActivityVersion.v01.value,
"duration": format_timedelta_to_timecode(self.duration),
"voice_activity": [
{
"start_time": format_timedelta_to_timecode(activity.start),
"end_time": format_timedelta_to_timecode(activity.end),
}
for activity in self.activities
],
"probs_sample_rate": self.probs_sample_rate,
"probs": self.probs,
}
elif version == VoiceActivityVersion.v02:
voice_activity_formatted = {
"version": VoiceActivityVersion.v02.value,
"duration": format_timedelta_to_timecode(self.duration),
"time_format": "timecode",
"voice_activity": [
{
"start_time": format_timedelta_to_timecode(activity.start),
"end_time": format_timedelta_to_timecode(activity.end),
}
for activity in self.activities
],
"probs_sample_rate": self.probs_sample_rate,
"probs": self.probs,
}
elif version == VoiceActivityVersion.v03:
voice_activity_formatted = {
"version": VoiceActivityVersion.v03.value,
"duration": format_timedelta_to_timecode(self.duration),
"activities": [
{
"start": format_timedelta_to_timecode(activity.start),
"end": format_timedelta_to_timecode(activity.end),
}
for activity in self.activities
],
"probs_sample_rate": self.probs_sample_rate,
"probs": self.probs,
}
else:
raise NotImplementedError
return voice_activity_formatted
def to_milliseconds(
self, version: VoiceActivityMillisecondsVersion = VoiceActivityMillisecondsVersion.v03
):
if version == VoiceActivityMillisecondsVersion.v02:
voice_activity_milliseconds = {
"version": version.value,
"duration": format_timedelta_to_milliseconds(self.duration),
"time_format": "millisecond",
"voice_activity": [
{
"start_time": format_timedelta_to_milliseconds(activity.start),
"end_time": format_timedelta_to_milliseconds(activity.end),
}
for activity in self.activities
],
"probs_sample_rate": self.probs_sample_rate,
"probs": self.probs,
}
elif version == VoiceActivityMillisecondsVersion.v03:
voice_activity_milliseconds = {
"version": version.value,
"duration": {"total_milliseconds": format_timedelta_to_milliseconds(self.duration)},
"activities": [
{
"start": {
"total_milliseconds": format_timedelta_to_milliseconds(activity.start)
},
"end": {
"total_milliseconds": format_timedelta_to_milliseconds(activity.end)
},
}
for activity in self.activities
],
"probs_sample_rate": self.probs_sample_rate,
"probs": self.probs,
}
else:
raise NotImplementedError
return voice_activity_milliseconds
@classmethod
def from_milliseconds(cls, voice_activity_data: dict):
version = voice_activity_data["version"] # version of milliseconds format
if version == VoiceActivityMillisecondsVersion.v02.value:
voice_activity = VoiceActivity(
duration=timedelta(milliseconds=voice_activity_data["duration"]),
activities=[
Activity(
start=timedelta(milliseconds=speech_block["start_time"]),
end=timedelta(milliseconds=speech_block["end_time"]),
)
for speech_block in voice_activity_data["voice_activity"]
],
probs_sample_rate=voice_activity_data.get("probs_sample_rate"),
probs=voice_activity_data.get("probs"),
)
elif version == VoiceActivityMillisecondsVersion.v03.value:
voice_activity = VoiceActivity(
duration=timedelta(
milliseconds=voice_activity_data["duration"]["total_milliseconds"]
),
activities=[
Activity(
start=timedelta(milliseconds=segment["start"]["total_milliseconds"]),
end=timedelta(milliseconds=segment["end"]["total_milliseconds"]),
)
for segment in voice_activity_data["activities"]
],
probs_sample_rate=voice_activity_data.get("probs_sample_rate"),
probs=voice_activity_data.get("probs"),
)
else:
raise NotImplementedError
return voice_activity
def to_labels(self, sample_rate: int) -> np.array:
total_samples = int(self.duration.total_seconds() * sample_rate)
        labels = np.zeros(total_samples, dtype=np.int64)  # np.long was removed from recent NumPy
for activity in self.activities:
start_sample = int(activity.start.total_seconds() * sample_rate)
end_sample = int(activity.end.total_seconds() * sample_rate)
labels[start_sample:end_sample] = 1
return labels
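# A minimal usage sketch: build a VoiceActivity in memory and rasterize it to
# per-sample labels (the duration, times and sample rate below are illustrative):
if __name__ == "__main__":
    va = VoiceActivity(
        duration=timedelta(seconds=2),
        activities=[Activity(start=timedelta(seconds=0.5), end=timedelta(seconds=1))],
        probs_sample_rate=None,
        probs=None,
    )
    print(va.to_labels(sample_rate=10))  # 20 samples, indices 5..9 set to 1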
|
13776
|
from ..utils import run
import logging
logger = logging.getLogger(__name__)
def process_one_package(path, package, python_version="3"):
"""Get details about one precise python package in the given image.
:param path: path were the docker image filesystem is expanded.
:type path: string
:param package: name of the python package to get info from.
:type package: string
:param python_version: version of python to use. can be "2" or "3". default to "3".
:type python_version: string
:return: list containing package name, version and size
:rtype: list[string, string, int]
"""
command = f"sudo chroot {path} pip{python_version} show {package}"
info = get_ipython().getoutput(command)
for line in info:
if "Name" in line:
name = line.split(" ").pop()
if "Version" in line:
version = line.split(" ").pop()
if "Location" in line:
location = line.split(" ").pop()
result = get_ipython().getoutput(
f"du --max-depth=0 {path}{location}/{name}").pop()
# If the folder does not exist, try lowercase
if "cannot access" in result:
result = get_ipython().getoutput(
f"du --max-depth=0 {path}{location}/{name.lower()}").pop()
    # If one of the folders was found, read its size from du
if "cannot access" not in result:
size = int(result.split('\t').pop(0))
# List the files by hand
else:
command = f"sudo chroot {path} pip{python_version} show {package} -f"
info = get_ipython().getoutput(command)
flag = False
size = 0
for line in info:
if flag:
command = f"du {path}{location}/{line.strip()}"
size += int(get_ipython().getoutput(command).pop().split('\t').pop(0))
if 'Files' in line:
flag = True
return [name, version, size]
def get_python_packages_info(path, python_version="3"):
"""Get details about all python packages in an image filesystem.
    :param path: path where the docker image filesystem is expanded.
:type path: string
:param python_version: version of python to use. can be "2" or "3". default to "3".
:type python_version: string
:return: list containing lists of each package's name, version and size
:rtype: list[list[string, string, int]]
"""
command = f"sudo chroot {path} pip{python_version} list --format freeze --no-cache-dir 2>/dev/null"
packages = [package.split('==')
for package in get_ipython().getoutput(command)]
package_list = []
for package in packages:
try:
package_list.append(process_one_package(path, package[0]))
except Exception as e:
logger.error("Error processing python packages", package[0], e)
pass
return package_list
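# A minimal usage sketch (meant for an IPython/Jupyter session, since the helpers
# above rely on get_ipython(); the path is hypothetical):
# package_list = get_python_packages_info("/tmp/extracted_image_fs")
# for name, version, size in package_list[:5]:
#     print(name, version, size)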
|
13789
|
from ansible.parsing.yaml.objects import AnsibleMapping
from ansiblelater.standard import StandardBase
class CheckScmInSrc(StandardBase):
sid = "ANSIBLE0005"
description = "Use `scm:` key rather than `src: scm+url`"
helptext = "usage of `src: scm+url` not recommended"
version = "0.1"
types = ["rolesfile"]
def check(self, candidate, settings):
roles, errors = self.get_tasks(candidate, settings)
if not errors:
for role in roles:
if isinstance(role, AnsibleMapping):
if "+" in role.get("src"):
errors.append(self.Error(role["__line__"], self.helptext))
return self.Result(candidate.path, errors)
|
13806
|
from django.core.urlresolvers import resolve
from django.shortcuts import render,redirect,HttpResponse
from kingadmin.permission_list import perm_dic
from django.conf import settings
def perm_check(*args,**kwargs):
request = args[0]
resolve_url_obj = resolve(request.path)
    current_url_name = resolve_url_obj.url_name  # url_name of the current URL
print('---perm:',request.user,request.user.is_authenticated(),current_url_name)
#match_flag = False
match_results = [None,]
match_key = None
if request.user.is_authenticated() is False:
return redirect(settings.LOGIN_URL)
for permission_key,permission_val in perm_dic.items():
per_url_name = permission_val[0]
per_method = permission_val[1]
perm_args = permission_val[2]
perm_kwargs = permission_val[3]
perm_hook_func = permission_val[4] if len(permission_val)>4 else None
if per_url_name == current_url_name: #matches current request url
if per_method == request.method: #matches request method
# if not perm_args: #if no args defined in perm dic, then set this request to passed perm
                # Match the args one by one and check that every one of them is present in the request.
args_matched = False #for args only
for item in perm_args:
request_method_func = getattr(request,per_method) #request.GET/POST
                    if request_method_func.get(item,None):  # the request dict contains this parameter
args_matched = True
else:
print("arg not match......")
args_matched = False
                        break  # one parameter failed to match: treat as False and exit the loop
                else:  # for-else: runs when the loop did not break (empty list or all args matched)
args_matched = True
                # Match parameters that must carry specific values.
kwargs_matched = False
for k,v in perm_kwargs.items():
request_method_func = getattr(request, per_method)
                    arg_val = request_method_func.get(k, None)  # the request dict contains this parameter
print("perm kwargs check:",arg_val,type(arg_val),v,type(v))
                    if arg_val == str(v):  # matched the specific parameter and its value, e.g. the request must contain user_id=3
kwargs_matched = True
else:
kwargs_matched = False
                        break  # one parameter failed to match: treat as False and exit the loop
else:
kwargs_matched = True
                # Finally, run the custom permission hook function if one is defined.
perm_hook_matched = False
if perm_hook_func:
perm_hook_matched = perm_hook_func(request)
match_results = [args_matched,kwargs_matched,perm_hook_matched]
print("--->match_results ", match_results)
                if all(match_results):  # everything matched
match_key = permission_key
break
if all(match_results):
app_name, *per_name = match_key.split('_')
print("--->matched ",match_results,match_key)
print(app_name, *per_name)
perm_obj = '%s.%s' % (app_name,match_key)
print("perm str:",perm_obj)
if request.user.has_perm(perm_obj):
            print('Current user has this permission')
return True
else:
            print('Current user does not have this permission')
return False
else:
print("未匹配到权限项,当前用户无权限")
def check_permission(func):
def inner(*args,**kwargs):
if not perm_check(*args,**kwargs):
request = args[0]
return render(request,'kingadmin/page_403.html')
return func(*args,**kwargs)
return inner
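# A minimal usage sketch (hypothetical view function): guard a Django view with
# the decorator above; a matching entry must exist in kingadmin.permission_list.perm_dic.
@check_permission
def customer_list(request):
    return HttpResponse("customer list")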
|
13821
|
import os
import glob
import torch
import numpy as np
# from PIL import Image, UnidentifiedImageError
from torch.utils.data import Dataset
from torchvision.datasets import MNIST
class ToyDataset(Dataset):
def __init__(self, N_K=50, K=4, X=None, Y=None):
super().__init__()
if X is not None:
self.data, self.targets = X, Y
else:
self.data, self.targets = self._init_data(N_K, K)
self.task_ids = torch.arange(self.targets.size(0))
def _init_data(self, N_K, K):
X1 = torch.cat([
0.8 + 0.4 * torch.randn(N_K, 1),
1.5 + 0.4 * torch.randn(N_K, 1),
], dim=-1)
Y1 = 0 * torch.ones(X1.size(0)).long()
X2 = torch.cat([
0.5 + 0.6 * torch.randn(N_K, 1),
-0.2 - 0.1 * torch.randn(N_K, 1),
], dim=-1)
Y2 = 1 * torch.ones(X2.size(0)).long()
X3 = torch.cat([
2.5 - 0.1 * torch.randn(N_K, 1),
1.0 + 0.6 * torch.randn(N_K, 1),
], dim=-1)
Y3 = 2 * torch.ones(X3.size(0)).long()
X4 = torch.distributions.MultivariateNormal(
torch.Tensor([-0.5, 1.5]),
covariance_matrix=torch.Tensor([[0.2, 0.1], [0.1, 0.1]])).sample(torch.Size([N_K]))
Y4 = 3 * torch.ones(X4.size(0)).long()
X = torch.cat([X1, X2, X3, X4], dim=0)
X[:, 1] -= 1
X[:, 0] -= 0.5
Y = torch.cat([Y1, Y2, Y3, Y4])
return X, Y
def filter_by_class(self, class_list=None):
if class_list:
mask = torch.zeros_like(self.targets).bool()
for c in class_list:
mask |= self.targets == c
else:
mask = torch.ones_like(self.targets).bool()
self.task_ids = torch.masked_select(torch.arange(self.targets.size(0)), mask)
def __getitem__(self, index):
return self.data[self.task_ids[index]], self.targets[self.task_ids[index]]
def __len__(self):
return self.task_ids.size(0)
class SplitMNIST(MNIST):
def __init__(self, *args, **kwargs):
kwargs['download'] = True
super().__init__(*args, **kwargs)
self.data = self.data.reshape(self.data.size(0), -1).float() / 255.
self.task_ids = torch.arange(self.targets.size(0))
def filter_by_class(self, class_list=None):
if class_list:
mask = torch.zeros_like(self.targets).bool()
for c in class_list:
mask |= self.targets == c
else:
mask = torch.ones_like(self.targets).bool()
self.task_ids = torch.masked_select(torch.arange(self.targets.size(0)), mask)
def filter_by_idx(self, idx):
self.data = self.data[idx]
self.targets = self.targets[idx]
self.task_ids = torch.arange(self.targets.size(0))
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
return self.data[self.task_ids[index]], self.targets[self.task_ids[index]]
def __len__(self):
return self.task_ids.size(0)
class PermutedMNIST(MNIST):
@staticmethod
def create_tasks(n=1):
return [torch.randperm(784) for _ in range(n)]
def __init__(self, *args, **kwargs):
kwargs['download'] = True
super().__init__(*args, **kwargs)
self.data = self.data.reshape(self.data.size(0), -1).float() / 255.
self.perm = None
def set_task(self, perm):
assert self.perm is None, 'Cannot set task again.'
self.data = self.data[:, perm]
self.perm = perm
def filter_by_idx(self, idx):
self.data = self.data[idx]
self.targets = self.targets[idx]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
return self.data[index], self.targets[index]
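# A minimal usage sketch: build the 4-cluster toy dataset and restrict it to two
# of the classes via the task mask.
if __name__ == "__main__":
    ds = ToyDataset(N_K=10, K=4)
    ds.filter_by_class([0, 2])
    x, y = ds[0]
    print(len(ds), x.shape, int(y))  # 20 torch.Size([2]) 0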
|
13860
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import tensorflow as tf
from fewshot.models.kmeans_utils import compute_logits
from fewshot.models.model import Model
from fewshot.models.refine_model import RefineModel
from fewshot.models.basic_model_VAT import BasicModelVAT
from fewshot.models.model_factory import RegisterModel
from fewshot.models.nnlib import (concat, weight_variable)
from fewshot.utils import logger
from fewshot.utils.debug import debug_identity
from fewshot.models.SSL_utils import *
l2_norm = lambda t: tf.sqrt(tf.reduce_sum(tf.pow(t, 2)))
log = logger.get()
@RegisterModel("basic-VAT-ENT")
class BasicModelVAT_ENT(BasicModelVAT):
def get_train_op(self, logits, y_test):
loss, train_op = BasicModelVAT.get_train_op(self, logits, y_test)
config = self.config
ENT_weight = config.ENT_weight
VAT_ENT_step_size = config.VAT_ENT_step_size
logits = self._unlabel_logits
s = tf.shape(logits)
s = s[0]
p = tf.stop_gradient(self.h_unlabel)
affinity_matrix = compute_logits(p, p) - (tf.eye(s, dtype=tf.float32) * 1000.0)
# logits = tf.Print(logits, [tf.shape(point_logits)])
ENT_loss = walking_penalty(logits, affinity_matrix)
loss += ENT_weight * ENT_loss
ENT_opt = tf.train.AdamOptimizer(VAT_ENT_step_size * self.learn_rate, name="Entropy-optimizer")
ENT_grads_and_vars = ENT_opt.compute_gradients(loss)
train_op = ENT_opt.apply_gradients(ENT_grads_and_vars)
for gradient, variable in ENT_grads_and_vars:
if gradient is None:
gradient = tf.constant(0.0)
self.adv_summaries.append(tf.summary.scalar("ENT/gradients/" + variable.name, l2_norm(gradient), family="Grads"))
self.adv_summaries.append(tf.summary.histogram("ENT/gradients/" + variable.name, gradient, family="Grads"))
self.summaries.append(tf.summary.scalar('entropy loss', ENT_loss))
return loss, train_op
|
13891
|
from pydantic import BaseModel, Field, EmailStr
class PostSchema(BaseModel):
id: int = Field(default=None)
title: str = Field(...)
content: str = Field(...)
class Config:
schema_extra = {
"example": {
"title": "Securing FastAPI applications with JWT.",
"content": "In this tutorial, you'll learn how to secure your application by enabling authentication using JWT. We'll be using PyJWT to sign, encode and decode JWT tokens...."
}
}
class UserSchema(BaseModel):
fullname: str = Field(...)
email: EmailStr = Field(...)
password: str = Field(...)
class Config:
schema_extra = {
"example": {
"fullname": "<NAME>",
"email": "<EMAIL>",
"password": "<PASSWORD>"
}
}
class UserLoginSchema(BaseModel):
email: EmailStr = Field(...)
password: str = Field(...)
class Config:
schema_extra = {
"example": {
"email": "<EMAIL>",
"password": "<PASSWORD>"
}
}
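# A minimal usage sketch: validate illustrative payloads against the schemas above
# (pydantic raises a ValidationError if a required field is missing or malformed).
if __name__ == "__main__":
    post = PostSchema(id=1, title="Hello", content="World")
    login = UserLoginSchema(email="user@example.com", password="secret")
    print(post.dict(), login.dict())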
|
13937
|
import os
from fabric.api import env, run, sudo, execute, local, settings, \
hide, open_shell, parallel, serial, put
from fabric.decorators import hosts
from fabric.contrib.console import confirm
import fabric.colors as fab_col
import paramiko
import getpass
from tabulate import tabulate
file_hosts = "hosts.txt"
paramiko.util.log_to_file("paramiko.log")
env.colorize_errors = True
# The selected hosts are the hosts in env (at the beginning)
selected_hosts = env.hosts
running_hosts = {}
env.connection_attempts = 2
# env.skip_bad_hosts = True
def load_hosts():
"""
Load hosts from hosts.txt.
A host can either be in form
username@host[:port] password
or
username@host[:port]
If no port is specified, port 22 is selected.
"""
with open(file_hosts, "r") as f:
data = f.readlines()
for line in data:
try:
host, password = line.strip().split()
except ValueError:
host = line.strip()
password = None
if len(host.split(':')) == 1:
host = host + ":22"
env.hosts.append(host)
if password is not None:
env.passwords[host] = password.strip()
env.hosts = list(set(env.hosts)) # Remove duplicates
def add_host():
"""
Add a new host to the running hosts.
The user can decide whether to add the host also to the external hosts.txt
file.
"""
name = raw_input("Username: ")
host = raw_input("Host: ")
port = input("Port: ")
new_host = name + "@" + host + ":" + str(port)
selected_hosts.append(new_host)
password = None
if confirm("Authenticate using a password? "):
password = getpass.getpass("Password: ").strip()
env.passwords[new_host] = password
# Append the new host to the hosts file
if confirm("Add the new host to the hosts file? "):
if password is not None:
line = new_host + " " + password + "\n"
else:
line = new_host + "\n"
with open(file_hosts, 'a') as f:
f.write(line)
def print_hosts():
"""
Print selected hosts.
If hosts haven't been hand-selected yet, all hosts are selected.
"""
hosts = map(lambda x: [x, env.passwords.get(x, None)], selected_hosts)
print(fab_col.green(tabulate(hosts, ["Host", "Password"])))
def check_hosts():
"""
Check if hosts are active or not and print the result.
"""
global running_hosts
running_hosts = dict()
for host in selected_hosts:
print(fab_col.magenta("\nPing host %d of %d" %
(selected_hosts.index(host) + 1, len(selected_hosts))))
response = os.system("ping -c 1 " + host.split("@")[1].split(":")[0])
if response == 0:
running_hosts[host] = True
else:
running_hosts[host] = False
# Convert running_hosts in order to print it as table
mylist = map(lambda index: [index[0], str(index[1])], running_hosts.items())
print(fab_col.green(tabulate(mylist, ["Host", "Running"])))
def select_running_hosts():
"""
Select all active hosts.
"""
global selected_hosts
with hide('stdout'):
check_hosts()
host_up = filter(lambda x: running_hosts.get(x, False),
running_hosts.keys())
selected_hosts = host_up
def choose_hosts():
"""
Select the hosts to be used.
"""
global selected_hosts
mylist = map(lambda (num, h): [num, h], enumerate(env.hosts))
print(fab_col.blue("Select Hosts (space-separated):"))
print(fab_col.blue(tabulate(mylist, ["Number", "Host"])))
choices = raw_input("> ").split()
# Avoid letters in string index
choices = filter(lambda x: x.isdigit(), choices)
# Convert to int list
choices = map(int, choices)
# Avoid IndexError
choices = filter(lambda x: x < len(env.hosts), choices)
# Remove duplicates
choices = list(set(choices))
# If no hosts are selected, keep the current hosts
if len(choices) == 0:
return
# Get only selected hosts
selected_hosts = map(lambda i: env.hosts[i], choices)
def run_locally(cmd=None):
"""
Execute a command locally.
"""
if cmd is None:
cmd = raw_input("Insert command: ")
with settings(warn_only=True):
local(cmd)
# This function cannot have the parallel decorator since
# a sudo command must receive the user password
@serial
def _execute_sudo(command):
"""
Execute a sudo command on a host.
Returns:
The results of the execution.
"""
with settings(warn_only=True):
return sudo(command[4:].strip(), shell=True)
@parallel
def _execute_command(command):
"""
Execute a command on a host.
Returns:
The results of the execution.
"""
with settings(warn_only=True):
try:
return run(command)
except:
print(fab_col.red("Error execution in host %s" % env.host))
return None
@parallel
def run_command(cmd=None):
"""
Execute a command on hosts.
"""
if cmd is None:
cmd = raw_input("Insert command: ")
if cmd.strip()[:4] == "sudo":
execute(_execute_sudo, cmd, hosts=selected_hosts)
else:
execute(_execute_command, cmd, hosts=selected_hosts)
@hosts(selected_hosts)
def execute_script():
"""
Execute a script file.
"""
# Attention to script name.
# Add security checks
script_file = raw_input("Name of the script: ")
remote_path = "~/"
if len(script_file) < 4 or ".." in script_file:
# Invalid script
print(fab_col.red("Error. Invalid script name."))
return
for h in selected_hosts:
with settings(host_string=h):
with hide('running'):
                put(script_file, remote_path, mode=0777)
# Remove the path from the name of the script
script_file = script_file.split("/")[-1]
# Execution
extension = script_file.split(".")[-1]
if extension == script_file:
print(fab_col.red("Invalid script"))
return
if extension == 'py':
run_command("python " + remote_path + script_file)
elif extension == "sh" or extension == "bash":
run_command("bash " + remote_path + script_file)
else:
print(fab_col.red("Extension not supported"))
# Delete the script
with hide('running', 'stdout'):
run_command("rm -f " + remote_path + script_file)
def open_sh():
"""
Open a shell on a host.
"""
mylist = map(lambda (num, h): [num, h], enumerate(selected_hosts))
print(fab_col.blue(tabulate(mylist, ["Number", "Host"])))
try:
n = input("Open shell in host number: ")
h = selected_hosts[n]
execute(open_shell, host=h)
except (NameError, IndexError):
print(fab_col.red("Error: invalid host selection."))
print(fab_col.red("Shell not opened."))
|
13952
|
import sys
from . import app
sys.path.append(str(app.config['LIB_PATH']))
from musicautobot.music_transformer import *
from musicautobot.config import *
from flask import Response, send_from_directory, send_file, request, jsonify
from .save import to_s3
import torch
import traceback
torch.set_num_threads(4)
data = load_data(app.config['DATA_PATH'], app.config['DATA_SAVE_NAME'], num_workers=1)
learn = music_model_learner(data, pretrained_path=app.config['MUSIC_MODEL_PATH'])
if torch.cuda.is_available(): learn.model.cuda()
# learn.to_fp16(loss_scale=512) # fp16 not supported for cpu - https://github.com/pytorch/pytorch/issues/17699
@app.route('/predict/midi', methods=['POST'])
def predict_midi():
args = request.form.to_dict()
midi = request.files['midi'].read()
print('THE ARGS PASSED:', args)
bpm = float(args['bpm']) # (AS) TODO: get bpm from midi file instead
temperatures = (float(args.get('noteTemp', 1.2)), float(args.get('durationTemp', 0.8)))
n_words = int(args.get('nSteps', 200))
seed_len = int(args.get('seedLen', 12))
# debugging 1 - send exact midi back
# with open('/tmp/test.mid', 'wb') as f:
# f.write(midi)
# return send_from_directory('/tmp', 'test.mid', mimetype='audio/midi')
# debugging 2 - test music21 conversion
# stream = file2stream(midi) # 1.
# debugging 3 - test npenc conversion
# seed_np = midi2npenc(midi) # music21 can handle bytes directly
# stream = npenc2stream(seed_np, bpm=bpm)
# debugging 4 - midi in, convert, midi out
# stream = file2stream(midi) # 1.
# midi_in = Path(stream.write("musicxml"))
# print('Midi in:', midi_in)
# stream_sep = separate_melody_chord(stream)
# midi_out = Path(stream_sep.write("midi"))
# print('Midi out:', midi_out)
# s3_id = to_s3(midi_out, args)
# result = {
# 'result': s3_id
# }
# return jsonify(result)
# Main logic
try:
full = predict_from_midi(learn, midi=midi, n_words=n_words, seed_len=seed_len, temperatures=temperatures)
stream = separate_melody_chord(full.to_stream(bpm=bpm))
midi_out = Path(stream.write("midi"))
print('Wrote to temporary file:', midi_out)
except Exception as e:
traceback.print_exc()
return jsonify({'error': f'Failed to predict: {e}'})
s3_id = to_s3(midi_out, args)
result = {
'result': s3_id
}
return jsonify(result)
# return send_from_directory(midi_out.parent, midi_out.name, mimetype='audio/midi')
# @app.route('/midi/song/<path:sid>')
# def get_song_midi(sid):
# return send_from_directory(file_path/data_dir, htlist[sid]['midi'], mimetype='audio/midi')
@app.route('/midi/convert', methods=['POST'])
def convert_midi():
args = request.form.to_dict()
    if 'midi' in request.files:
        midi = request.files['midi'].read()
    elif 'midi_path' in args:
        midi = args['midi_path']
    else:
        return jsonify({'error': 'No midi file or midi_path provided.'})
stream = file2stream(midi) # 1.
# stream = file2stream(midi).chordify() # 1.
stream_out = Path(stream.write('musicxml'))
return send_from_directory(stream_out.parent, stream_out.name, mimetype='xml')
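# Minimal client sketch (not part of the service) showing how the /predict/midi
# endpoint above could be exercised. The host/port and the 'example.mid' path are
# assumptions for illustration only; the form fields mirror those parsed in predict_midi().
if __name__ == '__main__':
    import requests
    with open('example.mid', 'rb') as f:  # hypothetical input file
        resp = requests.post(
            'http://localhost:5000/predict/midi',  # assumed host/port
            files={'midi': f},
            data={'bpm': 120, 'noteTemp': 1.2, 'durationTemp': 0.8,
                  'nSteps': 200, 'seedLen': 12},
        )
    print(resp.json())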
|
13953
|
from __future__ import absolute_import, division, print_function
from tests.core import mock
from trakt import Trakt
from httmock import HTTMock
import pytest
def test_likes():
with HTTMock(mock.fixtures, mock.unknown):
with Trakt.configuration.auth('mock', 'mock'):
likes = Trakt['users'].likes()
assert likes is not None
likes = list(likes)
assert len(likes) == 3
assert likes[0].keys == [
('trakt', 1519)
]
assert likes[1].keys == [
('trakt', '1238362'),
('slug', 'star-wars-machete')
]
assert likes[2].keys == [
('trakt', '840781'),
('slug', 'star-wars-timeline')
]
def test_likes_invalid_response():
with HTTMock(mock.fixtures, mock.unknown):
likes = Trakt['users'].likes()
assert likes is None
def test_likes_invalid_type():
with HTTMock(mock.fixtures, mock.unknown):
with pytest.raises(ValueError):
likes = Trakt['users'].likes('invalid')
assert likes is not None
likes = list(likes)
|
13988
|
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from jsonfield import JSONField
from orchestra.models.fields import PrivateFileField
from orchestra.models.queryset import group_by
from . import settings
from .methods import PaymentMethod
class PaymentSourcesQueryset(models.QuerySet):
def get_default(self):
return self.filter(is_active=True).first()
class PaymentSource(models.Model):
account = models.ForeignKey('accounts.Account', verbose_name=_("account"),
related_name='paymentsources')
method = models.CharField(_("method"), max_length=32,
choices=PaymentMethod.get_choices())
data = JSONField(_("data"), default={})
is_active = models.BooleanField(_("active"), default=True)
objects = PaymentSourcesQueryset.as_manager()
def __str__(self):
return "%s (%s)" % (self.label, self.method_class.verbose_name)
@cached_property
def method_class(self):
return PaymentMethod.get(self.method)
@cached_property
def method_instance(self):
""" Per request lived method_instance """
return self.method_class(self)
@cached_property
def label(self):
return self.method_instance.get_label()
@cached_property
def number(self):
return self.method_instance.get_number()
def get_bill_context(self):
method = self.method_instance
return {
'message': method.get_bill_message(),
}
def get_due_delta(self):
return self.method_instance.due_delta
def clean(self):
self.data = self.method_instance.clean_data()
class TransactionQuerySet(models.QuerySet):
group_by = group_by
def create(self, **kwargs):
source = kwargs.get('source')
if source is None or not hasattr(source.method_class, 'process'):
# Manual payments don't need processing
kwargs['state'] = self.model.WAITTING_EXECUTION
amount = kwargs.get('amount')
if amount == 0:
kwargs['state'] = self.model.SECURED
return super(TransactionQuerySet, self).create(**kwargs)
def secured(self):
return self.filter(state=Transaction.SECURED)
def exclude_rejected(self):
return self.exclude(state=Transaction.REJECTED)
def amount(self):
return next(iter(self.aggregate(models.Sum('amount')).values())) or 0
def processing(self):
return self.filter(state__in=[Transaction.EXECUTED, Transaction.WAITTING_EXECUTION])
class Transaction(models.Model):
WAITTING_PROCESSING = 'WAITTING_PROCESSING' # CREATED
WAITTING_EXECUTION = 'WAITTING_EXECUTION' # PROCESSED
EXECUTED = 'EXECUTED'
SECURED = 'SECURED'
REJECTED = 'REJECTED'
STATES = (
        (WAITTING_PROCESSING, _("Waiting processing")),
        (WAITTING_EXECUTION, _("Waiting execution")),
(EXECUTED, _("Executed")),
(SECURED, _("Secured")),
(REJECTED, _("Rejected")),
)
    STATE_HELP = {
        WAITTING_PROCESSING: _("The transaction is created and requires processing by the "
                               "specific payment method."),
        WAITTING_EXECUTION: _("The transaction is processed and is pending execution on "
                              "the related financial institution."),
        EXECUTED: _("The transaction is executed on the financial institution."),
        SECURED: _("The transaction amount is secured."),
        REJECTED: _("The transaction has failed and the amount is lost; a new transaction "
                    "should be created for recharging."),
    }
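    # Typical lifecycle, as implied by the mark_as_* helpers below:
    # WAITTING_PROCESSING -> WAITTING_EXECUTION -> EXECUTED -> SECURED,
    # with REJECTED as the terminal state for failed transactions.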
bill = models.ForeignKey('bills.bill', verbose_name=_("bill"),
related_name='transactions')
source = models.ForeignKey(PaymentSource, null=True, blank=True, on_delete=models.SET_NULL,
verbose_name=_("source"), related_name='transactions')
process = models.ForeignKey('payments.TransactionProcess', null=True, blank=True,
on_delete=models.SET_NULL, verbose_name=_("process"), related_name='transactions')
state = models.CharField(_("state"), max_length=32, choices=STATES,
default=WAITTING_PROCESSING)
amount = models.DecimalField(_("amount"), max_digits=12, decimal_places=2)
currency = models.CharField(max_length=10, default=settings.PAYMENT_CURRENCY)
created_at = models.DateTimeField(_("created"), auto_now_add=True)
modified_at = models.DateTimeField(_("modified"), auto_now=True)
objects = TransactionQuerySet.as_manager()
def __str__(self):
return "#%i" % self.id
@property
def account(self):
return self.bill.account
def clean(self):
if not self.pk:
amount = self.bill.transactions.exclude(state=self.REJECTED).amount()
if amount >= self.bill.total:
raise ValidationError(
_("Bill %(number)s already has valid transactions that cover bill total amount (%(amount)s).") % {
'number': self.bill.number,
'amount': amount,
}
)
def get_state_help(self):
if self.source:
return self.source.method_instance.state_help.get(self.state) or self.STATE_HELP.get(self.state)
return self.STATE_HELP.get(self.state)
def mark_as_processed(self):
self.state = self.WAITTING_EXECUTION
self.save(update_fields=('state', 'modified_at'))
def mark_as_executed(self):
self.state = self.EXECUTED
self.save(update_fields=('state', 'modified_at'))
def mark_as_secured(self):
self.state = self.SECURED
self.save(update_fields=('state', 'modified_at'))
def mark_as_rejected(self):
self.state = self.REJECTED
self.save(update_fields=('state', 'modified_at'))
class TransactionProcess(models.Model):
"""
Stores arbitrary data generated by payment methods while processing transactions
"""
CREATED = 'CREATED'
EXECUTED = 'EXECUTED'
ABORTED = 'ABORTED'
COMMITED = 'COMMITED'
STATES = (
(CREATED, _("Created")),
(EXECUTED, _("Executed")),
(ABORTED, _("Aborted")),
        (COMMITED, _("Committed")),
)
data = JSONField(_("data"), blank=True)
file = PrivateFileField(_("file"), blank=True)
state = models.CharField(_("state"), max_length=16, choices=STATES, default=CREATED)
created_at = models.DateTimeField(_("created"), auto_now_add=True, db_index=True)
updated_at = models.DateTimeField(_("updated"), auto_now=True)
class Meta:
verbose_name_plural = _("Transaction processes")
def __str__(self):
return '#%i' % self.id
def mark_as_executed(self):
self.state = self.EXECUTED
for transaction in self.transactions.all():
transaction.mark_as_executed()
self.save(update_fields=('state', 'updated_at'))
def abort(self):
self.state = self.ABORTED
for transaction in self.transactions.all():
transaction.mark_as_rejected()
self.save(update_fields=('state', 'updated_at'))
def commit(self):
self.state = self.COMMITED
for transaction in self.transactions.processing():
transaction.mark_as_secured()
self.save(update_fields=('state', 'updated_at'))
|
14006
|
import pytest
import os
import memcnn.experiment.factory
from memcnn.config import Config
def test_get_attr_from_module():
a = memcnn.experiment.factory.get_attr_from_module('memcnn.experiment.factory.get_attr_from_module')
assert a is memcnn.experiment.factory.get_attr_from_module
def test_load_experiment_config():
cfg_fname = os.path.join(Config.get_dir(), 'experiments.json')
memcnn.experiment.factory.load_experiment_config(cfg_fname, ['cifar10', 'resnet110'])
@pytest.mark.skip(reason="Covered more efficiently by test_train.test_run_experiment")
def test_experiment_config_parser(tmp_path):
tmp_data_dir = tmp_path / "tmpdata"
cfg_fname = os.path.join(Config.get_dir(), 'experiments.json')
cfg = memcnn.experiment.factory.load_experiment_config(cfg_fname, ['cifar10', 'resnet110'])
memcnn.experiment.factory.experiment_config_parser(cfg, str(tmp_data_dir), workers=None)
def test_circular_dependency(tmp_path):
p = str(tmp_path / "circular.json")
content = u'{ "circ": { "base": "circ" } }'
with open(p, 'w') as fh:
fh.write(content)
with open(p, 'r') as fh:
assert fh.read() == content
with pytest.raises(RuntimeError):
memcnn.experiment.factory.load_experiment_config(p, ['circ'])
|
14018
|
import copy, os
from ansible import errors
def expand_config(config_data):
try:
all_data = copy.deepcopy(expand_envs(config_data))
return all_data
except Exception, e:
raise errors.AnsibleFilterError(
'expand_config plugin error: {0}, config_data={1}'.format(
str(e),
str(config_data)))
def expand_envs(obj):
if isinstance(obj, dict):
return { key: expand_envs(val) for key, val in obj.items()}
if isinstance(obj, list):
return [ expand_envs(item) for item in obj ]
if isinstance(obj, basestring):
return os.path.expandvars(obj)
return obj
class FilterModule(object):
''' Expand Kraken configuration file '''
def filters(self):
return {
'expand_config': expand_config
}
|
14047
|
def formstash_to_querystring(formStash):
err = []
for (k, v) in formStash.errors.items():
err.append(("%s--%s" % (k, v)).replace("\n", "+").replace(" ", "+"))
err = sorted(err)
err = "---".join(err)
return err
class _UrlSafeException(Exception):
@property
def as_querystring(self):
return str(self).replace("\n", "+").replace(" ", "+")
class GarfieldMinusGarfield(Exception):
"""
An exception for those odd moments
"""
pass
class InvalidTransition(Exception):
"""raised when a transition is invalid"""
pass
class ObjectExists(Exception):
"""raised when an object already exists, no need to create"""
pass
class ConflictingObject(Exception):
"""
raised when an object already exists
args[0] = tuple(conflicting_object, error_message_string)
"""
pass
class OpenSslError(Exception):
pass
class OpenSslError_CsrGeneration(OpenSslError):
pass
class OpenSslError_InvalidKey(OpenSslError):
pass
class OpenSslError_InvalidCSR(OpenSslError):
pass
class OpenSslError_InvalidCertificate(OpenSslError):
pass
class OpenSslError_VersionTooLow(OpenSslError):
pass
class QueueProcessingError(Exception):
pass
class AcmeError(_UrlSafeException):
pass
class AcmeDuplicateAccount(AcmeError):
"""
args[0] MUST be the duplicate AcmeAccount
"""
pass
class AcmeDuplicateChallenges(AcmeError):
pass
class AcmeDuplicateChallengesExisting(AcmeDuplicateChallenges):
"""the first arg should be a list of the active challenges"""
def __str__(self):
return (
"""One or more domains already have active challenges: %s."""
% ", ".join(
[
"`%s` (%s)" % (ac.domain.domain_name, ac.acme_challenge_type)
for ac in self.args[0]
]
)
)
class AcmeDuplicateChallenge(AcmeDuplicateChallenges):
"""the first arg should be a single active challenge"""
def __str__(self):
return (
"""This domain already has active challenges: `%s`."""
% self.args[0].domain.domain_name
)
class AcmeDuplicateOrderlessDomain(AcmeDuplicateChallenges):
pass
class AcmeServerError(AcmeError):
pass
class AcmeServer404(AcmeServerError):
pass
class AcmeCommunicationError(AcmeError):
pass
class AcmeAuthorizationFailure(AcmeError):
"""raised when an Authorization fails"""
pass
class AcmeOrphanedObject(AcmeError):
pass
class AcmeOrderError(AcmeError):
pass
class AcmeOrderFatal(AcmeOrderError):
"""
The AcmeOrder has a fatal error.
Authorizations should be killed.
"""
pass
class AcmeOrderCreatedError(AcmeOrderError):
"""
If an exception occurs AFTER an AcmeOrder is created, raise this.
It should have two attributes:
args[0] - AcmeOrder
args[1] - original exception
"""
def __str__(self):
return "An AcmeOrder-{0} was created but errored".format(self.args[0])
@property
def acme_order(self):
return self.args[0]
@property
def original_exception(self):
return self.args[1]
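# Illustrative sketch of the two-argument contract above; `create_order` and
# `process_order` are hypothetical placeholders, not part of this module:
#
#     order = create_order(...)
#     try:
#         process_order(order)
#     except Exception as exc:
#         raise AcmeOrderCreatedError(order, exc)
#
# Callers can then inspect `err.acme_order` and `err.original_exception`.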
class AcmeOrderProcessing(AcmeOrderCreatedError):
"""
raise when the AcmeOrder is `processing` (RFC status)
this should generally indicate the user should retry their action
"""
def __str__(self):
return "An AcmeOrder-{0} was created. The order is still processing.".format(
self.args[0]
)
class AcmeOrderValid(AcmeOrderCreatedError):
"""
raise when the AcmeOrder is `valid` (RFC status)
this should generally indicate the user should retry their action
"""
def __str__(self):
return "An AcmeOrder-{0} was created. The order is valid and the CertificateSigned can be downloaded.".format(
self.args[0]
)
class AcmeMissingChallenges(AcmeError):
"""There are no Acme Challenges"""
pass
class AcmeChallengeFailure(AcmeError):
pass
class AcmeDomainsInvalid(AcmeError):
def __str__(self):
return "The following Domains are invalid: {0}".format(", ".join(self.args[0]))
class AcmeDomainsBlocklisted(AcmeDomainsInvalid):
def __str__(self):
return "The following Domains are blocklisted: {0}".format(
", ".join(self.args[0])
)
class AcmeDomainsRequireConfigurationAcmeDNS(AcmeDomainsInvalid):
def __str__(self):
return "The following Domains are not configured with ACME-DNS: {0}".format(
", ".join(self.args[0])
)
class DomainVerificationError(AcmeError):
pass
class DisplayableError(_UrlSafeException):
pass
class InvalidRequest(_UrlSafeException):
"""
raised when an end-user wants to do something invalid/not-allowed
"""
pass
# class TransitionError(_UrlSafeException):
# pass
# class OperationsContextError(_UrlSafeException):
# pass
|
14060
|
from __future__ import absolute_import, print_function, division
import unittest
from pony.orm.core import *
from pony.orm.core import local
from pony.orm.tests.testutils import *
from pony.orm.tests import setup_database, teardown_database
class TestGeneratorDbSession(unittest.TestCase):
def setUp(self):
db = Database()
class Account(db.Entity):
id = PrimaryKey(int)
amount = Required(int)
setup_database(db)
self.db = db
self.Account = Account
with db_session:
a1 = Account(id=1, amount=1000)
a2 = Account(id=2, amount=2000)
a3 = Account(id=3, amount=3000)
def tearDown(self):
teardown_database(self.db)
assert local.db_session is None
self.db = self.Account = None
@raises_exception(TypeError, 'db_session with `retry` option cannot be applied to generator function')
def test1(self):
@db_session(retry=3)
def f(): yield
@raises_exception(TypeError, 'db_session with `ddl` option cannot be applied to generator function')
def test2(self):
@db_session(ddl=True)
def f(): yield
@raises_exception(TypeError, 'db_session with `serializable` option cannot be applied to generator function')
def test3(self):
@db_session(serializable=True)
def f(): yield
def test4(self):
@db_session(immediate=True)
def f(): yield
@raises_exception(TransactionError, '@db_session-wrapped generator cannot be used inside another db_session')
def test5(self):
@db_session
def f(): yield
with db_session:
next(f())
def test6(self):
@db_session
def f():
x = local.db_session
self.assertTrue(x is not None)
yield self.db._get_cache()
self.assertEqual(local.db_session, x)
a1 = self.Account[1]
yield a1.amount
self.assertEqual(local.db_session, x)
a2 = self.Account[2]
yield a2.amount
gen = f()
cache = next(gen)
self.assertTrue(cache.is_alive)
self.assertEqual(local.db_session, None)
amount = next(gen)
self.assertEqual(amount, 1000)
self.assertEqual(local.db_session, None)
amount = next(gen)
self.assertEqual(amount, 2000)
self.assertEqual(local.db_session, None)
try: next(gen)
except StopIteration:
self.assertFalse(cache.is_alive)
else:
self.fail()
def test7(self):
@db_session
def f(id1):
a1 = self.Account[id1]
id2 = yield a1.amount
a2 = self.Account[id2]
amount = yield a2.amount
a1.amount -= amount
a2.amount += amount
commit()
gen = f(1)
amount1 = next(gen)
self.assertEqual(amount1, 1000)
amount2 = gen.send(2)
self.assertEqual(amount2, 2000)
try:
gen.send(100)
except StopIteration:
pass
else:
self.fail()
with db_session:
a1 = self.Account[1]
self.assertEqual(a1.amount, 900)
a2 = self.Account[2]
self.assertEqual(a2.amount, 2100)
@raises_exception(TransactionError, 'You need to manually commit() changes before suspending the generator')
def test8(self):
@db_session
def f(id1):
a1 = self.Account[id1]
a1.amount += 100
yield a1.amount
for amount in f(1):
pass
def test9(self):
@db_session
def f(id1):
a1 = self.Account[id1]
a1.amount += 100
commit()
yield a1.amount
for amount in f(1):
pass
def test10(self):
@db_session
def f(id1):
a1 = self.Account[id1]
yield a1.amount
a1.amount += 100
with db_session:
a = self.Account[1].amount
for amount in f(1):
pass
with db_session:
b = self.Account[1].amount
self.assertEqual(b, a + 100)
def test12(self):
@db_session
def f(id1):
a1 = self.Account[id1]
yield a1.amount
gen = f(1)
next(gen)
gen.close()
@raises_exception(TypeError, 'error message')
def test13(self):
@db_session
def f(id1):
a1 = self.Account[id1]
yield a1.amount
gen = f(1)
next(gen)
gen.throw(TypeError('error message'))
if __name__ == '__main__':
unittest.main()
|
14087
|
import os
import tensorflow as tf
from util import masked_softmax
class PolicyNetwork(object):
""" Policy Function approximator. """
def __init__(self, input_size, output_size, learning_rate=0.001, summaries_dir=None, scope="policy_estimator"):
with tf.variable_scope(scope):
# Writes Tensorboard summaries to disk
self.summary_writer = None
if summaries_dir:
summary_dir = os.path.join(summaries_dir, "summaries_{}".format(scope))
if not os.path.exists(summary_dir):
os.makedirs(summary_dir)
self.summary_writer = tf.summary.FileWriter(summary_dir)
self.state = tf.placeholder(dtype=tf.float64, shape=[1, input_size], name="state")
self.action = tf.placeholder(dtype=tf.int32, name="action")
self.target = tf.placeholder(dtype=tf.float64, name="target")
self.mask = tf.placeholder(dtype=tf.float64, shape=[1, output_size], name="mask")
# This is just table lookup estimator
# self.fc_layer1 = tf.contrib.layers.fully_connected(
# inputs=self.state,
# num_outputs=len(env.state),
# activation_fn=tf.nn.relu)
self.output_layer = tf.contrib.layers.fully_connected(
inputs=self.state,
num_outputs=output_size,
activation_fn=None)
# self.action_probs = tf.squeeze(tf.nn.softmax(self.output_layer))
self.action_probs = tf.squeeze(masked_softmax(self.output_layer, self.mask))
self.picked_action_prob = tf.gather(self.action_probs, self.action)
# Loss and train op
self.loss = -tf.log(self.picked_action_prob) * self.target
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.train_op = self.optimizer.minimize(
self.loss, global_step=tf.train.get_global_step())
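            # Note: this is the REINFORCE policy-gradient objective,
            # -log(pi(a|s)) * target, where `target` is the return or advantage
            # supplied by the caller and the masked softmax above restricts the
            # probability mass to valid actions.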
def predict(self, state, mask, sess=None):
sess = sess or tf.get_default_session()
return sess.run(self.action_probs, {self.state: state.reshape(1, -1),
self.mask: mask.reshape(1, -1)})
def update(self, state, target, action, mask, sess=None):
sess = sess or tf.get_default_session()
feed_dict = {self.state: state.reshape(1, -1), self.target: target,
self.action: action, self.mask: mask.reshape(1, -1)}
_, loss = sess.run([self.train_op, self.loss], feed_dict)
return loss
def restore(self, sess, checkpoint_file):
sess = sess or tf.get_default_session()
self.saver = tf.train.Saver(tf.global_variables())
self.saver.restore(sess=sess, save_path=checkpoint_file)
class ValueNetwork(object):
""" Value Function approximator. """
def __init__(self, input_size, output_size=1, learning_rate=0.01, scope="value_estimator"):
with tf.variable_scope(scope):
self.state = tf.placeholder(dtype=tf.float64, shape=[1, input_size], name="state")
self.target = tf.placeholder(dtype=tf.float64, name="target")
# This is just table lookup estimator
# self.fc_layer1 = tf.contrib.layers.fully_connected(
# inputs=self.state,
# num_outputs=input_size,
# activation_fn=tf.nn.relu)
self.output_layer = tf.contrib.layers.fully_connected(
inputs=self.state,
num_outputs=output_size,
activation_fn=None)
self.value_estimate = tf.squeeze(self.output_layer)
self.loss = tf.squared_difference(self.value_estimate, self.target)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.train_op = self.optimizer.minimize(
self.loss, global_step=tf.train.get_global_step())
def predict(self, state, sess=None):
sess = sess or tf.get_default_session()
return sess.run(self.value_estimate, {self.state: state.reshape(1, -1)})
def update(self, state, target, sess=None):
sess = sess or tf.get_default_session()
feed_dict = {self.state: state.reshape(1, -1), self.target: target}
_, loss = sess.run([self.train_op, self.loss], feed_dict)
return loss
class ObjectAwareRewardNetwork(object):
""" Object-aware Reward Function approximator. """
def __init__(self, input_size, output_size, action_num, learning_rate=0.01, scope="reward_estimator"):
with tf.variable_scope(scope):
self.state = tf.placeholder(shape=[1, input_size], dtype=tf.float64, name="state")
self.action = tf.placeholder(shape=[], dtype=tf.int32, name="question_idx")
self.object = tf.placeholder(shape=[], dtype=tf.int32, name="person_idx")
self.target = tf.placeholder(dtype=tf.float64, name="target")
object_vec = tf.one_hot(self.object, input_size, dtype=tf.float64)
action_vec = tf.one_hot(self.action, action_num, dtype=tf.float64)
concat_vec = tf.concat([object_vec, action_vec], 0)
self.output_layer = tf.contrib.layers.fully_connected(
inputs=tf.concat([self.state, tf.expand_dims(concat_vec, 0)], 1),
num_outputs=output_size,
activation_fn=tf.nn.sigmoid)
self.value_estimate = tf.squeeze(self.output_layer)
self.loss = tf.squared_difference(self.value_estimate, self.target)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.train_op = self.optimizer.minimize(
self.loss, global_step=tf.train.get_global_step())
def predict(self, state, action, object, sess=None):
sess = sess or tf.get_default_session()
return sess.run(self.value_estimate, {self.state: state.reshape(1, -1), self.action: action, self.object: object})
def update(self, state, action, object, target, sess=None):
sess = sess or tf.get_default_session()
feed_dict = {self.state: state.reshape(1, -1), self.action: action, self.object: object, self.target: target}
        _, loss = sess.run([self.train_op, self.loss], feed_dict)
        return loss
def restore(self, sess, checkpoint_file):
sess = sess or tf.get_default_session()
self.saver = tf.train.Saver(tf.global_variables())
self.saver.restore(sess=sess, save_path=checkpoint_file)
class RewardNetwork(object):
""" Reward Function approximator. """
def __init__(self, input_size, output_size, action_num, learning_rate=0.01, scope="reward_estimator"):
with tf.variable_scope(scope):
self.state = tf.placeholder(shape=[1, input_size], dtype=tf.float64, name="state")
self.action = tf.placeholder(shape=[], dtype=tf.int32, name="question_idx")
self.target = tf.placeholder(dtype=tf.float64, name="target")
action_vec = tf.one_hot(self.action, action_num, dtype=tf.float64)
self.output_layer = tf.contrib.layers.fully_connected(
inputs=tf.concat([self.state, tf.expand_dims(action_vec, 0)], 1),
num_outputs=output_size,
activation_fn=tf.nn.sigmoid)
self.value_estimate = tf.squeeze(self.output_layer)
self.loss = tf.squared_difference(self.value_estimate, self.target)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.train_op = self.optimizer.minimize(
self.loss, global_step=tf.train.get_global_step())
def predict(self, state, action, sess=None):
sess = sess or tf.get_default_session()
return sess.run(self.value_estimate, {self.state: state.reshape(1, -1), self.action: action})
def update(self, state, action, target, sess=None):
sess = sess or tf.get_default_session()
feed_dict = {self.state: state.reshape(1, -1), self.action: action, self.target: target}
        _, loss = sess.run([self.train_op, self.loss], feed_dict)
        return loss
def restore(self, sess, checkpoint_file):
sess = sess or tf.get_default_session()
self.saver = tf.train.Saver(tf.global_variables())
self.saver.restore(sess=sess, save_path=checkpoint_file)
|
14118
|
from marshmallow import fields, Schema
from .provision import ProvisionActionSchema
class InstanceSchema(Schema):
type = fields.String(required=True)
image_id = fields.String(required=True)
availability_zone = fields.String(required=True)
ebs_optimized = fields.Boolean()
iam_fleet_role = fields.String(required=True)
class Meta:
ordered = True
class AuthSchema(Schema):
key_pair_name = fields.String(required=True)
identity_file = fields.String(required=True)
user = fields.String(required=True)
group = fields.String(required=True)
class Meta:
ordered = True
class NetworkSchema(Schema):
security_group_id = fields.String(required=True)
subnet_id = fields.String()
class Meta:
ordered = True
class ComputeAwsSchema(Schema):
provider = fields.String(required=True)
instance = fields.Nested(InstanceSchema, required=True)
auth = fields.Nested(AuthSchema, required=True)
network = fields.Nested(NetworkSchema, required=True)
provision_actions = fields.Nested(ProvisionActionSchema, many=True)
class Meta:
ordered = True
|
14168
|
import numpy as np
def load_mnist():
# the data, shuffled and split between train and test sets
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x = np.concatenate((x_train, x_test))
y = np.concatenate((y_train, y_test))
x = x.reshape(-1, 28, 28, 1).astype('float32')
x = x/255.
print('MNIST:', x.shape)
return x, y
def load_usps(data_path='./data/usps'):
import os
if not os.path.exists(data_path+'/usps_train.jf'):
if not os.path.exists(data_path+'/usps_train.jf.gz'):
os.system('wget http://www-i6.informatik.rwth-aachen.de/~keysers/usps_train.jf.gz -P %s' % data_path)
os.system('wget http://www-i6.informatik.rwth-aachen.de/~keysers/usps_test.jf.gz -P %s' % data_path)
os.system('gunzip %s/usps_train.jf.gz' % data_path)
os.system('gunzip %s/usps_test.jf.gz' % data_path)
with open(data_path + '/usps_train.jf') as f:
data = f.readlines()
data = data[1:-1]
data = [list(map(float, line.split())) for line in data]
data = np.array(data)
data_train, labels_train = data[:, 1:], data[:, 0]
with open(data_path + '/usps_test.jf') as f:
data = f.readlines()
data = data[1:-1]
data = [list(map(float, line.split())) for line in data]
data = np.array(data)
data_test, labels_test = data[:, 1:], data[:, 0]
x = np.concatenate((data_train, data_test)).astype('float32')
x /= 2.0
x = x.reshape([-1, 16, 16, 1])
y = np.concatenate((labels_train, labels_test))
print('USPS samples', x.shape)
return x, y
|
14187
|
import pytest
from brownie import Wei
@pytest.fixture(scope="function", autouse=True)
def shared_setup(fn_isolation):
pass
@pytest.fixture(scope='module')
def nocoiner(accounts, lido):
assert lido.balanceOf(accounts[9]) == 0
return accounts[9]
@pytest.fixture(scope='module')
def ape(accounts):
return accounts[0]
@pytest.fixture(scope='module')
def whale(accounts):
return accounts[1]
@pytest.fixture()
def vault(LidoVault, ape):
return LidoVault.deploy({"from": ape})
@pytest.fixture(scope='module')
def lido(interface, accounts):
lido = interface.Lido("0xae7ab96520DE3A18E5e111B5EaAb095312D7fE84")
oracle = accounts.at(lido.getOracle(), force=True)
return interface.Lido(lido, owner=oracle)
class Helpers:
@staticmethod
def filter_events_from(addr, events):
return list(filter(lambda evt: evt.address == addr, events))
@staticmethod
def assert_single_event_named(evt_name, tx, evt_keys_dict):
receiver_events = Helpers.filter_events_from(tx.receiver, tx.events[evt_name])
assert len(receiver_events) == 1
assert dict(receiver_events[0]) == evt_keys_dict
@staticmethod
def report_beacon_balance_increase(lido):
beacon_stat = lido.getBeaconStat().dict()
total_pooled_ether = lido.getTotalPooledEther()
new_beacon_balance = Wei(total_pooled_ether * 1.5) + "1 ether"
lido.pushBeacon(beacon_stat['beaconValidators'], new_beacon_balance)
@pytest.fixture(scope='module')
def helpers():
return Helpers
|
14200
|
import sys, requests, json, time
METRIC_NAME = "builtin:billing.ddu.metrics.byEntity"
PAGE_SIZE = 500
sys.tracebacklimit = 0
# python .\dduConsumptionPerMZ.py 2020-08-01T12:00:00+02:00 2020-08-10T12:00:00+02:00 https://mySampleEnv.live.dynatrace.com/api/ abcdefghijklmnop 60
# python .\dduConsumptionPerMZ.py 2020-08-01T12:00:00+02:00 2020-08-10T12:00:00+02:00 https://mySampleEnv.live.dynatrace.com/api/ abcdefghijklmnop 60 MyManagementZone
arguments = len(sys.argv) - 1
if arguments != 5 and arguments != 6:
print(
"The script was called with {} arguments but expected 5 or 6: \nFROM_DATE_AND_TIME TO_DATE_AND_TIME URL_TO_ENVIRONMENT API_TOKEN MAX_REQUESTS_PER_MINUTE [SELECTED_MANAGEMENT_ZONE]\n"
"Example: python dduConsumptionPerMZ.py 2020-08-01T12:00:00+02:00 2020-08-10T12:00:00+02:00 https://mySampleEnv.live.dynatrace.com/api/ abcdefghijklmnop 60 [myManagementZone]\n"
"Note: The SELECTED_MANAGEMENT_ZONE is optional. Specify it if you only want the calculate the ddu consumption for a single management zone.".format(
arguments
)
)
exit()
FROM = str(sys.argv[1])
TO = str(sys.argv[2])
BASE_URL = str(sys.argv[3])
API_TOKEN = str(sys.argv[4])
MAX_REQUESTS_PER_MINUTE = int(sys.argv[5])
if arguments == 6:
SELECTED_MANAGEMENT_ZONE_NAME = str(sys.argv[6])
else:
SELECTED_MANAGEMENT_ZONE_NAME = None
# Get all available management zones
# https://mySampleEnv.live.dynatrace.com/api/config/v1/managementZones
# try:
response = requests.get(
BASE_URL + "config/v1/managementZones",
headers={"Authorization": "Api-Token " + API_TOKEN},
)
# Show an error message when a connection can’t be established; terminates the script on error.
response.raise_for_status()
allManagemementZones = json.loads(response.content)["values"]
# print("Amount of different management zones: ", len(allManagemementZones))
# If the management zone is specified: Get the index of the occurrence
if SELECTED_MANAGEMENT_ZONE_NAME != None:
    SELECTED_MANAGEMENT_ZONE_INDEX = None
    for mzIndex, managementZone in enumerate(allManagemementZones):
        if allManagemementZones[mzIndex].get("name") == SELECTED_MANAGEMENT_ZONE_NAME:
            SELECTED_MANAGEMENT_ZONE_INDEX = mzIndex
    if SELECTED_MANAGEMENT_ZONE_INDEX is None:
        print("The management zone '{}' was not found.".format(SELECTED_MANAGEMENT_ZONE_NAME))
        exit()
# Get all different entityTypes. Due to the high number of different types you can't fetch all at once => Loop through every page with nextPageKey
# https://mySampleEnv.live.dynatrace.com/api/v2/entityTypes
# https://mySampleEnv.live.dynatrace.com/api/v2/entityTypes?nextPageKey=AQAAADIBAAAAMg==
response = requests.get(
BASE_URL + "v2/entityTypes", headers={"Authorization": "Api-Token " + API_TOKEN}
)
response.raise_for_status()
allEntityTypes = json.loads(response.content)["types"]
nextPage = json.loads(response.content).get("nextPageKey", None)
while nextPage != None:
response = requests.get(
BASE_URL + "v2/entityTypes?nextPageKey=" + nextPage,
headers={"Authorization": "Api-Token " + API_TOKEN},
)
response.raise_for_status()
nextPage = (json.loads(response.content)).get("nextPageKey", None)
allEntityTypes.extend(json.loads(response.content)["types"])
# print("Amount of different entity types: ", len(allEntityTypes))
# print()
dduConsumptionObjectOfManagementZone = {}
# Result JSON Object with Array of dduConsumption for each management zone
dduConsumptionPerManagementZone = "[ "
dduConsumptionOfEntityType = 0
dduConsumptionOfManagementZone = 0
# https://mySampleEnv.live.dynatrace.com/api/v2/metrics/query?metricSelector=builtin:billing.ddu.metrics.byEntity&entitySelector=type(HOST),mzId(123456789)&from=2020-08-01T12:00:00+02:00 2020-08-10T12:00:00+02:00
# Loop through every entityType of every management zone
# If there is a specific management zone selected: "loop through" the single management zone
for managementZoneIndex, managementZone in (
enumerate([allManagemementZones[SELECTED_MANAGEMENT_ZONE_INDEX]])
if SELECTED_MANAGEMENT_ZONE_NAME != None
else enumerate(allManagemementZones)
):
# If a management zone got specified: access it via the index in all management zones
if SELECTED_MANAGEMENT_ZONE_NAME != None:
managementZoneIndex = SELECTED_MANAGEMENT_ZONE_INDEX
for entityTypeIndex, entityType in enumerate(allEntityTypes):
"""
print(
"MZId: {:21} MZName: {:20} ET Name: {:5}".format(
allManagemementZones[managementZoneIndex]["id"],
allManagemementZones[managementZoneIndex]["name"],
allEntityTypes[entityTypeIndex]["type"],
)
)
"""
# Replace the "+" of Timezone to the encoded %2B
response = requests.get(
"{}v2/metrics/query?metricSelector={}:splitBy()&entitySelector=mzId({}),type({})&pageSize={}&from={}&to={}".format(
BASE_URL,
METRIC_NAME,
allManagemementZones[managementZoneIndex]["id"],
allEntityTypes[entityTypeIndex]["type"],
str(PAGE_SIZE),
FROM.replace("+", "%2B", 1),
TO.replace("+", "%2B", 1),
),
headers={"Authorization": "Api-Token " + API_TOKEN},
)
response.raise_for_status()
# print("Waiting for ", 60 / MAX_REQUESTS_PER_MINUTE, " seconds")
time.sleep(60 / MAX_REQUESTS_PER_MINUTE)
dduConsumptionOfMZandETDict = json.loads(response.content)["result"][0]["data"]
# If there are any results
if dduConsumptionOfMZandETDict:
# Filter out every empty usage values and create the sum of ddu usage
dduConsumptionOfMZandET = sum(
filter(None, dduConsumptionOfMZandETDict[0]["values"])
)
"""
print(
"Ddu consumption of manangement zone {} and entityType {}: {}".format(
allManagemementZones[managementZoneIndex]["name"],
allEntityTypes[entityTypeIndex]["type"],
round(dduConsumptionOfMZandET, 3),
)
)
"""
dduConsumptionOfManagementZone += dduConsumptionOfMZandET
dduConsumptionOfMZandET = 0
"""
print(
"Ddu consumption of management zone {}: {}".format(
allManagemementZones[managementZoneIndex]["name"],
round(dduConsumptionOfManagementZone, 3),
)
)
"""
# print()
# Populate JSON Object
dduConsumptionObjectOfManagementZone["MZId"] = allManagemementZones[
managementZoneIndex
]["id"]
dduConsumptionObjectOfManagementZone["MZName"] = allManagemementZones[
managementZoneIndex
]["name"]
dduConsumptionObjectOfManagementZone["dduConsumption"] = round(
dduConsumptionOfManagementZone, 3
)
dduConsumptionOfManagementZone = 0
# <[ > takes 2 chars
if len(dduConsumptionPerManagementZone) > 2:
dduConsumptionPerManagementZone = (
dduConsumptionPerManagementZone
+ ", "
+ json.dumps(dduConsumptionObjectOfManagementZone)
)
else:
dduConsumptionPerManagementZone = dduConsumptionPerManagementZone + json.dumps(
dduConsumptionObjectOfManagementZone
)
dduConsumptionPerManagementZone = dduConsumptionPerManagementZone + " ]"
print(dduConsumptionPerManagementZone)
|
14206
|
import os
import matplotlib as mpl
import torch
import torchvision
from data_management import IPDataset, Jitter, SimulateMeasurements
from networks import IterativeNet, Tiramisu
from operators import Radon
# ----- load configuration -----
import config # isort:skip
# ----- global configuration -----
mpl.use("agg")
device = torch.device("cuda:0")
torch.cuda.set_device(0)
# ----- measurement configuration -----
theta = torch.linspace(0, 180, 61)[:-1] # 60 lines, exclude endpoint
OpA = Radon(config.n, theta)
# ----- network configuration -----
subnet_params = {
"in_channels": 1,
"out_channels": 1,
"drop_factor": 0.0,
"down_blocks": (5, 7, 9, 12, 15),
"up_blocks": (15, 12, 9, 7, 5),
"pool_factors": (2, 2, 2, 2, 2),
"bottleneck_layers": 20,
"growth_rate": 16,
"out_chans_first_conv": 16,
}
subnet = Tiramisu
it_net_params = {
"num_iter": 1,
"lam": 0.0,
"lam_learnable": False,
"final_dc": False,
"resnet_factor": 1.0,
"operator": OpA,
"inverter": OpA.inv,
}
# ----- training configuration -----
mseloss = torch.nn.MSELoss(reduction="sum")
def loss_func(pred, tar):
return mseloss(pred, tar) / pred.shape[0]
train_phases = 1
train_params = {
"num_epochs": [19],
"batch_size": [10],
"loss_func": loss_func,
"save_path": [
os.path.join(
config.RESULTS_PATH,
"Radon_Tiramisu_jitter_v6_"
"train_phase_{}".format((i + 1) % (train_phases + 1)),
)
for i in range(train_phases + 1)
],
"save_epochs": 1,
"optimizer": torch.optim.Adam,
"optimizer_params": [{"lr": 8e-5, "eps": 2e-4, "weight_decay": 5e-4}],
"scheduler": torch.optim.lr_scheduler.StepLR,
"scheduler_params": {"step_size": 1, "gamma": 1.0},
"acc_steps": [1],
"train_transform": torchvision.transforms.Compose(
[SimulateMeasurements(OpA), Jitter(5e2, 0.0, 1.0)]
),
"val_transform": torchvision.transforms.Compose(
[SimulateMeasurements(OpA)],
),
"train_loader_params": {"shuffle": True, "num_workers": 0},
"val_loader_params": {"shuffle": False, "num_workers": 0},
}
# ----- data configuration -----
train_data_params = {
"path": config.DATA_PATH,
"device": device,
}
train_data = IPDataset
val_data_params = {
"path": config.DATA_PATH,
"device": device,
}
val_data = IPDataset
# ------ save hyperparameters -------
os.makedirs(train_params["save_path"][-1], exist_ok=True)
with open(
os.path.join(train_params["save_path"][-1], "hyperparameters.txt"), "w"
) as file:
for key, value in subnet_params.items():
file.write(key + ": " + str(value) + "\n")
for key, value in it_net_params.items():
file.write(key + ": " + str(value) + "\n")
for key, value in train_params.items():
file.write(key + ": " + str(value) + "\n")
for key, value in train_data_params.items():
file.write(key + ": " + str(value) + "\n")
for key, value in val_data_params.items():
file.write(key + ": " + str(value) + "\n")
file.write("train_phases" + ": " + str(train_phases) + "\n")
# ------ construct network and train -----
subnet_tmp = subnet(**subnet_params).to(device)
it_net_tmp = IterativeNet(
subnet_tmp,
**{
"num_iter": 1,
"lam": 0.0,
"lam_learnable": False,
"final_dc": False,
"resnet_factor": 1.0,
"operator": OpA,
"inverter": OpA.inv,
}
).to(device)
it_net_tmp.load_state_dict(
torch.load(
"results/Radon_Tiramisu_jitter_v4_train_phase_1/model_weights.pt",
map_location=torch.device(device),
)
)
subnet = it_net_tmp.subnet
it_net = IterativeNet(subnet, **it_net_params).to(device)
train_data = train_data("train", **train_data_params)
val_data = val_data("val", **val_data_params)
for i in range(train_phases):
train_params_cur = {}
for key, value in train_params.items():
train_params_cur[key] = (
value[i] if isinstance(value, (tuple, list)) else value
)
print("Phase {}:".format(i + 1))
for key, value in train_params_cur.items():
print(key + ": " + str(value))
it_net.train_on(train_data, val_data, **train_params_cur)
|
14217
|
import pytest
from astropy.io import fits
import numpy as np
from lightkurve.io.kepseismic import read_kepseismic_lightcurve
from lightkurve.io.detect import detect_filetype
@pytest.mark.remote_data
def test_detect_kepseismic():
"""Can we detect the correct format for KEPSEISMIC files?"""
url = "https://archive.stsci.edu/hlsps/kepseismic/001200000/92147/20d-filter/hlsp_kepseismic_kepler_phot_kplr001292147-20d_kepler_v1_cor-filt-inp.fits"
f = fits.open(url)
assert detect_filetype(f) == "KEPSEISMIC"
@pytest.mark.remote_data
def test_read_kepseismic():
"""Can we read KEPSEISMIC files?"""
url = "https://archive.stsci.edu/hlsps/kepseismic/001200000/92147/20d-filter/hlsp_kepseismic_kepler_phot_kplr001292147-20d_kepler_v1_cor-filt-inp.fits"
with fits.open(url, mode="readonly") as hdulist:
fluxes = hdulist[1].data["FLUX"]
lc = read_kepseismic_lightcurve(url)
flux_lc = lc.flux.value
# print(flux_lc, fluxes)
assert np.sum(fluxes) == np.sum(flux_lc)
|
14234
|
import os
import json
from torchblocks.metrics import SequenceLabelingScore
from torchblocks.trainer import SequenceLabelingTrainer
from torchblocks.callback import TrainLogger
from torchblocks.processor import SequenceLabelingProcessor, InputExample
from torchblocks.utils import seed_everything, dict_to_text, build_argparse
from torchblocks.utils import prepare_device, get_checkpoints
from torchblocks.data import CNTokenizer
from torchblocks.data import Vocabulary, VOCAB_NAME
from torchblocks.models.nn.lstm_crf import LSTMCRF
from torchblocks.models.bases import TrainConfig
from torchblocks.models.bases import WEIGHTS_NAME
MODEL_CLASSES = {
'lstm-crf': (TrainConfig, LSTMCRF, CNTokenizer)
}
def build_vocab(data_dir, vocab_dir):
    '''
    Build the vocabulary from the dataset files.
    '''
vocab = Vocabulary()
vocab_path = os.path.join(vocab_dir, VOCAB_NAME)
if os.path.exists(vocab_path):
vocab.load_vocab(str(vocab_path))
else:
files = ["train.json", "dev.json", "test.json"]
for file in files:
with open(os.path.join(data_dir, file), 'r') as fr:
for line in fr:
line = json.loads(line.strip())
text = line['text']
vocab.update(list(text))
vocab.build_vocab()
vocab.save_vocab(vocab_path)
print("vocab size: ", len(vocab))
class CluenerProcessor(SequenceLabelingProcessor):
def get_labels(self):
"""See base class."""
        # By convention the first label is X
return ["X", "B-address", "B-book", "B-company", 'B-game', 'B-government', 'B-movie', 'B-name',
'B-organization', 'B-position', 'B-scene', "I-address",
"I-book", "I-company", 'I-game', 'I-government', 'I-movie', 'I-name',
'I-organization', 'I-position', 'I-scene',
"S-address", "S-book", "S-company", 'S-game', 'S-government', 'S-movie',
'S-name', 'S-organization', 'S-position',
'S-scene', 'O', "[START]", "[END]"]
def read_data(self, input_file):
"""Reads a json list file."""
lines = []
with open(input_file, 'r') as f:
for line in f:
line = json.loads(line.strip())
text = line['text']
label_entities = line.get('label', None)
labels = ['O'] * len(text)
if label_entities is not None:
for key, value in label_entities.items():
for sub_name, sub_index in value.items():
for start_index, end_index in sub_index:
assert text[start_index:end_index + 1] == sub_name
if start_index == end_index:
labels[start_index] = 'S-' + key
else:
labels[start_index] = 'B-' + key
labels[start_index + 1:end_index + 1] = ['I-' + key] * (len(sub_name) - 1)
lines.append({"text": text, "labels": labels})
return lines
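    # As implied by read_data above, each input line is a JSON object of the form
    #   {"text": "...", "label": {"<entity type>": {"<entity text>": [[start, end]]}}}
    # where [start, end] are inclusive character indices into "text".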
def create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line['text']
labels = line['labels']
examples.append(InputExample(guid=guid, texts=[text_a, None], label_ids=labels))
return examples
def main():
parser = build_argparse()
parser.add_argument('--markup', type=str, default='bios', choices=['bios', 'bio'])
parser.add_argument('--use_crf', action='store_true', default=True)
args = parser.parse_args()
# output dir
if args.model_name is None:
args.model_name = args.model_path.split("/")[-1]
args.output_dir = args.output_dir + '{}'.format(args.model_name)
os.makedirs(args.output_dir, exist_ok=True)
# logging
prefix = "_".join([args.model_name, args.task_name])
logger = TrainLogger(log_dir=args.output_dir, prefix=prefix)
# device
logger.info("initializing device")
args.device, args.n_gpu = prepare_device(args.gpu, args.local_rank)
# build vocab
build_vocab(args.data_dir, vocab_dir=args.model_path)
seed_everything(args.seed)
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
# data processor
logger.info("initializing data processor")
tokenizer = tokenizer_class.from_pretrained(args.model_path, do_lower_case=args.do_lower_case)
    processor = CluenerProcessor(data_dir=args.data_dir, tokenizer=tokenizer, prefix=prefix, add_special_tokens=False)
label_list = processor.get_labels()
num_labels = len(label_list)
id2label = {i: label for i, label in enumerate(label_list)}
args.id2label = id2label
args.num_labels = num_labels
# model
logger.info("initializing model and config")
config = config_class.from_pretrained(args.model_path, num_labels=num_labels,
cache_dir=args.cache_dir if args.cache_dir else None)
model = model_class.from_pretrained(args.model_path, config=config)
model.to(args.device)
# Trainer
logger.info("initializing traniner")
trainer = SequenceLabelingTrainer(args=args, logger=logger, collate_fn=processor.collate_fn,
input_keys=processor.get_input_keys(),
metrics=[SequenceLabelingScore(id2label, markup=args.markup)])
# do train
if args.do_train:
train_dataset = processor.create_dataset(args.train_max_seq_length, 'train.json', 'train', )
eval_dataset = processor.create_dataset(args.eval_max_seq_length, 'dev.json', 'dev')
trainer.train(model, train_dataset=train_dataset, eval_dataset=eval_dataset)
# do eval
if args.do_eval and args.local_rank in [-1, 0]:
results = {}
eval_dataset = processor.create_dataset(args.eval_max_seq_length, 'dev.json', 'dev')
checkpoints = [args.output_dir]
if args.eval_all_checkpoints or args.checkpoint_number > 0:
checkpoints = get_checkpoints(args.output_dir, args.checkpoint_number, WEIGHTS_NAME)
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("/")[-1].split("-")[-1]
model = model_class.from_pretrained(checkpoint, config=config)
model.to(args.device)
trainer.evaluate(model, eval_dataset, save_preds=True, prefix=str(global_step))
if global_step:
result = {"{}_{}".format(global_step, k): v for k, v in trainer.records['result'].items()}
results.update(result)
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
dict_to_text(output_eval_file, results)
# do predict
if args.do_predict:
test_dataset = processor.create_dataset(args.eval_max_seq_length, 'test.json', 'test')
if args.checkpoint_number == 0:
raise ValueError("checkpoint number should > 0,but get %d", args.checkpoint_number)
checkpoints = get_checkpoints(args.output_dir, args.checkpoint_number, WEIGHTS_NAME)
for checkpoint in checkpoints:
global_step = checkpoint.split("/")[-1].split("-")[-1]
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
trainer.predict(model, test_dataset=test_dataset, prefix=str(global_step))
if __name__ == "__main__":
main()
|
14242
|
import unittest
import os
from six import StringIO
from package_manager import util
CHECKSUM_TXT = "1915adb697103d42655711e7b00a7dbe398a33d7719d6370c01001273010d069"
DEBIAN_JESSIE_OS_RELEASE = """PRETTY_NAME="Distroless"
NAME="Debian GNU/Linux"
ID="debian"
VERSION_ID="8"
VERSION="Debian GNU/Linux 8 (jessie)"
HOME_URL="https://github.com/GoogleContainerTools/distroless"
SUPPORT_URL="https://github.com/GoogleContainerTools/distroless/blob/master/README.md"
BUG_REPORT_URL="https://github.com/GoogleContainerTools/distroless/issues/new"
"""
DEBIAN_STRETCH_OS_RELEASE = """PRETTY_NAME="Distroless"
NAME="Debian GNU/Linux"
ID="debian"
VERSION_ID="9"
VERSION="Debian GNU/Linux 9 (stretch)"
HOME_URL="https://github.com/GoogleContainerTools/distroless"
SUPPORT_URL="https://github.com/GoogleContainerTools/distroless/blob/master/README.md"
BUG_REPORT_URL="https://github.com/GoogleContainerTools/distroless/issues/new"
"""
DEBIAN_BUSTER_OS_RELEASE = """PRETTY_NAME="Distroless"
NAME="Debian GNU/Linux"
ID="debian"
VERSION_ID="10"
VERSION="Debian GNU/Linux 10 (buster)"
HOME_URL="https://github.com/GoogleContainerTools/distroless"
SUPPORT_URL="https://github.com/GoogleContainerTools/distroless/blob/master/README.md"
BUG_REPORT_URL="https://github.com/GoogleContainerTools/distroless/issues/new"
"""
# VERSION and VERSION_ID aren't set on unknown distros
DEBIAN_UNKNOWN_OS_RELEASE = """PRETTY_NAME="Distroless"
NAME="Debian GNU/Linux"
ID="debian"
HOME_URL="https://github.com/GoogleContainerTools/distroless"
SUPPORT_URL="https://github.com/GoogleContainerTools/distroless/blob/master/README.md"
BUG_REPORT_URL="https://github.com/GoogleContainerTools/distroless/issues/new"
"""
osReleaseForDistro = {
"jessie": DEBIAN_JESSIE_OS_RELEASE,
"stretch": DEBIAN_STRETCH_OS_RELEASE,
"buster": DEBIAN_BUSTER_OS_RELEASE,
"???": DEBIAN_UNKNOWN_OS_RELEASE,
}
class TestUtil(unittest.TestCase):
def test_sha256(self):
current_dir = os.path.dirname(__file__)
filename = os.path.join(current_dir, 'testdata', 'checksum.txt')
actual = util.sha256_checksum(filename)
self.assertEqual(CHECKSUM_TXT, actual)
def test_generate_debian_os_release(self):
for distro, expected_output in osReleaseForDistro.items():
output_file = StringIO()
util.generate_os_release(distro, output_file)
self.assertEqual(expected_output, output_file.getvalue())
if __name__ == '__main__':
unittest.main()
|
14283
|
import struct
from six import binary_type
from capnpy import ptr
from capnpy.packing import mychr
from capnpy.printer import print_buffer
class SegmentBuilder(object):
def __init__(self, length=None):
self.buf = bytearray()
def get_length(self):
return len(self.buf)
def as_string(self):
return binary_type(self.buf)
def _print(self):
print_buffer(self.as_string())
def write_generic(self, ifmt, i, value):
struct.pack_into(mychr(ifmt), self.buf, i, value)
def write_int8(self, i, value):
struct.pack_into('b', self.buf, i, value)
def write_uint8(self, i, value):
struct.pack_into('B', self.buf, i, value)
def write_int16(self, i, value):
struct.pack_into('h', self.buf, i, value)
def write_uint16(self, i, value):
struct.pack_into('H', self.buf, i, value)
def write_int32(self, i, value):
struct.pack_into('i', self.buf, i, value)
def write_uint32(self, i, value):
struct.pack_into('I', self.buf, i, value)
def write_int64(self, i, value):
struct.pack_into('q', self.buf, i, value)
def write_uint64(self, i, value):
struct.pack_into('Q', self.buf, i, value)
def write_float32(self, i, value):
struct.pack_into('f', self.buf, i, value)
def write_float64(self, i, value):
struct.pack_into('d', self.buf, i, value)
def write_bool(self, byteoffset, bitoffset, value):
current = struct.unpack_from('B', self.buf, byteoffset)[0]
current |= (value << bitoffset)
struct.pack_into('B', self.buf, byteoffset, current)
def write_slice(self, i, src, start, n):
self.buf[i:i+n] = src.buf[start:start+n]
def allocate(self, length):
# XXX: check whether there is a better method to zero-extend the array in PyPy
result = len(self.buf)
self.buf += b'\x00'*length
return result
def alloc_struct(self, pos, data_size, ptrs_size):
"""
Allocate a new struct of the given size, and write the resulting pointer
at position i. Return the newly allocated position.
"""
length = (data_size+ptrs_size) * 8
result = self.allocate(length)
        offset = result - (pos+8)
        p = ptr.new_struct(offset//8, data_size, ptrs_size)
self.write_int64(pos, p)
return result
def alloc_list(self, pos, size_tag, item_count, body_length):
"""
Allocate a new list of the given size, and write the resulting pointer
at position i. Return the newly allocated position.
"""
body_length = ptr.round_up_to_word(body_length)
result = self.allocate(body_length)
        offset = result - (pos+8)
        p = ptr.new_list(offset//8, size_tag, item_count)
self.write_int64(pos, p)
return result
def alloc_text(self, pos, s, trailing_zero=1):
if s is None:
self.write_int64(pos, 0)
return -1
n = len(s)
nn = n + trailing_zero
result = self.alloc_list(pos, ptr.LIST_SIZE_8, nn, nn)
self.buf[result:result+n] = s
# there is no need to write the trailing 0 as the byte is already
# guaranteed to be 0
return result
def alloc_data(self, pos, s):
return self.alloc_text(pos, s, trailing_zero=0)
def copy_from_struct(self, dst_pos, structcls, value):
if value is None:
self.write_int64(dst_pos, 0)
return
if not isinstance(value, structcls):
raise TypeError("Expected %s instance, got %s" %
(structcls.__class__.__name__, value))
self.copy_from_pointer(dst_pos, value._seg, value._as_pointer(0), 0)
def copy_from_pointer(self, dst_pos, src, p, src_pos):
return copy_pointer(src, p, src_pos, self, dst_pos)
def copy_inline_struct(self, dst_pos, src, p, src_pos):
"""
Similar to copy_from_pointer but:
1. it assumes that p is a pointer to a struct
2. it does NOT allocate a new struct in dst_pos: instead, it writes
the struct directly into dst_pos
"""
return _copy_struct_inline(src, p, src_pos, self, dst_pos)
def copy_from_list(self, pos, item_type, lst):
return copy_from_list(self, pos, item_type, lst)
from capnpy.segment._copy_pointer import copy_pointer, _copy_struct_inline
from capnpy.segment._copy_list import copy_from_list
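# Minimal usage sketch (only runs when this module is executed directly, and
# assumes capnpy and six are importable): allocate a zero-filled, word-aligned
# region and write a couple of primitive values into it.
if __name__ == '__main__':
    b = SegmentBuilder()
    pos = b.allocate(16)           # reserve two 8-byte words, zero-filled
    b.write_int64(pos, 42)         # first word
    b.write_uint32(pos + 8, 7)     # low half of the second word
    print(b.get_length(), repr(b.as_string()))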
|
14302
|
from .model import DeepLabResNetModel
from .hc_deeplab import HyperColumn_Deeplabv2
from .image_reader import ImageReader, read_data_list, get_indicator_mat, get_batch_1chunk, read_an_image_from_disk, tf_wrap_get_patch, get_batch
from .utils import decode_labels, inv_preprocess, prepare_label
|
14355
|
import math
def fuel_needed(mass):
return math.floor(int(mass)/3 - 2)
def fuel_needed_recursive(mass):
fuel_needed_i = fuel_needed(mass)
if (fuel_needed_i <= 0):
return 0
return fuel_needed_i + fuel_needed_recursive(fuel_needed_i)
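# Worked example (Advent of Code 2019, day 1): a module of mass 1969 needs
# floor(1969 / 3 - 2) = 654 fuel; adding the fuel's own fuel recursively gives
# 654 + 216 + 70 + 21 + 5 = 966.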
total_fuel = 0
total_fuel_recursive = 0
with open("input.txt", "r") as fp:
for line in fp:
total_fuel += fuel_needed(line)
total_fuel_recursive += fuel_needed_recursive(line)
print("Total fuel: " + str(total_fuel))
print("Total fuel recursive: " + str(total_fuel_recursive))
|
14372
|
import json
def get_all_pets():
pets = read_from_file()
pets_in_store = []
for k, v in pets.items():
current_pet = {"id": k, **v}
pets_in_store.append(current_pet)
    return pets_in_store
def remove_pet(id):
pets = read_from_file()
del pets[id]
write_to_file(pets)
def update_pet(id, pet):
pets = read_from_file()
ids = pets.keys()
pets[id] = {"name": pet.name, "breed": pet.breed, "price": pet.price}
write_to_file(pets)
def add_pet(pet):
pets = read_from_file()
    ids = list(pets.keys())
    new_id = int(ids[-1]) + 1
pets[new_id] = {"name": pet.name, "breed": pet.breed, "price": pet.price}
write_to_file(pets)
def get_pet(id):
pets = read_from_file()
pet = pets[id]
pet["id"] = id
return pet
def write_to_file(content):
with open("./pets.json", "w") as pets:
pets.write(json.dumps(content))
def read_from_file():
with open("./pets.json", "r") as pets:
return json.loads(pets.read())
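# The backing ./pets.json file is assumed to map string ids to pet records, e.g.
#   {"1": {"name": "Rex", "breed": "Labrador", "price": 100}}
# (the example values here are illustrative only).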
|
14422
|
from .base import *
class Input(Layer):
def __init__(self, input_shape: Union[List, Tuple], **kwargs):
super(Input, self).__init__(input_shape=input_shape, **kwargs)
self._shape = input_shape
def call(self, x: F.Tensor, *args, **kwargs) -> F.Tensor:
self._data = x
return self._data
class Reshape(Layer):
def __init__(self, shape: Tuple, **kwargs):
super().__init__(shape=shape, **kwargs)
def call(self, x: F.Tensor, *args, **kwargs) -> F.Tensor:
self._data = F.view(x, (-1, ) + self._shape, self._data)
return self._data
def compute_output_shape(self, input_shape: Union[List, Tuple] = None) -> Union[List, Tuple]:
return self._shape
class ZeroPadding2D(Layer):
def __init__(self, padding, **kwargs):
self.padding = padding
super(ZeroPadding2D, self).__init__(**kwargs)
def call(self, x: F.Tensor, *args, **kwargs) -> F.Tensor:
self._data = F.pad2d(x, self.padding, self._data)
return self._data
def compute_output_shape(self, input_shape: Union[List, Tuple] = None) -> Union[List, Tuple]:
self._shape = (input_shape[0], input_shape[1] + 2 * self.padding[0], input_shape[2] + 2 * self.padding[1])
return self._shape
class Add(Layer):
def __call__(self, inbounds: List[Layer], *args, **kwargs):
for inbound in inbounds:
self._in_bounds.append(inbound)
inbound.add_out_bounds(self)
self._shape = inbound.shape
return self
def init_layer_out_tensor(self, x : F.Tensor = None):
x = self._in_bounds[0].data if x is None else x
if self._data is None or x.shape[0] > self._data.shape_capacity[0]:
self._data = Zeros()((x.shape[0],) + self.shape, requires_grad=self.trainable)
self._data.to('static')
for in_bound in self._in_bounds:
self._data.add_in_bounds(in_bound.data)
elif x.shape[0] < self._data.shape_capacity[0]:
if GLOBAL.TRAINING:
self._data.slices(slice(None, x.shape[0], None))
else:
self._data = Zeros()((x.shape[0],) + self.shape, requires_grad=self.trainable)
self._data.to('static')
for in_bound in self._in_bounds:
self._data.add_in_bounds(in_bound.data)
else:
self._data.slices(slice(None, None, None))
def forward(self, x: F.Tensor = None, *args, **kwargs) -> F.Tensor:
self._data.zero_()
for in_bound in self._in_bounds:
GLOBAL.np.add(self._data.eval, in_bound.data.eval, out=self._data.eval)
if GLOBAL.TRAINING and in_bound.data.requires_grad:
initialize_ops_grad(in_bound.data)
self._data.requires_grad = self._data.requires_grad or in_bound.data.requires_grad
return self._data
def compute_output_shape(self, input_shape: Union[List, Tuple] = None) -> Union[List, Tuple]:
return self._shape
def backward(self, gradients: F.Tensor = None):
for in_bound in self._in_bounds:
if in_bound.data.requires_grad:
GLOBAL.np.add(in_bound.data.grad.eval, self._data.grad.eval, out=in_bound.data.grad.eval)
self._data.zero_grad()
|
14502
|
import struct
import numpy as np
import pandas as pd
df_train = pd.read_csv('../data/train_data.csv')
df_valid = pd.read_csv('../data/valid_data.csv')
df_test = pd.read_csv('../data/test_data.csv')
with open('result.dat', 'rb') as f:
N, = struct.unpack('i', f.read(4))
no_dims, = struct.unpack('i', f.read(4))
print(N, no_dims)
mappedX = struct.unpack('{}d'.format(N * no_dims), f.read(8 * N * no_dims))
mappedX = np.array(mappedX).reshape((N, no_dims))
print(mappedX)
tsne_train = mappedX[:len(df_train)]
tsne_valid = mappedX[len(df_train):len(df_train)+len(df_valid)]
tsne_test = mappedX[len(df_train)+len(df_valid):]
assert(len(tsne_train) == len(df_train))
assert(len(tsne_valid) == len(df_valid))
assert(len(tsne_test) == len(df_test))
save_path = '../data/tsne_{}d_30p.npz'.format(no_dims)
np.savez(save_path, train=tsne_train, valid=tsne_valid, test=tsne_test)
print('Saved: {}'.format(save_path))
# landmarks, = struct.unpack('{}i'.format(N), f.read(4 * N))
# costs, = struct.unpack('{}d'.format(N), f.read(8 * N))
|
14517
|
import grasp_net, grasp_params, h5py, aolib.img as ig, os, numpy as np, aolib.util as ut
net_pr = grasp_params.im_fulldata_v5()
net_pr = grasp_params.gel_im_fulldata_v5()
checkpoint_file = '/home/manu/ros_ws/src/manu_research/manu_sawyer/src/tensorflow_model_is_gripping/training/net.tf-6499'
gpu = '/gpu:0'
db_file = '/media/backup_disk/dataset_manu/ver2/2017-06-22/2017-06-22_212702.hdf5'
with h5py.File(db_file, 'r') as db:
pre, mid, _ = grasp_net.milestone_frames(db)
# sc = lambda x : ig.scale(x, (224, 224))
def sc(x):
""" do a center crop (helps with gelsight) """
x = ig.scale(x, (256, 256))
return ut.crop_center(x, 224)
u = ig.uncompress
crop = grasp_net.crop_kinect
inputs = dict(
gel0_pre=sc(u(db['GelSightA_image'].value[pre])),
gel1_pre=sc(u(db['GelSightB_image'].value[pre])),
gel0_post=sc(u(db['GelSightA_image'].value[mid])),
gel1_post=sc(u(db['GelSightB_image'].value[mid])),
im0_pre=sc(crop(u(db['color_image_KinectA'].value[pre]))),
im0_post=sc(crop(u(db['color_image_KinectA'].value[mid]))),
# these are probably unnecessary
depth0_pre=sc(crop(db['depth_image_KinectA'].value[pre].astype('float32'))),
depth0_post=sc(crop(db['depth_image_KinectA'].value[mid].astype('float32'))))
net = grasp_net.NetClf(net_pr, checkpoint_file, gpu)
prob = net.predict(**inputs)
print 'prob = ', prob
|
14534
|
from flask import Blueprint
from apps.auth.business.wxlogin import WxLoginBusiness
from apps.auth.extentions import validation, parse_json_form
from library.api.render import json_detail_render
wxlogin = Blueprint("wxlogin", __name__)
@wxlogin.route('/', methods=['POST'])
@validation('POST:wx_user_code')
def wxuser_index_handler():
"""
    @api {post} /v1/wxlogin/ WeChat login
    @apiName WxLogin
    @apiGroup User
    @apiDescription Log in via WeChat
    @apiParam {string} user_code user code
@apiParamExample {json} Request-Example:
{
"user_code":"j2qL3QjNXXwa_4A0WJFDNJyPEx88HTHytARgRbr176g"
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": {
"token": "<PASSWORD>"
},
"message": ""
}
"""
user_code = parse_json_form('wx_user_code')
ret, data, msg = WxLoginBusiness.get_user(user_code[0])
return json_detail_render(ret, data, msg)
|
14542
|
import pytest
from weaverbird.backends.sql_translator.metadata import SqlQueryMetadataManager
from weaverbird.backends.sql_translator.steps import translate_filter
from weaverbird.backends.sql_translator.types import SQLQuery
from weaverbird.pipeline.conditions import ComparisonCondition
from weaverbird.pipeline.steps import FilterStep
def test_translate_filter(mocker):
step = FilterStep(
name='filter', condition=ComparisonCondition(column='amount', operator='eq', value=10)
)
query = SQLQuery(
query_name='SELECT_STEP_0',
transformed_query='WITH SELECT_STEP_0 AS (SELECT TOTO, TATA FROM products)',
selection_query='SELECT TOTO, TATA FROM SELECT_STEP_0',
metadata_manager=SqlQueryMetadataManager(
tables_metadata={'table1': {'toto': 'text', 'tata': 'int'}},
),
)
mocker.patch(
'weaverbird.backends.sql_translator.steps.utils.query_transformation.apply_condition',
return_value='SELECT TOTO, TATA FROM SELECT_STEP_0 WHERE amount = 10',
)
res = translate_filter(step, query, index=1)
assert (
res.transformed_query
== 'WITH SELECT_STEP_0 AS (SELECT TOTO, TATA FROM products), FILTER_STEP_1 AS (SELECT TOTO, TATA FROM '
'SELECT_STEP_0 WHERE amount = 10)'
)
assert res.selection_query == 'SELECT TOTO, TATA FROM FILTER_STEP_1'
def test_translate_filter_error(mocker):
step = FilterStep(
name='filter', condition=ComparisonCondition(column='amount', operator='eq', value=10)
)
query = SQLQuery(
query_name='SELECT_STEP_0',
transformed_query='WITH SELECT_STEP_0 AS (SELECT * FROM products), SELECT * FROM SELECT_STEP_0',
selection_query='SELECT * FROM SELECT_STEP_0',
metadata_manager=SqlQueryMetadataManager(
tables_metadata={'table1': {'toto': 'text', 'tata': 'int'}},
),
)
mocker.patch(
'weaverbird.backends.sql_translator.steps.filter.apply_condition',
side_effect=NotImplementedError,
)
with pytest.raises(NotImplementedError):
translate_filter(step, query, index=1)
|
14564
|
from .repository import Repository
from .manager import Manager
__all__ = ["Manager", "Repository", "__version__"]
__version__ = "0.2.0"
|
14583
|
from OpenGL.arrays import vbo
from OpenGL.GLES2.VERSION import GLES2_2_0
from OpenGL.GLES2.OES import mapbuffer
class Implementation( vbo.Implementation ):
"""OpenGL-based implementation of VBO interfaces"""
def __init__( self ):
        for name in self.EXPORTED_NAMES:
            found = False
            for source in [ GLES2_2_0, mapbuffer ]:
for possible in (name,name+'OES'):
try:
setattr( self, name, getattr( source, possible ))
except AttributeError as err:
pass
else:
found = True
assert found, name
if GLES2_2_0.glBufferData:
self.available = True
Implementation.register()
|
14595
|
from __future__ import print_function
import os, sys
import pickle
import time
import glob
import numpy as np
import torch
from model import PVSE
from loss import cosine_sim, order_sim
from vocab import Vocabulary
from data import get_test_loader
from logger import AverageMeter
from option import parser, verify_input_args
ORDER_BATCH_SIZE = 100
def encode_data(model, data_loader, use_gpu=False):
"""Encode all images and sentences loadable by data_loader"""
# switch to evaluate mode
model.eval()
use_mil = model.module.mil if hasattr(model, 'module') else model.mil
# numpy array to keep all the embeddings
img_embs, txt_embs = None, None
for i, data in enumerate(data_loader):
img, txt, txt_len, ids = data
if torch.cuda.is_available():
img, txt, txt_len = img.cuda(), txt.cuda(), txt_len.cuda()
# compute the embeddings
img_emb, txt_emb, _, _, _, _ = model.forward(img, txt, txt_len)
del img, txt, txt_len
# initialize the output embeddings
if img_embs is None:
if use_gpu:
emb_sz = [len(data_loader.dataset), img_emb.size(1), img_emb.size(2)] \
if use_mil else [len(data_loader.dataset), img_emb.size(1)]
img_embs = torch.zeros(emb_sz, dtype=img_emb.dtype, requires_grad=False).cuda()
txt_embs = torch.zeros(emb_sz, dtype=txt_emb.dtype, requires_grad=False).cuda()
else:
emb_sz = (len(data_loader.dataset), img_emb.size(1), img_emb.size(2)) \
if use_mil else (len(data_loader.dataset), img_emb.size(1))
img_embs = np.zeros(emb_sz)
txt_embs = np.zeros(emb_sz)
# preserve the embeddings by copying from gpu and converting to numpy
img_embs[ids] = img_emb if use_gpu else img_emb.data.cpu().numpy().copy()
txt_embs[ids] = txt_emb if use_gpu else txt_emb.data.cpu().numpy().copy()
return img_embs, txt_embs
def i2t(images, sentences, nreps=1, npts=None, return_ranks=False, order=False, use_gpu=False):
"""
Images->Text (Image Annotation)
Images: (nreps*N, K) matrix of images
Captions: (nreps*N, K) matrix of sentences
"""
if use_gpu:
assert not order, 'Order embedding not supported in GPU mode'
if npts is None:
npts = int(images.shape[0] / nreps)
index_list = []
ranks, top1 = np.zeros(npts), np.zeros(npts)
for index in range(npts):
# Get query image
im = images[nreps * index]
im = im.reshape((1,) + im.shape)
# Compute scores
if use_gpu:
if len(sentences.shape) == 2:
sim = im.mm(sentences.t()).view(-1)
else:
_, K, D = im.shape
sim_kk = im.view(-1, D).mm(sentences.view(-1, D).t())
sim_kk = sim_kk.view(im.size(0), K, sentences.size(0), K)
sim_kk = sim_kk.permute(0,1,3,2).contiguous()
sim_kk = sim_kk.view(im.size(0), -1, sentences.size(0))
sim, _ = sim_kk.max(dim=1)
sim = sim.flatten()
else:
if order:
if index % ORDER_BATCH_SIZE == 0:
mx = min(images.shape[0], nreps * (index + ORDER_BATCH_SIZE))
im2 = images[nreps * index:mx:nreps]
sim_batch = order_sim(torch.Tensor(im2).cuda(), torch.Tensor(sentences).cuda())
sim_batch = sim_batch.cpu().numpy()
sim = sim_batch[index % ORDER_BATCH_SIZE]
else:
sim = np.tensordot(im, sentences, axes=[2, 2]).max(axis=(0,1,3)).flatten() \
if len(sentences.shape) == 3 else np.dot(im, sentences.T).flatten()
if use_gpu:
_, inds_gpu = sim.sort()
inds = inds_gpu.cpu().numpy().copy()[::-1]
else:
inds = np.argsort(sim)[::-1]
index_list.append(inds[0])
# Score
rank = 1e20
for i in range(nreps * index, nreps * (index + 1), 1):
tmp = np.where(inds == i)[0][0]
if tmp < rank:
rank = tmp
ranks[index] = rank
top1[index] = inds[0]
# Compute metrics
r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
if return_ranks:
return (r1, r5, r10, medr, meanr), (ranks, top1)
else:
return (r1, r5, r10, medr, meanr)
def t2i(images, sentences, nreps=1, npts=None, return_ranks=False, order=False, use_gpu=False):
"""
Text->Images (Image Search)
Images: (nreps*N, K) matrix of images
Captions: (nreps*N, K) matrix of sentences
"""
if use_gpu:
assert not order, 'Order embedding not supported in GPU mode'
if npts is None:
npts = int(images.shape[0] / nreps)
if use_gpu:
ims = torch.stack([images[i] for i in range(0, len(images), nreps)])
else:
ims = np.array([images[i] for i in range(0, len(images), nreps)])
ranks, top1 = np.zeros(nreps * npts), np.zeros(nreps * npts)
for index in range(npts):
# Get query sentences
queries = sentences[nreps * index:nreps * (index + 1)]
# Compute scores
if use_gpu:
if len(sentences.shape) == 2:
sim = queries.mm(ims.t())
else:
sim_kk = queries.view(-1, queries.size(-1)).mm(ims.view(-1, ims.size(-1)).t())
sim_kk = sim_kk.view(queries.size(0), queries.size(1), ims.size(0), ims.size(1))
sim_kk = sim_kk.permute(0,1,3,2).contiguous()
sim_kk = sim_kk.view(queries.size(0), -1, ims.size(0))
sim, _ = sim_kk.max(dim=1)
else:
if order:
if nreps * index % ORDER_BATCH_SIZE == 0:
mx = min(sentences.shape[0], nreps * index + ORDER_BATCH_SIZE)
sentences_batch = sentences[nreps * index:mx]
sim_batch = order_sim(torch.Tensor(images).cuda(),
torch.Tensor(sentences_batch).cuda())
sim_batch = sim_batch.cpu().numpy()
sim = sim_batch[:, (nreps * index) % ORDER_BATCH_SIZE:(nreps * index) % ORDER_BATCH_SIZE + nreps].T
else:
sim = np.tensordot(queries, ims, axes=[2, 2]).max(axis=(1,3)) \
if len(sentences.shape) == 3 else np.dot(queries, ims.T)
inds = np.zeros(sim.shape)
for i in range(len(inds)):
if use_gpu:
_, inds_gpu = sim[i].sort()
inds[i] = inds_gpu.cpu().numpy().copy()[::-1]
else:
inds[i] = np.argsort(sim[i])[::-1]
ranks[nreps * index + i] = np.where(inds[i] == index)[0][0]
top1[nreps * index + i] = inds[i][0]
# Compute metrics
r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
if return_ranks:
return (r1, r5, r10, medr, meanr), (ranks, top1)
else:
return (r1, r5, r10, medr, meanr)
def convert_old_state_dict(x, model, multi_gpu=False):
params = model.state_dict()
prefix = ['module.img_enc.', 'module.txt_enc.'] \
if multi_gpu else ['img_enc.', 'txt_enc.']
for i, old_params in enumerate(x):
for key, val in old_params.items():
key = prefix[i] + key.replace('module.','').replace('our_model', 'pie_net')
assert key in params, '{} not found in model state_dict'.format(key)
params[key] = val
return params
def evalrank(model, args, split='test'):
print('Loading dataset')
data_loader = get_test_loader(args, vocab)
print('Computing results... (eval_on_gpu={})'.format(args.eval_on_gpu))
img_embs, txt_embs = encode_data(model, data_loader, args.eval_on_gpu)
n_samples = img_embs.shape[0]
nreps = 5 if args.data_name == 'coco' else 1
print('Images: %d, Sentences: %d' % (img_embs.shape[0] / nreps, txt_embs.shape[0]))
# 5fold cross-validation, only for MSCOCO
mean_metrics = None
if args.data_name == 'coco':
results = []
for i in range(5):
r, rt0 = i2t(img_embs[i*5000:(i + 1)*5000], txt_embs[i*5000:(i + 1)*5000],
nreps=nreps, return_ranks=True, order=args.order, use_gpu=args.eval_on_gpu)
r = (r[0], r[1], r[2], r[3], r[3] / n_samples, r[4], r[4] / n_samples)
print("Image to text: %.2f, %.2f, %.2f, %.2f (%.2f), %.2f (%.2f)" % r)
ri, rti0 = t2i(img_embs[i*5000:(i + 1)*5000], txt_embs[i*5000:(i + 1)*5000],
nreps=nreps, return_ranks=True, order=args.order, use_gpu=args.eval_on_gpu)
if i == 0:
rt, rti = rt0, rti0
ri = (ri[0], ri[1], ri[2], ri[3], ri[3] / n_samples, ri[4], ri[4] / n_samples)
print("Text to image: %.2f, %.2f, %.2f, %.2f (%.2f), %.2f (%.2f)" % ri)
ar = (r[0] + r[1] + r[2]) / 3
ari = (ri[0] + ri[1] + ri[2]) / 3
rsum = r[0] + r[1] + r[2] + ri[0] + ri[1] + ri[2]
print("rsum: %.2f ar: %.2f ari: %.2f" % (rsum, ar, ari))
results += [list(r) + list(ri) + [ar, ari, rsum]]
mean_metrics = tuple(np.array(results).mean(axis=0).flatten())
print("-----------------------------------")
print("Mean metrics from 5-fold evaluation: ")
print("rsum: %.2f" % (mean_metrics[-1] * 6))
print("Average i2t Recall: %.2f" % mean_metrics[-3])
print("Image to text: %.2f %.2f %.2f %.2f (%.2f) %.2f (%.2f)" % mean_metrics[:7])
print("Average t2i Recall: %.2f" % mean_metrics[-2])
print("Text to image: %.2f %.2f %.2f %.2f (%.2f) %.2f (%.2f)" % mean_metrics[7:14])
# no cross-validation, full evaluation
r, rt = i2t(img_embs, txt_embs, nreps=nreps, return_ranks=True, use_gpu=args.eval_on_gpu)
ri, rti = t2i(img_embs, txt_embs, nreps=nreps, return_ranks=True, use_gpu=args.eval_on_gpu)
ar = (r[0] + r[1] + r[2]) / 3
ari = (ri[0] + ri[1] + ri[2]) / 3
rsum = r[0] + r[1] + r[2] + ri[0] + ri[1] + ri[2]
r = (r[0], r[1], r[2], r[3], r[3] / n_samples, r[4], r[4] / n_samples)
ri = (ri[0], ri[1], ri[2], ri[3], ri[3] / n_samples, ri[4], ri[4] / n_samples)
print("rsum: %.2f" % rsum)
print("Average i2t Recall: %.2f" % ar)
print("Image to text: %.2f %.2f %.2f %.2f (%.2f) %.2f (%.2f)" % r)
print("Average t2i Recall: %.2f" % ari)
print("Text to image: %.2f %.2f %.2f %.2f (%.2f) %.2f (%.2f)" % ri)
return mean_metrics
if __name__ == '__main__':
multi_gpu = torch.cuda.device_count() > 1
args = verify_input_args(parser.parse_args())
opt = verify_input_args(parser.parse_args())
# load vocabulary used by the model
with open('./vocab/%s_vocab.pkl' % args.data_name, 'rb') as f:
vocab = pickle.load(f)
args.vocab_size = len(vocab)
# load model and options
assert os.path.isfile(args.ckpt)
model = PVSE(vocab.word2idx, args)
if torch.cuda.is_available():
model = torch.nn.DataParallel(model).cuda() if multi_gpu else model
torch.backends.cudnn.benchmark = True
model.load_state_dict(torch.load(args.ckpt))
# evaluate
metrics = evalrank(model, args, split='test')
|
14616
|
import time
from adafruit_circuitplayground.express import cpx
import simpleio
cpx.pixels.auto_write = False
cpx.pixels.brightness = 0.3
# Set these based on your ambient temperature for best results!
minimum_temp = 24
maximum_temp = 30
while True:
# temperature value remapped to pixel position
peak = simpleio.map_range(cpx.temperature, minimum_temp, maximum_temp, 0, 10)
print(cpx.temperature)
print(int(peak))
for i in range(0, 10, 1):
if i <= peak:
cpx.pixels[i] = (0, 255, 255)
else:
cpx.pixels[i] = (0, 0, 0)
cpx.pixels.show()
time.sleep(0.05)
|
14622
|
import numpy as np
from torch import nn
def layer_init(layer, std=np.sqrt(2), bias_const=0.0):
"""
Simple function to init layers
"""
nn.init.orthogonal_(layer.weight, std)
nn.init.constant_(layer.bias, bias_const)
return layer
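# Minimal usage sketch (layer sizes below are arbitrary placeholders):
#   policy_head = layer_init(nn.Linear(64, 4))            # orthogonal init with gain sqrt(2)
#   value_head = layer_init(nn.Linear(64, 1), std=1.0)    # smaller gain, common for value heads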
|
14709
|
import numpy as np
import matplotlib.pyplot as mp
import matplotlib.cm as mpcm
import matplotlib.colors as mpc
import scipy.stats as ss
# plotting settings
lw = 1.5
mp.rc('font', family = 'serif')
mp.rcParams['text.latex.preamble'] = [r'\boldmath']
mp.rcParams['axes.linewidth'] = lw
mp.rcParams['lines.linewidth'] = lw
cm = mpcm.get_cmap('plasma')
# datafiles
ppds = ['cmb', 'loc']
sums = ['ptes', 'prs']
# posterior summaries
post_means = np.genfromtxt('gw_grb_h_0_posterior_means.csv', \
delimiter=',')
post_vars = np.genfromtxt('gw_grb_h_0_posterior_vars.csv', \
delimiter=',')
n_h_0_true = post_means.shape[0]
n_bs = post_means.shape[1]
print n_bs
h_0_true_col = [cm(col) for col in np.linspace(0.2, 0.8, n_h_0_true)]
fig, axes = mp.subplots(1, 2, figsize=(12, 5))
for i in range(n_h_0_true):
print '* H_0 = {:5.2f}'.format(post_means[i, 0])
to_print = 'posterior mean = {:5.2f} +/- {:4.2f}'
print to_print.format(np.mean(post_means[i, 1:]), \
np.std(post_means[i, 1:]))
to_print = 'posterior sigma = {:5.2f} +/- {:4.2f}'
print to_print.format(np.mean(np.sqrt(post_vars[i, 1:])), \
np.std(np.sqrt(post_vars[i, 1:])))
kde = ss.gaussian_kde(post_means[i, 1:])
grid = np.linspace(np.min(post_means[i, 1:]), \
np.max(post_means[i, 1:]), \
1000)
axes[0].plot(grid, kde.evaluate(grid), color=h_0_true_col[i])
axes[0].axvline(post_means[i, 0], color=h_0_true_col[i], ls='--')
kde = ss.gaussian_kde(np.sqrt(post_vars[i, 1:]))
grid = np.linspace(np.min(np.sqrt(post_vars[i, 1:])), \
np.max(np.sqrt(post_vars[i, 1:])), \
1000)
axes[1].plot(grid, kde.evaluate(grid), color=h_0_true_col[i], \
label=r'$H_0 = {:5.2f}$'.format(post_vars[i, 0]))
axes[0].set_xlabel(r'$\bar{H}_0$', fontsize=18)
axes[0].set_ylabel(r'${\rm Pr}(\bar{H}_0)$', fontsize=18)
axes[0].tick_params(axis='both', which='major', labelsize=12)
axes[1].set_xlabel(r'$\sigma_{H_0}$', fontsize=18)
axes[1].set_ylabel(r'${\rm Pr}(\sigma_{H_0})$', fontsize=18)
axes[1].tick_params(axis='both', which='major', labelsize=12)
axes[1].legend(loc='upper right', fontsize=14)
fig.suptitle('Bootstrap-Averaged Posterior Means / Sigmas', \
fontsize=18)
fig.savefig('gw_grd_h_0_bs_avg_posterior_moments.pdf', \
bbox_inches = 'tight')
mp.close(fig)
# PPD summaries
for i in range(len(ppds)):
for j in range(len(sums)):
# read data
fname = 'gw_grb_h_0_' + ppds[i] + '_ppd_' + sums[j]
data = np.genfromtxt(fname + '.csv', delimiter=',')
n_bs = data.shape[1]
print n_bs
# plot
n_h_0_true = data.shape[0]
fig, axes = mp.subplots(1, n_h_0_true, \
figsize=(6 * n_h_0_true, 5))
if ppds[i] == 'cmb':
fig.suptitle(r'$\hat{H}_0^{\rm CMB}\, {\rm Prediction}$', \
fontsize=18)
else:
fig.suptitle(r'$\hat{H}_0^{\rm CDL}\, {\rm Prediction}$', \
fontsize=18)
if sums[j] == 'ptes':
x_label = r'$p$'
y_label = r'${\rm Pr}(p)$'
else:
x_label = r'$\rho$'
y_label = r'${\rm Pr}(\rho)$'
for k in range(n_h_0_true):
kde = ss.gaussian_kde(data[k, 1:])
grid = np.linspace(np.min(data[k, 1:]), \
np.max(data[k, 1:]), \
1000)
axes[k].plot(grid, kde.evaluate(grid), color=cm(0.5))
axes[k].set_xlabel(x_label, fontsize=18)
axes[k].set_ylabel(y_label, fontsize=18)
axes[k].tick_params(axis='both', which='major', labelsize=12)
axes[k].set_title(r'$H_0 = {:5.2f}$'.format(data[k, 0]), \
fontsize=18)
# finish plot
fig.savefig(fname + '.pdf', bbox_inches = 'tight')
mp.close(fig)
# quick check of required numbers of samples
def rho(d, n, var_ratio, n_event_ref, n_event):
d_n_event = n_event_ref / n_event
return np.exp(-0.5 * rho_num(d, n, d_n_event) / \
rho_den(var_ratio, d_n_event))
def rho_num(d, n, d_n_event):
if d > 0.0:
return (d - n * np.sqrt(d_n_event)) ** 2
else:
return (d + n * np.sqrt(d_n_event)) ** 2
def rho_den(var_ratio, d_n_event):
return var_ratio + d_n_event
def num_ratio(d, n, m, var_ratio):
term = (m ** 2 * var_ratio - d ** 2)
print term
return [((-n * d - \
np.sqrt((n * d) ** 2 - term * (m ** 2 - n ** 2))) / \
term) ** 2, \
((-n * d + \
np.sqrt((n * d) ** 2 - term * (m ** 2 - n ** 2))) / \
term) ** 2]
n_ref = 51.0
mu_obs = np.array([67.81, 73.24])
sig_obs = np.array([0.92, 1.74])
n_sigma_sv = 1.0
n_sigma_thresh = 3.0
n_sigma_diff = [(mu_obs[1] - mu_obs[0]) / np.sqrt(post_vars[i, 1]), \
(mu_obs[0] - mu_obs[1]) / np.sqrt(post_vars[i, 1])]
var_ratio = [sig_obs[1] ** 2 / post_vars[i, 1], \
sig_obs[0] ** 2 / post_vars[i, 1]]
print n_sigma_diff
print var_ratio
n_req = np.zeros(2)
n_req[0] = n_ref * num_ratio(n_sigma_diff[0], n_sigma_sv, \
n_sigma_thresh, var_ratio[0])[0]
ln_rho = -2.0 * np.log(rho(n_sigma_diff[0], n_sigma_sv, \
var_ratio[0], n_ref, n_req[0]))
print n_req[0], ln_rho, n_sigma_thresh ** 2
n_req[1] = n_ref * num_ratio(n_sigma_diff[1], n_sigma_sv, \
n_sigma_thresh, var_ratio[1])[1]
ln_rho = -2.0 * np.log(rho(n_sigma_diff[1], n_sigma_sv, \
var_ratio[1], n_ref, n_req[1]))
print n_req[1], ln_rho, n_sigma_thresh ** 2
n_grid = np.arange(n_ref, 5000.0)
mp.loglog(n_grid, rho_num(n_sigma_diff[0], n_sigma_sv, n_ref / n_grid), 'r', lw=1.0)
mp.plot(n_grid, 1.0 / rho_den(var_ratio[0], n_ref / n_grid), 'g', lw=1.0)
mp.plot(n_grid, 1.0 / rho_den(var_ratio[1], n_ref / n_grid), 'b', lw=1.0)
mp.plot(n_grid, -2.0 * np.log(rho(n_sigma_diff[0], n_sigma_sv, var_ratio[0], \
n_ref, n_grid)), 'g')
mp.plot(n_grid, -2.0 * np.log(rho(n_sigma_diff[1], n_sigma_sv, var_ratio[1], \
n_ref, n_grid)), 'b')
mp.axhline(n_sigma_thresh ** 2, color='k', linestyle='-.')
mp.axvline(n_req[0], color='g', linestyle='-.')
mp.axvline(n_req[1], color='b', linestyle='-.')
mp.xlabel(r'$N$')
mp.ylabel(r'$f(N)$')
mp.xlim(n_ref, 5000)
mp.ylim(0.3, 40.0)
mp.savefig('gw_grb_h_0_ppd_samp_var_limits.pdf', bbox_inches='tight')
mp.show()
exit()
print num_ratio(4.53, n_sigma_sv, n_sigma_thresh, 2.1)
print 5.43, mu_obs[1] - mu_obs[0]
print 1.2, np.sqrt(post_vars[i, 1])
print 5.43 / 1.2, n_sigma_diff[0]
m = 3.0
n = 1.0
d = 3.77 # 4.53
vrat = 1.46 # 2.1
print ((d*n+np.sqrt((d*n)**2-(vrat*m**2-d**2)*(m**2-n**2)))/(vrat*m**2-d**2))**2
|
14738
|
import argparse
import os
import sys
import numpy as np
from scipy import misc
import cv2
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision.models import vgg16, vgg19
from torchvision.utils import save_image
from lib.gradients import GradCam, GuidedBackpropGrad
from lib.image_utils import preprocess_image, save_cam_image, save_as_gray_image
from lib.labels import IMAGENET_LABELS
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--cuda', action='store_true', default=False,
help='Use NVIDIA GPU acceleration')
parser.add_argument('--img', type=str, default='',
help='Input image path')
parser.add_argument('--out_dir', type=str, default='./result/cam/',
help='Result directory path')
args = parser.parse_args()
args.cuda = args.cuda and torch.cuda.is_available()
if args.cuda:
print("Using GPU for acceleration")
else:
print("Using CPU for computation")
if args.img:
print('Input image: {}'.format(args.img))
else:
print('Input image: raccoon face (scipy.misc.face())')
print('Output directory: {}'.format(args.out_dir))
print()
return args
def main():
args = parse_args()
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
target_layer_names = ['35']
target_index = None
# Prepare input image
if args.img:
img = cv2.imread(args.img, 1)
else:
img = misc.face()
img = np.float32(cv2.resize(img, (224, 224))) / 255
preprocessed_img = preprocess_image(img, args.cuda)
model = vgg19(pretrained=True)
if args.cuda:
model.cuda()
# Prediction
output = model(preprocessed_img)
pred_index = np.argmax(output.data.cpu().numpy())
print('Prediction: {}'.format(IMAGENET_LABELS[pred_index]))
# Prepare grad cam
grad_cam = GradCam(
pretrained_model=model,
target_layer_names=target_layer_names,
cuda=args.cuda)
# Compute grad cam
mask = grad_cam(preprocessed_img, target_index)
save_cam_image(img, mask, os.path.join(args.out_dir, 'grad_cam.jpg'))
print('Saved Grad-CAM image')
# Reload preprocessed image
    preprocessed_img = preprocess_image(img, args.cuda)
# Compute guided backpropagation
guided_backprop = GuidedBackpropGrad(
pretrained_model=model, cuda=args.cuda)
guided_backprop_saliency = guided_backprop(preprocessed_img, index=target_index)
cam_mask = np.zeros(guided_backprop_saliency.shape)
for i in range(guided_backprop_saliency.shape[0]):
cam_mask[i, :, :] = mask
cam_guided_backprop = np.multiply(cam_mask, guided_backprop_saliency)
save_as_gray_image(
cam_guided_backprop,
os.path.join(args.out_dir, 'guided_grad_cam.jpg'))
print('Saved Guided Grad-CAM image')
if __name__ == '__main__':
main()
|
14771
|
from django.views.generic import TemplateView
from django_tables2.config import RequestConfig
from django_tables2_column_shifter.tables import (
ColumnShiftTableBootstrap2,
ColumnShiftTableBootstrap3,
ColumnShiftTableBootstrap4,
ColumnShiftTableBootstrap5,
)
from .models import Author, Book
from .tables import get_author_table_class, get_book_table_class
class Index(TemplateView):
template_name = "testproject/index.html"
class Base(object):
container_css = "span10 offset1"
template_name = "testproject/test_bootstrap2.html"
table_class_version = ColumnShiftTableBootstrap2
def get_context_data(self, **kwargs):
context = super(Base, self).get_context_data(**kwargs)
        # Build tables
author_queryset = Author.objects.all()
author_table1 = get_author_table_class(
self.table_class_version
)(author_queryset)
author_table2 = get_author_table_class(
self.table_class_version
)(author_queryset, prefix="authors2")
book_queryset = Book.objects.all()
book_table = get_book_table_class(
self.table_class_version
)(book_queryset, prefix="books")
# Turn on sorting and pagination
RequestConfig(self.request, paginate={'per_page': 2}).configure(author_table1)
RequestConfig(self.request, paginate={'per_page': 2}).configure(author_table2)
RequestConfig(self.request, paginate={'per_page': 2}).configure(book_table)
context['container_css'] = self.container_css
context['author_table1'] = author_table1
context['author_table2'] = author_table2
context['book_table'] = book_table
context['book_queryset'] = book_queryset
return context
class Bootstrap2(Base, TemplateView):
pass
class Bootstrap3(Base, TemplateView):
container_css = "col-xs-10 col-xs-offset-1"
template_name = "testproject/test_bootstrap3.html"
table_class_version = ColumnShiftTableBootstrap3
class Bootstrap4(Base, TemplateView):
container_css = "col-xs-10 col-xs-offset-1"
template_name = "testproject/test_bootstrap4.html"
table_class_version = ColumnShiftTableBootstrap4
class Bootstrap4_1_3(Base, TemplateView):
container_css = "col-xs-10 col-xs-offset-1"
template_name = "testproject/test_bootstrap4.1.3.html"
table_class_version = ColumnShiftTableBootstrap4
class Bootstrap5(Base, TemplateView):
container_css = "col-xs-10 col-xs-offset-1"
template_name = "testproject/test_bootstrap5.html"
table_class_version = ColumnShiftTableBootstrap5
|
14772
|
class Workset(WorksetPreview,IDisposable):
""" Represents a workset in the document. """
@staticmethod
def Create(document,name):
"""
Create(document: Document,name: str) -> Workset
Creates a new workset.
document: The document in which the new instance is created.
name: The workset name.
Returns: Returns the newly created workset.
"""
pass
def Dispose(self):
""" Dispose(self: WorksetPreview,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: WorksetPreview,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
IsEditable=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Whether the workset is editable.
Get: IsEditable(self: Workset) -> bool
"""
IsOpen=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Whether the workset is open (rather than closed).
Get: IsOpen(self: Workset) -> bool
"""
IsVisibleByDefault=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Whether the workset is visible by default.
Get: IsVisibleByDefault(self: Workset) -> bool
"""
Kind=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Kind of the workset.
Get: Kind(self: Workset) -> WorksetKind
"""
|
14795
|
import argparse, pdb
import gym
import numpy as np
import os
import pickle
import random
import torch
import scipy.misc
from gym.envs.registration import register
parser = argparse.ArgumentParser()
parser.add_argument('-display', type=int, default=0)
parser.add_argument('-seed', type=int, default=1)
parser.add_argument('-lanes', type=int, default=3)
parser.add_argument('-traffic_rate', type=int, default=15)
parser.add_argument('-state_image', type=int, default=1)
parser.add_argument('-save_images', type=int, default=0)
parser.add_argument('-store', type=int, default=1)
parser.add_argument('-data_dir', type=str, default='traffic-data/state-action-cost/')
parser.add_argument('-fps', type=int, default=30)
parser.add_argument('-time_slot', type=int, default=0)
parser.add_argument('-map', type=str, default='i80', choices={'ai', 'i80', 'us101', 'lanker', 'peach'})
parser.add_argument('-delta_t', type=float, default=0.1)
opt = parser.parse_args()
opt.state_image = (opt.state_image == 1)
opt.store = (opt.store == 1)
random.seed(opt.seed)
np.random.seed(opt.seed)
torch.manual_seed(opt.seed)
os.system("mkdir -p " + opt.data_dir)
kwargs = dict(
display=opt.display,
state_image=opt.state_image,
store=opt.store,
fps=opt.fps,
nb_lanes=opt.lanes,
traffic_rate=opt.traffic_rate,
data_dir=opt.data_dir,
delta_t=opt.delta_t,
)
register(
id='Traffic-v0',
entry_point='traffic_gym:Simulator',
kwargs=kwargs
)
register(
id='I-80-v0',
entry_point='map_i80:I80',
kwargs=kwargs
)
gym.envs.registration.register(
id='US-101-v0',
entry_point='map_us101:US101',
kwargs=kwargs,
)
gym.envs.registration.register(
id='Lankershim-v0',
entry_point='map_lanker:Lankershim',
kwargs=kwargs,
)
gym.envs.registration.register(
id='Peachtree-v0',
entry_point='map_peach:Peachtree',
kwargs=kwargs,
)
env_names = {
'ai': 'Traffic-v0',
'i80': 'I-80-v0',
'us101': 'US-101-v0',
'lanker': 'Lankershim-v0',
'peach': 'Peachtree-v0',
}
print('Building the environment (loading data, if any)')
env = gym.make(env_names[opt.map])
env.reset(frame=0, time_slot=opt.time_slot)
done = False
while not done:
observation, reward, done, info = env.step()
env.render()
print(f'Data generation for <{opt.map}, time slot {opt.time_slot}> completed')
|
14822
|
from demos.setup import np, plt
from compecon import BasisChebyshev, BasisSpline
from compecon.tools import nodeunif
__author__ = 'Randall'
# DEMAPP06 Chebychev and cubic spline derivative approximation errors
# Function to be approximated
def f(x):
g = np.zeros((3, x.size))
g[0], g[1], g[2] = np.exp(-x), -np.exp(-x), np.exp(-x)
return g
# Set degree of approximation and endpoints of approximation interval
a = -1 # left endpoint
b = 1 # right endpoint
n = 10    # order of interpolation
# Construct refined uniform grid for error plotting
x = nodeunif(1001, a, b)
# Compute actual and fitted values on grid
y, d, s = f(x) # actual
# Construct and evaluate Chebychev interpolant
C = BasisChebyshev(n, a, b, f=f)    # choose basis functions
yc = C(x) # values
dc = C(x, 1) # first derivative
sc = C(x, 2) # second derivative
# Construct and evaluate cubic spline interpolant
S = BasisSpline(n, a, b, f=f)    # choose basis functions
ys = S(x) # values
ds = S(x, 1) # first derivative
ss = S(x, 2) # second derivative
# Plot function approximation error
plt.figure()
plt.subplot(2, 1, 1),
plt.plot(x, y - yc[0])
plt.ylabel('Chebychev')
plt.title('Function Approximation Error')
plt.subplot(2, 1, 2)
plt.plot(x, y - ys[0])
plt.ylabel('Cubic Spline')
plt.xlabel('x')
# Plot first derivative approximation error
plt.figure()
plt.subplot(2, 1, 1),
plt.plot(x, d - dc[0])
plt.ylabel('Chebychev')
plt.title('First Derivative Approximation Error')
plt.subplot(2, 1, 2)
plt.plot(x, d - ds[0], 'm')
plt.ylabel('Cubic Spline')
plt.xlabel('x')
# Plot second derivative approximation error
plt.figure()
plt.subplot(2, 1, 1),
plt.plot(x, s - sc[0])
plt.ylabel('Chebychev')
plt.title('Second Derivative Approximation Error')
plt.subplot(2, 1, 2)
plt.plot(x, s - ss[0], 'm')
plt.ylabel('Cubic Spline')
plt.xlabel('x')
plt.show()
|
14860
|
from .pressureprofile import PressureProfile
import numpy as np
class ArrayPressureProfile(PressureProfile):
def __init__(self, array, reverse=False):
super().__init__(self.__class__.__name__, array.shape[-1])
if reverse:
self.pressure_profile = array[::-1]
else:
self.pressure_profile = array
def compute_pressure_profile(self):
"""
Sets up the pressure profile for the atmosphere model
"""
logp = np.log10(self.pressure_profile)
gradp = np.gradient(logp)
self.pressure_profile_levels = \
10**np.append(logp-gradp/2, logp[-1]+gradp[-1]/2)
@property
def profile(self):
return self.pressure_profile
def write(self, output):
pressure = super().write(output)
return pressure
@classmethod
    def input_keywords(cls):
return ['array', 'fromarray',]
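# Worked example of compute_pressure_profile (illustrative values, units left to the caller):
#   pressure_profile            = [1e5, 1e4, 1e3]          # layer centres
#   log10 of the derived levels = [5.5, 4.5, 3.5, 2.5]
#   pressure_profile_levels     = [10**5.5, 10**4.5, 10**3.5, 10**2.5]
# i.e. each layer centre is bracketed by two levels half a log-step away on either side.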
|
14878
|
import copy
import logging
import os
from typing import Dict, List, Tuple
import checksumdir
import imageio
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from ..adapter import download_object
logger = logging.getLogger("fastface.dataset")
class _IdentitiyTransforms:
"""Dummy tranforms"""
def __call__(self, img: np.ndarray, targets: Dict) -> Tuple:
return img, targets
def default_collate_fn(batch):
batch, targets = zip(*batch)
batch = np.stack(batch, axis=0).astype(np.float32)
batch = torch.from_numpy(batch).permute(0, 3, 1, 2).contiguous()
for i, target in enumerate(targets):
for k, v in target.items():
if isinstance(v, np.ndarray):
targets[i][k] = torch.from_numpy(v)
return batch, targets
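# Hypothetical sanity check for default_collate_fn (shapes and values are made up):
#   samples = [(np.zeros((4, 4, 3), dtype=np.uint8), {"target_boxes": np.zeros((0, 4))})
#              for _ in range(2)]
#   batch, targets = default_collate_fn(samples)
#   batch.shape                      -> torch.Size([2, 3, 4, 4])   (NHWC stacked, then NCHW)
#   type(targets[0]["target_boxes"]) -> torch.Tensor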
class BaseDataset(Dataset):
def __init__(self, ids: List[str], targets: List[Dict], transforms=None, **kwargs):
super().__init__()
assert isinstance(ids, list), "given `ids` must be list"
        assert isinstance(targets, list), "given `targets` must be list"
        assert len(ids) == len(targets), "length of both lists must be equal"
self.ids = ids
self.targets = targets
self.transforms = _IdentitiyTransforms() if transforms is None else transforms
# set given kwargs to the dataset
for key, value in kwargs.items():
if hasattr(self, key):
# log warning
continue
setattr(self, key, value)
def __getitem__(self, idx: int) -> Tuple:
img = self._load_image(self.ids[idx])
targets = copy.deepcopy(self.targets[idx])
# apply transforms
img, targets = self.transforms(img, targets)
# clip boxes
targets["target_boxes"] = self._clip_boxes(
targets["target_boxes"], img.shape[:2]
)
# discard zero sized boxes
targets["target_boxes"] = self._discard_zero_size_boxes(targets["target_boxes"])
return (img, targets)
def __len__(self) -> int:
return len(self.ids)
@staticmethod
def _clip_boxes(boxes: np.ndarray, shape: Tuple[int, int]) -> np.ndarray:
# TODO pydoc
height, width = shape
boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(min=0, max=width - 1)
boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(min=0, max=height - 1)
return boxes
@staticmethod
def _discard_zero_size_boxes(boxes: np.ndarray) -> np.ndarray:
# TODO pydoc
scale = (boxes[:, [2, 3]] - boxes[:, [0, 1]]).min(axis=1)
return boxes[scale > 0]
@staticmethod
def _load_image(img_file_path: str):
"""loads rgb image using given file path
Args:
img_path (str): image file path to load
Returns:
np.ndarray: rgb image as np.ndarray
"""
img = imageio.imread(img_file_path)
if not img.flags["C_CONTIGUOUS"]:
# if img is not contiguous than fix it
img = np.ascontiguousarray(img, dtype=img.dtype)
        if len(img.shape) == 2:
            # found GRAYSCALE, converting to => RGB
            img = np.stack([img, img, img], axis=-1)
        elif img.shape[2] == 4:
            # found RGBA (H, W, 4), converting to => RGB
            img = img[:, :, :3]
return np.array(img, dtype=np.uint8)
def get_dataloader(
self,
batch_size: int = 1,
shuffle: bool = False,
num_workers: int = 0,
collate_fn=default_collate_fn,
pin_memory: bool = False,
**kwargs
):
return DataLoader(
self,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
collate_fn=collate_fn,
pin_memory=pin_memory,
**kwargs
)
def get_mean_std(self) -> Dict:
# TODO pydoc
mean_sum, mean_sq_sum = np.zeros(3), np.zeros(3)
for img, _ in tqdm(
self, total=len(self), desc="calculating mean and std for the dataset"
):
d = img.astype(np.float32) / 255
mean_sum[0] += np.mean(d[:, :, 0])
mean_sum[1] += np.mean(d[:, :, 1])
mean_sum[2] += np.mean(d[:, :, 2])
mean_sq_sum[0] += np.mean(d[:, :, 0] ** 2)
mean_sq_sum[1] += np.mean(d[:, :, 1] ** 2)
mean_sq_sum[2] += np.mean(d[:, :, 2] ** 2)
mean = mean_sum / len(self)
std = (mean_sq_sum / len(self) - mean ** 2) ** 0.5
return {"mean": mean.tolist(), "std": std.tolist()}
def get_normalized_boxes(self) -> np.ndarray:
# TODO pydoc
normalized_boxes = []
for img, targets in tqdm(
self, total=len(self), desc="computing normalized target boxes"
):
if targets["target_boxes"].shape[0] == 0:
continue
max_size = max(img.shape)
normalized_boxes.append(targets["target_boxes"] / max_size)
return np.concatenate(normalized_boxes, axis=0)
def get_box_scale_histogram(self) -> Tuple[np.ndarray, np.ndarray]:
bins = map(lambda x: 2 ** x, range(10))
total_boxes = []
for _, targets in tqdm(self, total=len(self), desc="getting box sizes"):
if targets["target_boxes"].shape[0] == 0:
continue
total_boxes.append(targets["target_boxes"])
total_boxes = np.concatenate(total_boxes, axis=0)
areas = (total_boxes[:, 2] - total_boxes[:, 0]) * (
total_boxes[:, 3] - total_boxes[:, 1]
)
return np.histogram(np.sqrt(areas), bins=list(bins))
def download(self, urls: List, target_dir: str):
for k, v in urls.items():
keys = list(v["check"].items())
checked_keys = []
for key, md5hash in keys:
target_sub_dir = os.path.join(target_dir, key)
if not os.path.exists(target_sub_dir):
checked_keys.append(False)
else:
checked_keys.append(
checksumdir.dirhash(target_sub_dir, hashfunc="md5") == md5hash
)
if sum(checked_keys) == len(keys):
logger.debug("found {} at {}".format(k, target_dir))
continue
# download
adapter = v.get("adapter")
kwargs = v.get("kwargs", {})
logger.warning(
"{} not found in the {}, downloading...".format(k, target_dir)
)
download_object(adapter, dest_path=target_dir, **kwargs)
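# Usage sketch (the path and boxes below are placeholders, not a real dataset):
#   ds = BaseDataset(ids=["/path/to/img.jpg"], targets=[{"target_boxes": np.zeros((0, 4))}])
#   dl = ds.get_dataloader(batch_size=1, shuffle=False)
#   stats = ds.get_mean_std()   # -> {"mean": [...], "std": [...]} over the whole dataset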
|
14909
|
import json
import logging
from django.core.management.base import BaseCommand
from django.db import transaction
from osf.models import AbstractProvider, PreprintProvider, Preprint, Subject
from osf.models.provider import rules_to_subjects
from scripts import utils as script_utils
from osf.models.validators import validate_subject_hierarchy
from website.preprints.tasks import on_preprint_updated
logger = logging.getLogger(__name__)
BEPRESS_PROVIDER = None
def validate_input(custom_provider, data, provider_type='osf.preprintprovider', copy=False, add_missing=False):
# This function may be run outside of this command (e.g. in the admin app) so we
# need to make sure that BEPRESS_PROVIDER is set
global BEPRESS_PROVIDER
BEPRESS_PROVIDER = AbstractProvider.objects.filter(_id='osf', type='osf.preprintprovider').first()
logger.info('Validating data')
includes = data.get('include', [])
excludes = data.get('exclude', [])
customs = data.get('custom', {})
merges = data.get('merge', {})
if copy:
included_subjects = rules_to_subjects(custom_provider.subjects_acceptable)
else:
assert not set(includes) & set(excludes), 'There must be no overlap between includes and excludes'
for text in includes:
assert Subject.objects.filter(provider=BEPRESS_PROVIDER, text=text).exists(), 'Unable to find included subject with text {}'.format(text)
included_subjects = Subject.objects.filter(provider=BEPRESS_PROVIDER, text__in=includes).include_children()
logger.info('Successfully validated `include`')
for text in excludes:
try:
Subject.objects.get(provider=BEPRESS_PROVIDER, text=text)
except Subject.DoesNotExist:
raise RuntimeError('Unable to find excluded subject with text {}'.format(text))
assert included_subjects.filter(text=text).exists(), 'Excluded subject with text {} was not included'.format(text)
included_subjects = included_subjects.exclude(text__in=excludes)
logger.info('Successfully validated `exclude`')
for cust_name, map_dict in customs.items():
assert not included_subjects.filter(text=cust_name).exists(), 'Custom text {} already exists in mapped set'.format(cust_name)
assert Subject.objects.filter(provider=BEPRESS_PROVIDER, text=map_dict.get('bepress')).exists(), 'Unable to find specified BePress subject with text {}'.format(map_dict.get('bepress'))
if map_dict.get('parent'): # Null parent possible
assert map_dict['parent'] in set(customs.keys()) | set(included_subjects.values_list('text', flat=True)), 'Unable to find specified parent with text {} in mapped set'.format(map_dict['parent'])
# TODO: hierarchy length validation? Probably more trouble than worth here, done on .save
logger.info('Successfully validated `custom`')
included_subjects = included_subjects | Subject.objects.filter(text__in=[map_dict['bepress'] for map_dict in customs.values()])
for merged_from, merged_into in merges.items():
assert not included_subjects.filter(text=merged_from).exists(), 'Cannot merge subject "{}" that will be included'.format(merged_from)
assert merged_into in set(included_subjects.values_list('text', flat=True)) | set(customs.keys()), 'Unable to determine merge target for "{}"'.format(merged_into)
included_subjects = included_subjects | Subject.objects.filter(text__in=merges.keys())
missing_subjects = Subject.objects.filter(id__in=set([hier[-1].id for ps in Preprint.objects.filter(provider=custom_provider) for hier in ps.subject_hierarchy])).exclude(id__in=included_subjects.values_list('id', flat=True))
if not add_missing:
assert not missing_subjects.exists(), 'Incomplete mapping -- following subjects in use but not included:\n{}'.format(list(missing_subjects.values_list('text', flat=True)))
if isinstance(custom_provider, PreprintProvider):
assert custom_provider.share_title not in [None, '', 'bepress'], 'share title not set; please set the share title on this provider before creating a custom taxonomy.'
logger.info('Successfully validated mapping completeness')
return list(missing_subjects) if add_missing else None
def create_subjects_recursive(custom_provider, root_text, exclude_texts, parent=None):
logger.info('Duplicating BePress subject {} on {}'.format(root_text, custom_provider._id))
bepress_subj = Subject.objects.get(provider=BEPRESS_PROVIDER, text=root_text)
custom_subj = Subject(text=root_text, parent=parent, bepress_subject=bepress_subj, provider=custom_provider)
custom_subj.save()
# This is not a problem now, as all excluded subjects are leafs, but it could be problematic if non-leafs had their children excluded.
# It could also be problematic if they didn't, if any of those children are used by existing preprints.
# TODO: Determine correct resolution
for child_text in bepress_subj.children.exclude(text__in=exclude_texts).values_list('text', flat=True):
create_subjects_recursive(custom_provider, child_text, exclude_texts, parent=custom_subj)
def create_from_subjects_acceptable(custom_provider, add_missing=False, missing=None):
tries = 0
subjects_to_copy = list(rules_to_subjects(custom_provider.subjects_acceptable))
if missing and add_missing:
subjects_to_copy = subjects_to_copy + missing
while len(subjects_to_copy):
previous_len = len(subjects_to_copy)
tries += 1
if tries == 10:
raise RuntimeError('Unable to map subjects acceptable with 10 iterations -- subjects remaining: {}'.format(subjects_to_copy))
for subj in list(subjects_to_copy):
if map_custom_subject(custom_provider, subj.text, subj.parent.text if subj.parent else None, subj.text):
subjects_to_copy.remove(subj)
elif add_missing and subj.parent and subj.parent not in subjects_to_copy:
# Dirty
subjects_to_copy.append(subj.parent)
previous_len += 1
else:
                logger.warning('Failed. Retrying next iteration')
new_len = len(subjects_to_copy)
if new_len == previous_len:
raise RuntimeError('Unable to map any custom subjects on iteration -- subjects remaining: {}'.format(subjects_to_copy))
def do_create_subjects(custom_provider, includes, excludes, copy=False, add_missing=False, missing=None):
if copy:
create_from_subjects_acceptable(custom_provider, add_missing=add_missing, missing=missing)
else:
for root_text in includes:
create_subjects_recursive(custom_provider, root_text, excludes)
def map_custom_subject(custom_provider, name, parent, mapping):
logger.info('Attempting to create subject {} on {} from {} with {}'.format(name, custom_provider._id, mapping, 'parent {}'.format(parent) if parent else 'no parent'))
if parent:
parent_subject = Subject.objects.filter(provider=custom_provider, text=parent).first()
else:
parent_subject = None
bepress_subject = Subject.objects.get(provider=BEPRESS_PROVIDER, text=mapping)
if parent and not parent_subject:
return False
custom_subject = Subject(provider=custom_provider, text=name, parent=parent_subject, bepress_subject=bepress_subject)
custom_subject.save()
return True
def do_custom_mapping(custom_provider, customs):
tries = 0
unmapped_customs = customs
while len(unmapped_customs):
previous_len = len(unmapped_customs)
tries += 1
if tries == 10:
raise RuntimeError('Unable to map custom subjects with 10 iterations -- invalid input')
successes = []
for cust_name, map_dict in unmapped_customs.items():
if map_custom_subject(custom_provider, cust_name, map_dict.get('parent'), map_dict.get('bepress')):
successes.append(cust_name)
else:
                logger.warning('Failed. Retrying next iteration')
[unmapped_customs.pop(key) for key in successes]
new_len = len(unmapped_customs)
if new_len == previous_len:
raise RuntimeError('Unable to map any custom subjects on iteration -- invalid input')
def map_preprints_to_custom_subjects(custom_provider, merge_dict, dry_run=False):
for preprint in Preprint.objects.filter(provider=custom_provider):
logger.info('Preparing to migrate preprint {}'.format(preprint.id))
old_hier = preprint.subject_hierarchy
subjects_to_map = [hier[-1] for hier in old_hier]
merged_subject_ids = set(Subject.objects.filter(provider=custom_provider, text__in=[merge_dict[k] for k in set(merge_dict.keys()) & set([s.text for s in subjects_to_map])]).values_list('id', flat=True))
subject_ids_to_map = set(s.id for s in subjects_to_map if s.text not in merge_dict.keys())
aliased_subject_ids = set(Subject.objects.filter(bepress_subject__id__in=subject_ids_to_map, provider=custom_provider).values_list('id', flat=True)) | merged_subject_ids
aliased_hiers = [s.object_hierarchy for s in Subject.objects.filter(id__in=aliased_subject_ids)]
old_subjects = list(preprint.subjects.values_list('id', flat=True))
preprint.subjects.clear()
for hier in aliased_hiers:
validate_subject_hierarchy([s._id for s in hier])
for s in hier:
preprint.subjects.add(s)
# Update preprint in SHARE
if not dry_run:
on_preprint_updated(preprint._id, old_subjects=old_subjects)
preprint.reload()
new_hier = [s.object_hierarchy for s in preprint.subjects.exclude(children__in=preprint.subjects.all())]
logger.info('Successfully migrated preprint {}.\n\tOld hierarchy:{}\n\tNew hierarchy:{}'.format(preprint.id, old_hier, new_hier))
def migrate(provider=None, provider_type='osf.preprintprovider', share_title=None, data=None, dry_run=False, copy=False, add_missing=False):
# This function may be run outside of this command (e.g. in the admin app) so we
# need to make sure that BEPRESS_PROVIDER is set
global BEPRESS_PROVIDER
if not BEPRESS_PROVIDER:
BEPRESS_PROVIDER = AbstractProvider.objects.filter(_id='osf', type='osf.preprintprovider').first()
custom_provider = AbstractProvider.objects.filter(_id=provider, type=provider_type).first()
assert custom_provider, 'Unable to find specified provider: {}'.format(provider)
assert custom_provider.id != BEPRESS_PROVIDER.id, 'Cannot add custom mapping to BePress provider'
    assert not custom_provider.subjects.exists(), 'Provider already has a custom taxonomy'
if isinstance(custom_provider, PreprintProvider) and custom_provider.share_title in [None, '', 'bepress']:
if not share_title:
raise RuntimeError('`--share-title` is required if not already set on the provider')
custom_provider.share_title = share_title
custom_provider.save()
missing = validate_input(custom_provider, data, provider_type=provider_type, copy=copy, add_missing=add_missing)
do_create_subjects(custom_provider, data['include'], data.get('exclude', []), copy=copy, add_missing=add_missing, missing=missing)
do_custom_mapping(custom_provider, data.get('custom', {}))
map_preprints_to_custom_subjects(custom_provider, data.get('merge', {}), dry_run=dry_run)
class Command(BaseCommand):
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'--dry',
action='store_true',
dest='dry_run',
help='Run migration and roll back changes to db',
)
parser.add_argument(
'--data',
action='store',
dest='data',
help='List of targets, of form {\n"include": [<list of subject texts to include at top level, children implicit>],'
'\n"exclude": [<list of children to exclude from included trees>],'
'\n"custom": [{"<Custom Name": {"parent": <Parent text>", "bepress": "<Bepress Name>"}}, ...]'
'\n"merge": {"<Merged from (bepress)>": "<Merged into (custom)", ...}}',
)
parser.add_argument(
'--provider',
action='store',
dest='provider',
required=True,
help='_id of the <provider> object, e.g. "osf". <provider> is expected to not already have a custom taxonomy.'
)
parser.add_argument(
'--from-subjects-acceptable',
action='store_true',
dest='from_subjects_acceptable',
help='Specifies that the provider\'s `subjects_acceptable` be copied. `data.include` and `exclude` are ignored, the other keys may still be used'
)
parser.add_argument(
'--add-missing',
action='store_true',
dest='add_missing',
help='Adds "used-but-not-included" subjects.'
)
parser.add_argument(
'--share-title',
action='store',
type=str,
dest='share_title',
help='Sets <provider>.share_title. Ignored if already set on provider, required if not.'
)
parser.add_argument(
'--type',
action='store',
type=str,
dest='provider_type',
help='Specifies provider type [`osf.preprintprovider`, `osf.registrationprovider`, `osf.collectionprovider`]'
)
def handle(self, *args, **options):
global BEPRESS_PROVIDER
provider_type = options.get('provider_type') or 'osf.preprintprovider'
BEPRESS_PROVIDER = AbstractProvider.objects.filter(_id='osf', type='osf.preprintprovider').first()
dry_run = options.get('dry_run')
provider = options['provider']
data = json.loads(options['data'] or '{}')
share_title = options.get('share_title')
copy = options.get('from_subjects_acceptable')
add_missing = options.get('add_missing')
if copy:
data['include'] = list(Subject.objects.filter(provider=BEPRESS_PROVIDER, parent__isnull=True).values_list('text', flat=True))
if not dry_run:
script_utils.add_file_logger(logger, __file__)
with transaction.atomic():
migrate(provider=provider, share_title=share_title, provider_type=provider_type, data=data, dry_run=dry_run, copy=copy, add_missing=add_missing)
if dry_run:
raise RuntimeError('Dry run, transaction rolled back.')
|
14943
|
import os
def get_dirs():
cwd = os.path.dirname(os.path.realpath(__file__))
local_savedir = cwd
local_datadir = cwd
local_wandbdir = cwd
return local_savedir, local_datadir, local_wandbdir
def configure_logging(config, name, model):
if config['wandb_on']:
import wandb
wandb.init(name=name,
project='YOUR_PROJECT_NAME',
entity='YOUR_ENTITY_NAME',
dir=config['wandb_dir'],
config=config)
wandb.watch(model)
def log(key, val):
print(f"{key}: {val}")
wandb.log({key: val})
checkpoint_path = os.path.join(wandb.run.dir, 'checkpoint.tar')
else:
def log(key, val):
print(f"{key}: {val}")
checkpoint_path = './checkpoint.tar'
return log, checkpoint_path
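# Minimal usage sketch (keys mirror the ones read above; with wandb off, `model` is unused):
#   config = {"wandb_on": False, "wandb_dir": "./wandb"}
#   log, checkpoint_path = configure_logging(config, name="debug-run", model=None)
#   log("loss", 0.123)   # prints "loss: 0.123"; would also call wandb.log if wandb_on were True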
|
14945
|
import torch
from torch import nn
from transformers import BertTokenizer, VisualBertModel, VisualBertConfig
import numpy as np
class VisualBertClassifier(nn.Module):
def __init__(self,
visual_bert_model,
num_classes: int = 8,
initial_visual_embedding_dim: int = 96,
final_dropout_rate: float = 0.1):
"""
pooler_output (torch.FloatTensor of shape (batch_size, hidden_size))
— Last layer hidden-state of the first token of the sequence (classification token)
after further processing through the layers used for the auxiliary pretraining task.
E.g. for BERT-family of models, this returns the classification token after processing through
a linear layer and a tanh activation function.
The linear layer weights are trained from the next sentence prediction (classification) objective
during pretraining.
@param initial_visual_embedding_dim:
"""
super().__init__()
self.visual_embedding_projection = nn.Linear(initial_visual_embedding_dim, 2048)
self.visual_bert = visual_bert_model
self.final_dropout = nn.Dropout(final_dropout_rate)
self.out = nn.Linear(768, num_classes)
def forward(self,
text_input_ids,
text_token_type_ids,
text_attention_mask,
visual_embeds,
visual_token_type_ids,
visual_attention_mask
):
visual_embeds = self.visual_embedding_projection(visual_embeds)
output = self.visual_bert(input_ids=text_input_ids,
token_type_ids=text_token_type_ids,
attention_mask=text_attention_mask,
visual_embeds=visual_embeds,
visual_token_type_ids=visual_token_type_ids,
visual_attention_mask=visual_attention_mask)
output = self.final_dropout(output.pooler_output)
output = self.out(output)
return output
if __name__ == '__main__':
bert_text_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
inputs = bert_text_tokenizer("What is the man eating?", return_tensors="pt")
text_input_ids = inputs.data['input_ids'].to('cuda')
text_token_type_ids = inputs.data['token_type_ids'].to('cuda')
text_attention_mask = inputs.data['attention_mask'].to('cuda')
sample_face_body_embedding_path = "/home/gsoykan20/Desktop/self_development/emotion-recognition-drawings/data/emoreccom_face_body_embeddings_96d/train/0_3_4.jpg.npy"
sample_face_body_embedding = np.load(sample_face_body_embedding_path)
visual_embeds = torch.from_numpy(sample_face_body_embedding)
visual_embeds = visual_embeds.to('cuda')
visual_embeds = torch.unsqueeze(visual_embeds, 0)
visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long).to('cuda')
visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float).to('cuda')
    # NOTE: the checkpoint below is an assumption for this standalone demo; any VisualBERT
    # checkpoint whose visual embedding space is 2048-d should work here.
    visual_bert_model = VisualBertModel.from_pretrained("uclanlp/visualbert-vqa-coco-pre")
    classifier = VisualBertClassifier(visual_bert_model)
classifier.to('cuda')
classifier.forward(text_input_ids,
text_token_type_ids,
text_attention_mask,
visual_embeds,
visual_token_type_ids,
visual_attention_mask)
|
14969
|
from akagi.data_source import DataSource
from akagi.data_file import DataFile
class SpreadsheetDataSource(DataSource):
    '''SpreadsheetDataSource represents data on Google Spreadsheets
    '''
def __init__(self, sheet_id, sheet_range='A:Z', no_cache=False):
self._sheet_id = sheet_id
self._sheet_range = sheet_range
@property
def data_files(self):
return [DataFile.spreadsheet(self._sheet_id, self._sheet_range)]
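# Usage sketch (the sheet id below is a placeholder, not a real spreadsheet):
#   source = SpreadsheetDataSource("<spreadsheet-id>", sheet_range="A:C")
#   source.data_files   # -> [DataFile.spreadsheet("<spreadsheet-id>", "A:C")]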
|
14972
|
from girder.exceptions import ValidationException
from girder.utility import setting_utilities
class PluginSettings:
AUTO_COMPUTE = 'hashsum_download.auto_compute'
@setting_utilities.default(PluginSettings.AUTO_COMPUTE)
def _defaultAutoCompute():
return False
@setting_utilities.validator(PluginSettings.AUTO_COMPUTE)
def _validateAutoCompute(doc):
if not isinstance(doc['value'], bool):
raise ValidationException('Auto-compute hash setting must be true or false.')
|
14987
|
import os
from PIL import Image
import seaborn as sn
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from sidechainnet.utils.sequence import ProteinVocabulary
from einops import rearrange
# general functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def broadcat(tensors, dim = -1):
num_tensors = len(tensors)
shape_lens = set(list(map(lambda t: len(t.shape), tensors)))
assert len(shape_lens) == 1, 'tensors must all have the same number of dimensions'
shape_len = list(shape_lens)[0]
dim = (dim + shape_len) if dim < 0 else dim
dims = list(zip(*map(lambda t: list(t.shape), tensors)))
expandable_dims = [(i, val) for i, val in enumerate(dims) if i != dim]
    assert all([*map(lambda t: len(set(t[1])) <= 2, expandable_dims)]), 'invalid dimensions for broadcastable concatenation'
max_dims = list(map(lambda t: (t[0], max(t[1])), expandable_dims))
expanded_dims = list(map(lambda t: (t[0], (t[1],) * num_tensors), max_dims))
expanded_dims.insert(dim, (dim, dims[dim]))
expandable_shapes = list(zip(*map(lambda t: t[1], expanded_dims)))
tensors = list(map(lambda t: t[0].expand(*t[1]), zip(tensors, expandable_shapes)))
return torch.cat(tensors, dim = dim)
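# Illustration (editor's sketch): the non-concatenation dimensions are broadcast
# to their maximum size before torch.cat, so tensors of shape (1, 4, 2) and
# (3, 4, 5) concatenated on the last dim produce shape (3, 4, 7):
#   broadcat([torch.randn(1, 4, 2), torch.randn(3, 4, 5)], dim = -1)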
# singleton msa transformer
msa_instances = None
def get_msa_transformer():
    global msa_instances
    if not exists(msa_instances):
        msa_model, alphabet = torch.hub.load("facebookresearch/esm", "esm_msa1_t12_100M_UR50S")
        batch_converter = alphabet.get_batch_converter()
        # store the loaded model and batch converter so repeated calls reuse them
        msa_instances = (msa_model, batch_converter)
    return msa_instances
# MSA embedding related functions
VOCAB = ProteinVocabulary()
def ids_to_aa_str(x):
assert isinstance(x, list), 'input must be a list'
id2aa = VOCAB._int2char
is_char = lambda c: isinstance(c, str) and len(c) == 1
out = []
for el in x:
if isinstance(el, list):
out.append(ids_to_aa_str(el))
elif isinstance(el, int):
out.append(id2aa[el])
else:
raise TypeError('type must be either list or character')
if all(map(is_char, out)):
return ''.join(out)
return out
def aa_str_to_embed_input(x):
assert isinstance(x, list), 'input must be a list'
out = []
for el in x:
if isinstance(el, list):
out.append(aa_str_to_embed_input(el))
elif isinstance(el, str):
out.append((None, el))
else:
raise TypeError('type must be either list or string')
return out
def apc(x):
a1 = x.sum(-1, keepdims=True)
a2 = x.sum(-2, keepdims=True)
a12 = x.sum((-1, -2), keepdims=True)
avg = a1 * a2
avg.div_(a12)
normalized = x - avg
return normalized
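# Note: apc applies the average product correction used in contact prediction,
# i.e. x_ij - (row_sum_i * col_sum_j) / total_sum over the last two dimensions.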
def symmetrize(x):
return x + x.transpose(-1, -2)
def pad_image_to(tensor, size, value = 0.):
remainder = size - tensor.shape[-1]
tensor = F.pad(tensor, (0, remainder, 0, remainder), value = value)
return tensor
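# e.g. pad_image_to(torch.ones(1, 144, 60, 60), 64) returns shape (1, 144, 64, 64),
# zero-padding the right and bottom of the trailing (square) map dimensions.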
# getting a single MSA attention embedding, with caching
CACHE_PATH = default(os.getenv('CACHE_PATH'), os.path.expanduser('~/.cache.ddpm-proteins'))
FETCH_FROM_CACHE = not exists(os.getenv('CLEAR_CACHE'))
os.makedirs(CACHE_PATH, exist_ok = True)
@torch.no_grad()
def get_msa_attention_embedding(
model,
batch_converter,
aa_str,
id,
fetch_msas_fn = lambda t: [],
cache = True
):
device = next(model.parameters()).device
cache_full_path = os.path.join(CACHE_PATH, f'{id}.pt')
if cache and FETCH_FROM_CACHE and os.path.exists(cache_full_path):
try:
loaded = torch.load(cache_full_path).to(device)
        except Exception:  # fall back to recomputing if the cached tensor is unreadable
loaded = None
if exists(loaded):
return loaded
msas = default(fetch_msas_fn(aa_str), [])
seq_with_msas = [aa_str, *msas]
embed_inputs = aa_str_to_embed_input(seq_with_msas)
_, _, msa_batch_tokens = batch_converter(embed_inputs)
results = model(msa_batch_tokens.to(device), need_head_weights = True)
attentions = results['row_attentions']
attentions = attentions[..., 1:, 1:]
attentions = rearrange(attentions, 'b l h m n -> b (l h) m n')
attentions = apc(symmetrize(attentions))
if cache:
print(f'caching to {cache_full_path}')
torch.save(attentions, cache_full_path)
return attentions
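# Shape note (sketch): the returned attention embedding is
# (1, num_layers * num_heads, seq_len, seq_len); for esm_msa1_t12_100M_UR50S
# (12 layers x 12 heads) that is (1, 144, seq_len, seq_len).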
def get_msa_attention_embeddings(
model,
batch_converter,
seqs,
ids,
fetch_msas_fn = lambda t: [],
cache = True
):
n = seqs.shape[1]
seqs = rearrange(seqs, 'b n -> b () n')
aa_strs = ids_to_aa_str(seqs.cpu().tolist())
embeds_list = [get_msa_attention_embedding(model, batch_converter, aa, seq_id, cache = cache) for aa, seq_id in zip(aa_strs, ids)]
embeds_list = [pad_image_to(embed, n) for embed in embeds_list]
embeds = torch.cat(embeds_list, dim = 0)
return embeds
# training utils
def cycle(loader, thres = 256):
while True:
for data in loader:
if data.seqs.shape[1] <= thres:
yield data
def save_heatmap(tensor, filepath, dpi = 200, return_image = False):
heatmap = sn.heatmap(tensor.cpu().numpy())
figure = heatmap.get_figure()
figure.savefig(filepath, dpi = dpi)
plt.clf()
if not return_image:
return
return Image.open(filepath)
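# Usage sketch (attn_map is a placeholder 2D tensor):
#   save_heatmap(attn_map, './attention.png', return_image = True)
# writes the heatmap to disk and also returns it as a PIL Image.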
|
15007
|
from . import (
yaw,
layout,
base_COE,
optimization,
layout_height,
power_density,
yaw_wind_rose,
power_density_1D,
yaw_wind_rose_parallel,
)
|