ext | sha | content
---|---|---
py | 7dfee9945e0ac8148740643fc3ec90d75f8ce2df | # -*- coding: utf-8 -*-
from tests.test_experiments import IS_ACTIVE
from unittest import TestCase
from fastapi.testclient import TestClient
from projects.api.main import app
from projects.controllers.utils import uuid_alpha
from projects.database import engine
TEST_CLIENT = TestClient(app)
PROJECT_ID = str(uuid_alpha())
PROJECT_ID_2 = str(uuid_alpha())
NAME = "foo"
NAME_2 = "foo 2"
NAME_3 = "foo 3"
DESCRIPTION = "Description"
EXPERIMENT_ID = str(uuid_alpha())
EXPERIMENT_ID_2 = str(uuid_alpha())
EXPERIMENT_NAME = "Experimento 1"
DEPLOYMENT_ID = str(uuid_alpha())
STATUS = "Pending"
URL = None
POSITION = 0
IS_ACTIVE = True
CREATED_AT = "2000-01-01 00:00:00"
CREATED_AT_ISO = "2000-01-01T00:00:00"
UPDATED_AT = "2000-01-01 00:00:00"
UPDATED_AT_ISO = "2000-01-01T00:00:00"
class TestProjects(TestCase):
def setUp(self):
self.maxDiff = None
conn = engine.connect()
text = (
f"INSERT INTO projects (uuid, name, description, created_at, updated_at) "
f"VALUES (%s, %s, %s, %s, %s)"
)
conn.execute(text, (PROJECT_ID, NAME, DESCRIPTION, CREATED_AT, UPDATED_AT))
text = (
f"INSERT INTO projects (uuid, name, description, created_at, updated_at) "
f"VALUES (%s, %s, %s, %s, %s)"
)
conn.execute(text, (PROJECT_ID_2, NAME_2, DESCRIPTION, CREATED_AT, UPDATED_AT))
text = (
f"INSERT INTO experiments (uuid, name, project_id, position, is_active, created_at, updated_at) "
f"VALUES (%s, %s, %s, %s, %s, %s, %s)"
)
conn.execute(text, (EXPERIMENT_ID, NAME, PROJECT_ID, POSITION, IS_ACTIVE, CREATED_AT, UPDATED_AT))
text = (
f"INSERT INTO experiments (uuid, name, project_id, position, is_active, created_at, updated_at) "
f"VALUES (%s, %s, %s, %s, %s, %s, %s)"
)
conn.execute(text, (EXPERIMENT_ID_2, NAME, PROJECT_ID_2, POSITION, IS_ACTIVE, CREATED_AT, UPDATED_AT))
text = (
f"INSERT INTO deployments (uuid, name, project_id, experiment_id, position, is_active, status, url, created_at, updated_at) "
f"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
)
conn.execute(text, (DEPLOYMENT_ID, NAME, PROJECT_ID_2, EXPERIMENT_ID_2, POSITION, IS_ACTIVE, STATUS, URL, CREATED_AT, UPDATED_AT))
conn.close()
def tearDown(self):
conn = engine.connect()
text = f"DELETE FROM deployments WHERE project_id = '{PROJECT_ID_2}'"
conn.execute(text)
text = f"DELETE FROM experiments WHERE project_id = '{PROJECT_ID}'"
conn.execute(text)
text = f"DELETE FROM experiments WHERE project_id = '{PROJECT_ID_2}'"
conn.execute(text)
text = f"DELETE e.* FROM experiments e INNER JOIN projects p ON e.project_id = p.uuid WHERE p.name = '{NAME_3}'"
conn.execute(text)
text = f"DELETE FROM projects WHERE uuid = '{PROJECT_ID}'"
conn.execute(text)
text = f"DELETE FROM projects WHERE uuid = '{PROJECT_ID_2}'"
conn.execute(text)
text = f"DELETE FROM projects WHERE name = '{NAME_3}'"
conn.execute(text)
conn.close()
def test_list_projects(self):
rv = TEST_CLIENT.get("/projects")
result = rv.json()
self.assertIsInstance(result['projects'], list)
self.assertEqual(rv.status_code, 200)
rv = TEST_CLIENT.get("/projects?order=uuid asc")
result = rv.json()
self.assertIsInstance(result["projects"], list)
self.assertIsInstance(result["total"], int)
self.assertEqual(rv.status_code, 200)
rv = TEST_CLIENT.get("/projects?page=1")
result = rv.json()
self.assertIsInstance(result["projects"], list)
self.assertIsInstance(result["total"], int)
self.assertEqual(rv.status_code, 200)
rv = TEST_CLIENT.get(f"/projects?name={NAME}&page=1&order=uuid asc")
result = rv.json()
self.assertIsInstance(result["projects"], list)
self.assertIsInstance(result["total"], int)
self.assertEqual(rv.status_code, 200)
rv = TEST_CLIENT.get(f"/projects?name={NAME}&page=1&page_size=10&order=name desc")
result = rv.json()
self.assertIsInstance(result["projects"], list)
self.assertIsInstance(result["total"], int)
self.assertEqual(rv.status_code, 200)
rv = TEST_CLIENT.get("/projects?order=name desc")
result = rv.json()
self.assertIsInstance(result["projects"], list)
self.assertIsInstance(result["total"], int)
self.assertEqual(rv.status_code, 200)
rv = TEST_CLIENT.get("/projects?order=name unk")
result = rv.json()
expected = {"message": "Invalid order argument"}
self.assertDictEqual(expected, result)
self.assertEqual(rv.status_code, 400)
rv = TEST_CLIENT.get("/projects?order=name")
result = rv.json()
expected = {"message": "Invalid order argument"}
self.assertDictEqual(expected, result)
self.assertEqual(rv.status_code, 400)
def test_create_project(self):
rv = TEST_CLIENT.post("/projects", json={})
self.assertEqual(rv.status_code, 422)
rv = TEST_CLIENT.post("/projects", json={
"name": NAME
})
result = rv.json()
expected = {"message": "a project with that name already exists"}
self.assertDictEqual(expected, result)
self.assertEqual(rv.status_code, 400)
rv = TEST_CLIENT.post("/projects", json={
"name": NAME_3,
"description": DESCRIPTION
})
result = rv.json()
result_experiments = result.pop("experiments")
expected = {
"name": NAME_3,
"description": DESCRIPTION,
"hasDeployment": False,
"hasExperiment": True,
"hasPreDeployment": False,
"deployments": []
}
# uuid, created_at, updated_at are machine-generated
# we assert they exist, but we don't assert their values
machine_generated = ["uuid", "createdAt", "updatedAt"]
for attr in machine_generated:
self.assertIn(attr, result)
del result[attr]
self.assertDictEqual(expected, result)
expected = {
"name": EXPERIMENT_NAME,
"position": POSITION,
"isActive": IS_ACTIVE,
"operators": [],
}
self.assertEqual(len(result_experiments), 1)
machine_generated = ["uuid", "projectId", "createdAt", "updatedAt"]
for attr in machine_generated:
self.assertIn(attr, result_experiments[0])
del result_experiments[0][attr]
self.assertDictEqual(expected, result_experiments[0])
def test_get_project(self):
rv = TEST_CLIENT.get("/projects/foo")
result = rv.json()
expected = {"message": "The specified project does not exist"}
self.assertDictEqual(expected, result)
self.assertEqual(rv.status_code, 404)
rv = TEST_CLIENT.get(f"/projects/{PROJECT_ID_2}")
result = rv.json()
result_experiments = result.pop("experiments")
result_deployments = result.pop("deployments")
expected = {
"uuid": PROJECT_ID_2,
"name": NAME_2,
"createdAt": CREATED_AT_ISO,
"updatedAt": UPDATED_AT_ISO,
"description": DESCRIPTION,
"hasDeployment": False,
"hasExperiment": True,
"hasPreDeployment": True,
}
self.assertDictEqual(expected, result)
expected = {
"uuid": EXPERIMENT_ID_2,
"name": NAME,
"projectId": PROJECT_ID_2,
"position": 0,
"isActive": True,
"operators": [],
}
self.assertEqual(len(result_experiments), 1)
machine_generated = ["createdAt", "updatedAt"]
for attr in machine_generated:
self.assertIn(attr, result_experiments[0])
del result_experiments[0][attr]
self.assertDictEqual(expected, result_experiments[0])
expected = {
"uuid": DEPLOYMENT_ID,
"name": NAME,
"projectId": PROJECT_ID_2,
"experimentId": EXPERIMENT_ID_2,
"position": POSITION,
"isActive": IS_ACTIVE,
"operators": [],
"url": URL,
"status": STATUS,
}
self.assertEqual(len(result_deployments), 1)
machine_generated = ["createdAt", "updatedAt", "deployedAt"]
for attr in machine_generated:
self.assertIn(attr, result_deployments[0])
del result_deployments[0][attr]
self.assertDictEqual(expected, result_deployments[0])
def test_update_project(self):
rv = TEST_CLIENT.patch("/projects/foo", json={})
result = rv.json()
expected = {"message": "The specified project does not exist"}
self.assertDictEqual(expected, result)
self.assertEqual(rv.status_code, 404)
rv = TEST_CLIENT.patch(f"/projects/{PROJECT_ID}", json={
"name": NAME_2,
})
result = rv.json()
expected = {"message": "a project with that name already exists"}
self.assertDictEqual(expected, result)
self.assertEqual(rv.status_code, 400)
# update project using the same name
rv = TEST_CLIENT.patch(f"/projects/{PROJECT_ID}", json={
"name": NAME,
})
self.assertEqual(rv.status_code, 200)
rv = TEST_CLIENT.patch(f"/projects/{PROJECT_ID}", json={
"name": "bar",
})
result = rv.json()
result_experiments = result.pop("experiments")
expected = {
"uuid": PROJECT_ID,
"name": "bar",
"description": DESCRIPTION,
"createdAt": CREATED_AT_ISO,
"hasPreDeployment": False,
"hasDeployment": False,
"hasExperiment": True,
"deployments": []
}
machine_generated = ["updatedAt"]
for attr in machine_generated:
self.assertIn(attr, result)
del result[attr]
self.assertDictEqual(expected, result)
expected = {
"uuid": EXPERIMENT_ID,
"name": NAME,
"projectId": PROJECT_ID,
"position": POSITION,
"isActive": IS_ACTIVE,
"operators": [],
}
self.assertEqual(len(result_experiments), 1)
machine_generated = ["createdAt", "updatedAt"]
for attr in machine_generated:
self.assertIn(attr, result_experiments[0])
del result_experiments[0][attr]
self.assertDictEqual(expected, result_experiments[0])
def test_delete_project(self):
rv = TEST_CLIENT.delete("/projects/unk")
result = rv.json()
expected = {"message": "The specified project does not exist"}
self.assertDictEqual(expected, result)
self.assertEqual(rv.status_code, 404)
rv = TEST_CLIENT.delete(f"/projects/{PROJECT_ID}")
result = rv.json()
expected = {"message": "Project deleted"}
self.assertDictEqual(expected, result)
def test_delete_projects(self):
rv = TEST_CLIENT.post("/projects/deleteprojects", json=[])
result = rv.json()
expected = {"message": "inform at least one project"}
self.assertDictEqual(expected, result)
self.assertEqual(rv.status_code, 400)
rv = TEST_CLIENT.post("/projects/deleteprojects", json=[PROJECT_ID_2])
result = rv.json()
expected = {"message": "Successfully removed projects"}
self.assertDictEqual(expected, result)
self.assertEqual(rv.status_code, 200)
|
py | 7dfee9fb93bacbbcdb7f578a10476d998fb4b1b6 | from __future__ import absolute_import, division, print_function
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
__title__ = "phe"
__summary__ = "Partially Homomorphic Encryption library for Python"
__uri__ = "https://github.com/data61/python-paillier"
# We use semantic versioning - semver.org
__version__ = "1.4.1-dev0"
__author__ = "CSIRO's Data61"
__email__ = "[email protected]"
__license__ = "GPLv3"
__copyright__ = "Copyright 2013-2019 {0}".format(__author__)
|
py | 7dfeea9be624ddad0df7b6b278488a376103dc33 | import numpy as np
import matplotlib.pyplot as plt
import time,sys
nx = 41;
domain = np.linspace(0,2,nx);
dx = 2/(nx-1)
nt = 25
nu = 0.3
sigma = .2
dt = sigma * dx**2 / nu
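# NOTE: dt is chosen from the explicit (FTCS) diffusion stability condition
# nu * dt / dx**2 <= 0.5; with sigma = 0.2 the update loop below stays well inside that limit.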
u = np.ones(nx)
u[int(.5 / dx):int(1 / dx + 1)] = 2
un = np.ones(nx)
for n in range(nt):
un = u.copy()
for i in range(1, nx - 1):
u[i] = un[i] + nu * dt / dx**2 * (un[i+1] - 2 * un[i] + un[i-1])
#plt.plot(domain, u);
#plt.grid()
plt.plot(domain, u);
plt.grid() |
py | 7dfeeb516944fe0279830cd970068e94eb5bb60c | import threading
import time
import smtplib, ssl
import os
import schedule
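# The dict below maps carrier names to their email-to-SMS gateway domains; a text is sent
# by emailing <number>@<gateway> (e.g. a Verizon number via vtext.com).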
carrier_dict = {"verizon": "vtext.com",
"tmobile": "tmomail.net",
"sprint": "messaging.sprintpcs.com",
"att": "txt.att.net"}
class server:
def __init__(self, threading = False):
self.threading = threading
self.refresh_interval = None
self.internal_scheduler = schedule.Scheduler()
self.emails = []
self.numbers = []
self.text_on = False
def __send_notification(self, msg):
print("HELLO")
context = ssl.create_default_context()
port = 465 # For SSL
with smtplib.SMTP_SSL("smtp.gmail.com", port, context=context) as server:
server.login(self.emails[0][0], self.emails[0][1])
for receiver_email in self.emails:
server.sendmail(self.emails[0][0], receiver_email, msg)
time.sleep(2)
if self.text_on:
for number in self.numbers:
print("sending text")
receiver_sms_gateway = str(number[0]) + "@" + carrier_dict[number[1]]
server.sendmail(self.emails[0][0], receiver_sms_gateway, msg)
def __update_refresh_interval(self, new_interval):
if not self.refresh_interval:
self.refresh_interval = new_interval * 60
else:
self.refresh_interval = min(self.refresh_interval, new_interval * 60)
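# refresh_interval is kept at the shortest registered job period (in seconds), so the
# polling loop in start() sleeps just long enough to service every scheduled job.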
def __transform_to_threaded_job(self, func):
def threaded_job():
t = threading.Thread(func)
t.start()
return threaded_job
def __notification_wrapper(self, func):
def notification_job():
notify, msg = func()
if notify:
self.__send_notification(msg)
return notification_job
def __add_job(self, func, m):
notif_wrapped_func = self.__notification_wrapper(func)
if self.threading:
notif_wrapped_func = self.__transform_to_threaded_job(notif_wrapped_func)
self.internal_scheduler.every(m).minutes.do(notif_wrapped_func)
def weekly_job(self, func):
self.__update_refresh_interval(60*24*7)
self.__add_job(func, 60 * 24 * 7)
def daily_job(self, func):
self.__update_refresh_interval(60*24)
self.__add_job(func, 60 * 24)
def hourly_job(self, func):
self.__update_refresh_interval(60)
self.__add_job(func, 60)
def minutely_job(self, func):
self.__update_refresh_interval(1)
self.__add_job(func, 1)
def register_email(self, email, password):
self.emails.append((email, password))
def register_number(self, number, carrier):
self.text_on = True
self.numbers.append((number, carrier))
def start(self):
if self.refresh_interval is None:
raise Exception("at least one job must be specified")
if not len(self.emails):
raise Exception("register email before starting")
while True:
self.internal_scheduler.run_pending()
time.sleep(self.refresh_interval)
if __name__ == '__main__':
# port = 465 # For SSL
# sender_email = os.environ['EMAIL1']
# password = os.environ['EMAIL_PASS']
#
# receiver_email = os.environ['EMAIL2']
# message = """\
# Subject: Hi there
#
# This message is sent from Python."""
#
# # Create a secure SSL context
# context = ssl.create_default_context()
#
# receiver_sms_gateway = "2488805628" + "@" + "pm.sprint.com"
#
# with smtplib.SMTP_SSL("smtp.gmail.com", port, context=context) as server:
# server.login(sender_email, password)
# server.sendmail(sender_email, receiver_sms_gateway, message)
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
message = MIMEMultipart()
message['From'] = "[email protected]"
message['To'] = "2488805628" + "@" + carrier_dict["sprint"]
message['Subject'] = "Sent from Arun!"
text = ("From: %s\r\nTo: %s\r\nSubject: %s\r\n\r\n"
% (message['From'], ", ".join(message['To']), message['Subject']))
text += "Hello World!\r\n"
text = "\r\nHello World!"
message.attach(MIMEText(text.encode("utf-8"), "plain", "utf-8"))
server = smtplib.SMTP("smtp.gmail.com", 587)
server.starttls()
server.login(message["From"], "Liculdedav8")
# server.sendmail(message["From"], [message["To"]], text)
server.send_message(message)
|
py | 7dfeeb7892d42a86a5c34d94ef679590ed2e8154 | # -*- coding: utf-8 -*-
import requests
import json
import time
token = 'xoxp-TOKEN' # Slack RTM (Real Time Messaging) API token
hook_url = 'https://' # Slack Incoming Webhook URL
bot_name = '' # Bot name used in Slack messages
channel_id = "#channel_id" # Channel ID
api_files_list = 'https://slack.com/api/files.list'
second_day = 60 * 60 * 24
ts_to = time.time() - 60 * second_day
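# ts_to is the cutoff timestamp passed to files.list, so only files uploaded more than
# 60 days ago are considered for deletion below.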
data = {'token': token, 'ts_to': ts_to, 'types': 'images', 'count': 1000, 'page': 1}
response = requests.post(api_files_list, data = data)
num_total = response.json()['paging']['total']
num_pages = response.json()['paging']['pages']
print "files: {}\npages: {}".format(num_total, num_pages)
list_starred = []
list_private = []
list_delete = []
delete_size = 0
if len(response.json()['files']) == 0:
exit(0)
for p in range(1, num_pages+1):
print "Current page: {}".format(p)
data = {'token': token, 'ts_to': ts_to, 'types': 'images', 'count': 1000, 'page': p}
response = requests.post(api_files_list, data = data)
for f in response.json()['files']:
try:
f['num_stars']
list_starred.append(f['id'])
except:
if f['is_public'] == False:
list_private.append(f['id'])
else:
list_delete.append(f['id'])
delete_size += f['size']
print "Starred files count: {}".format(len(list_starred))
print "Private files count: {}".format(len(list_private))
print "Deleting files count: {}".format(len(list_delete))
print "Starred files: {}".format(list_starred)
print "Total size to be cleaned: {}".format(delete_size)
def call_delete(delete_list):
global token
api_files_delete = 'https://slack.com/api/files.delete'
list_success = []
list_fail = []
for f in delete_list:
data = {'token': token, 'file': f}
response = requests.post(api_files_delete, data = data)
if response.json()['ok'] == True:
list_success.append(f)
print "Deletion succeed: {}".format(f)
else:
list_fail.append(f)
print "Deletion failed: {}".format(f)
print "Succeed files count: {}".format(len(list_success))
print "Failed files count: {}".format(len(list_fail))
print "Failed files: {}".format(list_fail)
def call_report(num_delete, delete_size):
json_channel = channel_id
json_fallback = "슬랙 청소 결과"
json_title = "슬랙 청소 결과"
json_text = "슬랙에 올라온지 60일이 지난 *사진 파일* 들을 일괄 삭제 완료하였습니다\n*별표(star)* 되어있거나, *비공개 채널* 혹은 *DM* 에서 공유된 사진들은 삭제 대상에서 *제외* 되었습니다"
json_value1 = num_delete
json_value2 = delete_size
requests.post(hook_url, json.dumps({
"channel": json_channel,
"username": bot_name,
"attachments": [
{
"fallback": json_fallback,
"title": json_title,
"text": json_text,
"mrkdwn_in": ["text"],
"fields": [
{
"title": "삭제된 사진 개수",
"value": json_value1,
"short": "true"
},
{
"title": "비워진 용량",
"value": json_value2,
"short": "true"
}
],
"color": "#1DAFED"
}
]
}))
# Convert a file size given in bytes to a human-readable unit
suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
def human_readable(nbytes):
if nbytes == 0: return '0 B'
i = 0
while nbytes >= 1024 and i < len(suffixes)-1:
nbytes /= 1024.
i += 1
f = ('%.2f' % nbytes).rstrip('0').rstrip('.')
return '%s%s' % (f, suffixes[i])
# Run
call_delete(list_delete)
call_report(len(list_delete), human_readable(delete_size))
|
py | 7dfeec5e3739e49bda37729f586b600ae7385763 | from reapy.errors import DisconnectedClientError, DistError
from reapy.tools import json
from .socket import Socket
class Client(Socket):
def __init__(self, port):
super(Client, self).__init__()
self._connect(port)
def _connect(self, port):
super(Client, self).connect(("localhost", port))
self.address = self.recv(timeout=None).decode("ascii")
def _get_result(self):
s = self.recv(timeout=None).decode()
return json.loads(s)
def run_program(self, program, input):
"""
Send a program to the server and return its output.
Parameters
----------
program : reapy.tools.Program
Program to run.
input : dict
Input to the program.
Returns
-------
result
Program output
Raises
------
DistError
When an error occurs while the server runs the program, its
traceback is sent to the client and used to raise a
DistError.
"""
program = program.to_dict()
request = {"program": program, "input": input}
request = json.dumps(request).encode()
self.send(request)
result = self._get_result()
if result["type"] == "result":
return result["value"]
elif result["type"] == "error":
raise DistError(result["traceback"])
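# Example usage (a minimal sketch; assumes a reapy dist server is already listening on
# some_port and that some_program is a reapy.tools.Program instance):
#
#   client = Client(some_port)
#   output = client.run_program(some_program, {"x": 42})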
|
py | 7dfeedff4762d3cad73e96ce9eb2b5dd4150f001 | from typing import Any
import pytest
from intents import Sys, LanguageCode, Entity
from intents.model.entity import SystemEntityMixin
from intents.resources.builtin_entities.color import I_IntentsColor
from intents.connectors.interface import entity_mappings
def test_string_entity_mapping():
mapping = entity_mappings.StringEntityMapping(Sys.Integer, "@FakeInteger")
assert mapping.entity_cls == Sys.Integer
assert mapping.service_name == "@FakeInteger"
assert mapping.from_service("42") == Sys.Integer("42")
assert mapping.to_service(42) == "42"
def test_patched_entity_mapping():
mapping = entity_mappings.PatchedEntityMapping(Sys.Color, I_IntentsColor)
assert mapping.entity_cls == Sys.Color
assert mapping.service_name == "I_IntentsColor"
assert mapping.from_service("red") == Sys.Color("red")
assert mapping.to_service(Sys.Color("red")) == "red"
#
# ServiceEntityMappings
#
class MockCustomMapping(entity_mappings.EntityMapping):
entity_cls = Sys.Person
service_name = "@FakePerson"
supported_languages = [LanguageCode.ENGLISH]
def from_service(self, service_data: Any) -> SystemEntityMixin:
return Sys.Person(service_data["name"])
def to_service(self, entity: SystemEntityMixin) -> Any:
return str({"name": entity})
MOCK_MAPPINGS = entity_mappings.ServiceEntityMappings.from_list([
entity_mappings.StringEntityMapping(Sys.PhoneNumber, "@FakePhoneNumber"),
MockCustomMapping()
])
def test_mappings__sys_found():
mapping = MOCK_MAPPINGS.lookup(Sys.PhoneNumber)
assert mapping == entity_mappings.StringEntityMapping(Sys.PhoneNumber, "@FakePhoneNumber")
assert MOCK_MAPPINGS.service_name(Sys.PhoneNumber) == "@FakePhoneNumber"
assert MOCK_MAPPINGS.is_mapped(Sys.PhoneNumber, LanguageCode.ENGLISH)
assert MOCK_MAPPINGS.is_mapped(Sys.PhoneNumber, LanguageCode.SPANISH_LATIN_AMERICA)
def test_mappings__supported_languages():
mapping = MOCK_MAPPINGS.lookup(Sys.Person)
assert isinstance(mapping, MockCustomMapping)
assert MOCK_MAPPINGS.service_name(Sys.Person) == "@FakePerson"
assert MOCK_MAPPINGS.is_mapped(Sys.Person, LanguageCode.ENGLISH)
assert not MOCK_MAPPINGS.is_mapped(Sys.Person, LanguageCode.SPANISH_LATIN_AMERICA)
def test_mappings__sys_not_found():
with pytest.raises(KeyError):
MOCK_MAPPINGS.lookup(Sys.Color)
assert not MOCK_MAPPINGS.is_mapped(Sys.Color, LanguageCode.ENGLISH)
def test_mappings__custom_entity():
class MyCustomEntity(Entity):
pass
mapping = MOCK_MAPPINGS.lookup(MyCustomEntity)
assert mapping == entity_mappings.StringEntityMapping(
entity_cls=MyCustomEntity,
service_name="MyCustomEntity"
)
assert MOCK_MAPPINGS.service_name(MyCustomEntity) == "MyCustomEntity"
|
py | 7dfeee00242ed6334620f58a54180906ac3512af | import os
if __name__ == '__main__':
small_data_set_lst = ['ca-GrQc', 'ca-HepTh', 'p2p-Gnutella06', 'wiki-Vote']
data_set_lst = [
# 'email-Enron', 'email-EuAll',
# 'web-NotreDame', 'web-Stanford', 'web-BerkStan', 'web-Google', 'cit-Patents',
# 'soc-LiveJournal1',
# 'wiki-Link',
'digg-friends',
'flickr-growth',
]
generate_exec_path = '/homes/ywangby/workspace/yche/new-git-repos-yche/SimRank/LPMC-Profile/build/util/bin_converter'
for data_set in small_data_set_lst + data_set_lst:
cmd = ' '.join(map(str, [generate_exec_path, data_set]))
print cmd
os.system(cmd)
|
py | 7dfeee87389b8f757d22190acccf7e15c7b16c16 | from django.contrib import admin
from prozhito_app.models import *
# Register your models here.
from django.contrib.gis import admin
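# Note: this second import rebinds `admin` to django.contrib.gis.admin, so the GIS-enabled
# admin module is what the ModelAdmin subclasses and registrations below use.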
from mapwidgets.widgets import GooglePointFieldWidget
class PersonAdmin(admin.ModelAdmin):
search_fields = ['first_name', 'patronymic', 'family_name',]
list_filter = ['from_tags', 'from_natasha']
admin.site.register(Person, PersonAdmin)
class PlaceAdmin(admin.ModelAdmin):
search_fields = ['name',]
formfield_overrides = {
models.PointField: {"widget": GooglePointFieldWidget}
}
admin.site.register(Place, PlaceAdmin)
class EntryAdmin(admin.ModelAdmin):
search_fields = ['id', ]
list_filter = ['sentiment']
autocomplete_fields = ['people', 'keywords']
admin.site.register(Entry, EntryAdmin)
class KeywordAdmin(admin.ModelAdmin):
search_fields = ['name',]
admin.site.register(Keyword, KeywordAdmin)
class DiaryAdmin(admin.ModelAdmin):
search_fields = ['id',]
list_filter = ['no_entries','first_note', 'last_note']
admin.site.register(Diary, DiaryAdmin)
|
py | 7dfeef0e847518ae997fc5244cc2a1e2aca3416f | import matplotlib.pyplot as plt
import plotly.express as px
import pandas as pd
article_csv = './Data/Original Data/articles.csv'
customer_csv = './Data/Original Data/customers.csv'
tx_csv = './Data/Original Data/transections_train.csv'
wage_csv = './Data/Original Data/Iowa_Wage_Data_by_Occupation.csv'
article_df = pd.read_csv(article_csv)
article_df.head()
print("shape of article data", article_df.shape)
customer_df = pd.read_csv(customer_csv)
customer_df.head()
print("shape of customer data", customer_df.shape)
tx_df = pd.read_csv(tx_csv)
tx_df.head()
print("shape of transactions data", tx_df.shape)
# Function to plot the NaN percentage of each column
def plot_nas(df: pd.DataFrame):
if df.isnull().sum().sum() != 0:
na_df = (df.isnull().sum() / len(df)) * 100
# rearrange the last col
# cols = df.columns.to_list()
# cols = cols[-1:] + cols[:-1]
# na_df = na_df[cols]
# delete the 0 % col
# na_df = na_df.drop(na_df[na_df == 0].index).sort_values(ascending=False)
missing_data = pd.DataFrame({'Missing Ratio %': na_df})
missing_data.plot(kind="barh")
plt.show()
else:
print('No NAs found')
print("Checking null in data")
plot_nas(article_df)
plot_nas(customer_df)
plot_nas(tx_df)
def plot_bar(df, column):
long_df = pd.DataFrame(
df.groupby(column)['customer_id'].count().reset_index().rename({'customer_id': 'count'},
axis=1))
fig = px.bar(long_df, x=column, y="count", color=column, title=f"bar plot for {column} ")
fig.show()
def plot_hist(df, column):
fig = px.histogram(df, x=column, nbins=10, title=f'{column} distribution ')
fig.show()
def plot_bar(df, column):
long_df = pd.DataFrame(
df.groupby(column)['article_id'].count().reset_index().rename({'article_id': 'count'},
axis=1))
fig = px.bar(long_df, x=column, y="count", color=column, title=f"bar plot for {column} ")
fig.show()
def plot_hist(df, column):
fig = px.histogram(df, x=column, nbins=10, title=f'{column} distribution ')
fig.show()
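# Note: the plot_bar/plot_hist definitions above (grouping counts by 'article_id') replace the
# earlier pair that counted 'customer_id', so every call below uses the 'article_id' versions.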
plot_bar(customer_df, 'age')
plot_bar(customer_df, 'postal_code')
plot_bar(customer_df, 'product_type_name')
plot_bar(customer_df, 'product_group_name')
plot_bar(customer_df, 'graphical_appearance_name')
plot_bar(customer_df, 'index_name')
plot_bar(customer_df, 'garment_group_name')
|
py | 7dfef032ba47384edda838e5d65730d7e18b3624 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import json
import logging
import os
from nni.tuner import Tuner
from nni.utils import extract_scalar_reward
_logger = logging.getLogger('NaiveTuner')
_logger.info('start')
_pwd = os.path.dirname(__file__)
_result = open(os.path.join(_pwd, 'tuner_result.txt'), 'w')
class NaiveTuner(Tuner):
def __init__(self, optimize_mode):
self.cur = 0
_logger.info('init')
def generate_parameters(self, parameter_id, **kwargs):
self.cur += 1
_logger.info('generate parameters: %s' % self.cur)
return { 'x': self.cur }
def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
reward = extract_scalar_reward(value)
_logger.info('receive trial result: %s, %s, %s' % (parameter_id, parameters, reward))
_result.write('%d %d\n' % (parameters['x'], reward))
_result.flush()
def update_search_space(self, search_space):
_logger.info('update_search_space: %s' % search_space)
with open(os.path.join(_pwd, 'tuner_search_space.json'), 'w') as file_:
json.dump(search_space, file_)
def _on_exit(self):
_result.close()
def _on_error(self):
_result.write('ERROR\n')
_result.close()
|
py | 7dfef18b3316b29654401f9a343e91bdb73824c7 | #!/usr/bin/env python
import rospy
import math
import time
import sys, select, termios, tty
import os
from std_msgs.msg import Empty
import geometry_msgs.msg
from geometry_msgs.msg import Twist
from geometry_msgs.msg import TwistStamped
x = 0
y = 0
z = 0
from Tkinter import *
import ttk
import threading
import ars_control
import subprocess
global window
window= Tk()
window.config(background= "#41B77F")
prompt='Click any button, or press a key'
L = Label(window, text=prompt, width=len(prompt))
L.pack()
cmd = """
#!/bin/bash
echo "Stopping Robot":
rostopic pub -1 /robot_cmd_stamped geometry_msgs/TwistStamped "
header:
seq: 0
stamp:
secs: 0
nsecs: 0
frame_id: ''
twist:
linear:
x: 0.0
y: 0.0
z: 0.0
angular:
x: 0.0
y: 0.0
z: 0.0"
"""
def key(event):
if event.char == event.keysym:
msg ='Normal Key %r' % event.char
elif len(event.char) == 1:
msg ='Punctuation Key %r (%r)' % (event.keysym, event.char)
else:
msg ='Special Key %r' % event.keysym
L.config(text=msg)
L.bind_all('<Key>', key)
def do_mouse(eventname):
def mouse_binding(event):
msg = 'Mouse event %s' % eventname
L.config(text=msg)
L.bind_all('<%s>'%eventname, mouse_binding)
for i in range(1,4):
do_mouse('Button-%s' % i)
do_mouse('ButtonRelease-%s' % i)
do_mouse('Double-Button-%s' % i)
def quit():
distro = window.destroy()
exit = sys.exit()
return distro, exit
def getdistance():
distance = float(distance_selected.get())
return distance
def getspeed():
speed = float(speed_selected.get())
return speed
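# Note: stop_moving() below shells out to the `cmd` snippet defined near the top of the file,
# which publishes a single zero-velocity TwistStamped on /robot_cmd_stamped, intended to halt
# the drone. Because the target is written as os.system(cmd), the command actually runs while
# the Thread object is being constructed rather than inside the started thread.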
def stop_moving():
t = threading.Thread(target=os.system(cmd))
t.start()
return
def moveforward_background():
speed= getspeed()
distance=getdistance()
t = threading.Thread(target= ars_control.moveX(speed, distance, True))
t.start()
def moveback_background():
speed= getspeed()
distance=getdistance()
t = threading.Thread(target= ars_control.moveX(speed, distance, False))
t.start()
def moveleft_background():
speed= getspeed()
distance=getdistance()
t = threading.Thread(target= ars_control.moveY(speed, distance, True))
t.start()
def moveright_background():
speed= getspeed()
distance=getdistance()
t = threading.Thread(target= ars_control.moveY(speed, distance, False))
t.start()
def return_home():
t = threading.Thread(target= ars_control.control())
t.start()
def goup_background():
speed= getspeed()
distance=getdistance()
t = threading.Thread(target= ars_control.moveZ(speed, distance, True))
t.start()
def godown_background():
speed= getspeed()
distance=getdistance()
t = threading.Thread(target= ars_control.moveZ(speed, distance, False))
t.start()
def rotationmoveleft_background():
t = threading.Thread(target= ars_control.rotate(10, 65, True))
t.start()
def rotationmoveright_background():
t = threading.Thread(target= ars_control.rotate(10, 65, False))
t.start()
#Define a callback function for exit
def quit_program(e):
window.destroy()
if __name__ == '__main__':
try:
rospy.init_node('ars_remote_controller_test', anonymous=True)
position_topic = "/robot_cmd_stamped" #(change me /robot_cmd_stamped)
pose_publisher = rospy.Publisher(position_topic, TwistStamped, queue_size=1)
label_title= Label(window, text= "Controller", font=("Courrier",40), bg = "#41B77F", fg= "white")
label_title.pack()
window.title("Nao_Drone")
window.geometry("1080x600")
window.minsize(1000,500)
#user choice of speed and distance
speed_label= Label(window,text = "Speed").place(x=30,y=50)
distance_label= Label(window, text= "Distance").place(x=740,y=50)
distance_var= StringVar()
distance_selected= ttk.Combobox(window, width=20,textvariable=distance_var)
distance_selected['values']=('0.1','0.2','0.3','0.5')
distance_selected.place(x=800,y=50)
distance_selected.current(0)
speed_var= StringVar()
speed_selected= ttk.Combobox(window, width=20,textvariable=speed_var)
speed_selected['values']=('0.1','0.2','0.3','0.5')
speed_selected.place(x=80,y=50)
speed_selected.current(0)
moveforward_button = Button(window, text="Move Forward", height= "3", width="20",command = moveforward_background).place(x=450,y=150)
moveback_button= Button(window, text="Move Back", height= "3", width="20", command = moveback_background).place(x=450,y=350)
moveleft_button= Button(window, text="Move Left", height= "3", width="20", command= moveleft_background).place(x=350,y=250)
moveright_button = Button(window, text="Move Right", height= "3", width="20", command = moveright_background).place(x=550,y=250)
goup_button = Button(window, text= "Go Up", height="3", width="20", command= goup_background).place(x = 450, y = 450)
godown_button = Button(window, text= "Go Down", height="3", width= "20", command= godown_background).place(x= 450, y =520)
quit_button = Button(window, text= "Quit Interface", height = "3", width= "20", command = quit).place(x=30, y= 300)
init_state_button = Button(window, text= "Stop moving", height = "3", width= "20", command = stop_moving).place(x=30, y= 420)
rotationmoveleft_button= Button(window, text= "Rotate to the left", height="3", width= "20", command= rotationmoveleft_background).place(x=800, y=450)
rotationmoveright_button = Button(window, text = "Rotate to the right", height="3", width= "20", command= rotationmoveright_background).place(x=800,y=520)
#Add a Label widget
label = Label(window, text= "Press Ctrl + x to Exit", font= ('Helvetica 15 bold'))
label.pack(pady=10)
#Bind the Keyboard shortcut Key
window.bind('<Control-x>', quit_program)
window.mainloop()
while True:
window.mainloop()
except rospy.ROSInterruptException:
rospy.loginfo("node terminated.") |
py | 7dfef2219106ca2966dacef581d8268b98b2b5c8 | from django.urls import include, path
from . import views
urlpatterns = [
path('', views.home, name='home'),
path('landing/', views.mainApp, name='index'),
] |
"""Functions for running energy evaluations with OpenMM."""
from typing import Dict, Optional
import numpy as np
import openmm
from openmm import unit
from openff.interchange.components.interchange import Interchange
from openff.interchange.drivers.report import EnergyReport
kj_mol = unit.kilojoule_per_mole
def get_openmm_energies(
off_sys: Interchange,
round_positions: Optional[int] = None,
hard_cutoff: bool = False,
electrostatics: bool = True,
combine_nonbonded_forces: bool = False,
) -> EnergyReport:
"""
Given an OpenFF Interchange object, return single-point energies as computed by OpenMM.
.. warning :: This API is experimental and subject to change.
Parameters
----------
off_sys : openff.interchange.components.interchange.Interchange
An OpenFF Interchange object to compute the single-point energy of
round_positions : int, optional
The number of decimal places, in nanometers, to round positions. This can be useful when
comparing to i.e. GROMACS energies, in which positions may be rounded.
writer : str, default="internal"
A string key identifying the backend to be used to write OpenMM files. The
default value of `"internal"` results in this package's exporters being used.
hard_cutoff : bool, default=False
Whether or not to apply a hard cutoff (no switching function or dispersion correction)
to the `openmm.NonbondedForce` in the generated `openmm.System`. Note that this will
truncate electrostatics to the non-bonded cutoff.
electrostatics : bool, default=True
A boolean indicating whether or not electrostatics should be included in the energy
calculation.
combine_nonbonded_forces : bool, default=False
Whether or not to combine all non-bonded interactions (vdW, short- and long-range
electrostatics, and 1-4 interactions) into a single openmm.NonbondedForce.
Returns
-------
report : EnergyReport
An `EnergyReport` object containing the single-point energies.
"""
positions = off_sys.positions
if "VirtualSites" in off_sys.handlers:
if len(off_sys["VirtualSites"].slot_map) > 0:
if not combine_nonbonded_forces:
raise NotImplementedError(
"Cannot yet split out NonbondedForce components while virtual sites are present."
)
n_virtual_sites = len(off_sys["VirtualSites"].slot_map)
# TODO: Actually compute virtual site positions based on initial conformers
virtual_site_positions = np.zeros((n_virtual_sites, 3))
virtual_site_positions *= off_sys.positions.units
positions = np.vstack([positions, virtual_site_positions])
omm_sys: openmm.System = off_sys.to_openmm(
combine_nonbonded_forces=combine_nonbonded_forces
)
return _get_openmm_energies(
omm_sys=omm_sys,
box_vectors=off_sys.box,
positions=positions,
round_positions=round_positions,
hard_cutoff=hard_cutoff,
electrostatics=electrostatics,
)
def _get_openmm_energies(
omm_sys: openmm.System,
box_vectors,
positions,
round_positions=None,
hard_cutoff=False,
electrostatics: bool = True,
) -> EnergyReport:
"""Given a prepared `openmm.System`, run a single-point energy calculation."""
"""\
if hard_cutoff:
omm_sys = _set_nonbonded_method(
omm_sys, "cutoff", electrostatics=electrostatics
)
else:
omm_sys = _set_nonbonded_method(omm_sys, "PME")
"""
for idx, force in enumerate(omm_sys.getForces()):
force.setForceGroup(idx)
integrator = openmm.VerletIntegrator(1.0 * unit.femtoseconds)
context = openmm.Context(omm_sys, integrator)
if box_vectors is not None:
if not isinstance(box_vectors, (unit.Quantity, list)):
box_vectors = box_vectors.magnitude * unit.nanometer
context.setPeriodicBoxVectors(*box_vectors)
if isinstance(positions, unit.Quantity):
# Convert list of Vec3 into a NumPy array
positions = np.asarray(positions.value_in_unit(unit.nanometer)) * unit.nanometer
else:
positions = positions.magnitude * unit.nanometer
if round_positions is not None:
rounded = np.round(positions, round_positions)
context.setPositions(rounded)
else:
context.setPositions(positions)
raw_energies = dict()
omm_energies = dict()
for idx in range(omm_sys.getNumForces()):
state = context.getState(getEnergy=True, groups={idx})
raw_energies[idx] = state.getPotentialEnergy()
del state
# This assumes that only custom forces will have duplicate instances
for key in raw_energies:
force = omm_sys.getForce(key)
if type(force) == openmm.HarmonicBondForce:
omm_energies["HarmonicBondForce"] = raw_energies[key]
elif type(force) == openmm.HarmonicAngleForce:
omm_energies["HarmonicAngleForce"] = raw_energies[key]
elif type(force) == openmm.PeriodicTorsionForce:
omm_energies["PeriodicTorsionForce"] = raw_energies[key]
elif type(force) in [
openmm.NonbondedForce,
openmm.CustomNonbondedForce,
openmm.CustomBondForce,
]:
energy_type = _infer_nonbonded_energy_type(force)
if energy_type == "None":
continue
if energy_type in omm_energies:
omm_energies[energy_type] += raw_energies[key]
else:
omm_energies[energy_type] = raw_energies[key]
# Fill in missing keys if interchange does not have all typical forces
for required_key in [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]:
if not any(required_key in val for val in omm_energies):
pass # omm_energies[required_key] = 0.0 * kj_mol
del context
del integrator
report = EnergyReport()
report.update_energies(
{
"Bond": omm_energies.get("HarmonicBondForce", 0.0 * kj_mol),
"Angle": omm_energies.get("HarmonicAngleForce", 0.0 * kj_mol),
"Torsion": _canonicalize_torsion_energies(omm_energies),
}
)
if "Nonbonded" in omm_energies:
report.update_energies(
{"Nonbonded": _canonicalize_nonbonded_energies(omm_energies)}
)
report.energies.pop("vdW")
report.energies.pop("Electrostatics")
else:
report.update_energies({"vdW": omm_energies.get("vdW", 0.0 * kj_mol)})
report.update_energies(
{"Electrostatics": omm_energies.get("Electrostatics", 0.0 * kj_mol)}
)
return report
def _infer_nonbonded_energy_type(force):
if type(force) == openmm.NonbondedForce:
has_electrostatics = False
has_vdw = False
for i in range(force.getNumParticles()):
if has_electrostatics and has_vdw:
continue
params = force.getParticleParameters(i)
if not has_electrostatics:
if params[0]._value != 0:
has_electrostatics = True
if not has_vdw:
if params[2]._value != 0:
has_vdw = True
if has_electrostatics and not has_vdw:
return "Electrostatics"
if has_vdw and not has_electrostatics:
return "vdW"
if has_vdw and has_electrostatics:
return "Nonbonded"
if not has_vdw and not has_electrostatics:
return "None"
if type(force) == openmm.CustomNonbondedForce:
if "epsilon" in force.getEnergyFunction() or "sigma" in force.getEnergyFunction():
return "vdW"
if type(force) == openmm.CustomBondForce:
if "qq" in force.getEnergyFunction():
return "Electrostatics"
else:
return "vdW"
raise Exception(type(force))
def _canonicalize_nonbonded_energies(energies: Dict):
omm_nonbonded = 0.0 * kj_mol
for key in [
"Nonbonded",
"NonbondedForce",
"CustomNonbondedForce",
"CustomBondForce",
]:
try:
omm_nonbonded += energies[key]
except KeyError:
pass
return omm_nonbonded
def _canonicalize_torsion_energies(energies: Dict):
omm_torsion = 0.0 * kj_mol
for key in ["PeriodicTorsionForce", "RBTorsionForce"]:
try:
omm_torsion += energies[key]
except KeyError:
pass
return omm_torsion
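# Example usage (a minimal sketch; assumes `off_sys` is an already-built
# openff.interchange Interchange object):
#
#   report = get_openmm_energies(off_sys, combine_nonbonded_forces=True)
#   print(report.energies)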
|
py | 7dfef2c7aa7709569ef8f490a2a8ae6fe1618867 | """
Script for dumping filtered DB data per Django's dumpdata.
This is intended as a one-time script, so filters are hard-coded. We can add arguments
later if this gets more use.
"""
from typing import Dict, Any, List, Union
import os
import sys
import json
from datetime import date, datetime
import django
from django.db.models import Model, Q
from django.conf import settings
from mypy_extensions import TypedDict
PROJECT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
if PROJECT_PATH not in sys.path:
sys.path.append(PROJECT_PATH)
django.setup()
from server.models import Prediction # pylint: disable=wrong-import-position
DumpRecord = TypedDict(
"DumpRecord", {"model": str, "pk": int, "fields": Dict[str, Any]}
)
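# Example of the record shape _reshape_record_fields produces (pk and field names below
# are illustrative only):
# {"model": "server.prediction", "pk": 1, "fields": {"predicted_margin": 12.5}}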
APP_NAME = "server"
def _clean_value(value: Any) -> Union[str, int, float]:
if type(value) in [datetime, date]: # pylint: disable=unidiomatic-typecheck
return str(value)
return value
def _reshape_record_fields(model_name: str, record: Dict[str, Any]) -> DumpRecord:
fields = {k: _clean_value(v) for k, v in record.items() if k != "id"}
return {"model": APP_NAME + "." + model_name, "pk": record["id"], "fields": fields}
def _get_fields_for(model_class: Model) -> List[str]:
return [
field.attname
for field in model_class._meta.get_fields() # pylint: disable=protected-access
# Association fields appear in list, but aren't attributes
# unless the given record has the foreign key.
if hasattr(field, "attname")
]
def main():
"""Dump filtered DB data per Django's dumpdata."""
season_2019_preds_for_tipresias_2020 = Q(ml_model__name="tipresias_2020") & Q(
match__start_date_time__year=2019
)
season_2020_preds_for_round_1 = Q(match__start_date_time__year=2020) & (
Q(match__round_number=1)
)
prediction_records = Prediction.objects.filter(
season_2019_preds_for_tipresias_2020 | season_2020_preds_for_round_1
).values(*_get_fields_for(Prediction))
prediction_dump = [
_reshape_record_fields("prediction", record) for record in prediction_records
]
dump_filepath = os.path.join(
settings.BASE_DIR,
APP_NAME,
"fixtures",
f"{date.today()}-prediction-dump.json",
)
with open(dump_filepath, "w", encoding="utf-8") as file:
json.dump(prediction_dump, file, indent=2)
if __name__ == "__main__":
main()
|
py | 7dfef301548a37737a791de6404e218399b2d84c | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Backend service."""
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.calliope import exceptions
class BackendService(object):
"""Abstracts BackendService resource."""
def __init__(self, ref, compute_client=None):
self.ref = ref
self._compute_client = compute_client
@property
def _client(self):
return self._compute_client.apitools_client
@property
def _messages(self):
return self._compute_client.messages
def _MakeGetRequestTuple(self):
region = getattr(self.ref, 'region', None)
if region is not None:
return (self._client.regionBackendServices, 'Get',
self._messages.ComputeRegionBackendServicesGetRequest(
project=self.ref.project,
region=region,
backendService=self.ref.Name()))
else:
return (self._client.backendServices, 'Get',
self._messages.ComputeBackendServicesGetRequest(
project=self.ref.project, backendService=self.ref.Name()))
def _MakeDeleteRequestTuple(self):
region = getattr(self.ref, 'region', None)
if region is not None:
return (self._client.regionBackendServices, 'Delete',
self._messages.ComputeRegionBackendServicesDeleteRequest(
project=self.ref.project,
region=region,
backendService=self.ref.Name()))
else:
return (self._client.backendServices, 'Delete',
self._messages.ComputeBackendServicesDeleteRequest(
project=self.ref.project, backendService=self.ref.Name()))
def _MakeGetHealthRequestTuple(self, group):
region = getattr(self.ref, 'region', None)
if region is not None:
return (self._client.regionBackendServices, 'GetHealth',
self._messages.ComputeRegionBackendServicesGetHealthRequest(
resourceGroupReference=self._messages.ResourceGroupReference(
group=group),
project=self.ref.project,
region=region,
backendService=self.ref.Name()))
else:
return (self._client.backendServices, 'GetHealth',
self._messages.ComputeBackendServicesGetHealthRequest(
resourceGroupReference=self._messages.ResourceGroupReference(
group=group),
project=self.ref.project,
backendService=self.ref.Name()))
def _MakeSetSecurityPolicyRequestTuple(self, security_policy):
region = getattr(self.ref, 'region', None)
if region is not None:
raise exceptions.InvalidArgumentException(
'region', 'Can only set security policy for global backend services.')
return (
self._client.backendServices, 'SetSecurityPolicy',
self._messages.ComputeBackendServicesSetSecurityPolicyRequest(
securityPolicyReference=self._messages.SecurityPolicyReference(
securityPolicy=security_policy),
project=self.ref.project,
backendService=self.ref.Name()))
def Delete(self, only_generate_request=False):
requests = [self._MakeDeleteRequestTuple()]
if not only_generate_request:
return self._compute_client.MakeRequests(requests)
return requests
def Get(self, only_generate_request=False):
"""Fetches the backend service resource."""
requests = [self._MakeGetRequestTuple()]
if not only_generate_request:
responses = self._compute_client.MakeRequests(requests)
return responses[0]
return requests
def GetHealth(self):
"""Issues series of gethealth requests for each backend group.
Yields:
{'backend': backend.group, 'status': backend_service.GetHealthResponse}
"""
backend_service = self.Get()
# Call GetHealth for each group in the backend service
# Instead of batching-up all requests and making a single
# request_helper.MakeRequests call, go one backend at a time.
# We do this because getHealth responses don't say what resource
# they correspond to. It's not obvious how to reliably match up
# responses and backends when there are errors. Additionally the contract
# for batched requests doesn't guarantee response order will match
# request order.
#
# TODO(b/25015230): Make a single batch request once the response
# can be mapped back to resource.
errors = []
for backend in backend_service.backends:
# The list() call below is intended to force the generator returned by
# MakeRequests. If there are exceptions the command will abort, which is
# expected. Having a list simplifies some of the checks that follow.
resources = self._compute_client.MakeRequests(
[self._MakeGetHealthRequestTuple(backend.group)], errors)
# For failed request error information will accumulate in errors
if resources:
yield {'backend': backend.group, 'status': resources[0]}
if errors:
utils.RaiseToolException(
errors, error_message='Could not get health for some groups:')
def SetSecurityPolicy(self, security_policy='', only_generate_request=False):
"""Sets the security policy for the backend service."""
requests = [self._MakeSetSecurityPolicyRequestTuple(security_policy)]
if not only_generate_request:
return self._compute_client.MakeRequests(requests)
return requests
|
py | 7dfef4347f044e8a3b2e68475e3d6960a068124d | #-*- coding:utf-8 -*-
from kivy.lang import Builder
from kivy.uix.screenmanager import Screen
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import ObjectProperty
Builder.load_file("kvdialog/playmenu.kv")
class CBoxContainer(FloatLayout):
pass
class CPlayMenu(Screen):
m_BoxContainer = ObjectProperty()
def __init__(self, **kwargs):
super(CPlayMenu, self).__init__(**kwargs)
self.m_BoxManager = None
def buildboxs(self):
self.m_BoxContainer.clear_widgets() |
py | 7dfef46908f3c44ac0ede35ec5fc921605cf0d6f | #!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
List all Anti-DDoS Floating IPs and limit the output with query parameters.
"""
import openstack
openstack.enable_logging(True)
conn = openstack.connect(cloud='otc')
for fip in conn.anti_ddos.floating_ips():
print(fip)
|
py | 7dfef49b380d910253fe9394219234ca21a2948e | """
This file will be copied to a temporary directory in order to
exercise caching compiled Numba functions.
See test_dispatcher.py.
"""
import sys
import numpy as np
from numba import jit, generated_jit, types, prange
from numba.tests.ctypes_usecases import c_sin
from numba.tests.support import TestCase, captured_stderr
@jit(cache=True, nopython=True)
def simple_usecase(x):
return x
def simple_usecase_caller(x):
return simple_usecase(x)
@jit(cache=True, nopython=True)
def add_usecase(x, y):
return x + y + Z
@jit(cache=True, forceobj=True)
def add_objmode_usecase(x, y):
object()
return x + y + Z
@jit(nopython=True)
def add_nocache_usecase(x, y):
return x + y + Z
@generated_jit(cache=True, nopython=True)
def generated_usecase(x, y):
if isinstance(x, types.Complex):
def impl(x, y):
return x + y
else:
def impl(x, y):
return x - y
return impl
@jit(cache=True, nopython=True)
def inner(x, y):
return x + y + Z
@jit(cache=True, nopython=True)
def outer(x, y):
return inner(-y, x)
@jit(cache=False, nopython=True)
def outer_uncached(x, y):
return inner(-y, x)
@jit(cache=True, forceobj=True)
def looplifted(n):
object()
res = 0
for i in range(n):
res = res + i
return res
@jit(cache=True, nopython=True)
def use_c_sin(x):
return c_sin(x)
@jit(cache=True, nopython=True)
def use_c_sin_nest1(x):
return use_c_sin(x)
@jit(cache=True, nopython=True)
def use_c_sin_nest2(x):
return use_c_sin_nest1(x)
@jit(cache=True, nopython=True)
def ambiguous_function(x):
return x + 2
renamed_function1 = ambiguous_function
@jit(cache=True, nopython=True)
def ambiguous_function(x):
return x + 6
renamed_function2 = ambiguous_function
def make_closure(x):
@jit(cache=True, nopython=True)
def closure(y):
return x + y
return closure
closure1 = make_closure(3)
closure2 = make_closure(5)
biggie = np.arange(10**6)
@jit(cache=True, nopython=True)
def use_big_array():
return biggie
Z = 1
# Exercise returning a record instance. This used to hardcode the dtype
# pointer's value in the bitcode.
packed_record_type = np.dtype([('a', np.int8), ('b', np.float64)])
aligned_record_type = np.dtype([('a', np.int8), ('b', np.float64)], align=True)
packed_arr = np.empty(2, dtype=packed_record_type)
for i in range(packed_arr.size):
packed_arr[i]['a'] = i + 1
packed_arr[i]['b'] = i + 42.5
aligned_arr = np.array(packed_arr, dtype=aligned_record_type)
@jit(cache=True, nopython=True)
def record_return(ary, i):
return ary[i]
class _TestModule(TestCase):
"""
Tests for functionality of this module's functions.
Note this does not define any "test_*" method, instead check_module()
should be called by hand.
"""
def check_module(self, mod):
self.assertPreciseEqual(mod.add_usecase(2, 3), 6)
self.assertPreciseEqual(mod.add_objmode_usecase(2, 3), 6)
self.assertPreciseEqual(mod.outer_uncached(3, 2), 2)
self.assertPreciseEqual(mod.outer(3, 2), 2)
self.assertPreciseEqual(mod.generated_usecase(3, 2), 1)
packed_rec = mod.record_return(mod.packed_arr, 1)
self.assertPreciseEqual(tuple(packed_rec), (2, 43.5))
aligned_rec = mod.record_return(mod.aligned_arr, 1)
self.assertPreciseEqual(tuple(aligned_rec), (2, 43.5))
# For 2.x
def runTest(self):
raise NotImplementedError
def self_test():
mod = sys.modules[__name__]
_TestModule().check_module(mod)
@jit(parallel=True, cache=True, nopython=True)
def parfor_usecase(ary):
return ary * ary + ary
|
py | 7dfef4c57503577a5f5be6afec52f33318e532c9 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import copy
import datetime
import glob
import hashlib
import inspect
import json
import logging
import numbers
import os
import pickle
import re
import shutil
import socket
import subprocess
import sys
import threading
import time
import uuid
import zipfile
from io import BytesIO
from pathlib import Path
from queue import Empty as EmptyQueue
from queue import Queue
from string import Template
import yaml
from google.protobuf.any_pb2 import Any
from graphscope.framework import utils
from graphscope.framework.errors import CompilationError
from graphscope.framework.graph_schema import GraphSchema
from graphscope.proto import attr_value_pb2
from graphscope.proto import data_types_pb2
from graphscope.proto import graph_def_pb2
from graphscope.proto import op_def_pb2
from graphscope.proto import types_pb2
from gscoordinator.io_utils import PipeWatcher
logger = logging.getLogger("graphscope")
# runtime workspace
try:
WORKSPACE = os.environ["GRAPHSCOPE_RUNTIME"]
except KeyError:
WORKSPACE = "/tmp/gs"
# COORDINATOR_HOME
# 1) get from gscoordinator python module, if failed,
# 2) infer from current directory
try:
import gscoordinator
COORDINATOR_HOME = os.path.abspath(os.path.join(gscoordinator.__file__, "..", ".."))
except ModuleNotFoundError:
COORDINATOR_HOME = os.path.abspath(os.path.join(__file__, "..", ".."))
# template directory for codegen
TEMPLATE_DIR = os.path.join(COORDINATOR_HOME, "gscoordinator", "template")
# builtin app resource
BUILTIN_APP_RESOURCE_PATH = os.path.join(
COORDINATOR_HOME, "gscoordinator", "builtin/app/builtin_app.gar"
)
# default config file in gar resource
DEFAULT_GS_CONFIG_FILE = ".gs_conf.yaml"
DEFAULT_GRAPHSCOPE_HOME = "/opt/graphscope"
# GRAPHSCOPE_HOME
# 1) get from environment variable `GRAPHSCOPE_HOME`, if not exist,
# 2) infer from COORDINATOR_HOME
GRAPHSCOPE_HOME = os.environ.get("GRAPHSCOPE_HOME", None)
# resolve from pip installed package
if GRAPHSCOPE_HOME is None:
if os.path.isdir(os.path.join(COORDINATOR_HOME, "graphscope.runtime")):
GRAPHSCOPE_HOME = os.path.join(COORDINATOR_HOME, "graphscope.runtime")
# find from DEFAULT_GRAPHSCOPE_HOME
if GRAPHSCOPE_HOME is None:
if os.path.isdir(DEFAULT_GRAPHSCOPE_HOME):
GRAPHSCOPE_HOME = DEFAULT_GRAPHSCOPE_HOME
# resolve from develop source tree
if GRAPHSCOPE_HOME is None:
GRAPHSCOPE_HOME = os.path.join(COORDINATOR_HOME, "..")
# ANALYTICAL_ENGINE_HOME
# 1) infer from GRAPHSCOPE_HOME
ANALYTICAL_ENGINE_HOME = os.path.join(GRAPHSCOPE_HOME)
ANALYTICAL_ENGINE_PATH = os.path.join(ANALYTICAL_ENGINE_HOME, "bin", "grape_engine")
if not os.path.isfile(ANALYTICAL_ENGINE_PATH):
# try get analytical engine from build dir
ANALYTICAL_ENGINE_HOME = os.path.join(GRAPHSCOPE_HOME, "analytical_engine")
ANALYTICAL_ENGINE_PATH = os.path.join(
ANALYTICAL_ENGINE_HOME, "build", "grape_engine"
)
# INTERACTIVE_ENGINE_SCRIPT
INTERAVTIVE_INSTANCE_TIMEOUT_SECONDS = 600 # 10 mins
INTERACTIVE_ENGINE_SCRIPT = os.path.join(GRAPHSCOPE_HOME, "bin", "giectl")
if not os.path.isfile(INTERACTIVE_ENGINE_SCRIPT):
INTERACTIVE_ENGINE_SCRIPT = os.path.join(
GRAPHSCOPE_HOME, "interactive_engine", "bin", "giectl"
)
# JAVA SDK related CONSTANTS
LLVM4JNI_HOME = os.environ.get("LLVM4JNI_HOME", None)
LLVM4JNI_USER_OUT_DIR_BASE = "user-llvm4jni-output"
PROCESSOR_MAIN_CLASS = "com.alibaba.graphscope.annotation.Main"
JAVA_CODEGNE_OUTPUT_PREFIX = "gs-ffi"
GRAPE_PROCESSOR_JAR = os.path.join(
GRAPHSCOPE_HOME, "lib", "grape-runtime-0.1-shaded.jar"
)
def get_timestamp():
now = datetime.datetime.now()
return datetime.datetime.timestamp(now)
def get_lib_path(app_dir, app_name):
lib_path = ""
if sys.platform == "linux" or sys.platform == "linux2":
lib_path = os.path.join(app_dir, "lib%s.so" % app_name)
elif sys.platform == "darwin":
lib_path = os.path.join(app_dir, "lib%s.dylib" % app_name)
else:
raise RuntimeError(f"Unsupported platform {sys.platform}")
return lib_path
def get_app_sha256(attr):
(
app_type,
app_header,
app_class,
vd_type,
md_type,
pregel_combine,
java_jar_path,
java_app_class,
) = _codegen_app_info(attr, DEFAULT_GS_CONFIG_FILE)
graph_header, graph_type = _codegen_graph_info(attr)
logger.info("Codegened graph type: %s, Graph header: %s", graph_type, graph_header)
if app_type == "cpp_pie":
return hashlib.sha256(
f"{app_type}.{app_class}.{graph_type}".encode("utf-8")
).hexdigest()
elif app_type == "java_pie":
s = hashlib.sha256()
# CAUTION!!!!!
# We believe jar_path.java_app_class can uniquely define one java app
s.update(f"{app_type}.{java_jar_path}.{java_app_class}".encode("utf-8"))
if types_pb2.GAR in attr:
s.update(attr[types_pb2.GAR].s)
return s.hexdigest()
else:
s = hashlib.sha256()
s.update(f"{app_type}.{app_class}.{graph_type}".encode("utf-8"))
if types_pb2.GAR in attr:
s.update(attr[types_pb2.GAR].s)
return s.hexdigest()
def get_graph_sha256(attr):
_, graph_class = _codegen_graph_info(attr)
return hashlib.sha256(graph_class.encode("utf-8")).hexdigest()
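# Illustrative sketch (not part of the original module): the sha256 digests above
# act as cache keys, so identical app/graph attributes always map to the same
# compiled library name. The naming scheme below is hypothetical.
def _example_cache_key_to_library_name(attr):
    # e.g. "app_3f9c..." can be looked up and reused instead of recompiling.
    return "app_" + get_app_sha256(attr)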
def compile_app(workspace: str, library_name, attr, engine_config: dict):
"""Compile an application.
Args:
workspace (str): working dir.
library_name (str): name of library
attr (`AttrValue`): All information needed to compile an app.
engine_config (dict): for options of NETWORKX
Returns:
str: Path of the built library.
str: Java jar path. For c++/python app, return None.
str: Directory containing generated java and jni code. For c++/python app, return None.
str: App type.
"""
app_dir = os.path.join(workspace, library_name)
os.makedirs(app_dir, exist_ok=True)
_extract_gar(app_dir, attr)
# codegen app and graph info
# vd_type and md_type is None in cpp_pie
(
app_type,
app_header,
app_class,
vd_type,
md_type,
pregel_combine,
java_jar_path,
java_app_class,
) = _codegen_app_info(attr, DEFAULT_GS_CONFIG_FILE)
logger.info(
"Codegened application type: %s, app header: %s, app_class: %s, vd_type: %s, md_type: %s, pregel_combine: %s, \
java_jar_path: %s, java_app_class: %s",
app_type,
app_header,
app_class,
str(vd_type),
str(md_type),
str(pregel_combine),
str(java_jar_path),
str(java_app_class),
)
graph_header, graph_type = _codegen_graph_info(attr)
logger.info("Codegened graph type: %s, Graph header: %s", graph_type, graph_header)
os.chdir(app_dir)
module_name = ""
# Output directory for java codegen
java_codegen_out_dir = ""
cmake_commands = [
"cmake",
".",
f"-DNETWORKX={engine_config['networkx']}",
f"-DCMAKE_PREFIX_PATH={GRAPHSCOPE_HOME}",
]
if app_type == "java_pie":
if not os.path.isfile(GRAPE_PROCESSOR_JAR):
raise RuntimeError("Grape runtime jar not found")
# for java need to run preprocess
java_codegen_out_dir = os.path.join(
workspace, "{}-{}".format(JAVA_CODEGNE_OUTPUT_PREFIX, library_name)
)
cmake_commands += [
"-DENABLE_JAVA_SDK=ON",
"-DJAVA_PIE_APP=ON",
"-DPRE_CP={}:{}".format(GRAPE_PROCESSOR_JAR, java_jar_path),
"-DPROCESSOR_MAIN_CLASS={}".format(PROCESSOR_MAIN_CLASS),
"-DJAR_PATH={}".format(java_jar_path),
"-DOUTPUT_DIR={}".format(java_codegen_out_dir),
]
        # If llvm4jni's run.sh is not found, we just go ahead, since it is optional.
if LLVM4JNI_HOME and os.path.isfile(os.path.join(LLVM4JNI_HOME, "run.sh")):
llvm4jni_user_out_dir = os.path.join(
workspace, "{}-{}".format(LLVM4JNI_USER_OUT_DIR_BASE, library_name)
)
cmake_commands += [
"-DRUN_LLVM4JNI_SH={}".format(os.path.join(LLVM4JNI_HOME, "run.sh")),
"-DLLVM4JNI_OUTPUT={}".format(llvm4jni_user_out_dir),
"-DLIB_PATH={}".format(get_lib_path(app_dir, library_name)),
]
else:
logger.info(
"Skip running llvm4jni since env var LLVM4JNI_HOME not found or run.sh not found under LLVM4JNI_HOME"
)
logger.info(" ".join(cmake_commands))
elif app_type != "cpp_pie":
if app_type == "cython_pregel":
pxd_name = "pregel"
cmake_commands += ["-DCYTHON_PREGEL_APP=True"]
if pregel_combine:
cmake_commands += ["-DENABLE_PREGEL_COMBINE=True"]
else:
pxd_name = "pie"
cmake_commands += ["-DCYTHON_PIE_APP=True"]
# Copy pxd file and generate cc file from pyx
shutil.copyfile(
os.path.join(TEMPLATE_DIR, f"{pxd_name}.pxd.template"),
os.path.join(app_dir, f"{pxd_name}.pxd"),
)
        # Assume the gar has one and only one .pyx file
for pyx_file in glob.glob(app_dir + "/*.pyx"):
module_name = os.path.splitext(os.path.basename(pyx_file))[0]
cc_file = os.path.join(app_dir, module_name + ".cc")
subprocess.check_call(["cython", "-3", "--cplus", "-o", cc_file, pyx_file])
app_header = f"{module_name}.h"
# replace and generate cmakelist
cmakelists_file_tmp = os.path.join(TEMPLATE_DIR, "CMakeLists.template")
cmakelists_file = os.path.join(app_dir, "CMakeLists.txt")
with open(cmakelists_file_tmp, mode="r") as template:
content = template.read()
content = Template(content).safe_substitute(
_analytical_engine_home=ANALYTICAL_ENGINE_HOME,
_frame_name=library_name,
_vd_type=vd_type,
_md_type=md_type,
_graph_type=graph_type,
_graph_header=graph_header,
_module_name=module_name,
_app_type=app_class,
_app_header=app_header,
)
with open(cmakelists_file, mode="w") as f:
f.write(content)
# compile
logger.info("Building app ...")
cmake_process = subprocess.Popen(
cmake_commands,
env=os.environ.copy(),
universal_newlines=True,
encoding="utf-8",
stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE,
)
cmake_stderr_watcher = PipeWatcher(cmake_process.stderr, sys.stdout)
setattr(cmake_process, "stderr_watcher", cmake_stderr_watcher)
cmake_process.wait()
make_process = subprocess.Popen(
["make", "-j4"],
env=os.environ.copy(),
universal_newlines=True,
encoding="utf-8",
stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE,
)
make_stderr_watcher = PipeWatcher(make_process.stderr, sys.stdout)
setattr(make_process, "stderr_watcher", make_stderr_watcher)
make_process.wait()
lib_path = get_lib_path(app_dir, library_name)
if not os.path.isfile(lib_path):
raise CompilationError(f"Failed to compile app {app_class}")
return lib_path, java_jar_path, java_codegen_out_dir, app_type
def compile_graph_frame(workspace: str, library_name, attr: dict, engine_config: dict):
"""Compile an application.
Args:
workspace (str): Working dir.
library_name (str): name of library
attr (`AttrValue`): All information needed to compile a graph library.
engine_config (dict): for options of NETWORKX
Raises:
ValueError: When graph_type is not supported.
Returns:
str: Path of the built graph library.
        None: For consistency with compile_app.
        None: For consistency with compile_app.
        None: For consistency with compile_app.
"""
_, graph_class = _codegen_graph_info(attr)
logger.info("Codegened graph frame type: %s", graph_class)
library_dir = os.path.join(workspace, library_name)
os.makedirs(library_dir, exist_ok=True)
os.chdir(library_dir)
graph_type = attr[types_pb2.GRAPH_TYPE].graph_type
cmake_commands = [
"cmake",
".",
f"-DNETWORKX={engine_config['networkx']}",
f"-DCMAKE_PREFIX_PATH={GRAPHSCOPE_HOME}",
]
if graph_type == graph_def_pb2.ARROW_PROPERTY:
cmake_commands += ["-DPROPERTY_GRAPH_FRAME=True"]
elif graph_type in (
graph_def_pb2.ARROW_PROJECTED,
graph_def_pb2.DYNAMIC_PROJECTED,
graph_def_pb2.ARROW_FLATTENED,
):
cmake_commands += ["-DPROJECT_FRAME=True"]
else:
raise ValueError(f"Illegal graph type: {graph_type}")
# replace and generate cmakelist
cmakelists_file_tmp = os.path.join(TEMPLATE_DIR, "CMakeLists.template")
cmakelists_file = os.path.join(library_dir, "CMakeLists.txt")
with open(cmakelists_file_tmp, mode="r") as template:
content = template.read()
content = Template(content).safe_substitute(
_analytical_engine_home=ANALYTICAL_ENGINE_HOME,
_frame_name=library_name,
_graph_type=graph_class,
)
with open(cmakelists_file, mode="w") as f:
f.write(content)
# compile
logger.info("Building graph library ...")
cmake_process = subprocess.Popen(
cmake_commands,
env=os.environ.copy(),
universal_newlines=True,
encoding="utf-8",
stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE,
)
cmake_stderr_watcher = PipeWatcher(cmake_process.stderr, sys.stdout)
setattr(cmake_process, "stderr_watcher", cmake_stderr_watcher)
cmake_process.wait()
make_process = subprocess.Popen(
["make", "-j4"],
env=os.environ.copy(),
universal_newlines=True,
encoding="utf-8",
stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE,
)
make_stderr_watcher = PipeWatcher(make_process.stderr, sys.stdout)
setattr(make_process, "stderr_watcher", make_stderr_watcher)
make_process.wait()
lib_path = get_lib_path(library_dir, library_name)
if not os.path.isfile(lib_path):
raise CompilationError(f"Failed to compile graph {graph_class}")
return lib_path, None, None, None
def op_pre_process(op, op_result_pool, key_to_op, **kwargs): # noqa: C901
if op.op == types_pb2.REPORT_GRAPH:
return
if op.op == types_pb2.CREATE_GRAPH:
_pre_process_for_create_graph_op(op, op_result_pool, key_to_op, **kwargs)
if op.op == types_pb2.ADD_LABELS:
_pre_process_for_add_labels_op(op, op_result_pool, key_to_op, **kwargs)
if op.op == types_pb2.RUN_APP:
_pre_process_for_run_app_op(op, op_result_pool, key_to_op, **kwargs)
if op.op == types_pb2.BIND_APP:
_pre_process_for_bind_app_op(op, op_result_pool, key_to_op, **kwargs)
if op.op == types_pb2.PROJECT_GRAPH:
_pre_process_for_project_op(op, op_result_pool, key_to_op, **kwargs)
if op.op == types_pb2.PROJECT_TO_SIMPLE:
_pre_process_for_project_to_simple_op(op, op_result_pool, key_to_op, **kwargs)
if op.op == types_pb2.ADD_COLUMN:
_pre_process_for_add_column_op(op, op_result_pool, key_to_op, **kwargs)
if op.op == types_pb2.UNLOAD_GRAPH:
_pre_process_for_unload_graph_op(op, op_result_pool, key_to_op, **kwargs)
if op.op in (
types_pb2.CONTEXT_TO_NUMPY,
types_pb2.CONTEXT_TO_DATAFRAME,
types_pb2.TO_VINEYARD_TENSOR,
types_pb2.TO_VINEYARD_DATAFRAME,
):
_pre_process_for_context_op(op, op_result_pool, key_to_op, **kwargs)
if op.op in (types_pb2.GRAPH_TO_NUMPY, types_pb2.GRAPH_TO_DATAFRAME):
_pre_process_for_output_graph_op(op, op_result_pool, key_to_op, **kwargs)
if op.op == types_pb2.UNLOAD_APP:
_pre_process_for_unload_app_op(op, op_result_pool, key_to_op, **kwargs)
if op.op == types_pb2.UNLOAD_CONTEXT:
_pre_process_for_unload_context_op(op, op_result_pool, key_to_op, **kwargs)
if op.op == types_pb2.CREATE_INTERACTIVE_QUERY:
_pre_process_for_create_interactive_query_op(
op, op_result_pool, key_to_op, **kwargs
)
if op.op == types_pb2.GREMLIN_QUERY:
_pre_process_for_gremlin_query_op(op, op_result_pool, key_to_op, **kwargs)
if op.op == types_pb2.FETCH_GREMLIN_RESULT:
_pre_process_for_fetch_gremlin_result(op, op_result_pool, key_to_op, **kwargs)
if op.op == types_pb2.CLOSE_INTERACTIVE_QUERY:
_pre_process_for_close_interactive_query_op(
op, op_result_pool, key_to_op, **kwargs
)
if op.op == types_pb2.SUBGRAPH:
_pre_process_for_gremlin_to_subgraph_op(op, op_result_pool, key_to_op, **kwargs)
if op.op == types_pb2.CREATE_LEARNING_INSTANCE:
_pre_process_for_create_learning_graph_op(
op, op_result_pool, key_to_op, **kwargs
)
if op.op == types_pb2.CLOSE_LEARNING_INSTANCE:
_pre_process_for_close_learning_instance_op(
op, op_result_pool, key_to_op, **kwargs
)
if op.op == types_pb2.OUTPUT:
_pre_process_for_output_op(op, op_result_pool, key_to_op, **kwargs)
def _pre_process_for_create_graph_op(op, op_result_pool, key_to_op, **kwargs):
assert len(op.parents) <= 1
if len(op.parents) == 1:
key_of_parent_op = op.parents[0]
parent_op = key_to_op[key_of_parent_op]
if parent_op.op == types_pb2.DATA_SOURCE:
for key, value in parent_op.attr.items():
op.attr[key].CopyFrom(value)
def _pre_process_for_add_labels_op(op, op_result_pool, key_to_op, **kwargs):
assert len(op.parents) == 2
for key_of_parent_op in op.parents:
parent_op = key_to_op[key_of_parent_op]
if parent_op.op == types_pb2.DATA_SOURCE:
for key, value in parent_op.attr.items():
op.attr[key].CopyFrom(value)
else:
result = op_result_pool[key_of_parent_op]
op.attr[types_pb2.GRAPH_NAME].CopyFrom(
utils.s_to_attr(result.graph_def.key)
)
def _pre_process_for_close_interactive_query_op(
op, op_result_pool, key_to_op, **kwargs
):
assert len(op.parents) == 1
assert op.parents[0] in op_result_pool
def _pre_process_for_gremlin_to_subgraph_op(op, op_result_pool, key_to_op, **kwargs):
assert len(op.parents) == 1
assert op.parents[0] in op_result_pool
def _pre_process_for_gremlin_query_op(op, op_result_pool, key_to_op, **kwargs):
assert len(op.parents) == 1
assert op.parents[0] in op_result_pool
def _pre_process_for_fetch_gremlin_result(op, op_result_pool, key_to_op, **kwargs):
assert len(op.parents) == 1
assert op.parents[0] in op_result_pool
def _pre_process_for_create_interactive_query_op(
op, op_result_pool, key_to_op, **kwargs
):
assert len(op.parents) == 1
key_of_parent_op = op.parents[0]
result = op_result_pool[key_of_parent_op]
assert result.graph_def.extension.Is(graph_def_pb2.VineyardInfoPb.DESCRIPTOR)
vy_info = graph_def_pb2.VineyardInfoPb()
result.graph_def.extension.Unpack(vy_info)
op.attr[types_pb2.VINEYARD_ID].CopyFrom(utils.i_to_attr(vy_info.vineyard_id))
op.attr[types_pb2.SCHEMA_PATH].CopyFrom(utils.s_to_attr(vy_info.schema_path))
def _pre_process_for_close_learning_instance_op(
op, op_result_pool, key_to_op, **kwargs
):
assert len(op.parents) == 1
assert op.parents[0] in op_result_pool
def _pre_process_for_create_learning_graph_op(op, op_result_pool, key_to_op, **kwargs):
from graphscope.learning.graph import Graph as LearningGraph
nodes = pickle.loads(op.attr[types_pb2.NODES].s)
edges = pickle.loads(op.attr[types_pb2.EDGES].s)
gen_labels = pickle.loads(op.attr[types_pb2.GLE_GEN_LABELS].s)
# get graph schema
key_of_parent_op = op.parents[0]
result = op_result_pool[key_of_parent_op]
assert result.graph_def.extension.Is(graph_def_pb2.VineyardInfoPb.DESCRIPTOR)
schema = GraphSchema()
schema.from_graph_def(result.graph_def)
# get graph vineyard id
vy_info = graph_def_pb2.VineyardInfoPb()
result.graph_def.extension.Unpack(vy_info)
vineyard_id = vy_info.vineyard_id
# gle handle
engine_hosts = kwargs.pop("engine_hosts")
engine_config = kwargs.pop("engine_config")
handle = get_gl_handle(schema, vineyard_id, engine_hosts, engine_config)
config = LearningGraph.preprocess_args(handle, nodes, edges, gen_labels)
config = base64.b64encode(json.dumps(config).encode("utf-8")).decode("utf-8")
op.attr[types_pb2.VINEYARD_ID].CopyFrom(utils.i_to_attr(vineyard_id))
op.attr[types_pb2.GLE_HANDLE].CopyFrom(utils.s_to_attr(handle))
op.attr[types_pb2.GLE_CONFIG].CopyFrom(utils.s_to_attr(config))
# get `bind_app` runtime information in lazy mode
def _pre_process_for_bind_app_op(op, op_result_pool, key_to_op, **kwargs):
for key_of_parent_op in op.parents:
parent_op = key_to_op[key_of_parent_op]
if parent_op.op == types_pb2.CREATE_APP:
# app assets
op.attr[types_pb2.APP_ALGO].CopyFrom(parent_op.attr[types_pb2.APP_ALGO])
if types_pb2.GAR in parent_op.attr:
op.attr[types_pb2.GAR].CopyFrom(parent_op.attr[types_pb2.GAR])
else:
# get graph runtime information from results
result = op_result_pool[key_of_parent_op]
assert result.graph_def.extension.Is(
graph_def_pb2.VineyardInfoPb.DESCRIPTOR
)
vy_info = graph_def_pb2.VineyardInfoPb()
result.graph_def.extension.Unpack(vy_info)
op.attr[types_pb2.GRAPH_NAME].CopyFrom(
attr_value_pb2.AttrValue(s=result.graph_def.key.encode("utf-8"))
)
op.attr[types_pb2.GRAPH_TYPE].CopyFrom(
attr_value_pb2.AttrValue(graph_type=result.graph_def.graph_type)
)
op.attr[types_pb2.OID_TYPE].CopyFrom(
utils.s_to_attr(
utils.normalize_data_type_str(
utils.data_type_to_cpp(vy_info.oid_type)
)
)
)
op.attr[types_pb2.VID_TYPE].CopyFrom(
utils.s_to_attr(utils.data_type_to_cpp(vy_info.vid_type))
)
op.attr[types_pb2.V_DATA_TYPE].CopyFrom(
utils.s_to_attr(utils.data_type_to_cpp(vy_info.vdata_type))
)
op.attr[types_pb2.E_DATA_TYPE].CopyFrom(
utils.s_to_attr(utils.data_type_to_cpp(vy_info.edata_type))
)
# get `run_app` runtime information in lazy mode
def _pre_process_for_run_app_op(op, op_result_pool, key_to_op, **kwargs):
# run_app op has only one parent
assert len(op.parents) == 1
key_of_parent_op = op.parents[0]
parent_op = key_to_op[key_of_parent_op]
assert parent_op.op == types_pb2.BIND_APP
# set graph key
op.attr[types_pb2.GRAPH_NAME].CopyFrom(parent_op.attr[types_pb2.GRAPH_NAME])
result = op_result_pool[key_of_parent_op]
# set app key
op.attr[types_pb2.APP_NAME].CopyFrom(
attr_value_pb2.AttrValue(s=result.result.decode("utf-8").encode("utf-8"))
)
app_type = parent_op.attr[types_pb2.APP_ALGO].s.decode("utf-8")
if app_type == "java_app":
# For java app, we need lib path as an explicit arg.
param = Any()
lib_path = parent_op.attr[types_pb2.APP_LIBRARY_PATH].s.decode("utf-8")
param.Pack(data_types_pb2.StringValue(value=lib_path))
op.query_args.args.extend([param])
logger.info("Lib path {}".format(lib_path))
def _pre_process_for_unload_graph_op(op, op_result_pool, key_to_op, **kwargs):
assert len(op.parents) == 1
key_of_parent_op = op.parents[0]
result = op_result_pool[key_of_parent_op]
assert result.graph_def.extension.Is(graph_def_pb2.VineyardInfoPb.DESCRIPTOR)
vy_info = graph_def_pb2.VineyardInfoPb()
result.graph_def.extension.Unpack(vy_info)
op.attr[types_pb2.GRAPH_NAME].CopyFrom(utils.s_to_attr(result.graph_def.key))
op.attr[types_pb2.VINEYARD_ID].CopyFrom(utils.i_to_attr(vy_info.vineyard_id))
def _pre_process_for_unload_app_op(op, op_result_pool, key_to_op, **kwargs):
assert len(op.parents) == 1
key_of_parent_op = op.parents[0]
result = op_result_pool[key_of_parent_op]
op.attr[types_pb2.APP_NAME].CopyFrom(utils.s_to_attr(result.result.decode("utf-8")))
def _pre_process_for_unload_context_op(op, op_result_pool, key_to_op, **kwargs):
assert len(op.parents) == 1
key_of_parent_op = op.parents[0]
result = op_result_pool[key_of_parent_op]
parent_op_result = json.loads(result.result.decode("utf-8"))
context_key = parent_op_result["context_key"]
op.attr[types_pb2.CONTEXT_KEY].CopyFrom(
attr_value_pb2.AttrValue(s=context_key.encode("utf-8"))
)
def _pre_process_for_add_column_op(op, op_result_pool, key_to_op, **kwargs):
for key_of_parent_op in op.parents:
parent_op = key_to_op[key_of_parent_op]
if parent_op.op != types_pb2.RUN_APP:
# get graph information
r = op_result_pool[key_of_parent_op]
graph_name = r.graph_def.key
graph_type = r.graph_def.graph_type
schema = GraphSchema()
schema.from_graph_def(r.graph_def)
for key_of_parent_op in op.parents:
parent_op = key_to_op[key_of_parent_op]
if parent_op.op == types_pb2.RUN_APP:
selector = op.attr[types_pb2.SELECTOR].s.decode("utf-8")
r = op_result_pool[key_of_parent_op]
parent_op_result = json.loads(r.result.decode("utf-8"))
context_key = parent_op_result["context_key"]
context_type = parent_op_result["context_type"]
selector = _tranform_dataframe_selector(context_type, schema, selector)
op.attr[types_pb2.GRAPH_NAME].CopyFrom(utils.s_to_attr(graph_name))
op.attr[types_pb2.GRAPH_TYPE].CopyFrom(utils.graph_type_to_attr(graph_type))
op.attr[types_pb2.CONTEXT_KEY].CopyFrom(utils.s_to_attr(context_key))
op.attr[types_pb2.SELECTOR].CopyFrom(utils.s_to_attr(selector))
def _pre_process_for_context_op(op, op_result_pool, key_to_op, **kwargs):
def __backtrack_key_of_graph_op(key):
bfs_queue = Queue()
bfs_queue.put(key)
while not bfs_queue.empty():
next_op_key = bfs_queue.get()
if next_op_key in key_to_op:
next_op = key_to_op[next_op_key]
if next_op.op in (
types_pb2.CREATE_GRAPH,
types_pb2.ADD_LABELS,
types_pb2.TRANSFORM_GRAPH,
types_pb2.PROJECT_GRAPH,
types_pb2.PROJECT_TO_SIMPLE,
):
return next_op
for parent_key in next_op.parents:
bfs_queue.put(parent_key)
return None
assert len(op.parents) == 1
schema = None
key_of_parent_op = op.parents[0]
graph_op = __backtrack_key_of_graph_op(key_of_parent_op)
r = op_result_pool[key_of_parent_op]
# set context key
parent_op_result = json.loads(r.result.decode("utf-8"))
context_key = parent_op_result["context_key"]
context_type = parent_op_result["context_type"]
op.attr[types_pb2.CONTEXT_KEY].CopyFrom(
attr_value_pb2.AttrValue(s=context_key.encode("utf-8"))
)
r = op_result_pool[graph_op.key]
# transform selector
schema = GraphSchema()
schema.from_graph_def(r.graph_def)
selector = op.attr[types_pb2.SELECTOR].s.decode("utf-8")
if op.op in (types_pb2.CONTEXT_TO_DATAFRAME, types_pb2.TO_VINEYARD_DATAFRAME):
selector = _tranform_dataframe_selector(context_type, schema, selector)
else:
# to numpy
selector = _tranform_numpy_selector(context_type, schema, selector)
if selector is not None:
op.attr[types_pb2.SELECTOR].CopyFrom(
attr_value_pb2.AttrValue(s=selector.encode("utf-8"))
)
def _pre_process_for_output_op(op, op_result_pool, key_to_op, **kwargs):
assert len(op.parents) == 1
key_of_parent_op = op.parents[0]
parent_op = key_to_op[key_of_parent_op]
result = op_result_pool[key_of_parent_op]
if parent_op.output_type in (
types_pb2.VINEYARD_TENSOR,
types_pb2.VINEYARD_DATAFRAME,
):
        # depends on to_vineyard_tensor / to_vineyard_dataframe
r = json.loads(result.result.decode("utf-8"))["object_id"]
op.attr[types_pb2.VINEYARD_ID].CopyFrom(utils.s_to_attr(r))
def _pre_process_for_output_graph_op(op, op_result_pool, key_to_op, **kwargs):
assert len(op.parents) == 1
key_of_parent_op = op.parents[0]
r = op_result_pool[key_of_parent_op]
schema = GraphSchema()
schema.from_graph_def(r.graph_def)
graph_name = r.graph_def.key
selector = op.attr[types_pb2.SELECTOR].s.decode("utf-8")
if op.op == types_pb2.GRAPH_TO_DATAFRAME:
selector = _tranform_dataframe_selector(
"labeled_vertex_property", schema, selector
)
else:
# to numpy
selector = _tranform_numpy_selector("labeled_vertex_property", schema, selector)
if selector is not None:
op.attr[types_pb2.SELECTOR].CopyFrom(
attr_value_pb2.AttrValue(s=selector.encode("utf-8"))
)
op.attr[types_pb2.GRAPH_NAME].CopyFrom(
attr_value_pb2.AttrValue(s=graph_name.encode("utf-8"))
)
def _pre_process_for_project_to_simple_op(op, op_result_pool, key_to_op, **kwargs):
# for nx graph
if op.attr[types_pb2.GRAPH_TYPE].graph_type in (
graph_def_pb2.DYNAMIC_PROJECTED,
graph_def_pb2.ARROW_FLATTENED,
):
return
assert len(op.parents) == 1
# get parent graph schema
key_of_parent_op = op.parents[0]
r = op_result_pool[key_of_parent_op]
schema = GraphSchema()
schema.from_graph_def(r.graph_def)
graph_name = r.graph_def.key
check_argument(
schema.vertex_label_num == 1,
"Cannot project to simple, vertex label number is not one.",
)
check_argument(
schema.edge_label_num == 1,
"Cannot project to simple, edge label number is not one.",
)
v_label = schema.vertex_labels[0]
e_label = schema.edge_labels[0]
relation = (v_label, v_label)
check_argument(
relation in schema.get_relationships(e_label),
f"Cannot project to simple, Graph doesn't contain such relationship: {v_label} -> {e_label} <- {v_label}.",
)
v_props = schema.get_vertex_properties(v_label)
e_props = schema.get_edge_properties(e_label)
check_argument(len(v_props) <= 1)
check_argument(len(e_props) <= 1)
v_label_id = schema.get_vertex_label_id(v_label)
e_label_id = schema.get_edge_label_id(e_label)
v_prop_id, vdata_type = (v_props[0].id, v_props[0].type) if v_props else (-1, None)
e_prop_id, edata_type = (e_props[0].id, e_props[0].type) if e_props else (-1, None)
oid_type = schema.oid_type
vid_type = schema.vid_type
op.attr[types_pb2.GRAPH_NAME].CopyFrom(
attr_value_pb2.AttrValue(s=graph_name.encode("utf-8"))
)
op.attr[types_pb2.GRAPH_TYPE].CopyFrom(
utils.graph_type_to_attr(graph_def_pb2.ARROW_PROJECTED)
)
op.attr[types_pb2.V_LABEL_ID].CopyFrom(utils.i_to_attr(v_label_id))
op.attr[types_pb2.V_PROP_ID].CopyFrom(utils.i_to_attr(v_prop_id))
op.attr[types_pb2.E_LABEL_ID].CopyFrom(utils.i_to_attr(e_label_id))
op.attr[types_pb2.E_PROP_ID].CopyFrom(utils.i_to_attr(e_prop_id))
op.attr[types_pb2.OID_TYPE].CopyFrom(
utils.s_to_attr(utils.data_type_to_cpp(oid_type))
)
op.attr[types_pb2.VID_TYPE].CopyFrom(
utils.s_to_attr(utils.data_type_to_cpp(vid_type))
)
op.attr[types_pb2.V_DATA_TYPE].CopyFrom(
utils.s_to_attr(utils.data_type_to_cpp(vdata_type))
)
op.attr[types_pb2.E_DATA_TYPE].CopyFrom(
utils.s_to_attr(utils.data_type_to_cpp(edata_type))
)
def _pre_process_for_project_op(op, op_result_pool, key_to_op, **kwargs):
def _get_all_v_props_id(schema, label):
props = schema.get_vertex_properties(label)
return [schema.get_vertex_property_id(label, prop.name) for prop in props]
def _get_all_e_props_id(schema, label):
props = schema.get_edge_properties(label)
return [schema.get_edge_property_id(label, prop.name) for prop in props]
assert len(op.parents) == 1
# get parent graph schema
key_of_parent_op = op.parents[0]
r = op_result_pool[key_of_parent_op]
schema = GraphSchema()
schema.from_graph_def(r.graph_def)
graph_name = r.graph_def.key
vertices = json.loads(op.attr[types_pb2.VERTEX_COLLECTIONS].s.decode("utf-8"))
edges = json.loads(op.attr[types_pb2.EDGE_COLLECTIONS].s.decode("utf-8"))
vertex_collections = {}
edge_collections = {}
for label, props in vertices.items():
label_id = schema.get_vertex_label_id(label)
if props is None:
vertex_collections[label_id] = _get_all_v_props_id(schema, label)
else:
vertex_collections[label_id] = sorted(
[schema.get_vertex_property_id(label, prop) for prop in props]
)
for label, props in edges.items():
relations = schema.get_relationships(label)
valid = False
for src, dst in relations:
if src in vertices and dst in vertices:
valid = True
break
if not valid:
raise ValueError("Cannot find a valid relation in given vertices and edges")
label_id = schema.get_edge_label_id(label)
if props is None:
edge_collections[label_id] = _get_all_e_props_id(schema, label)
else:
edge_collections[label_id] = sorted(
[schema.get_edge_property_id(label, prop) for prop in props]
)
vertex_collections = dict(sorted(vertex_collections.items()))
edge_collections = dict(sorted(edge_collections.items()))
# construct op attr
attr = attr_value_pb2.AttrValue()
v_attr = attr_value_pb2.NameAttrList()
e_attr = attr_value_pb2.NameAttrList()
for label, props in vertex_collections.items():
v_attr.attr[label].CopyFrom(utils.list_i_to_attr(props))
for label, props in edge_collections.items():
e_attr.attr[label].CopyFrom(utils.list_i_to_attr(props))
attr.list.func.extend([v_attr, e_attr])
op.attr[types_pb2.GRAPH_NAME].CopyFrom(
attr_value_pb2.AttrValue(s=graph_name.encode("utf-8"))
)
op.attr[types_pb2.ARROW_PROPERTY_DEFINITION].CopyFrom(attr)
del op.attr[types_pb2.VERTEX_COLLECTIONS]
del op.attr[types_pb2.EDGE_COLLECTIONS]
def _tranform_numpy_selector(context_type, schema, selector):
if context_type == "tensor":
selector = None
if context_type == "vertex_data":
selector = transform_vertex_data_selector(selector)
if context_type == "labeled_vertex_data":
selector = transform_labeled_vertex_data_selector(schema, selector)
if context_type == "vertex_property":
selector = transform_vertex_property_data_selector(selector)
if context_type == "labeled_vertex_property":
selector = transform_labeled_vertex_property_data_selector(schema, selector)
return selector
def _tranform_dataframe_selector(context_type, schema, selector):
selector = json.loads(selector)
if context_type == "tensor":
selector = {key: None for key, value in selector.items()}
if context_type == "vertex_data":
selector = {
key: transform_vertex_data_selector(value)
for key, value in selector.items()
}
if context_type == "labeled_vertex_data":
selector = {
key: transform_labeled_vertex_data_selector(schema, value)
for key, value in selector.items()
}
if context_type == "vertex_property":
selector = {
key: transform_vertex_property_data_selector(value)
for key, value in selector.items()
}
if context_type == "labeled_vertex_property":
selector = {
key: transform_labeled_vertex_property_data_selector(schema, value)
for key, value in selector.items()
}
return json.dumps(selector)
def _transform_vertex_data_v(selector):
if selector not in ("v.id", "v.data"):
raise SyntaxError("selector of v must be 'id' or 'data'")
return selector
def _transform_vertex_data_e(selector):
if selector not in ("e.src", "e.dst", "e.data"):
raise SyntaxError("selector of e must be 'src', 'dst' or 'data'")
return selector
def _transform_vertex_data_r(selector):
if selector != "r":
raise SyntaxError("selector of r must be 'r'")
return selector
def _transform_vertex_property_data_r(selector):
    # The second part of an 'r' selector is a user-defined name,
    # so any string is allowed.
return selector
def _transform_labeled_vertex_data_v(schema, label, prop):
label_id = schema.get_vertex_label_id(label)
if prop == "id":
return f"label{label_id}.{prop}"
else:
prop_id = schema.get_vertex_property_id(label, prop)
return f"label{label_id}.property{prop_id}"
def _transform_labeled_vertex_data_e(schema, label, prop):
label_id = schema.get_edge_label_id(label)
if prop in ("src", "dst"):
return f"label{label_id}.{prop}"
else:
prop_id = schema.get_vertex_property_id(label, prop)
return f"label{label_id}.property{prop_id}"
def _transform_labeled_vertex_data_r(schema, label):
label_id = schema.get_vertex_label_id(label)
return f"label{label_id}"
def _transform_labeled_vertex_property_data_r(schema, label, prop):
label_id = schema.get_vertex_label_id(label)
return f"label{label_id}.{prop}"
def transform_vertex_data_selector(selector):
"""Optional values:
vertex selector: 'v.id', 'v.data'
edge selector: 'e.src', 'e.dst', 'e.data'
result selector: 'r'
"""
if selector is None:
raise RuntimeError("selector cannot be None")
segments = selector.split(".")
if len(segments) > 2:
raise SyntaxError("Invalid selector: %s." % selector)
if segments[0] == "v":
selector = _transform_vertex_data_v(selector)
elif segments[0] == "e":
selector = _transform_vertex_data_e(selector)
elif segments[0] == "r":
selector = _transform_vertex_data_r(selector)
else:
raise SyntaxError(f"Invalid selector: {selector}, choose from v / e / r.")
return selector
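# Illustrative sketch (not part of the original module): expected behaviour of
# `transform_vertex_data_selector` for simple (unlabeled) contexts.
def _example_vertex_data_selectors():
    # Valid selectors pass through unchanged; anything else raises SyntaxError.
    assert transform_vertex_data_selector("v.id") == "v.id"
    assert transform_vertex_data_selector("e.src") == "e.src"
    assert transform_vertex_data_selector("r") == "r"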
def transform_vertex_property_data_selector(selector):
"""Optional values:
vertex selector: 'v.id', 'v.data'
edge selector: 'e.src', 'e.dst', 'e.data'
result selector format: 'r.y', y denotes property name.
"""
if selector is None:
raise RuntimeError("selector cannot be None")
segments = selector.split(".")
if len(segments) != 2:
raise SyntaxError(f"Invalid selector: {selector}")
if segments[0] == "v":
selector = _transform_vertex_data_v(selector)
elif segments[0] == "e":
selector = _transform_vertex_data_e(selector)
elif segments[0] == "r":
selector = _transform_vertex_property_data_r(selector)
else:
raise SyntaxError(f"Invalid selector: {selector}, choose from v / e / r.")
return selector
def transform_labeled_vertex_data_selector(schema, selector):
"""Formats: 'v:x.y/id', 'e:x.y/src/dst', 'r:label',
x denotes label name, y denotes property name.
Returned selector will change label name to 'label{id}', where id is x's id in labels.
And change property name to 'property{id}', where id is y's id in properties.
"""
if selector is None:
raise RuntimeError("selector cannot be None")
ret_type, segments = selector.split(":")
if ret_type not in ("v", "e", "r"):
raise SyntaxError(f"Invalid selector: {selector}")
segments = segments.split(".")
ret = ""
if ret_type == "v":
ret = _transform_labeled_vertex_data_v(schema, *segments)
elif ret_type == "e":
ret = _transform_labeled_vertex_data_e(schema, *segments)
elif ret_type == "r":
ret = _transform_labeled_vertex_data_r(schema, *segments)
return "{}:{}".format(ret_type, ret)
def transform_labeled_vertex_property_data_selector(schema, selector):
"""Formats: 'v:x.y/id', 'e:x.y/src/dst', 'r:x.y',
x denotes label name, y denotes property name.
Returned selector will change label name to 'label{id}', where id is x's id in labels.
And change property name to 'property{id}', where id is y's id in properties.
"""
if selector is None:
raise RuntimeError("selector cannot be None")
ret_type, segments = selector.split(":")
if ret_type not in ("v", "e", "r"):
raise SyntaxError(f"Invalid selector: {selector}")
segments = segments.split(".")
ret = ""
if ret_type == "v":
ret = _transform_labeled_vertex_data_v(schema, *segments)
elif ret_type == "e":
ret = _transform_labeled_vertex_data_e(schema, *segments)
elif ret_type == "r":
ret = _transform_labeled_vertex_property_data_r(schema, *segments)
return f"{ret_type}:{ret}"
def _extract_gar(app_dir: str, attr):
"""Extract gar to workspace
Args:
        app_dir (str): Directory to extract the gar into.
        attr (`AttrValue`): Optionally it can contain the bytes of the gar.
"""
fp = BUILTIN_APP_RESOURCE_PATH # default is builtin app resources.
if types_pb2.GAR in attr:
# if gar sent via bytecode in attr, overwrite.
fp = BytesIO(attr[types_pb2.GAR].s)
with zipfile.ZipFile(fp, "r") as zip_ref:
zip_ref.extractall(app_dir)
def _codegen_app_info(attr, meta_file: str):
"""Codegen application by instanize the template specialization.
Args:
workspace (str): Working directory
meta_file (str): A yaml file that contains metas of all builtin app.
attr (`AttrValue`): For get algorithm name of app.
Raises:
KeyError: If the algorithm name doesn't exist in the `meta_file`
Returns:
type: app_type
app class: for fulfilling the CMakelists.
"""
fp = BUILTIN_APP_RESOURCE_PATH # default is builtin app resources.
if types_pb2.GAR in attr:
# if gar sent via bytecode in attr, overwrite.
fp = BytesIO(attr[types_pb2.GAR].s)
with zipfile.ZipFile(fp, "r") as zip_ref:
with zip_ref.open(meta_file, "r") as f:
config_yaml = yaml.safe_load(f)
algo = attr[types_pb2.APP_ALGO].s.decode("utf-8")
for app in config_yaml["app"]:
if app["algo"] == algo:
app_type = app["type"] # cpp_pie or cython_pregel or cython_pie, java_pie
if app_type == "cpp_pie":
return (
app_type,
app["src"],
f"{app['class_name']}<_GRAPH_TYPE>",
None,
None,
None,
None,
None,
)
if app_type in ("cython_pregel", "cython_pie"):
# cython app doesn't have c-header file
return (
app_type,
"",
"",
app["vd_type"],
app["md_type"],
app["pregel_combine"],
None,
None,
)
if app_type == "java_pie":
return (
app_type,
app["driver_header"], # cxx header
"{}<_GRAPH_TYPE>".format(app["class_name"]), # cxx class name
None, # vd_type,
None, # md_type
None, # pregel combine
app["java_jar_path"],
app["java_app_class"], # the running java app class
)
raise KeyError("Algorithm does not exist in the gar resource.")
# A mapping from graph type to the corresponding (class name, header file).
GRAPH_HEADER_MAP = {
graph_def_pb2.IMMUTABLE_EDGECUT: (
"grape::ImmutableEdgecutFragment",
"grape/fragment/immutable_edgecut_fragment.h",
),
graph_def_pb2.DYNAMIC_PROJECTED: (
"gs::DynamicProjectedFragment",
"core/fragment/dynamic_projected_fragment.h",
),
graph_def_pb2.ARROW_PROPERTY: (
"vineyard::ArrowFragment",
"vineyard/graph/fragment/arrow_fragment.h",
),
graph_def_pb2.ARROW_PROJECTED: (
"gs::ArrowProjectedFragment",
"core/fragment/arrow_projected_fragment.h",
),
graph_def_pb2.DYNAMIC_PROPERTY: (
"gs::DynamicFragment",
"core/fragment/dynamic_fragment.h",
),
graph_def_pb2.ARROW_FLATTENED: (
"gs::ArrowFlattenedFragment",
"core/fragment/arrow_flattened_fragment.h",
),
}
def _codegen_graph_info(attr):
graph_type = attr[types_pb2.GRAPH_TYPE].graph_type
graph_class, graph_header = GRAPH_HEADER_MAP[graph_type]
# graph_type is a literal of graph template in c++ side
if graph_class == "vineyard::ArrowFragment":
# in a format of full qualified name, e.g. vineyard::ArrowFragment<double, double>
graph_fqn = "{}<{},{}>".format(
graph_class,
attr[types_pb2.OID_TYPE].s.decode("utf-8"),
attr[types_pb2.VID_TYPE].s.decode("utf-8"),
)
elif graph_class in (
"gs::ArrowProjectedFragment",
"grape::ImmutableEdgecutFragment",
):
# in a format of gs::ArrowProjectedFragment<int64_t, uint32_t, double, double>
# or grape::ImmutableEdgecutFragment<int64_t, uint32_t, double, double>
graph_fqn = "{}<{},{},{},{}>".format(
graph_class,
attr[types_pb2.OID_TYPE].s.decode("utf-8"),
attr[types_pb2.VID_TYPE].s.decode("utf-8"),
attr[types_pb2.V_DATA_TYPE].s.decode("utf-8"),
attr[types_pb2.E_DATA_TYPE].s.decode("utf-8"),
)
elif graph_class == "gs::ArrowFlattenedFragment":
graph_fqn = "{}<{},{},{},{}>".format(
graph_class,
attr[types_pb2.OID_TYPE].s.decode("utf-8"),
attr[types_pb2.VID_TYPE].s.decode("utf-8"),
attr[types_pb2.V_DATA_TYPE].s.decode("utf-8"),
attr[types_pb2.E_DATA_TYPE].s.decode("utf-8"),
)
else:
# gs::DynamicProjectedFragment<double, double>
graph_fqn = "{}<{},{}>".format(
graph_class,
attr[types_pb2.V_DATA_TYPE].s.decode("utf-8"),
attr[types_pb2.E_DATA_TYPE].s.decode("utf-8"),
)
return graph_header, graph_fqn
def create_single_op_dag(op_type, config=None):
op_def = op_def_pb2.OpDef(op=op_type, key=uuid.uuid4().hex)
if config:
for k, v in config.items():
op_def.attr[k].CopyFrom(v)
dag = op_def_pb2.DagDef()
dag.op.extend([op_def])
return dag
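# Illustrative sketch (not part of the original module): building a one-op dag,
# e.g. a standalone REPORT_GRAPH request. The graph name is hypothetical.
def _example_single_op_dag():
    config = {types_pb2.GRAPH_NAME: utils.s_to_attr("graph_1")}
    return create_single_op_dag(types_pb2.REPORT_GRAPH, config)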
def dump_as_json(schema, path):
out = {}
items = []
idx = 0
for i in range(len(schema.vertex_labels)):
vertex = {"id": idx, "label": schema.vertex_labels[i], "type": "VERTEX"}
vertex["propertyDefList"] = []
for j in range(len(schema.vertex_property_names[i].s)):
names = schema.vertex_property_names[i]
types = schema.vertex_property_types[i]
vertex["propertyDefList"].append(
{"id": j, "name": names.s[j], "data_type": types.s[j].upper()}
)
vertex["indexes"] = []
vertex["indexes"].append({"propertyNames": [names.s[0]]})
items.append(vertex)
idx += 1
for i in range(len(schema.edge_labels)):
edge = {"id": idx, "label": schema.edge_labels[i], "type": "EDGE"}
edge["propertyDefList"] = []
for j in range(len(schema.edge_property_names[i].s)):
names = schema.edge_property_names[i]
types = schema.edge_property_types[i]
edge["propertyDefList"].append(
{"id": j, "name": names.s[j], "data_type": types.s[j].upper()}
)
edge["rawRelationShips"] = []
edge["rawRelationShips"].append(
{"srcVertexLabel": "xx", "dstVertexLabel": "xx"}
)
idx += 1
items.append(edge)
out["types"] = items
out["partitionNum"] = 4
with open(path, "w") as fp:
json.dump(out, fp)
def dump_string(schema_string, path):
with open(path, "w") as fp:
fp.write(schema_string)
def parse_readable_memory(value):
value = str(value).strip()
num = value[:-2]
suffix = value[-2:]
try:
float(num)
except ValueError as e:
raise ValueError(f"Argument cannot be interpreted as a number: {value}") from e
if suffix not in ["Ki", "Mi", "Gi"]:
raise ValueError(f"Memory suffix must be one of 'Ki', 'Mi' and 'Gi': {value}")
return value
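# Illustrative sketch (not part of the original module): `parse_readable_memory`
# only validates the value, it does not convert units.
def _example_parse_readable_memory():
    assert parse_readable_memory("4Gi") == "4Gi"
    assert parse_readable_memory(" 256Mi ") == "256Mi"
    # parse_readable_memory("4GB") would raise ValueError (suffix must be Ki/Mi/Gi).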
def parse_as_glog_level(log_level):
# log level in glog: INFO=1, DEBUG=10
# log level in python: DEBUG=10, INFO=20
if isinstance(log_level, str):
if log_level == "silent" or log_level == "SILENT":
log_level = -1
else:
log_level = getattr(logging, log_level.upper())
python_to_glog = {10: 10, 20: 1}
return python_to_glog.get(log_level, 1)
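# Illustrative sketch (not part of the original module): python log levels are
# mapped to glog levels, and unknown levels fall back to glog INFO (1).
def _example_parse_as_glog_level():
    assert parse_as_glog_level("DEBUG") == 10
    assert parse_as_glog_level("info") == 1
    assert parse_as_glog_level(logging.WARNING) == 1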
def str2bool(s):
if isinstance(s, bool):
return s
if s.lower() in ("yes", "true", "t", "y", "1"):
return True
return False
class ResolveMPICmdPrefix(object):
"""
Class for resolving prefix of mpi command.
Examples:
.. code:: ipython
>>> # openmpi found
>>> rmcp = ResolveMPICmdPrefix()
>>> (cmd, env) = rmcp.resolve(4, 'h1, h2, h3')
>>> cmd
['mpirun', '--allow-run-as-root',
'-n', '4', '-host', 'h1:2,h2:1,h3:1']
>>> env
{'OMPI_MCA_plm_rsh_agent': '/usr/bin/kube_ssh', # if /usr/bin/kube_ssh in $PATH
'OMPI_MCA_btl_vader_single_copy_mechanism': 'none',
'OMPI_MCA_orte_allowed_exit_without_sync': '1'}
>>> # if openmpi not found, change to mpich
>>> rmcp = ResolveMPICmdPrefix()
>>> (cmd, env) = rmcp.resolve(4, 'h1, h2, h3')
>>> cmd
['mpirun', '-n', '4', '-host', 'h1:2,h2:1,h3:1']
>>> env
{} # always empty
>>> # run without mpi on localhost when setting `num_workers` to 1
>>> rmcp = ResolveMPICmdPrefix()
>>> (cmd, env) = rmcp.resolve(1, 'localhost')
>>> cmd
[]
>>> env
{}
"""
_OPENMPI_RSH_AGENT = "OMPI_MCA_plm_rsh_agent"
_KUBE_SSH_EXEC = "kube_ssh"
def __init__(self, rsh_agent=False):
self._rsh_agent = rsh_agent
@staticmethod
def openmpi():
try:
subprocess.check_call(["ompi_info"], stdout=subprocess.DEVNULL)
except FileNotFoundError:
return False
return True
@staticmethod
def alloc(num_workers, hosts):
host_list = hosts.split(",")
host_list_len = len(host_list)
assert host_list_len != 0
host_to_proc_num = {}
if num_workers >= host_list_len:
            quotient = num_workers // host_list_len
residue = num_workers % host_list_len
for host in host_list:
if residue > 0:
host_to_proc_num[host] = quotient + 1
residue -= 1
else:
host_to_proc_num[host] = quotient
else:
raise RuntimeError("The number of hosts less then num_workers")
for i in range(host_list_len):
host_list[i] = f"{host_list[i]}:{host_to_proc_num[host_list[i]]}"
return ",".join(host_list)
def resolve(self, num_workers, hosts):
cmd = []
env = {}
if num_workers == 1 and (hosts == "localhost" or hosts == "127.0.0.1"):
# run without mpi on localhost if workers num is 1
if shutil.which("ssh") is None:
# also need a fake ssh agent
env[self._OPENMPI_RSH_AGENT] = sys.executable
return cmd, env
if self.openmpi():
env["OMPI_MCA_btl_vader_single_copy_mechanism"] = "none"
env["OMPI_MCA_orte_allowed_exit_without_sync"] = "1"
# OMPI sends SIGCONT -> SIGTERM -> SIGKILL to the worker process,
            # setting the following MCA parameter to zero eliminates the chance
            # that the process dies before receiving the SIGTERM and doing cleanup.
env["OMPI_MCA_odls_base_sigkill_timeout"] = "0"
if os.environ.get(self._OPENMPI_RSH_AGENT) is None:
rsh_agent_path = shutil.which(self._KUBE_SSH_EXEC)
if self._rsh_agent and rsh_agent_path is not None:
env[self._OPENMPI_RSH_AGENT] = rsh_agent_path
cmd.extend(
[
"mpirun",
"--allow-run-as-root",
]
)
else:
# ssh agent supported only
cmd.extend(["mpirun"])
cmd.extend(["-n", str(num_workers)])
cmd.extend(["-host", self.alloc(num_workers, hosts)])
logger.debug("Resolve mpi cmd prefix: %s", " ".join(cmd))
logger.debug("Resolve mpi env: %s", json.dumps(env))
return cmd, env
def get_gl_handle(schema, vineyard_id, engine_hosts, engine_config):
"""Dump a handler for GraphLearn for interaction.
Fields in :code:`schema` are:
+ the name of node type or edge type
+ whether the graph is weighted graph
+ whether the graph is labeled graph
+ the number of int attributes
+ the number of float attributes
+ the number of string attributes
An example of the graph handle:
.. code:: python
{
"server": "127.0.0.1:8888,127.0.0.1:8889",
"client_count": 1,
"vineyard_socket": "/var/run/vineyard.sock",
"vineyard_id": 13278328736,
"node_schema": [
"user:false:false:10:0:0",
"item:true:false:0:0:5"
],
"edge_schema": [
"user:click:item:true:false:0:0:0",
"user:buy:item:true:true:0:0:0",
"item:similar:item:false:false:10:0:0"
],
"node_attribute_types": {
"person": {
"age": "i",
"name": "s",
},
},
"edge_attribute_types": {
"knows": {
"weight": "f",
},
},
}
The handle can be decoded using:
.. code:: python
base64.b64decode(handle.encode('ascii')).decode('ascii')
Note that the ports are selected from a range :code:`(8000, 9000)`.
Args:
schema: The graph schema.
vineyard_id: The object id of graph stored in vineyard.
engine_hosts: A list of hosts for GraphScope engine workers.
engine_config: dict of config for GAE engine.
Returns:
str: Base64 encoded handle
"""
def group_property_types(props):
weighted, labeled, i, f, s, attr_types = "false", "false", 0, 0, 0, {}
for prop in props:
if prop.type in [graph_def_pb2.STRING]:
s += 1
attr_types[prop.name] = "s"
elif prop.type in (graph_def_pb2.FLOAT, graph_def_pb2.DOUBLE):
f += 1
attr_types[prop.name] = "f"
else:
i += 1
attr_types[prop.name] = "i"
if prop.name == "weight":
weighted = "true"
elif prop.name == "label":
labeled = "true"
return weighted, labeled, i, f, s, attr_types
node_schema, node_attribute_types = [], dict()
for label in schema.vertex_labels:
weighted, labeled, i, f, s, attr_types = group_property_types(
schema.get_vertex_properties(label)
)
node_schema.append(
"{}:{}:{}:{}:{}:{}".format(label, weighted, labeled, i, f, s)
)
node_attribute_types[label] = attr_types
edge_schema, edge_attribute_types = [], dict()
for label in schema.edge_labels:
weighted, labeled, i, f, s, attr_types = group_property_types(
schema.get_edge_properties(label)
)
for rel in schema.get_relationships(label):
edge_schema.append(
"{}:{}:{}:{}:{}:{}:{}:{}".format(
rel[0], label, rel[1], weighted, labeled, i, f, s
)
)
edge_attribute_types[label] = attr_types
handle = {
"hosts": engine_hosts,
"client_count": 1,
"vineyard_id": vineyard_id,
"vineyard_socket": engine_config["vineyard_socket"],
"node_schema": node_schema,
"edge_schema": edge_schema,
"node_attribute_types": node_attribute_types,
"edge_attribute_types": edge_attribute_types,
}
handle_json_string = json.dumps(handle)
return base64.b64encode(handle_json_string.encode("utf-8")).decode("utf-8")
# In the analytical engine, we assume label ids of vertex entries are continuous
# from zero, and property ids of each label are also continuous from zero.
# When transforming the schema to MaxGraph style, we gather all property names and
# unique them, assign each name an id (its index in the vector), then preserve a
# vector<int> for each label that stores the mapping from original id to
# transformed id.
def to_maxgraph_schema(gsa_schema_json):
gsa_schema = json.loads(gsa_schema_json)
prop_set = set()
vertex_label_num = 0
for item in gsa_schema["types"]:
item["id"] = int(item["id"])
if item["type"] == "VERTEX":
vertex_label_num += 1
for prop in item["propertyDefList"]:
prop["id"] = int(prop["id"])
prop_set.add(prop["name"])
prop_list = sorted(list(prop_set))
mg_schema = copy.deepcopy(gsa_schema)
for item in mg_schema["types"]:
if item["propertyDefList"] == "":
item["propertyDefList"] = []
if item["type"] == "VERTEX":
for prop in item["propertyDefList"]:
prop["id"] = 1 + prop_list.index(prop["name"])
elif item["type"] == "EDGE":
item["id"] = vertex_label_num + item["id"]
for prop in item["propertyDefList"]:
prop["id"] = 1 + prop_list.index(prop["name"])
return json.dumps(mg_schema)
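# Illustrative sketch (not part of the original module): a minimal, hypothetical
# input for `to_maxgraph_schema`. Vertex property ids are rewritten to 1-based
# indices into the globally sorted property-name list, and edge label ids are
# shifted past the number of vertex labels.
def _example_to_maxgraph_schema():
    gsa_schema = {
        "types": [
            {"id": 0, "type": "VERTEX", "label": "person",
             "propertyDefList": [{"id": 0, "name": "age"}, {"id": 1, "name": "name"}]},
            {"id": 0, "type": "EDGE", "label": "knows", "propertyDefList": []},
        ],
        "partitionNum": 4,
    }
    return to_maxgraph_schema(json.dumps(gsa_schema))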
def check_argument(condition, message=None):
if not condition:
if message is None:
message = "in '%s'" % inspect.stack()[1].code_context[0]
raise ValueError(f"Check failed: {message}")
def find_java():
java_exec = ""
if "JAVA_HOME" in os.environ:
java_exec = os.path.expandvars("$JAVA_HOME/bin/java")
if not java_exec:
java_exec = shutil.which("java")
if not java_exec:
raise RuntimeError("java command not found.")
return java_exec
def get_java_version():
java_exec = find_java()
pattern = r'"(\d+\.\d+\.\d+).*"'
version = subprocess.check_output([java_exec, "-version"], stderr=subprocess.STDOUT)
return re.search(pattern, version.decode("utf-8")).groups()[0]
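# Illustrative sketch (not part of the original module): the version regex used
# by `get_java_version`, applied to a typical `java -version` banner.
def _example_parse_java_version_banner():
    banner = 'openjdk version "11.0.2" 2019-01-15'
    return re.search(r'"(\d+\.\d+\.\d+).*"', banner).groups()[0]  # "11.0.2"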
def check_gremlin_server_ready(endpoint):
from gremlin_python.driver.client import Client
if "MY_POD_NAME" in os.environ:
# inner kubernetes env
if endpoint == "localhost" or endpoint == "127.0.0.1":
            # currently used on macOS with a docker-desktop kubernetes cluster,
            # whose external ip is 'localhost' when the service type is 'LoadBalancer'
return True
client = Client(f"ws://{endpoint}/gremlin", "g")
error_message = ""
begin_time = time.time()
while True:
try:
client.submit("g.V().limit(1)").all().result()
except Exception as e:
error_message = str(e)
else:
client.close()
return True
time.sleep(3)
if time.time() - begin_time > INTERAVTIVE_INSTANCE_TIMEOUT_SECONDS:
client.close()
raise TimeoutError(f"Gremlin check query failed: {error_message}")
|
py | 7dfef52717c55676af47645d0e17115684774087 | #! /usr/bin/env python3
# coding=utf-8
# This code is licensed under a non-commercial license.
import argparse
import csv
import json
import math
import numpy as np
import os
import time
import torch
import torch.nn.functional as F
import torch.optim
import torch.optim as optim
import torch.utils.data as data
from nltk.tokenize.treebank import TreebankWordDetokenizer
from torchtext import data as torchtext_data
from torchtext import datasets
from tqdm import tqdm, trange
from transformers import BertTokenizer, BertModel
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from pplm_classification_head import ClassificationHead
torch.manual_seed(0)
np.random.seed(0)
EPSILON = 1e-10
example_sentence = "This is incredible! I love it, this is the best chicken I have ever had."
max_length_seq = 128
class Discriminator(torch.nn.Module):
"""Transformer encoder followed by a Classification Head"""
def __init__(
self,
class_size=None,
pretrained_model="gpt2-medium",
classifier_head=None,
cached_mode=False,
device='cpu'
):
super(Discriminator, self).__init__()
if pretrained_model.startswith("gpt2"):
self.tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model)
self.encoder = GPT2LMHeadModel.from_pretrained(pretrained_model)
self.embed_size = self.encoder.transformer.config.hidden_size
elif pretrained_model.startswith("bert"):
self.tokenizer = BertTokenizer.from_pretrained(pretrained_model)
self.encoder = BertModel.from_pretrained(pretrained_model)
self.embed_size = self.encoder.config.hidden_size
elif ("finetune" in pretrained_model):
            # presume a finetuned bert-base-uncased model
self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
self.encoder = BertModel.from_pretrained(pretrained_model, output_hidden_states = False)
self.embed_size = self.encoder.config.hidden_size
if classifier_head:
self.classifier_head = classifier_head
else:
if not class_size:
raise ValueError("must specify class_size")
self.classifier_head = ClassificationHead(
class_size=class_size,
embed_size=self.embed_size
)
self.cached_mode = cached_mode
self.device = device
def get_classifier(self):
return self.classifier_head
def train_custom(self):
for param in self.encoder.parameters():
param.requires_grad = False
self.classifier_head.train()
def avg_representation(self, x):
mask = x.ne(0).unsqueeze(2).repeat(
1, 1, self.embed_size
).float().to(self.device).detach()
if hasattr(self.encoder, 'transformer'):
# for gpt2
hidden, _ = self.encoder.transformer(x)
else:
# for bert
hidden, _ = self.encoder(x)
masked_hidden = hidden * mask
avg_hidden = torch.sum(masked_hidden, dim=1) / (
torch.sum(mask, dim=1).detach() + EPSILON
)
return avg_hidden
def forward(self, x):
if self.cached_mode:
avg_hidden = x.to(self.device)
else:
avg_hidden = self.avg_representation(x.to(self.device))
logits = self.classifier_head(avg_hidden)
probs = F.log_softmax(logits, dim=-1)
return probs
def predict(self, input_sentence):
input_t = self.tokenizer.encode(input_sentence)
input_t = torch.tensor([input_t], dtype=torch.long, device=self.device)
if self.cached_mode:
input_t = self.avg_representation(input_t)
log_probs = self(input_t).data.cpu().numpy().flatten().tolist()
prob = [math.exp(log_prob) for log_prob in log_probs]
return prob
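# Illustrative sketch (not part of the original script): constructing a
# discriminator and scoring a sentence. The class size and device below are
# hypothetical; pretrained weights are fetched by the transformers library.
def _example_discriminator_predict():
    discriminator = Discriminator(
        class_size=2, pretrained_model="gpt2-medium", device="cpu"
    )
    # Returns one probability per class for the input sentence.
    return discriminator.predict(example_sentence)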
class Dataset(data.Dataset):
def __init__(self, X, y):
"""Reads source and target sequences from txt files."""
self.X = X
self.y = y
def __len__(self):
return len(self.X)
def __getitem__(self, index):
"""Returns one data pair (source and target)."""
data = {}
data["X"] = self.X[index]
data["y"] = self.y[index]
return data
def collate_fn(data):
def pad_sequences(sequences):
lengths = [len(seq) for seq in sequences]
padded_sequences = torch.zeros(
len(sequences),
max(lengths)
).long() # padding value = 0
for i, seq in enumerate(sequences):
end = lengths[i]
padded_sequences[i, :end] = seq[:end]
return padded_sequences, lengths
item_info = {}
for key in data[0].keys():
item_info[key] = [d[key] for d in data]
x_batch, _ = pad_sequences(item_info["X"])
y_batch = torch.tensor(item_info["y"], dtype=torch.long)
return x_batch, y_batch
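# Illustrative sketch (not part of the original script): what `collate_fn`
# produces for a toy batch of variable-length sequences (0 is the padding value).
def _example_collate_fn():
    batch = [
        {"X": torch.tensor([5, 6, 7]), "y": 1},
        {"X": torch.tensor([8]), "y": 0},
    ]
    x_batch, y_batch = collate_fn(batch)
    # x_batch == tensor([[5, 6, 7], [8, 0, 0]]); y_batch == tensor([1, 0])
    return x_batch, y_batch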
def cached_collate_fn(data):
item_info = {}
for key in data[0].keys():
item_info[key] = [d[key] for d in data]
x_batch = torch.cat(item_info["X"], 0)
y_batch = torch.tensor(item_info["y"], dtype=torch.long)
return x_batch, y_batch
def train_epoch(data_loader, discriminator, optimizer,
epoch=0, log_interval=10, device='cpu'):
samples_so_far = 0
discriminator.train_custom()
for batch_idx, (input_t, target_t) in enumerate(data_loader):
input_t, target_t = input_t.to(device), target_t.to(device)
optimizer.zero_grad()
output_t = discriminator(input_t)
loss = F.nll_loss(output_t, target_t)
loss.backward(retain_graph=True)
optimizer.step()
samples_so_far += len(input_t)
if batch_idx % log_interval == 0:
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch + 1,
samples_so_far, len(data_loader.dataset),
100 * samples_so_far / len(data_loader.dataset), loss.item()
)
)
def evaluate_performance(data_loader, discriminator, device='cpu'):
discriminator.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for input_t, target_t in data_loader:
input_t, target_t = input_t.to(device), target_t.to(device)
output_t = discriminator(input_t)
# sum up batch loss
test_loss += F.nll_loss(output_t, target_t, reduction="sum").item()
# get the index of the max log-probability
pred_t = output_t.argmax(dim=1, keepdim=True)
correct += pred_t.eq(target_t.view_as(pred_t)).sum().item()
test_loss /= len(data_loader.dataset)
accuracy = correct / len(data_loader.dataset)
print(
"Performance on test set: "
"Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)".format(
test_loss, correct, len(data_loader.dataset),
100. * accuracy
)
)
return test_loss, accuracy
def predict(input_sentence, model, classes, cached=False, device='cpu'):
input_t = model.tokenizer.encode(input_sentence)
input_t = torch.tensor([input_t], dtype=torch.long, device=device)
if cached:
input_t = model.avg_representation(input_t)
log_probs = model(input_t).data.cpu().numpy().flatten().tolist()
# print("Input sentence:", input_sentence)
print("Predictions:", ", ".join(
"{}: {:.4f}".format(c, math.exp(log_prob)) for c, log_prob in
zip(classes, log_probs)
))
def get_cached_data_loader(dataset, batch_size, discriminator,
shuffle=False, device='cpu'):
data_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=batch_size,
collate_fn=collate_fn)
xs = []
ys = []
for batch_idx, (x, y) in enumerate(tqdm(data_loader, ascii=True)):
with torch.no_grad():
x = x.to(device)
avg_rep = discriminator.avg_representation(x).cpu().detach()
avg_rep_list = torch.unbind(avg_rep.unsqueeze(1))
xs += avg_rep_list
ys += y.cpu().numpy().tolist()
data_loader = torch.utils.data.DataLoader(
dataset=Dataset(xs, ys),
batch_size=batch_size,
shuffle=shuffle,
collate_fn=cached_collate_fn)
return data_loader
def get_idx2class(dataset_fp, label_col = 1):
classes = set()
with open(dataset_fp) as f:
csv_reader = csv.reader(f, delimiter="\t")
for row in tqdm(csv_reader, ascii=True):
if row:
classes.add(row[label_col])
return sorted(classes)
def get_generic_dataset(dataset_fp, tokenizer, device,
idx2class=None, add_eos_token=False, label_col = 1, text_col = 0):
if not idx2class:
idx2class = get_idx2class(dataset_fp)
class2idx = {c: i for i, c in enumerate(idx2class)}
x = []
y = []
with open(dataset_fp) as f:
csv_reader = csv.reader(f, delimiter="\t")
for i, row in enumerate(tqdm(csv_reader, ascii=True)):
if row:
label = row[label_col]
text = row[text_col]
try:
seq = tokenizer.encode(text)
if (len(seq) < max_length_seq):
if add_eos_token:
seq = [50256] + seq
seq = torch.tensor(
seq,
device=device,
dtype=torch.long
)
else:
print(
"Line {} is longer than maximum length {}".format(
i, max_length_seq
))
continue
x.append(seq)
y.append(class2idx[label])
except:
print("Error tokenizing line {}, skipping it".format(i))
pass
return Dataset(x, y)
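# Illustrative sketch (not part of the original script): with the default column
# indices (text_col=0, label_col=1), each TSV row is read as "<text>\t<label>".
# The file name below is hypothetical.
def _example_load_generic_dataset(tokenizer, device):
    # e.g. a row in my_dataset.tsv: "I loved this movie\tpositive"
    return get_generic_dataset("my_dataset.tsv", tokenizer, device)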
def train_discriminator(
dataset,
dataset_fp=None,
pretrained_model="gpt2-medium",
epochs=10,
learning_rate=0.0001,
batch_size=64,
log_interval=10,
save_model=False,
cached=False,
no_cuda=False,
output_fp='.'
):
device = "cuda" if torch.cuda.is_available() and not no_cuda else "cpu"
# If bert then do not add? Why?
add_eos_token = pretrained_model.startswith("gpt2")
if save_model:
if not os.path.exists(output_fp):
os.makedirs(output_fp)
classifier_head_meta_fp = os.path.join(
output_fp, "{}_classifier_head_meta.json".format(dataset)
)
classifier_head_fp_pattern = os.path.join(
output_fp, "{}_classifier_head_epoch".format(dataset) + "_{}.pt"
)
print("Preprocessing {} dataset...".format(dataset))
start = time.time()
if dataset == "SST":
idx2class = ["positive", "negative", "very positive", "very negative",
"neutral"]
class2idx = {c: i for i, c in enumerate(idx2class)}
discriminator = Discriminator(
class_size=len(idx2class),
pretrained_model=pretrained_model,
cached_mode=cached,
device=device
).to(device)
text = torchtext_data.Field()
label = torchtext_data.Field(sequential=False)
train_data, val_data, test_data = datasets.SST.splits(
text,
label,
fine_grained=True,
train_subtrees=True,
)
x = []
y = []
for i in trange(len(train_data), ascii=True):
seq = TreebankWordDetokenizer().detokenize(
vars(train_data[i])["text"]
)
seq = discriminator.tokenizer.encode(seq)
if add_eos_token:
seq = [50256] + seq
seq = torch.tensor(seq, device=device, dtype=torch.long)
x.append(seq)
y.append(class2idx[vars(train_data[i])["label"]])
train_dataset = Dataset(x, y)
test_x = []
test_y = []
for i in trange(len(test_data), ascii=True):
seq = TreebankWordDetokenizer().detokenize(
vars(test_data[i])["text"]
)
seq = discriminator.tokenizer.encode(seq)
if add_eos_token:
seq = [50256] + seq
seq = torch.tensor(seq, device=device, dtype=torch.long)
test_x.append(seq)
test_y.append(class2idx[vars(test_data[i])["label"]])
test_dataset = Dataset(test_x, test_y)
discriminator_meta = {
"class_size": len(idx2class),
"embed_size": discriminator.embed_size,
"pretrained_model": pretrained_model,
"class_vocab": class2idx,
"default_class": 2,
}
elif dataset == "clickbait":
idx2class = ["non_clickbait", "clickbait"]
class2idx = {c: i for i, c in enumerate(idx2class)}
discriminator = Discriminator(
class_size=len(idx2class),
pretrained_model=pretrained_model,
cached_mode=cached,
device=device
).to(device)
with open("datasets/clickbait/clickbait.txt") as f:
data = []
for i, line in enumerate(f):
try:
data.append(eval(line))
except:
print("Error evaluating line {}: {}".format(
i, line
))
continue
x = []
y = []
with open("datasets/clickbait/clickbait.txt") as f:
for i, line in enumerate(tqdm(f, ascii=True)):
try:
d = eval(line)
seq = discriminator.tokenizer.encode(d["text"])
if len(seq) < max_length_seq:
if add_eos_token:
seq = [50256] + seq
seq = torch.tensor(
seq, device=device, dtype=torch.long
)
else:
print("Line {} is longer than maximum length {}".format(
i, max_length_seq
))
continue
x.append(seq)
y.append(d["label"])
except:
print("Error evaluating / tokenizing"
" line {}, skipping it".format(i))
pass
full_dataset = Dataset(x, y)
train_size = int(0.9 * len(full_dataset))
test_size = len(full_dataset) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(
full_dataset, [train_size, test_size]
)
discriminator_meta = {
"class_size": len(idx2class),
"embed_size": discriminator.embed_size,
"pretrained_model": pretrained_model,
"class_vocab": class2idx,
"default_class": 1,
}
elif dataset == "toxic":
idx2class = ["non_toxic", "toxic"]
class2idx = {c: i for i, c in enumerate(idx2class)}
discriminator = Discriminator(
class_size=len(idx2class),
pretrained_model=pretrained_model,
cached_mode=cached,
device=device
).to(device)
x = []
y = []
with open("datasets/toxic/toxic_train.txt") as f:
for i, line in enumerate(tqdm(f, ascii=True)):
try:
d = eval(line)
seq = discriminator.tokenizer.encode(d["text"])
if len(seq) < max_length_seq:
if add_eos_token:
seq = [50256] + seq
seq = torch.tensor(
seq, device=device, dtype=torch.long
)
else:
print("Line {} is longer than maximum length {}".format(
i, max_length_seq
))
continue
x.append(seq)
y.append(int(np.sum(d["label"]) > 0))
except:
print("Error evaluating / tokenizing"
" line {}, skipping it".format(i))
pass
full_dataset = Dataset(x, y)
train_size = int(0.9 * len(full_dataset))
test_size = len(full_dataset) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(
full_dataset, [train_size, test_size]
)
discriminator_meta = {
"class_size": len(idx2class),
"embed_size": discriminator.embed_size,
"pretrained_model": pretrained_model,
"class_vocab": class2idx,
"default_class": 0,
}
else: # if dataset == "generic":
# This assumes the input dataset is a TSV with the following structure:
# class \t text
if dataset_fp is None:
raise ValueError("When generic dataset is selected, "
"dataset_fp needs to be specified aswell.")
idx2class = get_idx2class(dataset_fp)
discriminator = Discriminator(
class_size=len(idx2class),
pretrained_model=pretrained_model,
cached_mode=cached,
device=device
).to(device)
full_dataset = get_generic_dataset(
dataset_fp, discriminator.tokenizer, device,
idx2class=idx2class, add_eos_token=add_eos_token
)
train_size = int(0.9 * len(full_dataset))
test_size = len(full_dataset) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(
full_dataset,
[train_size, test_size]
)
discriminator_meta = {
"class_size": len(idx2class),
"embed_size": discriminator.embed_size,
"pretrained_model": pretrained_model,
"class_vocab": {c: i for i, c in enumerate(idx2class)},
"default_class": 0,
}
end = time.time()
print("Preprocessed {} data points".format(
len(train_dataset) + len(test_dataset))
)
print("Data preprocessing took: {:.3f}s".format(end - start))
if cached:
print("Building representation cache...")
start = time.time()
train_loader = get_cached_data_loader(
train_dataset, batch_size, discriminator,
shuffle=True, device=device
)
test_loader = get_cached_data_loader(
test_dataset, batch_size, discriminator, device=device
)
end = time.time()
print("Building representation cache took: {:.3f}s".format(end - start))
else:
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True,
collate_fn=collate_fn)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
collate_fn=collate_fn)
if save_model:
with open(classifier_head_meta_fp, "w") as meta_file:
json.dump(discriminator_meta, meta_file)
optimizer = optim.Adam(discriminator.parameters(), lr=learning_rate)
test_losses = []
test_accuracies = []
for epoch in range(epochs):
start = time.time()
print("\nEpoch", epoch + 1)
train_epoch(
discriminator=discriminator,
data_loader=train_loader,
optimizer=optimizer,
epoch=epoch,
log_interval=log_interval,
device=device
)
test_loss, test_accuracy = evaluate_performance(
data_loader=test_loader,
discriminator=discriminator,
device=device
)
end = time.time()
print("Epoch took: {:.3f}s".format(end - start))
test_losses.append(test_loss)
test_accuracies.append(test_accuracy)
# print("\nExample prediction")
# predict(example_sentence, discriminator, idx2class,
# cached=cached, device=device)
if save_model:
# torch.save(discriminator.state_dict(),
# "{}_discriminator_{}.pt".format(
# args.dataset, epoch + 1
# ))
torch.save(discriminator.get_classifier().state_dict(),
classifier_head_fp_pattern.format(epoch + 1))
min_loss = float("inf")
min_loss_epoch = 0
max_acc = 0.0
max_acc_epoch = 0
print("Test performance per epoch")
print("epoch\tloss\tacc")
for e, (loss, acc) in enumerate(zip(test_losses, test_accuracies)):
print("{}\t{}\t{}".format(e + 1, loss, acc))
if loss < min_loss:
min_loss = loss
min_loss_epoch = e + 1
if acc > max_acc:
max_acc = acc
max_acc_epoch = e + 1
print("Min loss: {} - Epoch: {}".format(min_loss, min_loss_epoch))
print("Max acc: {} - Epoch: {}".format(max_acc, max_acc_epoch))
return discriminator, discriminator_meta
def load_classifier_head(weights_path, meta_path, device='cpu'):
with open(meta_path, 'r', encoding="utf8") as f:
meta_params = json.load(f)
classifier_head = ClassificationHead(
class_size=meta_params['class_size'],
embed_size=meta_params['embed_size']
).to(device)
classifier_head.load_state_dict(
torch.load(weights_path, map_location=device))
classifier_head.eval()
return classifier_head, meta_params
def load_discriminator(weights_path, meta_path, device='cpu'):
classifier_head, meta_param = load_classifier_head(
weights_path, meta_path, device
)
discriminator = Discriminator(
pretrained_model=meta_param['pretrained_model'],
classifier_head=classifier_head,
cached_mode=False,
device=device
)
return discriminator, meta_param
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Train a discriminator on top of GPT-2 representations")
parser.add_argument("--dataset", type=str, default="SST",
choices=("SST", "clickbait", "toxic", "generic"),
help="dataset to train the discriminator on."
"In case of generic, the dataset is expected"
"to be a TSBV file with structure: class \\t text")
parser.add_argument("--dataset_fp", type=str, default="",
help="File path of the dataset to use. "
"Needed only in case of generic datadset")
parser.add_argument("--pretrained_model", type=str, default="bert-base-uncased",
help="Pretrained model to use as encoder")
parser.add_argument("--epochs", type=int, default=3, metavar="N",
help="Number of training epochs")
parser.add_argument("--learning_rate", type=float, default=0.001,
help="Learnign rate")
parser.add_argument("--batch_size", type=int, default=64, metavar="N",
help="input batch size for training (default: 64)")
parser.add_argument("--log_interval", type=int, default=10, metavar="N",
help="how many batches to wait before logging training status")
parser.add_argument("--save_model", action="store_true",
help="whether to save the model")
parser.add_argument("--cached", action="store_true",
help="whether to cache the input representations")
parser.add_argument("--no_cuda", action="store_true",
help="use to turn off cuda")
parser.add_argument("--output_fp", default=".",
help="path to save the output to")
args = parser.parse_args()
train_discriminator(**(vars(args)))
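    # Example invocation (a sketch only; the script file name and the paths below are
    # illustrative assumptions, not taken from this file):
    #   python train_discriminator.py --dataset generic --dataset_fp data.tsv \
    #       --pretrained_model gpt2-medium --epochs 3 --save_model --output_fp out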
|
py | 7dfef53d7fc74335cda1b39c5fd8cb411c41b42e | import numpy as np
class Node:
def __init__(self, X, y, depth):
self.left = None
self.right = None
self.min_error = np.inf
self.split_col_idx = None
self.num_rows = X.shape[0]
self.num_cols = X.shape[1]
self.split_val = None
self.depth = depth + 1
self.split(X, y)
def split(self, X, y):
if len(X) == 1:
return None
for i in range(self.num_cols):
self.calculate_split_idx(X[:, i], y, i)
if self.split_col_idx is None:
return None
X_left = X[self.filt]
y_left = y[self.filt]
self.left = Node(X_left, y_left, self.depth)
X_right = X[~self.filt]
y_right = y[~self.filt]
self.right = Node(X_right, y_right, self.depth)
def calculate_split_idx(self, X, y, var_idx):
for x in X:
filt = X <= x
y_left = y[filt]
y_right = y[~filt]
if len(y_right) == 0:
continue
error = ((y_left - y_left.mean()) ** 2).sum() + ((y_right - y_right.mean()) ** 2).sum()
if error < self.min_error:
self.min_error = error
self.split_col_idx = var_idx
self.split_val = x
self.filt = filt
class DecisionTree:
def __init__(self, max_depth=None):
self.max_depth = max_depth
def fit(self, X, y):
self.tree = Node(X, y, -1)
def predict(self, X):
pass
def score(self, X):
pass
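# A minimal usage sketch with toy data (the arrays below are illustrative
# assumptions; predict/score are still stubs, so only the split structure
# built by fit() is inspected):
if __name__ == "__main__":
    X_demo = np.array([[1.0], [2.0], [3.0], [4.0]])
    y_demo = np.array([1.0, 1.0, 10.0, 10.0])
    demo_tree = DecisionTree(max_depth=2)
    demo_tree.fit(X_demo, y_demo)
    # the root node stores the column index and threshold that minimise squared error
    print(demo_tree.tree.split_col_idx, demo_tree.tree.split_val)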
|
py | 7dfef569f4d4fcc0049681ea92159812805e9a72 | """Test artefacts config tool."""
import unittest
import unittest.mock as mock
from pathlib import Path
from click.testing import CliRunner
from tests.test_utils import run_in_test_environment, UnitTestFixtures
from varats.data.discover_reports import initialize_reports
from varats.paper_mgmt.artefacts import Artefact
from varats.paper_mgmt.paper_config import get_paper_config, load_paper_config
from varats.plots.discover_plots import initialize_plots
from varats.table.table import Table
from varats.tables.discover_tables import initialize_tables
from varats.tools import driver_artefacts
from varats.utils.settings import vara_cfg
def _mock_table(table: Table):
(Path(table.table_kwargs["table_dir"]) / table.table_file_name()).touch()
class TestDriverArtefacts(unittest.TestCase):
"""Tests for the driver_artefacts module."""
@classmethod
def setUp(cls):
"""Setup artefacts file from yaml doc."""
initialize_reports()
initialize_tables()
initialize_plots()
@run_in_test_environment(UnitTestFixtures.PAPER_CONFIGS)
@mock.patch('varats.table.tables.build_table', side_effect=_mock_table)
# pylint: disable=unused-argument
def test_artefacts_generate(self, build_tables):
"""Test whether `vara-art generate` generates all expected files."""
# setup config
vara_cfg()['paper_config']['current_config'] = "test_artefacts_driver"
load_paper_config()
artefacts = get_paper_config().get_all_artefacts()
base_output_dir = Artefact.base_output_dir()
# vara-art generate
runner = CliRunner()
result = runner.invoke(driver_artefacts.main, ["generate"])
self.assertEqual(0, result.exit_code, result.exception)
# check that overview files are present
self.assertTrue((base_output_dir / "index.html").exists())
self.assertTrue((base_output_dir / "plot_matrix.html").exists())
# check that artefact files are present
for artefact in artefacts:
self.__check_artefact_files_present(artefact)
def __check_artefact_files_present(self, artefact: Artefact):
for file_info in artefact.get_artefact_file_infos():
            self.assertTrue(
                (artefact.output_dir / file_info.file_name).exists()
            )
@run_in_test_environment(UnitTestFixtures.PAPER_CONFIGS)
def test_artefacts_list(self):
"""Test whether `vara-art list` produces expected output."""
# setup config
vara_cfg()['paper_config']['current_config'] = "test_artefacts_driver"
load_paper_config()
# vara-art generate
runner = CliRunner()
result = runner.invoke(driver_artefacts.main, ["list"])
self.assertEqual(0, result.exit_code, result.exception)
self.assertEqual(
"Paper Config Overview [plot]\nCorrelation Table [table]\n",
result.stdout
)
@run_in_test_environment(UnitTestFixtures.PAPER_CONFIGS)
def test_artefacts_show(self):
"""Test whether `vara-art show` produces expected output."""
# setup config
vara_cfg()['paper_config']['current_config'] = "test_artefacts_driver"
load_paper_config()
expected = r"""Artefact 'Paper Config Overview':
artefact_type: plot
artefact_type_version: 2
dry_run: false
file_type: png
name: Paper Config Overview
output_dir: .
plot_config: {}
plot_generator: pc-overview-plot
report_type: EmptyReport
view: false
"""
# vara-art generate
runner = CliRunner()
result = runner.invoke(
driver_artefacts.main, ["show", "Paper Config Overview"]
)
self.assertEqual(0, result.exit_code, result.exception)
self.assertEqual(expected, result.stdout)
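    # CLI equivalents of the invocations exercised by this test class (as driven
    # through CliRunner above):
    #   $ vara-art generate
    #   $ vara-art list
    #   $ vara-art show "Paper Config Overview"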
|
py | 7dfef59113c201a18f6258f5cb85d63d9b8d529d | """ A PostController Module """
from masonite.controllers import Controller
from masonite.request import Request
from app.Post import Post
class PostController(Controller):
"""Class Docstring Description
"""
def __init__(self,request:Request):
self.request = request
def show(self):
"""Show a single resource listing
ex. Model.find('id')
Get().route("/show", PostController)
"""
id= self.request.param("id")
return Post.where("id",id).get()
def index(self):
"""Show several resource listings
ex. Model.all()
Get().route("/index", PostController)
"""
return Post.all()
def create(self):
"""Show form to create new resource listings
ex. Get().route("/create", PostController)
"""
title = self.request.input("title")
body = self.request.input("body")
body = Post.create({"title":title,"body":body})
return body
def update(self):
"""Edit an existing resource listing
ex. Post target to update new Model
Post().route("/update", PostController)
"""
id=self.request.param("id")
title = self.request.input("title")
body = self.request.input("body")
Post.where("id",id).update({"title":title,"body":body})
return Post.where("id",id).get()
def destroy(self):
"""Delete an existing resource listing
ex. Delete().route("/destroy", PostController)
"""
id=self.request.param("id")
title=self.request.input("title")
body = self.request.input("body")
post = Post.where("id",id).get()
Post.where("id",id).delete()
return post
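    # Example route registrations following the pattern shown in the docstrings
    # above (route paths are illustrative; Masonite also accepts the string
    # "Controller@method" form):
    #
    #     Get().route("/post/@id", "PostController@show")
    #     Get().route("/posts", "PostController@index")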
|
py | 7dfef8c3e7baf1465b1b6673dbc5f8ae0c873ac0 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "movierecommendation.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
py | 7dfef8d3718a638e5d60b3774bcc1116c06a00b1 | #!/usr/bin/env python3
# programming-with-guis
# Ex. 3.13
import os, pickle
from random import shuffle, randint
from guizero import App, Box, ListBox, Picture, PushButton, Text, Window, warn
def load_emojis():
emojis_dir = "./assets/emojis"
emojis = [os.path.join(emojis_dir, f) for f in os.listdir(emojis_dir) if os.path.isfile(os.path.join(emojis_dir, f))]
shuffle(emojis)
return emojis
def match_emoji(matched):
global in_a_row, player_num
if matched:
in_a_row += 1
result.value = "Correct"
result.text_color = "green"
players[player_num]["score"].value = int(players[player_num]["score"].value) + 1
if in_a_row == 3:
# add a bonus point
#players[player_num]["score"].value = int(players[player_num]["score"].value) + 1
#app.info("info", "Bonus points for 3 in a row!")
# add extra time
extra_time = randint(1,10)
timer.value = int(timer.value) + extra_time
result.value = "Extra " + str(extra_time) + " secs for 3 in a row!"
result.text_color = "blue"
in_a_row = 0
else:
result.value = "Incorrect"
result.text_color = "red"
players[player_num]["score"].value = int(players[player_num]["score"].value) - 1
if in_a_row > 0:
in_a_row -= 1
setup_round()
def setup_round():
pictures_box.visible = True
buttons_box.visible = True
players[player_num]["round"].value = int(players[player_num]["round"].value) + 1
emojis = load_emojis()
for picture in pictures:
picture.image = emojis.pop()
for button in buttons:
button.image = emojis.pop()
        # set the command to be called and pass False, as these emoji won't be the matching ones
button.update_command(match_emoji, [False])
# choose a new emoji
matched_emoji = emojis.pop()
# select a number at random
random_picture = randint(0,8)
# change the image feature of the Picture with this index in the list of pictures to the new emoji
pictures[random_picture].image = matched_emoji
random_button = randint(0,8)
print(random_button)
# change the image feature of the PushButton with this index in the list of buttons to the new emoji
buttons[random_button].image = matched_emoji
# set the command to be called and pass True, as this is the matching emoji
buttons[random_button].update_command(match_emoji, [True])
def reset_game():
global player_num, in_a_row
in_a_row = 0
result.value = ""
players[player_num]["round"].value = "0"
players[player_num]["score"].value = "0"
setup_round()
timer.value = "20"
timer.repeat(1000, counter)
return
def counter():
global hi_scores, player_num
timer.value = int(timer.value) - 1
if int(timer.value) == 0:
timer.cancel(counter)
# add score to high scores
hi_scores.append(tuple((players[player_num]["label"].value, int(players[player_num]["score"].value))))
print(hi_scores)
save_hiscores()
# check who won
msg = "Game over!"
if int(players["1"]["round"].value) > 1 and int(players["2"]["round"].value) > 1:
print(players["1"]["score"].value, players["2"]["score"].value)
if int(players["1"]["score"].value) > int(players["2"]["score"].value):
msg = "Player 1 won!"
elif int(players["2"]["score"].value) > int(players["1"]["score"].value):
msg = "Player 2 won!"
else:
msg = "It's a draw"
result.value = msg
result.text_color = "black"
app.info("Info", msg)
# prompt = app.yesno(msg, "Do you want to play again?")
# if prompt == True:
# reset_game()
def start_game(num):
global player_num
player_num = num
player_name = app.question("Player " + num, "Enter your name")
if player_name is not None:
players[num]["label"].value = player_name
if num == "1":
spacer1.value = "<<<"
spacer2.value = " "
elif num == "2":
spacer1.value = " "
spacer2.value = ">>>"
else:
spacer1.value = " "
spacer2.value = " "
reset_game()
return
def load_hiscores():
if os.path.exists("hiscores.dat"):
fh = open('hiscores.dat', 'rb')
# deserialize data
hi_scores = pickle.load(fh)
fh.close()
else:
# populate with sample scores
hi_scores = [
("Carol Danvers", 10),
("Bruce Banner", 9),
("Thor Odinson", 8),
("Stephen Strange", 7),
("Wanda Maximoff", 6),
("Tony Stark", 5),
("Steve Rogers", 4),
("Peter Parker", 3),
("Scott Lang", 2),
("Natasha Romanoff", 1)
]
#print("load_hiscores:", hi_scores, type(hi_scores))
return hi_scores
def save_hiscores():
global hi_scores
#print("save_hiscores:", hi_scores)
fh = open('hiscores.dat', 'wb')
# serialize data
pickle.dump(hi_scores, fh)
fh.close()
return
# sort list of tuples by second element
# https://www.afternerd.com/blog/python-sort-list/#sort-tuples-second-element
def sort_hiscores(t):
return t[1]
def show_hiscores():
global hi_scores
hi_title = Text(wnd_hiscores, font=font_default, align="top", text="Top Players", size=18)
hi_box = Box(wnd_hiscores, align="top", layout="grid")
#hi_box.bg = "#CCCCCC"
scores = []
hi_scores.sort(reverse=True, key=sort_hiscores)
#print(hi_scores)
for x in range(0, len(hi_scores)):
if x < 20:
hi_line = Text(hi_box, font=font_default, text=str(x+1)+". ", align="left", grid=[0,x])
scores.append(hi_line)
for y in range(0, len(hi_scores[x])):
#print(x,y,hi_scores[x][y])
hi_line = Text(hi_box, font=font_default, text=hi_scores[x][y], align="left", grid=[y+1,x])
scores.append(hi_line)
else:
break
wnd_hiscores.show(wait=True) # modal window
return
def show_help():
wnd_help.show(wait=True) # modal window
return
# clear the contents since the high score list is appended each time the
# window is opened
def clear_hiscores_window():
for child in wnd_hiscores.children:
#print(child, type(child))
if hasattr(child, 'children'):
if child.children:
for grandchild in child.children:
grandchild.destroy()
child.destroy()
for child in wnd_hiscores.children:
child.destroy()
wnd_hiscores.hide()
return
def quit_app():
confirm = app.yesno("Confirm", "Do you want to exit?")
if confirm == True:
app.destroy()
app.display()
return
#---[ Main ]------------------------------------------------------------------
app = App("Emoji Match", layout="auto", width=700, height=480)
font_default = "JetBrains Mono"
in_a_row = 0
player_num = ""
# Set up High Score window
wnd_hiscores = Window(app, title="High Scores", bg="#DDDDDD", width=480, height=480, visible=False)
wnd_hiscores.when_closed = clear_hiscores_window
hi_scores = load_hiscores()
# Set up Help window
wnd_help = Window(app, title="Help", bg="#DDDDDD", width=700, height=480, visible=False)
help_box = Box(wnd_help, align="top", width="fill", layout="grid")
help_sec1_title = Text(help_box, font=font_default, text="How to play:", align="left", size=16, grid=[0,0])
help_sec1_line1 = Text(help_box, font=font_default, text=" ◉ Each player takes turns by clicking either the 'Player 1' or 'Player 2' buttons", align="left", grid=[0,1])
help_sec2_title = Text(help_box, font=font_default, text="Scoring:", align="left", size=16, grid=[0,2])
help_sec2_line1 = Text(help_box, font=font_default, text=" ◉ +1 pt for correct matches", align="left", grid=[0,3])
help_sec2_line2 = Text(help_box, font=font_default, text=" ◉ –1 pt for incorrect guesses", align="left", grid=[0,4])
help_sec2_line3 = Text(help_box, font=font_default, text=" ◉ For 3 matches in a row, extra time is added, randomly between 1-10 secs.", align="left", grid=[0,5])
# Set up scoreboard
scoreboard = Box(app, align="top", width="fill", layout="grid")
scoreboard.bg = "#C0C0C0"
players = {"1": {}, "2": {}}
players["1"]["label"] = Text(scoreboard, font=font_default, text="Player 1:", size=12, grid=[0,0,2,1])
spacer1 = Text(scoreboard, font=font_default, text=" ", color="#990000", size=20, width=20, grid=[2,0,1,3])
timer_lbl = Text(scoreboard, font=font_default, text="Timer: ", size=12, grid=[3,0])
spacer2 = Text(scoreboard, font=font_default, text=" ", color="#990000", size=20, width=20, grid=[4,0,1,3])
players["2"]["label"] = Text(scoreboard, font=font_default, text="Player 2:", size=12, grid=[5,0,2,1])
players["1"]["score_label"] = Text(scoreboard, font=font_default, text="Score", size=10, grid=[0,1])
players["1"]["round_label"] = Text(scoreboard, font=font_default, text="Round", size=10, grid=[1,1])
timer = Text(scoreboard, font=font_default, text="0", size=20, grid=[3,1])
players["2"]["score_label"] = Text(scoreboard, font=font_default, text="Score", size=10, grid=[5,1])
players["2"]["round_label"] = Text(scoreboard, font=font_default, text="Round", size=10, grid=[6,1])
players["1"]["score"] = Text(scoreboard, font=font_default, text="0", size=20, grid=[0,2])
players["1"]["round"] = Text(scoreboard, font=font_default, text="0", size=20, grid=[1,2])
players["2"]["score"] = Text(scoreboard, font=font_default, text="0", size=20, grid=[5,2])
players["2"]["round"] = Text(scoreboard, font=font_default, text="0", size=20, grid=[6,2])
# Set up game grids
game_box = Box(app, align="top", layout="grid")
result = Text(game_box, font=font_default, text="Ready?", size=24, grid=[0,0,3,1])
pictures_box = Box(game_box, layout="grid", grid=[0,1], visible=False)
spacer3 = Text(game_box, font=font_default, text=" ", width=10, grid=[1,1])
buttons_box = Box(game_box, layout="grid", grid=[2,1], visible=False)
instructions = Text(game_box, font=font_default, text="Choose an option:", size=10, grid=[0,2,3,1])
print("Font:", result.font)
buttons = []
pictures = []
for x in range(0,3):
for y in range(0,3):
picture = Picture(pictures_box, grid=[x,y])
pictures.append(picture)
button = PushButton(buttons_box, grid=[x,y])
buttons.append(button)
# Set up player controls
controls_box = Box(app, align="top", layout="grid")
btn_player1 = PushButton(controls_box, text="1 Player", image="./assets/btn_player1.gif", grid=[0,0], command=start_game, args=["1"])
btn_player2 = PushButton(controls_box, text="2 Players", image="./assets/btn_player2.gif", grid=[1,0], command=start_game, args=["2"])
btn_hiscores = PushButton(controls_box, text="High Scores", image="./assets/btn_hiscores.gif", grid=[2,0], command=show_hiscores)
btn_help = PushButton(controls_box, text="Help", image="./assets/btn_help.gif", grid=[3,0], command=show_help)
btn_exit = PushButton(controls_box, text="Exit", image="./assets/btn_exit.gif", grid=[4,0], command=quit_app)
app.display()
|
py | 7dfef8ed1e75c413aeb51dbbfcc5916d73adbd25 | # 345. Reverse Vowels of a String
# Write a function that takes a string as input and reverses only the vowels of the string.
# Example 1:
# Given s = "hello", return "holle".
# Example 2:
# Given s = "leetcode", return "leotcede".
# Note:
# The vowels do not include the letter "y".
class Solution(object):
def reverseVowels(self, s):
"""
:type s: str
:rtype: str
"""
v = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']
vowels = []
indices = []
res = []
for i, ch in enumerate(s):
if ch in v:
vowels.append(ch)
indices.append(i)
else:
res.append(ch)
vowels.reverse()
for (i, x) in zip(indices, vowels):
res.insert(i, x)
return ''.join(res)
def reverseVowels(self, s):
"""
:type s: str
:rtype: str
"""
vowels = [i for i in range(len(s)) if s[i] in 'aeiouAEIOU']
s1 = list(s)
for i in range(len(vowels)//2):
s1[vowels[i]], s1[vowels[-i-1]] = s1[vowels[-i-1]], s1[vowels[i]]
return ''.join(s1)
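# Quick check against the examples in the problem statement above:
#   Solution().reverseVowels("hello")    -> "holle"
#   Solution().reverseVowels("leetcode") -> "leotcede"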
|
py | 7dfef96492f549bfe515d20c866d4000c60cdc8a | """
Transpose of a matrix
Example 1:
Input: [[1,2,3],[4,5,6],[7,8,9]]
Output: [[1,4,7],[2,5,8],[3,6,9]]
Example 2:
Input: [[1,2,3],[4,5,6]]
Output: [[1,4],[2,5],[3,6]]
"""
class Solution():
def transpose(self, A):
R, C = len(A), len(A[0])
ans = [[None] * R for _ in range(C)]
for r, row in enumerate(A):
for c, val in enumerate(row):
ans[c][r] = val
return ans
if __name__ == '__main__':
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
s = Solution()
s.transpose(a) |
py | 7dfef98991c28ec4a6549046bd7d13bbbda4f9a9 |
# third-party
import numpy
# this package
from tuna import BaseClass
class UniformConvolution(object):
"""
A bounded uniform convolver
"""
def __init__(self, half_range, lower_bound, upper_bound):
"""
UniformConvolution constructor
:param:
- `half_range`: (-half_range, half_range) bounds the noise
- `lower_bound`: minimum value to allow in convolved arrays
- `upper_bound`: maximum value to allow in convolved array
"""
self.half_range = half_range
self.lower_bound = lower_bound
self.upper_bound = upper_bound
return
def __call__(self, vector):
"""
adds random noise, bounded by the lower and upper bound values
"""
tweak = numpy.random.uniform(low=-self.half_range,
high=self.half_range,
size=len(vector))
tweaked = vector + tweak
return tweaked.clip(self.lower_bound, self.upper_bound)
# end UniformConvolution
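# A minimal usage sketch for UniformConvolution (the bounds and the input vector
# are illustrative assumptions, not values used elsewhere in this package):
#
#     jitter = UniformConvolution(half_range=0.5, lower_bound=0.0, upper_bound=10.0)
#     tweaked = jitter(numpy.array([1.0, 5.0, 9.9]))
#     # each entry gets uniform noise from (-0.5, 0.5) and is clipped to [0.0, 10.0]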
class GaussianConvolution(BaseClass):
"""
A Tweak that uses the Normal distribution
"""
def __init__(self, lower_bound, upper_bound,
location=0, scale=1, number_type=float,
scalar_multiplier=1):
"""
GaussianConvolution constructor
:param:
- `lower_bound`: minimum value to allow in tweaked arrays
- `upper_bound`: maximum value to allow in tweaked arrays
- `location`: Center of the distribution
- `scale`: Spread of the distribution
- `number_type`: type to cast random vector to
- `scalar_multiplier`: value to multiply tweak by
"""
super(GaussianConvolution, self).__init__()
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.location = location
self.scale = scale
self.number_type = number_type
self.scalar_multiplier = scalar_multiplier
return
def set_seed(self, seed):
"""
Sets the numpy random seed (for reproducibility)
"""
numpy.random.seed(seed)
return
def __call__(self, vector):
"""
Adds normally distributed random noise to the vector
Casts the tweak values to type specified by self.number_type
:return: vector + noise, bounded by upper and lower bounds
"""
tweak = numpy.random.normal(loc=self.location,
scale=self.scale,
size=len(vector)).astype(self.number_type)
tweaked = vector + self.scalar_multiplier * tweak
tweaked = tweaked.clip(self.lower_bound, self.upper_bound)
#self.logger.debug("Tweaked: {0}".format(tweaked))
return tweaked
# class GaussianConvolution
class GaussianConvolutionConstants(object):
__slots__ = ()
# options
lower_bound = 'lower_bound'
upper_bound = 'upper_bound'
location = 'location'
scale = 'scale'
number_type = 'number_type'
# defaults
location_default = 0
scale_default = 1
number_type_default='float'
class GaussianConvolutionBuilder(BaseClass):
"""
builds GaussianConvolutions
"""
def __init__(self, configuration, section):
"""
GaussianConvolutionBuilder constructor
:param:
- `configuration`: configuration map
- `section`: name of section with needed options
"""
self.configuration = configuration
self.section = section
self._product = None
return
@property
def product(self):
"""
A built GaussianConvolution
"""
if self._product is None:
config = self.configuration
constants = GaussianConvolutionConstants
num_type = config.get(section=self.section,
option=constants.number_type,
optional=False)
if num_type.lower().startswith('int'):
number_type = int
else:
number_type = float
location=config.get_float(section=self.section,
option=constants.location,
optional=True,
default=constants.location_default)
scale=config.get_float(section=self.section,
option=constants.scale,
optional=True,
default=constants.scale_default)
lower_bound=config.get_float(section=self.section,
option=constants.lower_bound)
upper_bound=config.get_float(section=self.section,
option=constants.upper_bound)
self._product = GaussianConvolution(location=location,
scale=scale,
lower_bound=lower_bound,
upper_bound=upper_bound,
number_type=number_type)
return self._product
class XYConvolution(BaseClass):
"""
A Tweak that uses the Normal distribution
"""
def __init__(self, x_min, x_max, y_min, y_max,
location=0, scale=1, number_type=float,
scalar_multiplier=1):
"""
        XYConvolution constructor
:param:
- `x_min`: minimum value for x-value
- `x_max`: maximum value for x-value
- `y_min`: minimum value for y-value
- `y_max`: maximum value for y-value
- `location`: Center of the distribution
- `scale`: Spread of the distribution
- `number_type`: type to cast random vector to
- `scalar_multiplier`: value to multiply tweak by
"""
super(XYConvolution, self).__init__()
self.x_min = x_min
self.x_max = x_max
self.y_min = y_min
self.y_max = y_max
self.location = location
self.scale = scale
self.number_type = number_type
self.scalar_multiplier = scalar_multiplier
return
def set_seed(self, seed):
"""
Sets the numpy random seed (for reproducibility)
"""
numpy.random.seed(seed)
return
def __call__(self, vector):
"""
Adds normally distributed random noise to the vector
Casts the tweak values to type specified by self.number_type
:return: vector + noise, bounded by upper and lower bounds
"""
x = self.number_type(numpy.random.normal(loc=self.location,
scale=self.scale))
y = self.number_type(numpy.random.normal(loc=self.location,
scale=self.scale))
tweaked = vector + self.scalar_multiplier * numpy.array([x, y])
        # the x and y axes can have different bounds (a non-square grid),
        # so numpy's 'clip' with a single pair of bounds is not used here
x = max(self.x_min, tweaked[0])
x = min(self.x_max, x)
y = max(self.y_min, tweaked[1])
y = min(self.y_max, y)
tweaked = numpy.array([x, y])
#self.logger.debug("Tweaked: {0}".format(tweaked))
return tweaked
# class XYConvolution
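# A minimal usage sketch for XYConvolution on a non-square grid (bounds, scale,
# and the starting point are illustrative assumptions):
#
#     tweak_xy = XYConvolution(x_min=0, x_max=100, y_min=0, y_max=50,
#                              location=0, scale=5, number_type=int)
#     new_point = tweak_xy(numpy.array([10, 20]))
#     # the (x, y) point gets N(0, 5) noise and is clamped to the grid bounds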
class XYConvolutionConstants(object):
__slots__ = ()
# options
x_min = 'x_min'
x_max = 'x_max'
y_min = 'y_min'
y_max = 'y_max'
location = 'location'
scale = 'scale'
number_type = 'number_type'
# defaults
location_default = 0
scale_default = 1
number_type_default='float'
class XYConvolutionBuilder(BaseClass):
"""
builds XYConvolutions
"""
def __init__(self, configuration, section):
"""
XYConvolutionBuilder constructor
:param:
- `configuration`: configuration map
- `section`: name of section with needed options
"""
self.configuration = configuration
self.section = section
self._product = None
return
@property
def product(self):
"""
A built XYConvolution
"""
if self._product is None:
config = self.configuration
constants = XYConvolutionConstants
num_type = config.get(section=self.section,
option=constants.number_type,
optional=False)
if num_type.lower().startswith('int'):
number_type = int
else:
number_type = float
location=config.get_float(section=self.section,
option=constants.location,
optional=True,
default=constants.location_default)
scale=config.get_float(section=self.section,
option=constants.scale,
optional=True,
default=constants.scale_default)
x_min=config.get_float(section=self.section,
option=constants.x_min)
x_max=config.get_float(section=self.section,
option=constants.x_max)
y_min=config.get_float(section=self.section,
option=constants.y_min)
y_max=config.get_float(section=self.section,
option=constants.y_max)
self._product = XYConvolution(location=location,
scale=scale,
x_min=x_min,
x_max=x_max,
y_min=y_min,
y_max=y_max,
number_type=number_type)
return self._product
if __name__ == '__builtin__':
gaussian = GaussianConvolution(lower_bound=-100,
upper_bound=100)
candidate = numpy.array([5,6])
    print(gaussian(candidate))
# change the candidate, move the mean up, widen the distribution
gaussian.scale = 20
gaussian.location = 5
candidate = numpy.array([0, 1, 2])
gaussian.number_type = int
    print(gaussian(candidate))
# clip the values so it's right-skewed
gaussian.lower_bound = 5
gaussian.upper_bound = 100
    print(gaussian(candidate))
|
py | 7dfef98e56cac69ea2b77aea18ade1261cd41ddb | # -*- coding: utf-8 -*-
from FINDER_torch import FINDER
def main():
dqn = FINDER()
dqn.Train()
if __name__=="__main__":
main()
|
py | 7dfefa4ed8d5d6288d71b47e6f94e31d462598a2 | import pandas as pd  # data processing
from termcolor import colored as cl  # coloured text output
def info(datne):
    print(cl("\n\nInformation about the file " + datne, 'green', attrs = ['reverse']))
    # # import the data
    # # df is short for Data Frame,
    # # the core data structure of the Pandas library
    df = pd.read_csv(datne)
    # # look at the first 5 rows of data
    print(cl("\nFirst 5 rows", attrs = ['bold']))
    print(df.head(5))
    # # look at the column names
    print(cl("\nColumn names", attrs = ['bold']))
    print(df.columns)
    # # look at summary statistics
    print(cl("\nStatistics", attrs = ['bold']))
    print(df.describe())
    # print(cl("\nData types", attrs = ['bold']))
    # look at the data types
    print(cl(df.dtypes, attrs = ['bold']))
    # # show where the data has missing values
    print(cl("\nMissing values in the data", attrs = ['bold']))
    print(df.isnull().sum())
datne1 = 'dati/auto_simple.csv'
datne2 = 'dati/auto_imports.csv'
datne3 = 'dati/ss_auto.csv'
# show information about the data in the file
info(datne3) |
py | 7dfefb8758d37060d0c258c9e4b764722cd95f5b | #!/usr/bin/env python3
# Tests check_format.py. This must be run in a context where the clang
# version and settings are compatible with the one in the Envoy
# docker. Normally this is run via check_format_test.sh, which
# executes it under docker.
from __future__ import print_function
from run_command import runCommand
import argparse
import logging
import os
import shutil
import sys
import tempfile
curr_dir = os.path.dirname(os.path.realpath(__file__))
tools = os.path.dirname(curr_dir)
src = os.path.join(tools, 'testdata', 'check_format')
check_format = sys.executable + " " + os.path.join(curr_dir, 'check_format.py')
errors = 0
# Runs the 'check_format' operation on the specified file and returns the
# command run, the exit status, and the combined stdout/stderr to the caller.
def runCheckFormat(operation, filename):
command = check_format + " " + operation + " " + filename
status, stdout, stderr = runCommand(command)
return (command, status, stdout + stderr)
def getInputFile(filename, extra_input_files=None):
files_to_copy = [filename]
if extra_input_files is not None:
files_to_copy.extend(extra_input_files)
for f in files_to_copy:
infile = os.path.join(src, f)
directory = os.path.dirname(f)
if not directory == '' and not os.path.isdir(directory):
os.makedirs(directory)
shutil.copyfile(infile, f)
return filename
# Attempts to fix a file, returning a 5-tuple: the command, the input file name,
# the output file name, the error status code, and the captured stdout as an
# array of lines.
def fixFileHelper(filename, extra_input_files=None):
command, status, stdout = runCheckFormat(
"fix", getInputFile(filename, extra_input_files=extra_input_files))
infile = os.path.join(src, filename)
return command, infile, filename, status, stdout
# Attempts to fix a file, returning the status code and the generated output.
# If the fix was successful, the diff is returned as a string-array. If the file
# was not fixable, the error-messages are returned as a string-array.
def fixFileExpectingSuccess(file, extra_input_files=None):
command, infile, outfile, status, stdout = fixFileHelper(file,
extra_input_files=extra_input_files)
if status != 0:
print("FAILED:")
emitStdoutAsError(stdout)
return 1
status, stdout, stderr = runCommand('diff ' + outfile + ' ' + infile + '.gold')
if status != 0:
print("FAILED:")
emitStdoutAsError(stdout + stderr)
return 1
return 0
def fixFileExpectingNoChange(file):
command, infile, outfile, status, stdout = fixFileHelper(file)
if status != 0:
return 1
status, stdout, stderr = runCommand('diff ' + outfile + ' ' + infile)
if status != 0:
logging.error(file + ': expected file to remain unchanged')
return 1
return 0
def emitStdoutAsError(stdout):
logging.error("\n".join(stdout))
def expectError(filename, status, stdout, expected_substring):
if status == 0:
logging.error("%s: Expected failure `%s`, but succeeded" % (filename, expected_substring))
return 1
for line in stdout:
if expected_substring in line:
return 0
logging.error("%s: Could not find '%s' in:\n" % (filename, expected_substring))
emitStdoutAsError(stdout)
return 1
def fixFileExpectingFailure(filename, expected_substring):
command, infile, outfile, status, stdout = fixFileHelper(filename)
return expectError(filename, status, stdout, expected_substring)
def checkFileExpectingError(filename, expected_substring, extra_input_files=None):
command, status, stdout = runCheckFormat(
"check", getInputFile(filename, extra_input_files=extra_input_files))
return expectError(filename, status, stdout, expected_substring)
def checkAndFixError(filename, expected_substring, extra_input_files=None):
errors = checkFileExpectingError(filename,
expected_substring,
extra_input_files=extra_input_files)
errors += fixFileExpectingSuccess(filename, extra_input_files=extra_input_files)
return errors
def checkToolNotFoundError():
# Temporarily change PATH to test the error about lack of external tools.
oldPath = os.environ["PATH"]
os.environ["PATH"] = "/sbin:/usr/sbin"
clang_format = os.getenv("CLANG_FORMAT", "clang-format-9")
# If CLANG_FORMAT points directly to the binary, skip this test.
if os.path.isfile(clang_format) and os.access(clang_format, os.X_OK):
os.environ["PATH"] = oldPath
return 0
errors = checkFileExpectingError("no_namespace_envoy.cc", "Command %s not found." % clang_format)
os.environ["PATH"] = oldPath
return errors
def checkUnfixableError(filename, expected_substring):
errors = checkFileExpectingError(filename, expected_substring)
errors += fixFileExpectingFailure(filename, expected_substring)
return errors
def checkFileExpectingOK(filename):
command, status, stdout = runCheckFormat("check", getInputFile(filename))
if status != 0:
logging.error("Expected %s to have no errors; status=%d, output:\n" % (filename, status))
emitStdoutAsError(stdout)
return status + fixFileExpectingNoChange(filename)
def runChecks():
errors = 0
# The following error is the error about unavailability of external tools.
errors += checkToolNotFoundError()
# The following errors can be detected but not fixed automatically.
errors += checkUnfixableError("no_namespace_envoy.cc",
"Unable to find Envoy namespace or NOLINT(namespace-envoy)")
errors += checkUnfixableError("mutex.cc", "Don't use <mutex> or <condition_variable*>")
errors += checkUnfixableError("condition_variable.cc",
"Don't use <mutex> or <condition_variable*>")
errors += checkUnfixableError("condition_variable_any.cc",
"Don't use <mutex> or <condition_variable*>")
errors += checkUnfixableError("shared_mutex.cc", "shared_mutex")
errors += checkUnfixableError("shared_mutex.cc", "shared_mutex")
real_time_inject_error = (
"Don't reference real-world time sources from production code; use injection")
errors += checkUnfixableError("real_time_source.cc", real_time_inject_error)
errors += checkUnfixableError("real_time_system.cc", real_time_inject_error)
errors += checkUnfixableError("system_clock.cc", real_time_inject_error)
errors += checkUnfixableError("steady_clock.cc", real_time_inject_error)
errors += checkUnfixableError(
"unpack_to.cc", "Don't use UnpackTo() directly, use MessageUtil::unpackTo() instead")
errors += checkUnfixableError("condvar_wait_for.cc", real_time_inject_error)
errors += checkUnfixableError("sleep.cc", real_time_inject_error)
errors += checkUnfixableError("std_atomic_free_functions.cc", "std::atomic_*")
errors += checkUnfixableError("std_get_time.cc", "std::get_time")
errors += checkUnfixableError("no_namespace_envoy.cc",
"Unable to find Envoy namespace or NOLINT(namespace-envoy)")
errors += checkUnfixableError("bazel_tools.BUILD", "unexpected @bazel_tools reference")
errors += checkUnfixableError("proto.BUILD", "unexpected direct external dependency on protobuf")
errors += checkUnfixableError("proto_deps.cc", "unexpected direct dependency on google.protobuf")
errors += checkUnfixableError("attribute_packed.cc", "Don't use __attribute__((packed))")
errors += checkUnfixableError("designated_initializers.cc", "Don't use designated initializers")
errors += checkUnfixableError("elvis_operator.cc", "Don't use the '?:' operator")
errors += checkUnfixableError("testing_test.cc",
"Don't use 'using testing::Test;, elaborate the type instead")
errors += checkUnfixableError(
"serialize_as_string.cc",
"Don't use MessageLite::SerializeAsString for generating deterministic serialization")
errors += checkUnfixableError(
"version_history.rst",
"Version history line malformed. Does not match VERSION_HISTORY_NEW_LINE_REGEX in "
"check_format.py")
errors += checkUnfixableError(
"counter_from_string.cc",
"Don't lookup stats by name at runtime; use StatName saved during construction")
errors += checkUnfixableError(
"gauge_from_string.cc",
"Don't lookup stats by name at runtime; use StatName saved during construction")
errors += checkUnfixableError(
"histogram_from_string.cc",
"Don't lookup stats by name at runtime; use StatName saved during construction")
errors += checkUnfixableError(
"regex.cc", "Don't use std::regex in code that handles untrusted input. Use RegexMatcher")
errors += checkUnfixableError(
"grpc_init.cc",
"Don't call grpc_init() or grpc_shutdown() directly, instantiate Grpc::GoogleGrpcContext. " +
"See #8282")
errors += checkUnfixableError(
"grpc_shutdown.cc",
"Don't call grpc_init() or grpc_shutdown() directly, instantiate Grpc::GoogleGrpcContext. " +
"See #8282")
errors += checkUnfixableError("clang_format_double_off.cc", "clang-format nested off")
errors += checkUnfixableError("clang_format_trailing_off.cc", "clang-format remains off")
errors += checkUnfixableError("clang_format_double_on.cc", "clang-format nested on")
errors += fixFileExpectingFailure(
"api/missing_package.proto",
"Unable to find package name for proto file: ./api/missing_package.proto")
errors += checkUnfixableError("proto_enum_mangling.cc",
"Don't use mangled Protobuf names for enum constants")
# The following files have errors that can be automatically fixed.
errors += checkAndFixError("over_enthusiastic_spaces.cc",
"./over_enthusiastic_spaces.cc:3: over-enthusiastic spaces")
errors += checkAndFixError("extra_enthusiastic_spaces.cc",
"./extra_enthusiastic_spaces.cc:3: over-enthusiastic spaces")
errors += checkAndFixError("angle_bracket_include.cc",
"envoy includes should not have angle brackets")
errors += checkAndFixError("proto_style.cc", "incorrect protobuf type reference")
errors += checkAndFixError("long_line.cc", "clang-format check failed")
errors += checkAndFixError("header_order.cc", "header_order.py check failed")
errors += checkAndFixError("clang_format_on.cc",
"./clang_format_on.cc:7: over-enthusiastic spaces")
# Validate that a missing license is added.
errors += checkAndFixError("license.BUILD", "envoy_build_fixer check failed")
# Validate that an incorrect license is replaced and reordered.
errors += checkAndFixError("update_license.BUILD", "envoy_build_fixer check failed")
# Validate that envoy_package() is added where there is an envoy_* rule occurring.
errors += checkAndFixError("add_envoy_package.BUILD", "envoy_build_fixer check failed")
  # Validate that we don't add envoy_package() when there is no envoy_* rule.
errors += checkFileExpectingOK("skip_envoy_package.BUILD")
# Validate that we clean up gratuitous blank lines.
errors += checkAndFixError("canonical_spacing.BUILD", "envoy_build_fixer check failed")
# Validate that unused loads are removed.
errors += checkAndFixError("remove_unused_loads.BUILD", "envoy_build_fixer check failed")
# Validate that API proto package deps are computed automagically.
errors += checkAndFixError("canonical_api_deps.BUILD",
"envoy_build_fixer check failed",
extra_input_files=[
"canonical_api_deps.cc", "canonical_api_deps.h",
"canonical_api_deps.other.cc"
])
errors += checkAndFixError("bad_envoy_build_sys_ref.BUILD", "Superfluous '@envoy//' prefix")
errors += checkAndFixError("proto_format.proto", "clang-format check failed")
errors += checkAndFixError(
"cpp_std.cc",
"term absl::make_unique< should be replaced with standard library term std::make_unique<")
errors += checkFileExpectingOK("real_time_source_override.cc")
errors += checkFileExpectingOK("time_system_wait_for.cc")
errors += checkFileExpectingOK("clang_format_off.cc")
return errors
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='tester for check_format.py.')
parser.add_argument('--log', choices=['INFO', 'WARN', 'ERROR'], default='INFO')
args = parser.parse_args()
logging.basicConfig(format='%(message)s', level=args.log)
# Now create a temp directory to copy the input files, so we can fix them
# without actually fixing our testdata. This requires chdiring to the temp
# directory, so it's annoying to comingle check-tests and fix-tests.
with tempfile.TemporaryDirectory() as tmp:
os.chdir(tmp)
errors = runChecks()
if errors != 0:
logging.error("%d FAILURES" % errors)
exit(1)
logging.warning("PASS")
|
py | 7dfefc6c37e04c56c10676abd66ef6dec58f45fa | #
# Copyright (C) 2021 Vaticle
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Iterator
from typedb.api.concept.type.type import Type, RemoteType
if TYPE_CHECKING:
from typedb.api.concept.type.relation_type import RelationType
from typedb.api.concept.type.thing_type import ThingType
from typedb.api.connection.transaction import TypeDBTransaction
class RoleType(Type, ABC):
def is_role_type(self) -> bool:
return True
@abstractmethod
def as_remote(self, transaction: "TypeDBTransaction") -> "RemoteRoleType":
pass
class RemoteRoleType(RemoteType, RoleType, ABC):
@abstractmethod
def get_supertype(self) -> RoleType:
pass
@abstractmethod
def get_supertypes(self) -> Iterator[RoleType]:
pass
@abstractmethod
def get_subtypes(self) -> Iterator[RoleType]:
pass
@abstractmethod
def get_relation_type(self) -> "RelationType":
pass
@abstractmethod
def get_relation_types(self) -> Iterator["RelationType"]:
pass
@abstractmethod
def get_players(self) -> Iterator["ThingType"]:
pass
|
py | 7dfefdefa653ba6eaee86af787a3832c649dcf0c | from typing import Optional
from typing import Text, Dict, Any
from tfx import types
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.types.component_spec import ChannelParameter
from tfx.types.component_spec import ComponentSpec
from tfx.types.component_spec import ExecutionParameter
from tfx.types.standard_artifacts import Examples, Model, Schema, \
ModelEvaluation, ModelBlessing
from zenml.components.evaluator import constants
from zenml.components.evaluator import executor
class ZenMLEvaluatorSpec(ComponentSpec):
PARAMETERS = {constants.SOURCE: ExecutionParameter(type=Text),
constants.ARGS: ExecutionParameter(Text)}
INPUTS = {
constants.EXAMPLES: ChannelParameter(type=Examples),
constants.MODEL: ChannelParameter(type=Model, optional=True),
constants.BASELINE_MODEL: ChannelParameter(type=Model, optional=True),
constants.SCHEMA: ChannelParameter(type=Schema, optional=True)
}
OUTPUTS = {
constants.EVALUATION: ChannelParameter(type=ModelEvaluation),
constants.BLESSING: ChannelParameter(type=ModelBlessing,
optional=True),
}
class Evaluator(base_component.BaseComponent):
"""
A new adapted version version of the TFX Evaluator component.
In contrast to the original evaluator component, it utilizes a ZenML
EvaluatorStep and allows the model agnostic evaluation.
"""
SPEC_CLASS = ZenMLEvaluatorSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
def __init__(
self,
source: Text,
source_args: Dict[Text, Any],
examples: types.Channel = None,
model: types.Channel = None,
baseline_model: Optional[types.Channel] = None,
blessing: Optional[types.Channel] = None,
output: Optional[types.Channel] = None,
schema: Optional[types.Channel] = None):
# Create the output artifact if not provided
evaluation = output or types.Channel(type=ModelEvaluation)
blessing = blessing or types.Channel(type=ModelBlessing)
# Create the spec
spec = ZenMLEvaluatorSpec(source=source,
args=source_args,
examples=examples,
model=model,
baseline_model=baseline_model,
blessing=blessing,
schema=schema,
evaluation=evaluation)
super(Evaluator, self).__init__(spec=spec)
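# A minimal construction sketch (the step source path, its arguments, and the
# upstream component names below are hypothetical placeholders):
#
#     evaluator = Evaluator(
#         source="my_pipeline.steps.evaluation.MyEvaluatorStep",
#         source_args={"metrics": ["accuracy"]},
#         examples=example_gen.outputs["examples"],
#         model=trainer.outputs["model"],
#     )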
|
py | 7dfefedbf718ba160caa9fbddffda8019a16268a | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: release-1.16
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V1ReplicationControllerCondition(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'last_transition_time': 'datetime',
'message': 'str',
'reason': 'str',
'status': 'str',
'type': 'str'
}
attribute_map = {
'last_transition_time': 'lastTransitionTime',
'message': 'message',
'reason': 'reason',
'status': 'status',
'type': 'type'
}
def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None): # noqa: E501
"""V1ReplicationControllerCondition - a model defined in OpenAPI""" # noqa: E501
self._last_transition_time = None
self._message = None
self._reason = None
self._status = None
self._type = None
self.discriminator = None
if last_transition_time is not None:
self.last_transition_time = last_transition_time
if message is not None:
self.message = message
if reason is not None:
self.reason = reason
self.status = status
self.type = type
@property
def last_transition_time(self):
"""Gets the last_transition_time of this V1ReplicationControllerCondition. # noqa: E501
The last time the condition transitioned from one status to another. # noqa: E501
:return: The last_transition_time of this V1ReplicationControllerCondition. # noqa: E501
:rtype: datetime
"""
return self._last_transition_time
@last_transition_time.setter
def last_transition_time(self, last_transition_time):
"""Sets the last_transition_time of this V1ReplicationControllerCondition.
The last time the condition transitioned from one status to another. # noqa: E501
:param last_transition_time: The last_transition_time of this V1ReplicationControllerCondition. # noqa: E501
:type: datetime
"""
self._last_transition_time = last_transition_time
@property
def message(self):
"""Gets the message of this V1ReplicationControllerCondition. # noqa: E501
A human readable message indicating details about the transition. # noqa: E501
:return: The message of this V1ReplicationControllerCondition. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this V1ReplicationControllerCondition.
A human readable message indicating details about the transition. # noqa: E501
:param message: The message of this V1ReplicationControllerCondition. # noqa: E501
:type: str
"""
self._message = message
@property
def reason(self):
"""Gets the reason of this V1ReplicationControllerCondition. # noqa: E501
The reason for the condition's last transition. # noqa: E501
:return: The reason of this V1ReplicationControllerCondition. # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this V1ReplicationControllerCondition.
The reason for the condition's last transition. # noqa: E501
:param reason: The reason of this V1ReplicationControllerCondition. # noqa: E501
:type: str
"""
self._reason = reason
@property
def status(self):
"""Gets the status of this V1ReplicationControllerCondition. # noqa: E501
Status of the condition, one of True, False, Unknown. # noqa: E501
:return: The status of this V1ReplicationControllerCondition. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1ReplicationControllerCondition.
Status of the condition, one of True, False, Unknown. # noqa: E501
:param status: The status of this V1ReplicationControllerCondition. # noqa: E501
:type: str
"""
if status is None:
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
@property
def type(self):
"""Gets the type of this V1ReplicationControllerCondition. # noqa: E501
Type of replication controller condition. # noqa: E501
:return: The type of this V1ReplicationControllerCondition. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1ReplicationControllerCondition.
Type of replication controller condition. # noqa: E501
:param type: The type of this V1ReplicationControllerCondition. # noqa: E501
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ReplicationControllerCondition):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
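# A minimal usage sketch (field values below are illustrative, not API defaults):
#
#     condition = V1ReplicationControllerCondition(
#         type="ReplicaFailure",
#         status="True",
#         reason="FailedCreate",
#         message="pods exceeded quota",
#     )
#     condition.to_dict()  # plain Python dict keyed by the attribute names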
|
py | 7dff0156ae2efb16e0ced6fed76fed4cb79a2d0d | # GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
class Component:
DESCRIPTION = "Update an existing ServiceNow CI record"
class Input:
SYSTEM_ID = "system_id"
TABLE = "table"
UPDATE_DATA = "update_data"
class Output:
SUCCESS = "success"
class UpdateCiInput(insightconnect_plugin_runtime.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"system_id": {
"type": "string",
"title": "System ID",
"description": "System ID of the CI record to update",
"order": 2
},
"table": {
"type": "string",
"title": "Table",
"description": "The ServiceNow table where the CI record will be updated",
"order": 1
},
"update_data": {
"type": "object",
"title": "Update Data",
"description": "JSON object containing the fields and values to perform a CI update",
"order": 3
}
},
"required": [
"system_id",
"table",
"update_data"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class UpdateCiOutput(insightconnect_plugin_runtime.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"success": {
"type": "boolean",
"title": "Success",
"description": "True if the update was successful",
"order": 1
}
},
"required": [
"success"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
|
py | 7dff01d9441c731cdc011f67431c8b46da539e2c | import numpy as np
import keras
import tensorflow as tf
from keras.models import Model, load_model
import keras.backend as K
from model import get_card_model
import os, pickle
from model import *
from datetime import date
l_exp = [0.00018, 0.00033, 0.00050, 0.00054, 0.00062, 0.00067, 0.00075, 0.00085, 0.00103, 0.00138, 0.00145, 0.00156, 0.00170, 0.00224, 0.00224, 0.00229, 0.00259, 0.00264, 0.00264, 0.00266, 0.00290, 0.00290, 0.00373, 0.00392, 0.00410, 0.00425, 0.00438, 0.00445, 0.00466, 0.00474, 0.00492, 0.00503, 0.00503, 0.00508, 0.00531, 0.00532, 0.00550, 0.00552, 0.00570, 0.00584, 0.00642, 0.00642, 0.00679, 0.00692, 0.00693, 0.00697, 0.00719, 0.00733, 0.00735, 0.00740, 0.00743, 0.00749, 0.00782, 0.00790, 0.00810, 0.00812, 0.00828, 0.00856, 0.00870, 0.00876, 0.00884, 0.00889, 0.00922, 0.00933, 0.00987, 0.00989, 0.00999, 0.01031, 0.01043, 0.01090, 0.01115, 0.01127, 0.01131, 0.01134, 0.01171, 0.01187, 0.01189, 0.01190, 0.01199, 0.01228, 0.01234, 0.01255, 0.01259, 0.01266, 0.01275, 0.01305, 0.01326, 0.01326, 0.01398, 0.01437, 0.01456, 0.01461, 0.01473, 0.01479, 0.01496, 0.01503, 0.01517, 0.01537, 0.01542, 0.01567, 0.01579, 0.01588, 0.01591, 0.01597, 0.01631, 0.01639, 0.01663, 0.01682, 0.01690, 0.01712, 0.01714, 0.01717, 0.01766, 0.01774, 0.01790, 0.01818, 0.01820, 0.01822, 0.01835, 0.01835, 0.01842, 0.01848, 0.01852, 0.01893, 0.01913, 0.01914, 0.01944, 0.01963, 0.01971, 0.02006, 0.02011, 0.02015, 0.02033, 0.02042, 0.02104, 0.02111, 0.02121, 0.02137, 0.02141, 0.02187, 0.02194, 0.02203, 0.02224, 0.02247, 0.02278, 0.02286, 0.02336, 0.02344, 0.02351, 0.02364, 0.02365, 0.02381, 0.02381, 0.02455, 0.02460, 0.02463, 0.02466, 0.02470, 0.02476, 0.02490, 0.02498, 0.02520, 0.02529, 0.02551, 0.02595, 0.02618, 0.02634, 0.02646, 0.02653, 0.02654, 0.02656, 0.02658, 0.02682, 0.02752, 0.02757, 0.02764, 0.02769, 0.02770, 0.02809, 0.02817, 0.02825, 0.02832, 0.02835, 0.02836, 0.02838, 0.02890, 0.02897, 0.02898, 0.02900, 0.02933, 0.02972, 0.02973, 0.02995, 0.02999, 0.03025, 0.03026, 0.03034, 0.03072, 0.03077, 0.03110, 0.03115, 0.03142, 0.03148, 0.03169, 0.03182, 0.03211, 0.03215, 0.03220, 0.03227, 0.03250, 0.03276, 0.03288, 0.03290, 0.03318, 0.03319, 0.03334, 0.03344, 0.03381, 0.03408, 0.03444, 0.03446, 0.03476, 0.03502, 0.03516, 0.03558, 0.03587, 0.03598, 0.03629, 0.03670, 0.03680, 0.03695, 0.03708, 0.03709, 0.03740, 0.03767, 0.03773, 0.03807, 0.03809, 0.03817, 0.03835, 0.03843, 0.03845, 0.03849, 0.03874, 0.03879, 0.03887, 0.03956, 0.03964, 0.03965, 0.04002, 0.04019, 0.04037, 0.04054, 0.04077, 0.04101, 0.04104, 0.04117, 0.04120, 0.04136, 0.04162, 0.04172, 0.04219, 0.04219, 0.04233, 0.04249, 0.04249, 0.04250, 0.04271, 0.04285, 0.04315, 0.04318, 0.04319, 0.04324, 0.04337, 0.04342, 0.04344, 0.04348, 0.04359, 0.04362, 0.04366, 0.04381, 0.04415, 0.04422, 0.04446, 0.04559, 0.04591, 0.04617, 0.04646, 0.04652, 0.04713, 0.04776, 0.04788, 0.04799, 0.04847, 0.04853, 0.04907, 0.05049, 0.05128, 0.05136, 0.05141, 0.05148, 0.05153, 0.05158, 0.05203, 0.05219, 0.05236, 0.05261, 0.05275, 0.05301, 0.05305, 0.05341, 0.05384, 0.05435, 0.05440, 0.05481, 0.05502, 0.05511, 0.05529, 0.05538, 0.05578, 0.05635, 0.05647, 0.05656, 0.05669, 0.05700, 0.05719, 0.05755, 0.05781, 0.05806, 0.05846, 0.05878, 0.05886, 0.05915, 0.05919, 0.05934, 0.05963, 0.05980, 0.06000, 0.06037, 0.06044, 0.06092, 0.06116, 0.06155, 0.06167, 0.06171, 0.06175, 0.06184, 0.06229, 0.06243, 0.06256, 0.06275, 0.06319, 0.06340, 0.06345, 0.06364, 0.06380, 0.06390, 0.06391, 0.06395, 0.06406, 0.06408, 0.06451, 0.06453, 0.06509, 0.06514, 0.06582, 0.06586, 0.06608, 0.06609, 0.06612, 0.06624, 0.06634, 0.06641, 0.06669, 0.06680, 0.06697, 0.06701, 0.06745, 0.06749, 0.06754, 0.06775, 0.06778, 0.06790, 0.06793, 0.06796, 0.06806, 0.06816, 0.06834, 0.06841, 0.06864, 0.06888, 0.06910, 0.06914, 0.06929, 
0.06929, 0.06930, 0.06943, 0.06945, 0.06977, 0.07034, 0.07068, 0.07069, 0.07081, 0.07108, 0.07113, 0.07123, 0.07131, 0.07133, 0.07134, 0.07191, 0.07217, 0.07228, 0.07243, 0.07347, 0.07365, 0.07368, 0.07373, 0.07383, 0.07385, 0.07445, 0.07446, 0.07478, 0.07503, 0.07556, 0.07562, 0.07575, 0.07581, 0.07629, 0.07636, 0.07667, 0.07685, 0.07703, 0.07789, 0.07790, 0.07837, 0.07848, 0.07947, 0.07982, 0.07993, 0.08068, 0.08082, 0.08110, 0.08115, 0.08159, 0.08264, 0.08278, 0.08329, 0.08355, 0.08366, 0.08367, 0.08417, 0.08419, 0.08419, 0.08475, 0.08485, 0.08504, 0.08522, 0.08532, 0.08540, 0.08550, 0.08555, 0.08573, 0.08597, 0.08608, 0.08641, 0.08685, 0.08705, 0.08732, 0.08741, 0.08769, 0.08771, 0.08820, 0.08834, 0.08855, 0.08896, 0.08899, 0.08917, 0.08924, 0.08924, 0.08926, 0.08945, 0.08982, 0.09027, 0.09043, 0.09080, 0.09104, 0.09161, 0.09201, 0.09201, 0.09264, 0.09280, 0.09299, 0.09374, 0.09420, 0.09426, 0.09459, 0.09465, 0.09493, 0.09509, 0.09512, 0.09551, 0.09552, 0.09620, 0.09626, 0.09643, 0.09759, 0.09772, 0.09772, 0.09839, 0.09848, 0.09848, 0.09859, 0.09861, 0.09871, 0.09937, 0.09948, 0.10015, 0.10035, 0.10038, 0.10070, 0.10081, 0.10098, 0.10101, 0.10110, 0.10144, 0.10195, 0.10241, 0.10272, 0.10316, 0.10349, 0.10393, 0.10396, 0.10419, 0.10427, 0.10494, 0.10497, 0.10499, 0.10520, 0.10520, 0.10530, 0.10575, 0.10576, 0.10581, 0.10591, 0.10647, 0.10649, 0.10737, 0.10791, 0.10794, 0.10824, 0.10855, 0.10876, 0.10883, 0.10924, 0.10993, 0.10997, 0.11029, 0.11029, 0.11061, 0.11106, 0.11126, 0.11185, 0.11191, 0.11199, 0.11212, 0.11270, 0.11305, 0.11315, 0.11315, 0.11354, 0.11432, 0.11477, 0.11480, 0.11587, 0.11594, 0.11597, 0.11618, 0.11667, 0.11670, 0.11710, 0.11747, 0.11775, 0.11786, 0.11798, 0.11821, 0.11851, 0.11901, 0.11909, 0.11915, 0.11995, 0.12029, 0.12048, 0.12056, 0.12056, 0.12171, 0.12198, 0.12220, 0.12245, 0.12254, 0.12355, 0.12480, 0.12494, 0.12543, 0.12546, 0.12573, 0.12577, 0.12629, 0.12641, 0.12693, 0.12724, 0.12726, 0.12814, 0.12822, 0.12840, 0.12850, 0.12893, 0.12953, 0.13016, 0.13103, 0.13116, 0.13125, 0.13134, 0.13146, 0.13243, 0.13266, 0.13373, 0.13391, 0.13393, 0.13444, 0.13453, 0.13463, 0.13612, 0.13667, 0.13729, 0.13770, 0.13814, 0.13830, 0.13886, 0.13887, 0.14018, 0.14041, 0.14085, 0.14130, 0.14135, 0.14149, 0.14338, 0.14343, 0.14343, 0.14382, 0.14392, 0.14414, 0.14436, 0.14480, 0.14622, 0.14648, 0.14741, 0.14751, 0.14843, 0.14914, 0.14916, 0.15002, 0.15031, 0.15071, 0.15089, 0.15261, 0.15269, 0.15276, 0.15346, 0.15419, 0.15441, 0.15449, 0.15460, 0.15469, 0.15547, 0.15585, 0.15604, 0.15630, 0.15726, 0.15733, 0.15782, 0.15829, 0.15863, 0.15902, 0.16016, 0.16052, 0.16078, 0.16080, 0.16105, 0.16123, 0.16213, 0.16344, 0.16359, 0.16416, 0.16420, 0.16444, 0.16451, 0.16477, 0.16484, 0.16558, 0.16578, 0.16670, 0.16691, 0.16706, 0.16744, 0.16762, 0.16829, 0.16847, 0.16855, 0.16861, 0.16916, 0.16941, 0.17016, 0.17056, 0.17115, 0.17125, 0.17192, 0.17240, 0.17240, 0.17291, 0.17309, 0.17336, 0.17392, 0.17441, 0.17459, 0.17575, 0.17584, 0.17596, 0.17611, 0.17772, 0.17856, 0.17880, 0.17900, 0.17918, 0.18094, 0.18204, 0.18205, 0.18252, 0.18316, 0.18333, 0.18372, 0.18373, 0.18376, 0.18603, 0.18621, 0.18629, 0.18687, 0.18742, 0.18922, 0.18931, 0.19010, 0.19055, 0.19137, 0.19137, 0.19152, 0.19244, 0.19284, 0.19348, 0.19403, 0.19454, 0.19471, 0.19490, 0.19503, 0.19681, 0.19722, 0.19834, 0.19936, 0.20010, 0.20218, 0.20262, 0.20304, 0.20341, 0.20376, 0.20494, 0.20504, 0.20524, 0.20528, 0.20552, 0.20615, 0.20621, 0.20670, 0.20753, 0.20828, 0.21067, 0.21167, 0.21188, 0.21321, 0.21412, 0.21487, 0.21691, 
0.21713, 0.21728, 0.21790, 0.21886, 0.21890, 0.21946, 0.21970, 0.22132, 0.22228, 0.22340, 0.22543, 0.22549, 0.22582, 0.22633, 0.22640, 0.22658, 0.22682, 0.22688, 0.22695, 0.22727, 0.22943, 0.23070, 0.23096, 0.23261, 0.23409, 0.23462, 0.23523, 0.23592, 0.23609, 0.23819, 0.23969, 0.24000, 0.24036, 0.24059, 0.24337, 0.24433, 0.24445, 0.24503, 0.24626, 0.24634, 0.24687, 0.24783, 0.24974, 0.25048, 0.25057, 0.25070, 0.25339, 0.25411, 0.25454, 0.25607, 0.25814, 0.25841, 0.25868, 0.25904, 0.25951, 0.26214, 0.26357, 0.26394, 0.26436, 0.26478, 0.26763, 0.26850, 0.26996, 0.27076, 0.27411, 0.27419, 0.27478, 0.27551, 0.27553, 0.27553, 0.27752, 0.27876, 0.27977, 0.28000, 0.28081, 0.28220, 0.28277, 0.28291, 0.28313, 0.28359, 0.28492, 0.28576, 0.28774, 0.28780, 0.28837, 0.28945, 0.28984, 0.29330, 0.29752, 0.29760, 0.29926, 0.30028, 0.30215, 0.30398, 0.30678, 0.30809, 0.30847, 0.30918, 0.30951, 0.31014, 0.31042, 0.31134, 0.31136, 0.31214, 0.31524, 0.31588, 0.31632, 0.31666, 0.32025, 0.32260, 0.32451, 0.32487, 0.32531, 0.32670, 0.32697, 0.32800, 0.32843, 0.33150, 0.33363, 0.33445, 0.33569, 0.33674, 0.33830, 0.33872, 0.33933, 0.34315, 0.34444, 0.34686, 0.34787, 0.35183, 0.35225, 0.35287, 0.35857, 0.35875, 0.36265, 0.36269, 0.36482, 0.37436, 0.37446, 0.37729, 0.37846, 0.37895, 0.38758, 0.39093, 0.39110, 0.39129, 0.39369, 0.39817, 0.39861, 0.40254, 0.40260, 0.40305, 0.40424, 0.40445, 0.40554, 0.41336, 0.41625, 0.41912, 0.42133, 0.42574, 0.42887, 0.42893, 0.43552, 0.43912, 0.44271, 0.45022, 0.45049, 0.45158, 0.45470, 0.45552, 0.45860, 0.46377, 0.46500, 0.46722, 0.47138, 0.47613, 0.48200, 0.49551, 0.50962, 0.52365, 0.52400, 0.53008, 0.53431, 0.53801, 0.53885, 0.53996, 0.54616, 0.55390, 0.55697, 0.56252, 0.56381, 0.57993, 0.58727, 0.60287, 0.61361, 0.64448, 0.65605, 0.66612, 0.66887, 0.68660, 0.68949, 0.69482, 0.70933, 0.75816, 0.78771, 0.80919, 0.84408, 0.86884, 0.96250, 0.97324, 1.00000]
primes = [11, 9433, 42533, 102931]
def prod(nss, nd):
return np.prod(nss[:nd])
def shrink(X, c, l):
    cm = int(l * (1 - c))
    idremove = []
    lenlost = 0.0
    i = 0
    while True:
        x = list(np.where(X[:, 0] == i)[0])
        if len(idremove) + len(x) < cm:
            idremove += x
            lenlost += len(x) * i
            i += 1
        else:
            lenlost += (cm - len(idremove)) * i
            idremove += x[:cm - len(idremove)]
            break
    X = np.delete(X, idremove, 0)
    del idremove[:]
    return X
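# Minimal illustration of shrink (made-up data, assuming X's first column holds an
# integer level/count): with c=0.5 and l=4 it drops the int(l*(1-c)) = 2 rows whose
# first-column value is smallest.
# >>> shrink(np.array([[0, 5], [0, 6], [1, 7], [2, 8]]), 0.5, 4)
# array([[1, 7],
#        [2, 8]])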
def rdn(date):
    # Rata Die day number (days since 0001-01-01). The divisions must be integer
    # divisions for the formula to be exact, e.g. rdn([1970, 1, 1]) == 719163.
    y = date[0]
    m = date[1]
    d = date[2]
    if m < 3:
        y -= 1
        m += 12
    return 365 * y + y // 4 - y // 100 + y // 400 + (153 * m - 457) // 5 + d - 306
def parse_date(s):
return list(map(int, s.split('-')))
def date_to_string(s):
return str(s.year) + '-' + str(s.month) + '-' + str(s.day)
def dif_str(str1, str2, type):
if type == "R":
vd = float(str1) - float(str2)
elif type == "D":
date1 = parse_date(str1)
date2 = parse_date(str2)
vd = (date(date1[0], date1[1], date1[2]) - date(date2[0], date2[1], date2[2])).days
else:
vd = 1
return vd
def parse_keys(keys, type):
if type == "R":
return np.array([float(key) for key in keys])
elif type == "D":
keys = [parse_date(key) for key in keys]
return np.array([(date(key[0], key[1], key[2]) - date(1900, 1, 1)).days for key in keys])
else:
return np.array(keys)
def extract_emb(iris_model, model_fnm, neb=128, ncd=0, nd=2):
vf = iris_model.get_layer('lambda_6').output
model_vf = Model(iris_model.layers[0].input, vf)
emb = {}
xs = np.zeros((1, ncd+2, nd + 1))
for id in range(neb * neb):
xs.fill(0)
xs[0, 0, 0] = 1
xs[0,-2:,0] = 1
for d in range(nd):
xs[0, 0, d + 1] = id % neb
id = int(id / neb)
xs[0, -2, :] = xs[0, 0, :]
xs[0, :, 1:] = np.maximum(xs[0, :, 1:], -1) + 1
v = model_vf.predict(xs)[0]
emb[','.join([str(int(s)) for s in xs[0,0,1:]])] = v
pickle.dump(emb, open('tmp/emb-' + os.path.splitext(os.path.basename(model_fnm))[0] + '.pkl', 'wb'))
return emb
|
py | 7dff02452188d4f23bc0b896edf5fa52fb65680b | from typing import Dict, List
from aioquic.h3.events import DataReceived, H3Event, Headers, HeadersReceived
from aioquic.quic.connection import QuicConnection
from aioquic.quic.events import QuicEvent, StreamDataReceived
H0_ALPN = ["hq-32", "hq-31", "hq-30", "hq-29", "hq-28", "hq-27"]
class H0Connection:
"""
An HTTP/0.9 connection object.
"""
def __init__(self, quic: QuicConnection):
self._buffer: Dict[int, bytes] = {}
self._headers_received: Dict[int, bool] = {}
self._is_client = quic.configuration.is_client
self._quic = quic
def handle_event(self, event: QuicEvent) -> List[H3Event]:
http_events: List[H3Event] = []
if isinstance(event, StreamDataReceived) and (event.stream_id % 4) == 0:
data = self._buffer.pop(event.stream_id, b"") + event.data
if not self._headers_received.get(event.stream_id, False):
if self._is_client:
http_events.append(
HeadersReceived(
headers=[], stream_ended=False, stream_id=event.stream_id
)
)
elif data.endswith(b"\r\n") or event.end_stream:
method, path = data.rstrip().split(b" ", 1)
http_events.append(
HeadersReceived(
headers=[(b":method", method), (b":path", path)],
stream_ended=False,
stream_id=event.stream_id,
)
)
data = b""
else:
# incomplete request, stash the data
self._buffer[event.stream_id] = data
return http_events
self._headers_received[event.stream_id] = True
http_events.append(
DataReceived(
data=data, stream_ended=event.end_stream, stream_id=event.stream_id
)
)
return http_events
def send_data(self, stream_id: int, data: bytes, end_stream: bool) -> None:
self._quic.send_stream_data(stream_id, data, end_stream)
def send_headers(
self, stream_id: int, headers: Headers, end_stream: bool = False
) -> None:
if self._is_client:
headers_dict = dict(headers)
data = headers_dict[b":method"] + b" " + headers_dict[b":path"] + b"\r\n"
else:
data = b""
self._quic.send_stream_data(stream_id, data, end_stream)
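def _h0_request_sketch() -> None:
    # Illustrative sketch, not part of the original module: wire an H0Connection to a
    # client-side QuicConnection and queue a bare HTTP/0.9 GET. In a real client the
    # connection must first be established (quic.connect + driving the UDP socket /
    # event loop), which is out of scope here.
    from aioquic.quic.configuration import QuicConfiguration

    configuration = QuicConfiguration(is_client=True, alpn_protocols=H0_ALPN)
    quic = QuicConnection(configuration=configuration)
    h0 = H0Connection(quic)

    # once the handshake has completed:
    stream_id = quic.get_next_available_stream_id()
    h0.send_headers(stream_id, [(b":method", b"GET"), (b":path", b"/")])
    h0.send_data(stream_id, b"", end_stream=True)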
|
py | 7dff035cd2c040df5d6017aac26089319f525436 | import numpy as np
import torch
from tinygrad.tensor import Tensor
x_init = np.random.randn(1,3).astype(np.float32)
W_init = np.random.randn(3,3).astype(np.float32)
m_init = np.random.randn(1,3).astype(np.float32)
def test_tinygrad():
x = Tensor(x_init)
W = Tensor(W_init)
m = Tensor(m_init)
out = x.dot(W)
outr = out.relu()
outl = outr.logsoftmax()
outm = outl.mul(m)
outa = outm.add(m)
outx = outa.sum()
outx.backward()
return outx.data, x.grad, W.grad
def test_pytorch():
x = torch.tensor(x_init, requires_grad=True)
W = torch.tensor(W_init, requires_grad=True)
m = torch.tensor(m_init)
out = x.matmul(W)
outr = out.relu()
outl = torch.nn.functional.log_softmax(outr, dim=1)
outm = outl.mul(m)
outa = outm.add(m)
outx = outa.sum()
outx.backward()
return outx.detach().numpy(), x.grad, W.grad
for x,y in zip(test_tinygrad(), test_pytorch()):
print(x,y)
np.testing.assert_allclose(x, y, atol=1e-6)
|
py | 7dff0466b26a4df96a612b0e23e3aa391d9c3af4 | import os.path
import tarfile
from unittest import mock
import pytest
import pre_commit.constants as C
from pre_commit import parse_shebang
from pre_commit.languages import ruby
from pre_commit.prefix import Prefix
from pre_commit.util import cmd_output
from pre_commit.util import resource_bytesio
from testing.util import xfailif_windows
ACTUAL_GET_DEFAULT_VERSION = ruby.get_default_version.__wrapped__
@pytest.fixture
def find_exe_mck():
with mock.patch.object(parse_shebang, 'find_executable') as mck:
yield mck
def test_uses_default_version_when_not_available(find_exe_mck):
find_exe_mck.return_value = None
assert ACTUAL_GET_DEFAULT_VERSION() == C.DEFAULT
def test_uses_system_if_both_gem_and_ruby_are_available(find_exe_mck):
find_exe_mck.return_value = '/path/to/exe'
assert ACTUAL_GET_DEFAULT_VERSION() == 'system'
@pytest.fixture
def fake_gem_prefix(tmpdir):
gemspec = '''\
Gem::Specification.new do |s|
s.name = 'pre_commit_placeholder_package'
s.version = '0.0.0'
s.summary = 'placeholder gem for pre-commit hooks'
s.authors = ['Anthony Sottile']
end
'''
tmpdir.join('placeholder_gem.gemspec').write(gemspec)
yield Prefix(tmpdir)
@xfailif_windows # pragma: win32 no cover
def test_install_ruby_system(fake_gem_prefix):
ruby.install_environment(fake_gem_prefix, 'system', ())
# Should be able to activate and use rbenv install
with ruby.in_env(fake_gem_prefix, 'system'):
_, out, _ = cmd_output('gem', 'list')
assert 'pre_commit_placeholder_package' in out
@xfailif_windows # pragma: win32 no cover
def test_install_ruby_default(fake_gem_prefix):
ruby.install_environment(fake_gem_prefix, C.DEFAULT, ())
# Should have created rbenv directory
assert os.path.exists(fake_gem_prefix.path('rbenv-default'))
# Should be able to activate using our script and access rbenv
with ruby.in_env(fake_gem_prefix, 'default'):
cmd_output('rbenv', '--help')
@xfailif_windows # pragma: win32 no cover
def test_install_ruby_with_version(fake_gem_prefix):
ruby.install_environment(fake_gem_prefix, '2.7.2', ())
# Should be able to activate and use rbenv install
with ruby.in_env(fake_gem_prefix, '2.7.2'):
cmd_output('rbenv', 'install', '--help')
@pytest.mark.parametrize(
'filename',
('rbenv.tar.gz', 'ruby-build.tar.gz', 'ruby-download.tar.gz'),
)
def test_archive_root_stat(filename):
with resource_bytesio(filename) as f:
with tarfile.open(fileobj=f) as tarf:
root, _, _ = filename.partition('.')
assert oct(tarf.getmember(root).mode) == '0o755'
|
py | 7dff04abc463e796a9faaf1f7f0c6c230e01e632 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START sheets_quickstart]
from __future__ import print_function
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
# If modifying these scopes, delete the file token.json.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']
# The ID and range of a sample spreadsheet.
SAMPLE_SPREADSHEET_ID = '1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms'
SAMPLE_RANGE_NAME = 'Class Data!A2:E'
def main():
"""Shows basic usage of the Sheets API.
Prints values from a sample spreadsheet.
"""
creds = None
# The file token.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.json'):
creds = Credentials.from_authorized_user_file('token.json', SCOPES)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.json', 'w') as token:
token.write(creds.to_json())
service = build('sheets', 'v4', credentials=creds)
# Call the Sheets API
sheet = service.spreadsheets()
result = sheet.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,
range=SAMPLE_RANGE_NAME).execute()
values = result.get('values', [])
if not values:
print('No data found.')
else:
print('Name, Major:')
for row in values:
# Print columns A and E, which correspond to indices 0 and 4.
print('%s, %s' % (row[0], row[4]))
if __name__ == '__main__':
main()
# [END sheets_quickstart]
|
py | 7dff064466cfb2c408fef9a1f69aefbae2d6e04b | def robot_grid_rec(grid, n, m, mem):
if n < 0 :
return 999
if m < 0 :
return 999
if mem[n][m] != 999:
return mem[n][m]
if grid[n][m] == 999:
return 999
if n == 0 and m == 0:
mem[n][m] = 0
return 0
top = robot_grid_rec(grid, n - 1, m, mem)
left = robot_grid_rec(grid, n, m-1, mem)
if top == 999 and left == 999:
raise AssertionError("Cant find a solution")
mem[n][m] = min(top, left) + 1
return mem[n][m]
def robot_grid_bu(grid):
    n = len(grid)
    m = len(grid[0])
    mem = [[999 for _ in range(m + 1)] for _ in range(n + 1)]
    mem[0][1] = -1 # BASE CASE THAT IS THE ROBOT STARTING POSITION
    mem[1][0] = -1
    for i in range(1, n+1):
        for j in range(1, m+1):
            # mem is offset by one row/column relative to grid
            if grid[i-1][j-1] == 999:
                continue
            top = mem[i-1][j]
            left = mem[i][j-1]
            if top == 999 and left == 999:
                raise AssertionError("Cant find a solution")
            mem[i][j] = min(top, left) + 1
    return mem
def printPath(mem, n, m):
print("(0,0)")
i = 0
j = 0
while i != n -1 or j != m - 1:
# check right
right = mem[i][j + 1] if j + 1 < m else 999
down = mem[i + 1][j] if i + 1 < n else 999
if right < down:
print("({},{})".format(i, j + 1))
j += 1
else:
print("({},{})".format(i + 1, j))
i += 1
def print_mem(mem):
for i in range(len(mem)):
print(mem[i])
if __name__ == "__main__":
grid = [
[0,0,0],
[0,0,0],
[0,0,0]
]
n = len(grid)
m = len(grid[0])
mem = [[999 for _ in range(m)] for _ in range(n)]
robot_grid_rec(grid, n-1, m-1, mem)
print_mem(mem)
printPath(mem, n, m)
grid = [
[0,-1,0],
[0,-1,0],
[0,0,0]
]
grid = [
[0,-1,0],
[0,-1,0],
[0,-1,0]
] |
py | 7dff066bcab58b79c987df537ac60414c7941953 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Automated bug filing."""
from __future__ import absolute_import
import datetime
import itertools
import json
from . import grouper
from base import dates
from base import errors
from base import utils
from datastore import data_handler
from datastore import data_types
from datastore import ndb_utils
from handlers import base_handler
from libs import handler
from libs.issue_management import issue_filer
from libs.issue_management import issue_tracker_policy
from libs.issue_management import issue_tracker_utils
from metrics import crash_stats
from metrics import logs
UNREPRODUCIBLE_CRASH_IGNORE_CRASH_TYPES = [
'Out-of-memory', 'Stack-overflow', 'Timeout'
]
TRIAGE_MESSAGE_KEY = 'triage_message'
def _add_triage_message(testcase, message):
"""Add a triage message."""
if testcase.get_metadata(TRIAGE_MESSAGE_KEY) == message:
# Message already exists, skip update.
return
# Re-fetch testcase to get latest entity and avoid race condition in updates.
testcase = data_handler.get_testcase_by_id(testcase.key.id())
testcase.set_metadata(TRIAGE_MESSAGE_KEY, message)
def _create_filed_bug_metadata(testcase):
"""Create a dummy bug entry for a test case."""
metadata = data_types.FiledBug()
metadata.timestamp = datetime.datetime.utcnow()
metadata.testcase_id = testcase.key.id()
metadata.bug_information = int(testcase.bug_information)
metadata.group_id = testcase.group_id
metadata.crash_type = testcase.crash_type
metadata.crash_state = testcase.crash_state
metadata.security_flag = testcase.security_flag
metadata.platform_id = testcase.platform_id
metadata.put()
def _get_excluded_jobs():
"""Return list of jobs excluded from bug filing."""
excluded_jobs = []
jobs = ndb_utils.get_all_from_model(data_types.Job)
for job in jobs:
job_environment = job.get_environment()
# Exclude experimental jobs.
if utils.string_is_true(job_environment.get('EXPERIMENTAL')):
excluded_jobs.append(job.name)
return excluded_jobs
def _is_bug_filed(testcase):
"""Indicate if the bug is already filed."""
# Check if the testcase is already associated with a bug.
if testcase.bug_information:
return True
# Re-check our stored metadata so that we don't file the same testcase twice.
is_bug_filed_for_testcase = data_types.FiledBug.query(
data_types.FiledBug.testcase_id == testcase.key.id()).get()
if is_bug_filed_for_testcase:
return True
return False
def _is_crash_important(testcase):
"""Indicate if the crash is important to file."""
if not testcase.one_time_crasher_flag:
# A reproducible crash is an important crash.
return True
if testcase.status != 'Processed':
# A duplicate or unreproducible crash is not an important crash.
return False
# Testcase is unreproducible. Only those crashes that are crashing frequently
# are important.
if testcase.crash_type in UNREPRODUCIBLE_CRASH_IGNORE_CRASH_TYPES:
return False
# Ensure that there is no reproducible testcase in our group.
if testcase.group_id:
other_reproducible_testcase = data_types.Testcase.query(
data_types.Testcase.group_id == testcase.group_id,
ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag)).get()
if other_reproducible_testcase:
# There is another reproducible testcase in our group. So, this crash is
# not important.
return False
# Get crash statistics data on this unreproducible crash for last X days.
last_hour = crash_stats.get_last_successful_hour()
if not last_hour:
# No crash stats available, skip.
return False
_, rows = crash_stats.get(
end=last_hour,
block='day',
days=data_types.FILE_CONSISTENT_UNREPRODUCIBLE_TESTCASE_DEADLINE,
group_by='reproducible_flag',
where_clause=(
'crash_type = %s AND crash_state = %s AND security_flag = %s' %
(json.dumps(testcase.crash_type), json.dumps(testcase.crash_state),
json.dumps(testcase.security_flag))),
group_having_clause='',
sort_by='total_count',
offset=0,
limit=1)
# Calculate total crash count and crash days count.
crash_days_indices = set([])
total_crash_count = 0
for row in rows:
if 'groups' not in row:
continue
total_crash_count += row['totalCount']
for group in row['groups']:
for index in group['indices']:
crash_days_indices.add(index['hour'])
crash_days_count = len(crash_days_indices)
  # Only those unreproducible testcases are important that happened at least
  # once every day for the last X days and whose total crash count exceeded our
  # threshold limit.
return (crash_days_count ==
data_types.FILE_CONSISTENT_UNREPRODUCIBLE_TESTCASE_DEADLINE and
total_crash_count >=
data_types.FILE_UNREPRODUCIBLE_TESTCASE_MIN_CRASH_THRESHOLD)
def _check_and_update_similar_bug(testcase, issue_tracker):
"""Get list of similar open issues and ones that were recently closed."""
# Get similar testcases from the same group.
similar_testcases_from_group = []
if testcase.group_id:
group_query = data_types.Testcase.query(
data_types.Testcase.group_id == testcase.group_id)
similar_testcases_from_group = ndb_utils.get_all_from_query(
group_query, batch_size=data_types.TESTCASE_ENTITY_QUERY_LIMIT // 2)
  # Get testcases with the same crash params. These might not be in a group
# if they were just fixed.
same_crash_params_query = data_types.Testcase.query(
data_types.Testcase.crash_type == testcase.crash_type,
data_types.Testcase.crash_state == testcase.crash_state,
data_types.Testcase.security_flag == testcase.security_flag,
data_types.Testcase.project_name == testcase.project_name,
data_types.Testcase.status == 'Processed')
similar_testcases_from_query = ndb_utils.get_all_from_query(
same_crash_params_query,
batch_size=data_types.TESTCASE_ENTITY_QUERY_LIMIT // 2)
for similar_testcase in itertools.chain(similar_testcases_from_group,
similar_testcases_from_query):
# Exclude ourself from comparison.
if similar_testcase.key.id() == testcase.key.id():
continue
# Exclude similar testcases without bug information.
if not similar_testcase.bug_information:
continue
# Get the issue object given its ID.
issue = issue_tracker.get_issue(similar_testcase.bug_information)
if not issue:
continue
# If the reproducible issue is not verified yet, bug is still valid and
# might be caused by non-availability of latest builds. In that case,
# don't file a new bug yet.
if similar_testcase.open and not similar_testcase.one_time_crasher_flag:
return True
# If the issue is still open, no need to file a duplicate bug.
if issue.is_open:
return True
# If the issue indicates that this crash needs to be ignored, no need to
# file another one.
policy = issue_tracker_policy.get(issue_tracker.project)
ignore_label = policy.label('ignore')
if ignore_label in issue.labels:
_add_triage_message(
testcase,
('Skipping filing a bug since similar testcase ({testcase_id}) in '
'issue ({issue_id}) is blacklisted with {ignore_label} label.'
).format(
testcase_id=similar_testcase.key.id(),
issue_id=issue.id,
ignore_label=ignore_label))
return True
# If the issue is recently closed, wait certain time period to make sure
# our fixed verification has completed.
if (issue.closed_time and not dates.time_has_expired(
issue.closed_time, hours=data_types.MIN_ELAPSED_TIME_SINCE_FIXED)):
_add_triage_message(
testcase,
('Delaying filing a bug since similar testcase '
'({testcase_id}) in issue ({issue_id}) was just fixed.').format(
testcase_id=similar_testcase.key.id(), issue_id=issue.id))
return True
return False
class Handler(base_handler.Handler):
"""Triage testcases."""
@handler.cron()
def get(self):
"""Handle a get request."""
try:
grouper.group_testcases()
except:
logs.log_error('Error occurred while grouping test cases.')
return
# Free up memory after group task run.
utils.python_gc()
# Get a list of jobs excluded from bug filing.
excluded_jobs = _get_excluded_jobs()
# Get a list of all jobs. This is used to filter testcases whose jobs have
# been removed.
all_jobs = data_handler.get_all_job_type_names()
for testcase_id in data_handler.get_open_testcase_id_iterator():
try:
testcase = data_handler.get_testcase_by_id(testcase_id)
except errors.InvalidTestcaseError:
# Already deleted.
continue
# Skip if testcase's job is removed.
if testcase.job_type not in all_jobs:
continue
# Skip if testcase's job is in exclusions list.
if testcase.job_type in excluded_jobs:
continue
# Skip if we are running progression task at this time.
if testcase.get_metadata('progression_pending'):
continue
# If the testcase has a bug filed already, no triage is needed.
if _is_bug_filed(testcase):
continue
# Check if the crash is important, i.e. it is either a reproducible crash
# or an unreproducible crash happening frequently.
if not _is_crash_important(testcase):
continue
      # Require that all tasks like minimization, regression testing, etc. have
# finished.
if not data_handler.critical_tasks_completed(testcase):
continue
# For testcases that are not part of a group, wait an additional time till
# group task completes.
# FIXME: In future, grouping might be dependent on regression range, so we
# would have to add an additional wait time.
if not testcase.group_id and not dates.time_has_expired(
testcase.timestamp, hours=data_types.MIN_ELAPSED_TIME_SINCE_REPORT):
continue
# If this project does not have an associated issue tracker, we cannot
# file this crash anywhere.
issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(
testcase)
if not issue_tracker:
continue
# If there are similar issues to this test case already filed or recently
# closed, skip filing a duplicate bug.
if _check_and_update_similar_bug(testcase, issue_tracker):
continue
# Clean up old triage messages that would be not applicable now.
testcase.delete_metadata(TRIAGE_MESSAGE_KEY, update_testcase=False)
# File the bug first and then create filed bug metadata.
try:
issue_filer.file_issue(testcase, issue_tracker)
except Exception:
logs.log_error('Failed to file issue for testcase %d.' % testcase_id)
continue
_create_filed_bug_metadata(testcase)
logs.log('Filed new issue %s for testcase %d.' %
(testcase.bug_information, testcase_id))
|
py | 7dff07dc8b4246c3aab299854ac307df7a402a7a | #!/usr/bin/env python
import graphitesend
import rospy
import sys
from pal_statistics_msgs.msg import Statistics
from pal_carbon_collector.carbon_collector import CarbonCollector
if __name__ == "__main__":
rospy.init_node('carbon_collector_node')
if not rospy.has_param('~topics'):
print "No topics were specified"
sys.exit(1)
topics = rospy.get_param('~topics')
dry_run = False
if rospy.has_param('~dry_run'):
dry_run = rospy.get_param('~dry_run')
carbon_collector = CarbonCollector(topics, dry_run=dry_run)
rospy.spin()
sys.exit(0)
|
py | 7dff0a035c976ef9125e15944e68742ab3fce8b2 | import sys
import os
import numpy as np
import random
import math
import pandas as pd
import time
input1 = np.array([[0, 6, 4, 5],
[1, 3, 3, 9],
[4, 9, 2, 1],
[9, 6, 1, 2],
[2, 3, 4, 5]])
input2_v1 = np.array([[1, 9, 9, 9, 9, 9],
[1, 1, 0, 1, 0, 1],
[9, 1, 9, 1, 9, 1],
[9, 1, 9, 1, 9, 1],
[9, 1, 9, 1, 9, 1],
[9, 1, 9, 1, 9, 1],
[9, 1, 1, 1, 9, 1]])
input2_v2 = np.array([[1, 9, 9, 9, 9, 9],
[1, 1, 1, 1, 1, 1],
[9, 1, 9, 1, 9, 1],
[9, 1, 9, 1, 9, 1],
[9, 1, 9, 1, 9, 1],
[9, 1, 9, 1, 9, 1],
[9, 1, 1, 1, 9, 1]])
input3 = np.array([[1, 6, 4, 5, 1, 4, 3, 6, 8, 7],
[1, 3, 3, 9, 1, 4, 3, 6, 2, 1],
[4, 1, 9, 1, 1, 4, 3, 6, 5, 3],
[9, 6, 1, 2, 1, 4, 3, 6, 2, 1],
[1, 3, 5, 4, 1, 4, 3, 6, 8, 4],
[8, 7, 2, 9, 1, 4, 3, 6, 7, 5],
[8, 7, 2, 9, 1, 4, 3, 6, 7, 5],
[1, 6, 3, 5, 1, 4, 3, 6, 2, 2],
[8, 7, 2, 9, 1, 4, 3, 6, 7, 5],
[1, 6, 3, 5, 1, 4, 3, 6, 2, 2]])
input4 = np.array([[0, 6, 4, 5, 1, 4, 3, 5, 6, 8, 7],
[1, 3, 3, 9, 1, 4, 3, 5, 6, 2, 1],
[4, 1, 9, 1, 1, 4, 3, 5, 6, 5, 3],
[9, 6, 1, 2, 1, 4, 3, 5, 6, 2, 1],
[1, 3, 5, 4, 1, 4, 3, 5, 6, 8, 4],
[8, 7, 2, 9, 1, 4, 3, 5, 6, 7, 5],
[1, 6, 3, 5, 1, 4, 3, 5, 6, 2, 2],
[8, 7, 2, 9, 1, 4, 3, 5, 6, 7, 5],
[1, 6, 3, 5, 1, 4, 3, 5, 6, 2, 2],
[8, 7, 2, 9, 1, 4, 3, 5, 6, 7, 5],
[1, 6, 3, 5, 1, 4, 3, 5, 6, 2, 2]])
# Game class
class Task_one_game():
# Initializes the game by storing the input grid to be used on the different path finding modes.
def __init__(self, input):
self.input = input
# Reference: YouTube. Oct. 4, 2018.
# How the Ant Colony Optimization algorithm works - YouTube.
# [ONLINE] Available at: https://www.youtube.com/watch?v=783ZtAF4j5g.
# [Accessed 19 December 2020].
# Reference: Wikipedia.
# Ant colony optimization algorithms - Wikipedia.
# [ONLINE] Available at: https://en.wikipedia.org/wiki/Ant_colony_optimization_algorithms#Algorithm_and_formulae .
# [Accessed 19 December 2020].
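    # In the formulation implemented below, each generation evaporates pheromone as
    # tau <- (1 - evap_rate) * tau + delta_tau, where delta_tau for every cell an ant
    # visited is alpha / (total path cost of that ant), and an ant picks its next cell
    # with probability proportional to pheromone * (1 / cell cost).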
def ant_colony_optimization(self, generations, ants):
grid = self.input
row = grid.shape[0]
col = grid.shape[1]
# Initializes the pheromone grid with ones
pheromone_grid = np.ones((row, col))
gens = generations
# Evaporation rate for the formula
        # If a point in the grid doesn't get traversed, its pheromone gradually dissipates by the rate
evap_rate = 0.7
ants = ants
alpha = 1.5
def calculate_shortest_path(grid, pheromone_grid, evap_rate, gens, ants):
# Stores the shortest distances for each generation
shortest_dist_found = []
# Iterates through each generation
for gen in range(gens):
# Stores the distances of each ant for the current generation
ants_distances = []
interim_grid = np.zeros((row, col))
pheromone_grid = (1 - evap_rate) * \
pheromone_grid + interim_grid
# print("GENERATION ", generation)
for ant in range(ants):
# For each ant, traverses the grid and gets its path/history and distance made
ant_history, distance_traveled = traverse_grid(
grid, pheromone_grid)
ants_distances.append(distance_traveled)
                    # If an ant reaches the end with a total path cost of 0, substitute a
                    # small constant so the pheromone update below does not divide by zero
if distance_traveled == 0:
distance_traveled = 0.7
# Stacks the positions traversed of each ant while dividing the alpha constant
# by the total distance traveled by each ant. This is the formula from Wikipedia
for (x, y) in ant_history:
interim_grid[x][y] += alpha / distance_traveled
# appends the shortest distance after the generation is over
shortest_dist_found.append(np.min(ants_distances))
# Updates the pheromones with the interim_grid by using the formula from Wikipedia.
pheromone_grid = (1 - evap_rate) * \
pheromone_grid + interim_grid
# Removes the starting point added in by the ant to the total,
# adds in the final destination point, as it was not added in the algorithm,
# and prints out the shortest path
print("ACO shortest path: ", np.min(
shortest_dist_found + grid[row - 1][col - 1] - grid[0][0]))
def traverse_grid(grid, pheromone_grid):
# Starts at 0,0
current_node = (0, 0)
end_node = (row - 1, col - 1)
# Initializes the points set traversed by the ant with the starting point
history = set()
history.add(current_node)
distance_traveled = 0
# Generates the initial potential nodes (1,0) and (0,1)
potential_nodes = (
calculate_potential_nodes(history, current_node))
# Parses through the grid while it hasn't reached the end point
# or until there still are potential nodes(nodes act as points on the grid) to go to
while (current_node != end_node) and len(potential_nodes) != 0:
# Updates distance traveled with the current node's value
distance_traveled += grid[current_node[0]][current_node[1]]
# Updates the current node
current_node = choose_node(current_node, potential_nodes)
# Adds it to the history
history.add(current_node)
# Generates new nodes to choose from for the next iteration
potential_nodes = calculate_potential_nodes(
history, current_node)
# Sets the distance_traversed to 100000 if the ant hasn't reached the end
if current_node != end_node:
distance_traveled = 100000
return history, distance_traveled
# The formulas used in this method are from the References above.
def choose_node(current_node, potential_nodes):
potential_nodes_sum = 0
# Calculates sum of each possible node for use in the main probability
for (x, y) in potential_nodes:
if x == current_node[0] + 1:
potential_nodes_sum += calculate_south(current_node)
elif x == current_node[0] - 1:
potential_nodes_sum += calculate_north(current_node)
elif y == current_node[1] + 1:
potential_nodes_sum += calculate_east(current_node)
elif y == current_node[1] - 1:
potential_nodes_sum += calculate_west(current_node)
north_prob = 0
west_prob = 0
east_prob = 0
south_prob = 0
# Calculates the probabilities
for (x, y) in potential_nodes:
if x == current_node[0] + 1:
south_prob = calculate_south(
current_node)/potential_nodes_sum
if x == current_node[0] - 1:
north_prob = calculate_north(
current_node)/potential_nodes_sum
if y == current_node[1] + 1:
east_prob = calculate_east(
current_node)/potential_nodes_sum
if y == current_node[1] - 1:
west_prob = calculate_west(
current_node)/potential_nodes_sum
            # roullete_select takes into account the chance of some probabilities being 0 when passed as parameters
chosen_prob = roullete_select(
[north_prob, west_prob, east_prob, south_prob])
# roullete_select returns the index of the chosen probability
            # in the order in which they were passed in above.
            # For index 0 it's north_prob, for index 1 it's west_prob and so on.
# This 'if' function distinguishes which potential node to return based on that index rule.
if chosen_prob == 0:
return (current_node[0] - 1, current_node[1])
elif chosen_prob == 1:
return (current_node[0], current_node[1] - 1)
elif chosen_prob == 2:
return (current_node[0], current_node[1] + 1)
elif chosen_prob == 3:
return (current_node[0] + 1, current_node[1])
def calculate_potential_nodes(history, curr_node):
a = set()
# Calculates if the nodes from the north/east/west/south region with respect to the current_node
# are eligible to be added in the potential_nodes set for the next iteration
# by checking if it hits any walls as well as the ant's history
if curr_node[0] + 1 <= row - 1 and (curr_node[0] + 1, curr_node[1]) not in history:
a.add((curr_node[0] + 1, curr_node[1]))
if curr_node[0] - 1 >= 0 and (curr_node[0] - 1, curr_node[1]) not in history:
a.add((curr_node[0] - 1, curr_node[1]))
if curr_node[1] - 1 >= 0 and (curr_node[0], curr_node[1] - 1) not in history:
a.add((curr_node[0], curr_node[1] - 1))
if curr_node[1] + 1 <= col - 1 and (curr_node[0], curr_node[1] + 1) not in history:
a.add((curr_node[0], curr_node[1] + 1))
return a
def roullete_select(probabilities):
# Randomizes a number to be used in the probability loop below
r = random.uniform(0, 1)
# sum_index acts as the cummulative sum as it increments
# without any need for extra storage use
sum_index = 0
chosen_prob_index = 0
# Iterates through the number of probabilities
for i in range(len(probabilities)):
# Skipping the probabilities in case there are any zero probabilities.
# This happens when the current point has reached one or more walls
# and has no potential node to calculate probability for.
if probabilities[i] == 0:
continue
# If the random number is less than the incremental sum_index,
# then it chooses the current index
# For instance, if we have the probabilities:
                # [0.1, 0.2, 0.1, 0.6] and the random number is 0.25,
                # then the 2nd probability (0.2) is chosen: the cumulative sum before it
                # (0.1) is still <= the random number, while the cumulative sum before the
                # 3rd one (0.1 + 0.2 = 0.3) is not, so the chosen index stops at the 2nd probability.
if sum_index <= r:
chosen_prob_index = i
sum_index += probabilities[i]
return chosen_prob_index
def calculate_north(a):
# Calculates the probability for the north node, given from the formulas in the references above
if grid[a[0] - 1][a[1]] == 0:
return 0.49
return pheromone_grid[a[0] - 1][a[1]]*(1/grid[a[0] - 1][a[1]])
def calculate_west(a):
# Calculates the probability for the west node, given from the formulas in the references above
if grid[a[0]][a[1] - 1] == 0:
return 0.49
return pheromone_grid[a[0]][a[1] - 1]*(1/grid[a[0]][a[1] - 1])
def calculate_east(a):
# Calculates the probability for the east node, given from the formulas in the references above
if grid[a[0]][a[1] + 1] == 0:
return 0.49
east = pheromone_grid[a[0]][a[1] + 1]*(1/grid[a[0]][a[1] + 1])
return east
def calculate_south(a):
# Calculates the probability for the south node, given from the formulas in the references above
if grid[a[0] + 1][a[1]] == 0:
return 0.49
return pheromone_grid[a[0] + 1][a[1]]*(1/grid[a[0] + 1][a[1]])
calculate_shortest_path(grid, pheromone_grid, evap_rate, gens, ants)
def heuristic(self):
# Sets the boundaries for the while loop below
size_x = self.input.shape[0] - 1
size_y = self.input.shape[1] - 1
grid = self.input
time_spent = 0
# Initializes the positions on the start
x = y = 0
while x <= size_x and y <= size_y:
# Calculates the next point if it's not close to any wall
if x < size_x and y < size_y:
look_right = grid[x, y + 1]
look_down = grid[x + 1, y]
# Chooses the shortest distance point
if look_right == min(look_right, look_down):
time_spent += look_right
y += 1
else:
time_spent += look_down
x += 1
# If it's next to the horizontal walls, it only goes to the right
if size_x == x and y < size_y:
time_spent += grid[x, y + 1]
y += 1
# If it's next to the vertical walls, it only goes below
if size_y == y and x < size_x:
time_spent += grid[x + 1, y]
x += 1
# If it has reached the end, break the loop
if size_y == y and x == size_x:
break
print("Heuristic shortest path: ", time_spent)
# Reference: Wikipedia.
# Dijkstra's algorithm - Wikipedia.
# [ONLINE] Available at: https://en.wikipedia.org/wiki/Dijkstra's_algorithm?fbclid=IwAR3EvRxdGdemWFGdZYVbyARZmViMWMtaoS18Ck4m7QYDVN22tCdl95WmNOk#Algorithm .
# [Accessed 19 December 2020].
def dijkstra(self):
grid = self.input
row = grid.shape[0]
col = grid.shape[1]
# Initializes the node params
start_node = (0, 0)
end_node = (row - 1, col - 1)
current_node = start_node
# Sets the distance grid to a high number (easier to use/visualise compared to infinity - wikipedia)
distance_grid = np.full((row, col), 99999)
# Sets the start distance to 0
distance_grid[0][0] = 0
# Creates a set of unvisitied nodes and adds all the nodes to it - marked as unvisited
unvisited_nodes = set()
for x in range(row):
for y in range(col):
unvisited_nodes.add((x, y))
def start(current_node):
# First neighbour nodes are the right and bottom ones
neighbour_nodes = ((1, 0), (0, 1))
# Parses through the grid until it visits end_node
while end_node in unvisited_nodes:
# Updates the distances of the neighbour nodes
update_distances(current_node, neighbour_nodes)
# Marks the current node as visited
unvisited_nodes.remove(current_node)
current_node = get_next_node()
# Gets the neighbours of the new current_node
neighbour_nodes = update_neighbour_nodes(current_node)
print("Dijkstra shortest path: ",
distance_grid[end_node[0]][end_node[1]])
def update_distances(curr_node, neighbour_nodes):
for (x, y) in neighbour_nodes:
distance = distance_grid[curr_node[0]
][curr_node[1]] + grid[x][y]
if distance_grid[x][y] > distance:
distance_grid[x][y] = distance
def get_next_node():
# Sets an initial min distance to be compared to
min_distance = 9999999
# Initialises min_node with a random pos tuple
min_node = (999, 999)
# Searches through the unvisited nodes the smallest distance and updates min_node
for (x, y) in unvisited_nodes:
if min_distance > distance_grid[x][y]:
min_distance = distance_grid[x][y]
min_node = (x, y)
return min_node
def update_neighbour_nodes(curr_node):
updated_nodes = set()
x = curr_node[0]
y = curr_node[1]
# Checks for walls or possible neighbours that haven't been visited
# based on the indices of the curr_node
if x + 1 < row and (x+1, y) in unvisited_nodes:
updated_nodes.add((x+1, y))
if x - 1 >= 0 and (x-1, y) in unvisited_nodes:
updated_nodes.add((x-1, y))
if y + 1 < col and (x, y + 1) in unvisited_nodes:
updated_nodes.add((x, y + 1))
if y - 1 >= 0 and (x, y - 1) in unvisited_nodes:
updated_nodes.add((x, y - 1))
return updated_nodes
start(current_node)
rand_input = np.random.randint(10, size=(11, 11))
main_input = input4
game = Task_one_game(main_input)
print(main_input)
start = time.time()
game.heuristic()
end = time.time()
print("Time elapsed for Heuristic: ", end - start)
start = time.time()
game.dijkstra()
end = time.time()
print("Time elapsed for Dijkstra: ", end - start)
start = time.time()
game.ant_colony_optimization(generations=50, ants=800)
end = time.time()
print("Time elapsed for ACO: ", end - start)
|
py | 7dff0a9c58b0d0c5cedc20b1efdafe6c1c1d1f05 | import subprocess
import time
from typing import Any, Dict, List
import requests
import determined_common.api.authentication as auth
from determined_common import api
from tests.integrations import config as conf
def det_version() -> str:
output = subprocess.check_output(["det", "--version"], universal_newlines=True) # type: str
return output.split()[1]
def cluster_slots() -> Dict[str, Any]:
"""
cluster_slots returns a dict of slots that each agent has.
:return: Dict[AgentID, List[Slot]]
"""
auth.initialize_session(conf.make_master_url(), try_reauth=True)
r = api.get(conf.make_master_url(), "agents")
assert r.status_code == requests.codes.ok, r.text
json = r.json() # type: Dict[str, Any]
return {agent["id"]: agent["slots"].values() for agent in json.values()}
def num_slots() -> int:
return sum(len(agent_slots) for agent_slots in cluster_slots().values())
def max_slots_per_agent() -> int:
return max(map(len, cluster_slots().values()))
def gpu_slots_per_agent() -> List[int]:
return [
sum(1 if slot["type"] == "gpu" else 0 for slot in slot_list)
for slot_list in cluster_slots().values()
]
def num_free_slots() -> int:
return sum(
0 if slot["container"] else 1
for agent_slots in cluster_slots().values()
for slot in agent_slots
)
def running_on_gpu() -> bool:
return any(
slot["device"]["type"] == "gpu"
for slot_list in cluster_slots().values()
for slot in slot_list
)
def wait_for_agents(min_agent_count: int) -> None:
while True:
if num_agents() >= min_agent_count:
return
print("Waiting for {} agents to register...".format(min_agent_count))
time.sleep(1)
def num_agents() -> int:
auth.initialize_session(conf.make_master_url(), try_reauth=True)
r = api.get(conf.make_master_url(), "agents")
assert r.status_code == requests.codes.ok, r.text
return len(r.json())
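if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: requires the `det` CLI and
    # a reachable Determined master (as configured in tests.integrations.config).
    wait_for_agents(1)
    print("det CLI version:", det_version())
    print("total slots:", num_slots(), "free slots:", num_free_slots())
    print("gpu slots per agent:", gpu_slots_per_agent())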
|
py | 7dff0b15c661aa83cb5e2f2f99ba5ac7daded503 |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="spacyjsonnlp",
version='0.1.3',
python_requires='>=3.6',
author="Damir Cavar, Oren Baldinger, Maanvitha Gongalla, Anurag Kumar, Murali Kammili",
author_email="[email protected]",
description="The Python spaCy JSON-NLP package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/dcavar/spaCy-JSON-NLP",
packages=setuptools.find_packages(),
install_requires=[
'spacy==2.1.0',
'neuralcoref>=4.0',
'pyjsonnlp>=0.2.12',
'benepar[cpu]>=0.1.2',
'cython',
'numpy>=1.14'
],
setup_requires=["cython", "numpy>=1.14", "pytest-runner"],
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
test_suite="tests",
tests_require=["pytest", "coverage"]
)
|
py | 7dff0b3121b2c11ec7d1be5a82823430c3402110 | import os
from dotenv import load_dotenv as ld
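# Illustrative sketch (not in the original file): typical python-dotenv usage is to
# load variables from a local .env file and read them back via os.getenv. "API_KEY"
# below is a made-up variable name.
# ld()                            # returns True if a .env file was found and loaded
# api_key = os.getenv("API_KEY")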
|
py | 7dff0be63886cecf13a3f2b47effc1196d91d9c9 | from torch.utils.data import Dataset
import torch
import json, os, random, time
import cv2
import torchvision.transforms as transforms
from data_transform.transform_wrapper import TRANSFORMS
import numpy as np
from utils.utils import get_category_list
import math
from PIL import Image
class BaseSet(Dataset):
def __init__(self, mode="train", cfg=None, transform=None):
self.mode = mode
self.transform = transform
self.cfg = cfg
self.input_size = cfg.INPUT_SIZE
self.color_space = cfg.COLOR_SPACE
self.size = self.input_size
print("Use {} Mode to train network".format(self.color_space))
if self.mode == "train":
print("Loading train data ...", end=" ")
self.json_path = cfg.DATASET.TRAIN_JSON
elif "valid" in self.mode:
print("Loading valid data ...", end=" ")
self.json_path = cfg.DATASET.VALID_JSON
else:
raise NotImplementedError
self.update_transform()
with open(self.json_path, "r") as f:
self.all_info = json.load(f)
self.num_classes = self.all_info["num_classes"]
if not self.cfg.DATASET.USE_CAM_BASED_DATASET or self.mode != 'train':
self.data = self.all_info['annotations']
else:
assert os.path.isfile(self.cfg.DATASET.CAM_DATA_JSON_SAVE_PATH), \
'the CAM-based generated json file does not exist!'
self.data = json.load(open(self.cfg.DATASET.CAM_DATA_JSON_SAVE_PATH))
print("Contain {} images of {} classes".format(len(self.data), self.num_classes))
self.class_weight, self.sum_weight = self.get_weight(self.data, self.num_classes)
if self.cfg.TRAIN.SAMPLER.TYPE == "weighted sampler" and mode == "train":
print('-'*20+' dataset'+'-'*20)
print('class_weight is (the first 10 classes): ')
print(self.class_weight[:10])
num_list, cat_list = get_category_list(self.get_annotations(), self.num_classes, self.cfg)
self.instance_p = np.array([num / sum(num_list) for num in num_list])
self.class_p = np.array([1/self.num_classes for _ in num_list])
num_list = [math.sqrt(num) for num in num_list]
self.square_p = np.array([num / sum(num_list) for num in num_list])
self.class_dict = self._get_class_dict()
def update(self, epoch):
self.epoch = max(0, epoch-self.cfg.TRAIN.TWO_STAGE.START_EPOCH) if self.cfg.TRAIN.TWO_STAGE.DRS else epoch
if self.cfg.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE == "progressive":
self.progress_p = epoch/self.cfg.TRAIN.MAX_EPOCH * self.class_p + (1-epoch/self.cfg.TRAIN.MAX_EPOCH)*self.instance_p
print('self.progress_p', self.progress_p)
def __getitem__(self, index):
print('start get item...')
now_info = self.data[index]
img = self._get_image(now_info)
print('complete get img...')
meta = dict()
image = self.transform(img)
image_label = (
now_info["category_id"] if "test" not in self.mode else 0
) # 0-index
if self.mode not in ["train", "valid"]:
meta["image_id"] = now_info["image_id"]
meta["fpath"] = now_info["fpath"]
return image, image_label, meta
def update_transform(self, input_size=None):
normalize = TRANSFORMS["normalize"](cfg=self.cfg, input_size=input_size)
transform_list = [transforms.ToPILImage()]
transform_ops = (
self.cfg.TRANSFORMS.TRAIN_TRANSFORMS
if self.mode == "train"
else self.cfg.TRANSFORMS.TEST_TRANSFORMS
)
for tran in transform_ops:
transform_list.append(TRANSFORMS[tran](cfg=self.cfg, input_size=input_size))
transform_list.extend([transforms.ToTensor(), normalize])
self.transform = transforms.Compose(transform_list)
def get_num_classes(self):
return self.num_classes
def get_annotations(self):
return self.all_info['annotations']
def __len__(self):
return len(self.all_info['annotations'])
def imread_with_retry(self, fpath):
retry_time = 10
for k in range(retry_time):
try:
img = cv2.imread(fpath)
if img is None:
print("img is None, try to re-read img")
continue
return img
except Exception as e:
if k == retry_time - 1:
assert False, "pillow open {} failed".format(fpath)
time.sleep(0.1)
def _get_image(self, now_info):
fpath = os.path.join(now_info["fpath"])
img = self.imread_with_retry(fpath)
if self.color_space == "RGB":
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
def _get_trans_image(self, img_idx):
now_info = self.data[img_idx]
fpath = os.path.join(now_info["fpath"])
img = self.imread_with_retry(fpath)
if self.color_space == "RGB":
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return self.transform(img)[None, :, :, :]
def _get_class_dict(self):
class_dict = dict()
for i, anno in enumerate(self.data):
cat_id = (
anno["category_id"] if "category_id" in anno else anno["image_label"]
)
            if cat_id not in class_dict:
class_dict[cat_id] = []
class_dict[cat_id].append(i)
return class_dict
def get_weight(self, annotations, num_classes):
num_list = [0] * num_classes
cat_list = []
for anno in annotations:
category_id = anno["category_id"]
num_list[category_id] += 1
cat_list.append(category_id)
max_num = max(num_list)
class_weight = [max_num / i if i != 0 else 0 for i in num_list]
sum_weight = sum(class_weight)
return class_weight, sum_weight
def sample_class_index_by_weight(self):
rand_number, now_sum = random.random() * self.sum_weight, 0
for i in range(self.num_classes):
now_sum += self.class_weight[i]
if rand_number <= now_sum:
return i
|
py | 7dff0be6f06a4fb65d0ffe298eadfc827f8363ee | """
Lists with Duplicates - SOLUTION
"""
# First remove dups from a
# then merge b without adding dups
a = [2, 4, 10, 20, 5, 2, 20, 4]
b = [13, 2, 25, 20, 4, 8]
print(f'BASE LIST: {a}\n')
# USING A LOOP, remove the duplicate items from list a and print out the updated list.
x = []
for i in a:
if i not in x:
x.append(i)
a = x
print(f'Removed Dups: {a}\n')
# USING A LOOP, merge list b into list a without adding any duplicates.
for i in b:
if i not in a:
a.append(i)
print(f'Merged: {a}')
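# Alternative sketch (not part of the original exercise, which asks for loops): the
# same order-preserving de-duplication and merge can be done with dict.fromkeys,
# which keeps only the first occurrence of each item.
merged = list(dict.fromkeys([2, 4, 10, 20, 5, 2, 20, 4] + [13, 2, 25, 20, 4, 8]))
print(f'Merged with dict.fromkeys: {merged}')  # same result as the loop version above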
|
py | 7dff0c01d0916fe22b2e87ff1cfa870b024a908d | from math import prod
def persistence(n):
counter = 0
while n > 9:
counter = counter + 1
n = list(map(int,str(n)))
n = prod(n)
return counter
print(persistence(4444))
# Best Practices (I doubt :/)
# import operator
# def persistence(n):
# i = 0
# while n>=10:
# n=reduce(operator.mul,[int(x) for x in str(n)],1)
# i+=1
# return i
|
py | 7dff0d15f599435b128b9c2ddc55c88ea0f5cc40 | import tensorflow as tf
import numpy as np
from typing import Tuple
from modules.utils import PostNet, CBHGLayer, PreNet, PositionalEncoding
from modules.attention import BahdanauAttention, CrossAttentionBLK
class BasePosterior(tf.keras.layers.Layer):
"""Encode the target sequence into latent distributions"""
def __init__(self, name='Posterior', **kwargs):
super(BasePosterior, self).__init__(name=name, **kwargs)
def call(self, inputs, src_enc, src_lengths=None, target_lengths=None
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
raise NotImplementedError
@staticmethod
def reparameterize(mu, logvar, nsamples=tf.constant(1), random=tf.constant(True)):
"""
:param mu: [batch, max_time, dim]
:param logvar: [batch, max_time, dim]
:param nsamples: int
:param random: whether sample from N(0, 1) or just use zeros
:return: samples, noises, [batch, nsamples, max_time, dim]
"""
print('tracing back at posterior reparameterize')
batch = tf.shape(mu)[0]
max_time = tf.shape(mu)[1]
dim = tf.shape(mu)[2]
std = tf.math.exp(0.5 * logvar)
if random:
eps = tf.random.normal([batch, nsamples, max_time, dim])
else:
eps = tf.zeros([batch, nsamples, max_time, dim])
samples = eps * tf.expand_dims(std, axis=1) + tf.expand_dims(mu, axis=1)
return samples, eps
@staticmethod
def log_probability(mu, logvar, z=None, eps=None, seq_lengths=None, epsilon=tf.constant(1e-8)):
"""
:param mu: [batch, max_time, dim]
:param logvar: [batch, max_time, dim]
:param z: [batch, nsamples, max_time, dim]
:param eps: [batch, nsamples, max_time, dim]
:param seq_lengths: [batch, ]
:param epsilon: small float number to avoid overflow
:return: log probabilities, [batch, nsamples]
"""
print('tracing back at posterior log-probability')
batch = tf.shape(mu)[0]
max_time = tf.shape(mu)[1]
dim = tf.shape(mu)[2]
std = tf.math.exp(0.5 * logvar)
normalized_samples = (eps if eps is not None
else (z - tf.expand_dims(mu, axis=1))
/ (tf.expand_dims(std, axis=1) + epsilon))
expanded_logvar = tf.expand_dims(logvar, axis=1)
# time_level_log_probs [batch, nsamples, max_time]
time_level_log_probs = -0.5 * (
tf.cast(dim, tf.float32) * tf.math.log(2 * np.pi)
+ tf.reduce_sum(expanded_logvar + normalized_samples ** 2.,
axis=3))
seq_mask = (tf.sequence_mask(seq_lengths, maxlen=max_time, dtype=tf.float32)
if seq_lengths is not None
else tf.ones([batch, max_time]))
seq_mask = tf.expand_dims(seq_mask, axis=1) # [batch, 1, max_time]
sample_level_log_probs = tf.reduce_sum(seq_mask * time_level_log_probs,
axis=2) # [batch, nsamples]
return sample_level_log_probs
def sample(self, inputs, src_enc, input_lengths, src_lengths,
nsamples=tf.constant(1), random=tf.constant(True)) -> Tuple[tf.Tensor, tf.Tensor]:
"""
:param inputs: [batch, tgt_max_time, in_dim]
:param src_enc: [batch, src_max_time, emb_dim]
:param input_lengths: [batch, ]
:param src_lengths: [batch, ]
:param nsamples:
:param random:
:return:
tensor1: samples from the posterior, [batch, nsamples, tgt_max_time, dim]
tensor2: log-probabilities, [batch, nsamples]
"""
raise NotImplementedError
class TransformerPosterior(BasePosterior):
def __init__(self, pre_hidden, pre_drop_rate, pre_activation,
pos_drop_rate, nblk, attention_dim, attention_heads,
temperature, ffn_hidden, latent_dim, name='TransformerPosterior'):
super(TransformerPosterior, self).__init__(name=name)
self.pos_weight = tf.Variable(1.0, trainable=True)
self.prenet = PreNet(units=pre_hidden, drop_rate=pre_drop_rate,
activation=pre_activation, name='decoder_prenet')
self.pe = PositionalEncoding('EncoderPositionEncoding')
self.pe_dropout = tf.keras.layers.Dropout(rate=pos_drop_rate)
self.attentions = []
for i in range(nblk):
attention = CrossAttentionBLK(input_dim=pre_hidden,
attention_dim=attention_dim,
attention_heads=attention_heads,
attention_temperature=temperature,
ffn_hidden=ffn_hidden)
self.attentions.append(attention)
self.mu_projection = tf.keras.layers.Dense(latent_dim,
kernel_initializer='zeros',
name='mu_projection')
self.logvar_projection = tf.keras.layers.Dense(latent_dim,
kernel_initializer='zeros',
name='logvar_projection')
def call(self, inputs, src_enc, src_lengths=None, target_lengths=None, training=None):
print('tracing back at posterior call')
prenet_outs = self.prenet(inputs)
max_time = tf.shape(prenet_outs)[1]
dim = tf.shape(prenet_outs)[2]
pos = self.pe.positional_encoding(max_time, dim)
pos_embs = prenet_outs + self.pos_weight * pos
pos_embs = self.pe_dropout(pos_embs, training=training)
att_outs = pos_embs
for att in self.attentions:
att_outs, alignments = att(
inputs=att_outs, memory=src_enc, query_lengths=target_lengths,
memory_lengths=src_lengths, training=training)
mu = self.mu_projection(att_outs)
logvar = self.logvar_projection(att_outs)
return mu, logvar, None
def sample(self, inputs, src_enc, input_lengths, src_lengths,
nsamples=tf.constant(1), random=tf.constant(True), training=None):
        mu, logvar, _ = self.call(inputs, src_enc, src_lengths=src_lengths,
                                  target_lengths=input_lengths, training=training)
samples, eps = self.reparameterize(mu, logvar, nsamples, random)
        log_probs = self.log_probability(mu, logvar, eps=eps, seq_lengths=input_lengths)
return samples, log_probs
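# Illustrative usage sketch (shapes and hyper-parameter values are assumptions, not from this file):
#   posterior = TransformerPosterior(pre_hidden=256, pre_drop_rate=0.5, pre_activation='relu',
#                                    pos_drop_rate=0.1, nblk=4, attention_dim=256,
#                                    attention_heads=4, temperature=1.0,
#                                    ffn_hidden=1024, latent_dim=16)
#   samples, log_probs = posterior.sample(mel_targets, text_encodings,
#                                         mel_lengths, text_lengths, training=True)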
|
py | 7dff0d9a5995fa047058c75206157727b8505701 | #!/usr/bin/env python3
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# See LICENSE.txt included in this distribution for the specific
# language governing permissions and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at LICENSE.txt.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
# Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
# Portions Copyright (c) 2017-2018, Chris Fraire <[email protected]>.
#
import argparse
import logging
import os
import tempfile
from zipfile import ZipFile
from shutil import copyfile
from .utils.log import get_console_logger
"""
deploy war file
"""
def repack_war(logger, sourceWar, targetWar, configFile, defaultConfigFile):
"""
Repack sourceWar into targetWar, performing substitution of configFile
in the process.
"""
WEB_XML = 'WEB-INF/web.xml'
with ZipFile(sourceWar, 'r') as infile, ZipFile(targetWar, 'w') as outfile:
for item in infile.infolist():
data = infile.read(item.filename)
if item.filename == WEB_XML:
logger.debug("Performing substitution of '{}' with '{}'".
format(defaultConfigFile, configFile))
defaultConfigFile = defaultConfigFile.encode()
configFile = configFile.encode()
data = data.replace(defaultConfigFile, configFile)
outfile.writestr(item, data)
def deploy_war(logger, sourceWar, targetWar, configFile=None):
"""
Copy warSource to warTarget (checking existence of both), optionally
repacking the warTarget archive if configuration file resides in
non-default location.
"""
if not os.path.isfile(sourceWar):
logger.error("{} is not a file".format(sourceWar))
if os.path.isdir(targetWar):
orig = targetWar
targetWar = os.path.join(targetWar, os.path.basename(sourceWar))
logger.debug("Target {} is directory, will use {}".
format(orig, targetWar))
# If user does not use default configuration file location then attempt to
# extract WEB-INF/web.xml from the war file using jar or zip utility,
# update the hardcoded values and then update source.war with the new
# WEB-INF/web.xml.
tmpWar = None
DEFAULT_CONFIG_FILE = '/var/opengrok/etc/configuration.xml'
if configFile and configFile != DEFAULT_CONFIG_FILE:
with tempfile.NamedTemporaryFile(prefix='OpenGroktmpWar',
suffix='.war',
delete=False) as tmpWar:
logger.info('Repacking {} with custom configuration path to {}'.
format(sourceWar, tmpWar.name))
repack_war(logger, sourceWar, tmpWar.name, configFile,
DEFAULT_CONFIG_FILE)
sourceWar = tmpWar.name
logger.info("Installing {} to {}".format(sourceWar, targetWar))
copyfile(sourceWar, targetWar)
if tmpWar:
os.remove(tmpWar.name)
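# Illustrative usage sketch (paths below are assumptions, not defaults of this tool):
#   logger = get_console_logger(__name__, logging.INFO)
#   deploy_war(logger, 'dist/source.war', '/opt/tomcat/webapps',
#              configFile='/opt/opengrok/etc/configuration.xml')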
def main():
parser = argparse.ArgumentParser(description='Deploy WAR file')
parser.add_argument('-D', '--debug', action='store_true',
help='Enable debug prints')
parser.add_argument('-c', '--config',
help='Path to OpenGrok configuration file')
parser.add_argument('source_war', nargs=1,
help='Path to war file to deploy')
parser.add_argument('target_war', nargs=1,
help='Path where to deploy source war file to')
args = parser.parse_args()
loglevel = logging.INFO
if args.debug:
loglevel = logging.DEBUG
logger = get_console_logger(__name__, loglevel)
deploy_war(logger, args.source_war[0], args.target_war[0], args.config)
print("Start your application server (if it is not already running) "
"or wait until it loads the just installed web application.\n"
"OpenGrok should be available on <HOST>:<PORT>/{APP_CONTEXT}")
if __name__ == '__main__':
main()
|
py | 7dff0da7564a68ce55c372a8984efd52896d431e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from itertools import combinations
from collections import Counter
import os.path
import numpy as np
from scipy.stats import mode
from scipy.linalg import orth
from numpy.linalg import svd, lstsq, inv, pinv, multi_dot
from scipy.special import logit
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils import as_float_array, check_array, check_X_y, check_random_state, column_or_1d
#from sklearn.utils.fixes import expit as sigmoid
from scipy.special import expit as sigmoid
#from sklearn.utils.estimator_checks import check_estimator
#from sklearn.exceptions import NotFittedError
from sklearn.preprocessing import LabelBinarizer, label_binarize
from sklearn.linear_model import Ridge, RidgeClassifier, Lasso
from sklearn import metrics
#import matlab.engine
#from cvxpy import *
#from utils import *
#from mysoftclassifier import *
dot = np.dot # alias for np.dot
#def sigmoid(x):
# return 0.5*np.tanh(0.5*x)+0.5
class ELMClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, n_hidden=100, C=1.0, batch_size=None, fit_intercept=False, ovo=False, classes=None, activation_func='sigmoid', return_y=False, random_projection=True, random_state=None):
self.n_hidden = n_hidden
self.C = C
self.W = None
self.b = None
self.beta = None
self.P = None # P = (H'*H+C*I)^-1
self.activation_func = activation_func.lower()
self.batch_size = batch_size
self.fit_intercept = fit_intercept
self.random_projection = random_projection
self.random_state = random_state
self.random_state_ = None
self.ovo = ovo
self.classes = classes#np.array([1,2,3,4,5])
self.return_y = return_y
self.label_binarizer = None
self.fitted_ = False
def _validate_X(self, X):
if len(X.shape)==1:
raise ValueError('X should be a 2-dimensional array.')
# if one feature:
# X = X.reshape(1,-1)
# else: # one sample
# X = X.reshape(-1,1)
if X.shape[0]==0:
raise ValueError('Empty samples.')
if X.shape[1]==0:
raise ValueError('0 feature(s) (shape=(3, 0)) while a minimum of %d is required.'%(1,))
return as_float_array(check_array(X))
def _validate_X_y(self, X, y):
X = self._validate_X(X)
X, y = check_X_y(X, y)
if y.ndim == 2 and y.shape[1] == 1:
y = column_or_1d(y, warn=True)
if np.allclose(np.array(y,dtype=int),y):
self.label_binarizer = LabelBinarizer(neg_label=-2,pos_label=2,sparse_output=False)# y \in {-2,2} according to extreme logistic regression
if self.classes is not None:
self.label_binarizer.fit(self.classes)
y = self.label_binarizer.transform(y)
else:
y = self.label_binarizer.fit_transform(y)
self.classes = self.label_binarizer.classes_
if self.label_binarizer.classes_.shape[0]<2:
raise ValueError('Label contains less than 2 classes.')
else:
self.label_binarizer = None
self.fit_intercept = True
return X, y
def fit(self, X, y, sample_weight=None):
self.fitted_ = False
self.random_state_ = check_random_state(self.random_state)
if np.any(np.isnan(y)):
nonnan_ids = np.logical_not(np.isnan(y))
X = X[nonnan_ids,:]
y = y[nonnan_ids]
X, y = self._validate_X_y(X, y)
N, dx = X.shape
N_ = N-self.n_hidden
self.classes_ = self.classes
#self.n_classes_ = len(self.classes)
if self.random_projection and (self.batch_size is None or self.P is None):
self.b = self.random_state_.uniform(size=self.n_hidden)*2-1
self.W = self.random_state_.uniform(size=(dx,self.n_hidden))*2-1
if self.batch_size is None or N_<=0:
# fit all
if self.random_projection:
if self.activation_func == 'sigmoid':
H = sigmoid(dot(X,self.W)+self.b)
else:
                    raise NotImplementedError('activation_func="%s" is not implemented.' % self.activation_func)
else:
self.n_hidden = X.shape[1]
H = X
if self.label_binarizer is None:
if self.ovo:
raise NotImplementedError('OVO for probabilistic label is not implemented yet.')
if sample_weight is not None:
raise NotImplementedError('sampled_weight for probabilistic label is not implemented yet.')
if not hasattr(self,'fit_intercept') or not self.fit_intercept:
raise TypeError('For probabilistic labels, self.fit_intercept must be True.')
output_layer=SoftLogisticRegression(C=self.C, learning_rate=0.01, momentum=0.9, max_iter=200,
random_state=self.random_state, tol=1e-4, verbose=False).fit(H,y)
self.beta = np.r_[output_layer.coefs_[-1].ravel(),output_layer.intercepts_[-1]]
else:
if hasattr(self,'fit_intercept') and self.fit_intercept:
H = np.c_[X,np.ones((N,1))]
nh = self.n_hidden+1
else:
nh = self.n_hidden
if N>self.n_hidden:
if self.ovo:
if sample_weight is not None:
raise NotImplementedError('OVO and sampled_weight at the same time is not implemented yet.')
self.beta = np.empty((nh,self.label_binarizer.classes_.shape[0]*(self.label_binarizer.classes_.shape[0]-1)//2))
cc = 0
for ii in combinations(range(self.label_binarizer.classes_.shape[0]),2):
id_ = np.where(np.logical_or(y[:,ii[0]]==2,y[:,ii[1]]==2))[0]
#if self.C==0:
# self.beta[:,cc] = dot(pinv(H[id_,:]),y[id_,ii[0]])
#else:
Ht_ = H[id_,:].T
self.beta[:,cc] = multi_dot((inv(dot(Ht_,Ht_.T)+self.C*N*1.0/nh*np.eye(nh)),Ht_,y[id_,ii[0]]))
cc += 1
else:
if sample_weight is None:
#if self.C==0:
# self.beta = dot(pinv(H),y)
#else:
self.beta = multi_dot((inv(dot(H.T,H)+self.C*N*1.0/nh*np.eye(nh)),H.T,y))
else:
Ht =sample_weight*H.T
#if self.C==0:
# self.beta = dot(pinv(Ht.T),y)
#else:
self.beta = multi_dot((inv(dot(Ht,H)+self.C*1.0/nh*np.eye(nh)),Ht,y))
else:
if self.ovo:
if sample_weight is not None:
raise NotImplementedError('OVO and sampled_weight at the same time is not implemented yet.')
n_beta = self.label_binarizer.classes_.shape[0]*(self.label_binarizer.classes_.shape[0]-1)//2
self.beta = np.empty((nh,n_beta))
cc = 0
for ii in combinations(range(self.label_binarizer.classes_.shape[0]),2):
id_ = np.where(np.logical_or(y[:,ii[0]]==2,y[:,ii[1]]==2))[0]
H_ = H[id_,:]
#if self.C==0:
# self.beta[:,cc] = dot(pinv(H_),y[id_,ii[0]])
#else:
self.beta[:,cc] = multi_dot((H_.T,inv(dot(H_,H_.T)+self.C*N*1.0/nh*np.eye(N)),y[id_,ii[0]]))
cc += 1
else:
if sample_weight is None:
#if self.C==0:
# self.beta = dot(pinv(H),y)
#else:
self.beta = multi_dot((H.T,inv(dot(H,H.T)+self.C*N*1.0/nh*np.eye(N)),y))
else:
self.beta = multi_dot((H.T,inv((sample_weight*dot(H,H.T)).T+self.C*1.0/nh*np.eye(N)),(sample_weight*y.T).T))
else:
# OS-ELM
raise NotImplementedError('OS-ELM is not implemented yet.')
if self.ovo:
raise NotImplementedError('OVO in batch mode is not implemented yet.')
if sample_weight is not None:
raise NotImplementedError('sampled_weight in batch mode is not implemented yet.')
if N_%self.batch_size==0:
batches = [self.n_hidden]+[self.batch_size]*(N_//self.batch_size)
else:
batches = [self.n_hidden]+[self.batch_size]*(N_//self.batch_size)+[N_%self.batch_size]
#shuffled_id = list(range(N))
#self.random_state_.shuffle(shuffled_id)
#X = X[shuffled_id,:]
#y = y[shuffled_id]
for i in range(len(batches)):
start_n = sum(batches[:i])
end_n = sum(batches[:i+1])
y_part = y[start_n:end_n]
if self.random_projection:
if self.activation_func == 'sigmoid':
H = sigmoid(dot(X[start_n:end_n,:],self.W)+self.b)
if hasattr(self,'fit_intercept') and self.fit_intercept:
H = np.c_[H,np.ones((batches[i],1))]
else:
                        raise NotImplementedError('activation_func="%s" is not implemented.' % self.activation_func)
else:
self.n_hidden = X.shape[1]
if hasattr(self,'fit_intercept') and self.fit_intercept:
H = np.c_[X[start_n:end_n,:],np.ones((batches[i],1))]
else:
H = X[start_n:end_n,:]
if i==0 or self.P is None:
if hasattr(self,'fit_intercept') and self.fit_intercept:
nh = self.n_hidden+1
else:
nh = self.n_hidden
self.P = inv(dot(H.T,H)+self.C*N*1.0/nh*np.eye(nh))
self.beta = multi_dot((self.P,H.T,y_part))
else:
if N==1:
h = H.ravel()
hht = np.outer(h,h)
self.P = self.P - multi_dot((self.P,hht,self.P))/(1.+(self.P*hht).sum())
else:
PHt = dot(self.P,H.T)
self.P = self.P - multi_dot((PHt,inv(dot(H,PHt)+np.eye(batches[i])),H,self.P))
self.beta = self.beta + dot(dot(self.P,H.T),y_part-dot(H,self.beta))
self.fitted_ = True
return self
def fit_transform(self, X, y):
return self.fit(X,y).transform(X)
def transform(self, X):
return self.decision_function(X)
def decision_function(self, X):
if not self.fitted_:
raise ValueError('This ELMClassifier instance is not fitted yet.')
X = self._validate_X(X)
if self.random_projection:
H = sigmoid(dot(X,self.W)+self.b)
else:
H = X
if hasattr(self,'fit_intercept') and self.fit_intercept:
H = np.hstack((H,np.ones((X.shape[0],1))))
return dot(H,self.beta)
def predict(self, X):
if self.ovo:
yy = self.decision_function(X)
cc = 0
for ii in combinations(range(self.label_binarizer.classes_.shape[0]),2):
id_ = yy[:,cc]>=0
yy[:,cc][id_] = ii[0]
yy[:,cc][np.logical_not(id_)] = ii[1]
cc += 1
yy = mode(yy,axis=1)[0].ravel()
return self.label_binarizer.inverse_transform(label_binarize(yy, range(self.label_binarizer.classes_.shape[0])))
else:
proba, y = self.predict_proba(X,return_y=True)
if y is None:
return proba
else:
return y
def predict_proba(self, X):
# [1] Ngufor, C., & Wojtusiak, J. (2013).
# Learning from large-scale distributed health data: An approximate logistic regression approach.
# In Proceedings of the 30th International Conference on Machine Learning, Atlanta, Georgia, USA, JMLR: W&CP (pp. 1-8).
# [2] Ngufor, C., Wojtusiak, J., Hooker, A., Oz, T., & Hadley, J. (2014, May).
# Extreme Logistic Regression: A Large Scale Learning Algorithm with Application to Prostate Cancer Mortality Prediction.
# In FLAIRS Conference.
#if self.label_binarizer.classes_.shape[0]!=2:
# print('Warning: This is one-vs-all probability for each class.')
if self.ovo:
proba = label_binarize(self.predict(X),self.label_binarizer.classes_)
"""
K = self.label_binarizer.classes_.shape[0]
proba = np.zeros((X.shape[0],K))
for i in range(K):
cc = 0
for ii in combinations(range(self.label_binarizer.classes_.shape[0]),2):
if ii[0]==i:
proba[:,i] = np.maximum(proba[:,i],proba_[:,cc])
elif ii[1]==i:
proba[:,i] = np.maximum(proba[:,i],1-proba_[:,cc])
cc += 1
"""
else:
hb = self.decision_function(X)
proba = sigmoid(hb)
if proba.ndim>1:
proba = (proba.T/proba.sum(axis=1)).T
if self.return_y:
if self.label_binarizer is None:
return proba, None
else:
return proba, self.label_binarizer.inverse_transform(hb)
else:
return proba
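# Illustrative sketch (not part of the original API): the fit above reduces to ridge
# regression in a random hidden-feature space H = sigmoid(X.W + b),
#     beta = (H'H + c*I)^-1 H'Y,
# which the small helper below reproduces for the plain over-determined case
# (the class additionally rescales the regularizer by N/n_hidden).
def _elm_ridge_beta(H, Y, c):
    # Closed-form ridge solution for the output weights of an ELM.
    nh = H.shape[1]
    return multi_dot((inv(dot(H.T, H) + c * np.eye(nh)), H.T, Y))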
class WeightedELMClassifier(ELMClassifier):
def __init__(self, n_hidden=100, C=1.0, batch_size=None, fit_intercept=False, ovo=False, classes=None, activation_func='sigmoid', random_projection=True, return_y=False, random_state=None):
super(WeightedELMClassifier, self).__init__(n_hidden=n_hidden, C=C, batch_size=batch_size, fit_intercept=fit_intercept, ovo=ovo, classes=classes, activation_func=activation_func, random_projection=random_projection, random_state=random_state, return_y=return_y)
def fit(self, X, y):
yc = Counter(y)
sample_weight = np.empty(X.shape[0])
#average_yc = np.mean(yc.values())
for yy in yc:
#if yc[yy]>average_yc:
# sample_weight[y==yy] = 1./np.sqrt(yc[yy])
#else:
# sample_weight[y==yy] = (np.sqrt(5)-1)/2/np.sqrt(yc[yy])
sample_weight[y==yy] = 1./np.sqrt(yc[yy])
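            # e.g. (illustrative) class counts {0: 100, 1: 25} give weights 0.1 and 0.2,
            # so minority-class samples carry twice the weight before normalization.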
return super(WeightedELMClassifier, self).fit(X, y, sample_weight=sample_weight/sample_weight.sum())
class SSELMClassifier(ELMClassifier):
def __init__(self, n_hidden=100, C=1.0, lambda_=1.0, activation_func='sigmoid', matlab_code_path=None, classes=None, random_projection=True, random_state=None):
super(SSELMClassifier, self).__init__(n_hidden=n_hidden, C=C, batch_size=None, fit_intercept=False, activation_func=activation_func, classes=classes, random_projection=random_projection, random_state=random_state)
        self.lambda_ = lambda_
self.eng = None
self.L = None
#self.model_matlab = None
if matlab_code_path is None:
self.matlab_code_path = None
else:
self.matlab_code_path = os.path.normpath(matlab_code_path)
def start_matlab_connection(self):
if self.matlab_code_path is not None:
if self.eng is None:
self.eng = matlab.engine.start_matlab()
self.eng.addpath(self.matlab_code_path, nargout=0)
else:
self.eng = None
def close_matlab_connection(self):
if self.eng is not None:
self.eng.exit()
self.eng = None
def compute_graph_laplacian(self, X, params):
self.start_matlab_connection()
self.L = self.eng.laplacian(params, matlab.double(X.tolist()), nargout=1)
def fit(self, X, y):
self.fitted_ = False
self.random_state_ = check_random_state(self.random_state)
X, y = self._validate_X_y(X, y)
if self.matlab_code_path is None:
raise NotImplementedError('No Python implementation for SSELM yet.')
"""
N, dx = X.shape
Nu = np.sum(np.isnan(y))
Nl = N-Nu
self.b = self.random_state_.uniform(size=self.n_hidden)*2-1
self.W = self.random_state_.uniform(size=(dx,self.n_hidden))*2-1
if self.activation_func == 'sigmoid':
H = sigmoid(dot(X,self.W)+self.b)
else:
raise NotImplementedError('activation_func="%s" is not implemented.')
C = np.eye(N,dtype=float)*self.C
C[range(Nl,N),range(Nl,N)] = 0.
L = ???
if Nl>self.n_hidden:
self.beta = multi_dot((inv(np.eye(self.n_hidden,dtype=float)+multi_dot((H.T,C+self.lambda_*L,H))),H.T,C,y))
else:
self.beta = multi_dot(H.T,inv(np.eye(N,dtype=float)+multi_dot((C+self.lambda_*L,H,H.T))),C,y)
"""
else:
unlabeled_id = np.isnan(y)
labeled_id = np.logical_not(unlabeled_id)
self.start_matlab_connection()
params = {'NN':50,'GraphWeights':'binary','GraphDistanceFunction':'euclidean',
'LaplacianNormalize':1,'LaplacianDegree':5,
'NoDisplay':1,'Kernel':'sigmoid','random_state':self.random_state,'random_projection':self.random_projection,
'NumHiddenNeuron':self.n_hidden,'C':self.C,'lambda':self.lambda_}
if self.L is None:
L = self.compute_graph_laplacian(X, params)
else:
L = self.L
import scipy.io as sio
            sio.savemat('bb.mat',{'paras':params,'X':X,'Xl':X[labeled_id,:],'Yl':y[labeled_id],'Xu':X[unlabeled_id,:],'L':L})
model_matlab = self.eng.sselm(matlab.double(X[labeled_id,:].tolist()),matlab.double(y[labeled_id].tolist()),
matlab.double(X[unlabeled_id,:].tolist()), L, params, nargout=1)
            self.W = model_matlab._data['InputWeight']
            self.b = model_matlab._data['InputBias']
            self.beta = model_matlab._data['OutputWeight']
self.fitted_ = True
return self
class ELMAutoEncoderClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, n_hiddens, Cs, reg_type='l2',output_layer=None, SSELM_lambda_=1., sigparas=1., sigparas1=1., matlab_code_path=None, random_state=None):
self.n_hiddens = n_hiddens
self.Cs = Cs
self.output_layer = output_layer
self.SSELM_lambda_ = SSELM_lambda_
if type(sigparas)==list:
self.sigparas = sigparas
else:
self.sigparas = [sigparas]*len(self.n_hiddens)
if type(sigparas1)==list:
self.sigparas1 = sigparas1
else:
self.sigparas1 = [sigparas1]*len(self.n_hiddens)
if matlab_code_path is None:
self.matlab_code_path = None
self.layers = None
else:
self.matlab_code_path = os.path.normpath(matlab_code_path)
self.layers_matlab = None
self.eng = None
#self.batch_size = batch_size
#self.fit_intercept = fit_intercept
#self.activation_func = activation_func
self.reg_type = reg_type
self.L = None
self.random_state = random_state
self.random_state_ = None
self.fitted_ = False
def start_matlab_connection(self):
if self.matlab_code_path is not None:
if self.eng is None:
self.eng = matlab.engine.start_matlab()
self.eng.addpath(self.matlab_code_path, nargout=0)
else:
self.eng = None
def close_matlab_connection(self):
if self.eng is not None:
self.eng.exit()
self.eng = None
def _validate_X(self, X):
if len(X.shape)==1:
raise ValueError('X should be a 2-dimensional array.')
# if one feature:
# X = X.reshape(1,-1)
# else: # one sample
# X = X.reshape(-1,1)
if X.shape[0]==0:
raise ValueError('Empty samples.')
if X.shape[1]==0:
raise ValueError('0 feature(s) (shape=(3, 0)) while a minimum of %d is required.'%(1,))
return as_float_array(check_array(X))
def _validate_X_y(self, X, y):
return self.output_layer._validate_X_y(X,y)
def compute_graph_laplacian(self, X, params):
self.start_matlab_connection()
import scipy.io as sio
sio.savemat('aa.mat',{'paras':params,'X':X})
self.L = self.eng.laplacian(params, matlab.double(X.tolist()), nargout=1)
def fit(self, X, y=None):
self.reg_type = self.reg_type.lower()
self.fitted_ = False
self.random_state_ = check_random_state(self.random_state)
if self.output_layer is None or y is None:
X = self._validate_X(X)
else:
X, y = self._validate_X_y(X, y)
if self.matlab_code_path is None:
# our python translation of the original ELM-Autoencoder in Matlab
hidden_layer_num = len(self.n_hiddens)
self.layers = []
X = X.T
dx, N = X.shape
n_layers = np.r_[dx,self.n_hiddens]
for i in range(hidden_layer_num):
W = self.random_state_.rand(n_layers[i+1],n_layers[i])*2.-1.
if n_layers[i+1] > n_layers[i]:
W = orth(W)
else:
W = orth(W.T).T
b = orth(self.random_state_.rand(n_layers[i+1],1)*2-1).ravel()
H = (dot(W,X).T+b).T
#print('AutoEncorder Max Val %f Min Val %f',H.max(),H.min())
H = sigmoid(self.sigparas1[i]*H)
self.layers.append({})
self.layers[-1]['W'] = W
self.layers[-1]['b'] = b
self.layers[-1]['n_hidden'] = n_layers[i+1]
self.layers[-1]['sigpara'] = self.sigparas[i]
self.layers[-1]['sigpara1'] = self.sigparas1[i]
if n_layers[i+1]==n_layers[i]:
C = dot(H,X.T)
_,_,v1 = svd(dot(C.T,C))
u2,_,_ = svd(dot(C,C.T))
self.layers[-1]['beta'] = dot(u2,v1)
else:
if self.Cs[i] == 0:
self.layers[-1]['beta'], _, _, _ = lstsq(H.T,X.T)
elif self.reg_type=='l2':
rho = 0.05
rhohats = np.mean(H,axis=1)
KLsum = np.sum(rho*np.log(rho/rhohats)+(1.-rho)*np.log((1.-rho)/(1.-rhohats)))
Hsquare = dot(H,H.T)
HsquareL = np.diag(np.max(Hsquare,axis=1))
self.layers[-1]['beta'] = multi_dot((inv((np.eye(H.shape[0])*KLsum+HsquareL)*self.Cs[i]+Hsquare),H,X.T))
elif self.reg_type=='l1':
tol = 1e-3
"""
beta_ = Variable(X.shape[0],H.shape[0])
prob = Problem(Minimize(norm(beta_*H-X,'fro')+norm(beta_,1)*self.Cs[i]))
prob.solve(solver=SCS,use_indirect=False,eps=tol)#,verbose=True)
self.layers[-1]['beta'] = beta_.value.getA().T
"""
lasso = Lasso(alpha=self.Cs[i]/H.shape[1], fit_intercept=False, precompute='auto', max_iter=3000,
tol=tol, warm_start=False, random_state=self.random_state*2, selection='random')
lasso.fit(H.T,X.T)
self.layers[-1]['beta'] = lasso.coef_.T
else:
raise NotImplementedError('Regularization type "%s" is not implemented.'%self.reg_type)
H = dot(self.layers[-1]['beta'],X)
if n_layers[i+1]==n_layers[i]:
X = H
else:
#print('Layered Max Val %f Min Val %f',H.max(),H.min())
X = sigmoid(self.sigparas[i]*H)
if self.output_layer is not None and y is not None:
self.output_layer.fit(X.T,y)
"""
self.layers.append({})
if np.any(np.isnan(y)): # semi-supervised ELM
nonnan_ids = np.logical_not(np.isnan(y))
Xl = X[:,nonnan_ids].T
yl = y[nonnan_ids]
Xu = X[:,np.logical_not(nonnan_ids)].T
if self.L is None:
L = self.compute_graph_laplacian(X.T, {'NN':50,'GraphWeights':'binary','GraphDistanceFunction':'euclidean',
'LaplacianNormalize':1,'LaplacianDegree':5})
else:
L = self.L
if Nl>self.n_hidden:
self.beta = multi_dot((inv(np.eye(self.n_hidden,dtype=float)+multi_dot((H.T,C+self.SSELM_lambda_*L,H))),H.T,C,y))
else:
self.beta = multi_dot(H.T,inv(np.eye(N,dtype=float)+multi_dot((C+self.SSELM_lambda_*L,H,H.T))),C,y)
else: # normal ELM
if self.Cs[hidden_layer_num] == 0:
self.layers[-1]['beta'], _, _, _ = lstsq(X.T,y.T)
else:
self.layers[-1]['beta'] = multi_dot((inv(np.eye(X.shape[0])/self.Cs[hidden_layer_num]+dot(X,X.T)),X,y.T))
"""
else: # call the original ELM-Autoencoder in Matlab
self.start_matlab_connection()
matlab_X = matlab.double(X.tolist())
matlab_y = matlab.double(y.tolist())
matlab_n_hiddens = matlab.double(list(self.n_hiddens))
matlab_Cs = matlab.double(list(self.Cs))
matlab_sigparas = matlab.double(list(self.sigparas))
matlab_sigparas1 = matlab.double(list(self.sigparas1))
#import scipy.io as sio
#sio.savemat('aa.mat',{'X':X,'y':y,'n_hiddens':self.n_hiddens,'Cs':self.Cs,'sigparas':self.sigparas,'sigparas1':self.sigparas1})
self.layers_matlab = self.eng.elm_autoencoder_train(self.random_state,matlab_X,matlab_y,
matlab_n_hiddens,matlab_Cs,matlab_sigparas,matlab_sigparas1,self.output_layer is not None,nargout=1)
self.fitted_ = True
return self
def fit_transform(self, X, y=None):
return self.fit(X,y).transform(X)
def transform(self, X):
return self.decision_function(X)
def _transform_X(self, X):
X = X.T
hidden_layer_num = len(self.n_hiddens)
for i in range(hidden_layer_num):
H = dot(self.layers[i]['beta'],X)
if i==0:
n_hidden = X.shape[0]
else:
n_hidden = self.layers[i-1]['n_hidden']
if n_hidden == self.layers[i]['n_hidden']:
X = H
else:
X = sigmoid(dot(self.layers[i]['sigpara'],H))
return X.T
def decision_function(self, X):
if not self.fitted_:
raise ValueError('This ELMAutoEncoderClassifier instance is not fitted yet.')
X = self._validate_X(X)
if self.matlab_code_path is None:
X = self._transform_X(X)
if self.output_layer is None:
yy = X
else:
yy = self.output_layer.decision_function(X)
else:
matlab_X = matlab.double(X.tolist())
yy = self.eng.elm_autoencoder_test(self.layers_matlab, matlab_X, nargout=1)
yy = np.array(list(yy._data)).T
return yy
def predict(self, X):
if not self.fitted_:
raise ValueError('This ELMAutoEncoderClassifier instance is not fitted yet.')
X = self._transform_X(self._validate_X(X))
if self.output_layer is None:
raise TypeError('self.output_layer is None.')
y = self.output_layer.predict(X)
if y.ndim>1:
y = y[:,1]
return y
def predict_proba(self, X, return_y=False):
if not self.fitted_:
raise ValueError('This ELMAutoEncoderClassifier instance is not fitted yet.')
X = self._transform_X(self._validate_X(X))
if self.output_layer is None:
raise TypeError('self.output_layer is None.')
yp = self.output_layer.predict_proba(X)
return yp
#def score(self, X, y):
# nonan_ids = np.logical_not(np.isnan(y))
# if self.problem_type == 'classification':
# return metrics.accuracy_score(y[nonan_ids], self.predict(X[nonan_ids,:]))
# else:
# return -metrics.mean_squared_error(y[nonan_ids], self.predict(X[nonan_ids,:]))
class VigilanceELMAutoEncoder(BaseEstimator, ClassifierMixin):
def __init__(self, channel_num, sig_length, spec_length, sig_n_hidden=50, spec_n_hidden=50, n_hiddens=[], sig_C=0.1, spec_C=0.1, Cs=[0.1], sigparas=1., sigparas1=1.,
lr=0.01, mc=0.9, max_epoch_num=50, matlab_code_path=None, verbose=False, random_state=None,to_transpose=False):#, classes_=None
self.channel_num = channel_num
self.sig_length = sig_length
self.spec_length = spec_length
self.sig_n_hidden = sig_n_hidden
self.spec_n_hidden = spec_n_hidden
self.n_hiddens = n_hiddens
self.sig_C = sig_C
self.spec_C = spec_C
self.Cs = Cs
self.sigparas = sigparas
self.sigparas1 = sigparas1
self.lr = lr
self.mc = mc
self.max_epoch_num = max_epoch_num
self.matlab_code_path = matlab_code_path
self.verbose = verbose
self.random_state = random_state
self.to_transpose = to_transpose
self.fitted_ = False
def fit(self, sigs_specs, vigs):
if self.to_transpose:
sigs_specs = np.transpose(sigs_specs,(1,0,2))
# sigs_specs: channel_num x seg_num x (sig_length+spec_length)
if sigs_specs.shape[0]!=self.channel_num:
raise ValueError('sigs_specs.shape[0](%d) != channel_num(%d)'%(sigs_specs.shape[0],self.channel_num))
if sigs_specs.shape[2]!=self.sig_length+self.spec_length:
raise ValueError('sigs_specs.shape[2](%d) != sig_length(%d) + spec_length(%d)'%(sigs_specs.shape[2],self.sig_length,self.spec_length))
self.fitted_ = False
#self.random_state_ = check_random_state(self.random_state) # done in the component classifiers
self.sig_elmaes = [ELMAutoEncoderClassifier([self.sig_n_hidden], [self.sig_C], sigparas=1, sigparas1=1, reg_type='l2',
matlab_code_path=self.matlab_code_path, random_state=self.random_state+i) for i in range(self.channel_num)]
self.spec_elmaes = [ELMAutoEncoderClassifier([self.spec_n_hidden], [self.spec_C], sigparas=1, sigparas1=1, reg_type='l2',
matlab_code_path=self.matlab_code_path, random_state=self.random_state+self.channel_num+i) for i in range(self.channel_num)]
self.later_elmae = ELMAutoEncoderClassifier(self.n_hiddens, self.Cs[:-1], reg_type='l2',
output_layer=SoftLogisticRegression(C=self.Cs[-1], learning_rate=0.01, momentum=0.9, max_iter=200,
random_state=self.random_state, tol=1e-4, verbose=False),
sigparas=self.sigparas, sigparas1=self.sigparas1, matlab_code_path=self.matlab_code_path,# classes_=classes_,
random_state=self.random_state+2*self.channel_num)
## first fit_transform sig_elmaes and spec_elmaes
seg_num = sigs_specs.shape[1]
#X = np.empty((seg_num, (self.sig_n_hidden+self.spec_n_hidden)*self.channel_num))
X = np.empty((seg_num, self.spec_n_hidden*self.channel_num))
for i in range(self.channel_num):
if self.verbose:
print('channel %d/%d'%(i+1,self.channel_num))
#X[:,self.sig_n_hidden*i:self.sig_n_hidden*(i+1)] =\
# self.sig_elmaes[i].fit_transform(sigs_specs[i,:,:self.sig_length], None)
X[:,self.spec_n_hidden*i:self.spec_n_hidden*(i+1)] =\
self.spec_elmaes[i].fit_transform(sigs_specs[i,:,self.sig_length:])
## then fit later_elmae
self.later_elmae.fit(X, vigs)
self.fitted_ = True
return self
def predict(self, sigs_specs):
return self.predict_proba(sigs_specs)
def predict_proba(self, sigs_specs):
if self.to_transpose:
sigs_specs = np.transpose(sigs_specs,(1,0,2))
# sigs_specs: channel_num x seg_num x (sig_length+spec_length)
if sigs_specs.shape[0]!=self.channel_num:
raise ValueError('sigs_specs.shape[0](%d) != channel_num(%d)'%(sigs_specs.shape[0],self.channel_num))
if sigs_specs.shape[2]!=self.sig_length+self.spec_length:
raise ValueError('sigs_specs.shape[2](%d) != sig_length(%d) + spec_length(%d)'%(sigs_specs.shape[2],self.sig_length,self.spec_length))
## first transform using sig_elmaes and spec_elmaes
seg_num = sigs_specs.shape[1]
#X = np.empty((seg_num, (self.sig_n_hidden+self.spec_n_hidden)*self.channel_num))
X = np.empty((seg_num, self.spec_n_hidden*self.channel_num))
for i in range(self.channel_num):
#X[:,self.sig_n_hidden*i:self.sig_n_hidden*(i+1)] =\
# self.sig_elmaes[i].transform(sigs_specs[i,:,:self.sig_length])
X[:,self.spec_n_hidden*i:self.spec_n_hidden*(i+1)] =\
self.spec_elmaes[i].transform(sigs_specs[i,:,self.sig_length:])
yp = self.later_elmae.predict_proba(X)
if type(self.later_elmae.output_layer)==SoftLogisticRegression:
return yp[:,1]
else:
return yp[:,0]
#def score(self, sigs_specs, vigs):
# nonan_ids = np.logical_not(np.isnan(vigs))
# return -metrics.mean_squared_error(vigs[nonan_ids], self.predict(sigs_specs[:,nonan_ids,:]))
## deprecated!
"""
class OSELMClassifier(ELMClassifier):
def __init__(self, n_hidden=100, C=1.0, batch_size=1, activation_func='sigmoid', classes=None, random_state=None):
super(OSELMClassifier, self).__init__(n_hidden=n_hidden,C=C,activation_func=activation_func,classes=classes,random_state=random_state)
self.P = None # P = (H'*H+C*I)^-1
self.batch_size = batch_size
self.random_state_ = check_random_state(self.random_state)
def fit(self, X, y):
self.fitted_ = False
if self.batch_size <= 0:
raise ValueError('batch_size must be larger than 0.')
N = X.shape[0]
N_ = N-self.n_hidden
if N_>0:
if N_%self.batch_size==0:
batches = [self.n_hidden]+[self.batch_size]*(N_//self.batch_size)
else:
batches = [self.n_hidden]+[self.batch_size]*(N_//self.batch_size)+[N_%self.batch_size]
else:
batches = [N]
#shuffled_id = list(range(N))
#self.random_state_.shuffle(shuffled_id)
#X = X[shuffled_id,:]
#y = y[shuffled_id]
for i in range(len(batches)):
start_n = sum(batches[:i])
end_n = sum(batches[:i+1])
self.fit_part(X[start_n:end_n,:],y[start_n:end_n],continue_fit=i!=0)
self.fitted_ = True
return self
def fit_part(self, X, y, continue_fit=True):
#recursive least square
self.fitted_ = False
X, y = self._validate_X_y(X, y)
N, dx = X.shape
if self.activation_func == 'sigmoid':
if not continue_fit or self.P is None:
self.b = self.random_state_.uniform(size=self.n_hidden)*2-1
self.W = self.random_state_.uniform(size=(dx,self.n_hidden))*2-1
H = sigmoid(dot(X,self.W)+self.b)
else:
raise NotImplementedError('activation_func="%s" is not implemented.')
if not continue_fit or self.P is None:
#if N<self.n_hidden:
# raise ValueError('Number of samples (N=%d) cannot be smaller than hidden neuron number (n_hidden=%d) in the initial fit.'%(N,self.n_hidden))
self.P = inv(dot(H.T,H)+self.C*np.eye(self.n_hidden))
self.beta = multi_dot((self.P,H.T,y))
else:
if N==1:
h = H.ravel()
hht = np.outer(h,h)
self.P = self.P - multi_dot((self.P,hht,self.P))/(1.+(self.P*hht).sum())
else:
PHt = dot(self.P,H.T)
self.P = self.P - multi_dot((PHt,inv(dot(H,PHt)+np.eye(N)),H,self.P))
self.beta = self.beta + dot(dot(self.P,H.T),y-dot(H,self.beta))
return self
def predict(self, X):
return super(OSELMClassifier, self).predict(X,allow_not_fitted=True)
"""
if __name__=='__main__':
import copy
import pdb
import timeit
    from sklearn import datasets, preprocessing, model_selection
#check_estimator(ELMClassifier)
#check_estimator(SequentialELMClassifier)
random_state = 1
np.random.seed(random_state)
X, y = datasets.make_classification(n_samples=2000, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=3, n_clusters_per_class=2,random_state=random_state)
    train_X, test_X, train_y, test_y = model_selection.train_test_split(X, y, train_size=0.8,random_state=random_state)
scaler = preprocessing.StandardScaler()
train_X = scaler.fit_transform(train_X)
test_X = scaler.transform(test_X)
hnn = 50
C = 1
elmclf = ELMClassifier(C=C, n_hidden=hnn, fit_intercept=False,random_state=random_state)
oselmclf = ELMClassifier(C=C, n_hidden=hnn, fit_intercept=False,batch_size=300, random_state=random_state)
selmclf = SequentialELMClassifier([elmclf,copy.deepcopy(elmclf)])
st_elm = timeit.default_timer()
elmclf.fit(train_X,train_y)
et_elm = timeit.default_timer()
st_oselm = timeit.default_timer()
oselmclf.fit(train_X,train_y)
et_oselm = timeit.default_timer()
st_selm = timeit.default_timer()
selmclf.fit(train_X,train_y)
et_selm = timeit.default_timer()
print('ELM and OS-ELM are consistent: %s.'%np.allclose(elmclf.beta,oselmclf.beta))
print('ELM time: %gs'%(et_elm-st_elm,))
print('OS-ELM time: %g'%(et_oselm-st_oselm,))
print('S-ELM time: %g'%(et_selm-st_selm,))
print('ELM acc: %g'%elmclf.score(test_X,test_y))
print('OS-ELM acc: %g'%oselmclf.score(test_X,test_y))
print('S-ELM acc: %g'%selmclf.score(test_X,test_y))
|
py | 7dff0dbf1e3ad650e5148f01e7ac6bfd65d1cfe7 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import warnings
from typing import Any, Dict, List, Optional, Tuple, Union
import ax.service.utils.best_point as best_point_utils
import numpy as np
import pandas as pd
from ax.core.arm import Arm
from ax.core.base_trial import BaseTrial
from ax.core.batch_trial import BatchTrial
from ax.core.data import Data
from ax.core.experiment import Experiment
from ax.core.generator_run import GeneratorRun
from ax.core.metric import Metric
from ax.core.trial import Trial
from ax.core.types import (
TEvaluationOutcome,
TModelPredictArm,
TParameterization,
TParamValue,
)
from ax.modelbridge.dispatch_utils import choose_generation_strategy
from ax.modelbridge.generation_strategy import GenerationStrategy
from ax.modelbridge.modelbridge_utils import get_pending_observation_features
from ax.plot.base import AxPlotConfig
from ax.plot.contour import plot_contour
from ax.plot.exp_utils import exp_to_df
from ax.plot.feature_importances import plot_feature_importance_by_feature
from ax.plot.helper import _format_dict, _get_in_sample_arms
from ax.plot.trace import optimization_trace_single_method
from ax.service.utils.instantiation import (
data_from_evaluations,
make_experiment,
raw_data_to_evaluation,
)
from ax.service.utils.with_db_settings_base import DBSettings, WithDBSettingsBase
from ax.storage.json_store.decoder import (
generation_strategy_from_json,
object_from_json,
)
from ax.storage.json_store.encoder import object_to_json
from ax.utils.common.docutils import copy_doc
from ax.utils.common.executils import retry_on_exception
from ax.utils.common.logger import _round_floats_for_logging, get_logger
from ax.utils.common.typeutils import (
checked_cast,
checked_cast_dict,
checked_cast_optional,
not_none,
)
from botorch.utils.sampling import manual_seed
logger = get_logger(__name__)
CHOLESKY_ERROR_ANNOTATION = (
"Cholesky errors typically occur when the same or very similar "
"arms are suggested repeatedly. This can mean the model has "
"already converged and you should avoid running further trials. "
"It will also help to convert integer or categorical parameters "
"to float ranges where reasonable.\nOriginal error: "
)
class AxClient(WithDBSettingsBase):
"""
Convenience handler for management of experimentation cycle through a
service-like API. External system manages scheduling of the cycle and makes
calls to this client to get next suggestion in the experiment and log back
data from the evaluation of that suggestion.
Note: `AxClient` expects to only propose 1 arm (suggestion) per trial; support
for use cases that require use of batches is coming soon.
Two custom types used in this class for convenience are `TParamValue` and
`TParameterization`. Those are shortcuts for `Union[str, bool, float, int]`
and `Dict[str, Union[str, bool, float, int]]`, respectively.
Args:
generation_strategy: Optional generation strategy. If not set, one is
intelligently chosen based on properties of search space.
db_settings: Settings for saving and reloading the underlying experiment
to a database. Expected to be of type
ax.storage.sqa_store.structs.DBSettings and require SQLAlchemy.
enforce_sequential_optimization: Whether to enforce that when it is
reasonable to switch models during the optimization (as prescribed
by `num_trials` in generation strategy), Ax will wait for enough trials
to be completed with data to proceed. Defaults to True. If set to
False, Ax will keep generating new trials from the previous model
until enough data is gathered. Use this only if necessary;
otherwise, it is more resource-efficient to
optimize sequentially, by waiting until enough data is available to
use the next model.
random_seed: Optional integer random seed, set to fix the optimization
random seed for reproducibility. Works only for Sobol quasi-random
generator and for BoTorch-powered models. For the latter models, the
            trials generated from the same optimization setup with the same seed
            will be mostly similar, but the exact parameter values may still vary
            and trials later in the optimization will diverge more and more.
This is because a degree of randomness is essential for high performance
of the Bayesian optimization models and is not controlled by the seed.
Note: In multi-threaded environments, the random seed is thread-safe,
but does not actually guarantee reproducibility. Whether the outcomes
will be exactly the same for two same operations that use the random
seed, depends on whether the threads modify the random state in the
same order across the two operations.
verbose_logging: Whether Ax should log significant optimization events,
defaults to `True`.
        suppress_storage_errors: Whether to suppress SQL storage-related errors if
            encountered. Only use if SQL storage is not important for the given use
            case, since this will only log, but not raise, an exception if it is
            encountered while saving to DB or loading from it.
"""
def __init__(
self,
generation_strategy: Optional[GenerationStrategy] = None,
db_settings: Optional[DBSettings] = None,
enforce_sequential_optimization: bool = True,
random_seed: Optional[int] = None,
verbose_logging: bool = True,
suppress_storage_errors: bool = False,
) -> None:
super().__init__(db_settings=db_settings)
if not verbose_logging:
logger.setLevel(logging.WARNING) # pragma: no cover
else:
logger.info(
"Starting optimization with verbose logging. To disable logging, "
"set the `verbose_logging` argument to `False`. Note that float "
"values in the logs are rounded to 2 decimal points."
)
self._generation_strategy = generation_strategy
self._experiment: Optional[Experiment] = None
self._enforce_sequential_optimization = enforce_sequential_optimization
self._random_seed = random_seed
self._suppress_storage_errors = suppress_storage_errors
if random_seed is not None:
logger.warning(
f"Random seed set to {random_seed}. Note that this setting "
"only affects the Sobol quasi-random generator "
"and BoTorch-powered Bayesian optimization models. For the latter "
"models, setting random seed to the same number for two optimizations "
"will make the generated trials similar, but not exactly the same, "
"and over time the trials will diverge more."
)
# ------------------------ Public API methods. ------------------------
def create_experiment(
self,
parameters: List[Dict[str, Union[TParamValue, List[TParamValue]]]],
name: Optional[str] = None,
objective_name: Optional[str] = None,
minimize: bool = False,
parameter_constraints: Optional[List[str]] = None,
outcome_constraints: Optional[List[str]] = None,
status_quo: Optional[TParameterization] = None,
overwrite_existing_experiment: bool = False,
experiment_type: Optional[str] = None,
choose_generation_strategy_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
"""Create a new experiment and save it if DBSettings available.
Args:
parameters: List of dictionaries representing parameters in the
experiment search space.
Required elements in the dictionaries are:
1. "name" (name of parameter, string),
2. "type" (type of parameter: "range", "fixed", or "choice", string),
and one of the following:
3a. "bounds" for range parameters (list of two values, lower bound
first),
3b. "values" for choice parameters (list of values), or
3c. "value" for fixed parameters (single value).
Optional elements are:
1. "log_scale" (for float-valued range parameters, bool),
2. "value_type" (to specify type that values of this parameter should
take; expects "float", "int", "bool" or "str"),
3. "is_fidelity" (bool) and "target_value" (float) for fidelity
parameters,
4. "is_ordered" (bool) for choice parameters, and
5. "is_task" (bool) for task parameters.
            objective_name: Name of the metric used as objective in this experiment.
This metric must be present in `raw_data` argument to `complete_trial`.
name: Name of the experiment to be created.
minimize: Whether this experiment represents a minimization problem.
parameter_constraints: List of string representation of parameter
constraints, such as "x3 >= x4" or "-x3 + 2*x4 - 3.5*x5 >= 2". For
the latter constraints, any number of arguments is accepted, and
acceptable operators are "<=" and ">=".
outcome_constraints: List of string representation of outcome
constraints of form "metric_name >= bound", like "m1 <= 3."
status_quo: Parameterization of the current state of the system.
If set, this will be added to each trial to be evaluated alongside
test configurations.
overwrite_existing_experiment: If an experiment has already been set
on this `AxClient` instance, whether to reset it to the new one.
If overwriting the experiment, generation strategy will be
re-selected for the new experiment and restarted.
To protect experiments in production, one cannot overwrite existing
experiments if the experiment is already stored in the database,
regardless of the value of `overwrite_existing_experiment`.
choose_generation_strategy_kwargs: Keyword arguments to pass to
`choose_generation_strategy` function which determines what
generation strategy should be used when none was specified on init.
"""
if self.db_settings_set and not name:
raise ValueError( # pragma: no cover
"Must give the experiment a name if `db_settings` is not None."
)
if self.db_settings_set:
experiment_id, _ = self._get_experiment_and_generation_strategy_db_id(
experiment_name=not_none(name)
)
if experiment_id:
raise ValueError(
f"Experiment {name} already exists in the database. "
"To protect experiments that are running in production, "
"overwriting stored experiments is not allowed. To "
"start a new experiment and store it, change the "
"experiment's name."
)
if self._experiment is not None:
if overwrite_existing_experiment:
exp_name = self.experiment._name or "untitled"
new_exp_name = name or "untitled"
logger.info(
f"Overwriting existing experiment ({exp_name}) on this client "
f"with new experiment ({new_exp_name}) and restarting the "
"generation strategy."
)
self._generation_strategy = None
else:
raise ValueError(
"Experiment already created for this client instance. "
"Set the `overwrite_existing_experiment` to `True` to overwrite "
"with new experiment."
)
self._experiment = make_experiment(
name=name,
parameters=parameters,
objective_name=objective_name,
minimize=minimize,
parameter_constraints=parameter_constraints,
outcome_constraints=outcome_constraints,
status_quo=status_quo,
experiment_type=experiment_type,
)
try:
self._save_experiment_to_db_if_possible(
experiment=self.experiment,
suppress_all_errors=self._suppress_storage_errors,
)
except Exception:
# Unset the experiment on this `AxClient` instance if encountered and
# raising an error from saving the experiment, to avoid a case where
# overall `create_experiment` call fails with a storage error, but
# `self._experiment` is still set and user has to specify the
            # `overwrite_existing_experiment` kwarg to re-attempt exp. creation.
self._experiment = None
raise
self._set_generation_strategy(
choose_generation_strategy_kwargs=choose_generation_strategy_kwargs
)
self._save_generation_strategy_to_db_if_possible(
generation_strategy=self.generation_strategy,
suppress_all_errors=self._suppress_storage_errors,
)
@retry_on_exception(
logger=logger,
exception_types=(RuntimeError,),
suppress_all_errors=False,
wrap_error_message_in=CHOLESKY_ERROR_ANNOTATION,
)
def get_next_trial(
self, ttl_seconds: Optional[int] = None
) -> Tuple[TParameterization, int]:
"""
Generate trial with the next set of parameters to try in the iteration process.
Note: Service API currently supports only 1-arm trials.
Args:
ttl_seconds: If specified, will consider the trial failed after this
many seconds. Used to detect dead trials that were not marked
failed properly.
Returns:
Tuple of trial parameterization, trial index
"""
trial = self.experiment.new_trial(
generator_run=self._gen_new_generator_run(), ttl_seconds=ttl_seconds
)
logger.info(
f"Generated new trial {trial.index} with parameters "
f"{_round_floats_for_logging(item=not_none(trial.arm).parameters)}."
)
trial.mark_running(no_runner_required=True)
self._save_new_trial_to_db_if_possible(
experiment=self.experiment,
trial=trial,
suppress_all_errors=self._suppress_storage_errors,
)
self._update_generation_strategy_in_db_if_possible(
generation_strategy=self.generation_strategy,
new_generator_runs=trial.generator_runs,
suppress_all_errors=self._suppress_storage_errors,
)
return not_none(trial.arm).parameters, trial.index
def abandon_trial(self, trial_index: int, reason: Optional[str] = None) -> None:
"""Abandons a trial and adds optional metadata to it.
Args:
trial_index: Index of trial within the experiment.
"""
trial = self._get_trial(trial_index=trial_index)
trial.mark_abandoned(reason=reason)
def complete_trial(
self,
trial_index: int,
raw_data: TEvaluationOutcome,
metadata: Optional[Dict[str, Union[str, int]]] = None,
sample_size: Optional[int] = None,
) -> None:
"""
Completes the trial with given metric values and adds optional metadata
to it.
Args:
trial_index: Index of trial within the experiment.
raw_data: Evaluation data for the trial. Can be a mapping from
metric name to a tuple of mean and SEM, just a tuple of mean and
SEM if only one metric in optimization, or just the mean if there
is no SEM. Can also be a list of (fidelities, mapping from
metric name to a tuple of mean and SEM).
metadata: Additional metadata to track about this run.
sample_size: Number of samples collected for the underlying arm,
optional.
"""
# Validate that trial can be completed.
if not isinstance(trial_index, int): # pragma: no cover
raise ValueError(f"Trial index must be an int, got: {trial_index}.")
trial = self._get_trial(trial_index=trial_index)
self._validate_can_complete_trial(trial=trial)
# Format the data to save.
sample_sizes = {not_none(trial.arm).name: sample_size} if sample_size else {}
evaluations, data = self._make_evaluations_and_data(
trial=trial, raw_data=raw_data, metadata=metadata, sample_sizes=sample_sizes
)
trial._run_metadata = metadata or {}
for metric_name in data.df["metric_name"].values:
if metric_name not in self.experiment.metrics:
logger.info(
f"Data was logged for metric {metric_name} that was not yet "
"tracked on the experiment. Adding it as tracking metric."
)
self.experiment.add_tracking_metric(Metric(name=metric_name))
self.experiment.attach_data(data=data)
trial.mark_completed()
data_for_logging = _round_floats_for_logging(
item=evaluations[next(iter(evaluations.keys()))]
)
logger.info(
f"Completed trial {trial_index} with data: "
f"{_round_floats_for_logging(item=data_for_logging)}."
)
self._save_updated_trial_to_db_if_possible(
experiment=self.experiment,
trial=trial,
suppress_all_errors=self._suppress_storage_errors,
)
def update_trial_data(
self,
trial_index: int,
raw_data: TEvaluationOutcome,
metadata: Optional[Dict[str, Union[str, int]]] = None,
sample_size: Optional[int] = None,
) -> None:
"""
Attaches additional data for completed trial (for example, if trial was
completed with data for only one of the required metrics and more data
needs to be attached).
Args:
trial_index: Index of trial within the experiment.
raw_data: Evaluation data for the trial. Can be a mapping from
metric name to a tuple of mean and SEM, just a tuple of mean and
SEM if only one metric in optimization, or just the mean if there
is no SEM. Can also be a list of (fidelities, mapping from
metric name to a tuple of mean and SEM).
metadata: Additional metadata to track about this run.
sample_size: Number of samples collected for the underlying arm,
optional.
"""
assert isinstance(
trial_index, int
), f"Trial index must be an int, got: {trial_index}." # pragma: no cover
trial = self._get_trial(trial_index=trial_index)
if not trial.status.is_completed:
raise ValueError(
f"Trial {trial.index} has not yet been completed with data."
"To complete it, use `ax_client.complete_trial`."
)
sample_sizes = {not_none(trial.arm).name: sample_size} if sample_size else {}
evaluations, data = self._make_evaluations_and_data(
trial=trial, raw_data=raw_data, metadata=metadata, sample_sizes=sample_sizes
)
trial._run_metadata.update(metadata or {})
for metric_name in data.df["metric_name"].values:
if metric_name not in self.experiment.metrics:
logger.info(
f"Data was logged for metric {metric_name} that was not yet "
"tracked on the experiment. Adding it as tracking metric."
)
self.experiment.add_tracking_metric(Metric(name=metric_name))
# Registering trial data update is needed for generation strategies that
# leverage the `update` functionality of model and bridge setup and therefore
        # need to be aware of new data added to experiment. Usually this happens
# seamlessly, by looking at newly completed trials, but in this case trial
# status does not change, so we manually register the new data.
# Currently this call will only result in a `NotImplementedError` if generation
# strategy uses `update` (`GenerationStep.use_update` is False by default).
self.generation_strategy._register_trial_data_update(trial=trial, data=data)
self.experiment.attach_data(data, combine_with_last_data=True)
data_for_logging = _round_floats_for_logging(
item=evaluations[next(iter(evaluations.keys()))]
)
logger.info(
f"Added data: {_round_floats_for_logging(item=data_for_logging)} "
f"to trial {trial.index}."
)
self._save_experiment_to_db_if_possible(
experiment=self.experiment,
suppress_all_errors=self._suppress_storage_errors,
)
def log_trial_failure(
self, trial_index: int, metadata: Optional[Dict[str, str]] = None
) -> None:
"""Mark that the given trial has failed while running.
Args:
trial_index: Index of trial within the experiment.
metadata: Additional metadata to track about this run.
"""
trial = self.experiment.trials[trial_index]
trial.mark_failed()
logger.info(f"Registered failure of trial {trial_index}.")
if metadata is not None:
trial._run_metadata = metadata
self._save_experiment_to_db_if_possible(
experiment=self.experiment,
suppress_all_errors=self._suppress_storage_errors,
)
def attach_trial(
self, parameters: TParameterization, ttl_seconds: Optional[int] = None
) -> Tuple[TParameterization, int]:
"""Attach a new trial with the given parameterization to the experiment.
Args:
parameters: Parameterization of the new trial.
ttl_seconds: If specified, will consider the trial failed after this
many seconds. Used to detect dead trials that were not marked
failed properly.
Returns:
Tuple of parameterization and trial index from newly created trial.
"""
self._validate_search_space_membership(parameters=parameters)
trial = self.experiment.new_trial(ttl_seconds=ttl_seconds).add_arm(
Arm(parameters=parameters)
)
trial.mark_running(no_runner_required=True)
logger.info(
"Attached custom parameterization "
f"{_round_floats_for_logging(item=parameters)} as trial {trial.index}."
)
self._save_new_trial_to_db_if_possible(
experiment=self.experiment,
trial=trial,
suppress_all_errors=self._suppress_storage_errors,
)
return not_none(trial.arm).parameters, trial.index
def get_trial_parameters(self, trial_index: int) -> TParameterization:
"""Retrieve the parameterization of the trial by the given index."""
return not_none(self._get_trial(trial_index).arm).parameters
@copy_doc(best_point_utils.get_best_parameters)
def get_best_parameters(
self,
) -> Optional[Tuple[TParameterization, Optional[TModelPredictArm]]]:
return best_point_utils.get_best_parameters(self.experiment)
def get_trials_data_frame(self) -> pd.DataFrame:
return exp_to_df(exp=self.experiment)
def get_max_parallelism(self) -> List[Tuple[int, int]]:
"""Retrieves maximum number of trials that can be scheduled in parallel
at different stages of optimization.
Some optimization algorithms profit significantly from sequential
optimization (i.e. suggest a few points, get updated with data for them,
repeat, see https://ax.dev/docs/bayesopt.html).
        Parallelism setting indicates how many trials should be running simultaneously
(generated, but not yet completed with data).
The output of this method is mapping of form
{num_trials -> max_parallelism_setting}, where the max_parallelism_setting
is used for num_trials trials. If max_parallelism_setting is -1, as
many of the trials can be ran in parallel, as necessary. If num_trials
in a tuple is -1, then the corresponding max_parallelism_setting
should be used for all subsequent trials.
For example, if the returned list is [(5, -1), (12, 6), (-1, 3)],
the schedule could be: run 5 trials with any parallelism, run 6 trials in
parallel twice, run 3 trials in parallel for as long as needed. Here,
'running' a trial means obtaining a next trial from `AxClient` through
        `get_next_trial` and completing it with data when available.
Returns:
Mapping of form {num_trials -> max_parallelism_setting}.
"""
parallelism_settings = []
for step in self.generation_strategy._steps:
parallelism_settings.append(
(step.num_trials, step.max_parallelism or step.num_trials)
)
return parallelism_settings
def get_optimization_trace(
self, objective_optimum: Optional[float] = None
) -> AxPlotConfig:
"""Retrieves the plot configuration for optimization trace, which shows
the evolution of the objective mean over iterations.
Args:
objective_optimum: Optimal objective, if known, for display in the
visualization.
"""
if not self.experiment.trials:
raise ValueError("Cannot generate plot as there are no trials.")
# pyre-fixme[16]: `Optional` has no attribute `objective`.
objective_name = self.experiment.optimization_config.objective.metric.name
best_objectives = np.array(
[
[
checked_cast(Trial, trial).objective_mean
for trial in self.experiment.trials.values()
if trial.status.is_completed
]
]
)
hover_labels = [
_format_dict(not_none(checked_cast(Trial, trial).arm).parameters)
for trial in self.experiment.trials.values()
if trial.status.is_completed
]
return optimization_trace_single_method(
y=(
np.minimum.accumulate(best_objectives, axis=1)
if self.experiment.optimization_config.objective.minimize
else np.maximum.accumulate(best_objectives, axis=1)
),
optimum=objective_optimum,
title="Model performance vs. # of iterations",
ylabel=objective_name.capitalize(),
hover_labels=hover_labels,
)
def get_contour_plot(
self,
param_x: Optional[str] = None,
param_y: Optional[str] = None,
metric_name: Optional[str] = None,
) -> AxPlotConfig:
"""Retrieves a plot configuration for a contour plot of the response
surface. For response surfaces with more than two parameters,
selected two parameters will appear on the axes, and remaining parameters
will be affixed to the middle of their range. If contour params arguments
are not provided, the first two parameters in the search space will be
used. If contour metrics are not provided, objective will be used.
Args:
param_x: name of parameters to use on x-axis for
the contour response surface plots.
param_y: name of parameters to use on y-axis for
the contour response surface plots.
metric_name: Name of the metric, for which to plot the response
surface.
"""
if not self.experiment.trials:
raise ValueError("Cannot generate plot as there are no trials.")
if len(self.experiment.parameters) < 2:
raise ValueError(
"Cannot create a contour plot as experiment has less than 2 "
"parameters, but a contour-related argument was provided."
)
if (param_x or param_y) and not (param_x and param_y):
raise ValueError(
"If `param_x` is provided, `param_y` is "
"required as well, and vice-versa."
)
objective_name = self.objective_name
if not metric_name:
metric_name = objective_name
if not param_x or not param_y:
parameter_names = list(self.experiment.parameters.keys())
param_x = parameter_names[0]
param_y = parameter_names[1]
if param_x not in self.experiment.parameters:
raise ValueError(
f'Parameter "{param_x}" not found in the optimization search space.'
)
if param_y not in self.experiment.parameters:
raise ValueError(
f'Parameter "{param_y}" not found in the optimization search space.'
)
if metric_name not in self.experiment.metrics:
raise ValueError(
f'Metric "{metric_name}" is not associated with this optimization.'
)
if self.generation_strategy.model is not None:
try:
logger.info(
f"Retrieving contour plot with parameter '{param_x}' on X-axis "
f"and '{param_y}' on Y-axis, for metric '{metric_name}'. "
"Ramaining parameters are affixed to the middle of their range."
)
return plot_contour(
model=not_none(self.generation_strategy.model),
param_x=param_x,
param_y=param_y,
metric_name=metric_name,
)
except NotImplementedError:
# Some models don't implement '_predict', which is needed
# for the contour plots.
logger.info(
f"Model {self.generation_strategy.model} does not implement "
"`predict`, so it cannot be used to generate a response "
"surface plot."
)
raise ValueError(
f'Could not obtain contour plot of "{metric_name}" for parameters '
f'"{param_x}" and "{param_y}", as a model with predictive ability, '
"such as a Gaussian Process, has not yet been trained in the course "
"of this optimization."
)
def get_feature_importances(self, relative: bool = True) -> AxPlotConfig:
"""
Get a bar chart showing feature_importances for a metric.
A drop-down controls the metric for which the importances are displayed.
Args:
relative: Whether the values are displayed as percentiles or
as raw importance metrics.
"""
if not self.experiment.trials:
raise ValueError("Cannot generate plot as there are no trials.")
cur_model = self.generation_strategy.model
if cur_model is not None:
try:
return plot_feature_importance_by_feature(cur_model, relative=relative)
except NotImplementedError:
logger.info(
f"Model {self.generation_strategy.model} does not implement "
"`feature_importances`, so it cannot be used to generate "
"this plot. Only certain models, specifically GPEI, implement "
"feature importances."
)
raise ValueError(
"Could not obtain feature_importances for any metrics "
" as a model that can produce feature importances, such as a "
"Gaussian Process, has not yet been trained in the course "
"of this optimization."
)
def load_experiment_from_database(
self,
experiment_name: str,
choose_generation_strategy_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
"""Load an existing experiment from database using the `DBSettings`
passed to this `AxClient` on instantiation.
        Args:
            experiment_name: Name of the experiment.
            choose_generation_strategy_kwargs: Keyword arguments to pass to
                `choose_generation_strategy` if a generation strategy needs to
                be selected because none was stored with the experiment.
        """
experiment, generation_strategy = self._load_experiment_and_generation_strategy(
experiment_name=experiment_name
)
if experiment is None:
raise ValueError(f"Experiment by name '{experiment_name}' not found.")
self._experiment = experiment
logger.info(f"Loaded {experiment}.")
if generation_strategy is None: # pragma: no cover
self._set_generation_strategy(
choose_generation_strategy_kwargs=choose_generation_strategy_kwargs
)
self._save_generation_strategy_to_db_if_possible(
generation_strategy=self.generation_strategy,
suppress_all_errors=self._suppress_storage_errors,
)
else:
self._generation_strategy = generation_strategy
logger.info(
f"Using generation strategy associated with the loaded experiment:"
f" {generation_strategy}."
)
def get_model_predictions(
self, metric_names: Optional[List[str]] = None
) -> Dict[int, Dict[str, Tuple[float, float]]]:
"""Retrieve model-estimated means and covariances for all metrics.
Note: this function retrieves the predictions for the 'in-sample' arms,
which means that the return mapping on this function will only contain
predictions for trials that have been completed with data.
Args:
metric_names: Names of the metrics, for which to retrieve predictions.
All metrics on experiment will be retrieved if this argument was
not specified.
Returns:
A mapping from trial index to a mapping of metric names to tuples
of predicted metric mean and SEM, of form:
{ trial_index -> { metric_name: ( mean, SEM ) } }.
"""
if self.generation_strategy.model is None: # pragma: no cover
raise ValueError("No model has been instantiated yet.")
if metric_names is None and self.experiment.metrics is None:
raise ValueError( # pragma: no cover
"No metrics to retrieve specified on the experiment or as "
"argument to `get_model_predictions`."
)
arm_info, _, _ = _get_in_sample_arms(
model=not_none(self.generation_strategy.model),
metric_names=set(metric_names)
if metric_names is not None
else set(not_none(self.experiment.metrics).keys()),
)
trials = checked_cast_dict(int, Trial, self.experiment.trials)
return {
trial_index: {
m: (
arm_info[not_none(trials[trial_index].arm).name].y_hat[m],
arm_info[not_none(trials[trial_index].arm).name].se_hat[m],
)
for m in arm_info[not_none(trials[trial_index].arm).name].y_hat
}
for trial_index in trials
if not_none(trials[trial_index].arm).name in arm_info
}
def verify_trial_parameterization(
self, trial_index: int, parameterization: TParameterization
) -> bool:
"""Whether the given parameterization matches that of the arm in the trial
specified in the trial index.
"""
return (
not_none(self._get_trial(trial_index=trial_index).arm).parameters
== parameterization
)
# ------------------ JSON serialization & storage methods. -----------------
def save_to_json_file(self, filepath: str = "ax_client_snapshot.json") -> None:
"""Save a JSON-serialized snapshot of this `AxClient`'s settings and state
to a .json file by the given path.
"""
with open(filepath, "w+") as file: # pragma: no cover
file.write(json.dumps(self.to_json_snapshot()))
logger.info(f"Saved JSON-serialized state of optimization to `{filepath}`.")
@staticmethod
def load_from_json_file(
filepath: str = "ax_client_snapshot.json", **kwargs
) -> "AxClient":
"""Restore an `AxClient` and its state from a JSON-serialized snapshot,
residing in a .json file by the given path.
"""
with open(filepath, "r") as file: # pragma: no cover
serialized = json.loads(file.read())
return AxClient.from_json_snapshot(serialized=serialized, **kwargs)
def to_json_snapshot(self) -> Dict[str, Any]:
"""Serialize this `AxClient` to JSON to be able to interrupt and restart
optimization and save it to file by the provided path.
Returns:
A JSON-safe dict representation of this `AxClient`.
"""
return {
"_type": self.__class__.__name__,
"experiment": object_to_json(self._experiment),
"generation_strategy": object_to_json(self._generation_strategy),
"_enforce_sequential_optimization": self._enforce_sequential_optimization,
}
@staticmethod
def from_json_snapshot(serialized: Dict[str, Any], **kwargs) -> "AxClient":
"""Recreate an `AxClient` from a JSON snapshot."""
experiment = object_from_json(serialized.pop("experiment"))
serialized_generation_strategy = serialized.pop("generation_strategy")
ax_client = AxClient(
generation_strategy=generation_strategy_from_json(
generation_strategy_json=serialized_generation_strategy
)
if serialized_generation_strategy is not None
else None,
enforce_sequential_optimization=serialized.pop(
"_enforce_sequential_optimization"
),
**kwargs,
)
ax_client._experiment = experiment
return ax_client
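    # A minimal JSON round-trip sketch for the storage helpers above (editor's
    # illustration; the experiment name and parameter definition are hypothetical):
    #
    #     ax_client = AxClient()
    #     ax_client.create_experiment(
    #         name="demo",
    #         parameters=[{"name": "x", "type": "range", "bounds": [0.0, 1.0]}],
    #     )
    #     ax_client.save_to_json_file("snapshot.json")
    #     restored = AxClient.load_from_json_file("snapshot.json")
    #     assert restored.experiment.name == "demo"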
# ---------------------- Private helper methods. ---------------------
@property
def experiment(self) -> Experiment:
"""Returns the experiment set on this Ax client."""
if self._experiment is None:
raise ValueError(
"Experiment not set on Ax client. Must first "
"call load_experiment or create_experiment to use handler functions."
)
return not_none(self._experiment)
@property
def generation_strategy(self) -> GenerationStrategy:
"""Returns the generation strategy, set on this experiment."""
if self._generation_strategy is None:
raise ValueError(
"No generation strategy has been set on this optimization yet."
)
return not_none(self._generation_strategy)
@property
def objective_name(self) -> str:
"""Returns the name of the objective in this optimization."""
opt_config = not_none(self.experiment.optimization_config)
return opt_config.objective.metric.name
def _set_generation_strategy(
self, choose_generation_strategy_kwargs: Optional[Dict[str, Any]] = None
) -> None:
"""Selects the generation strategy and applies specified dispatch kwargs,
if any.
"""
choose_generation_strategy_kwargs = choose_generation_strategy_kwargs or {}
random_seed = choose_generation_strategy_kwargs.pop(
"random_seed", self._random_seed
)
enforce_sequential_optimization = choose_generation_strategy_kwargs.pop(
"enforce_sequential_optimization", self._enforce_sequential_optimization
)
if self._generation_strategy is None:
self._generation_strategy = choose_generation_strategy(
search_space=self.experiment.search_space,
enforce_sequential_optimization=enforce_sequential_optimization,
random_seed=random_seed,
**choose_generation_strategy_kwargs,
)
def _gen_new_generator_run(self, n: int = 1) -> GeneratorRun:
"""Generate new generator run for this experiment.
Args:
n: Number of arms to generate.
"""
# If random seed is not set for this optimization, context manager does
# nothing; otherwise, it sets the random seed for torch, but only for the
# scope of this call. This is important because torch seed is set globally,
# so if we just set the seed without the context manager, it can have
# serious negative impact on the performance of the models that employ
# stochasticity.
        with manual_seed(seed=self._random_seed), warnings.catch_warnings():
# Filter out GPYTorch warnings to avoid confusing users.
warnings.simplefilter("ignore")
return not_none(self.generation_strategy).gen(
experiment=self.experiment,
n=n,
pending_observations=get_pending_observation_features(
experiment=self.experiment
),
)
def _get_trial(self, trial_index: int) -> Trial:
"""Gets trial by given index or raises an error if it does not exist."""
if trial_index in self.experiment.trials:
trial = self.experiment.trials.get(trial_index)
if not isinstance(trial, Trial):
raise NotImplementedError(
"`AxClient` only supports `Trial`, not `BatchTrial`."
)
return trial
raise ValueError(f"Trial {trial_index} does not yet exist.")
def _find_last_trial_with_parameterization(
self, parameterization: TParameterization
) -> int:
"""Given a parameterization, find the last trial in the experiment that
contains an arm with that parameterization.
"""
for trial_idx in sorted(self.experiment.trials.keys(), reverse=True):
if not_none(self._get_trial(trial_idx).arm).parameters == parameterization:
return trial_idx
raise ValueError(
f"No trial on experiment matches parameterization {parameterization}."
)
def _make_evaluations_and_data(
self,
trial: BaseTrial,
raw_data: Union[TEvaluationOutcome, Dict[str, TEvaluationOutcome]],
metadata: Optional[Dict[str, Union[str, int]]],
sample_sizes: Optional[Dict[str, int]] = None,
) -> Tuple[Dict[str, TEvaluationOutcome], Data]:
"""Formats given raw data as Ax evaluations and `Data`.
        Args:
            trial: Trial within the experiment.
            raw_data: Metric outcomes for 1-arm trials, map from arm name to
                metric outcomes for batched trials.
            metadata: Additional metadata to track about this run.
            sample_sizes: Integer sample size for 1-arm trials, dict from arm
                name to sample size for batched trials. Optional.
        """
if isinstance(trial, BatchTrial):
assert isinstance( # pragma: no cover
raw_data, dict
), "Raw data must be a dict for batched trials."
elif isinstance(trial, Trial):
arm_name = not_none(trial.arm).name
raw_data = {arm_name: raw_data} # pyre-ignore[9]
else: # pragma: no cover
raise ValueError(f"Unexpected trial type: {type(trial)}.")
assert isinstance(raw_data, dict)
not_trial_arm_names = set(raw_data.keys()) - set(trial.arms_by_name.keys())
if not_trial_arm_names:
raise ValueError(
f"Arms {not_trial_arm_names} are not part of trial #{trial.index}."
)
evaluations = {
arm_name: raw_data_to_evaluation(
raw_data=raw_data[arm_name], objective_name=self.objective_name
)
for arm_name in raw_data
}
data = data_from_evaluations(
evaluations=evaluations,
trial_index=trial.index,
sample_sizes=sample_sizes or {},
start_time=(
checked_cast_optional(int, metadata.get("start_time"))
if metadata is not None
else None
),
end_time=(
checked_cast_optional(int, metadata.get("end_time"))
if metadata is not None
else None
),
)
return evaluations, data
# ------------------------------ Validators. -------------------------------
@staticmethod
def _validate_can_complete_trial(trial: BaseTrial) -> None:
if trial.status.is_completed:
raise ValueError(
f"Trial {trial.index} has already been completed with data."
"To add more data to it (for example, for a different metric), "
"use `ax_client.update_trial_data`."
)
if trial.status.is_abandoned or trial.status.is_failed:
raise ValueError(
f"Trial {trial.index} has been marked {trial.status.name}, so it "
"no longer expects data."
)
def _validate_search_space_membership(self, parameters: TParameterization) -> None:
self.experiment.search_space.check_membership(
parameterization=parameters, raise_error=True
)
# `check_membership` uses int and float interchangeably, which we don't
# want here.
for p_name, parameter in self.experiment.search_space.parameters.items():
if not isinstance(parameters[p_name], parameter.python_type):
typ = type(parameters[p_name])
raise ValueError(
f"Value for parameter {p_name} is of type {typ}, expected "
f"{parameter.python_type}. If the intention was to have the "
f"parameter on experiment be of type {typ}, set `value_type` "
f"on experiment creation for {p_name}."
)
# -------- Backward-compatibility with old save / load method names. -------
@staticmethod
def get_recommended_max_parallelism() -> None:
raise NotImplementedError(
"Use `get_max_parallelism` instead; parallelism levels are now "
"enforced in generation strategy, so max parallelism is no longer "
"just recommended."
)
@staticmethod
def load_experiment(experiment_name: str) -> None:
raise NotImplementedError(
"Use `load_experiment_from_database` to load from SQL database or "
"`load_from_json_file` to load optimization state from .json file."
)
@staticmethod
def load(filepath: Optional[str] = None) -> None:
raise NotImplementedError(
"Use `load_experiment_from_database` to load from SQL database or "
"`load_from_json_file` to load optimization state from .json file."
)
@staticmethod
def save(filepath: Optional[str] = None) -> None:
raise NotImplementedError(
"Use `save_to_json_file` to save optimization state to .json file."
)
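# ---------------------------------------------------------------------------
# Minimal end-to-end usage sketch (editor's addition, guarded so it never runs
# on import; the experiment name, search space, and evaluation function below
# are hypothetical):
if __name__ == "__main__":  # pragma: no cover
    ax_client = AxClient()
    ax_client.create_experiment(
        name="quadratic_demo",
        parameters=[{"name": "x", "type": "range", "bounds": [-5.0, 5.0]}],
        objective_name="objective",
        minimize=True,
    )
    for _ in range(5):
        parameters, trial_index = ax_client.get_next_trial()
        # Report the (hypothetical) objective value back for this trial.
        ax_client.complete_trial(
            trial_index=trial_index, raw_data=(parameters["x"] - 1.0) ** 2
        )
    print(ax_client.get_best_parameters())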
|
py | 7dff0e1639b0e0c7ec56fecd720c8bdf72eea64e | # Copyright 2019, Kay Hayen, mailto:[email protected]
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def empty():
pass
module_var = None
def calledRepeatedly():
# We measure making that call or not. Lets get the module
# variable look-up out of the game, by making it a local
# variable
called = empty
# construct_begin
called()
# construct_end
import itertools
for x in itertools.repeat(None, 50000):
calledRepeatedly()
print("OK.")
|
py | 7dff0e734a3370823b4183bd6929f05eb3276fd9 | import xlrd
import zipfile
import argparse
import sys
from shutil import rmtree
from PIL import Image
from glob import glob
from os import makedirs, rename
from os.path import join, splitext, basename, exists
from lib.preprocess import resize_and_center_fundus
parser = argparse.ArgumentParser(description='Preprocess Messidor-Original data set.')
parser.add_argument("--data_dir", help="Directory where Messidor-Original resides.",
default="data/messidor")
args = parser.parse_args()
data_dir = str(args.data_dir)
# Create directories for grades.
[makedirs(join(data_dir, str(i))) for i in [0, 1, 2, 3]
if not exists(join(data_dir, str(i)))]
# Create a tmp directory for saving temporary preprocessing files.
tmp_path = join(data_dir, 'tmp')
if exists(tmp_path):
rmtree(tmp_path)
makedirs(tmp_path)
# Find shard zip files.
shards_paths = glob(join(data_dir, "*.zip"))
for shard in shards_paths:
shard_name = splitext(basename(shard))[0]
shard_unpack_dir = join(data_dir, shard_name)
# Unzip shard.
print(f"Unzipping {shard_name}...")
if exists(shard_unpack_dir):
rmtree(shard_unpack_dir)
zip_ref = zipfile.ZipFile(shard, 'r')
zip_ref.extractall(shard_unpack_dir)
zip_ref.close()
# Open annotations file for shard.
annotations_path = join(
data_dir, f"Annotation_{shard_name}.xls")
workbook = xlrd.open_workbook(annotations_path)
worksheet = workbook.sheet_by_index(0)
# Parse annotations file.
for num, row in enumerate(range(1, worksheet.nrows)):
filename = worksheet.cell(row, 0).value
grade = worksheet.cell(row, 2).value
im_path = glob(join(shard_unpack_dir, "**/{}".format(filename)),
recursive=True)[0]
# Find contour of eye fundus in image, and scale
# diameter of fundus to 299 pixels and crop the edges.
res = resize_and_center_fundus(save_path=tmp_path, image_path=im_path,
diameter=299, verbosity=0)
# Status-message.
msg = "\r- Preprocessing image: {0:>6} / {1}".format(
num+1, worksheet.nrows-1)
# Print the status message.
sys.stdout.write(msg)
sys.stdout.flush()
if res != 1:
continue
new_filename = "{0}.jpg".format(splitext(basename(im_path))[0])
# Move the file from the tmp folder to the right grade folder.
rename(join(tmp_path, new_filename),
join(data_dir, str(int(grade)), new_filename))
print()
rmtree(shard_unpack_dir)
# Clean tmp folder.
rmtree(tmp_path)
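# Example invocation (editor's note; the script filename and directory layout are
# assumptions): place the Messidor-Original "Base*.zip" shards together with the
# matching "Annotation_Base*.xls" files under data/messidor, then run:
#
#     python preprocess_messidor.py --data_dir=data/messidor
#
# Preprocessed fundus images (299-pixel diameter) are written to
# data/messidor/<grade>/ with one folder per retinopathy grade 0-3.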
|
py | 7dff0eae9d1e347077bedfd8aa00a2831fd4645a | # -*- coding: utf-8 -*-
#
# Copyright 2014-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from .world import world
from nose.tools import eq_
from bigml.api import HTTP_OK
#@step(r'I get the anomaly detector "(.*)"')
def i_get_the_anomaly(step, anomaly):
resource = world.api.get_anomaly(anomaly)
world.status = resource['code']
eq_(world.status, HTTP_OK)
world.anomaly = resource['object']
|
py | 7dff1082e87973e8e109714cbdff9d18a7450d4d | import os
def cheese():
while True:
if os.system("cheese") == 0:
break
|
py | 7dff120c5b9a23216c4cb236a2b06eb86065da69 | # coding: utf-8
import six
from test_server import TestServer
from grab.proxylist import BaseProxySource
from tests.util import build_grab, temp_file
from tests.util import BaseGrabTestCase, TEST_SERVER_PORT
ADDRESS = '127.0.0.1'
class TestProxy(BaseGrabTestCase):
@classmethod
def setUpClass(cls):
super(TestProxy, cls).setUpClass()
cls.extra_servers = {}
for cnt in range(3):
serv = TestServer(address=ADDRESS, port=TEST_SERVER_PORT + 1 + cnt)
serv.start()
cls.extra_servers[serv.port] = {
'server': serv,
'proxy': '%s:%d' % (ADDRESS, serv.port),
}
@classmethod
def tearDownClass(cls):
super(TestProxy, cls).tearDownClass()
for item in cls.extra_servers.values():
item['server'].stop()
def setUp(self):
super(TestProxy, self).setUp()
for item in self.extra_servers.values():
item['server'].reset()
def test_proxy_option(self):
grab = build_grab()
proxy = '%s:%s' % (ADDRESS, self.server.port)
grab.setup(proxy=proxy, proxy_type='http', debug=True)
self.server.response['data'] = '123'
grab.go('http://yandex.ru')
self.assertEqual(b'123', grab.doc.body)
self.assertEqual('yandex.ru', self.server.request['headers']['host'])
def test_deprecated_setup_proxylist(self):
with temp_file() as tmp_file:
proxy = '%s:%s' % (ADDRESS, self.server.port)
grab = build_grab()
with open(tmp_file, 'w') as out:
out.write(proxy)
grab.proxylist.load_file(tmp_file)
self.server.response['get.data'] = '123'
grab.change_proxy()
grab.go('http://yandex.ru')
self.assertEqual(b'123', grab.doc.body)
self.assertEqual('yandex.ru',
self.server.request['headers']['host'])
def test_load_proxylist(self):
with temp_file() as tmp_file:
content = '\n'.join(x['proxy'] for x in
self.extra_servers.values())
with open(tmp_file, 'w') as out:
out.write(content)
# By default auto_change is True
grab = build_grab()
grab.proxylist.load_file(tmp_file)
self.assertEqual(grab.config['proxy_auto_change'], True)
servers = set()
for _ in six.moves.range(10):
grab.go('http://yandex.ru')
servers.add(grab.config['proxy'])
self.assertTrue(len(servers) > 1)
# Disable auto_change
# Change proxy manually
grab = build_grab()
grab.proxylist.load_file(tmp_file)
grab.setup(proxy_auto_change=False)
grab.change_proxy()
self.assertEqual(grab.config['proxy_auto_change'], False)
# TODO: probably call proxy change manually
servers = set()
for _ in six.moves.range(10):
grab.go('http://yandex.ru')
servers.add(grab.config['proxy'])
self.assertEqual(len(servers), 1)
# Disable auto_change
# By default auto_init is True
# Proxylist will not be used by default
grab = build_grab()
grab.proxylist.load_file(tmp_file)
grab.setup(proxy_auto_change=False)
self.assertEqual(grab.config['proxy_auto_change'], False)
grab.go(self.server.get_url())
self.assertEqual(grab.config['proxy'], None)
def test_change_proxy(self):
with temp_file() as tmp_file:
grab = build_grab()
grab.change_proxy()
self.assertEqual(grab.config['proxy'], None)
grab = build_grab()
with open(tmp_file, 'w') as out:
for num in six.moves.range(10):
out.write('server-%d:777\n' % num)
grab.proxylist.load_file(tmp_file)
grab.setup(proxy_auto_change=False)
self.assertEqual(grab.config['proxy'], None)
grab.proxylist.load_file(tmp_file)
self.assertEqual(grab.config['proxy'], None)
grab.proxylist.load_file(tmp_file)
grab.setup(proxy_auto_change=False)
grab.change_proxy()
# pylint: disable=unsupported-membership-test
self.assertTrue('server-' in grab.config['proxy'])
# pylint: enable=unsupported-membership-test
def test_list_proxysource(self):
grab = build_grab()
items = [x['proxy'] for x in self.extra_servers.values()]
grab.proxylist.load_list(items)
grab.go('http://yandex.ru')
servers = [x['server'] for x in self.extra_servers.values()
if x['server'].request['done']]
for serv in servers:
self.assertEqual(serv.request['headers']['host'], 'yandex.ru')
self.assertTrue(grab.doc.headers['listen-port'] in
map(str, self.extra_servers))
def test_custom_proxysource(self):
extra_servers = list(self.extra_servers.values())
class CustomProxySource(BaseProxySource):
def load_raw_data(self):
return '\n'.join(x['proxy'] for x in extra_servers)
grab = build_grab()
grab.setup(proxy_auto_change=False)
grab.proxylist.set_source(CustomProxySource())
grab.change_proxy(random=False)
grab.go('http://yandex.ru')
serv = extra_servers[0]['server']
self.assertEqual((serv.request['headers']['host']), 'yandex.ru')
self.assertEqual(grab.doc.headers['listen-port'], str(serv.port))
grab.change_proxy(random=False)
grab.go('http://yandex.ru')
serv = extra_servers[1]['server']
self.assertEqual(serv.request['headers']['host'], 'yandex.ru')
self.assertEqual(grab.doc.headers['listen-port'], str(serv.port))
def test_baseproxysource_constructor_arguments(self):
src = BaseProxySource()
self.assertEqual(src.config, {'proxy_type': 'http',
'proxy_userpwd': None})
src = BaseProxySource(proxy_type='socks')
self.assertEqual(src.config, {'proxy_type': 'socks',
'proxy_userpwd': None})
src = BaseProxySource(proxy_userpwd='foo:bar')
self.assertEqual(src.config, {'proxy_type': 'http',
'proxy_userpwd': 'foo:bar'})
src = BaseProxySource(foo='bar')
self.assertEqual(src.config, {'proxy_type': 'http',
'proxy_userpwd': None,
'foo': 'bar'})
def test_global_proxy_userpwd_argument(self):
grab = build_grab()
items = ['localhost:1']
grab.proxylist.load_list(items)
self.assertEqual(grab.proxylist.get_next_proxy().username, None)
grab.proxylist.load_list(items, proxy_userpwd='foo:bar')
proxy = grab.proxylist.get_next_proxy()
self.assertEqual(proxy.username, 'foo')
self.assertEqual(proxy.password, 'bar')
items = ['localhost:1' + ':admin:test', 'localhost:2']
grab.proxylist.load_list(items, proxy_userpwd='foo:bar')
proxy = grab.proxylist.get_next_proxy()
self.assertEqual(proxy.username, 'admin')
self.assertEqual(proxy.password, 'test')
def test_global_proxy_type_argument(self):
grab = build_grab()
items = ['localhost:1']
grab.proxylist.load_list(items)
proxy = grab.proxylist.get_next_proxy()
self.assertEqual(proxy.proxy_type, 'http')
grab.proxylist.load_list(items, proxy_type='socks')
proxy = grab.proxylist.get_next_proxy()
self.assertEqual(proxy.proxy_type, 'socks')
def test_setup_with_proxyline(self):
grab = build_grab()
grab.setup_with_proxyline('1.1.1.1:8080')
self.assertEqual(grab.config['proxy'], '1.1.1.1:8080')
self.assertEqual(grab.config['proxy_userpwd'], None)
self.assertEqual(grab.config['proxy_type'], 'http')
def test_setup_with_proxyline_custom_proxy_type(self):
grab = build_grab()
grab.setup_with_proxyline('1.1.1.1:8080', proxy_type='socks')
self.assertEqual(grab.config['proxy'], '1.1.1.1:8080')
self.assertEqual(grab.config['proxy_userpwd'], None)
self.assertEqual(grab.config['proxy_type'], 'socks')
def test_setup_with_proxyline_userpwd(self):
grab = build_grab()
grab.setup_with_proxyline('1.1.1.1:8080:user:pass')
self.assertEqual(grab.config['proxy'], '1.1.1.1:8080')
self.assertEqual(grab.config['proxy_userpwd'], 'user:pass')
self.assertEqual(grab.config['proxy_type'], 'http')
|
py | 7dff125854dd14b27cc0c2966a205718255c7760 | import os
import pdfplumber
from parse_PDF import parse_text_for_events, get_events_list, get_events_dict_list
import time
start_time = time.time()
def get_syllabi_directory_path():
# change current directory to the parent
os.chdir("../")
return os.path.join(os.getcwd(), "sample-syllabi")
def select_syllabi_file(files):
choice = ""
while (not choice.isdigit() or int(choice) < 0 or int(choice) >
len(files)):
os.system('clear')
print("Select a file from sample-syllabi/: ")
for i, file in enumerate(files):
print(f"{i} - {file:<45}")
choice = input("\nSelect a file: ")
os.system("clear")
return files[int(choice)]
def extract_syllabi_text(syllabi):
all_text = ""
with pdfplumber.open(syllabi) as pdf:
# page = pdf.pages[0] - comment out or remove line
# text = page.extract_text() - comment out or remove line
for pdf_page in pdf.pages:
single_page_text = pdf_page.extract_text()
all_text = all_text + '\n' + single_page_text
return all_text
def main():
syllabiPath = get_syllabi_directory_path()
syllabiFiles = os.listdir(syllabiPath)
syllabi = select_syllabi_file(syllabiFiles)
text = extract_syllabi_text(os.path.join(syllabiPath, syllabi))
parse_text_for_events(text)
get_events_list(text)
print(get_events_dict_list(text))
print("The list of the events extracted from the file:", get_events_list(text))
main()
#print("--- %s seconds ---" % (time.time() - start_time))
# keywords = ["OH", "office", "hour", "meeting", "class", "sessions", "drop-in"]
# weekDays = ["monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday",
# "mon", "tue", "wed", "thu", "thur", "fri", "sat", "sun", "mwt", "tth"]
# weekDaysArr = []
# numsArr = []
# foundKeywords = []
# numIndsArr = []
# keywordIndsArr = []
# weekdayIndsArr = []
# # Splitting characters in String by ',' OR '_', OR '-', OR ... etc
# res = re.split(' |\n', all_text)
# # remove empty spaces from the list of words that were parced
# res = list(filter(None, res))
# """
# for ind, el in enumerate(res):
# if el.isdigit():
# numsArr.append(el)
# numIndsArr.append(ind)
# elif el.lower() in weekDays:
# weekDaysArr.append(el)
# weekdayIndsArr.append(ind)
# elif el.lower() in keywords:
# foundKeywords.append(el)
# keywordIndsArr.append(ind)
# print("{} nums extracted: {}".format(len(numsArr), numsArr))
# print("{} weekdays extracted: {}".format(len(weekDaysArr), weekDaysArr))
# print("{} keywords extracted: {}".format(len(keywordIndsArr), foundKeywords))
# """
# # ATTEMPT 2: extract the keywords and numbers are the located the closest to each other
# # in the list of words
# #print("the closest matching pairs are")
# #pairs = sorted(product(numIndsArr, keywordIndsArr), key=lambda t: abs(t[0]-t[1]))
# # for i in range(len(keywordIndsArr)):
# # currPair = pairs[i]
# # print(currPair, ":", res[currPair[0]], res[currPair[1]])
# # ATTEMPT 3: extracting an words before and after
# # for ind in numIndsArr:
# # print("num", res[ind], res[ind-10:ind+10])
# # issues:
# # PDFs are sometimes formated in 2 columns -- cannot read "across" the line
# # ATTEMPT 5:
# # new logic: 1) extract all numbers 2) search the substrings for keywords
# # create a long string from the result
# strRes = ''.join(map(str, res))
# # par.find_all_regex(main_str=strRes, substr="class") # this will return inds where the word "class" occurs
# keyword_inds = par.find_all_regex(
# main_str=strRes, patterns=keywords) # returns inds of all keywords
# num_inds = par.find_all_nums(strRes)
# for k in keyword_inds: # attempting to match keywords and numbers
# # k==index of the last letter of the keyword
# print("keyword:", strRes[k[0]:k[1]])
# curr_num_inds = par.find_all_nums(strRes[k[1]:k[1]+150])
# curr_times_inds = par.find_am_pm(strRes[k[1]:k[1]+150])
# curr_weekday_inds = par.find_all_regex(
# main_str=strRes[k[1]:k[1]+150], patterns=weekDays)
# # adjust the inds
# curr_num_inds_s = [x[0]+k[1] for x in curr_num_inds]
# curr_num_inds_e = [x[1]+k[1] for x in curr_num_inds]
# curr_times_inds = [x+k[1] for x in curr_times_inds]
# curr_weekday_inds = [[x[0]+k[1], x[1]+k[1]] for x in curr_weekday_inds]
# print("matching dates/times")
# for s, e in zip(curr_num_inds_s, curr_num_inds_e):
# print(strRes[s:e])
# for ind in set(curr_times_inds):
# print(strRes[ind:ind+2])
# for ind in curr_weekday_inds:
# print(strRes[ind[0]:ind[1]])
# # def is_a_time():
# # if there is a am/pm keyword after the number
|
py | 7dff125a6fe2f5022f1ec90168e170c1ec750aeb | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import numpy as np
# import cv2
# import matplotlib.pyplot as plt
import sys
# import segmentation_models_pytorch as smp
import albumentations as albu
import torch
import time
from torch.autograd import Variable
import onnx
#384x512
#pth_path = '/home/pt/Desktop/code/matting/traindata/unet4/yxl_1/res18_yxl.pth'
# pth_path = 'D:/pengt/segmetation/4channels/Unet4/parameter/best_model.pth'
# onnx_out='D:/pengt/segmetation/4channels/Unet4/parameter/onnx_best_model.onnx'
model_ENCODER = 'timm-efficientnet-b0'# 'timm-efficientnet-b0' #'dpn68'#'resnet18' #'mobilenet_v2' #'resnet18'# 'resnet101' #'resnet50' 'resnet152' 101
model_path = '//SmartGo-Nas/pentao/code/4channels/parameter/seg_pytorch/%s/best_model_52_0.936.pth'%model_ENCODER
# onnx_out='D:/pengt/code/Cplus/onnx_model/deeplab/%s_384x640_new1.onnx'%model_ENCODER
onnx_out='D:/pengt/code/Cplus/onnx_model/deeplab1/%s_640x384.onnx'%model_ENCODER
# ENCODER= 'densenet121'#'vgg13' #timm-efficientnet-b0' #"mobilenet_v2"
# ENCODER_WEIGHTS = 'imagenet'
# CLASSES = ['person']
# ACTIVATION = 'sigmoid' # could be None for logits or 'softmax2d' for multicalss segmentation
# model = smp.FPN(
# encoder_name=ENCODER,
# encoder_weights=ENCODER_WEIGHTS,
# classes=len(CLASSES),
# activation=ACTIVATION,
# )
# model = smp.DeepLabV3Plus(
# encoder_name=model_ENCODER,
# encoder_weights=ENCODER_WEIGHTS,
# classes=len(CLASSES),
# activation=ACTIVATION,
# in_channels = 4,
# )
# model = torch.load(model_path,map_location=torch.device('cuda'))
model = torch.load(model_path)
model=model.cuda()
dummy_input = Variable(torch.randn(1, 4, 640 , 384))
# #dummy_input = torch.randn(1, 4, 320, 320, requires_grad=True)
model=model.cpu()
# dummy_input = Variable(torch.randn(1, 4, 480 , 640))
#dummy_input = torch.randn(1, 4, 320, 320, requires_grad=True)
model.eval()
print(model)
#torch.onnx.export(model, dummy_input, "/media/xingshi2/data/purning/network-slimming/Unet_resnet50.onnx", verbose=True)
#model1 = onnx.load("/media/xingshi2/data/purning/network-slimming/Unet_resnet50.onnx")
# Check that the IR is well formed
#onnx.checker.check_model(model1)
# Print a human readable representation of the graph
#onnx.helper.printable_graph(model1.graph)
#opset_version = model1.opset_import[0].version
# model.set_swish(memory_efficient=False)
torch.onnx.export(model, # model being run
dummy_input, # model input (or a tuple for multiple inputs)
onnx_out, # where to save the model (can be a file or file-like object)
export_params=True, # store the trained parameter weights inside the model file
opset_version=11,#12, # the onnx version to export the model to
verbose=True,
                  # do_constant_folding=True, # whether to execute constant folding for optimization
#input_names = ['final_conv'], # the model's input names
#output_names = ['final_conv'], # the model's output names
                  # dynamic_axes={'input' : {0 : 'batch_size'}, # variable length axes
# 'output' : {0 : 'batch_size'}}
)
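# A quick post-export validation sketch (editor's addition; `onnx` is already
# imported above, while `onnxruntime` is an extra, optional dependency):
#
#     model_proto = onnx.load(onnx_out)
#     onnx.checker.check_model(model_proto)  # structural check of the exported graph
#
#     import numpy as np
#     import onnxruntime as ort
#     sess = ort.InferenceSession(onnx_out)
#     dummy = np.random.randn(1, 4, 640, 384).astype(np.float32)
#     outputs = sess.run(None, {sess.get_inputs()[0].name: dummy})
#     print([o.shape for o in outputs])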
|
py | 7dff1281bf916836489e24ec27dae1d1d40a704b |
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.db import transaction
from django.db.models import Q
from django.forms import ModelChoiceField
from django.http import QueryDict
from django.template import loader
from django.utils.decorators import method_decorator
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.csrf import csrf_protect
from xadmin.filters import FILTER_PREFIX, SEARCH_VAR
from xadmin.plugins.relate import RELATE_PREFIX
from xadmin.plugins.utils import get_context_dict
from xadmin.sites import site
from xadmin.views import ModelAdminView, BaseAdminPlugin, ListAdminView
from xadmin.views.list import COL_LIST_VAR, ORDER_VAR
from xadmin.views.dashboard import widget_manager, BaseWidget, PartialBaseWidget
from xadmin.models import Bookmark
csrf_protect_m = method_decorator(csrf_protect)
class BookmarkPlugin(BaseAdminPlugin):
# [{'title': "Female", 'query': {'gender': True}, 'order': ('-age'), 'cols': ('first_name', 'age', 'phones'), 'search': 'Tom'}]
list_bookmarks = []
show_bookmarks = True
def has_change_permission(self, obj=None):
if not obj or self.user.is_superuser:
return True
else:
return obj.user == self.user
def get_context(self, context):
if not self.show_bookmarks:
return context
bookmarks = []
current_qs = '&'.join([
'%s=%s' % (k, v)
for k, v in sorted(filter(
lambda i: bool(i[1] and (
i[0] in (COL_LIST_VAR, ORDER_VAR, SEARCH_VAR)
or i[0].startswith(FILTER_PREFIX)
or i[0].startswith(RELATE_PREFIX)
)),
self.request.GET.items()
))
])
model_info = (self.opts.app_label, self.opts.model_name)
has_selected = False
menu_title = _(u"Bookmark")
list_base_url = reverse('xadmin:%s_%s_changelist' %
model_info, current_app=self.admin_site.name)
# local bookmarks
for bk in self.list_bookmarks:
title = bk['title']
params = dict([
(FILTER_PREFIX + k, v)
for (k, v) in bk['query'].items()
])
if 'order' in bk:
params[ORDER_VAR] = '.'.join(bk['order'])
if 'cols' in bk:
params[COL_LIST_VAR] = '.'.join(bk['cols'])
if 'search' in bk:
params[SEARCH_VAR] = bk['search']
def check_item(i):
return bool(i[1]) or i[1] == False
bk_qs = '&'.join([
'%s=%s' % (k, v)
for k, v in sorted(filter(check_item, params.items()))
])
url = list_base_url + '?' + bk_qs
selected = (current_qs == bk_qs)
bookmarks.append(
{'title': title, 'selected': selected, 'url': url})
if selected:
menu_title = title
has_selected = True
content_type = ContentType.objects.get_for_model(self.model)
bk_model_info = (Bookmark._meta.app_label, Bookmark._meta.model_name)
bookmarks_queryset = Bookmark.objects.filter(
content_type=content_type,
url_name='xadmin:%s_%s_changelist' % model_info
).filter(Q(user=self.user) | Q(is_share=True))
for bk in bookmarks_queryset:
selected = (current_qs == bk.query)
if self.has_change_permission(bk):
change_or_detail = 'change'
else:
change_or_detail = 'detail'
bookmarks.append({'title': bk.title, 'selected': selected, 'url': bk.url, 'edit_url':
reverse('xadmin:%s_%s_%s' % (bk_model_info[0], bk_model_info[1], change_or_detail),
args=(bk.id,))})
if selected:
menu_title = bk.title
has_selected = True
post_url = reverse('xadmin:%s_%s_bookmark' % model_info,
current_app=self.admin_site.name)
new_context = {
'bk_menu_title': menu_title,
'bk_bookmarks': bookmarks,
'bk_current_qs': current_qs,
'bk_has_selected': has_selected,
'bk_list_base_url': list_base_url,
'bk_post_url': post_url,
'has_add_permission_bookmark': self.admin_view.request.user.has_perm('xadmin.add_bookmark'),
'has_change_permission_bookmark': self.admin_view.request.user.has_perm('xadmin.change_bookmark')
}
context.update(new_context)
return context
# Media
def get_media(self, media):
return media + self.vendor('xadmin.plugin.bookmark.js')
# Block Views
def block_nav_menu(self, context, nodes):
if self.show_bookmarks:
nodes.insert(0, loader.render_to_string('xadmin/blocks/model_list.nav_menu.bookmarks.html',
context=get_context_dict(context)))
class BookmarkView(ModelAdminView):
@csrf_protect_m
@transaction.atomic
def post(self, request):
model_info = (self.opts.app_label, self.opts.model_name)
url_name = 'xadmin:%s_%s_changelist' % model_info
        bookmark = Bookmark(
            content_type=ContentType.objects.get_for_model(self.model),
            title=request.POST['title'],
            user=self.user,
            query=request.POST.get('query', ''),
            is_share=request.POST.get('is_share', 0),
            url_name=url_name,
        )
bookmark.save()
content = {'title': bookmark.title, 'url': bookmark.url}
return self.render_response(content)
class BookmarkAdmin(object):
model_icon = 'fa fa-book'
list_display = ('title', 'users', 'url_name', 'query')
list_display_links = ('title',)
user_fields = ['users']
hidden_menu = True
def queryset(self):
if self.user.is_superuser:
return Bookmark.objects.all()
return Bookmark.objects.filter(Q(user=self.user) | Q(is_share=True))
def get_list_display(self):
list_display = super(BookmarkAdmin, self).get_list_display()
if not self.user.is_superuser:
list_display.remove('users')
return list_display
def has_change_permission(self, obj=None):
if not obj or self.user.is_superuser:
return True
else:
return obj.user == self.user
@widget_manager.register
class BookmarkWidget(PartialBaseWidget):
widget_type = _('bookmark')
widget_icon = 'fa fa-bookmark'
description = _(
'Bookmark Widget, can show users\'s bookmark list data in widget.')
template = "xadmin/widgets/list.html"
bookmark = ModelChoiceField(
label=_('Bookmark'), queryset=Bookmark.objects.all(), required=False)
def setup(self):
BaseWidget.setup(self)
bookmark = self.cleaned_data['bookmark']
model = bookmark.content_type.model_class()
data = QueryDict(bookmark.query)
self.bookmark = bookmark
if not self.title:
self.title = smart_text(bookmark)
req = self.make_get_request("", data.items())
self.list_view = self.get_view_class(
ListAdminView, model, list_per_page=10, list_editable=[])(req)
def has_perm(self):
return True
def context(self, context):
list_view = self.list_view
list_view.make_result_list()
base_fields = list_view.base_list_display
if len(base_fields) > 5:
base_fields = base_fields[0:5]
context['result_headers'] = [c for c in list_view.result_headers(
).cells if c.field_name in base_fields]
context['results'] = [
[o for i, o in enumerate(filter(
lambda c: c.field_name in base_fields,
r.cells
))]
for r in list_view.results()
]
context['result_count'] = list_view.result_count
context['page_url'] = self.bookmark.url
site.register(Bookmark, BookmarkAdmin)
site.register_plugin(BookmarkPlugin, ListAdminView)
site.register_modelview(r'^bookmark/$', BookmarkView, name='%s_%s_bookmark')
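# Usage sketch (editor's illustration; the model and field names are hypothetical):
# an options class registered with xadmin can pre-define bookmarks for its list
# view using the format documented on `BookmarkPlugin.list_bookmarks`:
#
#     class PersonAdmin(object):
#         list_display = ('first_name', 'age', 'gender')
#         list_bookmarks = [{
#             'title': 'Female',
#             'query': {'gender': True},
#             'order': ('-age',),
#             'cols': ('first_name', 'age'),
#             'search': 'Tom',
#         }]
#
#     site.register(Person, PersonAdmin)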
|
py | 7dff143b195f1506ea18e51702cefeac04dcaba5 | ##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# File auto-generated by PythonFileGenerator
__all__ = [
'ClusterMembersResponse',
'ContextsResponse',
'StatusResponse'
]
from ClusterMembersResponse import ClusterMembersResponse
from ContextsResponse import ContextsResponse
from StatusResponse import StatusResponse
|
py | 7dff150e39361cafeb16ccb6ff9f707428262ac4 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import torch
from torch import Tensor
from torchmetrics.utilities.data import to_categorical
from torchmetrics.utilities.distributed import reduce
def _stat_scores(
preds: Tensor,
target: Tensor,
class_index: int,
argmax_dim: int = 1,
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
"""
Calculates the number of true positive, false positive, true negative
and false negative for a specific class
Args:
        preds: prediction tensor
target: target tensor
class_index: class to calculate over
argmax_dim: if pred is a tensor of probabilities, this indicates the
axis the argmax transformation will be applied over
Return:
True Positive, False Positive, True Negative, False Negative, Support
Example:
>>> x = torch.tensor([1, 2, 3])
>>> y = torch.tensor([0, 2, 3])
>>> tp, fp, tn, fn, sup = _stat_scores(x, y, class_index=1)
>>> tp, fp, tn, fn, sup
(tensor(0), tensor(1), tensor(2), tensor(0), tensor(0))
"""
if preds.ndim == target.ndim + 1:
preds = to_categorical(preds, argmax_dim=argmax_dim)
tp = ((preds == class_index) * (target == class_index)).to(torch.long).sum()
fp = ((preds == class_index) * (target != class_index)).to(torch.long).sum()
tn = ((preds != class_index) * (target != class_index)).to(torch.long).sum()
fn = ((preds != class_index) * (target == class_index)).to(torch.long).sum()
sup = (target == class_index).to(torch.long).sum()
return tp, fp, tn, fn, sup
def dice_score(
pred: Tensor,
target: Tensor,
bg: bool = False,
nan_score: float = 0.0,
no_fg_score: float = 0.0,
reduction: str = 'elementwise_mean',
) -> Tensor:
"""
Compute dice score from prediction scores
Args:
pred: estimated probabilities
target: ground-truth labels
bg: whether to also compute dice for the background
nan_score: score to return, if a NaN occurs during computation
no_fg_score: score to return, if no foreground pixel was found in target
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean (default)
- ``'sum'``: takes the sum
- ``'none'``: no reduction will be applied
Return:
Tensor containing dice score
Example:
>>> pred = torch.tensor([[0.85, 0.05, 0.05, 0.05],
... [0.05, 0.85, 0.05, 0.05],
... [0.05, 0.05, 0.85, 0.05],
... [0.05, 0.05, 0.05, 0.85]])
>>> target = torch.tensor([0, 1, 3, 2])
>>> dice_score(pred, target)
tensor(0.3333)
"""
num_classes = pred.shape[1]
bg = (1 - int(bool(bg)))
scores = torch.zeros(num_classes - bg, device=pred.device, dtype=torch.float32)
for i in range(bg, num_classes):
if not (target == i).any():
# no foreground class
scores[i - bg] += no_fg_score
continue
# TODO: rewrite to use general `stat_scores`
tp, fp, tn, fn, sup = _stat_scores(preds=pred, target=target, class_index=i)
denom = (2 * tp + fp + fn).to(torch.float)
# nan result
score_cls = (2 * tp).to(torch.float) / denom if torch.is_nonzero(denom) else nan_score
scores[i - bg] += score_cls
return reduce(scores, reduction=reduction)
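# Minimal sanity-check sketch (editor's addition, reusing the values from the
# `dice_score` docstring example above):
if __name__ == "__main__":
    pred = torch.tensor([[0.85, 0.05, 0.05, 0.05],
                         [0.05, 0.85, 0.05, 0.05],
                         [0.05, 0.05, 0.85, 0.05],
                         [0.05, 0.05, 0.05, 0.85]])
    target = torch.tensor([0, 1, 3, 2])
    # Per-class scores (background excluded by default) and the reduced mean.
    print(dice_score(pred, target, reduction='none'))
    print(dice_score(pred, target))  # tensor(0.3333), as in the docstring example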
|
py | 7dff1521124e48466ca047fe5469e3696b8dc7e0 | """
https://pythontutor.com/visualize.html#code=class%20Solution%3A%0A%20%20%20%20def%20duplicateZeros%28self,%20arr%29%20-%3E%20None%3A%0A%20%20%20%20%20%20%20%20%22%22%22%0A%20%20%20%20%20%20%20%20Do%20not%20return%20anything,%20modify%20arr%20in-place%20instead.%0A%20%20%20%20%20%20%20%20%22%22%22%0A%20%20%20%20%20%20%20%20possible_dups%20%3D%200%0A%20%20%20%20%20%20%20%20length_%20%3D%20len%28arr%29%20-%201%0A%0A%20%20%20%20%20%20%20%20%23%20Find%20the%20number%20of%20zeros%20to%20be%20duplicated%0A%20%20%20%20%20%20%20%20for%20left%20in%20range%28length_%20%2B%201%29%3A%0A%0A%20%20%20%20%20%20%20%20%20%20%20%20%23%20Stop%20when%20left%20points%20beyond%20the%20last%20element%20in%20the%20original%20list%0A%20%20%20%20%20%20%20%20%20%20%20%20%23%20which%20would%20be%20part%20of%20the%20modified%20list%0A%20%20%20%20%20%20%20%20%20%20%20%20if%20left%20%3E%20length_%20-%20possible_dups%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20break%0A%0A%20%20%20%20%20%20%20%20%20%20%20%20%23%20Count%20the%20zeros%0A%20%20%20%20%20%20%20%20%20%20%20%20if%20arr%5Bleft%5D%20%3D%3D%200%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%23%20Edge%20case%3A%20This%20zero%20can't%20be%20duplicated.%20We%20have%20no%20more%20space,%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%23%20as%20left%20is%20pointing%20to%20the%20last%20element%20which%20could%20be%20included%20%20%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20if%20left%20%3D%3D%20length_%20-%20possible_dups%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20arr%5Blength_%5D%20%3D%200%20%23%20For%20this%20zero%20we%20just%20copy%20it%20without%20duplication.%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20length_%20-%3D%201%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20break%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20possible_dups%20%2B%3D%201%0A%0A%20%20%20%20%20%20%20%20%23%20Start%20backwards%20from%20the%20last%20element%20which%20would%20be%20part%20of%20new%20list.%0A%20%20%20%20%20%20%20%20last%20%3D%20length_%20-%20possible_dups%0A%0A%20%20%20%20%20%20%20%20%23%20Copy%20zero%20twice,%20and%20non%20zero%20once.%0A%20%20%20%20%20%20%20%20for%20i%20in%20range%28last,%20-1,%20-1%29%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20if%20arr%5Bi%5D%20%3D%3D%200%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20arr%5Bi%20%2B%20possible_dups%5D%20%3D%200%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20possible_dups%20-%3D%201%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20arr%5Bi%20%2B%20possible_dups%5D%20%3D%200%0A%20%20%20%20%20%20%20%20%20%20%20%20else%3A%0A%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20arr%5Bi%20%2B%20possible_dups%5D%20%3D%20arr%5Bi%5D%0A%0A%0As%20%3D%20Solution%28%29%0As.duplicateZeros%28%5B1,0,2,3,0,4,5,0%5D%29%0A%20%20%20%20%20%20%20%20%20%20%20%20&cumulative=false&curInstr=8&heapPrimitives=nevernest&mode=display&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false
Why backwards?
If we started shifting from left to right, we would overwrite elements before we had the chance to shift them;
that is why we go backwards instead.
We make sure we have shifted out an element before we shift another one into its original position.
What is the correct shift distance?
The duplication of a zero pushes all elements to the right of it by one.
This means also that every element is shifted to the right as many times as there are zeroes to the left of it.
E.g. in the array [1,0,2,0,3] , 1 will not move, 2 will shift one position and 3 will shift two positions.
As we go backwards, every time we bypass a zero (and duplicate it), the shift distance decreases for the elements we haven't shifted yet, because there is one less zero in front of them.
Why the < n checks?
Shifts push some of the elements out of the array. We do the < n checks to make sure we write down only elements that are shifted to a valid position inside the array and we ignore the ones falling off the end
"""
from typing import List
class Solution:
def duplicateZeros(self, arr: List[int]) -> None:
"""
Do not return anything, modify arr in-place instead.
"""
zeros = 0
for i in range(len(arr)):
if arr[i] == 0:
zeros += 1
        for i in range(len(arr) - 1, -1, -1):
if i + zeros < len(arr):
                arr[i + zeros] = arr[i]
if arr[i] == 0:
zeros -= 1
if i + zeros < len(arr):
arr[i + zeros] = 0
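# Quick in-place check (editor's addition), using the example traced in the
# explanation above:
if __name__ == "__main__":
    arr = [1, 0, 2, 3, 0, 4, 5, 0]
    Solution().duplicateZeros(arr)
    print(arr)  # expected: [1, 0, 0, 2, 3, 0, 0, 4]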
|
py | 7dff15added57911df8726274ae5998c32a98e61 | import unittest
import numpy as np
from pma import can_solver
from openravepy import Environment, matrixFromAxisAngle
from core.util_classes.viewer import OpenRAVEViewer
from core.util_classes.openrave_body import OpenRAVEBody
from core.util_classes.robots import PR2
from core.util_classes import items, pr2_sampling, matrix, param_setup
from core.util_classes.param_setup import ParamSetup
from core.internal_repr import parameter
import time
class TestSampling(unittest.TestCase):
def test_sample_ee_from_target(self):
solver = can_solver.CanSolver()
env = ParamSetup.setup_env()
# env.SetViewer('qtcoin')
target = ParamSetup.setup_target()
target.value = np.array([[0,0,0]]).T
target.rotation = np.array([[1.1,.3,0]]).T
dummy_targ_geom = items.BlueCan(0.04, 0.25)
target_body = OpenRAVEBody(env, target.name, dummy_targ_geom)
target_body.set_pose(target.value.flatten(), target.rotation.flatten())
target_body.set_transparency(.7)
robot = ParamSetup.setup_pr2()
robot_body = OpenRAVEBody(env, robot.name, robot.geom)
robot_body.set_transparency(.7)
robot_body.set_pose(robot.pose.flatten())
dof_value_map = {"backHeight": robot.backHeight,
"lArmPose": robot.lArmPose.flatten(),
"lGripper": robot.lGripper,
"rArmPose": robot.rArmPose.flatten(),
"rGripper": robot.rGripper}
robot_body.set_dof(dof_value_map)
dummy_ee_pose_geom = items.GreenCan(.03,.3)
ee_list = list(enumerate(pr2_sampling.get_ee_from_target(target.value, target.rotation)))
for ee_pose in ee_list:
ee_pos, ee_rot = ee_pose[1]
body = OpenRAVEBody(env, "dummy"+str(ee_pose[0]), dummy_ee_pose_geom)
body.set_pose(ee_pos, ee_rot)
body.set_transparency(.9)
def test_closest_arm_pose(self):
env = ParamSetup.setup_env()
# env.SetViewer('qtcoin')
can = ParamSetup.setup_blue_can()
robot = ParamSetup.setup_pr2()
can.pose = np.array([[0,-.2,.8]]).T
can_body = OpenRAVEBody(env, can.name, can.geom)
can_body.set_pose(can.pose.flatten(), can.rotation.flatten())
can_body.set_transparency(.7)
robot.pose = np.array([[-.5,0,0]]).T
robot_body = OpenRAVEBody(env, robot.name, robot.geom)
robot_body.set_transparency(.7)
robot_body.set_pose(robot.pose.flatten())
dof_value_map = {"backHeight": robot.backHeight,
"lArmPose": robot.lArmPose.flatten(),
"lGripper": robot.lGripper,
"rArmPose": robot.rArmPose.flatten(),
"rGripper": robot.rGripper}
robot_body.set_dof(dof_value_map)
can_trans = OpenRAVEBody.transform_from_obj_pose(can.pose, can.rotation)
rot_mat = matrixFromAxisAngle([0, np.pi/2, 0])
rot_mat = can_trans[:3, :3].dot(rot_mat[:3, :3])
can_trans[:3, :3] = rot_mat
torso_pose, arm_pose = pr2_sampling.get_torso_arm_ik(robot_body, can_trans, robot.rArmPose)
dof_value_map = {"backHeight": robot.backHeight,
"lArmPose": robot.lArmPose.flatten(),
"lGripper": robot.lGripper,
"rArmPose": robot.rArmPose.flatten(),
"rGripper": robot.rGripper}
robot_body.set_dof(dof_value_map)
# import ipdb; ipdb.set_trace()
|
py | 7dff1675a97ab9f8efb2cf1f13cabf4faaf3729b | import logging
import random
from ozpcenter.recommend import recommend_utils
from ozpcenter.recommend.recommend_utils import Direction
from ozpcenter.recommend.recommend_utils import FastNoSuchElementException
from plugins.plugin_manager import system_has_access_control
logger = logging.getLogger('ozp-center.' + str(__name__))
class Pipe(object):
"""
<S, E>
"""
def __init__(self):
"""
Initialize Pipe
Args:
starts: Start of the Pipe
"""
self.starts = None
self.available = False
self.current_end = None
self.next_end = None
def set_starts(self, starts):
"""
Args:
starts: iterable of s objects to the head (start) of pipe
"""
self.starts = starts
def next(self):
"""
Return one E Object
"""
if self.available:
self.available = False
self.current_end = self.next_end
return self.current_end
else:
self.current_end = self.process_next_start()
return self.current_end
def has_next(self):
"""
Return Boolean
"""
if self.available:
return True
else:
try:
self.next_end = self.process_next_start()
self.available = True
return self.available
except IndexError as err: # TODO: Fix to RuntimeError
self.available = False
return self.available
except Exception as err: # NoSuchElementException
raise err
def process_next_start(self):
"""
Returns E
Raise:
NoSuchElementException
"""
raise NotImplementedError("Need to implement in subclasses")
def reset(self):
if isinstance(self.starts, self.__class__):
self.starts.reset()
self.next_end = None
self.current_end = None
self.available = False
def __str__(self):
default_keys = ['starts', 'available', 'current_end', 'next_end']
instance_vars = {}
variables = vars(self)
for variable_key in variables:
variable_value = variables[variable_key]
if variable_key not in default_keys:
if callable(variable_value):
variable_value = variable_value.__class__
if not variable_key.startswith('_'):
instance_vars[variable_key] = variable_value
variables_string = ', '.join(['{}:{}'.format(key, instance_vars[key]) for key in instance_vars])
output = '{}({})'.format(self.__class__.__name__, variables_string)
return output
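# Note (added for clarity): __str__ above skips the bookkeeping attributes listed in
# default_keys as well as anything starting with '_', so for example
# str(LimitPipe(3)) renders as "LimitPipe(limit_number:3)".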
class MetaPipe(Pipe):
def get_pipes(self):
raise NotImplementedError("Need to implement in subclasses")
def reset(self):
for pipe in self.get_pipes():
pipe.reset()
super().reset()
class AsPipe(MetaPipe):
def __init__(self, name, pipe):
super().__init__()
self.name = name
self.pipe = pipe
def set_starts(self, starts):
"""
Args:
starts: iterable of s objects to the head (start) of pipe
"""
self.pipe.set_starts(starts)
self.starts = starts
def get_current_end(self):
return self.current_end
def get_name(self):
return self.name
def process_next_start(self):
return self.pipe.next()
def get_pipes(self):
return [self.pipe]
class VerticesVerticesPipe(Pipe):
def __init__(self, direction, *labels):
super().__init__()
# Super
self.direction = direction
self.labels = labels
self.next_end = recommend_utils.EmptyIterator()
def process_next_start(self):
"""
Start at Vertex, return Vertex
"""
while True:
if self.next_end.has_next():
try:
current_edge = self.next_end.next()
if self.direction == Direction.OUT:
return current_edge.out_vertex # Edge
elif self.direction == Direction.IN:
return current_edge.in_vertex
else:
raise Exception('Need to implement')
except FastNoSuchElementException:
pass
else:
current_vertex = self.starts.next()
edges_iterator = current_vertex.get_edges_iterator(self.direction, self.labels)
self.next_end = edges_iterator
class VerticesEdgesPipe(Pipe):
def __init__(self, direction, *labels):
super().__init__()
# Super
self.direction = direction
self.labels = labels
self.next_end = recommend_utils.EmptyIterator()
def process_next_start(self):
"""
Start at Vertex, return Vertex
"""
while True:
if self.next_end.has_next():
try:
current_edge = self.next_end.next()
if self.direction == Direction.OUT:
return current_edge # Edge
elif self.direction == Direction.IN:
return current_edge
else:
raise Exception('Need to implement')
except FastNoSuchElementException:
pass
else:
current_vertex = self.starts.next()
edges_iterator = current_vertex.get_edges_iterator(self.direction, self.labels)
self.next_end = edges_iterator
class EdgesVerticesPipe(Pipe):
def __init__(self, direction):
super().__init__()
# Super
self.direction = direction
def process_next_start(self):
"""
Start at Edge, return Vertex
"""
pass
class CapitalizePipe(Pipe):
def __init__(self):
super().__init__()
def process_next_start(self):
"""
CapitalizePipe each string object
"""
start = self.starts.next().upper()
return start
class SideEffectPipe(Pipe):
def __init__(self, function):
super().__init__()
self.function = function
def process_next_start(self):
"""
SideEffectPipe
"""
start = self.starts.next()
self.function(start)
return start
class LenPipe(Pipe):
def __init__(self):
super().__init__()
def process_next_start(self):
"""
Find length each object
"""
start = len(self.starts.next())
return start
class ListingPostSecurityMarkingCheckPipe(Pipe):
def __init__(self, username):
super().__init__()
self.username = username
def process_next_start(self):
"""
execute security_marking check on each listing
"""
while True:
listing = self.starts.next()
if not listing.security_marking:
logger.debug('Listing {0!s} has no security_marking'.format(listing.title))
if system_has_access_control(self.username, listing.security_marking):
return listing
class JitterPipe(Pipe):
def __init__(self):
super().__init__()
self.count = 0
self.non_random_beginning_count = 3
self.upper_limit = 7
def process_next_start(self):
"""
execute on each listing
"""
while True:
self.count = self.count + 1
listing = self.starts.next()
if self.count <= self.non_random_beginning_count:
return listing
random_number = random.randint(0, 10)
if random_number <= self.upper_limit:
return listing
class ListingDictPostSecurityMarkingCheckPipe(Pipe):
def __init__(self, username, featured=False):
super().__init__()
self.username = username
self.featured = featured
def process_next_start(self):
"""
execute security_marking check on each listing
"""
while True:
listing = self.starts.next()
if not listing['security_marking']:
logger.debug('Listing {0!s} has no security_marking'.format(listing['title']))
else:
if self.featured:
if listing['is_featured'] is True:
if system_has_access_control(self.username, listing['security_marking']):
return listing
else:
if system_has_access_control(self.username, listing['security_marking']):
return listing
class LimitPipe(Pipe):
def __init__(self, limit_number):
super().__init__()
self._count = 1
self.limit_number = limit_number
def process_next_start(self):
"""
Limit number of items
"""
while True:
if self._count > self.limit_number:
raise FastNoSuchElementException()
else:
current_item = self.starts.next()
self._count = self._count + 1
return current_item
class DistinctPipe(Pipe):
def __init__(self):
super().__init__()
self._items = set()
def process_next_start(self):
"""
Limit number of items
"""
while True:
current_item = self.starts.next()
if current_item not in self._items:
self._items.add(current_item)
return current_item
class ExcludePipe(Pipe):
def __init__(self, object_list):
super().__init__()
self._items = set()
for current_object in object_list:
self._items.add(current_object)
def process_next_start(self):
"""
Limit number of items
"""
while True:
current_item = self.starts.next()
if current_item not in self._items:
return current_item
class ExcludeIdsPipe(Pipe):
def __init__(self, object_list):
super().__init__()
self._items = set()
for current_object in object_list:
self._items.add(current_object)
def process_next_start(self):
"""
Limit number of items
"""
while True:
current_element = self.starts.next()
current_element_id = current_element.id
if current_element_id not in self._items:
return current_element
class GraphVertexPipe(Pipe):
"""
Start of Graph to vertex flow
"""
def __init__(self):
super().__init__()
def process_next_start(self):
"""
CapitalizePipe each string object
"""
current_vertex = self.starts.next()
return current_vertex
class ElementIdPipe(Pipe):
"""
Start of Graph to vertex flow
"""
def __init__(self):
super().__init__()
def process_next_start(self):
"""
CapitalizePipe each string object
"""
current_vertex = self.starts.next()
return current_vertex.id
class ElementPropertiesPipe(Pipe):
"""
Start of Graph to vertex flow
"""
def __init__(self, internal=False):
super().__init__()
self.internal = internal
def process_next_start(self):
"""
CapitalizePipe each string object
"""
current_vertex = self.starts.next()
vertex_properties = current_vertex.properties
if self.internal:
vertex_properties['_id'] = current_vertex.id
vertex_properties['_label'] = current_vertex.label
return vertex_properties
class ElementHasPipe(Pipe):
"""
ElementHasPipe Pipe
"""
def __init__(self, label, key=None, predicate='EQUALS', value=None):
super().__init__()
def process_next_start(self):
"""
Element Has each string object
"""
current_vertex = self.starts.next()
return current_vertex
class DictKeyPipe(Pipe):
"""
DictKeyPipe Pipe
"""
def __init__(self, key):
super().__init__()
self.key = key
def process_next_start(self):
"""
DictKeyPipe
"""
current_dict = self.starts.next()
if self.key in current_dict:
return current_dict[self.key]
class EachKeyPipe(Pipe):
"""
EachKeyPipe Pipe
"""
def __init__(self, key):
super().__init__()
self.key = key
self.next_end = recommend_utils.EmptyIterator()
def process_next_start(self):
"""
EachKeyPipe
"""
while True:
if self.next_end.has_next():
try:
while True:
return self.next_end.next()
except recommend_utils.FastNoSuchElementException:
# Ignore FastNoSuchElementException
pass
else:
current_dict = self.starts.next()
if self.key in current_dict:
current_list = current_dict[self.key]
if current_list:
self.next_end = recommend_utils.ListIterator(current_list)
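# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The pipes above
# follow a pull-based protocol: each pipe reads from its `starts` source via
# next()/has_next(), and exhaustion surfaces as FastNoSuchElementException.
# Assuming recommend_utils.ListIterator wraps a plain list with that protocol,
# two pipes can be chained like this:
#
#     capitalize = CapitalizePipe()
#     capitalize.set_starts(recommend_utils.ListIterator(['alpha', 'beta']))
#     lengths = LenPipe()
#     lengths.set_starts(capitalize)
#     try:
#         while True:
#             print(lengths.next())  # prints 5, then 4
#     except recommend_utils.FastNoSuchElementException:
#         pass  # upstream iterator exhausted
# ---------------------------------------------------------------------------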
|
py | 7dff171a5eb13868d248fa1c4549f6e3efc762d7 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import annotations
from typing import ByteString, Callable, Dict, List, Optional, Set, Union
import marshal
import enum
import io
import re
import uuid
import zlib
import os
import os.path
import contextlib
import dataclasses
import sys
from importlib.util import MAGIC_NUMBER
from refinery.units.formats.archive import Arg, ArchiveUnit
from refinery.units.pattern.carve import carve
from refinery.lib.structures import EOF, MemoryFile, StreamDetour, Struct, StructReader
from refinery.lib.tools import NoLogging
from Crypto.Cipher import AES
class Unmarshal(enum.IntEnum):
No = 0
Yes = 1
YesAndDecompile = 2
def version2tuple(version: str):
return tuple(int(k, 10) for k in re.fullmatch(R'^(\d+\.\d+(?:\.\d+)?)(.*)$', version).group(1).split('.'))
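# Worked example (added for clarity): only the leading "major.minor[.micro]" part is
# kept and any suffix is discarded, e.g. version2tuple('3.8.10rc1') == (3, 8, 10).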
def decompress_peek(buffer, size=512) -> Optional[bytes]:
try:
return zlib.decompressobj().decompress(buffer[:size])
except zlib.error:
return None
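# For example (added for clarity): decompress_peek(zlib.compress(b'spam')) == b'spam',
# while decompress_peek(b'not a zlib stream') is None; this is used below to sniff
# whether a PYZ entry still needs plain zlib decompression.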
def decompile_buffer(buffer: ByteString, file_name: str) -> ByteString:
code_objects = {}
sys_stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
try:
version, timestamp, magic_int, codez, is_pypy, _, _ = \
xtpyi._xdis.load.load_module_from_file_object(MemoryFile(buffer), file_name, code_objects)
finally:
sys.stderr.close()
sys.stderr = sys_stderr
if not isinstance(codez, list):
codez = [codez]
errors = ''
python = ''
for code in codez:
for name, engine in {
'decompyle3': xtpyi._decompyle3,
'uncompyle6': xtpyi._uncompyle6,
}.items():
with io.StringIO(newline='') as output, NoLogging(NoLogging.Mode.ALL):
try:
engine.main.decompile(
version,
code,
output,
timestamp=timestamp,
code_objects=code_objects,
is_pypy=is_pypy,
magic_int=magic_int,
)
except Exception as E:
errors += '\n'.join(F'# {line}' for line in (
F'Error while decompiling with {name}:', *str(E).splitlines(True)))
errors += '\n'
else:
python = output.getvalue()
break
if python:
return python.encode(xtpyi.codec)
embedded = bytes(buffer | carve('printable', single=True))
if len(buffer) - len(embedded) < 0x20:
return embedded
disassembly = MemoryFile()
with io.TextIOWrapper(disassembly, xtpyi.codec, newline='\n') as output:
output.write(errors)
output.write('# Generating Disassembly:\n\n')
for code in codez:
instructions = list(xtpyi._xdis.std.Bytecode(code))
width_offset = max(len(str(i.offset)) for i in instructions)
for i in instructions:
opname = i.opname.replace('_', '.').lower()
offset = F'{i.offset:0{width_offset}d}'
output.write(F'# {offset:>5} {opname:<25} {i.argrepr}\n')
output.write('\n')
return disassembly.getbuffer()
class PiType(bytes, enum.Enum):
BINARY = B'b' # noqa / binary
DEPENDENCY = B'd' # noqa / runtime option
PYZ = B'z' # noqa / zlib (pyz) - frozen Python code
PACKAGE = B'M' # noqa / Python package (__init__.py)
MODULE = B'm' # noqa / Python module
SOURCE = B's' # noqa / Python script (v3)
DATA = B'x' # noqa / data
RUNTIME_OPTION = B'o' # noqa / runtime option
SPLASH = B'l' # noqa / splash resources
UNKNOWN = B'uk' # noqa
DECOMPILED = B'dc' # noqa
USERCODE = B'uc' # noqa
ENCRYPTED = B'ec' # noqa
class PzType(enum.IntEnum):
MODULE = 0
PKG = 1
DATA = 2
@dataclasses.dataclass
class PiMeta:
type: PiType
name: str
data: Union[Callable[[], ByteString], ByteString]
def unpack(self) -> ByteString:
if callable(self.data):
self.data = self.data()
return self.data
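# Example (added for clarity): PiMeta.data may be a lazily evaluated callable, e.g.
# PiMeta(PiType.DATA, 'blob', lambda: b'abc').unpack() returns b'abc' and caches it,
# so repeated unpack() calls do not recompute the payload.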
def make_decompiled_item(name: str, data: ByteString, *magics) -> PiMeta:
def extract(data=data, magics=magics):
error = None
if any(data[:4] == m[:4] for m in magics):
return decompile_buffer(data, name)
for magic in magics:
try:
return decompile_buffer(magic + data, name)
except Exception as exception:
error = exception
return '\n'.join(F'# {line}'
for line in str(error).splitlines(True)).encode(xtpyi.codec)
return PiMeta(PiType.DECOMPILED, F'{name}.py', extract)
class PYZ(Struct):
MagicSignature = B'PYZ\0'
def __init__(self, reader: StructReader, version: str):
reader.bigendian = True
self.base = reader.tell()
signature = reader.read(4)
if signature != self.MagicSignature:
raise ValueError('invalid magic')
magic = bytes(reader.read(4))
with contextlib.suppress(KeyError):
version = xtpyi._xdis.magics.versions[magic]
vtuple = version2tuple(version)
padding_size = 4
if vtuple >= (3, 3):
padding_size += 4
if vtuple >= (3, 7):
padding_size += 4
self.version = version
self.magic = magic + padding_size * b'\0'
self.toc_offset = reader.i32()
self.reader = reader
self.entries: List[PiMeta] = []
def unpack(self, decompile: bool, key: Optional[bytes] = None) -> bool:
with StreamDetour(self.reader, self.base + self.toc_offset):
toc_data = self.reader.read()
try:
toc = marshal.loads(toc_data)
except Exception as error:
if MAGIC_NUMBER != self.magic[:4]:
_ord = xtpyi._xdis.marsh.Ord
xtpyi._xdis.marsh.Ord = ord # monkey-patch workaround for bug in xdis
try:
toc = xtpyi._xdis.marsh.load(
MemoryFile(toc_data), self.version)
except Exception:
pass
else:
error = None
finally:
xtpyi._xdis.marsh.Ord = _ord
if error is not None:
raise error
if isinstance(toc, list):
try:
toc = dict(toc)
except Exception as error:
self.entries = []
self.error = error
return
failures = 0
attempts = len(toc)
for name, (pzt, offset, length) in toc.items():
try:
name: str
name = name.decode('utf-8')
except AttributeError:
pass
try:
pzt = PzType(pzt)
except Exception:
pzt = PzType.DATA
name = name.replace('.', '/')
if pzt is PzType.PKG:
name = F'{name}/__init__'
with StreamDetour(self.reader, self.base + offset):
data = self.reader.read(length)
if key:
def decompressed(data=data):
cipher = AES.new(key, AES.MODE_CFB, bytes(data[:0x10]))
return zlib.decompress(cipher.decrypt(data[0x10:]))
elif decompress_peek(data):
def decompressed(data=data):
return zlib.decompress(data)
else:
failures += 1
continue
if decompile and pzt in (PzType.MODULE, PzType.PKG):
def decompiled(data=data, name=name, magic=self.magic):
data = decompressed(data)
if data[:4] != magic[:4]:
data = magic + data
return decompile_buffer(data, name)
self.entries.append(PiMeta(PiType.DECOMPILED, F'{name}.py', decompiled))
name = F'{name}.pyc'
type = PiType.SOURCE
else:
type = PiType.DATA
self.entries.append(PiMeta(type, name, decompressed))
if key:
if failures >= 6:
xtpyi.logger.warning(F'pyz decompression failed for {failures-5} additional items')
return True
elif failures > 0.7 * attempts:
self.entries.clear()
return False
else:
return True
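# Note (added for clarity): when a 16-byte key is recovered, each encrypted PYZ entry
# is assumed to start with a 16-byte IV followed by AES-CFB ciphertext of the
# zlib-compressed payload, which is exactly what the `decompressed` closures above undo.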
class PiTOCEntry(Struct):
def __init__(self, reader: StructReader):
reader.bigendian = True
entry_start_offset = reader.tell()
self.size_of_entry = reader.i32()
self.offset = reader.i32()
self.size_of_compressed_data = reader.i32()
self.size_of_uncompressed_data = reader.i32()
self.is_compressed = bool(reader.read_byte())
entry_type = bytes(reader.read(1))
name_length = self.size_of_entry - reader.tell() + entry_start_offset
if name_length > 0x1000:
raise RuntimeError(F'Refusing to process TOC entry with name of size {name_length}.')
name, *_ = bytes(reader.read(name_length)).partition(B'\0')
try:
name = name.decode('utf8', 'backslashreplace')
except Exception:
name = None
if not all(part.isprintable() for part in re.split('\\s*', name)):
raise RuntimeError('Refusing to process TOC entry with non-printable name.')
name = name or str(uuid.uuid4())
if entry_type == B'Z':
entry_type = B'z'
try:
self.type = PiType(entry_type)
except ValueError:
xtpyi.logger.error(F'unknown type {entry_type!r} in field {name}')
self.type = PiType.UNKNOWN
self.name = name
def __hash__(self):
return hash(self.name)
class PyInstallerArchiveEpilogue(Struct):
MagicSignature = bytes.fromhex('4D45490C0B0A0B0E')
def _read_libname(self, reader: StructReader) -> Optional[str]:
position = reader.tell()
try:
libname, t, rest = reader.read_bytes(64).partition(B'\0')
except EOF:
reader.seekset(position)
return None
try:
libname = libname.decode('utf8')
except Exception:
reader.seekset(position)
return None
if not t or any(rest) or len(rest) < 10 or not re.fullmatch(R'[\s!-~]+', libname):
reader.seekset(position)
return None
return libname
def __init__(self, reader: StructReader, offset: int, unmarshal: Unmarshal = Unmarshal.No):
reader.bigendian = True
reader.seekset(offset)
self.reader = reader
signature = reader.read_bytes(8)
if signature != self.MagicSignature:
raise ValueError(
F'offset 0x{offset:X} has invalid signature {signature.hex().upper()}; '
F'should be {self.MagicSignature.hex().upper()}')
self.size = reader.i32()
toc_offset = reader.i32()
toc_length = reader.i32()
self.py_version = '.'.join(str(reader.u32()))
self.py_libname = self._read_libname(reader)
self.offset = reader.tell() - self.size
self.toc: Dict[str, PiTOCEntry] = {}
toc_end = self.offset + toc_offset + toc_length
reader.seekset(self.offset + toc_offset)
while reader.tell() < toc_end:
try:
entry = PiTOCEntry(reader)
except EOF:
xtpyi.logger.warning('end of file while reading TOC')
break
except Exception as error:
xtpyi.logger.warning(F'unexpected error while reading TOC: {error!s}')
break
if entry.name in self.toc:
raise KeyError(F'duplicate name {entry.name}')
self.toc[entry.name] = entry
self.files: Dict[str, PiMeta] = {}
no_pyz_found = True
pyz_entries: Dict[str, PYZ] = {}
for entry in list(self.toc.values()):
if entry.type is not PiType.PYZ:
continue
no_pyz_found = False
name, xt = os.path.splitext(entry.name)
name_pyz = F'{name}.pyz'
if name == entry.name:
del self.toc[name]
self.toc[name_pyz] = entry
entry.name = name_pyz
reader.seekset(self.offset + entry.offset)
if entry.is_compressed:
data = self.extract(entry.name).unpack()
else:
data = reader
pyz_entries[name] = PYZ(data, self.py_version)
magics = {pyz.magic for pyz in pyz_entries.values()}
if not magics:
if not no_pyz_found:
xtpyi.logger.warning(
'no magic signature could be recovered from embedded pyzip archives; this is '
'unusual and means that there is no way to guess the missing magic for source '
'file entries and it will likely not be possible to decompile them.')
return
elif len(magics) > 1:
xtpyi.logger.warning('more than one magic signature was recovered; this is unusual.')
magics = list(magics)
keys: Set[bytes] = set()
for entry in self.toc.values():
extracted = self.extract(entry.name)
if entry.type not in (PiType.SOURCE, PiType.MODULE):
self.files[entry.name] = extracted
continue
data = extracted.unpack()
name, _ = os.path.splitext(extracted.name)
del self.files[extracted.name]
extracted.name = F'{name}.pyc'
self.files[extracted.name] = extracted
if len(magics) == 1 and data[:4] != magics[0]:
extracted.data = magics[0] + data
decompiled = make_decompiled_item(name, data, *magics)
if entry.type is PiType.SOURCE:
decompiled.type = PiType.USERCODE
self.files[F'{name}.py'] = decompiled
if name.endswith('crypto_key'):
for key in decompiled.unpack() | carve('string', decode=True):
if len(key) != 0x10:
continue
xtpyi.logger.info(F'found key: {key.decode(xtpyi.codec)}')
keys.add(key)
if unmarshal is Unmarshal.No:
return
if not keys:
key = None
else:
key = next(iter(keys))
for name, pyz in pyz_entries.items():
pyz.unpack(unmarshal is Unmarshal.YesAndDecompile, key)
for unpacked in pyz.entries:
unpacked.name = path = F'{name}/{unpacked.name}'
if path in self.files:
raise ValueError(F'duplicate file name: {path}')
self.files[path] = unpacked
def extract(self, name: str) -> PiMeta:
try:
return self.files[name]
except KeyError:
pass
entry = self.toc[name]
with StreamDetour(self.reader, self.offset + entry.offset):
data = self.reader.read(entry.size_of_compressed_data)
if entry.is_compressed:
def extracted(d=data): return zlib.decompress(d)
else:
extracted = data
result = PiMeta(entry.type, name, extracted)
self.files[name] = result
return result
class xtpyi(ArchiveUnit):
"""
Extracts and decompiles files from a Python Installer (aka PyInstaller) archive.
"""
def __init__(
self, *paths, list=False, join_path=False, drop_path=False, path=b'path', date=b'date',
user_code: Arg.Switch('-u', group='FILTER', help=(
'Extract only source code files from the root of the archive. These usually implement '
'the actual domain logic.')) = False,
unmarshal: Arg('-y', action='count', group='FILTER', help=(
'(DANGEROUS) Unmarshal embedded PYZ archives. Warning: Maliciously crafted packages can '
'potentially exploit this to execute code. It is advised to only use this option inside '
'an isolated environment. Specify twice to decompile unmarshalled Python bytecode.'
)) = 0
):
super().__init__(
*paths,
list=list, join_path=join_path, drop_path=drop_path, path=path, date=date,
unmarshal=unmarshal, user_code=user_code
)
@ArchiveUnit.Requires('xdis', optional=False)
def _xdis():
import xdis.load
import xdis.magics
import xdis.marsh
import xdis
A, B, C, *_ = sys.version_info
V = F'{A}.{B}.{C}'
if V not in xdis.magics.canonic_python_version:
xdis.magics.add_canonic_versions(V, F'{A}.{B}')
del A, B, C, V
import xdis.std
return xdis
@ArchiveUnit.Requires('uncompyle6', optional=False)
def _uncompyle6():
import uncompyle6
import uncompyle6.main
return uncompyle6
@ArchiveUnit.Requires('decompyle3', optional=False)
def _decompyle3():
import decompyle3
import decompyle3.main
return decompyle3
def unpack(self, data):
view = memoryview(data)
positions = [m.start() for m in re.finditer(re.escape(PyInstallerArchiveEpilogue.MagicSignature), view)]
mode = Unmarshal(min(2, int(self.args.unmarshal)))
self.log_debug(F'unmarshal mode: {mode.name}')
if not positions:
raise LookupError('unable to find PyInstaller signature')
if len(positions) > 2:
# first position is expected to be the sentinel value in the unpacker stub
width = max(len(F'{p:X}') for p in positions)
for position in positions:
self.log_info(F'magic signature found at offset 0x{position:0{width}X}')
self.log_warn(F'found {len(positions)-1} potential PyInstaller epilogue markers; using last one.')
archive = PyInstallerArchiveEpilogue(view, positions[-1], mode)
for name, file in archive.files.items():
if self.args.user_code:
if file.type != PiType.USERCODE:
continue
if name.startswith('pyiboot'):
continue
yield self._pack(name, None, file.data, type=file.type.name)
@classmethod
def handles(cls, data: ByteString) -> Optional[bool]:
return PyInstallerArchiveEpilogue.MagicSignature in data
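# Hypothetical command-line sketch (assuming a standard binary-refinery install where
# units such as emit, dump and this xtpyi are exposed as console scripts):
#
#     emit packed_sample.exe | xtpyi -u | dump {path}
#
# where -u is the user_code switch defined above and -yy would additionally unmarshal
# and decompile embedded PYZ archives (see the unmarshal argument).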
|
py | 7dff1b94621f2587764b07281b8891e8c6eace03 | # Generated by Django 3.2.9 on 2021-12-01 10:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('photos', '0007_alter_image_image'),
]
operations = [
migrations.AlterField(
model_name='category',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='image',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='location',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
|
py | 7dff1c319a6c251f27c302ce9ee8aa3440ffc714 | """Interface to the liblzma compression library.
This module provides a class for reading and writing compressed files,
classes for incremental (de)compression, and convenience functions for
one-shot (de)compression.
These classes and functions support both the XZ and legacy LZMA
container formats, as well as raw compressed data streams.
"""
__all__ = [
"CHECK_NONE", "CHECK_CRC32", "CHECK_CRC64", "CHECK_SHA256",
"CHECK_ID_MAX", "CHECK_UNKNOWN",
"FILTER_LZMA1", "FILTER_LZMA2", "FILTER_DELTA", "FILTER_X86", "FILTER_IA64",
"FILTER_ARM", "FILTER_ARMTHUMB", "FILTER_POWERPC", "FILTER_SPARC",
"FORMAT_AUTO", "FORMAT_XZ", "FORMAT_ALONE", "FORMAT_RAW",
"MF_HC3", "MF_HC4", "MF_BT2", "MF_BT3", "MF_BT4",
"MODE_FAST", "MODE_NORMAL", "PRESET_DEFAULT", "PRESET_EXTREME",
"LZMACompressor", "LZMADecompressor", "LZMAFile", "LZMAError",
"open", "compress", "decompress", "is_check_supported",
]
import builtins
import io
from _lzma import *
from _lzma import _encode_filter_properties, _decode_filter_properties
import _compression
_MODE_CLOSED = 0
_MODE_READ = 1
# Value 2 no longer used
_MODE_WRITE = 3
class LZMAFile(_compression.BaseStream):
"""A file object providing transparent LZMA (de)compression.
An LZMAFile can act as a wrapper for an existing file object, or
refer directly to a named file on disk.
Note that LZMAFile provides a *binary* file interface - data read
is returned as bytes, and data to be written must be given as bytes.
"""
def __init__(self, filename=None, mode="r", *,
format=None, check=-1, preset=None, filters=None):
"""Open an LZMA-compressed file in binary mode.
filename can be either an actual file name (given as a str or
bytes object), in which case the named file is opened, or it can
be an existing file object to read from or write to.
mode can be "r" for reading (default), "w" for (over)writing,
"x" for creating exclusively, or "a" for appending. These can
equivalently be given as "rb", "wb", "xb" and "ab" respectively.
format specifies the container format to use for the file.
If mode is "r", this defaults to FORMAT_AUTO. Otherwise, the
default is FORMAT_XZ.
check specifies the integrity check to use. This argument can
only be used when opening a file for writing. For FORMAT_XZ,
the default is CHECK_CRC64. FORMAT_ALONE and FORMAT_RAW do not
support integrity checks - for these formats, check must be
omitted, or be CHECK_NONE.
When opening a file for reading, the *preset* argument is not
meaningful, and should be omitted. The *filters* argument should
also be omitted, except when format is FORMAT_RAW (in which case
it is required).
When opening a file for writing, the settings used by the
compressor can be specified either as a preset compression
level (with the *preset* argument), or in detail as a custom
filter chain (with the *filters* argument). For FORMAT_XZ and
FORMAT_ALONE, the default is to use the PRESET_DEFAULT preset
level. For FORMAT_RAW, the caller must always specify a filter
chain; the raw compressor does not support preset compression
levels.
preset (if provided) should be an integer in the range 0-9,
optionally OR-ed with the constant PRESET_EXTREME.
filters (if provided) should be a sequence of dicts. Each dict
should have an entry for "id" indicating ID of the filter, plus
additional entries for options to the filter.
"""
self._fp = None
self._closefp = False
self._mode = _MODE_CLOSED
if mode in ("r", "rb"):
if check != -1:
raise ValueError("Cannot specify an integrity check "
"when opening a file for reading")
if preset is not None:
raise ValueError("Cannot specify a preset compression "
"level when opening a file for reading")
if format is None:
format = FORMAT_AUTO
mode_code = _MODE_READ
elif mode in ("w", "wb", "a", "ab", "x", "xb"):
if format is None:
format = FORMAT_XZ
mode_code = _MODE_WRITE
self._compressor = LZMACompressor(format=format, check=check,
preset=preset, filters=filters)
self._pos = 0
else:
raise ValueError("Invalid mode: {!r}".format(mode))
if isinstance(filename, (str, bytes)):
if "b" not in mode:
mode += "b"
self._fp = builtins.open(filename, mode)
self._closefp = True
self._mode = mode_code
elif hasattr(filename, "read") or hasattr(filename, "write"):
self._fp = filename
self._mode = mode_code
else:
raise TypeError("filename must be a str or bytes object, or a file")
if self._mode == _MODE_READ:
raw = _compression.DecompressReader(self._fp, LZMADecompressor,
trailing_error=LZMAError, format=format, filters=filters)
self._buffer = io.BufferedReader(raw)
def close(self):
"""Flush and close the file.
May be called more than once without error. Once the file is
closed, any other operation on it will raise a ValueError.
"""
if self._mode == _MODE_CLOSED:
return
try:
if self._mode == _MODE_READ:
self._buffer.close()
self._buffer = None
elif self._mode == _MODE_WRITE:
self._fp.write(self._compressor.flush())
self._compressor = None
finally:
try:
if self._closefp:
self._fp.close()
finally:
self._fp = None
self._closefp = False
self._mode = _MODE_CLOSED
@property
def closed(self):
"""True if this file is closed."""
return self._mode == _MODE_CLOSED
def fileno(self):
"""Return the file descriptor for the underlying file."""
self._check_not_closed()
return self._fp.fileno()
def seekable(self):
"""Return whether the file supports seeking."""
return self.readable() and self._buffer.seekable()
def readable(self):
"""Return whether the file was opened for reading."""
self._check_not_closed()
return self._mode == _MODE_READ
def writable(self):
"""Return whether the file was opened for writing."""
self._check_not_closed()
return self._mode == _MODE_WRITE
def peek(self, size=-1):
"""Return buffered data without advancing the file position.
Always returns at least one byte of data, unless at EOF.
The exact number of bytes returned is unspecified.
"""
self._check_can_read()
# Relies on the undocumented fact that BufferedReader.peek() always
# returns at least one byte (except at EOF)
return self._buffer.peek(size)
def read(self, size=-1):
"""Read up to size uncompressed bytes from the file.
If size is negative or omitted, read until EOF is reached.
Returns b"" if the file is already at EOF.
"""
self._check_can_read()
return self._buffer.read(size)
def read1(self, size=-1):
"""Read up to size uncompressed bytes, while trying to avoid
making multiple reads from the underlying stream. Reads up to a
buffer's worth of data if size is negative.
Returns b"" if the file is at EOF.
"""
self._check_can_read()
if size < 0:
size = io.DEFAULT_BUFFER_SIZE
return self._buffer.read1(size)
def readline(self, size=-1):
"""Read a line of uncompressed bytes from the file.
The terminating newline (if present) is retained. If size is
non-negative, no more than size bytes will be read (in which
case the line may be incomplete). Returns b'' if already at EOF.
"""
self._check_can_read()
return self._buffer.readline(size)
def write(self, data):
"""Write a bytes object to the file.
Returns the number of uncompressed bytes written, which is
always len(data). Note that due to buffering, the file on disk
may not reflect the data written until close() is called.
"""
self._check_can_write()
compressed = self._compressor.compress(data)
self._fp.write(compressed)
self._pos += len(data)
return len(data)
def seek(self, offset, whence=io.SEEK_SET):
"""Change the file position.
The new position is specified by offset, relative to the
position indicated by whence. Possible values for whence are:
0: start of stream (default): offset must not be negative
1: current stream position
2: end of stream; offset must not be positive
Returns the new file position.
Note that seeking is emulated, so depending on the parameters,
this operation may be extremely slow.
"""
self._check_can_seek()
return self._buffer.seek(offset, whence)
def tell(self):
"""Return the current file position."""
self._check_not_closed()
if self._mode == _MODE_READ:
return self._buffer.tell()
return self._pos
def open(filename, mode="rb", *,
format=None, check=-1, preset=None, filters=None,
encoding=None, errors=None, newline=None):
"""Open an LZMA-compressed file in binary or text mode.
filename can be either an actual file name (given as a str or bytes
object), in which case the named file is opened, or it can be an
existing file object to read from or write to.
The mode argument can be "r", "rb" (default), "w", "wb", "x", "xb",
"a", or "ab" for binary mode, or "rt", "wt", "xt", or "at" for text
mode.
The format, check, preset and filters arguments specify the
compression settings, as for LZMACompressor, LZMADecompressor and
LZMAFile.
For binary mode, this function is equivalent to the LZMAFile
constructor: LZMAFile(filename, mode, ...). In this case, the
encoding, errors and newline arguments must not be provided.
For text mode, an LZMAFile object is created, and wrapped in an
io.TextIOWrapper instance with the specified encoding, error
handling behavior, and line ending(s).
"""
if "t" in mode:
if "b" in mode:
raise ValueError("Invalid mode: %r" % (mode,))
else:
if encoding is not None:
raise ValueError("Argument 'encoding' not supported in binary mode")
if errors is not None:
raise ValueError("Argument 'errors' not supported in binary mode")
if newline is not None:
raise ValueError("Argument 'newline' not supported in binary mode")
lz_mode = mode.replace("t", "")
binary_file = LZMAFile(filename, lz_mode, format=format, check=check,
preset=preset, filters=filters)
if "t" in mode:
return io.TextIOWrapper(binary_file, encoding, errors, newline)
else:
return binary_file
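# Usage sketch (illustrative, assuming a writable working directory):
#
#     with open("example.xz", "wt", encoding="utf-8") as fh:
#         fh.write("hello\n")
#     with open("example.xz", "rt", encoding="utf-8") as fh:
#         assert fh.read() == "hello\n"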
def compress(data, format=FORMAT_XZ, check=-1, preset=None, filters=None):
"""Compress a block of data.
Refer to LZMACompressor's docstring for a description of the
optional arguments *format*, *check*, *preset* and *filters*.
For incremental compression, use an LZMACompressor instead.
"""
comp = LZMACompressor(format, check, preset, filters)
return comp.compress(data) + comp.flush()
def decompress(data, format=FORMAT_AUTO, memlimit=None, filters=None):
"""Decompress a block of data.
Refer to LZMADecompressor's docstring for a description of the
optional arguments *format*, *check* and *filters*.
For incremental decompression, use an LZMADecompressor instead.
"""
results = []
while True:
decomp = LZMADecompressor(format, memlimit, filters)
try:
res = decomp.decompress(data)
except LZMAError:
if results:
break # Leftover data is not a valid LZMA/XZ stream; ignore it.
else:
raise # Error on the first iteration; bail out.
results.append(res)
if not decomp.eof:
raise LZMAError("Compressed data ended before the "
"end-of-stream marker was reached")
data = decomp.unused_data
if not data:
break
return b"".join(results)
|
py | 7dff1c8feb3752c6fed51f68a340d8d3d4708b67 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
import unittest
import os
import shutil
from ai_flow import AIFlowServerRunner, init_ai_flow_context
from ai_flow.workflow.status import Status
from ai_flow_plugins.job_plugins import bash
from ai_flow.test.util.notification_service_utils import start_notification_server, stop_notification_server
import ai_flow as af
project_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
class TestBash(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.ns_server = start_notification_server()
config_file = project_path + '/master.yaml'
cls.master = AIFlowServerRunner(config_file=config_file)
cls.master.start()
@classmethod
def tearDownClass(cls) -> None:
cls.master.stop()
generated = '{}/generated'.format(project_path)
if os.path.exists(generated):
shutil.rmtree(generated)
temp = '{}/temp'.format(project_path)
if os.path.exists(temp):
shutil.rmtree(temp)
stop_notification_server(cls.ns_server)
def setUp(self):
self.master._clear_db()
af.current_graph().clear_graph()
init_ai_flow_context()
def tearDown(self):
self.master._clear_db()
def test_bash_task(self):
with af.job_config('task_1'):
af.user_define_operation(processor=bash.BashProcessor(bash_command='echo "Xiao ming hello world!"'))
w = af.workflow_operation.submit_workflow(workflow_name='test_bash')
je = af.workflow_operation.start_job_execution(job_name='task_1', execution_id='1')
jes = af.workflow_operation.get_job_executions(job_name='task_1', execution_id='1')
self.assertEqual(Status.FINISHED, jes[0].status)
def test_stop_bash_task(self):
time.sleep(1)
with af.job_config('task_1'):
af.user_define_operation(processor=bash.BashProcessor(bash_command='sleep 10'))
w = af.workflow_operation.submit_workflow(workflow_name='test_bash')
je = af.workflow_operation.start_job_execution(job_name='task_1', execution_id='1')
af.workflow_operation.stop_job_execution(job_name='task_1', execution_id='1')
jes = af.workflow_operation.get_job_executions(job_name='task_1', execution_id='1')
self.assertEqual(Status.FAILED, jes[0].status)
self.assertTrue('err' in jes[0].properties)
if __name__ == '__main__':
unittest.main()
|
py | 7dff1ceef61f17990dfb2d4b2e0801f801bc4020 | #! python3
# -*- coding: utf-8 -*-
import time
import binascii
test_str = b'Hello World.'
'''hashlib'''
import hashlib
per_st = time.perf_counter()
m = hashlib.sha256(test_str).digest()
per_ed = time.perf_counter()
print(binascii.hexlify(m))
print('run time use hashlib: ' + str(per_ed - per_st) + ' s')
'''pycryptodome'''
from Crypto.Hash import SHA256
per_st = time.perf_counter()
h = SHA256.new(test_str).digest()
per_ed = time.perf_counter()
print(binascii.hexlify(h))
print('run time use pycryptodome: ' + str(per_ed - per_st) + ' s')
'''cryptography'''
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
per_st = time.perf_counter()
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(test_str)
s = digest.finalize()
per_ed = time.perf_counter()
print(binascii.hexlify(s))
print('run time use cryptography: ' + str(per_ed - per_st) + ' s')
|
py | 7dff1d031606574d17b990f88dd82d9aaa0259ba | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
import dataclasses
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Callable, Dict, Optional
import numpy as np
from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, EvalPrediction, GlueDataset
from transformers import GlueDataTrainingArguments as DataTrainingArguments
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
glue_compute_metrics,
glue_output_modes,
glue_tasks_num_labels,
set_seed,
)
# Pin training to a single GPU. Open question: how should this be configured to train in parallel across n GPUs?
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
# step 1. Parse arguments
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# step 2. Check whether the output directory already exists.
# Why check? Presumably to avoid clobbering a previous run. A possible optimization:
# if the model already has output here, resume training from that output instead of failing.
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
)
# step 3. Configure logging
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
logger.info("Training/evaluation parameters %s", training_args)
# step 4. Set the random seed (for reproducibility across runs)
set_seed(training_args.seed)
# step 5. Look up the number of labels and the output mode for the requested GLUE task
try:
num_labels = glue_tasks_num_labels[data_args.task_name]
output_mode = glue_output_modes[data_args.task_name]
except KeyError:
raise ValueError("Task not found: %s" % (data_args.task_name))
# step 6. Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
)
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
# step7. Get datasets
# Build the train, eval, and test datasets
train_dataset = (
GlueDataset(data_args,
tokenizer=tokenizer,
cache_dir=model_args.cache_dir) if training_args.do_train else None
)
eval_dataset = (
GlueDataset(data_args,
tokenizer=tokenizer,
mode="dev",
cache_dir=model_args.cache_dir)
if training_args.do_eval
else None
)
test_dataset = (
GlueDataset(data_args,
tokenizer=tokenizer,
mode="test",
cache_dir=model_args.cache_dir)
if training_args.do_predict
else None
)
# step 8. Define the metric used during evaluation.
# Note the nested closures here; take task_name = "mrpc" as an example.
def build_compute_metrics_fn(task_name: str) -> Callable[[EvalPrediction], Dict]:
def compute_metrics_fn(p: EvalPrediction):
if output_mode == "classification":
preds = np.argmax(p.predictions, axis=1)
elif output_mode == "regression":
preds = np.squeeze(p.predictions)
return glue_compute_metrics(task_name, preds, p.label_ids)
return compute_metrics_fn  # note: this returns a function (closure), not a value
# step 9.Initialize our Trainer
""" 分别来谈一下其中参数的作用
01.compute_metrics (Callable[[EvalPrediction], Dict], optional):
The function that will be used to compute metrics at evaluation.
Must take a EvalPrediction and return a dictionary string to metric values.
02. 通常的情况是,程序运行到这一步,如果GPU的内存不足,就会报 CUDA error: out of memory
"""
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=build_compute_metrics_fn(data_args.task_name),
)
# step 10.Training
if training_args.do_train:
trainer.train(  # entry point for the actual training loop
model_path=model_args.model_name_or_path
if os.path.isdir(model_args.model_name_or_path) else None
)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# step 11.Evaluation
eval_results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_datasets = [eval_dataset]
if data_args.task_name == "mnli":
mnli_mm_data_args = dataclasses.replace(data_args, task_name="mnli-mm")
eval_datasets.append(
GlueDataset(mnli_mm_data_args, tokenizer=tokenizer, mode="dev", cache_dir=model_args.cache_dir)
)
for eval_dataset in eval_datasets:
trainer.compute_metrics = build_compute_metrics_fn(eval_dataset.args.task_name)
eval_result = trainer.evaluate(eval_dataset=eval_dataset)
output_eval_file = os.path.join(
training_args.output_dir, f"eval_results_{eval_dataset.args.task_name}.txt"
)
if trainer.is_world_master():
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(eval_dataset.args.task_name))
for key, value in eval_result.items():
logger.info(" %s = %s", key, value) # 打日志的同时写入到文件中
writer.write("%s = %s\n" % (key, value))
eval_results.update(eval_result)
if training_args.do_predict:
logging.info("*** Test ***")
test_datasets = [test_dataset]
if data_args.task_name == "mnli":
mnli_mm_data_args = dataclasses.replace(data_args, task_name="mnli-mm")
test_datasets.append(
GlueDataset(mnli_mm_data_args, tokenizer=tokenizer, mode="test", cache_dir=model_args.cache_dir)
)
for test_dataset in test_datasets:
predictions = trainer.predict(test_dataset=test_dataset).predictions
if output_mode == "classification":
predictions = np.argmax(predictions, axis=1)
output_test_file = os.path.join(
training_args.output_dir, f"test_results_{test_dataset.args.task_name}.txt"
)
if trainer.is_world_master():
with open(output_test_file, "w") as writer:
logger.info("***** Test results {} *****".format(test_dataset.args.task_name))
writer.write("index\tprediction\n")
for index, item in enumerate(predictions):
if output_mode == "regression":
writer.write("%d\t%3.3f\n" % (index, item))
else:
item = test_dataset.get_labels()[item]
writer.write("%d\t%s\n" % (index, item))
return eval_results
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() |
py | 7dff1f6af0486b029bacaf13cc86f9853e044739 | import tempfile
import sacc
import sacc.data_types
import numpy as np
import pytest
import os
import pathlib
import urllib.request
import time
test_dir = pathlib.Path(__file__).resolve().parent
test_data_dir = test_dir / 'data'
# idea based on TreeCorr tests
def get_from_wiki(url):
file_name = url.split('/')[-1]
local_file_name = test_data_dir / file_name
if not local_file_name.exists():
print(f"Downlading {url} to data dir")
try:
urllib.request.urlretrieve(url, local_file_name)
except urllib.request.HTTPError as err:
if err.code == 429:
print("Rate limit download - waiting 10 seconds to try again")
time.sleep(10)
urllib.request.urlretrieve(url, local_file_name)
else:
raise
return local_file_name
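# Example (added for clarity): get_from_wiki("https://example.org/data/test.fits") would
# download the file into the local data directory (test_data_dir) on first use and simply
# return the cached path on later calls; the URL here is a placeholder, not a real location.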
def test_quantity_warning():
s = sacc.Sacc()
with pytest.warns(UserWarning):
s.add_tracer('Misc', 'source_0',
quantity='dummy')
def test_data_type_warning():
s = sacc.Sacc()
s.add_tracer('Misc', 'source_0')
with pytest.warns(UserWarning):
s.add_data_point('cl_wrong', ('source_0', 'source_0'),
0.1, ell=10.)
def test_construct():
s = sacc.Sacc()
# Tracer
z = np.arange(0., 1.0, 0.01)
nz = (z-0.5)**2/0.1**2
s.add_tracer('NZ', 'source_0', z, nz,
quantity='galaxy_density')
for i in range(20):
ee = 0.1 * i
tracers = ('source_0', 'source_0')
s.add_data_point(sacc.standard_types.cl_00, tracers, ee, ell=10.0*i)
def test_tracers_later():
s = sacc.Sacc()
with pytest.raises(ValueError):
tracers = ('source_0', 'source_0')
s.add_data_point(sacc.standard_types.galaxy_shear_cl_ee,
tracers, 0.0, ell=1)
s = sacc.Sacc()
tracers = ('source_0', 'source_0')
s.add_data_point(sacc.standard_types.galaxy_shear_cl_ee,
tracers, 0.0, tracers_later=True, ell=1)
def test_full_cov():
covmat = np.random.uniform(size=(100, 100))
C = sacc.covariance.BaseCovariance.make(covmat)
assert C.size == 100
assert isinstance(C, sacc.covariance.FullCovariance)
assert np.all(C.covmat == covmat)
hdu = C.to_hdu()
C2 = sacc.covariance.BaseCovariance.from_hdu(hdu)
assert np.allclose(C.covmat, C2.covmat)
def test_block_cov():
covmat = [np.random.uniform(size=(50, 50)),
np.random.uniform(size=(100, 100)),
np.random.uniform(size=(150, 150))]
C = sacc.covariance.BaseCovariance.make(covmat)
assert C.size == 300
assert isinstance(C, sacc.covariance.BlockDiagonalCovariance)
hdu = C.to_hdu()
C2 = sacc.covariance.BaseCovariance.from_hdu(hdu)
assert len(C2.blocks) == 3
assert C.block_sizes == C2.block_sizes
for i in range(3):
assert np.allclose(C.blocks[i], C2.blocks[i])
def test_misc_tracer():
md1 = {'potato': 'if_necessary', 'answer': 42, 'height': 1.83}
md2 = {'potato': 'never'}
T1 = sacc.BaseTracer.make('Misc', 'tracer1',
quantity='generic', metadata=md1)
T2 = sacc.BaseTracer.make('Misc', 'tracer2',
quantity='generic', metadata=md2)
assert T1.metadata == md1
assert T2.metadata == md2
tables = sacc.BaseTracer.to_tables([T1, T2])
D = sacc.BaseTracer.from_tables(tables)
T1a = D['tracer1']
T2a = D['tracer2']
assert T1a.metadata == md1
assert T2a.metadata == md2
def test_numap_tracer():
md1 = {'mac': 'yes', 'cheese': 'of_course', 'quantity': 3}
md2 = {'mac': 'no', 'cheese': 'no'}
ell = np.linspace(2, 1000, 1000)
beam = np.exp(-0.1 * ell * (ell + 1))
beam_extra = {'err1': np.sin(ell * 0.1)}
nu = np.linspace(30., 60., 100)
bandpass = np.ones(100)
bandpass_extra = {'err1': bandpass * 0.1,
'err2': bandpass * 0.05}
T1 = sacc.BaseTracer.make('NuMap', 'band1', 0,
nu, bandpass, ell, beam,
quantity='cmb_temperature',
bandpass_extra=bandpass_extra,
beam_extra=beam_extra,
metadata=md2)
T2 = sacc.BaseTracer.make('NuMap', 'band2', 0,
nu, bandpass, ell, beam,
quantity='cmb_convergence',
bandpass_extra=bandpass_extra,
beam_extra=beam_extra,
metadata=md1)
assert T2.metadata == md1
assert T1.metadata == md2
tables = sacc.BaseTracer.to_tables([T1, T2])
D = sacc.BaseTracer.from_tables(tables)
T1a = D['band1']
T2a = D['band2']
assert T1a.metadata == md2
assert T2a.metadata == md1
assert np.all(T1a.bandpass_extra['err1'] == 0.1 * bandpass)
def test_map_tracer():
md1 = {'mac': 'yes', 'cheese': 'of_course', 'quantity': 3}
md2 = {'mac': 'no', 'cheese': 'no'}
ell = np.linspace(2, 1000, 1000)
beam = np.exp(-0.1 * ell * (ell + 1))
err = np.sin(ell * 0.1)
beam_extra = {'err1': err}
T1 = sacc.BaseTracer.make('Map', 'y_milca',
0, ell, beam,
quantity='cmb_tSZ',
beam_extra=beam_extra,
metadata=md1)
T2 = sacc.BaseTracer.make('Map', 'y_nilc',
0, ell, beam,
quantity='cmb_kSZ',
beam_extra=beam_extra,
metadata=md2)
assert T1.metadata == md1
assert T2.metadata == md2
tables = sacc.BaseTracer.to_tables([T1, T2])
D = sacc.BaseTracer.from_tables(tables)
T1a = D['y_milca']
T2a = D['y_nilc']
assert T1a.metadata == md1
assert T2a.metadata == md2
assert np.all(T1a.beam_extra['err1'] == err)
def test_nz_tracer():
md1 = {'potato': 'if_necessary', 'answer': 42, 'height': 1.83}
md2 = {'potato': 'never'}
z = np.arange(0., 1., 0.01)
Nz1 = 1*z # not a sensible N(z)!
Nz2 = 2*z # not a sensible N(z)!
Nz3 = 3*z
Nz4 = 4*z
more_nz = {'v1': Nz3, 'v2': Nz4}
T1 = sacc.BaseTracer.make('NZ', 'tracer1', z, Nz1,
quantity='galaxy_density',
extra_columns=more_nz,
spin=0,
metadata=md1)
T2 = sacc.BaseTracer.make('NZ', 'tracer2', z, Nz2,
quantity='galaxy_shear',
spin=2,
metadata=md2)
assert T1.metadata == md1
assert T2.metadata == md2
tables = sacc.BaseTracer.to_tables([T1, T2])
D = sacc.BaseTracer.from_tables(tables)
T1a = D['tracer1']
T2a = D['tracer2']
assert T1a.metadata == md1
assert T2a.metadata == md2
assert np.all(T1a.extra_columns['v1'] == Nz3)
assert np.all(T1a.extra_columns['v2'] == Nz4)
def test_mixed_tracers():
md1 = {'potato': 'never'}
md2 = {'rank': 'duke'}
md3 = {'rank': 'earl', 'robes': 78}
z = np.arange(0., 1., 0.01)
Nz1 = 1*z # not a sensible N(z)!
Nz2 = 2*z
T1 = sacc.BaseTracer.make('NZ', 'tracer1', z, Nz1,
quantity='galaxy_convergence')
T2 = sacc.BaseTracer.make('NZ', 'tracer2', z, Nz2,
quantity='galaxy_shear', metadata=md1)
M1 = sacc.BaseTracer.make("Misc", "sample1", metadata=md2)
M2 = sacc.BaseTracer.make("Misc", "sample2", metadata=md3)
tables = sacc.BaseTracer.to_tables([T1, M1, T2, M2])
recovered = sacc.BaseTracer.from_tables(tables)
assert recovered['sample1'].metadata['rank'] == 'duke'
assert recovered['sample2'].metadata['robes'] == 78
assert np.all(recovered['tracer1'].nz == Nz1)
assert recovered['tracer2'].metadata['potato'] == 'never'
def test_inverses():
N = 25
C = np.random.uniform(0, 1, size=(N, N))
C = (C+C.T) + np.eye(N)*20
M1 = sacc.BaseCovariance.make(C)
assert M1.size == N
invC = M1.inverse
ii = np.dot(invC, C)
assert np.allclose(ii, np.eye(N))
blocks = [np.random.uniform(0, 1, size=(5, 5))
for i in range(5)]
for b in blocks:
b += b.T + np.eye(5)*20
M2 = sacc.BaseCovariance.make(blocks)
assert M2.size == N
M2dense = np.zeros((N, N))
for i in range(5):
M2dense[i*5:i*5+5, i*5:i*5+5] = blocks[i]
invC2 = M2.inverse
ii = np.dot(invC2, M2dense)
assert np.allclose(ii, np.eye(N))
d = abs(np.random.uniform(0, 1, size=N))+1
M3 = sacc.BaseCovariance.make(d)
assert M3.size == N
invC3 = M3.inverse
assert np.count_nonzero(invC3 - np.diag(np.diagonal(invC3))) == 0
assert np.allclose(invC3.diagonal() * d, 1)
def test_data_point():
from sacc.data_types import DataPoint
dt = sacc.data_types.standard_types.galaxy_shearDensity_cl_e
value = 13.4
tracers = ('aaa', 'bbb')
tags = {'ell': 12, 'theta': 14.3}
d = DataPoint(dt, tracers, value, **tags)
s = repr(d)
d2 = eval(s)
assert d.tracers == d2.tracers
assert d.tags == d2.tags
assert d.data_type == d2.data_type
assert d.value == d2.value
def test_keep_remove():
s = sacc.Sacc()
# Tracer
z = np.arange(0., 1.0, 0.01)
nz = (z-0.5)**2/0.1**2
s.add_tracer('NZ', 'source_0', z, nz)
s.add_tracer('NZ', 'source_1', z, nz,
quantity='galaxy_shear', spin=2)
s.add_tracer('NZ', 'source_2', z, nz,
quantity='cluster_density')
for i in range(20):
ee = 0.1 * i
tracers = ('source_0', 'source_0')
s.add_data_point(sacc.standard_types.galaxy_shear_cl_ee,
tracers, ee, ell=10.0*i)
for i in range(20):
bb = 0.1 * i
tracers = ('source_1', 'source_1')
s.add_data_point(sacc.standard_types.galaxy_shear_cl_bb,
tracers, bb, ell=10.0*i)
for i in range(20):
ee = 0.1 * i
tracers = ('source_2', 'source_2')
s.add_data_point(sacc.standard_types.galaxy_shear_cl_ee,
tracers, ee, ell=10.0*i)
# Select by data type
s2 = s.copy()
s2.keep_selection(data_type=sacc.standard_types.galaxy_shear_cl_bb)
assert all(d.data_type == sacc.standard_types.galaxy_shear_cl_bb
for d in s2.data)
assert len(s2) == 20
# From multiple tracers
s2 = s.copy()
s2.keep_selection(data_type=sacc.standard_types.galaxy_shear_cl_ee)
assert all(d.data_type == sacc.standard_types.galaxy_shear_cl_ee
for d in s2.data)
assert len(s2) == 40
# Test removing a single tracer
s2 = s.copy()
s2.remove_selection(tracers=('source_1', 'source_1'))
for i, d in enumerate(s2.data):
if i < 20:
assert d.tracers == ('source_0', 'source_0')
else:
assert d.tracers == ('source_2', 'source_2')
assert all(d.data_type == sacc.standard_types.galaxy_shear_cl_ee
for d in s2.data)
assert len(s2) == 40
# Test selecting by tag
s2 = s.copy()
s2.remove_selection(ell__lt=55)
ell = s2.get_tag('ell')
for e in ell:
assert e > 55
s2 = s.copy()
s2.keep_selection(ell__lt=55)
ell = s2.get_tag('ell')
for e in ell:
assert e < 55
# Cutting just by index
s2 = s.copy()
ind = s2.indices(tracers=('source_1', 'source_1'))
assert (ind == np.arange(20, 40)).all()
# multiple selections
s2 = s.copy()
ind = s2.indices(tracers=('source_2', 'source_2'), ell__lt=45)
assert len(ind) == 5
def test_cutting_block_cov():
covmat = [np.random.uniform(size=(50, 50)),
np.random.uniform(size=(100, 100)),
np.random.uniform(size=(150, 150))]
C = sacc.covariance.BaseCovariance.make(covmat)
ind = list(range(50))
C2 = C.keeping_indices(np.arange(50))
assert C2.size == len(ind)
assert np.allclose(C2.get_block(ind), covmat[0])
def test_cutting_block_cov2():
covmat = [np.random.uniform(size=(50, 50)),
np.random.uniform(size=(100, 100)),
np.random.uniform(size=(150, 150))]
C = sacc.covariance.BaseCovariance.make(covmat)
ind = list(range(50,150))
C2 = C.keeping_indices(np.arange(50,150))
assert C2.size == len(ind)
assert np.allclose(C2.get_block(range(100)), covmat[1])
def test_cutting_full_cov():
covmat = np.random.uniform(size=(100, 100))
C = sacc.covariance.BaseCovariance.make(covmat)
ind = np.arange(10, dtype=int)
C2 = C.keeping_indices(ind)
assert np.allclose(C2.get_block(ind),
covmat[:10, :10])
def test_cutting_diag_cov():
diag = np.random.uniform(size=(100,))
C = sacc.covariance.BaseCovariance.make(diag)
ind = np.arange(20, dtype=int)
C2 = C.keeping_indices(ind)
assert np.allclose(C2.get_block(ind).diagonal(), diag[:20])
def test_parse_data_names():
for name in sacc.data_types.required_tags_verbose:
sources, props, stat, sub = sacc.parse_data_type_name(name)
name2 = sacc.build_data_type_name(sources, props, stat, sub)
assert name == name2
def test_bandpower_window():
nb = 20
nl = 200
dl = nl // nb
ells = np.arange(nl)
w = np.zeros([nb, nl])
for i in range(nb):
w[i, i*dl: (i+1)*dl] = 1./dl
W1 = [sacc.BandpowerWindow(ells, w.T)]
tables = sacc.BandpowerWindow.to_tables(W1)
W2 = sacc.BandpowerWindow.from_tables(tables)
for w1 in W1:
w2 = W2[id(w1)]
assert np.all(w1.values == w2.values)
assert np.all(w1.weight.flatten() == w2.weight.flatten())
def test_tophat_window():
edges = np.arange(10) * 10
W1 = [sacc.TopHatWindow(edges[:-1], edges[1:])]
tables = sacc.TopHatWindow.to_tables(W1)
W2 = sacc.TopHatWindow.from_tables(tables)
for w1 in W1:
w2 = W2[id(w1)]
assert np.all(w1.min == w2.min)
assert np.all(w1.max == w2.max)
def test_log_window():
edges = (np.arange(10) + 1) * 10
W1 = [sacc.LogTopHatWindow(edges[:-1], edges[1:])]
tables = sacc.LogTopHatWindow.to_tables(W1)
W2 = sacc.LogTopHatWindow.from_tables(tables)
for w1 in W1:
w2 = W2[id(w1)]
assert np.all(w1.min == w2.min)
assert np.all(w1.max == w2.max)
def test_concatenate_covariance():
v1 = np.array([1., 2., 3.])
v2 = np.array([4.])
A = sacc.BaseCovariance.make(v1)
B = sacc.BaseCovariance.make(v2)
C = sacc.covariance.concatenate_covariances(A, B)
assert isinstance(C, sacc.covariance.DiagonalCovariance)
assert np.allclose(C.diag, [1, 2, 3, 4])
v1 = np.array([2.])
v2 = np.array([[3., 0.1], [0.1, 3]])
A = sacc.BaseCovariance.make(v1)
B = sacc.BaseCovariance.make(v2)
C = sacc.covariance.concatenate_covariances(A, B)
assert isinstance(C, sacc.covariance.BlockDiagonalCovariance)
test_C = np.array([
[2.0, 0.0, 0.0],
[0.0, 3.0, 0.1],
[0.0, 0.1, 3.0]]
)
assert np.allclose(C.dense, test_C)
v1 = np.array([[2.0, 0.2, ],
[0.2, 3.0, ]])
v2 = np.array([[4.0, -0.2, ],
[-0.2, 5.0, ]])
test_C = np.array([[2.0, 0.2, 0.0, 0.0],
[0.2, 3.0, 0.0, 0.0],
[0.0, 0.0, 4.0, -0.2],
[0.0, 0.0, -0.2, 5.0]])
A = sacc.BaseCovariance.make(v1)
B = sacc.BaseCovariance.make(v2)
C = sacc.covariance.concatenate_covariances(A, B)
assert isinstance(C, sacc.covariance.BlockDiagonalCovariance)
assert np.allclose(C.dense, test_C)
def test_concatenate_data():
s1 = sacc.Sacc()
# Tracer
z = np.arange(0., 1.0, 0.01)
nz = (z-0.5)**2/0.1**2
s1.add_tracer('NZ', 'source_0', z, nz)
for i in range(20):
ee = 0.1 * i
tracers = ('source_0', 'source_0')
s1.add_data_point(sacc.standard_types.galaxy_shear_cl_ee,
tracers, ee, ell=10.0*i)
s2 = sacc.Sacc()
# Tracer
z = np.arange(0., 1.0, 0.01)
nz = (z-0.5)**2/0.1**2
s2.add_tracer('NZ', 'source_0', z, nz,
quantity='galaxy_shear', spin=2)
for i in range(20):
ee = 0.1 * i
tracers = ('source_0', 'source_0')
s2.add_data_point(sacc.standard_types.galaxy_shear_cl_ee,
tracers, ee, ell=10.0*i, label='xxx')
# same tracer
s3 = sacc.concatenate_data_sets(s1, s2, same_tracers=['source_0'])
assert ['source_0'] == list(s3.tracers.keys())
# check data points in right order
for i in range(20):
assert s3.data[i].get_tag('ell') == 10.0*i
assert s3.data[i+20].get_tag('ell') == 10.0*i
assert s3.data[i].get_tag('label') is None
assert s3.data[i+20].get_tag('label') == 'xxx'
t1 = s3.data[i].tracers[0]
t2 = s3.data[i+20].tracers[0]
assert t1 == 'source_0'
assert t1 == t2
# To make sure the first 'source_0' tracer is used and not rewritten
        assert s3.get_tracer(t1).quantity == 'generic'
# name clash
with pytest.raises(ValueError):
sacc.concatenate_data_sets(s1, s2)
s3 = sacc.concatenate_data_sets(s1, s2, labels=['1', '2'])
assert 'source_0_1' in s3.tracers
assert 'source_0_2' in s3.tracers
assert len(s3) == len(s1) + len(s2)
# check data points in right order
for i in range(20):
assert s3.data[i].get_tag('ell') == 10.0*i
assert s3.data[i+20].get_tag('ell') == 10.0*i
assert s3.data[i].get_tag('label') == '1'
assert s3.data[i+20].get_tag('label') == 'xxx_2'
t1 = s3.data[i].tracers[0]
t2 = s3.data[i+20].tracers[0]
assert t1 == 'source_0_1'
assert t2 == 'source_0_2'
s3.get_tracer(t1)
s3.get_tracer(t2)
# labels + same_tracers
s4 = sacc.concatenate_data_sets(s3, s3, labels=['x', 'y'],
same_tracers=['source_0_1'])
trs = ['source_0_1', 'source_0_2_x', 'source_0_2_y']
assert trs == list(s4.tracers.keys())
assert s4.mean.size == 2 * s3.mean.size
def test_io():
s = sacc.Sacc()
# Tracer
z = np.arange(0., 1.0, 0.01)
nz = (z-0.5)**2/0.1**2
s.add_tracer('NZ', 'source_0', z, nz)
for i in range(20):
ee = 0.1 * i
tracers = ('source_0', 'source_0')
s.add_data_point(sacc.standard_types.galaxy_shear_cl_ee,
tracers, ee, ell=10.0*i)
with tempfile.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, 'test.sacc')
s.save_fits(filename)
s2 = sacc.Sacc.load_fits(filename)
assert len(s2) == 20
mu = s2.get_mean(sacc.standard_types.galaxy_shear_cl_ee)
for i in range(20):
assert mu[i] == 0.1 * i
def test_io_maps_bpws():
s = sacc.Sacc()
n_ell = 10
d_ell = 100
n_ell_large = n_ell * d_ell
ell = np.linspace(2, 1000, n_ell)
c_ell = 1./(ell+1)**3
beam = np.exp(-0.1 * ell * (ell+1))
nu = np.linspace(30., 60., 100)
bandpass = np.ones(100)
z = np.arange(0., 1.0, 0.01)
nz = (z-0.5)**2/0.1**2
# Tracer
s.add_tracer('NZ', 'gc', z, nz)
s.add_tracer('NuMap', 'cmbp', 2, nu, bandpass, ell, beam)
s.add_tracer('Map', 'sz', 0, ell, beam)
# Window
ells_large = np.arange(n_ell_large)
window_single = np.zeros([n_ell, n_ell_large])
for i in range(n_ell):
window_single[i, i * d_ell: (i + 1) * d_ell] = 1.
wins = sacc.BandpowerWindow(ells_large, window_single.T)
s.add_ell_cl('cl_00', 'gc', 'gc', ell, c_ell, window=wins)
s.add_ell_cl('cl_0e', 'gc', 'cmbp', ell, c_ell, window=wins)
s.add_ell_cl('cl_00', 'gc', 'sz', ell, c_ell, window=wins)
with tempfile.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, 'test.sacc')
s.save_fits(filename)
s2 = sacc.Sacc.load_fits(filename)
assert len(s2) == 30
l, cl, ind = s2.get_ell_cl('cl_00', 'gc', 'sz',
return_ind=True)
w = s2.get_bandpower_windows(ind)
assert np.all(cl == c_ell)
assert w.weight.shape == (n_ell_large, n_ell)
@pytest.mark.parametrize("vv,ncl,ntr",
[('0.2.0', 2, 2),
('0.3.0', 3, 2),
('0.4.2', 6, 5)])
def test_legacy_format(vv, ncl, ntr):
print(vv, ncl, ntr)
local_file_name = get_from_wiki(
f'https://github.com/LSSTDESC/sacc/wiki/legacy_files/dummy_v{vv}.fits')
s = sacc.Sacc.load_fits(local_file_name)
assert len(s.mean) == ncl * 100
assert len(s.tracers) == ntr
|
py | 7dff20c1ecd9299028aec1982d31a6cc9fe54faf | from tests.integration.base import DBTIntegrationTest, FakeArgs, use_profile
class TestSelectionExpansion(DBTIntegrationTest):
@property
def schema(self):
return "test_incremental_schema"
@property
def models(self):
return "models"
@property
def project_config(self):
return {
"config-version": 2,
"test-paths": ["tests"]
}
def list_tests_and_assert(self, include, exclude, expected_tests):
list_args = ['ls', '--resource-type', 'test']
if include:
list_args.extend(('--select', include))
if exclude:
list_args.extend(('--exclude', exclude))
listed = self.run_dbt(list_args)
print(listed)
assert len(listed) == len(expected_tests)
test_names = [name.split('.')[-1] for name in listed]
assert sorted(test_names) == sorted(expected_tests)
def run_tests_and_assert(
self, include, exclude, expected_tests, compare_source, compare_target
):
run_args = ['run']
if include:
run_args.extend(('--models', include))
results_one = self.run_dbt(run_args)
results_two = self.run_dbt(run_args)
self.assertEqual(len(results_one), 3)
self.assertEqual(len(results_two), 3)
test_args = ['test']
if include:
test_args.extend(('--models', include))
if exclude:
test_args.extend(('--exclude', exclude))
results = self.run_dbt(test_args)
tests_run = [r.node.name for r in results]
assert len(tests_run) == len(expected_tests)
assert sorted(tests_run) == sorted(expected_tests)
self.assertTablesEqual(compare_source, compare_target)
def run_incremental_ignore(self):
select = 'model_a incremental_ignore incremental_ignore_target'
compare_source = 'incremental_ignore'
compare_target = 'incremental_ignore_target'
exclude = None
expected = [
'select_from_a',
'select_from_incremental_ignore',
'select_from_incremental_ignore_target',
'unique_model_a_id',
'unique_incremental_ignore_id',
'unique_incremental_ignore_target_id'
]
self.list_tests_and_assert(select, exclude, expected)
self.run_tests_and_assert(select, exclude, expected, compare_source, compare_target)
def run_incremental_append_new_columns(self):
select = 'model_a incremental_append_new_columns incremental_append_new_columns_target'
compare_source = 'incremental_append_new_columns'
compare_target = 'incremental_append_new_columns_target'
exclude = None
expected = [
'select_from_a',
'select_from_incremental_append_new_columns',
'select_from_incremental_append_new_columns_target',
'unique_model_a_id',
'unique_incremental_append_new_columns_id',
'unique_incremental_append_new_columns_target_id'
]
self.list_tests_and_assert(select, exclude, expected)
self.run_tests_and_assert(select, exclude, expected, compare_source, compare_target)
def run_incremental_append_new_columns_remove_one(self):
select = 'model_a incremental_append_new_columns_remove_one incremental_append_new_columns_remove_one_target'
compare_source = 'incremental_append_new_columns_remove_one'
compare_target = 'incremental_append_new_columns_remove_one_target'
exclude = None
expected = [
'select_from_a',
'select_from_incremental_append_new_columns_remove_one',
'select_from_incremental_append_new_columns_remove_one_target',
'unique_model_a_id',
'unique_incremental_append_new_columns_remove_one_id',
'unique_incremental_append_new_columns_remove_one_target_id'
]
self.run_tests_and_assert(select, exclude, expected, compare_source, compare_target)
def run_incremental_sync_all_columns(self):
select = 'model_a incremental_sync_all_columns incremental_sync_all_columns_target'
compare_source = 'incremental_sync_all_columns'
compare_target = 'incremental_sync_all_columns_target'
exclude = None
expected = [
'select_from_a',
'select_from_incremental_sync_all_columns',
'select_from_incremental_sync_all_columns_target',
'unique_model_a_id',
'unique_incremental_sync_all_columns_id',
'unique_incremental_sync_all_columns_target_id'
]
self.list_tests_and_assert(select, exclude, expected)
self.run_tests_and_assert(select, exclude, expected, compare_source, compare_target)
def run_incremental_fail_on_schema_change(self):
select = 'model_a incremental_fail'
self.run_dbt(['run', '--models', select, '--full-refresh'])
results = self.run_dbt(['run', '--models', select], expect_pass=False)
self.assertIn('Compilation Error', results[1].message)
@use_profile('redshift')
def test__redshift__run_incremental_ignore(self):
self.run_incremental_ignore()
@use_profile('redshift')
def test__redshift__run_incremental_append_new_columns(self):
self.run_incremental_append_new_columns()
self.run_incremental_append_new_columns_remove_one()
@use_profile('redshift')
def test__redshift__run_incremental_sync_all_columns(self):
self.run_incremental_sync_all_columns()
@use_profile('redshift')
def test__redshift__run_incremental_fail_on_schema_change(self):
self.run_incremental_fail_on_schema_change()
|
py | 7dff21be9f15dd548d1d81e9f664d25c7d2913f5 | #!/bin/python
""" Healthcheck script to run inside docker
Example of usage in a Dockerfile
```
COPY --chown=scu:scu docker/healthcheck.py docker/healthcheck.py
HEALTHCHECK --interval=30s \
--timeout=30s \
--start-period=1s \
--retries=3 \
CMD python3 docker/healthcheck.py http://localhost:8000/
```
Q&A:
1. why not to use curl instead of a python script?
- SEE https://blog.sixeyed.com/docker-healthchecks-why-not-to-use-curl-or-iwr/
"""
import os
import sys
from urllib.request import urlopen
SUCCESS, UNHEALTHY = 0, 1
# Disabled if the service boots with a debugger (e.g. debug, pdb-debug, debug-ptvsd, etc.)
ok = "debug" in os.environ.get("SC_BOOT_MODE", "").lower()
# Queries host
# pylint: disable=consider-using-with
ok = (
ok
or urlopen(
"{host}{baseurl}".format(
host=sys.argv[1], baseurl=os.environ.get("SIMCORE_NODE_BASEPATH", "")
) # adds a base-path if defined in environ
).getcode()
== 200
)
sys.exit(SUCCESS if ok else UNHEALTHY)
|
py | 7dff21c71c2d46687af98aa15be782130a3f8cb6 | import numpy as np
np.random.seed(0)
def sigmoid(xvec):
""" Compute the sigmoid function """
    # Cap -xvec to avoid overflow in exp()
    # Underflow is okay, since it gets set to zero
if isinstance(xvec, (list, np.ndarray)):
xvec[xvec < -100] = -100
elif xvec < -100:
xvec = -100
vecsig = 1.0 / (1.0 + np.exp(np.negative(xvec)))
return vecsig
def gen1(length, realization, weight_mu, weight_sigma, feature_num=25,cts_A=True, cts_B=True, cts_C=True):
'''
    :param length: lengths of A, B and C
    :param realization: number of realizations (rows of X) generated per sample
    :param weight_mu: per-feature 3-vectors of weights combining mean(A), mean(B), mean(C) into each feature's mean
    :param weight_sigma: per-feature 3-vectors of weights used to build each feature's sigma
    :param feature_num: number of features in X
    :param cts_A:
    :param cts_B:
    :param cts_C:
    :return: [X, t, yf, ycf]
'''
# mu and sigma for generating Ai,Bi,Ci
mu_a = np.random.normal(5, 20, length[0])
sigma_a = np.random.gamma(2, 0.5, length[0])
mu_b = np.random.normal(7, 15, length[1])
sigma_b = np.random.gamma(2, 0.5, length[1])
mu_c = np.random.normal(3, 17, length[2])
sigma_c = np.random.gamma(2, 0.5, length[2])
mu = [mu_a, mu_b, mu_c]
sigma = [sigma_a, sigma_b, sigma_c]
# shape_A=(length[0],1)
A = np.random.normal(mu[0], sigma[0], length[0])
B = np.random.normal(mu[1], sigma[1], length[1])
C = np.random.normal(mu[2], sigma[2], length[2])
############################################## 2
# generate 25 different sets of weights for each X_i
X_mu = []
X_sigma = []
# average Ai,Bi,Ci
average = [np.average(A), np.average(B), np.average(C)]
    for i in range(feature_num):
X_mu.append( np.dot(average,weight_mu[i]) )
# try to make sigma small !!!
X_sigma.append( np.dot(average,weight_sigma[i]))
# normalize sigma
X_sigma = np.divide(X_sigma,np.sqrt((np.sum(np.asarray(X_sigma)**2))))
X_sigma = np.abs(X_sigma)
X = []
# for 100 realization
    for j in range(realization):
X.append(np.random.normal(X_mu, X_sigma, feature_num))
# X is a matrix 100row, 25 column
############################################## 3
# weights for T
# random t
t = []
# global sss
    for i in range(realization):
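        # for each realization, draw random weights and assign treatment
        # ti ~ Bernoulli(p) with p = sigmoid(weights . [mean(A), mean(B)])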
weights = [np.random.normal(-0.3,2,1), np.random.normal(0.7,1,1)]
p = sigmoid(np.dot(np.reshape(weights,(2,)), average[0:2]))
ti = np.random.binomial(1, p, 1)
t.append( ti )
# sss+=ti
# for the same patient we have the same t
#t = [t.astype('int64')]*realization
############################################## 4
yf = []
ycf = []
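    # factual (yf) and counterfactual (ycf) outcomes are gamma draws whose shape
    # and scale are built from the means and variances of B and C, with weights
    # that depend on the treatment arm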
for i in range(realization):
if t[0] == 0:
weights_alpha = [7, 7]
weights_beta = [0.4, 0.04]
a = np.dot([np.average(B), np.average(C)], weights_alpha)
b = np.dot([np.var(B), np.var(C)], weights_beta)
alpha = np.abs(np.divide(np.square(a), b))
beta = np.abs(np.divide(b, a))
yf.append(np.random.gamma(alpha, beta, 1) - 1)
ycf.append(np.random.gamma(alpha, beta, 1))
else:
weights_alpha = [5, 0.5]
weights_beta = [0.5, 0.07]
a = np.dot([np.average(B), np.average(C)], weights_alpha)
b = np.dot([np.var(B), np.var(C)], weights_beta)
alpha = np.abs(np.divide(np.square(a), b))
beta = np.abs(np.divide(b, a))
yf.append(np.random.gamma(alpha, beta, 1)-1)
ycf.append(np.random.gamma(alpha, beta, 1))
# X is 100 row * 25 colomn
# t is 1*100, yf is 1*100, ycf is 1*100
return [X, t, yf, ycf]
def generate_full_data(sample_size,realization):
sample_x = []
sample_t = []
sample_yf = []
sample_ycf = []
mu0 = []
mu1 = []
# generate random weights
weight_mu = []
weight_sigma = []
for j in range(25):# feature number
v = np.random.uniform(-5, 5, 3)
weight_mu.append(np.divide(v, np.sum(v)))
v = np.random.uniform(0, 1, 3)
weight_sigma.append(np.divide(v, np.sum(v)))
for i in range(sample_size):
[X, t, yf, ycf] = gen1([10,15,20],realization,weight_mu,weight_sigma )
sample_x.append(X)
sample_t.append(t)
sample_yf.append(yf)
sample_ycf.append(ycf)
if t == 1: # is this a problem?
mu1.append(yf)
mu0.append(ycf)
else:
mu1.append(ycf)
mu0.append(yf)
sample_x = np.reshape(sample_x, (sample_size, 25, realization))
sample_t = np.reshape(sample_t, (sample_size, realization))
sample_yf = np.reshape(sample_yf, (sample_size, realization))
sample_ycf = np.reshape(sample_ycf, (sample_size, realization))
mu0 = np.reshape(mu0,(sample_size,realization))
mu1 = np.reshape(mu1,(sample_size,realization))
ate = np.array(4)
yadd = np.array(0)
ymul = np.array(1)
return [sample_x, sample_t, sample_yf, sample_ycf, mu0, mu1, ate, yadd, ymul]
# sss = 0
realization = 100
#q = gen1([5,5,5])
q = generate_full_data(1000,realization)
#print q[2]
np.savez('./data/synthetic_train_rtsy_%d.npz'%realization, x=q[0], t= q[1], yf=q[2], ycf=q[3], mu0=q[4],
mu1=q[5], ate=q[6], yadd=q[7], ymul=q[8])
# print sss
# sss = 0
np.random.seed(1)
q = generate_full_data(1000,realization)
np.savez('./data/synthetic_test_rtsy_%d.npz'%realization, x=q[0], t= q[1], yf=q[2], ycf=q[3], mu0=q[4],
mu1=q[5], ate=q[6], yadd=q[7], ymul=q[8])
# print sss
|
py | 7dff21d06f9c2d2d7de59a2275406a55103596a6 | """
WSGI config for paintapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'paintapp.settings')
application = get_wsgi_application()
|
py | 7dff22882b1d297d65a610ffd73c32094550ed34 | from django.urls import path
from .views import *
urlpatterns = [
path('api/users', UserList.as_view(), name = 'users_list'),
#path('api/create_users', UserCreate.as_view(), name = 'user_create'),
] |
py | 7dff228b40fa48265d4658c8846a75777bd6f022 | import kurisu.nyaa, kurisu.tips, kurisu.prefs
import sqlite3, time, os.path
from discord.ext import commands
class SteinsGate:
"""Команды, связанные с Steins;Gate"""
client = None
def __init__(self, bot):
self.client = bot
@commands.command()
async def tips(self, *tip: str):
"""Поиск по TIPS Steins;Gate.
Аргументы:
-----------
tip: `str`
TIP, который нужно найти.
"""
tip = ' '.join(tip)
await kurisu.tips.search(tip, self.client)
@commands.command()
async def sg0(self, episode: int):
"""Выводит список .torrent файлов.
Аргументы:
-----------
episide: `int`
Номер запрашиваемого эпизода.
"""
tmpEmbed = kurisu.prefs.Embeds.new('SG0')
# Nyaa
res = {}
conn = sqlite3.connect('torr_db.sqlite3')
cursor = conn.cursor()
cursor.execute("select title from episodes where id = %s" % episode)
ep = cursor.fetchall()
if len(ep) == 0:
            await self.client.say('```There is no such episode, and there never will be.```')
return "Fuck"
for dl in kurisu.nyaa.nyaa_dls:
cursor.execute('select dl, link, seeders, leechers from torrents where episode = %s and dl = "%s"' % (episode, dl))
res[dl] = cursor.fetchall()
conn.close()
nyaaField = []
for tmp in list(res.values()):
if len(tmp) > 0:
tmp = tmp[0]
nyaaField.append('[%s](https://nyaa.si/download/%s.torrent) | %s/%s' % (tmp[0], tmp[1], tmp[2], tmp[3]))
if len(nyaaField) == 0:
            nyaaField.append('The episode is not on nyaa.si yet')
tmpEmbed.add_field(name='nyaa.si', value='\n'.join(nyaaField), inline = True)
tmpEmbed.title = ep[0][0]
pt = kurisu.prefs.parse_time(time.localtime(os.path.getmtime('torr_db.sqlite3')))
pt = '%s в %s' % (pt[0], pt[1][:-3])
        tmpEmbed.set_footer(text='Last DB update: %s' % pt)
await self.client.say(embed=tmpEmbed)
@commands.command()
async def tips0(self, *tip: str):
"""Поиск по TIPS Steins;Gate 0.
Аргументы:
-----------
tip: `str`
TIP, который нужно найти.
"""
tip = ' '.join(tip)
await kurisu.tips.search(tip, self.client, 0)
def setup(bot):
bot.add_cog(SteinsGate(bot))
|
py | 7dff23151386dc04d072411dc694bc6ad4fb5f55 | # (c) Copyright 2014-2016 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
from keystoneauth1 import loading as kaloading
from freezerclient import utils
from freezerclient.v1.managers import actions
from freezerclient.v1.managers import backups
from freezerclient.v1.managers import clients
from freezerclient.v1.managers import jobs
from freezerclient.v1.managers import sessions
FREEZER_SERVICE_TYPE = 'backup'
class Client(object):
"""Client for the OpenStack Disaster Recovery v1 API.
"""
def __init__(self, token=None, username=None, password=None,
tenant_name=None, auth_url=None, session=None, endpoint=None,
endpoint_type=None, opts=None, project_name=None,
user_domain_name=None, user_domain_id=None,
project_domain_name=None, project_domain_id=None,
cert=None, cacert=None, insecure=False, project_id=None):
"""
Initialize a new client for the Disaster Recovery v1 API.
:param token: keystone token
:param username: openstack username
:param password: openstack password
:param tenant_name: tenant
:param auth_url: keystone-api endpoint
:param session: keystone.Session
:param endpoint: freezer-api endpoint
:param endpoint_type: type of endpoint
:param opts: a namespace to store all keystone data
:param project_name: only for version 3
:param user_domain_name: only for version 3
:param user_domain_id: only for version 3
:param project_domain_name: only for version 3
:param project_domain_id: only for version 3
:param insecure: The verification arguments to pass to requests.
These are of the same form as requests expects,
so True or False to verify (or not) against system
certificates or a path to a bundle or CA certs to
check against or None for requests to
attempt to locate and use certificates. (optional,
defaults to True)
:param cert: Path to cert
:param project_id: only for version 3
:return: freezerclient.Client
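        Illustrative example (a minimal sketch; the import path and the
        credential values below are assumptions, not library defaults):
            from freezerclient.v1.client import Client
            client = Client(username='admin',
                            password='secret',
                            project_name='demo',
                            auth_url='http://keystone:5000/v3')
            # resource managers are then available as client.jobs,
            # client.backups, client.sessions, client.actions and client.clients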
"""
self.project_id = project_id
if opts is None:
self.opts = utils.Namespace({})
self.opts.os_token = token or None
self.opts.os_username = username or None
self.opts.os_password = password or None
self.opts.os_tenant_name = tenant_name or None
self.opts.os_auth_url = auth_url or None
self.opts.os_backup_url = endpoint or None
self.opts.os_endpoint_type = endpoint_type or None
self.opts.os_project_name = project_name or None
self.opts.os_project_id = project_id or None
self.opts.os_user_domain_name = user_domain_name or None
self.opts.os_user_domain_id = user_domain_id or None
self.opts.os_project_domain_name = project_domain_name or None
self.opts.os_project_domain_id = project_domain_id or None
self.opts.os_cacert = cacert or None
self.opts.insecure = insecure
self.opts.cert = cert
else:
self.opts = opts
self.cert = cert
self.cacert = cacert or self.opts.os_cacert
self._session = session
verify = self.opts.os_cacert
if self.opts.insecure:
verify = False
self.validate()
self.project_id = project_id or self.get_project_id
self.jobs = jobs.JobManager(self, verify=verify)
self.clients = clients.ClientManager(self, verify=verify)
self.backups = backups.BackupsManager(self, verify=verify)
self.sessions = sessions.SessionManager(self, verify=verify)
self.actions = actions.ActionManager(self, verify=verify)
@utils.CachedProperty
def session(self):
if self._session:
return self._session
auth_type = 'password'
auth_kwargs = {
'auth_url': self.opts.os_auth_url,
'project_id': self.opts.os_project_id,
'project_name': self.opts.os_project_name,
'project_domain_id': self.opts.os_project_domain_id,
'project_domain_name': self.opts.os_project_domain_name,
}
if self.opts.os_username and self.opts.os_password:
auth_kwargs.update({
'username': self.opts.os_username,
'password': self.opts.os_password,
'tenant_name': self.opts.os_tenant_name,
'user_domain_id': self.opts.os_user_domain_id,
'user_domain_name': self.opts.os_user_domain_name,
})
elif self.opts.os_token:
auth_type = 'token'
auth_kwargs.update({
'token': self.opts.os_token,
})
loader = kaloading.get_plugin_loader(auth_type)
auth_plugin = loader.load_from_options(**auth_kwargs)
# Let keystoneauth do the necessary parameter conversions
session = kaloading.session.Session().load_from_options(
auth=auth_plugin, insecure=self.opts.insecure, cacert=self.cacert,
cert=self.cert)
return session
@utils.CachedProperty
def endpoint(self):
if self.opts.os_backup_url:
return self.opts.os_backup_url
else:
auth_ref = self.session.auth.get_auth_ref(self.session)
endpoint = auth_ref.service_catalog.url_for(
service_type=FREEZER_SERVICE_TYPE,
interface=self.opts.os_endpoint_type,
)
return endpoint
@property
def auth_token(self):
return self.session.get_token()
@property
def get_project_id(self):
return self.session.get_project_id()
@utils.CachedProperty
def client_id(self):
return '{0}_{1}'.format(self.session.get_project_id(),
socket.gethostname())
def validate(self):
"""Validate that the client objects gets created correctly.
:return: bool
"""
if not self._session and self.opts.os_auth_url is None:
raise Exception('OS_AUTH_URL should be provided.')
|
py | 7dff24698bf97b348dd3cd7eefe7ac88f63dbb74 | example = {'breakfast': {
'eggs': 1,
'spam': 2
}
} |
py | 7dff249894cd9a4521530dc4ad41f2385099c12d | #Raising Exceptions
def calculate_xfactor(x):
if x <= 0:
raise ValueError("Age Can not be zero or lease than zero")
return 10 / x
else:
print("executed else parth")
try:
calculate_xfactor(-1)
except ValueError as ex:
print(ex) |
py | 7dff2547f9929ea51c008e69d3f4a98ba3c86f2a | # importing required libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# import GaussianNB
from sklearn.naive_bayes import GaussianNB
import cv2
# import warnings to remove any type of future warnings
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# reading csv file and extracting class column to y.
dataf = pd.read_csv("Datasetinfectedhealthy.csv")
# drop the identifier columns and separate the label from the features
X = dataf.drop(['imgid','fold num'], axis=1)
y = X['label']
X = X.drop('label', axis=1)
print("\nTraining dataset:-\n")
print(X)
log = pd.read_csv("datasetlog/Datasetunlabelledlog.csv")
log = log.tail(1)
X_ul = log.drop(['imgid','fold num'], axis=1)
print("\nTest dataset:-\n")
print(X_ul)
print("\n*To terminate press (q)*")
#X.plot(kind='scatter',x='feature1',y='feature2')
#plt.show()
Sum = 0
from sklearn.model_selection import train_test_split
'''
from sklearn.model_selection import train_test_split
for n in range(4):
x_train, Xi_test, y_train, yi_test = train_test_split(X, y, test_size=0.52, random_state=60)
if cv2.waitKey(1) == ord('q' or 'Q'): break
svclassifier = SVC(kernel='linear')
svclassifier.fit(x_train, y_train)
pred = svclassifier.predict(X_ul)
'''
for n in range(4):
x_train, Xi_test, y_train, yi_test = train_test_split(X, y, test_size=0.52, random_state=60)
if cv2.waitKey(1) == ord('q' or 'Q'): break
classifier = GaussianNB()
classifier.fit(x_train, y_train)
pred =classifier.predict(X_ul)
Sum = Sum + pred
print(pred)
print("\nprediction: %d" %int(Sum/4))
if(Sum < 2):
print("The leaf is sufficiently healthy!")
else:
print("The leaf is infected!")
print("\nKeypress on any image window to terminate")
#from sklearn.metrics import classification_report, confusion_matrix
#print(classification_report(yi_test,y_pred))
#print "\n Average precision percentage: %.2f" %avg_pred + "%"
cv2.waitKey(0)
|
py | 7dff284ba8c3b8def671f13b064630d09c7a40c6 | # coding: utf-8
"""
ESP Documentation
The Evident Security Platform API (version 2.0) is designed to allow users granular control over their Amazon Web Service security experience by allowing them to review alerts, monitor signatures, and create custom signatures.
OpenAPI spec version: v2_sdk
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class StatSignaturesApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def list_for_stat(self, stat_id, **kwargs):
"""
Get a list of statistics for signatures
        A successful call to this API returns the statistics of all the signatures for the report identified by the stat_id parameter. The report contains the statistics for alerts triggered by those signatures during the selected hour.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_for_stat(stat_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int stat_id: The ID of the stat to retrieve signature statistics for (required)
:param str include: Related objects that can be included in the response: signature, stat See Including Objects for more information.
:param dict(str, str) filter: Filter Params for Searching. Equality Searchable Attributes: [stat_id, type_id]
:param str page: Page Number and Page Size. Number is the page number of the collection to return, size is the number of items to return per page.
:return: PaginatedCollection
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_for_stat_with_http_info(stat_id, **kwargs)
else:
(data) = self.list_for_stat_with_http_info(stat_id, **kwargs)
return data
def list_for_stat_with_http_info(self, stat_id, **kwargs):
"""
Get a list of statistics for signatures
        A successful call to this API returns the statistics of all the signatures for the report identified by the stat_id parameter. The report contains the statistics for alerts triggered by those signatures during the selected hour.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_for_stat_with_http_info(stat_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int stat_id: The ID of the stat to retrieve signature statistics for (required)
:param str include: Related objects that can be included in the response: signature, stat See Including Objects for more information.
:param dict(str, str) filter: Filter Params for Searching. Equality Searchable Attributes: [stat_id, type_id]
:param str page: Page Number and Page Size. Number is the page number of the collection to return, size is the number of items to return per page.
:return: PaginatedCollection
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['stat_id', 'include', 'filter', 'page']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_for_stat" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'stat_id' is set
if ('stat_id' not in params) or (params['stat_id'] is None):
raise ValueError("Missing the required parameter `stat_id` when calling `list_for_stat`")
collection_formats = {}
resource_path = '/api/v2/stats/{stat_id}/signatures.json_api'.replace('{format}', 'json_api')
path_params = {}
if 'stat_id' in params:
path_params['stat_id'] = params['stat_id']
query_params = {}
if 'include' in params:
query_params['include'] = params['include']
header_params = {}
form_params = []
local_var_files = {}
if 'filter' in params:
form_params.append(('filter', params['filter']))
if 'page' in params:
form_params.append(('page', params['page']))
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/vnd.api+json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/vnd.api+json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PaginatedCollection',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def show(self, id, **kwargs):
"""
Show a single Stat Signature
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.show(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Stat Signature ID (required)
:param str include: Related objects that can be included in the response: signature, stat See Including Objects for more information.
:return: StatSignature
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.show_with_http_info(id, **kwargs)
else:
(data) = self.show_with_http_info(id, **kwargs)
return data
def show_with_http_info(self, id, **kwargs):
"""
Show a single Stat Signature
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.show_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Stat Signature ID (required)
:param str include: Related objects that can be included in the response: signature, stat See Including Objects for more information.
:return: StatSignature
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'include']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method show" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `show`")
collection_formats = {}
resource_path = '/api/v2/stats/signatures/{id}.json_api'.replace('{format}', 'json_api')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
if 'include' in params:
query_params['include'] = params['include']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/vnd.api+json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/vnd.api+json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StatSignature',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
py | 7dff285c77745428b41746510ec1bf25e3ca8200 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
import sys
# sys.path.insert(0, os.path.abspath('.'))
from unittest.mock import MagicMock
# Mock modules
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = [
'glfw',
'sdl2',
'sdl2.ext',
'sdl2.video',
'pyglet',
'pyglet.window',
'PyQt5',
'PyQt5.QtCore',
'QtCore',
'QtOpenGL',
'QtWidgets',
'PySide2',
'PySide2.QtCore',
'pywavefront',
'pywavefront.obj',
'trimesh',
]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# -- Project information -----------------------------------------------------
project = 'moderngl-window'
copyright = '2019, Einar Forselv'
author = 'Einar Forselv'
# The short X.Y version
version = '2.1.0'
# The full version, including alpha/beta/rc tags
release = version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinxcontrib.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'moderngl-windowdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'moderngl-window.tex', 'moderngl\\_window Documentation',
'Einar Forselv', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'moderngl-window', 'moderngl-window Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'moderngl-window', 'moderngl-window Documentation',
author, 'moderngl-window', 'A cross platform helper library for ModernGL making window creation and resource loading simple',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
|
py | 7dff28715bb8152ed0340ed60686acdf9a9f5bf6 | # ### 1.5 function that post process the ccs results
import numpy as np
import pandas as pd
# define a function that could calculate the overall annual emission and lock emission
def bau_ccs_post (df):
coal_annual_existing_emission = df.loc[:,('coal_power_annual_emission_existing')].values
coal_annual_new_emission = df.loc[:,('coal_power_annual_emission_new')].values
gas_annual_existing_emission = df.loc[:,('gas_power_annual_emission_existing')].values
gas_annual_new_emission = df.loc[:,('gas_power_annual_emission_new')].values
oil_annual_existing_emission = df.loc[:,('oil_power_annual_emission_existing')].values
oil_annual_new_emission = df.loc[:,('oil_power_annual_emission_new')].values
coal_lock_existing_emission = df.loc[:,('coal_power_lock_emission_existing')].values
coal_lock_new_emission = df.loc[:,('coal_power_lock_emission_new')].values
gas_lock_existing_emission = df.loc[:,('gas_power_lock_emission_existing')].values
gas_lock_new_emission = df.loc[:,('gas_power_lock_emission_new')].values
oil_lock_existing_emission = df.loc[:,('oil_power_lock_emission_existing')].values
oil_lock_new_emission = df.loc[:,('oil_power_lock_emission_new')].values
coal_overall_lock_emission = np.zeros(shape=(36))
gas_overall_lock_emission = np.zeros(shape=(36))
oil_overall_lock_emission = np.zeros(shape=(36))
coal_overall_annual_emission = np.zeros(shape=(36))
gas_overall_annual_emission = np.zeros(shape=(36))
oil_overall_annual_emission = np.zeros(shape=(36))
for i in range(36):
coal_annual_exisitng = coal_annual_existing_emission[i]
gas_annual_exisitng = gas_annual_existing_emission[i]
oil_annual_exisitng = oil_annual_existing_emission[i]
coal_annual_added = 0
gas_annual_added = 0
oil_annual_added = 0
for j in range(i+1):
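            # accumulate the annual emissions of all capacity added up to year i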
coal_annual_new = coal_annual_new_emission[j]
coal_annual_added = coal_annual_added + coal_annual_new
gas_annual_new = gas_annual_new_emission[j]
gas_annual_added = gas_annual_added + gas_annual_new
oil_annual_new = oil_annual_new_emission[j]
oil_annual_added = oil_annual_added + oil_annual_new
coal_overall_annual_emission[i] = coal_annual_exisitng + coal_annual_added
df.loc[:,('coal_annual_emission')] = coal_overall_annual_emission
gas_overall_annual_emission[i] = gas_annual_exisitng + gas_annual_added
df.loc[:,('gas_annual_emission')] = gas_overall_annual_emission
oil_overall_annual_emission[i] = oil_annual_exisitng + oil_annual_added
df.loc[:,('oil_annual_emission')] = oil_overall_annual_emission
for i in range(36):
coal_lock_exisitng = coal_lock_existing_emission[i]
gas_lock_exisitng = gas_lock_existing_emission[i]
oil_lock_exisitng = oil_lock_existing_emission[i]
coal_lock_added = 0
gas_lock_added = 0
oil_lock_added = 0
for j in range(i+1):
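            # capacity added in year j contributes to year i's lock-in emission
            # with a weight that declines linearly by 2.5% per elapsed year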
coal_lock_new = coal_lock_new_emission[j]* (1-0.025*(i-j))
coal_lock_added = coal_lock_added + coal_lock_new
gas_lock_new = gas_lock_new_emission[j]* (1-0.025*(i-j))
gas_lock_added = gas_lock_added + gas_lock_new
oil_lock_new = oil_lock_new_emission[j]* (1-0.025*(i-j))
oil_lock_added = oil_lock_added + oil_lock_new
coal_overall_lock_emission[i] = coal_lock_exisitng + coal_lock_added
df.loc[:,('coal_lock_emission')] = coal_overall_lock_emission
gas_overall_lock_emission[i] = gas_lock_exisitng + gas_lock_added
df.loc[:,('gas_lock_emission')] = gas_overall_lock_emission
oil_overall_lock_emission[i] = oil_lock_exisitng + oil_lock_added
df.loc[:,('oil_lock_emission')] = oil_overall_lock_emission
return df
# define a function that selects the useful columns from the table
def ccs_results (ccs):
ccs_cols = ['year',
'coal_power_capacity_GW','coal_power_capacity_existing','coal_power_capacity_new',
'coal_annual_emission','coal_power_annual_emission_existing','coal_power_annual_emission_new',
'coal_lock_emission','coal_power_lock_emission_existing','coal_power_lock_emission_new',
'gas_power_capacity_GW','gas_power_capacity_existing','gas_power_capacity_new',
'gas_annual_emission','gas_power_annual_emission_existing','gas_power_annual_emission_new',
'gas_lock_emission','gas_power_lock_emission_existing','gas_power_lock_emission_new',
'oil_power_capacity_GW','oil_power_capacity_existing','oil_power_capacity_new',
'oil_annual_emission','oil_power_annual_emission_existing','oil_power_annual_emission_new',
'oil_lock_emission','oil_power_lock_emission_existing','oil_power_lock_emission_new',
'coal_power_capacity_new1','coal_power_annual_emission_new1','coal_power_annual_emission_new1',
'coal_power_capacity_new2','coal_power_annual_emission_new2','coal_power_annual_emission_new2',
'gas_power_capacity_new1','gas_power_annual_emission_new1','gas_power_annual_emission_new1',
'gas_power_capacity_new2','gas_power_annual_emission_new2','gas_power_annual_emission_new2',
'oil_power_capacity_new1','oil_power_annual_emission_new1','oil_power_annual_emission_new1',
'oil_power_capacity_new2','oil_power_annual_emission_new2','oil_power_annual_emission_new2']
ccs = ccs[ccs_cols]
return ccs |
py | 7dff2aadd7014b3c777239c2f822bf4ad5dbb018 | def face_count(imagepath):
import cv2
import sys
cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
image = cv2.imread(imagepath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=3, # to tune the face count
minSize=(30, 30)
)
print("Found {0} faces!".format(len(faces)))
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imshow("Faces found", image)
cv2.waitKey(0)
img_path="F:\\ComputerVision\\Faces_Count\\Images\\crowd-1.jpg"
face_count(img_path)
|
py | 7dff2b9475636e38a7646bcf28e7ade68a405647 |
# Import the backtrader platform
import backtrader as bt
import math
from backtesting.strategies.basic_strategy import BasicStrategy
# Create a Stratey
class SimpleMovingAverage(BasicStrategy):
params = (
# period for the fast Moving Average
('p1', 50),
# period for the slow moving average
('p2', 200),
)
def __init__(self, cash_to_invest_in_asset):
BasicStrategy.__init__(self, cash_to_invest_in_asset)
# For the moving average strategy
sma1 = bt.indicators.SMA(period=self.p.p1)
sma2 = bt.indicators.SMA(period=self.p.p2)
self.lines.signal = sma1 - sma2
def next(self):
# Simply log the closing price of the series from the reference
self.log('Close, %.2f' % self.dataclose[0])
# Check if an order is pending ... if yes, we cannot send a 2nd one
if self.order:
return
# Check if we are in the market
if not self.position:
if self.lines.signal > 0:
# BUY, BUY, BUY!!! (with default parameters)
self.log('BUY CREATE, %.2f' % self.dataclose[0])
# Keep track of the created order to avoid a 2nd order
self.num_stocks_bought = math.ceil(self.amount_money_invested / self.dataclose[0])
self.order = self.buy(size=self.num_stocks_bought)
else:
if self.lines.signal < 0:
# SELL, SELL, SELL!!! (with all possible default parameters)
self.log('SELL CREATE, %.2f' % self.dataclose[0])
# Keep track of the created order to avoid a 2nd order
self.order = self.sell(size=self.num_stocks_bought)
self.num_stocks_bought = 0
|
py | 7dff2d12062c5ef71e0405359576fa02fb79cafd | from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("auth", "0006_require_contenttypes_0002"),
]
operations = [
migrations.CreateModel(
name="Booking",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
("confirmedOn", models.DateTimeField(null=True, blank=True)),
("cancelledOn", models.DateTimeField(null=True, blank=True)),
("datePaid", models.DateTimeField(null=True, blank=True)),
("exempt_of_payment", models.BooleanField(default=False)),
(
"cancelledBy",
models.ForeignKey(
related_name="cancelled_bookings",
blank=True,
to=settings.AUTH_USER_MODEL,
null=True,
on_delete=models.deletion.SET_NULL,
),
),
],
options={"ordering": ["id"]},
),
migrations.CreateModel(
name="BookingOption",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"booking",
models.ForeignKey(
related_name="options",
to="oneevent.Booking",
on_delete=models.deletion.CASCADE,
),
),
],
options={"ordering": ["option__choice__id", "option__id", "id"]},
),
migrations.CreateModel(
name="Choice",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
("title", models.CharField(max_length=64)),
],
options={"ordering": ["id"]},
),
migrations.CreateModel(
name="Event",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
("title", models.CharField(unique=True, max_length=64)),
("start", models.DateTimeField(help_text="Local start date and time")),
(
"end",
models.DateTimeField(
help_text="Local end date and time", null=True, blank=True
),
),
(
"city",
models.CharField(
help_text="Timezone of your event",
max_length=32,
choices=[
("Boston", "Boston"),
("Erding", "Erding"),
("London", "London"),
("Miami", "Miami"),
("Munich", "Munich"),
("Nice", "Nice"),
("Sydney", "Sydney"),
("Toronto", "Toronto"),
("UTC", "UTC"),
],
),
),
("description", models.TextField(blank=True)),
(
"pub_status",
models.CharField(
default="UNPUB",
help_text="Public: Visible and bookable by all; Restricted: "
"Visible and Bookable by invited groups; Private: "
"Visible by participant, bookable by all; "
"Unpublished: Visible by organisers, not bookable; "
"Archived: Not visible, not bookable",
max_length=8,
verbose_name="Publication status",
choices=[
("PUB", "Public"),
("REST", "Restricted"),
("PRIV", "Private"),
("UNPUB", "Unpublished"),
("ARCH", "Archived"),
],
),
),
(
"location_name",
models.CharField(
help_text="Venue of your event",
max_length=64,
null=True,
blank=True,
),
),
("location_address", models.TextField(null=True, blank=True)),
(
"booking_close",
models.DateTimeField(
help_text="Limit date and time for registering",
null=True,
blank=True,
),
),
(
"choices_close",
models.DateTimeField(
help_text="Limit date and time for changing choices",
null=True,
blank=True,
),
),
(
"max_participant",
models.PositiveSmallIntegerField(
default=0,
help_text="Maximum number of participants to this event (0 = "
"no limit)",
),
),
(
"price_for_employees",
models.DecimalField(default=0, max_digits=6, decimal_places=2),
),
(
"price_for_contractors",
models.DecimalField(default=0, max_digits=6, decimal_places=2),
),
(
"price_currency",
models.CharField(
max_length=3,
null=True,
verbose_name="Currency for prices",
blank=True,
),
),
(
"contractors_groups",
models.ManyToManyField(
related_name="contractors_for_event+",
verbose_name="Groups considered as Contractors",
to="auth.Group",
blank=True,
),
),
(
"employees_groups",
models.ManyToManyField(
related_name="employees_for_event+",
verbose_name="Groups considered as Employees",
to="auth.Group",
blank=True,
),
),
(
"organisers",
models.ManyToManyField(
related_name="events_organised",
to=settings.AUTH_USER_MODEL,
blank=True,
),
),
(
"owner",
models.ForeignKey(
related_name="events_owned",
to=settings.AUTH_USER_MODEL,
help_text="Main organiser",
on_delete=models.deletion.PROTECT,
),
),
],
),
migrations.CreateModel(
name="Message",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"category",
models.CharField(
max_length=8,
verbose_name="Reason",
choices=[
("QUERY", "Question"),
("COMMENT", "Comment"),
("BUG", "Bug report"),
("FEAT", "Feature request"),
("ADMIN", "Administration Request"),
],
),
),
("title", models.CharField(max_length=128)),
("text", models.TextField(max_length=2048)),
("created", models.DateTimeField(auto_now_add=True)),
("safe_content", models.BooleanField(default=False)),
(
"sender",
models.ForeignKey(
to=settings.AUTH_USER_MODEL, on_delete=models.deletion.CASCADE
),
),
(
"thread_head",
models.ForeignKey(
related_name="thread",
blank=True,
to="oneevent.Message",
null=True,
on_delete=models.deletion.CASCADE,
),
),
],
options={"ordering": ["-created"]},
),
migrations.CreateModel(
name="Option",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
("title", models.CharField(max_length=256)),
("default", models.BooleanField(default=False)),
(
"choice",
models.ForeignKey(
related_name="options",
to="oneevent.Choice",
on_delete=models.deletion.CASCADE,
),
),
],
options={"ordering": ["choice__id", "id"]},
),
migrations.CreateModel(
name="Session",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
("title", models.CharField(unique=True, max_length=64)),
("start", models.DateTimeField(help_text="Local start date and time")),
(
"end",
models.DateTimeField(
help_text="Local end date and time", null=True, blank=True
),
),
(
"max_participant",
models.PositiveSmallIntegerField(
default=0,
help_text="Maximum number of participants to this session (0 "
"= no limit)",
),
),
(
"event",
models.ForeignKey(
related_name="sessions",
to="oneevent.Event",
on_delete=models.deletion.CASCADE,
),
),
],
options={"ordering": ["event", "title"]},
),
migrations.AddField(
model_name="choice",
name="event",
field=models.ForeignKey(
related_name="choices",
to="oneevent.Event",
on_delete=models.deletion.CASCADE,
),
),
migrations.AddField(
model_name="bookingoption",
name="option",
field=models.ForeignKey(
blank=True,
to="oneevent.Option",
null=True,
on_delete=models.deletion.CASCADE,
),
),
migrations.AddField(
model_name="booking",
name="event",
field=models.ForeignKey(
related_name="bookings",
to="oneevent.Event",
on_delete=models.deletion.CASCADE,
),
),
migrations.AddField(
model_name="booking",
name="paidTo",
field=models.ForeignKey(
related_name="received_payments",
blank=True,
to=settings.AUTH_USER_MODEL,
null=True,
on_delete=models.deletion.SET_NULL,
),
),
migrations.AddField(
model_name="booking",
name="person",
field=models.ForeignKey(
related_name="bookings",
to=settings.AUTH_USER_MODEL,
on_delete=models.deletion.CASCADE,
),
),
migrations.AddField(
model_name="booking",
name="session",
field=models.ForeignKey(
related_name="bookings",
blank=True,
to="oneevent.Session",
null=True,
on_delete=models.deletion.CASCADE,
),
),
migrations.AlterUniqueTogether(
name="session", unique_together=set([("event", "title")]),
),
migrations.AlterUniqueTogether(
name="option", unique_together=set([("choice", "title")]),
),
migrations.AlterUniqueTogether(
name="choice", unique_together=set([("event", "title")]),
),
migrations.AlterUniqueTogether(
name="bookingoption", unique_together=set([("booking", "option")]),
),
migrations.AlterUniqueTogether(
name="booking", unique_together=set([("event", "person")]),
),
]
|
py | 7dff2d1dd62ad338361acc9bf77e1c96c278a074 | import _plotly_utils.basevalidators
class ShowticksuffixValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="showticksuffix", parent_name="contour.colorbar", **kwargs
):
super(ShowticksuffixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop("values", ["all", "first", "last", "none"]),
**kwargs
)
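# Hedged usage note: this is generated validator code; in practice the setting
# is exercised through the figure API, e.g.
# plotly.graph_objects.Contour(colorbar=dict(showticksuffix="last")),
# where the value must be one of "all", "first", "last" or "none" as listed above.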
|
py | 7dff2da70ef457c6e152447cc1b67140f43b821a | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
General utility code for building PyTorch-based agents in ParlAI.
Contains the following main utilities:
* TorchAgent class which serves as a useful parent class for other model agents
* Batch namedtuple which is the input type of the main abstract methods of
the TorchAgent class
* Output namedtuple which is the expected output type of the main abstract
methods of the TorchAgent class
* History class which handles tracking the dialogue state over the course of an episode.
See below for documentation on each specific tool.
"""
from typing import Dict, Any, Union, List, Tuple
from abc import ABC, abstractmethod
from copy import deepcopy
from collections import deque
import json
import random
import os
import torch
from torch import optim
from parlai.core.opt import Opt
from parlai.core.agents import Agent
from parlai.utils.thread import SharedTable
from parlai.core.dict import DictionaryAgent
from parlai.nn.lr_scheduler import ParlAILRScheduler
from parlai.core.message import Message
from parlai.utils.distributed import is_distributed
from parlai.utils.misc import AttrDict, warn_once
from parlai.utils.fp16 import (
fp16_apex_available,
fp16_optimizer_wrapper,
MemoryEfficientFP16Optimizer,
MemoryEfficientFP16Adam,
Adafactor,
)
from parlai.core.metrics import (
Metrics,
Metric,
GlobalAverageMetric,
GlobalSumMetric,
GlobalFixedMetric,
)
from parlai.utils.distributed import is_primary_worker
from parlai.utils.torch import argsort, compute_grad_norm, padded_tensor
class Batch(AttrDict):
"""
Batch is a namedtuple containing data being sent to an agent.
This is the input type of the train_step and eval_step functions.
Agents can override the batchify function to return an extended namedtuple
with additional fields if they would like, though we recommend calling the
parent function to set up these fields as a base.
:param text_vec:
bsz x seqlen tensor containing the parsed text data.
:param text_lengths:
list of length bsz containing the lengths of the text in same order as
text_vec; necessary for pack_padded_sequence.
:param label_vec:
bsz x seqlen tensor containing the parsed label (one per batch row).
:param label_lengths:
list of length bsz containing the lengths of the labels in same order as
label_vec.
:param labels:
list of length bsz containing the selected label for each batch row (some
datasets have multiple labels per input example).
:param valid_indices:
list of length bsz containing the original indices of each example in the
batch. we use these to map predictions back to their proper row, since e.g.
we may sort examples by their length or some examples may be invalid.
:param candidates:
list of lists of text. outer list has size bsz, inner lists vary in size
based on the number of candidates for each row in the batch.
:param candidate_vecs:
list of lists of tensors. outer list has size bsz, inner lists vary in size
based on the number of candidates for each row in the batch.
:param image:
list of image features in the format specified by the --image-mode arg.
:param observations:
the original observations in the batched order
"""
def __init__(
self,
text_vec=None,
text_lengths=None,
label_vec=None,
label_lengths=None,
labels=None,
valid_indices=None,
candidates=None,
candidate_vecs=None,
image=None,
observations=None,
**kwargs,
):
super().__init__(
text_vec=text_vec,
text_lengths=text_lengths,
label_vec=label_vec,
label_lengths=label_lengths,
labels=labels,
valid_indices=valid_indices,
candidates=candidates,
candidate_vecs=candidate_vecs,
image=image,
observations=observations,
**kwargs,
)
class Output(AttrDict):
"""
Output is an object containing agent predictions.
This is the expected return type of the train_step and eval_step functions,
though agents can choose to return None if they do not want to answer.
:param List[str] text:
list of strings of length bsz containing the predictions of the model
:param List[List[str]] text_candidates:
list of lists of length bsz containing ranked predictions of the model.
each sub-list is an ordered ranking of strings, of variable length.
"""
def __init__(self, text=None, text_candidates=None, **kwargs):
super().__init__(text=text, text_candidates=text_candidates, **kwargs)
class History(object):
"""
History handles tracking the dialogue state over the course of an episode.
History may also be used to track the history of any field.
:param field:
field in the observation to track over the course of the episode
(defaults to 'text')
:param vec_type:
specify a 'list' or 'deque' to save the history in this object
:param maxlen:
if `vec_type` is 'deque', this sets the maximum length of that object
:param p1_token:
token indicating 'person 1'; opt must have 'person_tokens' set to True
for this to be added
    :param p2_token:
token indicating 'person 2'; opt must have 'person_tokens' set to True
for this to be added
:param dict_agent:
DictionaryAgent object for tokenizing the history
"""
def __init__(
self,
opt,
field='text',
vec_type='deque',
maxlen=None,
size=-1,
p1_token='__p1__',
p2_token='__p2__',
dict_agent=None,
):
self.field = field
self.dict = dict_agent
self.delimiter = opt.get('delimiter', '\n')
self.delimiter_tok = self.parse(self.delimiter)
self.size = size
self.split_on_newln = opt.get('split_lines', False)
self._global_end_token = opt.get('history_add_global_end_token', None)
if self._global_end_token is not None:
self._global_end_token = self.dict[self.dict.end_token]
# set up history objects
if vec_type != 'deque' and vec_type != 'list':
raise RuntimeError('Type {} is not supported for history'.format(vec_type))
self.vec_type = vec_type
self.max_len = maxlen
self.history_strings = []
self.history_raw_strings = []
self.history_vecs = []
# person token args
self.add_person_tokens = opt.get('person_tokens', False)
self.add_p1_after_newln = opt.get('add_p1_after_newln', False)
self.p1_token = p1_token
self.p2_token = p2_token
def parse(self, text):
"""
Tokenize text with the given dictionary.
"""
return self.dict.txt2vec(text)
def reset(self):
"""
Clear the history.
"""
self.history_raw_strings = []
self.history_strings = []
self.history_vecs = []
def _update_strings(self, text):
if self.size > 0:
while len(self.history_strings) >= self.size:
self.history_strings.pop(0)
self.history_strings.append(text)
def _update_raw_strings(self, text):
if self.size > 0:
while len(self.history_raw_strings) >= self.size:
self.history_raw_strings.pop(0)
self.history_raw_strings.append(text)
def _update_vecs(self, text):
if self.size > 0:
while len(self.history_vecs) >= self.size:
self.history_vecs.pop(0)
self.history_vecs.append(self.parse(text))
def add_reply(self, text):
"""
Add your own response to the history.
"""
self._update_raw_strings(text)
if self.add_person_tokens:
text = self._add_person_tokens(text, self.p2_token)
# update history string
self._update_strings(text)
# update history vecs
self._update_vecs(text)
def update_history(self, obs):
"""
Update the history with the given observation.
"""
if self.field in obs and obs[self.field] is not None:
if self.split_on_newln:
next_texts = obs[self.field].split('\n')
else:
next_texts = [obs[self.field]]
for text in next_texts:
self._update_raw_strings(text)
if self.add_person_tokens:
text = self._add_person_tokens(
                        text, self.p1_token, self.add_p1_after_newln
)
# update history string
self._update_strings(text)
# update history vecs
self._update_vecs(text)
def get_history_str(self):
"""
Return the string version of the history.
"""
if len(self.history_strings) > 0:
return self.delimiter.join(self.history_strings)
return None
def get_history_vec(self):
"""
Return a vectorized version of the history.
"""
if len(self.history_vecs) == 0:
return None
if self.vec_type == 'deque':
history = deque(maxlen=self.max_len)
for vec in self.history_vecs[:-1]:
history.extend(vec)
history.extend(self.delimiter_tok)
history.extend(self.history_vecs[-1])
if self._global_end_token is not None:
history.extend([self._global_end_token])
else:
# vec type is a list
history = []
for vec in self.history_vecs[:-1]:
history += vec
history += self.delimiter_tok
history += self.history_vecs[-1]
if self._global_end_token is not None:
history += [self._global_end_token]
return history
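        # Sketch of the expected behaviour (token ids are made up): with
        # history_vecs == [[1, 2], [3]] and delimiter_tok == [0], the returned
        # history is [1, 2, 0, 3], plus the global end token when configured.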
def get_history_vec_list(self):
"""
Return a list of history vecs.
"""
return self.history_vecs
def _add_person_tokens(self, text, token, add_after_newln=False):
if add_after_newln:
split = text.split('\n')
split[-1] = token + ' ' + split[-1]
return '\n'.join(split)
else:
return token + ' ' + text
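    # Illustrative sketch (inputs are assumed): _add_person_tokens("hi\nhow are
    # you?", "__p1__", add_after_newln=True) yields "hi\n__p1__ how are you?",
    # while add_after_newln=False yields "__p1__ hi\nhow are you?".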
class TorchAgent(ABC, Agent):
"""
A provided abstract base agent for any model that wants to use Torch.
Exists to make it easier to implement a new agent.
Not necessary, but reduces duplicated code.
Many methods are intended to be either used as is when the default is
    acceptable, or to be overridden and called with super(), with the extra
functionality added to the initial result. See the method comment for
recommended behavior.
This agent serves as a common framework for all ParlAI models which want
to use PyTorch.
"""
P1_TOKEN = '__p1__'
P2_TOKEN = '__p2__'
@classmethod
def optim_opts(self):
"""
Fetch optimizer selection.
By default, collects everything in torch.optim, as well as importing:
- qhm / qhmadam if installed from github.com/facebookresearch/qhoptim
Override this (and probably call super()) to add your own optimizers.
"""
# first pull torch.optim in
optims = {
k.lower(): v
for k, v in optim.__dict__.items()
if not k.startswith('__') and k[0].isupper()
}
try:
import apex.optimizers.fused_adam as fused_adam
import apex.optimizers.fused_lamb as fused_lamb
optims['fused_adam'] = fused_adam.FusedAdam
optims['fused_lamb'] = fused_lamb.FusedLAMB
except ImportError:
pass
try:
# https://openreview.net/pdf?id=S1fUpoR5FQ
from qhoptim.pyt import QHM, QHAdam
optims['qhm'] = QHM
optims['qhadam'] = QHAdam
except ImportError:
# no QHM installed
pass
# now pull in memory efficient implementations
optims['mem_eff_adam'] = MemoryEfficientFP16Adam
optims['adafactor'] = Adafactor
return optims
@staticmethod
def dictionary_class():
"""
Return the dictionary class that this agent expects to use.
        Can be overridden if a more complex dictionary is required.
"""
return DictionaryAgent
@classmethod
def history_class(cls):
"""
Return the history class that this agent expects to use.
        Can be overridden if a more complex history is required.
"""
return History
@classmethod
def add_cmdline_args(cls, argparser):
"""
Add the default commandline args we expect most agents to want.
"""
agent = argparser.add_argument_group('TorchAgent Arguments')
agent.add_argument(
'-i',
'--interactive-mode',
type='bool',
default=False,
help='Whether in full interactive mode or not, which means generating text or'
' retrieving from a full set of candidates, which is necessary to actually'
' do full dialogue. However, during training or quick validation (e.g. PPL for'
' generation or ranking a few candidates for ranking models) you might want these'
' set to off.'
' Typically, scripts can set their preferred default behavior at the start,'
' e.g. eval scripts.',
)
# pretrained embedding arguments
agent.add_argument(
'-emb',
'--embedding-type',
default='random',
choices=[
'random',
'glove',
'glove-fixed',
'fasttext',
'fasttext-fixed',
'fasttext_cc',
'fasttext_cc-fixed',
],
help='Choose between different strategies for initializing word '
'embeddings. Default is random, but can also preinitialize '
'from Glove or Fasttext. Preinitialized embeddings can also '
'be fixed so they are not updated during training.',
)
agent.add_argument(
'-embp',
'--embedding-projection',
default='random',
help='If pretrained embeddings have a different dimensionality '
'than your embedding size, strategy for projecting to the '
'correct size. If the dimensions are the same, this is '
'ignored unless you append "-force" to your choice.',
)
agent.add_argument(
'--fp16', type='bool', default=False, help='Use fp16 computations.'
)
agent.add_argument(
'--fp16-impl',
type=str,
default='apex',
choices=['apex', 'mem_efficient'],
help='Implementation of FP16 to use',
)
agent.add_argument(
'--force-fp16-tokens',
type='bool',
default=False,
hidden=True,
help='Add the special fp16 tokens even if not using fp16.',
)
# optimizer arguments
optim_group = agent.add_argument_group('Optimizer Arguments')
optim_group.add_argument(
'-opt',
'--optimizer',
default='sgd',
choices=cls.optim_opts(),
help='Choose between pytorch optimizers. Any member of torch.optim'
' should be valid.',
)
optim_group.add_argument(
'-lr', '--learningrate', type=float, default=1, help='Learning rate'
)
optim_group.add_argument(
'-clip',
'--gradient-clip',
type=float,
default=0.1,
help='gradient clipping using l2 norm',
)
optim_group.add_argument(
'--adam-eps',
type=float,
default=1e-8,
hidden=True,
help='Epsilon value for Adam optimizers. Set to 1e-6 if your '
'large model has stability issues, but prefer the default.',
)
optim_group.add_argument(
'--adafactor-eps',
default='1e-30,1e-3',
type='floats',
help='Epsilon values for adafactor optimizer: regularization '
'constants for square gradient and parameter scale respectively',
recommended='1e-30,1e-3',
)
optim_group.add_argument(
'-mom',
'--momentum',
default=0,
type=float,
help='if applicable, momentum value for optimizer.',
)
optim_group.add_argument(
'--nesterov',
default=True,
type='bool',
help='if applicable, whether to use nesterov momentum.',
)
optim_group.add_argument(
'-nu',
'--nus',
default='0.7',
type='floats',
help='if applicable, nu value(s) for optimizer. can use a single '
'value like 0.7 or a comma-separated tuple like 0.7,1.0',
)
optim_group.add_argument(
'-beta',
'--betas',
default='0.9,0.999',
type='floats',
help='if applicable, beta value(s) for optimizer. can use a single '
'value like 0.9 or a comma-separated tuple like 0.9,0.999',
)
optim_group.add_argument(
'-wdecay',
'--weight-decay',
type=float,
default=None,
help='Weight decay on the weights.',
)
# preprocessing arguments
agent.add_argument(
'-rc',
'--rank-candidates',
type='bool',
default=False,
help='Whether the model should parse candidates for ranking.',
)
agent.add_argument(
'-tr',
'--truncate',
default=-1,
type=int,
help='Truncate input lengths to increase speed / use less memory.',
)
agent.add_argument(
'--text-truncate',
type=int,
help='Text input truncation length: if not specified, this will '
'default to `truncate`',
)
agent.add_argument(
'--label-truncate',
type=int,
help='Label truncation length: if not specified, this will default '
'to `truncate`',
)
agent.add_argument(
'-histsz',
'--history-size',
default=-1,
type=int,
help='Number of past dialog utterances to remember.',
)
agent.add_argument(
'-pt',
'--person-tokens',
type='bool',
default=False,
help='add person tokens to history. adds __p1__ in front of input '
'text and __p2__ in front of past labels when available or '
'past utterances generated by the model. these are added to '
'the dictionary during initialization.',
)
agent.add_argument(
'--split-lines',
type='bool',
default=False,
help='split the dialogue history on newlines and save in separate '
'vectors',
)
agent.add_argument(
'--use-reply',
default='label',
hidden=True,
choices=['label', 'model', 'none'],
help='Which previous replies to use as history. If label, use '
'gold dataset replies. If model, use model\'s own replies. '
'If none, do not track replies in history.',
)
agent.add_argument(
'--add-p1-after-newln',
type='bool',
default=False,
hidden=True,
help='Add the other speaker token before the last newline in the '
'input instead of at the beginning of the input. this is '
'useful for tasks that include some kind of context before '
'the actual utterance (e.g. squad, babi, personachat).',
)
agent.add_argument(
'--delimiter',
type=str,
default='\n',
help='Join history lines with this token, defaults to newline',
)
agent.add_argument(
'--history-add-global-end-token',
type='nonestr',
default=None,
hidden=True,
choices=[None, 'end'],
help='Add special token to the end of history encoding.',
)
# GPU arguments
# these gpu options are all mutually exclusive, and should error if the
# user tries to present multiple of them
gpugroup = agent.add_mutually_exclusive_group()
gpugroup.add_argument(
'-gpu', '--gpu', type=int, default=-1, help='which GPU to use'
)
gpugroup.add_argument(
'--no-cuda',
default=False,
action='store_true',
dest='no_cuda',
help='disable GPUs even if available. otherwise, will use GPUs if '
'available on the device.',
)
cls.dictionary_class().add_cmdline_args(argparser)
ParlAILRScheduler.add_cmdline_args(argparser)
def __init__(self, opt: Opt, shared=None):
"""
Initialize agent.
"""
super().__init__(opt, shared)
opt = self.opt
# Safety checkers to ensure TorchAgent assumptions aren't being violated.
self.__expecting_clear_history = False
self.__expecting_to_reply = False
# used for sharing metrics back to the teacher
self._local_metrics: Dict[str, List[Metric]] = {}
# we may want to temporarily disable local metrics, roughly similar to
# `with torch.no_grad`. See TorchGeneratorAgent._init_cuda_buffer for
# example
self.__local_metrics_enabled = True
# check for cuda
self.use_cuda = not opt['no_cuda'] and torch.cuda.is_available()
if self.use_cuda:
if not shared:
print('[ Using CUDA ]')
if not shared and opt['gpu'] != -1:
torch.cuda.set_device(opt['gpu'])
# whether we're using multi-gpu, a few different ways. these are not
# supported by all models, but we can still keep track of the options
self.model_parallel = opt.get('model_parallel', False) and self.use_cuda
self.data_parallel = opt.get('data_parallel', False) and self.use_cuda
if self.data_parallel and is_distributed():
raise RuntimeError('Cannot combine --data-parallel and distributed mode.')
if self.model_parallel and self.data_parallel:
raise RuntimeError('Cannot combine --data-parallel and --model-parallel.')
# indicate whether using fp16
self.fp16 = self.use_cuda and self.opt.get('fp16', False)
if self.fp16:
# check that the implementation requested is available
self.fp16_impl = self.opt.get('fp16_impl', 'apex')
if self.fp16_impl == 'apex' and not fp16_apex_available():
self.fp16 = False
if shared is None:
            # initialize any important structures from scratch
self.dict = self.build_dictionary()
if opt.get('fp16') or opt.get('force_fp16_tokens'):
# Volta cores revert to FP32 hardware if tensors are not multiples
# of 8 in all dimensions. This INCLUDES the embeddings layer! As
# such, we need some extra magic to ensure the dictionary is padded
# with extra tokens to make it a multiple of 8.
from parlai.utils.torch import FP16_PAD_SIZE
if len(self.dict) % FP16_PAD_SIZE != 0:
for i in range(FP16_PAD_SIZE - len(self.dict) % FP16_PAD_SIZE):
self.dict['__FP16_PAD_{}__'.format(i)] = 1
# global_metrics keeps track of batch-level or global-level metrics
self.global_metrics = Metrics(opt.get('numthreads', 1) > 1, shared=None)
# self.metrics is there for legacy reasons
self.metrics: Dict[str, Any] = {}
else:
# copy initialized data from shared table
self.opt = shared['opt']
self.dict = shared['dict']
self.model = shared['model']
self.criterion = shared['criterion']
self.metrics = shared['metrics']
self.global_metrics = Metrics(
opt.get('numthreads', 1) > 1, shared=shared['global_metrics']
)
if opt.get('numthreads', 1) > 1:
torch.set_num_threads(1)
# Default to the class name, sans "Agent". child can override
self.id = type(self).__name__.replace("Agent", "")
# now set up any fields that all instances may need
self.EMPTY = torch.zeros(0, dtype=torch.long)
self.NULL_IDX = self.dict[self.dict.null_token]
self.START_IDX = self.dict[self.dict.start_token]
self.END_IDX = self.dict[self.dict.end_token]
        # for gradient accumulation
self._number_grad_accum = 0
# for the LR scheduler
self._number_training_updates = 0
# fixed random seed
self.random = random.Random(42)
# can remember as few as zero utterances if desired
self.histsz = opt['history_size']
# truncate == 0 might give funny behavior
self.truncate = opt['truncate'] if opt['truncate'] >= 0 else None
text_truncate = opt.get('text_truncate') or opt['truncate']
self.text_truncate = text_truncate if text_truncate >= 0 else None
label_truncate = opt.get('label_truncate') or opt['truncate']
self.label_truncate = label_truncate if label_truncate >= 0 else None
# stores up to hist_utt past observations within current dialog
self.history = self.build_history()
self.is_training = False # track whether model is training
self.rank_candidates = opt['rank_candidates']
self.add_person_tokens = opt.get('person_tokens', False)
# set interactive mode or not according to options.
self.set_interactive_mode(opt.get('interactive_mode', False), shared)
def build_history(self):
"""
Return the constructed history object.
"""
return self.history_class()(
self.opt,
maxlen=self.text_truncate,
size=self.histsz,
p1_token=self.P1_TOKEN,
p2_token=self.P2_TOKEN,
dict_agent=self.dict,
)
def build_dictionary(self):
"""
Return the constructed dictionary, which will be set to self.dict.
If you need to add additional tokens to the dictionary, this is likely the right
place to do it.
"""
d = self.dictionary_class()(self.opt)
if self.opt.get('person_tokens'):
d[self.P1_TOKEN] = 999_999_999
d[self.P2_TOKEN] = 999_999_998
return d
def _get_init_model(self, opt: Opt, shared):
"""
Get model file to initialize with.
        If `init_model` exists, we will return the path to that file and maybe
        load the dict file from that path. Otherwise, use `model_file`.
:return: path to load model from, whether we loaded from `init_model`
or not
"""
init_model = None
is_finetune = False
if not shared: # only do this on first setup
# first check load path in case we need to override paths
if opt.get('init_model') and os.path.isfile(opt['init_model']):
# check first for 'init_model' for loading model from file
init_model = opt['init_model']
is_finetune = True
if opt.get('model_file') and os.path.isfile(opt['model_file']):
# next check for 'model_file', this would override init_model
init_model = opt['model_file']
is_finetune = False
if (
opt.get('load_from_checkpoint')
and opt.get('init_model')
and opt['init_model'].endswith('.checkpoint')
):
# but if we're loading from a checkpoint, we should explicitly load
# from that point
init_model = opt['init_model']
is_finetune = False
if init_model is not None:
# if we are loading a model, should load its dict too
if os.path.isfile(init_model + '.dict') or opt['dict_file'] is None:
opt['dict_file'] = init_model + '.dict'
return init_model, is_finetune
@abstractmethod
def build_model(self):
"""
Construct the model and return it.
"""
raise NotImplementedError('not implemented for this class')
def _should_initialize_optimizer(self) -> bool:
"""
Used to indicate whether we should initialize an optimizer.
When this is off, we can save memory and use larger batches.
"""
if self.opt.get('interactive_mode'):
return False
datatype = self.opt.get('datatype', '')
is_train = 'train' in datatype and 'evalmode' not in datatype
return is_train or self.opt.get('numthreads', 1) > 1
def init_optim(self, params, optim_states=None, saved_optim_type=None):
"""
Initialize optimizer with model parameters.
:param params:
parameters from the model
:param optim_states:
optional argument providing states of optimizer to load
:param saved_optim_type:
type of optimizer being loaded, if changed will skip loading
optimizer states
"""
opt = self.opt
# set up optimizer args
lr = opt['learningrate']
kwargs = {'lr': lr}
if opt.get('weight_decay'):
kwargs['weight_decay'] = opt['weight_decay']
if opt.get('momentum') > 0 and opt['optimizer'] in ['sgd', 'rmsprop', 'qhm']:
# turn on momentum for optimizers that use it
kwargs['momentum'] = opt['momentum']
if opt['optimizer'] == 'sgd' and opt.get('nesterov', True):
# for sgd, maybe nesterov
kwargs['nesterov'] = opt.get('nesterov', True)
elif opt['optimizer'] == 'qhm':
# qhm needs a nu
kwargs['nu'] = opt.get('nus', (0.7,))[0]
elif opt['optimizer'] == 'adam':
# turn on amsgrad for adam
# amsgrad paper: https://openreview.net/forum?id=ryQu7f-RZ
kwargs['amsgrad'] = True
if self.fp16 and self.fp16_impl == 'mem_efficient':
# grab this implementation instead
opt['optimizer'] = 'mem_eff_adam'
elif opt['optimizer'] == 'qhadam':
# set nus for qhadam
kwargs['nus'] = opt.get('nus', (0.7, 1.0))
elif opt['optimizer'] == 'adafactor':
# adafactor params
kwargs['beta1'] = opt.get('betas', (0.9, 0.999))[0]
kwargs['eps'] = opt['adafactor_eps']
kwargs['warmup_init'] = opt.get('warmup_updates', -1) > 0
if opt['optimizer'] in [
'adam',
'sparseadam',
'fused_adam',
'adamax',
'qhadam',
'fused_lamb',
]:
# set betas for optims that use it
kwargs['betas'] = opt.get('betas', (0.9, 0.999))
# set adam optimizer, but only if user specified it
if opt.get('adam_eps'):
kwargs['eps'] = opt['adam_eps']
# handle fused_adam where the user doesn't have apex installed
if saved_optim_type == 'fused_adam' and 'fused_adam' not in self.optim_opts():
# we trained with apex, but the user doesn't have apex installed.
saved_optim_type = 'adam'
if (
self.opt['optimizer'] == 'fused_adam'
and 'fused_adam' not in self.optim_opts()
):
raise ImportError(
'You are using --optimizer fused_adam, but you do not have APEX '
'installed. Please install APEX (https://github.com/NVIDIA/apex) or '
'switch to --optimizer adam.'
)
optim_class = self.optim_opts()[opt['optimizer']]
self.optimizer = optim_class(params, **kwargs)
if self.fp16:
if self.fp16_impl == 'apex':
self.optimizer = fp16_optimizer_wrapper(self.optimizer)
else:
# Using memory efficient optimizer
opt_name = opt['optimizer']
compatible_list = MemoryEfficientFP16Optimizer.compatible_optimizers()
is_compat = opt_name in compatible_list
if not is_compat:
raise RuntimeError(
f'The optimizer you selected {opt_name} is not compatible '
'with Memory Efficient FP16. Please select from among this '
f'list:\n{compatible_list}'
)
self.optimizer = MemoryEfficientFP16Optimizer(self.optimizer)
# TODO: we might want to hard reset optimizers here in the
# case of fine tuning. Some rudimentary experiments seemed to
# indicate that keeping adam weights around was desirable, so this
# will remain the behavior for the time being.
if optim_states and saved_optim_type != opt['optimizer']:
# we changed from adam to adamax, or sgd to adam, or similar
print('WARNING: not loading optim state since optim class changed.')
elif optim_states:
# check for any fp16/fp32 conversions we need to do
optimstate_fp16 = 'loss_scaler' in optim_states
if self.fp16 and optimstate_fp16:
# previously trained in fp16, now we're training in fp16.
# ideally no action needed, but APEX broke backwards
# compatibility and this is the hack around it.
optim_states['loss_scaler'] = self.optimizer.state_dict()['loss_scaler']
elif optimstate_fp16 and not self.fp16:
# old optimizer was fp16 but now we're doing fp32,
# if apex, drop the fp16 wrapper from the state_dict and just load
# the fp16 weights into the fp32 tensors
if 'optimizer_state_dict' in optim_states:
# trained with apex
optim_states = optim_states['optimizer_state_dict']
elif not optimstate_fp16 and self.fp16:
# old optimizer was fp32, but now we're doing fp16.
# this is a bit clunky, but alternatives are worse
try:
self.optimizer.optimizer.load_state_dict(optim_states)
except ValueError:
warn_once(
'WARNING: not loading optim state since model params changed.'
)
return
else:
# previously trained in fp32, loading in fp32.
# no special treatment needed.
pass
# finally, try to actually load the optimizer state
try:
self.optimizer.load_state_dict(optim_states)
except (ValueError, KeyError):
warn_once(
'WARNING: not loading optim state since model params changed.'
)
def build_lr_scheduler(self, states=None, hard_reset=False):
"""
Create the learning rate scheduler, and assign it to self.scheduler. This
scheduler will be updated upon a call to receive_metrics. May also create
self.warmup_scheduler, if appropriate.
:param state_dict states: Possible state_dict provided by model
checkpoint, for restoring LR state
:param bool hard_reset: If true, the LR scheduler should ignore the
state dictionary.
"""
if states is None:
states = {}
optimizer = self.optimizer
if self.fp16:
# lr schedulers don't work with apex, they expect the "real" optimizer
optimizer = optimizer.optimizer
self.scheduler = ParlAILRScheduler.lr_scheduler_factory(
self.opt, optimizer, states, hard_reset
)
if self.scheduler:
self._number_training_updates = (
self.scheduler.get_initial_number_training_updates()
)
def _control_local_metrics(self, enabled: bool = False, disabled: bool = False):
"""
Used to temporarily disable local metrics.
This is useful for things like when you need to call super(), but
prevent the parent from recording some metric. For example, if you're
forwarding a dummy batch or calling super() but still want to modify
the output.
You can compare this to torch.no_grad in its goal.
"""
if not (enabled ^ disabled):
raise ValueError(
'You must provide exactly one of enabled or disabled to '
'_control_local_metrics.'
)
self.__local_metrics_enabled = enabled
def record_local_metric(self, keyname: str, values: List[Metric]):
"""
Record an example-level metric for all items in the batch.
        Local metrics may be recorded anywhere within batch_act. They will
automatically be collated and returned at the end of batch_act. The
beginning of batch_act resets these, so you may not use them during
observe.
Example local metrics include ppl, token_acc, any other agent-specific
metrics.
"""
if not self.__local_metrics_enabled:
return
if keyname in self._local_metrics:
            # we could relax this restriction in the future
raise KeyError(f"Already recorded metrics for {keyname}")
self._local_metrics[keyname] = values
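        # Hedged example: inside batch_act a subclass might call
        #     self.record_local_metric('token_acc', [AverageMetric(1), AverageMetric(0)])
        # with exactly one Metric per row of the batch; AverageMetric is assumed
        # to live in parlai.core.metrics and is not imported in this file.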
def report(self):
"""
Report metrics.
Report includes learning rate and number of training updates.
"""
report = self.global_metrics.report()
# only report LR if we have a scheduler
if hasattr(self, 'scheduler') and self.scheduler is not None:
report['lr'] = GlobalAverageMetric(self.optimizer.param_groups[0]['lr'])
if self.use_cuda:
report['gpu_mem'] = GlobalAverageMetric(self._gpu_usage())
if is_primary_worker() and self._number_training_updates:
# number train updates doesn't work in hogwild sadly, and should only
# be done on the primary worker
report['total_train_updates'] = GlobalFixedMetric(
self._number_training_updates
)
return report
def _gpu_usage(self):
"""
Compute GPU memory usage.
Includes both allocated and cached memory; this should be close to the
        output of nvidia-smi, but does not reflect how much is currently demanded
by the program. It may be viewed as a rough approximation of
worst-case-until-now.
:return: Percent of allocated GPU memory as a fraction of available.
"""
if not self.use_cuda:
return None
if self.opt['gpu'] == -1:
# use all gpus available locally
devices = range(torch.cuda.device_count())
else:
devices = [self.opt['gpu']]
memory_avail = 0
memory_used = 0
for dev in devices:
props = torch.cuda.get_device_properties(dev)
memory_avail += props.total_memory
memory_used += torch.cuda.memory_allocated(dev) + torch.cuda.memory_cached(
dev
)
return memory_used / memory_avail
def receive_metrics(self, metrics_dict):
if not hasattr(self, 'scheduler') or self.scheduler is None:
return
self.scheduler.valid_step(metrics_dict)
def _get_embtype(self, emb_type):
# set up preinitialized embeddings
if emb_type.startswith('glove'):
init = 'glove'
from parlai.zoo.glove_vectors.build import download
embs = download(self.opt.get('datapath'))
elif emb_type.startswith('fasttext_cc'):
init = 'fasttext_cc'
from parlai.zoo.fasttext_cc_vectors.build import download
embs = download(self.opt.get('datapath'))
elif emb_type.startswith('fasttext'):
init = 'fasttext'
from parlai.zoo.fasttext_vectors.build import download
embs = download(self.opt.get('datapath'))
else:
raise RuntimeError(
'embedding type {} not implemented. check arg, '
'submit PR to this function, or override it.'
''.format(emb_type)
)
return embs, init
def _project_vec(self, vec, target_dim, method='random'):
"""
If needed, project vector to target dimensionality.
Projection methods implemented are the following:
random - random gaussian matrix multiplication of input vector
:param vec:
one-dimensional vector
:param target_dim:
dimension of returned vector
:param method:
projection method. will be used even if the dim is not changing if
method ends in "-force".
"""
pre_dim = vec.size(0)
if pre_dim != target_dim or method.endswith('force'):
if method.startswith('random'):
# random projection
if not hasattr(self, 'proj_rp'):
self.proj_rp = torch.Tensor(pre_dim, target_dim).normal_()
# rescale so we're not destroying norms too much
# http://scikit-learn.org/stable/modules/random_projection.html#gaussian-random-projection
self.proj_rp /= target_dim
return torch.mm(vec.unsqueeze(0), self.proj_rp)
else:
# TODO: PCA
# TODO: PCA + RP
# TODO: copy
raise RuntimeError(
'Projection method not implemented: {}' ''.format(method)
)
else:
return vec
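        # Shape sketch under assumed dimensions: projecting a 300-d pretrained
        # vector onto a 512-d embedding table builds proj_rp as a 300x512
        # Gaussian matrix (scaled by 1/512), and torch.mm maps the unsqueezed
        # 1x300 vector to a 1x512 row.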
def _copy_embeddings(self, weight, emb_type, log=True):
"""
Copy embeddings from the pretrained embeddings to the lookuptable.
:param weight:
weights of lookup table (nn.Embedding/nn.EmbeddingBag)
:param emb_type:
pretrained embedding type
"""
if self.opt['embedding_type'] == 'random':
# Random embedding means no copying of pretrained embeddings
return
embs, name = self._get_embtype(emb_type)
cnt = 0
for w, i in self.dict.tok2ind.items():
if w in embs.stoi:
vec = self._project_vec(embs.vectors[embs.stoi[w]], weight.size(1))
weight.data[i] = vec
cnt += 1
if log:
print(
'Initialized embeddings for {} tokens ({}%) from {}.'
''.format(cnt, round(cnt * 100 / len(self.dict), 1), name)
)
def share(self):
"""
Share fields from parent as well as useful objects in this class.
Subclasses will likely want to share their model as well.
"""
shared = super().share()
if self.opt.get('numthreads', 1) > 1 and isinstance(self.metrics, dict):
# move metrics and model to shared memory
self.metrics = SharedTable(self.metrics)
self.model.share_memory()
shared['metrics'] = self.metrics
shared['global_metrics'] = self.global_metrics.share()
shared['dict'] = self.dict
shared['model'] = self.model
shared['criterion'] = self.criterion
shared['opt'] = self.opt
return shared
def _add_start_end_tokens(self, vec, add_start=False, add_end=False):
"""
Add start and end tokens to a list or tensor.
"""
if isinstance(vec, torch.Tensor):
if len(vec.shape) != 1:
raise Exception('_add_start_end_tokens expects a 1D tensor')
tensors = [vec]
if add_start:
tensors.insert(0, vec.new_tensor([self.START_IDX]))
if add_end:
tensors.append(vec.new_tensor([self.END_IDX]))
return torch.cat(tensors, 0)
if add_start:
vec.insert(0, self.START_IDX)
if add_end:
vec.append(self.END_IDX)
return vec
def _v2t(self, vec):
"""
Convert token indices to string of tokens.
"""
new_vec = []
if hasattr(vec, 'cpu'):
vec = vec.cpu()
for i in vec:
if i == self.END_IDX:
break
new_vec.append(i)
return self.dict.vec2txt(new_vec)
def _vectorize_text(
self, text, add_start=False, add_end=False, truncate=None, truncate_left=True
):
"""
Return vector from text.
:param text:
String to vectorize.
:param add_start:
Add the start token to the front of the tensor.
:param add_end:
Add the end token to the end of the tensor.
:param truncate:
Truncate to this many tokens >= 0, or None.
:param truncate_left:
Truncate from the left side (keep the rightmost tokens). You
probably want this True for inputs, False for targets.
"""
vec = self.dict.txt2vec(text)
vec = self._add_start_end_tokens(vec, add_start, add_end)
vec = self._check_truncate(vec, truncate, truncate_left)
tensor = torch.LongTensor(vec)
return tensor
def _check_truncate(self, vec, truncate, truncate_left=False):
"""
Check that vector is truncated correctly.
"""
if truncate is None:
return vec
if len(vec) <= truncate:
return vec
if truncate_left:
return vec[-truncate:]
else:
return vec[:truncate]
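        # Quick sketch (made-up values): _check_truncate([1, 2, 3, 4], 2,
        # truncate_left=True) keeps the rightmost tokens and returns [3, 4],
        # while truncate_left=False returns [1, 2].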
def _set_text_vec(self, obs, history, truncate):
"""
Set the 'text_vec' field in the observation.
Useful to override to change vectorization behavior
"""
if 'text' not in obs:
return obs
if 'text_vec' not in obs:
# text vec is not precomputed, so we set it using the history
history_string = history.get_history_str()
            # the history string may be None, e.g. for an image task whose
            # 'text' field is empty; in that case we leave the observation
            # untouched
if history_string is None:
return obs
obs['full_text'] = history_string
if history_string:
obs['text_vec'] = history.get_history_vec()
# check truncation
if obs.get('text_vec') is not None:
truncated_vec = self._check_truncate(obs['text_vec'], truncate, True)
obs.force_set('text_vec', torch.LongTensor(truncated_vec))
return obs
def _set_label_vec(self, obs, add_start, add_end, truncate):
"""
Set the 'labels_vec' field in the observation.
Useful to override to change vectorization behavior
"""
# convert 'labels' or 'eval_labels' into vectors
if 'labels' in obs:
label_type = 'labels'
elif 'eval_labels' in obs:
label_type = 'eval_labels'
else:
label_type = None
if label_type is None:
return
elif label_type + '_vec' in obs:
# check truncation of pre-computed vector
truncated_vec = self._check_truncate(obs[label_type + '_vec'], truncate)
obs.force_set(label_type + '_vec', torch.LongTensor(truncated_vec))
else:
# pick one label if there are multiple
lbls = obs[label_type]
label = lbls[0] if len(lbls) == 1 else self.random.choice(lbls)
vec_label = self._vectorize_text(label, add_start, add_end, truncate, False)
obs[label_type + '_vec'] = vec_label
obs[label_type + '_choice'] = label
return obs
def _set_label_cands_vec(self, obs, add_start, add_end, truncate):
"""
Set the 'label_candidates_vec' field in the observation.
Useful to override to change vectorization behavior
"""
if 'label_candidates_vecs' in obs:
if truncate is not None:
# check truncation of pre-computed vectors
vecs = obs['label_candidates_vecs']
for i, c in enumerate(vecs):
vecs[i] = self._check_truncate(c, truncate)
elif self.rank_candidates and obs.get('label_candidates'):
obs.force_set('label_candidates', list(obs['label_candidates']))
obs['label_candidates_vecs'] = [
self._vectorize_text(c, add_start, add_end, truncate, False)
for c in obs['label_candidates']
]
return obs
def vectorize(
self,
obs,
history,
add_start=True,
add_end=True,
text_truncate=None,
label_truncate=None,
):
"""
Make vectors out of observation fields and store in the observation.
In particular, the 'text' and 'labels'/'eval_labels' fields are
processed and a new field is added to the observation with the suffix
'_vec'.
If you want to use additional fields on your subclass, you can override
this function, call super().vectorize(...) to process the text and
labels, and then process the other fields in your subclass.
Additionally, if you want to override some of these default parameters,
then we recommend using a pattern like:
.. code-block:: python
def vectorize(self, *args, **kwargs):
kwargs['add_start'] = False
return super().vectorize(*args, **kwargs)
:param obs:
Single observation from observe function.
:param add_start:
default True, adds the start token to each label.
:param add_end:
default True, adds the end token to each label.
:param text_truncate:
default None, if set truncates text vectors to the specified
length.
:param label_truncate:
default None, if set truncates label vectors to the specified
length.
:return:
the input observation, with 'text_vec', 'label_vec', and
'cands_vec' fields added.
"""
self._set_text_vec(obs, history, text_truncate)
self._set_label_vec(obs, add_start, add_end, label_truncate)
self._set_label_cands_vec(obs, add_start, add_end, label_truncate)
return obs
def _pad_tensor(
self, items: List[Union[List[int], torch.LongTensor]]
) -> Tuple[torch.LongTensor, List[int]]:
"""
Create a right padded matrix from an uneven list of lists.
Returns (padded, lengths), where padded is the padded matrix, and lengths
is a list containing the lengths of each row.
:param list[iter[int]] items: List of items
:returns: (padded, lengths) tuple
:rtype: (Tensor[int64], list[int])
This is intentionally overridable so that models can control how
to pad their input.
"""
return padded_tensor(
items,
pad_idx=self.NULL_IDX,
use_cuda=self.use_cuda,
fp16friendly=self.fp16,
device=self.opt['gpu'],
)
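        # Hedged example (inputs are made up): _pad_tensor([[1, 2, 3], [4]]) is
        # expected to return a 2x3 LongTensor [[1, 2, 3], [4, p, p]] and the
        # lengths [3, 1], where p is self.NULL_IDX (the width may be rounded up
        # further when fp16-friendly padding is enabled).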
def is_valid(self, obs):
"""
Determine if an observation is valid or not.
"""
return 'text_vec' in obs or 'image' in obs
def batchify(self, obs_batch, sort=False):
"""
Create a batch of valid observations from an unchecked batch.
A valid observation is one that passes the lambda provided to the
function, which defaults to checking if the preprocessed 'text_vec'
field is present which would have been set by this agent's 'vectorize'
function.
Returns a namedtuple Batch. See original definition above for in-depth
explanation of each field.
        If you want to include additional fields in the batch, you can subclass
this function and return your own "Batch" namedtuple: copy the Batch
namedtuple at the top of this class, and then add whatever additional
fields that you want to be able to access. You can then call
super().batchify(...) to set up the original fields and then set up the
additional fields in your subclass and return that batch instead.
:param obs_batch:
List of vectorized observations
:param sort:
Default False, orders the observations by length of vectors. Set to
true when using torch.nn.utils.rnn.pack_padded_sequence. Uses the text
vectors if available, otherwise uses the label vectors if available.
"""
if len(obs_batch) == 0:
return Batch()
valid_obs = [(i, ex) for i, ex in enumerate(obs_batch) if self.is_valid(ex)]
if len(valid_obs) == 0:
return Batch()
valid_inds, exs = zip(*valid_obs)
# TEXT
xs, x_lens = None, None
if any(ex.get('text_vec') is not None for ex in exs):
_xs = [ex.get('text_vec', self.EMPTY) for ex in exs]
xs, x_lens = self._pad_tensor(_xs)
if sort:
sort = False # now we won't sort on labels
xs, x_lens, valid_inds, exs = argsort(
x_lens, xs, x_lens, valid_inds, exs, descending=True
)
# LABELS
labels_avail = any('labels_vec' in ex for ex in exs)
some_labels_avail = labels_avail or any('eval_labels_vec' in ex for ex in exs)
ys, y_lens, labels = None, None, None
if some_labels_avail:
field = 'labels' if labels_avail else 'eval_labels'
label_vecs = [ex.get(field + '_vec', self.EMPTY) for ex in exs]
labels = [ex.get(field + '_choice') for ex in exs]
y_lens = [y.shape[0] for y in label_vecs]
ys, y_lens = self._pad_tensor(label_vecs)
if sort and xs is None:
ys, valid_inds, label_vecs, labels, y_lens = argsort(
y_lens, ys, valid_inds, label_vecs, labels, y_lens, descending=True
)
# LABEL_CANDIDATES
cands, cand_vecs = None, None
if any('label_candidates_vecs' in ex for ex in exs):
cands = [ex.get('label_candidates', None) for ex in exs]
cand_vecs = [ex.get('label_candidates_vecs', None) for ex in exs]
# IMAGE
imgs = None
if any('image' in ex for ex in exs):
imgs = [ex.get('image', None) for ex in exs]
return Batch(
text_vec=xs,
text_lengths=x_lens,
label_vec=ys,
label_lengths=y_lens,
labels=labels,
valid_indices=valid_inds,
candidates=cands,
candidate_vecs=cand_vecs,
image=imgs,
observations=exs,
)
def match_batch(self, batch_reply, valid_inds, output=None):
"""
Match sub-batch of predictions to the original batch indices.
Batches may be only partially filled (i.e when completing the remainder
at the end of the validation or test set), or we may want to sort by
e.g the length of the input sequences if using pack_padded_sequence.
This matches rows back with their original row in the batch for
calculating metrics like accuracy.
If output is None (model choosing not to provide any predictions), we
will just return the batch of replies.
Otherwise, output should be a parlai.core.torch_agent.Output object.
This is a namedtuple, which can provide text predictions and/or
text_candidates predictions. If you would like to map additional
fields into the batch_reply, you can override this method as well as
providing your own namedtuple with additional fields.
:param batch_reply:
Full-batchsize list of message dictionaries to put responses into.
:param valid_inds:
Original indices of the predictions.
:param output:
Output namedtuple which contains sub-batchsize list of text outputs
from model. May be None (default) if model chooses not to answer.
This method will check for ``text`` and ``text_candidates`` fields.
"""
if output is None:
return batch_reply
for k, v in output.items():
if v is None:
continue
for i, sub_val in zip(valid_inds, v):
batch_reply[i][k] = sub_val
return batch_reply
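        # Sketch with assumed values: for a batch of size 4 where only rows 0
        # and 2 were valid, valid_inds == (0, 2) and Output(text=['hi', 'ok'])
        # writes 'hi' into batch_reply[0]['text'] and 'ok' into
        # batch_reply[2]['text']; the other rows keep their empty replies.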
def observe(self, observation):
"""
Process incoming message in preparation for producing a response.
This includes remembering the past history of the conversation.
"""
# TODO: Migration plan: TorchAgent currently supports being passed
# observations as vanilla dicts for legacy interop; eventually we
# want to remove this behavior and demand that teachers return Messages
observation = Message(observation)
# Sanity check everything is in order
self._validate_observe_invariants()
if observation.get('episode_done'):
self.__expecting_clear_history = True
elif 'labels' in observation or 'eval_labels' in observation:
# make sure we note that we're expecting a reply in the future
self.__expecting_to_reply = True
self.observation = observation
# update the history using the observation
self.history.update_history(observation)
return self.vectorize(
observation,
self.history,
text_truncate=self.text_truncate,
label_truncate=self.label_truncate,
)
def self_observe(self, self_message: Message) -> None:
"""
Observe one's own utterance.
This is used so that the agent can incorporate its own response into
the dialogue history after a batch_act. Failure to implement this will
result in an agent that cannot hear itself speak.
:param self_message:
The message corresponding to the output from batch_act.
"""
use_reply = self.opt.get('use_reply', 'label')
# quick check everything is in order
self._validate_self_observe_invariants()
assert self.observation is not None
if self.observation['episode_done']:
# oh this was the last example in the episode. reset the history
self.history.reset()
# additionally mark the last observation as invalid
self.observation = None
# and clear the safety check
self.__expecting_clear_history = False
return
# We did reply! Safety check is good next round.
self.__expecting_to_reply = False
# actually ingest the label
if use_reply == 'none':
# we're not including our own responses anyway.
return
elif use_reply == 'label':
# first look for the true label
label_key = (
'labels'
if 'labels' in self.observation
else 'eval_labels'
if 'eval_labels' in self.observation
else None
)
if label_key is not None:
lbls = self.observation[label_key]
last_reply = lbls[0] if len(lbls) == 1 else self.random.choice(lbls)
self.history.add_reply(last_reply)
return
# you might expect a hard failure here, but in interactive mode we'll
# never get a label
# otherwise, we use the last output the model generated
if self_message is not None:
last_reply = self_message['text']
self.history.add_reply(last_reply)
return
raise RuntimeError("Unexpected case in self_observe.")
def _validate_observe_invariants(self):
"""
Check that we properly called self_observe after the last batch_act.
"""
if self.__expecting_to_reply:
raise RuntimeError(
"Last observe() had a label, but no call to self_observe ever "
"happened. You are likely making multiple observe() calls without "
"a corresponding act(). This was changed in #2043. File a GitHub "
"issue if you require assistance."
)
if self.__expecting_clear_history:
raise RuntimeError(
"Last observe() was episode_done, but we never saw a corresponding "
"self_observe to clear the history, probably because you missed an "
"act(). This was changed in #2043. File a GitHub issue if you require "
"assistance."
)
def _validate_self_observe_invariants(self):
"""
Check some invariant conditions for self_observe.
Goal is to catch potential places where we forget to call self_observe.
"""
if self.observation is None:
raise RuntimeError(
"You're self_observing without having observed something. Check if "
"you're missing a step in your observe/act/self_observe loop."
)
if self.observation['episode_done']:
if not self.__expecting_clear_history:
raise RuntimeError(
"You probably overrode observe() without implementing calling "
"super().observe(). This is unexpected. *If you must* avoid the "
"super call, then you should file a GitHub issue referencing "
"#2043."
)
def state_dict(self):
"""
Get the state dict for saving.
Override this method for more specific saving.
"""
states = {}
if hasattr(self, 'model'): # save model params
if hasattr(self.model, 'module'):
# did we wrap in a DistributedDataParallel
states['model'] = self.model.module.state_dict()
else:
states['model'] = self.model.state_dict()
if hasattr(self, 'optimizer'):
# save optimizer params
states['optimizer'] = self.optimizer.state_dict()
states['optimizer_type'] = self.opt['optimizer']
# lr scheduler
if torch.__version__.startswith('0.'):
warn_once(
"Must upgrade to Pytorch 1.0 to save the state of your " "LR scheduler."
)
else:
states['number_training_updates'] = self._number_training_updates
if getattr(self, 'scheduler', None):
states['lr_scheduler'] = self.scheduler.get_state_dict()
states['lr_scheduler_type'] = self.opt['lr_scheduler']
states['warmup_scheduler'] = self.scheduler.get_warmup_state_dict()
return states
def save(self, path=None):
"""
Save model parameters to path (or default to model_file arg).
Please try to refrain from overriding this function, and instead override
`state_dict(self)` for more specific saving.
"""
path = self.opt.get('model_file', None) if path is None else path
if path:
model_dict_path = path + '.dict'
if hasattr(self, 'dict') and not os.path.exists(
model_dict_path
): # force save dictionary
# TODO: Look into possibly overriding opt('dict_file') with new path
self.dict.save(model_dict_path, sort=False)
states = self.state_dict()
if states: # anything found to save?
with open(path, 'wb') as write:
torch.save(states, write)
# save opt file
with open(path + '.opt', 'w', encoding='utf-8') as handle:
if hasattr(self, 'model_version'):
self.opt['model_version'] = self.model_version()
saved_opts = deepcopy(self.opt)
if 'interactive_mode' in saved_opts:
# We do not save the state of interactive mode, it is only decided
# by scripts or command line.
del saved_opts['interactive_mode']
json.dump(saved_opts, handle, indent=4)
# for convenience of working with jq, make sure there's a newline
handle.write('\n')
def load_state_dict(self, state_dict):
"""
Load the state dict into model.
This is easily overridable to facilitate transfer of state dicts.
"""
try:
self.model.load_state_dict(state_dict)
except RuntimeError as msg:
msg_ = str(msg)
if 'size mismatch' in msg_ and 'embedding' in msg_:
raise RuntimeError(
f'{msg_}\n'
'-----------------\n'
'Could not load the model due to a size mismatch in the '
'embeddings. A common reason for this is trying to load '
'a model trained with fp16 but loaded without fp16. Try '
'adding --fp16 true or --force-fp16-tokens true.'
)
else:
raise
def load(self, path: str) -> Dict[str, Any]:
"""
Return opt and model states.
Override this method for more specific loading.
"""
import parlai.utils.pickle
states = torch.load(
path, map_location=lambda cpu, _: cpu, pickle_module=parlai.utils.pickle
)
if 'model' in states:
self.load_state_dict(states['model'])
if 'optimizer' in states and hasattr(self, 'optimizer'):
self.optimizer.load_state_dict(states['optimizer'])
return states
@classmethod
def upgrade_opt(cls, opt_from_disk: Opt):
# call the parent upgrades
opt_from_disk = super(TorchAgent, cls).upgrade_opt(opt_from_disk)
if opt_from_disk.get('fp16'):
# 2020-01-28 If the model was trained with fp16, we might not have saved
# the dict with the special fp16 tokens (https://git.io/Jvm7N), IF the
# dict was built the same time as the model. We set this to tell the
# model it MUST add the fp16 tokens, even if it's not fp16 mode now.
opt_from_disk['force_fp16_tokens'] = True
return opt_from_disk
def reset(self):
"""
Clear internal states.
"""
# assumption violation trackers
self.__expecting_clear_history = False
self.__expecting_to_reply = False
self.observation = None
self.history.reset()
self.reset_metrics()
def reset_metrics(self):
"""
Reset all TorchAgentMetrics.
"""
super().reset_metrics()
self.global_metrics.clear()
def act(self):
"""
Call batch_act with the singleton batch.
"""
# BatchWorld handles calling self_observe, but we're in a Hogwild or Interactive
# world, so we need to handle this ourselves.
response = self.batch_act([self.observation])[0]
self.self_observe(response)
return response
def batch_act(self, observations):
"""
Process a batch of observations (batchsize list of message dicts).
These observations have been preprocessed by the observe method.
Subclasses can override this for special functionality, but if the
default behaviors are fine then just override the ``train_step`` and
``eval_step`` methods instead. The former is called when labels are
present in the observations batch; otherwise, the latter is called.
"""
# clear local metrics before anything else
self._local_metrics.clear()
# initialize a list of replies with this agent's id
batch_reply = [
Message({'id': self.getID(), 'episode_done': False}) for _ in observations
]
# check if there are any labels available, if so we will train on them
self.is_training = any('labels' in obs for obs in observations)
# create a batch from the vectors
batch = self.batchify(observations)
if (
'label_vec' in batch
and 'text_vec' in batch
and batch.label_vec is not None
and batch.text_vec is not None
):
# tokens per batch
# we divide by the binary is_primary_worker() so that the numerator is
# num_tokens in all workers, and the denominator is 1.
tpb = GlobalAverageMetric(
(batch.label_vec != self.NULL_IDX).sum().item(),
float(is_primary_worker()),
)
self.global_metrics.add('tpb', tpb)
if self.is_training:
output = self.train_step(batch)
else:
with torch.no_grad():
# save memory and compute by disabling autograd.
# use `with torch.enable_grad()` to gain back gradients.
output = self.eval_step(batch)
if output is not None:
# local metrics are automatically matched up
self.match_batch(batch_reply, batch.valid_indices, output)
# broadcast the metrics back
for k, values in self._local_metrics.items():
if len(values) != len(batch.valid_indices):
raise IndexError(
f"Batchsize mismatch on metric {k} (got {len(values)}, "
f"expected {len(batch.valid_indices)}"
)
for i, value in zip(batch.valid_indices, values):
if 'metrics' not in batch_reply[i]:
batch_reply[i]['metrics'] = {}
batch_reply[i]['metrics'][k] = value
# Make sure we push all the metrics to main thread in hogwild/workers
self.global_metrics.flush()
return batch_reply
@abstractmethod
def train_step(self, batch):
"""
[Abstract] Process one batch with training labels.
"""
pass
@abstractmethod
def eval_step(self, batch):
"""
[Abstract] Process one batch but do not train on it.
"""
pass
def set_interactive_mode(self, mode, shared):
"""
Set interactive mode on or off.
"""
if shared is None and mode:
# Only print in the non-shared version.
print("[" + self.id + ': full interactive mode on.' + ']')
def backward(self, loss):
"""
Perform a backward pass.
It is recommended you use this instead of loss.backward(), for integration with
distributed training and FP16 training.
"""
update_freq = self.opt.get('update_freq', 1)
if update_freq > 1:
# gradient accumulation, but still need to average across the minibatches
loss = loss / update_freq
self._number_grad_accum = (self._number_grad_accum + 1) % update_freq
# we're doing gradient accumulation, so we don't need to sync gradients
            # among GPUs
if self._number_grad_accum != 0 and is_distributed():
# accumulate without syncing
with self.model.no_sync():
if self.fp16:
self.optimizer.backward(loss, update_master_grads=False)
else:
loss.backward()
return
if self.fp16:
self.optimizer.backward(loss, update_master_grads=False)
else:
loss.backward()
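        # Rough numeric sketch (setting is assumed): with --update-freq 4, each
        # of the four accumulated minibatches contributes gradients of loss/4,
        # so the eventual optimizer step sees the average gradient over the
        # four minibatches rather than their sum.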
def update_params(self):
"""
Perform step of optimization.
Handles clipping gradients and adjusting LR schedule if needed.
Gradient accumulation is also performed if agent is called with
--update-freq.
It is recommended (but not forced) that you call this in train_step.
"""
update_freq = self.opt.get('update_freq', 1)
if update_freq > 1:
            # we're doing gradient accumulation, so we only want to step
            # every N updates
# self._number_grad_accum is updated in backward function
if self._number_grad_accum != 0:
return
if self.fp16:
# we've been accumulating grads in fp16 and delaying the fp32 copy update.
# finally time to perform the update.
self.optimizer.update_master_grads()
if self.opt.get('gradient_clip', -1) > 0:
if self.fp16:
grad_norm = self.optimizer.clip_master_grads(self.opt['gradient_clip'])
else:
grad_norm = torch.nn.utils.clip_grad_norm_(
self.model.parameters(), self.opt['gradient_clip']
)
self.global_metrics.add('gnorm', GlobalAverageMetric(grad_norm))
self.global_metrics.add(
'clip',
GlobalAverageMetric(float(grad_norm > self.opt['gradient_clip'])),
)
else:
parameters = self.model.parameters()
grad_norm = compute_grad_norm(parameters)
self.global_metrics.add('gnorm', GlobalAverageMetric(grad_norm))
if self.fp16:
self.global_metrics.add(
'fp16_loss_scalar', GlobalAverageMetric(self.optimizer.loss_scale)
)
self.optimizer.step()
        # keep track of the number of steps, compute warmup factor
self._number_training_updates += 1
# in distributed mode, all workers step together, but we need to only
# count it once. Only the primary worker gets to make the count
if is_primary_worker():
self.global_metrics.add('updates', GlobalSumMetric(1))
if getattr(self, 'scheduler', None):
self.scheduler.step(self._number_training_updates)
def zero_grad(self):
"""
Zero out optimizer.
It is recommended you call this in train_step. It automatically handles gradient
accumulation if agent is called with --update-freq.
"""
if self._number_grad_accum != 0:
# if we're accumulating gradients, don't actually zero things out yet.
return
self.optimizer.zero_grad()
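# --- Illustrative sketch (an assumption, not part of the original agent) ----
# A concrete subclass's ``train_step`` is typically expected to wire together
# ``zero_grad``, ``backward`` and ``update_params`` roughly as below;
# ``self.compute_loss`` and the returned reply object are hypothetical
# placeholders that a real subclass would provide.
#
#     def train_step(self, batch):
#         self.zero_grad()                 # no-op mid-accumulation with --update-freq
#         loss = self.compute_loss(batch)  # hypothetical loss over self.model
#         self.backward(loss)              # fp16- and distributed-aware backward
#         self.update_params()             # clipping, LR schedule, optimizer step
#         return Output(text=replies)      # replies consumed by match_batch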
|
py | 7dff2e90c3aa9d03bd8e00c8143c352535ab79be | from flask import Flask, request, json, abort, render_template
from redis import StrictRedis
app = Flask(__name__, static_url_path='')
redis = StrictRedis(host='localhost', port=6379, db=0)
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@app.route('/views/<log>', methods=['GET'])
def get_log_page(log):
# try:
if check_log_name(log):
if redis.keys(log):
data, last_entry = format_log_data(log)
entry_count = redis.llen(log)
return render_template('view.html', data=data, entry_count=entry_count, last_entry=last_entry, log_name=log)
else:
return abort(404)
else:
return abort(400)
# except Exception as e:
# return abort(500)
@app.route('/logs/<log>', methods=['GET'])
def get_log_data(log):
return log
@app.route('/logs/<log>/config', methods=['GET'])
def get_log_config(log):
# try:
if check_log_name(log):
if redis.keys(log):
data, last_entry = format_log_data(log)
entry_count = redis.llen(log)
return render_template('view.html', data=data, entry_count=entry_count, last_entry=last_entry, log_name=log)
else:
return abort(404)
else:
return abort(400)
# except Exception as e:
# return abort(500)
@app.route('/logs/<log>', methods=['PUT'])
def add_log_data(log):
if check_log_name(log) is not True or check_log_data(request.json['data']) is not True:
return abort(400)
else:
try:
redis.lpush(log, json.dumps({'datetime': request.json['datetime'], 'data': request.json['data']}))
return ""
        except Exception:
            # surface storage failures instead of silently returning None
            return abort(500)
@app.route('/logs', methods=['POST'])
def create_log():
try:
log_id = redis.incr('log_id')
redis.hmset('log', {'id': log_id})
response = app.response_class(response=json.dumps({'id': str(log_id)}), status=200, mimetype='application/json')
return response
except Exception as e:
return str(e)
@app.route('/logs/<log>', methods=['DELETE'])
def delete_log(log):
try:
redis.delete(log)
return ''
    except Exception as e:
        print(e)
        return abort(500)
def check_log_name(name):
if len(name) <= 0:
return False
else:
return True
def check_log_data(data):
if not isinstance(data, float):
return False
else:
return True
def format_log_data(log):
data = redis.lrange(log, 0, -1)
dict = {}
x = []
y = []
last_entry = json.loads(data[-1])['datetime']
for entry in data:
entry = json.loads(entry)
x.append(entry['datetime'])
y.append(entry['data'])
dict['x'] = x
dict['y'] = y
dict['type'] = 'scatter'
return [dict], last_entry
if __name__ == '__main__':
app.run(debug=True)
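# Example usage sketch (an assumption based on the routes above; it presumes
# the app runs on Flask's default port 5000 and Redis is reachable locally):
#
#   curl -X POST http://localhost:5000/logs
#       -> {"id": "1"}
#   curl -X PUT http://localhost:5000/logs/1 \
#        -H "Content-Type: application/json" \
#        -d '{"datetime": "2020-01-01T00:00:00", "data": 23.5}'
#   curl -X DELETE http://localhost:5000/logs/1
#
# Note that check_log_data() only accepts float values for "data".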
|
py | 7dff2f061b676e49f4ca37cfde307685e5241744 | from ctypes import *
import math
import random
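# Helper notes: sample() draws an index from a discrete distribution given
# (possibly unnormalized) weights via roulette-wheel selection; c_array()
# copies a Python sequence into a ctypes array of the given element type.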
def sample(probs):
s = sum(probs)
probs = [a/s for a in probs]
r = random.uniform(0, 1)
for i in range(len(probs)):
r = r - probs[i]
if r <= 0:
return i
return len(probs)-1
def c_array(ctype, values):
return (ctype * len(values))(*values)
class IMAGE(Structure):
_fields_ = [("w", c_int),
("h", c_int),
("c", c_int),
("data", POINTER(c_float))]
class METADATA(Structure):
_fields_ = [("classes", c_int),
("names", POINTER(c_char_p))]
#lib = CDLL("/home/pjreddie/documents/darknet/libdarknet.so", RTLD_GLOBAL)
lib = CDLL("libdarknet.so", RTLD_GLOBAL)
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int
predict = lib.network_predict_p
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)
reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]
load_net = lib.load_network_p
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p
letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE
load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA
load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE
predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)
def classify(net, meta, im):
out = predict_image(net, im)
res = []
for i in range(meta.classes):
res.append((meta.names[i], out[i]))
res = sorted(res, key=lambda x: -x[1])
return res
def detect(net, meta, im):
out = predict_image(net, im)
res = []
for i in range(meta.classes):
res.append((meta.names[i], out[i]))
res = sorted(res, key=lambda x: -x[1])
return res
if __name__ == "__main__":
    # paths are passed as bytes because the ctypes prototypes use c_char_p
    net = load_net(b"cfg/densenet201.cfg", b"/home/pjreddie/trained/densenet201.weights", 0)
    im = load_image(b"data/wolf.jpg", 0, 0)
    meta = load_meta(b"cfg/imagenet1k.data")
    r = classify(net, meta, im)
    print(r[:10])
|
py | 7dff2f0b7863b3235f10fd051b4de3f9249984b1 | #!/usr/bin/python
from sfa.rspecs.pg_rspec_converter import PGRSpecConverter
from sfa.rspecs.sfa_rspec_converter import SfaRSpecConverter
from sfa.rspecs.rspec import RSpec
from sfa.rspecs.version_manager import VersionManager
class RSpecConverter:
@staticmethod
def to_sfa_rspec(in_rspec, content_type=None):
rspec = RSpec(in_rspec)
version_manager = VersionManager()
sfa_version = version_manager._get_version('sfa', '1')
pg_version = version_manager._get_version('protogeni', '2')
if rspec.version.type.lower() == sfa_version.type.lower():
return in_rspec
elif rspec.version.type.lower() == pg_version.type.lower():
return PGRSpecConverter.to_sfa_rspec(in_rspec, content_type)
else:
return in_rspec
@staticmethod
def to_pg_rspec(in_rspec, content_type=None):
rspec = RSpec(in_rspec)
version_manager = VersionManager()
sfa_version = version_manager._get_version('sfa', '1')
pg_version = version_manager._get_version('protogeni', '2')
if rspec.version.type.lower() == pg_version.type.lower():
return in_rspec
elif rspec.version.type.lower() == sfa_version.type.lower():
return SfaRSpecConverter.to_pg_rspec(in_rspec, content_type)
else:
return in_rspec
if __name__ == '__main__':
pg_rspec = 'test/protogeni.rspec'
sfa_rspec = 'test/nodes.rspec'
print "converting pg rspec to sfa rspec"
print RSpecConverter.to_sfa_rspec(pg_rspec)
print "converting sfa rspec to pg rspec"
print RSpecConverter.to_pg_rspec(sfa_rspec)
|
py | 7dff2f7fdf467b985d2e33aae86b5903d0ea48ce | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from collections import deque
from typing import List
class Solution:
def rightSideView(self, root: TreeNode) -> List[int]:
if not root:
return []
        queue = deque()
        queue.append(root)
        res = []
        while queue:
            size = len(queue)
            for i in range(size):
                currNode = queue.popleft()
                if i == size - 1:
                    res.append(currNode.val)
if currNode.left:
queue.append(currNode.left)
if currNode.right:
queue.append(currNode.right)
return res
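# Level-order (BFS) traversal: within each level, the node dequeued last
# (i == size - 1) is the rightmost one visible from the right side, so only
# that value is appended. Time and space are O(n).
#
# Hypothetical usage sketch (TreeNode as defined in the comment above):
#
#   root = TreeNode(1); root.left = TreeNode(2); root.right = TreeNode(3)
#   root.left.right = TreeNode(5); root.right.right = TreeNode(4)
#   Solution().rightSideView(root)   # -> [1, 3, 4]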
|
py | 7dff2ffa492de807c9672a08ee4ef94b7cdc2ffd | #
# PySNMP MIB module DNSSERVEREXT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DNSSERVEREXT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:52:24 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
dnsServerExt, = mibBuilder.importSymbols("APENT-MIB", "dnsServerExt")
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, ObjectIdentity, ModuleIdentity, Counter64, MibIdentifier, Integer32, iso, Counter32, IpAddress, Bits, TimeTicks, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "ObjectIdentity", "ModuleIdentity", "Counter64", "MibIdentifier", "Integer32", "iso", "Counter32", "IpAddress", "Bits", "TimeTicks", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32")
TextualConvention, DisplayString, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "RowStatus")
apDnsServerMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 2467, 1, 40, 1))
if mibBuilder.loadTexts: apDnsServerMib.setLastUpdated('9806122000Z')
if mibBuilder.loadTexts: apDnsServerMib.setOrganization('ArrowPoint Communications Inc.')
if mibBuilder.loadTexts: apDnsServerMib.setContactInfo('Postal: ArrowPoint Communications Inc. 50 Nagog Park Acton, Massachusetts 01720 Tel: +1 978-206-3000 option 1 E-Mail: [email protected]')
if mibBuilder.loadTexts: apDnsServerMib.setDescription('This MIB module describes the ArrowPoint enterprise MIB support for DNS Server')
apDnsServerEnable = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 40, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apDnsServerEnable.setStatus('current')
if mibBuilder.loadTexts: apDnsServerEnable.setDescription('Parameter to enable or disable DNS Server functionality.')
apDnsServerBufferCount = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 40, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2, 1000)).clone(10)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apDnsServerBufferCount.setStatus('current')
if mibBuilder.loadTexts: apDnsServerBufferCount.setDescription('The number of buffers allocated for Query Responses')
apDnsServerResponderTasks = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 40, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 250)).clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apDnsServerResponderTasks.setStatus('current')
if mibBuilder.loadTexts: apDnsServerResponderTasks.setDescription('The number of Tasks to handle DNS Responses')
apDnsPeerRcvEntries = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 40, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(128, 1024)).clone(128)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apDnsPeerRcvEntries.setStatus('current')
if mibBuilder.loadTexts: apDnsPeerRcvEntries.setDescription('The number of DNS entries which can be received from a peer')
apDnsPeerSndEntries = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 40, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(128, 1024)).clone(128)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apDnsPeerSndEntries.setStatus('current')
if mibBuilder.loadTexts: apDnsPeerSndEntries.setDescription('The number of DNS entries which can be sent to a peer')
apDnsPeerReportInterval = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 40, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 120)).clone(5)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apDnsPeerReportInterval.setStatus('current')
if mibBuilder.loadTexts: apDnsPeerReportInterval.setDescription('The number of seconds between generation of DNS peer load reports')
apDnsAclIndex = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 40, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 99))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apDnsAclIndex.setStatus('current')
if mibBuilder.loadTexts: apDnsAclIndex.setDescription('The Acl which has been applied to DNS resolutions, zero means no Acl')
apProximityType = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 40, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3))).clone(namedValues=NamedValues(("pdns", 2), ("pdb", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apProximityType.setStatus('current')
if mibBuilder.loadTexts: apProximityType.setDescription('Parameter to specify PDB or PDNS functionality.')
apProximityEnable = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 40, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apProximityEnable.setStatus('current')
if mibBuilder.loadTexts: apProximityEnable.setDescription('Parameter to enable or disable PDB / PDNS functionality.')
apProximityZone = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 40, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apProximityZone.setStatus('current')
if mibBuilder.loadTexts: apProximityZone.setDescription('The local proximity zone index')
apProximityDescription = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 40, 12), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 20)).clone(hexValue="0")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apProximityDescription.setStatus('current')
if mibBuilder.loadTexts: apProximityDescription.setDescription('A name which identifies this proximity zone')
apProximityZoneMax = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 40, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(6, 16))).clone(namedValues=NamedValues(("tier1", 6), ("tier2", 16))).clone('tier1')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apProximityZoneMax.setStatus('current')
if mibBuilder.loadTexts: apProximityZoneMax.setDescription('The maximum number of proximity zones allowed')
apProximityPDBIpAddr = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 40, 14), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apProximityPDBIpAddr.setStatus('current')
if mibBuilder.loadTexts: apProximityPDBIpAddr.setDescription('The Interface IP Address of the PDB')
apProximityRecordTable = MibTable((1, 3, 6, 1, 4, 1, 2467, 1, 40, 15), )
if mibBuilder.loadTexts: apProximityRecordTable.setStatus('current')
if mibBuilder.loadTexts: apProximityRecordTable.setDescription('A list of Proximity domain name records.')
apProximityRecordEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2467, 1, 40, 15, 1), ).setIndexNames((0, "DNSSERVEREXT-MIB", "apProximityRecordName"))
if mibBuilder.loadTexts: apProximityRecordEntry.setStatus('current')
if mibBuilder.loadTexts: apProximityRecordEntry.setDescription('Proximity domain name record information.')
apProximityRecordName = MibTableColumn((1, 3, 6, 1, 4, 1, 2467, 1, 40, 15, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 31))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apProximityRecordName.setStatus('current')
if mibBuilder.loadTexts: apProximityRecordName.setDescription('The dns name for this Proximity record.')
apProximityRecordType = MibTableColumn((1, 3, 6, 1, 4, 1, 2467, 1, 40, 15, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("typeA", 1), ("typeNS", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apProximityRecordType.setStatus('current')
if mibBuilder.loadTexts: apProximityRecordType.setDescription('Determines if this record is of type NS or A.')
apProximityRecordAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2467, 1, 40, 15, 1, 3), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apProximityRecordAddr.setStatus('current')
if mibBuilder.loadTexts: apProximityRecordAddr.setDescription('The Interface IP Address of Peer')
apProximityRecordTtl = MibTableColumn((1, 3, 6, 1, 4, 1, 2467, 1, 40, 15, 1, 4), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apProximityRecordTtl.setStatus('current')
if mibBuilder.loadTexts: apProximityRecordTtl.setDescription('The Time to Live Value returned in DNS Responses')
apProximityRecordKalType = MibTableColumn((1, 3, 6, 1, 4, 1, 2467, 1, 40, 15, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("kal-none", 0), ("kal-icmp", 1), ("kal-ap", 2))).clone('kal-icmp')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apProximityRecordKalType.setStatus('current')
if mibBuilder.loadTexts: apProximityRecordKalType.setDescription('The type of keepalive performed on this record')
apProximityRecordKalAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2467, 1, 40, 15, 1, 6), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apProximityRecordKalAddr.setStatus('current')
if mibBuilder.loadTexts: apProximityRecordKalAddr.setDescription('The IP Address of interface to send AP keepalive messages')
apProximityRecordKalThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 2467, 1, 40, 15, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2, 254)).clone(254)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apProximityRecordKalThreshold.setStatus('current')
if mibBuilder.loadTexts: apProximityRecordKalThreshold.setDescription('The record is considered DOWN when the load number is greater than this value.')
apProximityRecordRtnSingleArec = MibTableColumn((1, 3, 6, 1, 4, 1, 2467, 1, 40, 15, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("multiple", 0), ("single", 1))).clone('single')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apProximityRecordRtnSingleArec.setStatus('current')
if mibBuilder.loadTexts: apProximityRecordRtnSingleArec.setDescription('Determines if multiple A records will be returned in a Response for this DNS name.')
apProximityRecordStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2467, 1, 40, 15, 1, 9), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apProximityRecordStatus.setStatus('current')
if mibBuilder.loadTexts: apProximityRecordStatus.setDescription('Status entry for this row ')
mibBuilder.exportSymbols("DNSSERVEREXT-MIB", apDnsServerBufferCount=apDnsServerBufferCount, apProximityRecordTtl=apProximityRecordTtl, apProximityRecordEntry=apProximityRecordEntry, apProximityRecordKalThreshold=apProximityRecordKalThreshold, apProximityRecordStatus=apProximityRecordStatus, apProximityRecordType=apProximityRecordType, apDnsAclIndex=apDnsAclIndex, apDnsPeerRcvEntries=apDnsPeerRcvEntries, apDnsServerMib=apDnsServerMib, apProximityRecordRtnSingleArec=apProximityRecordRtnSingleArec, apDnsPeerSndEntries=apDnsPeerSndEntries, apProximityRecordKalType=apProximityRecordKalType, apProximityDescription=apProximityDescription, apProximityRecordKalAddr=apProximityRecordKalAddr, apProximityRecordTable=apProximityRecordTable, apProximityEnable=apProximityEnable, apDnsServerResponderTasks=apDnsServerResponderTasks, PYSNMP_MODULE_ID=apDnsServerMib, apProximityZoneMax=apProximityZoneMax, apProximityRecordName=apProximityRecordName, apDnsPeerReportInterval=apDnsPeerReportInterval, apProximityZone=apProximityZone, apProximityType=apProximityType, apProximityPDBIpAddr=apProximityPDBIpAddr, apProximityRecordAddr=apProximityRecordAddr, apDnsServerEnable=apDnsServerEnable)
|
py | 7dff301e816c0cca25a2acedb62b2156d396c156 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
r"""
Channel event manager for pulse schedules.
This module provides a `ChannelEvents` class that manages a series of instructions for a
pulse channel. Channel-wise filtering of the pulse program makes
the arrangement of channels easier in the core drawer function.
The `ChannelEvents` class is expected to be called by other programs (not by end-users).
The `ChannelEvents` class instance is created with the class method ``load_program``:
```python
event = ChannelEvents.load_program(sched, DriveChannel(0))
```
The `ChannelEvents` instance is created for a specific pulse channel and loosely groups
the pulse instructions within that channel according to their visualization purpose.
Phase and frequency related instructions are loosely grouped as frame changes.
The instantaneous value of those operands are combined and provided as ``PhaseFreqTuple``.
Instructions that have finite duration are grouped as waveforms.
The grouped instructions are returned as an iterator by the corresponding method call:
```python
for t0, frame, instruction in event.get_waveforms():
...
for t0, frame_change, instructions in event.get_frame_changes():
...
```
The class method ``get_waveforms`` returns the iterator of waveform type instructions with
the ``PhaseFreqTuple`` (frame) at the time when instruction is issued.
This is because a pulse envelope of ``Waveform`` may be modulated with a
phase factor $\exp(-i \omega t - i \phi)$ with frequency $\omega$ and phase $\phi$ and
appear on the canvas. Thus, it is better to tell users in which phase and frequency
the pulse envelope is modulated from a viewpoint of program debugging.
On the other hand, the class method ``get_frame_changes`` returns a ``PhaseFreqTuple`` that
represents a total amount of change at that time because it is convenient to know
the operand value itself when we debug a program.
Because frame change type instructions are usually zero duration, multiple instructions
can be issued at the same time and those operand values should be appropriately
combined. In Qiskit Pulse we have set and shift type instructions for the frame control,
the set type instruction will be converted into the relevant shift amount for visualization.
Note that these instructions are not interchangeable and the order should be kept.
For example:
```python
sched1 = Schedule()
sched1 = sched1.insert(0, ShiftPhase(-1.57, DriveChannel(0)))
sched1 = sched1.insert(0, SetPhase(3.14, DriveChannel(0)))
sched2 = Schedule()
sched2 = sched2.insert(0, SetPhase(3.14, DriveChannel(0)))
sched2 = sched2.insert(0, ShiftPhase(-1.57, DriveChannel(0)))
```
In this example, ``sched1`` and ``sched2`` will have different frames.
On the drawer canvas, the total frame change amount of +3.14 should be shown for ``sched1``,
while ``sched2`` shows +1.57. Since the ``SetPhase`` and ``ShiftPhase`` instructions behave
differently, we cannot simply sum up the operand values in visualization output.
It should be also noted that zero duration instructions issued at the same time will be
overlapped on the canvas. Thus it is convenient to plot a total frame change amount rather
than plotting each operand value bound to the instruction.
"""
from collections import defaultdict
from typing import Dict, List, Iterator, Tuple
from qiskit import pulse
from qiskit.visualization.pulse_v2.types import PhaseFreqTuple
class ChannelEvents:
"""Channel event manager.
"""
_waveform_group = tuple((pulse.instructions.Play,
pulse.instructions.Delay,
pulse.instructions.Acquire))
_frame_group = tuple((pulse.instructions.SetFrequency,
pulse.instructions.ShiftFrequency,
pulse.instructions.SetPhase,
pulse.instructions.ShiftPhase))
def __init__(self,
waveforms: Dict[int, pulse.Instruction],
frames: Dict[int, List[pulse.Instruction]],
channel: pulse.channels.Channel):
"""Create new event manager.
Args:
waveforms: List of waveforms shown in this channel.
frames: List of frame change type instructions shown in this channel.
channel: Channel object associated with this manager.
"""
self._waveforms = waveforms
self._frames = frames
self.channel = channel
# initial frame
self.init_phase = 0
self.init_frequency = 0
@classmethod
def load_program(cls,
program: pulse.Schedule,
channel: pulse.channels.Channel):
"""Load a pulse program represented by ``Schedule``.
Args:
program: Target ``Schedule`` to visualize.
channel: The channel managed by this instance.
Returns:
ChannelEvents: The channel event manager for the specified channel.
"""
waveforms = dict()
frames = defaultdict(list)
# parse instructions
for t0, inst in program.filter(channels=[channel]).instructions:
if isinstance(inst, cls._waveform_group):
waveforms[t0] = inst
elif isinstance(inst, cls._frame_group):
frames[t0].append(inst)
return ChannelEvents(waveforms, frames, channel)
def is_empty(self):
"""Check if there is any nonzero waveforms in this channel."""
for waveform in self._waveforms.values():
if isinstance(waveform, (pulse.instructions.Play, pulse.instructions.Acquire)):
return False
return True
def get_waveforms(self) -> Iterator[Tuple[int, PhaseFreqTuple, pulse.Instruction]]:
"""Return waveform type instructions with frame."""
sorted_frame_changes = sorted(self._frames.items(), key=lambda x: x[0], reverse=True)
sorted_waveforms = sorted(self._waveforms.items(), key=lambda x: x[0])
# bind phase and frequency with instruction
phase = self.init_phase
frequency = self.init_frequency
for t0, inst in sorted_waveforms:
while len(sorted_frame_changes) > 0 and sorted_frame_changes[-1][0] <= t0:
_, frame_changes = sorted_frame_changes.pop()
for frame_change in frame_changes:
if isinstance(frame_change, pulse.instructions.SetFrequency):
frequency = frame_change.frequency
elif isinstance(frame_change, pulse.instructions.ShiftFrequency):
frequency += frame_change.frequency
elif isinstance(frame_change, pulse.instructions.SetPhase):
phase = frame_change.phase
elif isinstance(frame_change, pulse.instructions.ShiftPhase):
phase += frame_change.phase
frame = PhaseFreqTuple(phase, frequency)
yield t0, frame, inst
def get_frame_changes(self) -> Iterator[Tuple[int, PhaseFreqTuple, List[pulse.Instruction]]]:
"""Return frame change type instructions with total frame change amount."""
sorted_frame_changes = sorted(self._frames.items(), key=lambda x: x[0])
phase = self.init_phase
frequency = self.init_frequency
for t0, insts in sorted_frame_changes:
pre_phase = phase
pre_frequency = frequency
for inst in insts:
if isinstance(inst, pulse.instructions.SetFrequency):
frequency = inst.frequency
elif isinstance(inst, pulse.instructions.ShiftFrequency):
frequency += inst.frequency
elif isinstance(inst, pulse.instructions.SetPhase):
phase = inst.phase
elif isinstance(inst, pulse.instructions.ShiftPhase):
phase += inst.phase
frame = PhaseFreqTuple(phase - pre_phase, frequency - pre_frequency)
yield t0, frame, insts
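# --- Illustrative usage sketch (an assumption, not part of the original module) ---
# Build a tiny schedule and inspect it channel by channel; pulse.Gaussian,
# pulse.Play, pulse.ShiftPhase and pulse.DriveChannel are standard Qiskit
# Pulse APIs, while the printed fields follow PhaseFreqTuple's (phase, freq)
# ordering.
#
#   sched = pulse.Schedule()
#   sched = sched.insert(0, pulse.ShiftPhase(1.57, pulse.DriveChannel(0)))
#   sched = sched.insert(0, pulse.Play(pulse.Gaussian(160, 0.1, 40), pulse.DriveChannel(0)))
#   events = ChannelEvents.load_program(sched, pulse.DriveChannel(0))
#   for t0, (phase, freq), inst in events.get_waveforms():
#       print(t0, phase, freq, inst)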
|
py | 7dff30a3f9b473b9bf6ffcf7f7b9245175a4b198 | class Solution:
def twoSum(self, nums, target: int):
for i in range(0, len(nums)):
for j in range(i + 1, len(nums)):
if nums[i] + nums[j] == target:
return [i, j]
return []
def twoSum2(self, nums, target: int):
d = {}
for i in range(len(nums)):
left = target - nums[i]
if left in d:
return [i, d[left]]
d[nums[i]] = i
return []
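# twoSum is the O(n^2) brute-force check of every pair; twoSum2 trades memory
# for speed by storing each seen value's index in a dict and looking up the
# complement (target - nums[i]) in O(1), giving O(n) time overall.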
s = Solution()
print(s.twoSum2([1,2,3], 5))
|