| Column | Type | Stats |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | lengths 3 to 281 |
| content_id | string | length 40 |
| detected_licenses | list | lengths 0 to 57 |
| license_type | string | 2 classes |
| repo_name | string | lengths 6 to 116 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k to 668M, nullable (⌀) |
| star_events_count | int64 | 0 to 102k |
| fork_events_count | int64 | 0 to 38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4 to 6.02M |
| extension | string | 78 classes |
| content | string | lengths 2 to 6.02M |
| authors | list | length 1 |
| author | string | lengths 0 to 175 |
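The `content` column holds the raw file text; the remaining columns carry repository, license, and GitHub activity metadata for each file. As a minimal sketch (assuming the dataset is published on the Hugging Face Hub; `org/python-code-dataset` below is a placeholder path, not the real identifier), records with this schema can be streamed and filtered like so:

```python
from datasets import load_dataset

# Placeholder Hub path; substitute the dataset's actual identifier.
ds = load_dataset("org/python-code-dataset", split="train", streaming=True)

# Stream a few permissively licensed, non-vendored files and print their metadata.
subset = ds.filter(lambda r: r["license_type"] == "permissive" and not r["is_vendor"])
for row in subset.take(3):
    print(row["repo_name"], row["path"], row["length_bytes"], "bytes")
```

Sample rows follow, pipe-separated in column order, with the file text in the `content` field.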
edcddd8e2f551a6693766bb167fef95132f4a54d
|
0e0c67d6eabd63653c02121d83ac1de863231cb6
|
/myblog/blog/migrations/0001_initial.py
|
297902279ecc0337ff08c570e48986f2864a62a8
|
[] |
no_license
|
ragyrad/DjangoLearn
|
cb22fee4a1f97ccf67421c97f5857fef7d3f1e95
|
0577a0488d7339d7a1a15e79bc331dc5869c06a3
|
refs/heads/master
| 2023-03-22T23:30:51.276045 | 2021-03-09T09:16:46 | 2021-03-09T09:16:46 | 332,638,284 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,365 |
py
|
# Generated by Django 3.1.6 on 2021-02-11 05:19
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=250)),
('slug', models.SlugField(max_length=250, unique_for_date='publish')),
('body', models.TextField()),
('publish', models.DateTimeField(default=django.utils.timezone.now)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('status', models.CharField(choices=[('draft', 'Draft'), ('published', 'Published')], default='draft', max_length=10)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_posts', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('publish',),
},
),
]
|
[
"[email protected]"
] | |
f82d1bfc18cf23dccc01d4ee011811e1f567837a
|
0092041336a420af59b73e2ab1bf6e7077b11f6e
|
/autoeq/constants.py
|
9e3aa99e634a4cadadc3b973ff61a777af07f613
|
[
"MIT"
] |
permissive
|
jaakkopasanen/AutoEq
|
e10280a5413a406623ddbc8b87ddf7953ffd020c
|
ab5869c8f4996f8eea88abca50a41510263ed098
|
refs/heads/master
| 2023-08-22T22:43:51.969927 | 2023-08-09T11:13:24 | 2023-08-09T11:13:24 | 123,807,729 | 11,367 | 2,940 |
MIT
| 2023-08-11T08:23:26 | 2018-03-04T16:37:35 |
Python
|
UTF-8
|
Python
| false | false | 9,711 |
py
|
# -*- coding: utf-8 -*-
import os
import math
DEFAULT_F_MIN = 20.0
DEFAULT_F_MAX = 20000.0
DEFAULT_STEP = 1.01
DEFAULT_MAX_GAIN = 6.0
DEFAULT_TREBLE_F_LOWER = 6000.0
DEFAULT_TREBLE_F_UPPER = 8000.0
DEFAULT_TREBLE_MAX_GAIN = 6.0
DEFAULT_TREBLE_GAIN_K = 1.0
DEFAULT_SMOOTHING_WINDOW_SIZE = 1 / 12
DEFAULT_SMOOTHING_ITERATIONS = 1
DEFAULT_TREBLE_SMOOTHING_F_LOWER = 6000.0
DEFAULT_TREBLE_SMOOTHING_F_UPPER = 8000.0
DEFAULT_TREBLE_SMOOTHING_WINDOW_SIZE = 2.0
DEFAULT_TREBLE_SMOOTHING_ITERATIONS = 1
DEFAULT_SOUND_SIGNATURE_SMOOTHING_WINDOW_SIZE = None
DEFAULT_FS = 44100
DEFAULT_BIT_DEPTH = 16
DEFAULT_PHASE = 'minimum'
DEFAULT_F_RES = 10.0
DEFAULT_TILT = 0.0
DEFAULT_BASS_BOOST_GAIN = 0.0
DEFAULT_BASS_BOOST_FC = 105.0
DEFAULT_BASS_BOOST_Q = 0.7
DEFAULT_TREBLE_BOOST_GAIN = 0.0
DEFAULT_TREBLE_BOOST_FC = 10000.0
DEFAULT_TREBLE_BOOST_Q = 0.7
DEFAULT_PEQ_OPTIMIZER_MIN_F = 20.0
DEFAULT_PEQ_OPTIMIZER_MAX_F = 20000.0
DEFAULT_PEQ_OPTIMIZER_MAX_TIME = None
DEFAULT_PEQ_OPTIMIZER_TARGET_LOSS = None
DEFAULT_PEQ_OPTIMIZER_MIN_CHANGE_RATE = None
DEFAULT_PEQ_OPTIMIZER_MIN_STD = 0.002
DEFAULT_FIXED_BAND_FILTER_MIN_GAIN = -12.0
DEFAULT_FIXED_BAND_FILTER_MAX_GAIN = 12.0
DEFAULT_PEAKING_FILTER_MIN_FC = 20.0
DEFAULT_PEAKING_FILTER_MAX_FC = 10000.0
DEFAULT_PEAKING_FILTER_MIN_Q = 0.18248 # AUNBandEq has maximum bandwidth of 5 octaves which is Q of 0.182479
DEFAULT_PEAKING_FILTER_MAX_Q = 6.0
DEFAULT_PEAKING_FILTER_MIN_GAIN = -20.0
DEFAULT_PEAKING_FILTER_MAX_GAIN = 20.0
DEFAULT_SHELF_FILTER_MIN_FC = 20.0
DEFAULT_SHELF_FILTER_MAX_FC = 10000.0
DEFAULT_SHELF_FILTER_MIN_Q = 0.4 # Shelf filters start to overshoot below 0.4
DEFAULT_SHELF_FILTER_MAX_Q = 0.7 # Shelf filters start to overshoot above 0.7
DEFAULT_SHELF_FILTER_MIN_GAIN = -20.0
DEFAULT_SHELF_FILTER_MAX_GAIN = 20.0
DEFAULT_BIQUAD_OPTIMIZATION_F_STEP = 1.02
DEFAULT_MAX_SLOPE = 18.0
DEFAULT_PREAMP = 0.0
DEFAULT_GRAPHIC_EQ_STEP = 1.0563 # Produces 127 samples with greatest frequency of 19871
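# Illustrative aside, not part of the original constants.py: the two derived
# numbers noted in the comments above follow from the bandwidth-to-Q relation
# Q = 2**(N/2) / (2**N - 1) for a bandwidth of N octaves, and from the
# multiplicative frequency grid that starts at 20 Hz.
# >>> round(2 ** (5 / 2) / (2 ** 5 - 1), 6)  # Q of a 5-octave bandwidth
# 0.182479
# >>> round(20 * 1.0563 ** 126)  # 127th (final) sample of the graphic EQ grid
# 19871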
ROOT_DIR = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
MOD_REGEX = r' \((sample|serial number) [a-zA-Z0-9\-]+\)$'
DBS = ['crinacle', 'headphonecom', 'innerfidelity', 'oratory1990', 'rtings']
HARMAN_OVEREAR_PREFERENCE_FREQUENCIES = [20.0, 21.0, 22.0, 24.0, 25.0, 27.0, 28.0, 30.0, 32.0, 34.0, 36.0, 38.0, 40.0, 43.0, 45.0, 48.0, 50.0, 53.0, 56.0, 60.0, 63.0, 67.0, 71.0, 75.0, 80.0, 85.0, 90.0, 95.0, 100.0, 106.0, 112.0, 118.0, 125.0, 132.0, 140.0, 150.0, 160.0, 170.0, 180.0, 190.0, 200.0, 212.0, 224.0, 236.0, 250.0, 265.0, 280.0, 300.0, 315.0, 335.0, 355.0, 375.0, 400.0, 425.0, 450.0, 475.0, 500.0, 530.0, 560.0, 600.0, 630.0, 670.0, 710.0, 750.0, 800.0, 850.0, 900.0, 950.0, 1000.0, 1060.0, 1120.0, 1180.0, 1250.0, 1320.0, 1400.0, 1500.0, 1600.0, 1700.0, 1800.0, 1900.0, 2000.0, 2120.0, 2240.0, 2360.0, 2500.0, 2650.0, 2800.0, 3000.0, 3150.0, 3350.0, 3550.0, 3750.0, 4000.0, 4250.0, 4500.0, 4750.0, 5000.0, 5300.0, 5600.0, 6000.0, 6300.0, 6700.0, 7100.0, 7500.0, 8000.0, 8500.0, 9000.0, 9500.0, 10000.0, 10600.0, 11200.0, 11800.0, 12500.0, 13200.0, 14000.0, 15000.0, 16000.0, 17000.0, 18000.0, 19000.0, 20000.0]
HARMAN_INEAR_PREFENCE_FREQUENCIES = [20.0, 21.2, 22.4, 23.6, 25.0, 26.5, 28.0, 30.0, 31.5, 33.5, 35.5, 37.5, 40.0, 42.5, 45.0, 47.5, 50.0, 53.0, 56.0, 60.0, 63.0, 67.0, 71.0, 75.0, 80.0, 85.0, 90.0, 95.0, 100.0, 106.0, 112.0, 118.0, 125.0, 132.0, 140.0, 150.0, 160.0, 170.0, 180.0, 190.0, 200.0, 212.0, 224.0, 236.0, 250.0, 265.0, 280.0, 300.0, 315.0, 335.0, 355.0, 375.0, 400.0, 425.0, 450.0, 475.0, 500.0, 530.0, 560.0, 600.0, 630.0, 670.0, 710.0, 750.0, 800.0, 850.0, 900.0, 950.0, 1000.0, 1060.0, 1120.0, 1180.0, 1250.0, 1320.0, 1400.0, 1500.0, 1600.0, 1700.0, 1800.0, 1900.0, 2000.0, 2120.0, 2240.0, 2360.0, 2500.0, 2650.0, 2800.0, 3000.0, 3150.0, 3350.0, 3550.0, 3750.0, 4000.0, 4250.0, 4500.0, 4750.0, 5000.0, 5300.0, 5600.0, 6000.0, 6300.0, 6700.0, 7100.0, 7500.0, 8000.0, 8500.0, 9000.0, 9500.0, 10000.0, 10600.0, 11200.0, 11800.0, 12500.0, 13200.0, 14000.0, 15000.0, 16000.0, 17000.0, 18000.0, 19000.0, 20000.0]
PREAMP_HEADROOM = 0.2
PEQ_CONFIGS = {
'10_BAND_GRAPHIC_EQ': {
'optimizer': {'min_std': 0.01},
'filter_defaults': {'q': math.sqrt(2), 'min_gain': -12.0, 'max_gain': 12.0, 'type': 'PEAKING'},
'filters': [{'fc': 31.25 * 2 ** i} for i in range(10)]
},
'31_BAND_GRAPHIC_EQ': {
'optimizer': {'min_std': 0.01},
'filter_defaults': {'q': 4.318473, 'min_gain': -12.0, 'max_gain': 12.0, 'type': 'PEAKING'},
'filters': [{'fc': 20 * 2 ** (i / 3), 'type': 'PEAKING'} for i in range(31)]
},
'10_PEAKING': {
'filters': [{'type': 'PEAKING'}] * 10
},
'8_PEAKING_WITH_SHELVES': {
'optimizer': {
'min_std': 0.008
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}, {
'type': 'HIGH_SHELF',
'fc': 10000.0,
'q': 0.7
}] + [{'type': 'PEAKING'}] * 8
},
'4_PEAKING_WITH_LOW_SHELF': {
'optimizer': {
'max_f': 10000.0,
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}] + [{'type': 'PEAKING'}] * 4
},
'4_PEAKING_WITH_HIGH_SHELF': {
'filters': [{
'type': 'HIGH_SHELF',
'fc': 10000.0,
'q': 0.7
}] + [{'type': 'PEAKING'}] * 4
},
'AUNBANDEQ': {
'optimizer': {
'min_std': 0.008
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}, {
'type': 'HIGH_SHELF',
'fc': 10000.0,
'q': 0.7
}] + [{
'type': 'PEAKING',
'min_fc': 20.0, # Can go to 16 Hz
'max_fc': 10000.0, # Can go to 20 kHz
'min_q': 0.182479, # Max bw of 5.0
'max_q': 10.0 # Min bw of 0.01 = 144.27 Q
}] * 8
},
'MINIDSP_2X4HD': {
'optimizer': {
'min_std': 0.008
},
'filter_defaults': {
'min_gain': -16.0,
'max_gain': 16.0,
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}, {
'type': 'HIGH_SHELF',
'fc': 10000.0,
'q': 0.7
}] + [{
'type': 'PEAKING',
'min_q': 0.5,
'max_q': 6.0,
'min_fc': 20.0,
'max_fc': 10000.0,
}] * 8
},
'MINIDSP_IL_DSP': {
'optimizer': {
'min_std': 0.008
},
'filter_defaults': {
'min_gain': -16.0,
'max_gain': 16.0,
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}, {
'type': 'HIGH_SHELF',
'fc': 10000.0,
'q': 0.7
}] + [{
'type': 'PEAKING',
'min_q': 0.5,
'max_q': 6.0,
'min_fc': 20.0,
'max_fc': 10000.0,
}] * 8
},
'NEUTRON_MUSIC_PLAYER': {
'optimizer': {
'min_std': 0.008
},
'filter_defaults': {
'min_gain': -12.0,
'max_gain': 12.0,
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}, {
'type': 'HIGH_SHELF',
'fc': 10000.0,
'q': 0.7
}] + [{
'type': 'PEAKING',
'min_q': 0.1,
'max_q': 5.0,
'min_fc': 20.0,
'max_fc': 10000.0,
}] * 8
},
'POWERAMP_EQUALIZER': {
'optimizer': {
'min_std': 0.008
},
'filter_defaults': {
'min_gain': -15.0,
'max_gain': 15.0,
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}, {
'type': 'HIGH_SHELF',
'fc': 10e3,
'q': 0.7
}] + [{
'type': 'PEAKING',
'min_q': 0.1,
'max_q': 12.0,
'min_fc': 20.0,
'max_fc': 10000.0,
}] * 8
},
'QUDELIX_5K': {
'optimizer': {
'min_std': 0.008
},
'filter_defaults': {
'min_gain': -12.0,
'max_gain': 12.0,
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}, {
'type': 'HIGH_SHELF',
'fc': 10e3,
'q': 0.7
}] + [{
'type': 'PEAKING',
'min_q': 0.1,
'max_q': 7.0,
'min_fc': 20.0,
'max_fc': 10000.0,
}] * 8
},
'SPOTIFY': {
'optimizer': {'min_std': 0.01},
'filters': [
{'fc': 60.0, 'q': 1.0, 'type': 'PEAKING'},
{'fc': 150.0, 'q': 1.0, 'type': 'PEAKING'},
{'fc': 400.0, 'q': 1.0, 'type': 'PEAKING'},
{'fc': 2400.0, 'q': 1.0, 'type': 'PEAKING'},
{'fc': 15000.0, 'q': 1.0, 'type': 'PEAKING'},
]
},
'USB_AUDIO_PLAYER_PRO': {
'optimizer': {
'min_std': 0.008
},
'filter_defaults': {
'min_gain': -20.0,
'max_gain': 20.0,
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}, {
'type': 'HIGH_SHELF',
'fc': 10000.0,
'q': 0.7
}] + [{
'type': 'PEAKING',
'min_q': 0.1,
'max_q': 10.0,
'min_fc': 20.0,
'max_fc': 10000.0,
}] * 8
},
}
|
[
"[email protected]"
] | |
a9ce27dab2091e921cd004331e4fd2bda5e1d9f0
|
913d05cc0c20b8c80b7fd1cd7a4da65a059a2f44
|
/utils.py
|
f2e7c30a3a1e5de42ee6fbbe5237f0b6298f6835
|
[] |
no_license
|
paksu/MERCURYCLAVE
|
6544fef4a1fedcf9bd121d577f813c83427ca6c8
|
2847ab8a749609261df4eccac6871faab8cd76d0
|
refs/heads/master
| 2021-07-12T02:28:04.718463 | 2017-05-21T11:06:34 | 2017-05-21T11:06:34 | 106,080,196 | 0 | 0 | null | 2017-10-07T07:43:34 | 2017-10-07T07:43:33 | null |
UTF-8
|
Python
| false | false | 502 |
py
|
from __future__ import print_function
import re
def print_error(err):
print("[ERROR]", err)
def print_info(inf):
print("[INFO]", inf)
def is_valid_b64(s):
    # Standard base64: groups of four alphabet chars, optional '=' padding at the end.
    validator = re.compile(
        '^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$')
    return validator.match(s) is not None
def is_valid_ascii(s):
try:
s.decode('ascii')
except UnicodeDecodeError:
return False
else:
return True
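# Illustrative doctest-style usage, not part of the original file:
# >>> is_valid_b64('aGVsbG8=')
# True
# >>> is_valid_b64('not base64!!')
# False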
|
[
"[email protected]"
] | |
ffd52c187b40075684ae17e912ffaad85f787083
|
82260f32dcf1597ddf4902b0b88b11c9d82ac1ae
|
/A6/6.1.py
|
1dbdc6f1e148660aba65b0ae4a6d80eface54fb9
|
[] |
no_license
|
jorgeacosta19/BrandTech_WebDev
|
ac0ff9c0ee024353b9f9c046b6104a2db3bcc7fc
|
1fd573ea1b0f67c6d654c9dbfe71c273b26a391e
|
refs/heads/main
| 2023-01-14T13:22:12.235950 | 2020-11-24T20:31:42 | 2020-11-24T20:31:42 | 301,190,543 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 91 |
py
|
# 1- Write a program that prints ‘Hello World’ to the screen.
print("Hello World")
|
[
"[email protected]"
] | |
dd2581b2b922761111f73de6a66b37bef9ca71ad
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/servicebus/latest/list_disaster_recovery_config_keys.py
|
25a135b1c7de1f742920f2d68de3190e3c721078
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 |
Apache-2.0
| 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null |
UTF-8
|
Python
| false | false | 6,888 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ListDisasterRecoveryConfigKeysResult',
'AwaitableListDisasterRecoveryConfigKeysResult',
'list_disaster_recovery_config_keys',
]
@pulumi.output_type
class ListDisasterRecoveryConfigKeysResult:
"""
Namespace/ServiceBus Connection String
"""
def __init__(__self__, alias_primary_connection_string=None, alias_secondary_connection_string=None, key_name=None, primary_connection_string=None, primary_key=None, secondary_connection_string=None, secondary_key=None):
if alias_primary_connection_string and not isinstance(alias_primary_connection_string, str):
raise TypeError("Expected argument 'alias_primary_connection_string' to be a str")
pulumi.set(__self__, "alias_primary_connection_string", alias_primary_connection_string)
if alias_secondary_connection_string and not isinstance(alias_secondary_connection_string, str):
raise TypeError("Expected argument 'alias_secondary_connection_string' to be a str")
pulumi.set(__self__, "alias_secondary_connection_string", alias_secondary_connection_string)
if key_name and not isinstance(key_name, str):
raise TypeError("Expected argument 'key_name' to be a str")
pulumi.set(__self__, "key_name", key_name)
if primary_connection_string and not isinstance(primary_connection_string, str):
raise TypeError("Expected argument 'primary_connection_string' to be a str")
pulumi.set(__self__, "primary_connection_string", primary_connection_string)
if primary_key and not isinstance(primary_key, str):
raise TypeError("Expected argument 'primary_key' to be a str")
pulumi.set(__self__, "primary_key", primary_key)
if secondary_connection_string and not isinstance(secondary_connection_string, str):
raise TypeError("Expected argument 'secondary_connection_string' to be a str")
pulumi.set(__self__, "secondary_connection_string", secondary_connection_string)
if secondary_key and not isinstance(secondary_key, str):
raise TypeError("Expected argument 'secondary_key' to be a str")
pulumi.set(__self__, "secondary_key", secondary_key)
@property
@pulumi.getter(name="aliasPrimaryConnectionString")
def alias_primary_connection_string(self) -> str:
"""
Primary connection string of the alias if GEO DR is enabled
"""
return pulumi.get(self, "alias_primary_connection_string")
@property
@pulumi.getter(name="aliasSecondaryConnectionString")
def alias_secondary_connection_string(self) -> str:
"""
Secondary connection string of the alias if GEO DR is enabled
"""
return pulumi.get(self, "alias_secondary_connection_string")
@property
@pulumi.getter(name="keyName")
def key_name(self) -> str:
"""
A string that describes the authorization rule.
"""
return pulumi.get(self, "key_name")
@property
@pulumi.getter(name="primaryConnectionString")
def primary_connection_string(self) -> str:
"""
Primary connection string of the created namespace authorization rule.
"""
return pulumi.get(self, "primary_connection_string")
@property
@pulumi.getter(name="primaryKey")
def primary_key(self) -> str:
"""
A base64-encoded 256-bit primary key for signing and validating the SAS token.
"""
return pulumi.get(self, "primary_key")
@property
@pulumi.getter(name="secondaryConnectionString")
def secondary_connection_string(self) -> str:
"""
Secondary connection string of the created namespace authorization rule.
"""
return pulumi.get(self, "secondary_connection_string")
@property
@pulumi.getter(name="secondaryKey")
def secondary_key(self) -> str:
"""
        A base64-encoded 256-bit secondary key for signing and validating the SAS token.
"""
return pulumi.get(self, "secondary_key")
class AwaitableListDisasterRecoveryConfigKeysResult(ListDisasterRecoveryConfigKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListDisasterRecoveryConfigKeysResult(
alias_primary_connection_string=self.alias_primary_connection_string,
alias_secondary_connection_string=self.alias_secondary_connection_string,
key_name=self.key_name,
primary_connection_string=self.primary_connection_string,
primary_key=self.primary_key,
secondary_connection_string=self.secondary_connection_string,
secondary_key=self.secondary_key)
def list_disaster_recovery_config_keys(alias: Optional[str] = None,
authorization_rule_name: Optional[str] = None,
namespace_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListDisasterRecoveryConfigKeysResult:
"""
Use this data source to access information about an existing resource.
:param str alias: The Disaster Recovery configuration name
:param str authorization_rule_name: The authorization rule name.
:param str namespace_name: The namespace name
:param str resource_group_name: Name of the Resource group within the Azure subscription.
"""
__args__ = dict()
__args__['alias'] = alias
__args__['authorizationRuleName'] = authorization_rule_name
__args__['namespaceName'] = namespace_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:servicebus/latest:listDisasterRecoveryConfigKeys', __args__, opts=opts, typ=ListDisasterRecoveryConfigKeysResult).value
return AwaitableListDisasterRecoveryConfigKeysResult(
alias_primary_connection_string=__ret__.alias_primary_connection_string,
alias_secondary_connection_string=__ret__.alias_secondary_connection_string,
key_name=__ret__.key_name,
primary_connection_string=__ret__.primary_connection_string,
primary_key=__ret__.primary_key,
secondary_connection_string=__ret__.secondary_connection_string,
secondary_key=__ret__.secondary_key)
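# Illustrative usage sketch, not part of the generated file; all argument
# values below are placeholders.
# keys = list_disaster_recovery_config_keys(
#     alias='example-alias',
#     authorization_rule_name='RootManageSharedAccessKey',
#     namespace_name='example-namespace',
#     resource_group_name='example-rg')
# print(keys.primary_connection_string)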
|
[
"[email protected]"
] | |
0d4217ba1b325e87c690927e48f1717142aec8e0
|
46f043d557eba57da5b8c3e9937e4dc84556ae65
|
/UDPserver.py
|
116b0fbf6c3d8c523d4d06c586ac5bf27a68ad5a
|
[] |
no_license
|
fengrenxiaoli/Mypython
|
822f397d89db1e511ba6785a404efea99dd8600b
|
3cb08e0b9e760f44068d31c151afacef21e099f8
|
refs/heads/master
| 2021-01-10T12:12:15.338696 | 2015-11-17T15:11:19 | 2015-11-17T15:11:19 | 44,948,136 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 240 |
py
|
import socket
# Simple UDP echo server: reply with a greeting to every datagram received.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('127.0.0.1', 9999))
print('Bind UDP on 9999...')
while True:
    data, addr = s.recvfrom(1024)
    print('Received from %s:%s' % addr)
    s.sendto(b'Hello,%s' % data, addr)
|
[
"[email protected]"
] | |
9aab50959e6376757d51b3fef3e88483eb1d7494
|
07c3124153a6909f19a21c3c664d8e3f8e0481d0
|
/fractals/sierpinski_triangle/sierpinski_triangle.py
|
aae6e3da8f1aaeec51acdaeab10b98c9d1557216
|
[] |
no_license
|
gridl/art-of-turtle-programming
|
94ed422a4e75f83e4c3abf7910ed9e5ed8a40aa9
|
db6b2c1059bffc9df468691c6ecf1c110b38aafd
|
refs/heads/master
| 2020-03-19T16:20:48.680667 | 2015-12-15T05:46:03 | 2015-12-15T05:46:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,240 |
py
|
from turtle import *
import math
tracer(1, 0)
setworldcoordinates(0, 0, 960, 810)
bgcolor(0.1, 0.1, 0.1)
BASE_SIZE = 13
BASE_HEIGHT = BASE_SIZE * math.sin(60 * (math.pi / 180))
START_X = 50
START_Y = 20
def draw_triangle(x, y, color):
penup()
pencolor(color)
goto(x, y) # go to bottom-left corner
pendown()
setheading(60)
forward(BASE_SIZE) # draw first side
right(120)
forward(BASE_SIZE) # draw second side
right(120)
forward(BASE_SIZE) # draw third side
def draw_sierpinski(x, y, level, color):
if level == 0:
draw_triangle(x, y, color)
draw_triangle(x + (BASE_SIZE * 0.5), y + BASE_HEIGHT, color)
draw_triangle(x + BASE_SIZE, y, color)
else:
draw_sierpinski(x, y, level - 1, color)
draw_sierpinski(x + (BASE_SIZE * 0.5 * (2 ** level)), y + (BASE_HEIGHT * (2 ** level)), level - 1, color)
draw_sierpinski(x + (BASE_SIZE * (2 ** level)), y, level - 1, color)
# loop from 5 to 0, drawing 5 sets of sierpinski triangles each with a different color
for i in range(5, -1, -1):
red = 1 - (0.2 * i)
green = 0.1 * i
blue = 0.1 * i
draw_sierpinski(START_X, START_Y, i, (red, green, blue))
hideturtle()
update()
exitonclick()
|
[
"[email protected]"
] | |
22d7e44524dc9cd48166afdf000431fc3f606e9a
|
6ca0d0be3f59b14e36a7262fdb6da929597dbcfc
|
/lorawan/user_agent/logger/log_main.py
|
e618b6941eec08bd11b4104ce6a739e16f3b15b0
|
[
"MIT"
] |
permissive
|
pablomodernell/lorawan_conformance_testing
|
79f12845840ef8b0f427743d760de9495ab36a9a
|
3e6b9028ee7a6a614e52bac684e396ecd04fd10c
|
refs/heads/master
| 2023-05-13T12:59:04.908279 | 2020-08-23T16:45:26 | 2020-08-23T16:45:26 | 280,359,564 | 1 | 0 |
MIT
| 2023-05-01T20:42:47 | 2020-07-17T07:39:34 |
HTML
|
UTF-8
|
Python
| false | false | 2,255 |
py
|
"""
Auxiliary functions for accessing the logging information generated by the
Test Application Server (TAS).
"""
#################################################################################
# MIT License
#
# Copyright (c) 2018, Pablo D. Modernell, Universitat Oberta de Catalunya (UOC),
# Universidad de la Republica Oriental del Uruguay (UdelaR).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#################################################################################
import click
import lorawan.user_agent.logger.loggers as loggers
def log_all():
""" Starts a logger that collects the messages from all the TAS services."""
logger_mock = loggers.LoggerAll()
print("Starting log.")
logger_mock.start_logging()
def log_nwk_forwarder():
""" Starts a logger that collects the messages from the Payload Forwarder service."""
logger_mock = loggers.PayloadForwarderLog()
print("Starting Payload Forwarder Service log.")
logger_mock.start_logging()
def log_test_session_coordinator():
""" Starts a logger that collects the messages from the Test Session Coordinatior service."""
logger_mock = loggers.TestServerLog()
print("Starting Test Server log.")
logger_mock.start_logging()
|
[
"[email protected]"
] | |
fa2debd4b7df01163deb530cc13213e4631ef425
|
67281f76d77308756c2530517e302475f596a834
|
/pythonscripts/set.py
|
ab49e035441d467e50e19aebfab1c956fde0f2dc
|
[] |
no_license
|
Surajprasanna/epsilon-python
|
5edac9a186e6298e8209f60bbe0ed24dffa68e2f
|
879063774e3d4bfa8d713ba26857f881e39aaa44
|
refs/heads/master
| 2020-04-05T20:28:05.402489 | 2018-11-14T11:14:34 | 2018-11-14T11:14:34 | 157,181,489 | 0 | 0 | null | 2018-11-12T08:37:54 | 2018-11-12T08:37:53 | null |
UTF-8
|
Python
| false | false | 177 |
py
|
#!/bin/python3
setA = {2,4,5,7,78,34,56,3}
setB = {2,4,3,9,10}
#print(setA)
#for i in setA:
# print(i)
print(dir(setA))
#print(setA.intersection(setB))
print(setA.union(setB))
|
[
"[email protected]"
] | |
6b51b24a86d97f35f69a59c8dbc0e913bf0876c9
|
cdf9ba7b329d66a1b664d505332d4a441f6bf075
|
/benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_pinned/cmp_mcf/power.py
|
ba961d5f8f3483e208416648d0c7e4f2c4795df5
|
[
"MIT"
] |
permissive
|
TugberkArkose/MLScheduler
|
3247c0bbc11c09261a3bad777f3940a465e5f15a
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
refs/heads/master
| 2021-03-27T19:11:44.207818 | 2020-03-19T11:32:08 | 2020-03-19T11:32:08 | 92,518,861 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 68,592 |
py
|
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.115405,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.19984,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.114614,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.429859,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.114073,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.08077,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00418352,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.030252,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0309397,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.030252,
'Execution Unit/Register Files/Runtime Dynamic': 0.0351232,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0731013,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.213101,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 1.28615,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000506958,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000506958,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000440908,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000170326,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000444452,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00189928,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00488396,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0297431,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.89192,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0581824,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.101021,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.20366,
'Instruction Fetch Unit/Runtime Dynamic': 0.19573,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0379509,
'L2/Runtime Dynamic': 0.00918222,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.39798,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.571277,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0375566,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0375566,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.57605,
'Load Store Unit/Runtime Dynamic': 0.79405,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0926082,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.185217,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0328669,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0334364,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.117632,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00953991,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.332951,
'Memory Management Unit/Runtime Dynamic': 0.0429763,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 16.7931,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.00590118,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0622644,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.0681656,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 2.39625,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0870089,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.140342,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.07084,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.298191,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0995127,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.01747,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00364955,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0263907,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0269906,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0263907,
'Execution Unit/Register Files/Runtime Dynamic': 0.0306402,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0555979,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.162075,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.09897,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000458365,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000458365,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000402941,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000158012,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000387723,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00170739,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00426236,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0259468,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.65044,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.050756,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0881269,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.94905,
'Instruction Fetch Unit/Runtime Dynamic': 0.170799,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0321542,
'L2/Runtime Dynamic': 0.007576,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.24982,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.497683,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0327632,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0327632,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.40453,
'Load Store Unit/Runtime Dynamic': 0.692023,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0807884,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.161577,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0286721,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0291546,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.102618,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00832216,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.307981,
'Memory Management Unit/Runtime Dynamic': 0.0374767,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.3007,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0039256,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0458316,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0497572,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.0566,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0869202,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.140199,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0707678,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.297887,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0994127,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.01728,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00364582,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0263642,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0269631,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0263642,
'Execution Unit/Register Files/Runtime Dynamic': 0.0306089,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.055542,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.16191,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.09847,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000457936,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000457936,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000402566,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000157866,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000387327,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00170576,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00425829,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0259203,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.64875,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0507027,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0880371,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.94729,
'Instruction Fetch Unit/Runtime Dynamic': 0.170624,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0321237,
'L2/Runtime Dynamic': 0.00756408,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.24879,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.497168,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0327299,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0327298,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.40335,
'Load Store Unit/Runtime Dynamic': 0.691309,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0807063,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.161412,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0286429,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0291248,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.102513,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00831343,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.307826,
'Memory Management Unit/Runtime Dynamic': 0.0374383,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.2973,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0039216,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0457848,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0497064,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.05511,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0868907,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.140151,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0707437,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.297786,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0993778,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.01721,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00364458,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.026355,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0269539,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.026355,
'Execution Unit/Register Files/Runtime Dynamic': 0.0305985,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0555225,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.161855,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.09831,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000457793,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000457793,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000402441,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000157818,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000387195,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00170522,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00425693,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0259115,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.64819,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0506849,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0880071,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.9467,
'Instruction Fetch Unit/Runtime Dynamic': 0.170566,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0321135,
'L2/Runtime Dynamic': 0.00756057,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.24844,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.496997,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0327187,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0327186,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.40295,
'Load Store Unit/Runtime Dynamic': 0.691073,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0806787,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.161357,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0286331,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.029115,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.102479,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00831051,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.307774,
'Memory Management Unit/Runtime Dynamic': 0.0374255,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.2962,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00392027,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0457692,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0496895,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.05462,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 5.739548837198542,
'Runtime Dynamic': 5.739548837198542,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.280118,
'Runtime Dynamic': 0.0738874,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 59.9674,
'Peak Power': 93.0796,
'Runtime Dynamic': 8.63648,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 59.6873,
'Total Cores/Runtime Dynamic': 8.56259,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.280118,
'Total L3s/Runtime Dynamic': 0.0738874,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}}
|
[
"[email protected]"
] | |
2d5ccf17197699d50e0b2fa57a4243eb7ca907aa
|
c609730a43596a2d3303f072fc97d9cf681fac7b
|
/cagey/carbuisness/main_currency_supply.py
|
ed84e5c37083ff51e2afabd4f2216adcf44c254f
|
[] |
no_license
|
sinnettluo/ChenProject
|
5403311c0c7b78c484145e16d692abff00d2a110
|
0e33ecf1683afb22f1deb4bd54294c41aed8a46b
|
refs/heads/master
| 2023-03-22T23:48:08.430178 | 2020-09-02T15:05:02 | 2020-09-02T15:05:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 198 |
py
|
from scrapy.cmdline import execute
import sys
import os
website = "currency_supply"
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
execute(["scrapy", "crawl", website])
|
[
"[email protected]"
] | |
9eeb4be1cb93ab85fd14e38c367ec1ba4dc52f74
|
a4e8849dfcbb64cb6b56b9eb45fb7e431c9cfdc0
|
/s061-repaso/p03.py
|
b8bb2e0aeace2f6522ad3a560ec74647955b7d7a
|
[] |
no_license
|
marvjaramillo/ulima-intro210-clases
|
96b546eb79fbe34dbfa3e5726b1b8ed57523e110
|
fef2d2ef487ef386196e0b9dd2fa66338de141bf
|
refs/heads/main
| 2023-04-27T13:56:57.898602 | 2023-04-19T13:17:06 | 2023-04-19T13:17:06 | 344,644,221 | 2 | 0 | null | 2021-03-05T00:08:57 | 2021-03-05T00:08:56 | null |
UTF-8
|
Python
| false | false | 1,466 |
py
|
'''
The minutes of lateness of a group of employees are stored in a dictionary
whose keys are the employee codes and whose values are lists with the minutes
of lateness per day.
Implement a program that receives this dictionary and a list of employee codes,
and shows the employee from that list with the highest accumulated minutes of
lateness.
Example:
dicc = {"E001": [5, 10, 3, 4], "E002": {}, "E003":[30, 10] }
lista = ["E001", "E003"]
E001 --> [5, 10, 3, 4] --> 22
E003 --> [30, 10] --> 40
Comparing the minutes of lateness, the employee with the most accumulated
minutes of lateness is "E003".
'''
def sumar_tardanzas(lista):
suma = 0
for i in range(len(lista)):
suma = suma + lista[i]
return suma
def mostrar_mayor_tardanza(dic_tardanzas, lista_empleados):
cod_elegido = ""
total_elegido = 0
for i in range(len(lista_empleados)):
cod_emp = lista_empleados[i]
tardanzas_emp = dic_tardanzas[cod_emp]
total_minutos = sumar_tardanzas(tardanzas_emp)
if(total_minutos > total_elegido):
total_elegido = total_minutos
cod_elegido = cod_emp
print("Empleado con mas minutos de tardanza:", cod_elegido)
print("Minutos de tardanza: ", total_elegido)
if __name__ == "__main__":
dicc = {"E001": [50, 10, 3, 4], "E002": {}, "E003":[30, 10] }
lista = ["E001", "E003"]
mostrar_mayor_tardanza(dicc, lista)
|
[
"[email protected]"
] | |
d69370d7a2f4e7087b2969610f4b97703dddf151
|
2f5e406579e965acb535183f4c4cb0e889db2ecd
|
/ExtraDataset.py
|
557cddf77b561247ca30c66f56771cc0edc5b273
|
[] |
no_license
|
rm3028/Deep-Generative-Model
|
7504296de65739e842274cec824ec045526a59d2
|
b7587c5f2f6aac0530d460e76e6c2614360bd570
|
refs/heads/master
| 2023-02-25T13:19:44.853641 | 2021-01-29T17:48:04 | 2021-01-29T17:48:04 | 329,917,999 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 671 |
py
|
import pandas as pd
from skimage import io
import torch
from torch.utils.data import Dataset
class ExtraDataset(Dataset):
def __init__(self, dataset_dir):
self.dataset_dir = dataset_dir
self.dataset_df = pd.read_csv(dataset_dir + '/tags.csv', names=['id', 'tag'])
def __len__(self):
return len(self.dataset_df)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
img_name = self.dataset_dir + '/images/' + str(self.dataset_df['id'][idx]) + '.jpg'
image = io.imread(img_name)
img_tag = self.dataset_df['tag'][idx]
return { 'image': image, 'tag': img_tag }
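# Minimal usage sketch (not part of the original file). The directory path is
# hypothetical; ExtraDataset expects '<dataset_dir>/tags.csv' and images named
# '<dataset_dir>/images/<id>.jpg', as read by __init__ and __getitem__ above.
if __name__ == '__main__':
    dataset = ExtraDataset('./extra_data')   # hypothetical dataset directory
    print('samples:', len(dataset))
    sample = dataset[0]                      # {'image': ndarray, 'tag': value from tags.csv}
    print(sample['image'].shape, sample['tag'])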
|
[
"[email protected]"
] | |
b03d463ca4f81654c0ca10f1a8a910e295f5ae85
|
8a6bac97182629f426e442308f6db53ee932e537
|
/venv/Lib/site-packages/django/contrib/gis/db/backends/oracle/adapter.py
|
40989df765a8ea953c4834167ea168d8fd853b8e
|
[] |
no_license
|
AmalioF96/DashBoard
|
8b8af75e7db7ab095c0cd05acb8b2b2764ab5fd5
|
4500a84a934fd5c24199d1864f0667c0d90e6174
|
refs/heads/master
| 2023-01-08T02:03:05.168925 | 2020-11-07T12:19:53 | 2020-11-07T12:19:53 | 230,789,973 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,507 |
py
|
from cx_Oracle import CLOB
from django.contrib.gis.db.backends.base.adapter import WKTAdapter
from django.contrib.gis.geos import GeometryCollection, Polygon
class OracleSpatialAdapter(WKTAdapter):
input_size = CLOB
def __init__(self, geom):
"""
Oracle requires that polygon rings are in proper orientation. This
affects spatial operations and an invalid orientation may cause
failures. Correct orientations are:
* Outer ring - counter clockwise
* Inner ring(s) - clockwise
"""
if isinstance(geom, Polygon):
self._fix_polygon(geom)
elif isinstance(geom, GeometryCollection):
self._fix_geometry_collection(geom)
self.wkt = geom.wkt
self.srid = geom.srid
def _fix_polygon(self, poly):
"""Fix single polygon orientation as described in __init__()."""
if poly.empty:
return poly
if not poly.exterior_ring.is_counterclockwise:
poly.exterior_ring = list(reversed(poly.exterior_ring))
for i in range(1, len(poly)):
if poly[i].is_counterclockwise:
poly[i] = list(reversed(poly[i]))
return poly
def _fix_geometry_collection(self, coll):
"""
Fix polygon orientations in geometry collections as described in
__init__().
"""
for i, geom in enumerate(coll):
if isinstance(geom, Polygon):
coll[i] = self._fix_polygon(geom)
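# Illustrative sketch (not part of Django): how the adapter normalizes ring
# orientation. Kept as a comment because this module is imported by Django
# itself; the coordinates are made up and evaluating them requires GEOS.
#
#   from django.contrib.gis.geos import Polygon
#   poly = Polygon(((0, 0), (0, 10), (10, 10), (10, 0), (0, 0)))  # clockwise shell
#   adapter = OracleSpatialAdapter(poly)  # exterior ring reversed to counter-clockwise
#   print(adapter.wkt)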
|
[
"[email protected]"
] | |
434f059f47cc43ee8c54755a5358bb465f552f55
|
36466c39d3ae94c2f936d4fdfe0fd4b034bbfa80
|
/3rdparty/tvm/python/tvm/relay/ir_pass.py
|
6de6437b9eb9aad573e7603f12fc20fde1da7c86
|
[
"Apache-2.0",
"Intel",
"LicenseRef-scancode-unknown-license-reference",
"BSL-1.0",
"MIT",
"BSD-2-Clause",
"Zlib",
"NCSA",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause-Views"
] |
permissive
|
zhouhuaman/dgt
|
ccc674dc6abb055eeb5b88eaa0177de3a051b362
|
a1df50efa3b635c20ddaa6bc5068e5f7bb863b5e
|
refs/heads/master
| 2022-11-27T21:53:05.980980 | 2020-01-13T09:33:14 | 2020-01-13T09:33:14 | 233,558,790 | 1 | 2 |
Apache-2.0
| 2022-11-23T15:05:17 | 2020-01-13T09:29:56 |
C++
|
UTF-8
|
Python
| false | false | 1,556 |
py
|
# pylint: disable=no-else-return,
# pylint: disable=unidiomatic-typecheck
"""The set of passes for Relay.
Exposes an interface for configuring the passes and scripting
them in Python.
"""
from . import _ir_pass
from . import _make
# pylint: disable=invalid-name
def infer_type(env, expr):
"""Infer the type of expr under the context of env.
Parameters
----------
env : relay.Environment
The global environment.
expr : relay.Expr
The input expression.
Returns
-------
checked_expr : relay.Expr
The checked expression.
"""
return _ir_pass.infer_type(env, expr)
well_formed = _ir_pass.well_formed
check_kind = _ir_pass.check_kind
free_vars = _ir_pass.free_vars
free_type_vars = _ir_pass.free_type_vars
def dead_code_elimination(e):
""" Remove expressions which does not effect the program result (dead code).
Parameters
----------
e: relay.Expr
The input Expression
Returns
-------
result: relay.Expr
An expression which is semantically equal to the input expression,
but with dead code removed.
"""
return _ir_pass.dead_code_elimination(e)
def alpha_equal(lhs, rhs):
"""Compare two Relay expr for structural equivalence (alpha equivalence).
Parameters
----------
lhs: relay.Expr
One of the input Expression.
rhs: relay.Expr
One of the input Expression.
Returns
-------
result: bool
True iff lhs is alpha equal to rhs.
"""
return bool(_make._alpha_equal(lhs, rhs))
|
[
"[email protected]"
] | |
a2c1d5da1c0a0a81f541829e0fa78e83503a4b56
|
7177274b29e5daece1c00585ec92090571b5cd28
|
/__init__.py
|
72734e593d1390178430c23e0923102259ae01af
|
[
"MIT"
] |
permissive
|
tmizu23/SlideShow_plugin
|
cdd76a973269fa016f95a1b02f0b090b63a61db8
|
8634728fe497d11cd81467dc5aa29aee101887af
|
refs/heads/master
| 2021-01-10T21:20:01.755222 | 2014-10-25T14:48:48 | 2014-10-25T14:48:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,506 |
py
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
SlideShow
A QGIS plugin
This Plugin is SlideShow
-------------------
begin : 2014-09-20
copyright : (C) 2014 by Takayuki Mizutani
email : [email protected]
git sha : $Format:%H$
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script initializes the plugin, making it known to QGIS.
"""
# noinspection PyPep8Naming
def classFactory(iface): # pylint: disable=invalid-name
"""Load SlideShow class from file SlideShow.
:param iface: A QGIS interface instance.
:type iface: QgsInterface
"""
#
from .slide_show import SlideShow
return SlideShow(iface)
|
[
"[email protected]"
] | |
244c6743b325be89e3cda486203303f568032386
|
8ea28a828b808acedb405670fa1be13f3ce1b463
|
/pyqtdeploy/sysroot/packages/pyqt3d.py
|
aba52d3b28fdd883d1c52b50b4988d66d839de32
|
[
"BSD-3-Clause"
] |
permissive
|
GreatFruitAndy/pyqtdeploy
|
bed2c784e9ce554ac448ae9355bf3ffb802b885a
|
ea1ade32f8f5bff203ae24400381f6697da2221e
|
refs/heads/master
| 2021-05-07T03:05:51.241234 | 2017-11-10T17:02:57 | 2017-11-10T17:02:57 | 110,604,244 | 1 | 0 | null | 2017-11-16T23:12:52 | 2017-11-13T21:26:41 |
Python
|
UTF-8
|
Python
| false | false | 3,206 |
py
|
# Copyright (c) 2017, Riverbank Computing Limited
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
from ... import AbstractPackage, PackageOption
class PyQt3DPackage(AbstractPackage):
""" The PyQt3D package. """
# The package-specific options.
options = [
PackageOption('source', str, required=True,
help="The archive containing the PyQt3D source code."),
]
def build(self, sysroot):
""" Build PyQt3D for the target. """
sysroot.progress("Building PyQt3D")
# Unpack the source.
archive = sysroot.find_file(self.source)
sysroot.unpack_archive(archive)
# Create a configuration file.
cfg = '''py_platform = {0}
py_inc_dir = {1}
py_pylib_dir = {2}
py_pylib_lib = {3}
py_sip_dir = {4}
[PyQt 5]
module_dir = {5}
'''.format(sysroot.target_py_platform, sysroot.target_py_include_dir,
sysroot.target_lib_dir, sysroot.target_py_lib,
sysroot.target_sip_dir,
os.path.join(sysroot.target_sitepackages_dir, 'PyQt5'))
disabled_features = sysroot.find_package('pyqt5').disabled_features
if disabled_features:
cfg += 'pyqt_disabled_features = {0}\n'.format(
' '.join(disabled_features))
cfg_name = 'pyqt3d-' + sysroot.target_arch_name + '.cfg'
with open(cfg_name, 'wt') as cfg_file:
cfg_file.write(cfg)
# Configure, build and install.
args = [sysroot.host_python, 'configure.py', '--static', '--qmake',
sysroot.host_qmake, '--sysroot', sysroot.sysroot_dir,
'--no-qsci-api', '--no-sip-files', '--no-stubs', '--configuration',
cfg_name, '--sip', sysroot.host_sip, '-c']
if sysroot.verbose_enabled:
args.append('--verbose')
sysroot.run(*args)
sysroot.run(sysroot.host_make)
sysroot.run(sysroot.host_make, 'install')
|
[
"[email protected]"
] | |
8df3b3f50a43565b98eb313b84920ee53a5850e9
|
c86b2d4e8431e35681e9725f6174042ad7411d5f
|
/Exercise_02/Shop/SH_10.py
|
ecfd62cbe230b3c2f2c659b55a98e198083c89a9
|
[] |
no_license
|
nadung65/Assignment_10
|
a44a04cd47838abf37634791e4aa4e67b93561d4
|
03faa49cba5a105475cc980001e60a88e8ff3dd8
|
refs/heads/main
| 2023-04-22T12:53:10.754476 | 2021-05-13T14:26:17 | 2021-05-13T14:26:17 | 367,067,897 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,774 |
py
|
import unittest
import time
from selenium import webdriver
PATH = "C:\Program Files\chromedriver_win32\chromedriver.exe"
class SH_10(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome(PATH)
def testSH_10(self):
driver = self.driver
driver.get('http://practice.automationtesting.in/')
driver.find_element_by_link_text('Shop').click()
# Check Add to cart button
driver.find_element_by_class_name('add_to_cart_button').click()
time.sleep(1)
cart_content = driver.find_element_by_xpath('//*[@id="wpmenucartli"]/a/span[1]').text
self.assertEqual('1 Item', cart_content, 'User can not view that book in menu!')
# Test clicking View Basket link
driver.find_element_by_link_text('View Basket').click()
current_url = driver.current_url
self.assertEqual('http://practice.automationtesting.in/basket/', current_url, 'Can not click View basket link!')
time.sleep(1)
# Check if subtotal < total
subtotal = float(driver.find_element_by_css_selector('.cart-subtotal td span').text[1:])
total = float(driver.find_element_by_css_selector('.order-total td span').text[1:])
self.assertTrue(subtotal < total, "Subtotal is not less than total!")
# Test Check out button
driver.find_element_by_class_name('checkout-button').click()
current_url = driver.current_url
self.assertEqual('http://practice.automationtesting.in/checkout/', current_url, "Can not navigate to check out page!")
# Fill details in check out page
driver.find_element_by_id('billing_first_name').send_keys('AD')
driver.find_element_by_id('billing_last_name').send_keys('Nguyen')
driver.find_element_by_id('billing_email').send_keys('[email protected]')
driver.find_element_by_id('billing_phone').send_keys('0123456789')
driver.find_element_by_id('select2-chosen-1').click()
driver.find_element_by_id('s2id_autogen1_search').send_keys('Vietnam')
driver.find_element_by_class_name('select2-match').click()
driver.find_element_by_id('billing_address_1').send_keys('Nam Ky Khoi Nghia')
driver.find_element_by_id('billing_city').send_keys('Danang')
driver.find_element_by_id('payment_method_cod').click()
# Test Place order button
driver.find_element_by_id('place_order').click()
time.sleep(3)
message = driver.find_element_by_class_name('woocommerce-thankyou-order-received').text
self.assertEqual('Thank you. Your order has been received.', message, "Fail to check out!")
def tearDown(self):
self.driver.close()
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
73a212ad058bfe0804c7b0bca1a93042ce35c082
|
8783d015169267c27062a231c33aa7450fc7153d
|
/hackers_rank/euler/0013_large_sum.py
|
c36466ed1a90eb344d6aadd42097768775c0189f
|
[] |
no_license
|
thangarajan8/misc_python
|
51619e932ffd972be78a23b62ad69b34f84f035d
|
b00ad259e240a3897348bc80fb9040a257db208f
|
refs/heads/master
| 2021-06-26T02:14:13.613212 | 2021-02-05T04:35:25 | 2021-02-05T04:35:25 | 209,036,549 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 369 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 7 17:16:29 2019
@author: Thanga
"""
a = [37107287533902102798797998220837590246510135740250,
46376937677490009712648124896970078050417018260538,
74324986199524741059474233309513058123726617309629,
91942213363574161572522430563301811072406154908250,
23067588207539346171171980310421047513778063246676]
str(sum(a))[:10]
|
[
"[email protected]"
] | |
35614a4b8e4a335c54fd174d3cf65ff29c823483
|
db9ff8accaa4d8d4a96d3f9122c0fdc5e83ea2a5
|
/test/test_price_quantity.py
|
12635c2d23b1dcacf3ca517e059fcaba37c32bd5
|
[] |
no_license
|
agtt/ebay-openapi-inventory
|
4754cdc8b6765acdb34f6b8f89b017ccbc6b1d2b
|
d990c26f16e811431892ac6401c73c4599c2d414
|
refs/heads/master
| 2023-06-17T10:53:43.204075 | 2021-07-14T18:32:38 | 2021-07-14T18:32:38 | 386,039,734 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,200 |
py
|
"""
Inventory API
The Inventory API is used to create and manage inventory, and then to publish and manage this inventory on an eBay marketplace. There are also methods in this API that will convert eligible, active eBay listings into the Inventory API model. # noqa: E501
The version of the OpenAPI document: 1.13.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import openapi_client
from openapi_client.model.offer_price_quantity import OfferPriceQuantity
from openapi_client.model.ship_to_location_availability import ShipToLocationAvailability
globals()['OfferPriceQuantity'] = OfferPriceQuantity
globals()['ShipToLocationAvailability'] = ShipToLocationAvailability
from openapi_client.model.price_quantity import PriceQuantity
class TestPriceQuantity(unittest.TestCase):
"""PriceQuantity unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPriceQuantity(self):
"""Test PriceQuantity"""
# FIXME: construct object with mandatory attributes with example values
# model = PriceQuantity() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
e59eaebb53a1dd0de0208e35718b32e92973811d
|
b7126fb70f72fea0e7bba6fe2fef6925302ef07b
|
/tceh5_opp/self_work/task1.py
|
735da977c22bdb199e6944c42bfec6b0ac104bb8
|
[] |
no_license
|
duk1edev/tceh
|
79cd909c5a6221a2ca77d342b917462345140faa
|
21649d42488883beb58d709f4a9d1a05c75d2900
|
refs/heads/master
| 2021-07-12T10:20:22.330005 | 2020-04-29T09:24:08 | 2020-04-29T09:24:08 | 239,434,484 | 0 | 0 | null | 2021-03-20T03:38:26 | 2020-02-10T05:25:33 |
Python
|
UTF-8
|
Python
| false | false | 1,781 |
py
|
# 1. Create a Trash (basket) class whose capacity can be set differently
#    for different objects. Different items can be placed into it.
# 2. Create a Packet (bag) class into which items can also be placed; it also has a capacity.
# 3. Any class of item should be placeable into both the trash and the packet.
# 4. If the capacity is insufficient, report that the object cannot be placed.
class Trash:
def __init__(self, set_size):
self.size = set_size
def get_obj(self, obj):
if obj.size > self.size:
print('You could not put this stuff({} size) to that trash, \n'
'trash size is {}'.format(obj.size, self.size))
else:
print('You put the {} size {} to the trash'.format(obj, obj.size))
class Packet(Trash):
def __init__(self, set_size):
self.size = set_size
def get_obj(self, obj):
if obj.size > self.size:
print('You could not put this stuff({} size) to that packet, \n'
'packet size is {}'.format(obj.size, self.size))
else:
print('You put the {} size {} to the packet'.format(obj, obj.size))
class SomeStuff:
def __init__(self, set_size):
self.size = set_size
small_trash = Trash(5)
middle_trash = Trash(10)
big_trash = Trash(50)
small_packet = Packet(3)
middle_packet = Packet(5)
big_packet = Packet(10)
apple = SomeStuff(25)
print(apple.size)
garbage = SomeStuff(50)
small_trash.get_obj(apple)
big_trash.get_obj(garbage)
big_packet.get_obj(garbage)
|
[
"[email protected]"
] | |
5804b448d279b66e3077be6b2016ef4e6230d463
|
46279163a543cd8820bdc38133404d79e787c5d2
|
/benchmarks/tensorexpr/reduction.py
|
bc3e4e158a1750a0c9732c91297461f01ff5126b
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
erwincoumans/pytorch
|
31738b65e7b998bfdc28d0e8afa7dadeeda81a08
|
ae9f39eb580c4d92157236d64548b055f71cf14b
|
refs/heads/master
| 2023-01-23T10:27:33.628897 | 2020-12-06T01:22:00 | 2020-12-06T01:23:40 | 318,930,000 | 5 | 1 |
NOASSERTION
| 2020-12-06T01:58:57 | 2020-12-06T01:58:56 | null |
UTF-8
|
Python
| false | false | 5,706 |
py
|
from . import benchmark
class ReduceBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, case, M, N, K):
super().__init__(mode, device, dtype)
self.case = case
self.M = M
self.N = N
self.K = K
self.inputs = [self.randn(
[M, N, K], device=device, dtype=dtype, requires_grad=self.requires_grad
)]
if case == "row":
self.dims = [1, 2]
elif case == "mid":
self.dims = [0, 2]
elif case == "col":
self.dims = [0, 1]
else:
raise ValueError("invalid case: %s" % case)
def forward(self, inputs):
x = self.add(inputs, 0.001)
y = self.sum(x, self.dims)
return y
def config(self):
return [self.M, self.N, self.K]
@staticmethod
def default_configs():
return [
# [512, 512, 512],
[512, 64, 512],
]
@staticmethod
def module():
return "reduce"
def memory_workload(self):
if self.mode == "fwd":
sol_count = 1
algorithmic_count = 1
else:
sol_count = (1) + (1)
algorithmic_count = 1 + 1
buffer_size = self.M * self.N * self.K
return {
"sol": buffer_size * sol_count,
"algorithmic": buffer_size * algorithmic_count,
}
class ReduceRowBench(ReduceBench):
def __init__(self, mode, device, dtype, M, N, K):
super(ReduceRowBench, self).__init__(mode, device, dtype, "row", M, N, K)
@staticmethod
def module():
return "reduce_row"
class ReduceMidBench(ReduceBench):
def __init__(self, mode, device, dtype, M, N, K):
super(ReduceMidBench, self).__init__(mode, device, dtype, "mid", M, N, K)
@staticmethod
def module():
return "reduce_mid"
class ReduceColBench(ReduceBench):
def __init__(self, mode, device, dtype, M, N, K):
super(ReduceColBench, self).__init__(mode, device, dtype, "col", M, N, K)
@staticmethod
def module():
return "reduce_col"
class Reduce2DBench(benchmark.Benchmark):
'''
A benchmark class to validate 2 dimensional reduction performance.
Only a simple add is fused to induce the fuser and isolate reduction perf.
'''
def __init__(self, mode, device, dtype, red_dim, dim0, dim1):
super().__init__(mode, device, dtype)
self.red_dim = red_dim
self.dim0 = dim0
self.dim1 = dim1
self.inputs = [self.randn(
[dim0, dim1], device=device, dtype=dtype, requires_grad=self.requires_grad
)]
if red_dim != 0 and red_dim != 1 :
raise ValueError("invalid reduction dimension: {}".format(red_dim))
def forward(self, inputs):
x = self.add(inputs, 0.001)
y = self.sum(x, [self.red_dim])
return y
def config(self):
return [self.red_dim, self.dim0, self.dim1]
@staticmethod
def default_configs():
return [
[1, 640, 524288],
]
@staticmethod
def module():
return "reduce2d"
@staticmethod
def input_iterable() :
return True
def memory_workload(self):
assert self.mode == "fwd", "Only the forward operation is modeled!"
buffer_size = self.dim0 * self.dim1
if self.red_dim == 0 :
buffer_size += self.dim1
else :
buffer_size += self.dim0
return {
"sol": buffer_size,
"algorithmic": buffer_size,
}
class Reduce2DInnerBench(Reduce2DBench):
def __init__(self, mode, device, dtype, dim0, dim1):
super(Reduce2DInnerBench, self).__init__(mode, device, dtype, 1, dim0, dim1)
@staticmethod
def module():
return "reduce2d_inner"
class Reduce2DOuterBench(Reduce2DBench):
def __init__(self, mode, device, dtype, dim0, dim1):
super(Reduce2DOuterBench, self).__init__(mode, device, dtype, 0, dim0, dim1)
@staticmethod
def module():
return "reduce2d_outer"
benchmark.register_benchmark_class(ReduceRowBench)
benchmark.register_benchmark_class(ReduceMidBench)
benchmark.register_benchmark_class(ReduceColBench)
benchmark.register_benchmark_class(Reduce2DInnerBench)
benchmark.register_benchmark_class(Reduce2DOuterBench)
class DynamicReduce2DBench(benchmark.DynamicShape, Reduce2DBench):
'''
A benchmark class to validate 2 dimensional reduction performance.
Only a simple add is fused to induce the fuser and isolate reduction perf.
'''
def __init__(self, mode, device, dtype, red_dim, dim0, dim1):
benchmark.DynamicShape.__init__(self)
Reduce2DBench.__init__(self, mode, device, dtype, red_dim, dim0, dim1)
def instantiate_input(self):
dim0, dim1 = self.rand_shape([self.dim0, self.dim1])
self.inputs = [self.randn(
[dim0, dim1], device=self.device, dtype=self.dtype, requires_grad=self.requires_grad
)]
@staticmethod
def module():
return "dynamicreduce2d"
class DynamicReduce2DInnerBench(DynamicReduce2DBench):
def __init__(self, mode, device, dtype, dim0, dim1):
super().__init__(mode, device, dtype, 1, dim0, dim1)
@staticmethod
def module():
return "reduce2d_dynamic_inner"
class DynamicReduce2DOuterBench(DynamicReduce2DBench):
def __init__(self, mode, device, dtype, dim0, dim1):
super().__init__(mode, device, dtype, 0, dim0, dim1)
@staticmethod
def module():
return "reduce2d_dynamic_outer"
benchmark.register_benchmark_class(DynamicReduce2DInnerBench)
benchmark.register_benchmark_class(DynamicReduce2DOuterBench)
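# Worked example (illustrative note, not part of the original benchmark suite):
# for the default ReduceBench config [M, N, K] = [512, 64, 512] in "fwd" mode,
# memory_workload() uses sol_count = algorithmic_count = 1, so both figures
# equal the element count of the input buffer.
if __name__ == "__main__":
    M, N, K = 512, 64, 512
    buffer_size = M * N * K                 # 16,777,216 elements
    print("sol =", buffer_size)             # sol_count = 1 in fwd mode
    print("algorithmic =", buffer_size)     # algorithmic_count = 1 in fwd mode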
|
[
"[email protected]"
] | |
9a2ea1d5b16e6bceebfb05ef2b319e294caf9509
|
f61208a1bb90c03c2a6c4540c04623d9c2a77064
|
/python labs/hackerrank/percentage.py
|
3f151c38e935d737f7360773b3c8c44a2492f4bc
|
[] |
no_license
|
albinai/Wd
|
f49b39ae8387fd02d04c5721b9505ebc1c6897da
|
2d2e315327cf60c1943da3b8ca29017d07fc3843
|
refs/heads/master
| 2020-12-29T06:02:27.177059 | 2020-04-09T23:54:49 | 2020-04-09T23:54:49 | 238,482,757 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 312 |
py
|
if __name__ == '__main__':
n = int(input())
student_marks = {}
for _ in range(n):
name, *line = input().split()
scores = list(map(float, line))
scores=sum(scores)/3
student_marks[name] = scores
query_name = input()
print('%.2f' % student_marks[query_name])
|
[
"[email protected]"
] | |
1d50b61828a456cb2f62f40d2b4df66539beed6a
|
262867f5676720d60387d39028079ba564bb0d87
|
/bot_news/ml_news/ml_news/ml.py
|
9110b160ffc7066ad520b72b573909cc937ae916
|
[] |
no_license
|
carlosb1/projects-rust
|
665da7a98a3c73bb6d23208f63718deb888e4f6b
|
43415681cd15a5a3745f135173654eba79fe6908
|
refs/heads/master
| 2023-09-03T15:46:34.422455 | 2023-08-18T20:53:24 | 2023-08-18T20:53:24 | 163,627,222 | 5 | 0 | null | 2023-03-24T23:41:54 | 2018-12-31T00:26:47 |
Rust
|
UTF-8
|
Python
| false | false | 872 |
py
|
from transformers import AutoTokenizer, AutoConfig
from transformers import AutoModelForSequenceClassification
from transformers import TextClassificationPipeline
def model_fn(name_model):
tokenizer = AutoTokenizer.from_pretrained(name_model)
model = AutoModelForSequenceClassification.from_pretrained(name_model)
return model, tokenizer
def predict_fn(input_data, model):
trained_model, tokenizer = model
pipe = TextClassificationPipeline(model=trained_model, tokenizer=tokenizer)
output = pipe(input_data)
return output
SENTIMENT_MODEL = 'nlptown/bert-base-multilingual-uncased-sentiment'
class MyBertTransformerSentimentAnalysis():
def __init__(self, name_model: str = SENTIMENT_MODEL):
self.model_tuple = model_fn(name_model)
def run(self, input_data: str) -> dict:
        return predict_fn(input_data, self.model_tuple)
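# Minimal usage sketch (assumes the pretrained weights can be fetched from the
# Hugging Face hub; the input sentence is made up for illustration).
if __name__ == "__main__":
    analyzer = MyBertTransformerSentimentAnalysis()
    print(analyzer.run("The delivery was fast and the product works great."))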
|
[
"[email protected]"
] | |
38968e8d9f98d633ef3f2e85e0e1b808a3a42451
|
be3f8597b2d3224c7a6d9d64eba54b382f3e5936
|
/WebApp/TextRank.py
|
798e266b8092c584de82cc4b02a3b9fb45e010e9
|
[] |
no_license
|
ya2366/unilever_nlp_capstone
|
a979e7717af1e97a83a36dbb30f89be5cfe23cff
|
5df3d094765ae01874fe66b8b3579aca02648e99
|
refs/heads/master
| 2021-09-02T10:44:28.980591 | 2018-01-02T01:37:56 | 2018-01-02T01:37:56 | 113,112,355 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,973 |
py
|
"""
From this paper: https://web.eecs.umich.edu/~mihalcea/papers/mihalcea.emnlp04.pdf
External dependencies: nltk, numpy, networkx
Based on https://gist.github.com/voidfiles/1646117
"""
import io
import nltk
import itertools
from operator import itemgetter
import networkx as nx
import os
# apply syntactic filters based on POS tags
def filter_for_tags(tagged, tags=['NN', 'JJ', 'NNP']):
return [item for item in tagged if item[1] in tags]
def normalize(tagged):
return [(item[0].replace('.', ''), item[1]) for item in tagged]
def unique_everseen(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
for element in itertools.filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def lDistance(firstString, secondString):
"Function to find the Levenshtein distance between two words/sentences - gotten from http://rosettacode.org/wiki/Levenshtein_distance#Python"
if len(firstString) > len(secondString):
firstString, secondString = secondString, firstString
distances = range(len(firstString) + 1)
for index2, char2 in enumerate(secondString):
newDistances = [index2 + 1]
for index1, char1 in enumerate(firstString):
if char1 == char2:
newDistances.append(distances[index1])
else:
newDistances.append(1 + min((distances[index1], distances[index1 + 1], newDistances[-1])))
distances = newDistances
return distances[-1]
def buildGraph(nodes):
"nodes - list of hashables that represents the nodes of the graph"
gr = nx.Graph() # initialize an undirected graph
gr.add_nodes_from(nodes)
nodePairs = list(itertools.combinations(nodes, 2))
# add edges to the graph (weighted by Levenshtein distance)
for pair in nodePairs:
firstString = pair[0]
secondString = pair[1]
levDistance = lDistance(firstString, secondString)
gr.add_edge(firstString, secondString, weight=levDistance)
return gr
def extractKeyphrases(text,top_n):
# tokenize the text using nltk
wordTokens = nltk.word_tokenize(text)
print("Tokenized Words")
# assign POS tags to the words in the text
tagged = nltk.pos_tag(wordTokens)
textlist = [x[0] for x in tagged]
print("Pos Tagging")
tagged = filter_for_tags(tagged)
tagged = normalize(tagged)
unique_word_set = unique_everseen([x[0] for x in tagged])
word_set_list = list(unique_word_set)
# this will be used to determine adjacent words in order to construct keyphrases with two words
graph = buildGraph(word_set_list)
    print("Graph Built")
    # PageRank - initial value of 1.0, error tolerance of 0.0001
    calculated_page_rank = nx.pagerank(graph, weight='weight')
    print("PageRank Computed")
# most important words in ascending order of importance
keyphrases = sorted(calculated_page_rank, key=calculated_page_rank.get, reverse=True)
# the number of keyphrases returned will be relative to the size of the text (a third of the number of vertices)
aThird = int(len(word_set_list) / 3)
keyphrases = keyphrases[0:aThird + 1]
# take keyphrases with multiple words into consideration as done in the paper - if two words are adjacent in the text and are selected as keywords, join them
# together
modifiedKeyphrases = set([])
dealtWith = set([]) # keeps track of individual keywords that have been joined to form a keyphrase
i = 0
j = 1
while j < len(textlist):
firstWord = textlist[i]
secondWord = textlist[j]
if firstWord in keyphrases and secondWord in keyphrases:
keyphrase = firstWord + ' ' + secondWord
modifiedKeyphrases.add(keyphrase)
dealtWith.add(firstWord)
dealtWith.add(secondWord)
else:
if firstWord in keyphrases and firstWord not in dealtWith:
modifiedKeyphrases.add(firstWord)
# if this is the last word in the text, and it is a keyword,
# it definitely has no chance of being a keyphrase at this point
if j == len(textlist) - 1 and secondWord in keyphrases and secondWord not in dealtWith:
modifiedKeyphrases.add(secondWord)
i = i + 1
j = j + 1
result=list(modifiedKeyphrases)
if top_n>len(result):
return_result=result
else:
return_result=result[0:top_n]
return return_result
def extractSentences(text):
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
sentenceTokens = sent_detector.tokenize(text.strip())
graph = buildGraph(sentenceTokens)
calculated_page_rank = nx.pagerank(graph, weight='weight')
# most important sentences in ascending order of importance
sentences = sorted(calculated_page_rank, key=calculated_page_rank.get, reverse=True)
# return a 100 word summary
summary = ' '.join(sentences)
summaryWords = summary.split()
summaryWords = summaryWords[0:101]
summary = ' '.join(summaryWords)
return summary
def writeFiles(summary, keyphrases, fileName):
"outputs the keyphrases and summaries to appropriate files"
print("Generating output to " + 'keywords/' + fileName)
keyphraseFile = io.open('keywords/' + fileName, 'w')
for keyphrase in keyphrases:
keyphraseFile.write(keyphrase + '\n')
keyphraseFile.close()
    print("Generating output to " + 'summaries/' + fileName)
summaryFile = io.open('summaries/' + fileName, 'w')
summaryFile.write(summary)
summaryFile.close()
print("-")
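# Illustrative usage sketch (the file path below is hypothetical; assumes the nltk
# 'punkt' and POS-tagger data have been downloaded and the output directories exist):
# text = io.open('articles/sample.txt', 'r').read()
# keyphrases = extractKeyphrases(text, top_n=10)
# summary = extractSentences(text)
# writeFiles(summary, keyphrases, 'sample.txt')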
|
[
"[email protected]"
] | |
26cb0c372639eca1917f3f89ff693d0b6ea8e6c8
|
c6c0ed7585ee7dbdb328e23ffd6f9f8e007b3356
|
/python/everything_app/trainer.py
|
cc842a06dc85bcf616831906fc6132a791114daf
|
[] |
no_license
|
yoavilovich/Everything_Test_App
|
51fe18d8a35d0899b109cae307292b4c7030973a
|
4d8c73c415fcfbed852ab57ff7efa0b332e5eb0b
|
refs/heads/master
| 2021-01-18T14:10:38.728437 | 2013-02-25T20:02:09 | 2013-02-25T20:02:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,752 |
py
|
'''
Created on Feb 25, 2013
@author: yoav
'''
import json
import nltk
import math
import urllib
import os, sys
### Trainer extracts a relevant dictionary from the training set, and creates the occurrence matrix of the words in the movie plot
def get_training_set(): #extracts the training set from file into a python list
data = []
dirname, filename = os.path.split(os.path.abspath(sys.argv[0]))
path=os.path.join(dirname, "movies_train.json")
with open(path) as f:
for line in f:
data.append(json.loads(line))
return data
def get_dictionary(data):
# finds the most common words from combining all plots together,
# and creates a dictionary. Returns a list of all plots in training
# set and a list of all words (tokens) in all the plots
plots=[]
tokens=[]
for movie in data:
plots.append(movie["plot"])
#tokenized_movie_plot=nltk.word_tokenize(movie["plot"])
tokens=nltk.word_tokenize("".join(plots))
    tokens = [t.lower() for t in tokens]
#tokens.append(tokenized_movie_plot)
token_dist = nltk.FreqDist(tokens)
    dictionary = [w for w, _ in token_dist.most_common(500)[50:]]
#dictionary_values = token_dist.values()[50:500]
return (plots,tokens,dictionary)
def get_genre_dictionary (data): #return a genre dictionary, i.e, all the possible genres
all_generes=[]
for movie in data:
movie_generes=movie["genres"]
for genre in movie_generes:
all_generes.append(genre)
#get unique categories
genre_dist = nltk.FreqDist(all_generes)
    return list(genre_dist.keys())
#gets the indexes of the movies in genre c
def get_genre_indexes(c,dictionary,genre_dictionary):
selected_movie_genre=genre_dictionary[c]
genre_indexes=[]
for index,movie in enumerate(data):
movie_generes=movie["genres"]
for genre in movie_generes:
if genre==selected_movie_genre:
genre_indexes.append(index)
return genre_indexes
#the distribution of genres in train corpus, as probability
def get_genre_probability(c,dictionary,genre_dictionary):
return float(len(get_genre_indexes(c,dictionary,genre_dictionary)))/float(len(data))
#helper function for arithmetic
def Nic(i,c,dictionary,genre_dictionary):
Nic=0
indexes = get_genre_indexes(c,dictionary,genre_dictionary)
for j in range(len(indexes)):
if dictionary[i] in plots[indexes[j]]:
Nic+=1
return Nic
#helper function for arithmetic
def Nc(c,dictionary,genre_dictionary):
number_of_movies_in_genre=len(get_genre_indexes(c,dictionary,genre_dictionary))
return number_of_movies_in_genre
#helper function for arithmetic
def Tetaic(i,c,dictionary,genre_dictionary):
teta=float(Nic(i,c,dictionary,genre_dictionary)+1)/float(Nc(c,dictionary,genre_dictionary)+2)
return teta
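# Worked example of the smoothed estimate above (hypothetical counts): with Nic = 3 plots
# in genre c containing word i and Nc = 8 plots in genre c, Tetaic = (3 + 1) / (8 + 2) = 0.4,
# so words never seen in a genre still get a small non-zero probability.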
#calculates teta matrix with helper function
def getTeta(dictionary,genre_dictionary):
teta=[]
for c in range(len(genre_dictionary)):
teta_c=[]
for i in range(len(dictionary)):
teta_c.append(Tetaic(i,c,dictionary,genre_dictionary))
teta.append(teta_c)
return teta
data=get_training_set()
#sets inital data as global params
results=get_dictionary(data)
plots=results[0]
tokens=results[1]
dictionary=results[2]
genre_dictionary=get_genre_dictionary(data)
#produces the teta matrix and passes params to classifier
def main():
genre_probability=[]
for index in range(len(genre_dictionary)):
genre_probability.append(get_genre_probability(index,dictionary,genre_dictionary))
teta=getTeta(dictionary,genre_dictionary)
return (teta,dictionary,genre_dictionary,genre_probability)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
5fc764e2fc52a3262e04593a0fbc5a6b954f383e
|
89f3ba8905ce2ebad1a9605f683024dcd9ae1f7f
|
/api/models.py
|
8ff6448a8317132d187dd5c7b219dbd43e49f6fc
|
[] |
no_license
|
vishnualapra/carservice
|
1d26efb355ff54cb942ea6f36e96590e41df88d1
|
69aba53576aad96c169f64b5384ebe7b49a73234
|
refs/heads/master
| 2020-08-22T16:06:48.903210 | 2019-10-23T21:07:17 | 2019-10-23T21:07:17 | 216,432,482 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,313 |
py
|
from django.db import models
# Create your models here.
#manufacturer
class Manufacturer(models.Model):
manufacturer_code = models.IntegerField(primary_key=True)
manufacturer_name = models.CharField(max_length=100)
manufacturer_detail = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.manufacturer_name
class Model(models.Model):
model_code = models.IntegerField(primary_key=True)
daily_hire_rate = models.IntegerField()
model_name = models.CharField(max_length=100)
manufacturer = models.ForeignKey(Manufacturer,on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.model_name
class Mechanic(models.Model):
mechanic_id = models.AutoField(primary_key=True)
mechanic_name = models.CharField(max_length=100)
other_mechanic_details = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.mechanic_name
class Customer(models.Model):
customer_id = models.AutoField(primary_key=True)
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
title = models.CharField(max_length=20)
gender = models.CharField(max_length=10)
email_address = models.EmailField()
phone_number = models.CharField(max_length=15)
address_line_1 = models.CharField(max_length=500)
address_line_2 = models.CharField(max_length=500)
address_line_3 = models.CharField(max_length=500)
city = models.CharField(max_length=200)
state = models.CharField(max_length=100)
other_customer_details = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.last_name
class Car(models.Model):
license_number = models.IntegerField(primary_key=True)
current_milage = models.CharField(max_length=50)
engine_size = models.CharField(max_length=50)
other_car_details = models.TextField()
model = models.ForeignKey(Model,on_delete=models.PROTECT)
customer = models.ForeignKey(Customer,on_delete=models.PROTECT)
on_service = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return str(self.license_number)
class Booking(models.Model):
booking_id = models.AutoField(primary_key=True)
datetime_of_service = models.DateTimeField(null=True)
payment_received_yn = models.BooleanField(default=False)
completed = models.BooleanField(default=False)
other_bookin_details = models.TextField()
service_date = models.DateField()
day_position = models.IntegerField()
car = models.ForeignKey(Car,on_delete=models.PROTECT)
customer = models.ForeignKey(Customer,on_delete=models.PROTECT)
mechanic = models.ForeignKey(Mechanic,on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
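# Illustrative ORM usage sketch (the values are hypothetical; assumes migrations have been applied):
# toyota = Manufacturer.objects.create(manufacturer_code=1, manufacturer_name='Toyota',
#                                      manufacturer_detail='Japanese manufacturer')
# corolla = Model.objects.create(model_code=10, daily_hire_rate=50,
#                                model_name='Corolla', manufacturer=toyota)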
|
[
"[email protected]"
] | |
e6363546ba11afa88ac3d92f07661dcdc012c4da
|
8c44cf09689711b9389eeb9416c8fad45aee2009
|
/phron/text_sanitizer.py
|
cdf2b53e6de63af45639f2cb6c8e3dd940d5c3ba
|
[
"Apache-2.0"
] |
permissive
|
pacu/phron
|
71e880865a13d194257acc399c3397da58739e2e
|
03d6b0cb997b361bb1c7fe6a1be5414638036450
|
refs/heads/master
| 2021-06-16T23:13:24.420625 | 2021-05-27T18:09:28 | 2021-05-27T18:09:28 | 197,436,355 | 0 | 0 |
Apache-2.0
| 2021-05-27T18:09:29 | 2019-07-17T17:45:29 |
Python
|
UTF-8
|
Python
| false | false | 1,228 |
py
|
def sanitize_weka(text: str, remove_newlines=True, escape_doublequote=True, escape_singlequote=True,remove_separator=None) -> str:
"""
sanitize this text for weka CSV importer.
Parameters:
    remove_newlines(Bool): removes newline characters and replaces them with blank spaces. Default: True
    escape_doublequote(Bool): escapes every doublequote character \\\" with \\\\\\\". Default: True.
        if False, it will remove the doublequote and replace it with an empty String
    escape_singlequote(Bool): escapes every singlequote character \\\' with \\\\\\\'. Default: True.
if False, it will remove the singlequote and replace it with empty String
remove_separator(str): removes the separator str passed as argument. Default: None
"""
if remove_newlines:
text = text.replace('\n', ' ')
if escape_doublequote:
text = text.replace('"', '\\\"')
else:
text = text.replace('"', '')
if escape_singlequote:
text = text.replace("'", "\\\'")
else:
text = text.replace("'", "")
if remove_separator:
text = text.replace(remove_separator," ")
return text
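# Illustrative usage (the input string is hypothetical):
# sanitize_weka('line one\nline two "quoted"')
# newlines become spaces and each double quote is escaped, giving: line one line two \"quoted\"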
|
[
"[email protected]"
] | |
2c21c9fdf85b8db3d86708de109471dd19577441
|
3ed216ddff0ce7c303c33cfb54c0153518ee26d6
|
/2_Last Position & Math Table.py
|
594b4079ef607f75ec526eb8776c3f43f911e3bb
|
[] |
no_license
|
Tuseeq1/PythonPractice
|
9d289e49b71b00701100e22120d37f76d0bba8f7
|
c1b3f9e1844be11b1211add17dcdffaeaf0820c1
|
refs/heads/master
| 2020-03-26T11:13:28.165390 | 2018-08-15T09:42:47 | 2018-08-15T09:42:47 | 144,834,065 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 637 |
py
|
# Define a procedure, print_multiplication_table,
# that takes as input a positive whole number, and prints out a multiplication,
# table showing all the whole number multiplications up to and including the
# input number. The order in which the equations are printed matters.
def print_multiplication_table( n ):
    for i in range(1, n + 1):
        for j in range(1, n + 1):
            print(str(i) + ' * ' + str(j) + ' = ' + str(i * j))
#print_multiplication_table(2)
#>>> 1 * 1 = 1
#>>> 1 * 2 = 2
#>>> 2 * 1 = 2
#>>> 2 * 2 = 4
#print_multiplication_table(3)
#>>> 1 * 1 = 1
#>>> 1 * 2 = 2
#>>> 1 * 3 = 3
#>>> 2 * 1 = 2
#>>> 2 * 2 = 4
#>>> 2 * 3 = 6
#>>> 3 * 1 = 3
#>>> 3 * 2 = 6
#>>> 3 * 3 = 9
|
[
"[email protected]"
] | |
28ae56610dcda85516ba0f5cbeda86fcbdc07548
|
862c806d1d277ad4444af13b05f0d364f1c24b83
|
/examples/operator_v1.py
|
85a5ba5aa1f47f2f57e738add72e9c953fbd2a2f
|
[] |
no_license
|
irvinlim/pymesos-0.3.4-bugrepro
|
babc1f057093f3e291c780e337b856d67b3e581e
|
38909cad4f1feb7d7b996ac701340f305e364905
|
refs/heads/master
| 2020-03-24T07:43:13.893083 | 2018-07-27T12:11:28 | 2018-07-27T12:11:28 | 142,572,827 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,287 |
py
|
#!/usr/bin/env python2.7
from __future__ import print_function
import sys
import logging
from pymesos import MesosOperatorMasterDriver, OperatorMaster
class MinimalOperator(OperatorMaster):
def __init__(self):
pass
def taskAdded(self, task_info):
logging.debug('Task added')
logging.debug(task_info)
def taskUpdated(self, task_info):
logging.debug('Task updated')
logging.debug(task_info)
def frameworkAdded(self, framework_info):
logging.debug('Framework added')
logging.debug(framework_info)
def frameworkUpdated(self, framework_info):
logging.debug('Framework updated')
logging.debug(framework_info)
def frameworkRemoved(self, framework_info):
logging.debug('Framework removed')
logging.debug(framework_info)
def agentAdded(self, agent_info):
logging.debug('Agent added')
logging.debug(agent_info)
def agentRemoved(self, agent_info):
logging.debug('Agent removed')
logging.debug(agent_info)
def main(master):
driver = MesosOperatorMasterDriver(master, MinimalOperator())
res = driver.getHealth()
logging.debug(res)
driver.run()
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
if len(sys.argv) != 2:
logging.error('Usage: {} <mesos_master>'.format(sys.argv[0]))
sys.exit(1)
else:
main(sys.argv[1])
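# Example invocation (the master address below is hypothetical):
#   python operator_v1.py 10.0.0.1:5050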
|
[
"[email protected]"
] | |
9002db9fb689e2de7cb305ce596ae3d6f5abfe61
|
59062b36911a3f827d638910a653d280556869cb
|
/python/snippet1.py
|
14e7233d5cb9b374b8e1a8da7099bc8edf2fce31
|
[] |
no_license
|
atharva-bhange/codesnippets
|
aedeca7782b730ea35b5cf1de589f9d577b5e839
|
d6d2dc1da5889f26f1864b547f5cdc14cfd071d9
|
refs/heads/master
| 2021-01-02T07:37:48.514000 | 2020-02-10T20:02:08 | 2020-02-10T20:02:08 | 239,551,206 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 139 |
py
|
# Snippet 1
class dog(object):
def __init__(self):
pass
def speak(self):
pass
mark = dog()
print("Code complete")
|
[
"[email protected]"
] | |
69bef76ac68fc60f87f5f5e549027b0bcfae66f7
|
91a2ecfaf5dc6c917ec2fda31f56291103f68ceb
|
/tests/protos/test_ctc_loss.py
|
6da44120062bdda6381ed74e2c0f8225fffc8ae4
|
[
"BSD-3-Clause"
] |
permissive
|
MyrtleSoftware/myrtlespeech
|
635d1d16d1bd60fb07a4d30edbf9acb61786c13f
|
8522048fd37744ffa06827a0cbd202b839a15453
|
refs/heads/master
| 2021-07-16T14:55:00.479967 | 2020-03-20T14:33:15 | 2020-03-20T14:33:15 | 192,501,300 | 12 | 1 |
NOASSERTION
| 2020-03-20T14:33:17 | 2019-06-18T08:44:33 |
Python
|
UTF-8
|
Python
| false | false | 1,042 |
py
|
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Union
import hypothesis.strategies as st
from myrtlespeech.protos import ctc_loss_pb2
from tests.protos.utils import all_fields_set
# Fixtures and Strategies -----------------------------------------------------
@st.composite
def ctc_losses(
draw, return_kwargs: bool = False, alphabet_len: Optional[int] = None
) -> Union[
st.SearchStrategy[ctc_loss_pb2.CTCLoss],
st.SearchStrategy[Tuple[ctc_loss_pb2.CTCLoss, Dict]],
]:
"""Returns a SearchStrategy for CTCLoss plus maybe the kwargs."""
kwargs = {}
end = 1000
if alphabet_len is not None:
end = max(0, alphabet_len - 1)
kwargs["blank_index"] = draw(st.integers(0, end))
kwargs["reduction"] = draw(
st.sampled_from(ctc_loss_pb2.CTCLoss.REDUCTION.values())
)
all_fields_set(ctc_loss_pb2.CTCLoss, kwargs)
ctc_loss = ctc_loss_pb2.CTCLoss(**kwargs)
if not return_kwargs:
return ctc_loss
return ctc_loss, kwargs
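# Illustrative consumption of the strategy above (the test body is hypothetical):
# from hypothesis import given
# @given(ctc_losses(alphabet_len=28))
# def test_blank_index_in_range(ctc_loss):
#     assert 0 <= ctc_loss.blank_index < 28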
|
[
"[email protected]"
] | |
d212b119feedd836b1965727e519777fd8b95557
|
fea44d5ca4e6c9b2c7950234718a4531d453849e
|
/sktime/forecasting/tests/test_all_forecasters.py
|
c528a23d1d8d1d4b7fe5fc87dd17cbf747f4fa26
|
[
"BSD-3-Clause"
] |
permissive
|
mlgig/sktime
|
288069ab8c9b0743113877032dfca8cf1c2db3fb
|
19618df351a27b77e3979efc191e53987dbd99ae
|
refs/heads/master
| 2023-03-07T20:22:48.553615 | 2023-02-19T18:09:12 | 2023-02-19T18:09:12 | 234,604,691 | 1 | 0 |
BSD-3-Clause
| 2020-01-17T17:50:12 | 2020-01-17T17:50:11 | null |
UTF-8
|
Python
| false | false | 28,833 |
py
|
# -*- coding: utf-8 -*-
"""Tests for BaseForecaster API points.
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""
__author__ = ["mloning", "kejsitake", "fkiraly"]
import numpy as np
import pandas as pd
import pytest
from sktime.datatypes import check_is_mtype
from sktime.datatypes._utilities import get_cutoff
from sktime.exceptions import NotFittedError
from sktime.forecasting.base._delegate import _DelegatedForecaster
from sktime.forecasting.model_selection import (
ExpandingWindowSplitter,
SlidingWindowSplitter,
temporal_train_test_split,
)
from sktime.forecasting.tests._config import (
TEST_ALPHAS,
TEST_FHS,
TEST_OOS_FHS,
TEST_STEP_LENGTHS_INT,
TEST_WINDOW_LENGTHS_INT,
VALID_INDEX_FH_COMBINATIONS,
)
from sktime.performance_metrics.forecasting import mean_absolute_percentage_error
from sktime.tests.test_all_estimators import BaseFixtureGenerator, QuickTester
from sktime.utils._testing.forecasting import (
_assert_correct_columns,
_assert_correct_pred_time_index,
_get_expected_index_for_update_predict,
_get_n_columns,
_make_fh,
make_forecasting_problem,
)
from sktime.utils._testing.series import _make_series
from sktime.utils.validation.forecasting import check_fh
# get all forecasters
FH0 = 1
INVALID_X_INPUT_TYPES = [list("foo"), tuple()]
INVALID_y_INPUT_TYPES = [list("bar"), tuple()]
# testing data
y = make_forecasting_problem()
y_train, y_test = temporal_train_test_split(y, train_size=0.75)
# names for index/fh combinations to display in tests
index_fh_comb_names = [f"{x[0]}-{x[1]}-{x[2]}" for x in VALID_INDEX_FH_COMBINATIONS]
pytest_skip_msg = (
"ForecastingHorizon with timedelta values "
"is currently experimental and not supported everywhere"
)
class ForecasterFixtureGenerator(BaseFixtureGenerator):
"""Fixture generator for forecasting tests.
Fixtures parameterized
----------------------
estimator_class: estimator inheriting from BaseObject
ranges over all estimator classes not excluded by EXCLUDED_TESTS
estimator_instance: instance of estimator inheriting from BaseObject
ranges over all estimator classes not excluded by EXCLUDED_TESTS
instances are generated by create_test_instance class method
scenario: instance of TestScenario
ranges over all scenarios returned by retrieve_scenarios
"""
# note: this should be separate from TestAllForecasters
# additional fixtures, parameters, etc should be added here
# TestAllForecasters should contain the tests only
estimator_type_filter = "forecaster"
fixture_sequence = [
"estimator_class",
"estimator_instance",
"n_columns",
"scenario",
# "fh",
"update_params",
"step_length",
]
def _generate_n_columns(self, test_name, **kwargs):
"""Return number of columns for series generation in positive test cases.
Fixtures parameterized
----------------------
n_columns: int
1 for univariate forecasters, 2 for multivariate forecasters
ranges over 1 and 2 for forecasters which are both uni/multivariate
"""
if "estimator_class" in kwargs.keys():
scitype_tag = kwargs["estimator_class"].get_class_tag("scitype:y")
elif "estimator_instance" in kwargs.keys():
scitype_tag = kwargs["estimator_instance"].get_tag("scitype:y")
else:
return []
n_columns_list = _get_n_columns(scitype_tag)
if len(n_columns_list) == 1:
n_columns_names = ["" for x in n_columns_list]
else:
n_columns_names = [f"y:{x}cols" for x in n_columns_list]
return n_columns_list, n_columns_names
def _generate_update_params(self, test_name, **kwargs):
"""Return update_params for update calls.
Fixtures parameterized
----------------------
update_params: bool
whether to update parameters in update; ranges over True, False
"""
return [True, False], ["update_params=True", "update_params=False"]
def _generate_step_length(self, test_name, **kwargs):
"""Return step length for window.
Fixtures parameterized
----------------------
step_length: int
1 if update_params=True; TEST_STEP_LENGTH_INT if update_params=False
"""
update_params = kwargs["update_params"]
if update_params:
return [1], [""]
else:
return TEST_STEP_LENGTHS_INT, [f"step={a}" for a in TEST_STEP_LENGTHS_INT]
class TestAllForecasters(ForecasterFixtureGenerator, QuickTester):
"""Module level tests for all sktime forecasters."""
def test_get_fitted_params(self, estimator_instance, scenario):
"""Test get_fitted_params."""
scenario.run(estimator_instance, method_sequence=["fit"])
try:
params = estimator_instance.get_fitted_params()
assert isinstance(params, dict)
except NotImplementedError:
pass
# todo: should these not be checked in test_all_estimators?
def test_raises_not_fitted_error(self, estimator_instance):
"""Test that calling post-fit methods before fit raises error."""
# We here check extra method of the forecaster API: update and update_predict.
with pytest.raises(NotFittedError):
estimator_instance.update(y_test, update_params=False)
with pytest.raises(NotFittedError):
cv = SlidingWindowSplitter(fh=1, window_length=1, start_with_window=False)
estimator_instance.update_predict(y_test, cv=cv)
try:
with pytest.raises(NotFittedError):
estimator_instance.get_fitted_params()
except NotImplementedError:
pass
def test_y_multivariate_raises_error(self, estimator_instance):
"""Test that wrong y scitype raises error (uni/multivariate not supported)."""
if estimator_instance.get_tag("scitype:y") == "multivariate":
y = _make_series(n_columns=1)
with pytest.raises(ValueError, match=r"two or more variables"):
estimator_instance.fit(y, fh=FH0)
if estimator_instance.get_tag("scitype:y") in ["univariate", "both"]:
# this should pass since "both" allows any number of variables
# and "univariate" automatically vectorizes, behaves multivariate
pass
# todo: should these not be "negative scenarios", tested in test_all_estimators?
@pytest.mark.parametrize("y", INVALID_y_INPUT_TYPES)
def test_y_invalid_type_raises_error(self, estimator_instance, y):
"""Test that invalid y input types raise error."""
with pytest.raises(TypeError, match=r"type"):
estimator_instance.fit(y, fh=FH0)
# todo: should these not be "negative scenarios", tested in test_all_estimators?
@pytest.mark.parametrize("X", INVALID_X_INPUT_TYPES)
def test_X_invalid_type_raises_error(self, estimator_instance, n_columns, X):
"""Test that invalid X input types raise error."""
y_train = _make_series(n_columns=n_columns)
try:
with pytest.raises(TypeError, match=r"type"):
estimator_instance.fit(y_train, X, fh=FH0)
except NotImplementedError as e:
msg = str(e).lower()
assert "exogenous" in msg
# todo: refactor with scenarios. Need to override fh and scenario args for this.
@pytest.mark.parametrize(
"index_fh_comb", VALID_INDEX_FH_COMBINATIONS, ids=index_fh_comb_names
)
@pytest.mark.parametrize("fh_int", TEST_FHS, ids=[f"fh={fh}" for fh in TEST_FHS])
def test_predict_time_index(
self, estimator_instance, n_columns, index_fh_comb, fh_int
):
"""Check that predicted time index matches forecasting horizon.
Tests predicted time index for predict and predict_residuals.
"""
index_type, fh_type, is_relative = index_fh_comb
if fh_type == "timedelta":
return None
# todo: ensure check_estimator works with pytest.skip like below
# pytest.skip(
# "ForecastingHorizon with timedelta values "
# "is currently experimental and not supported everywhere"
# )
y_train = _make_series(
n_columns=n_columns, index_type=index_type, n_timepoints=50
)
cutoff = get_cutoff(y_train, return_index=True)
fh = _make_fh(cutoff, fh_int, fh_type, is_relative)
try:
estimator_instance.fit(y_train, fh=fh)
y_pred = estimator_instance.predict()
_assert_correct_pred_time_index(y_pred.index, cutoff, fh=fh_int)
_assert_correct_columns(y_pred, y_train)
y_test = _make_series(
n_columns=n_columns, index_type=index_type, n_timepoints=len(y_pred)
)
y_test.index = y_pred.index
y_res = estimator_instance.predict_residuals(y_test)
_assert_correct_pred_time_index(y_res.index, cutoff, fh=fh)
except NotImplementedError:
pass
@pytest.mark.parametrize(
"index_fh_comb", VALID_INDEX_FH_COMBINATIONS, ids=index_fh_comb_names
)
@pytest.mark.parametrize(
"fh_int_oos", TEST_OOS_FHS, ids=[f"fh={fh}" for fh in TEST_OOS_FHS]
)
def test_predict_time_index_with_X(
self, estimator_instance, n_columns, index_fh_comb, fh_int_oos
):
"""Check that predicted time index matches forecasting horizon."""
index_type, fh_type, is_relative = index_fh_comb
if fh_type == "timedelta":
return None
# todo: ensure check_estimator works with pytest.skip like below
# pytest.skip(
# "ForecastingHorizon with timedelta values "
# "is currently experimental and not supported everywhere"
# )
z, X = make_forecasting_problem(index_type=index_type, make_X=True)
# Some estimators may not support all time index types and fh types, hence we
# need to catch NotImplementedErrors.
y = _make_series(n_columns=n_columns, index_type=index_type)
cutoff = get_cutoff(y.iloc[: len(y) // 2], return_index=True)
fh = _make_fh(cutoff, fh_int_oos, fh_type, is_relative)
y_train, _, X_train, X_test = temporal_train_test_split(y, X, fh=fh)
try:
estimator_instance.fit(y_train, X_train, fh=fh)
y_pred = estimator_instance.predict(X=X_test)
cutoff = get_cutoff(y_train, return_index=True)
_assert_correct_pred_time_index(y_pred.index, cutoff, fh)
_assert_correct_columns(y_pred, y_train)
except NotImplementedError:
pass
@pytest.mark.parametrize(
"index_fh_comb", VALID_INDEX_FH_COMBINATIONS, ids=index_fh_comb_names
)
def test_predict_time_index_in_sample_full(
self, estimator_instance, n_columns, index_fh_comb
):
"""Check that predicted time index equals fh for full in-sample predictions."""
index_type, fh_type, is_relative = index_fh_comb
if fh_type == "timedelta":
return None
# todo: ensure check_estimator works with pytest.skip like below
# pytest.skip(
# "ForecastingHorizon with timedelta values "
# "is currently experimental and not supported everywhere"
# )
y_train = _make_series(n_columns=n_columns, index_type=index_type)
cutoff = get_cutoff(y_train, return_index=True)
steps = -np.arange(len(y_train))
fh = _make_fh(cutoff, steps, fh_type, is_relative)
try:
estimator_instance.fit(y_train, fh=fh)
y_pred = estimator_instance.predict()
_assert_correct_pred_time_index(y_pred.index, cutoff, fh)
except NotImplementedError:
pass
def test_predict_series_name_preserved(self, estimator_instance):
"""Test that fit/predict preserves name attribute and type of pd.Series."""
# skip this test if estimator needs multivariate data
# because then it does not take pd.Series at all
if estimator_instance.get_tag("scitype:y") == "multivariate":
return None
y_train = _make_series(n_timepoints=15)
y_train.name = "foo"
estimator_instance.fit(y_train, fh=[1, 2, 3])
y_pred = estimator_instance.predict()
_assert_correct_columns(y_pred, y_train)
def _check_pred_ints(
self, pred_ints: pd.DataFrame, y_train: pd.Series, y_pred: pd.Series, fh_int
):
# make iterable
if isinstance(pred_ints, pd.DataFrame):
pred_ints = [pred_ints]
for pred_int in pred_ints:
# check column naming convention
assert list(pred_int.columns) == ["lower", "upper"]
# check time index
cutoff = get_cutoff(y_train, return_index=True)
_assert_correct_pred_time_index(pred_int.index, cutoff, fh_int)
# check values
assert np.all(pred_int["upper"] >= pred_int["lower"])
# check if errors are weakly monotonically increasing
# pred_errors = y_pred - pred_int["lower"]
# # assert pred_errors.is_mononotic_increasing
# assert np.all(
# pred_errors.values[1:].round(4) >= pred_errors.values[:-1].round(4)
# )
@pytest.mark.parametrize("index_type", [None, "range"])
@pytest.mark.parametrize(
"coverage", TEST_ALPHAS, ids=[f"alpha={a}" for a in TEST_ALPHAS]
)
@pytest.mark.parametrize(
"fh_int_oos", TEST_OOS_FHS, ids=[f"fh={fh}" for fh in TEST_OOS_FHS]
)
def test_predict_interval(
self, estimator_instance, n_columns, index_type, fh_int_oos, coverage
):
"""Check prediction intervals returned by predict.
Arguments
---------
estimator_instance : BaseEstimator class descendant instance, forecaster to test
n_columns : number of columns for the test data
index_type : index type of the test data
fh_int_oos : forecasting horizon to test the forecaster at, all out of sample
coverage: float, coverage at which to make prediction intervals
Raises
------
AssertionError - if Forecaster test instance has "capability:pred_int"
and pred. int are not returned correctly when asking predict for them
AssertionError - if Forecaster test instance does not have "capability:pred_int"
and no NotImplementedError is raised when asking predict for pred.int
"""
y_train = _make_series(n_columns=n_columns, index_type=index_type)
estimator_instance.fit(y_train, fh=fh_int_oos)
if estimator_instance.get_tag("capability:pred_int"):
pred_ints = estimator_instance.predict_interval(
fh_int_oos, coverage=coverage
)
valid, msg, _ = check_is_mtype(
pred_ints, mtype="pred_interval", scitype="Proba", return_metadata=True
) # type: ignore
assert valid, msg
else:
with pytest.raises(NotImplementedError, match="prediction intervals"):
estimator_instance.predict_interval(fh_int_oos, coverage=coverage)
def _check_predict_quantiles(
self, pred_quantiles: pd.DataFrame, y_train: pd.Series, fh, alpha
):
# check if the input is a dataframe
assert isinstance(pred_quantiles, pd.DataFrame)
# check time index (also checks forecasting horizon is more than one element)
cutoff = get_cutoff(y_train, return_index=True)
_assert_correct_pred_time_index(pred_quantiles.index, cutoff, fh)
        # Forecasters where variable names do not exist
        # in this case y_train is a Series - the upper level in the DataFrame == 'Quantiles'
if isinstance(y_train, pd.Series):
expected = pd.MultiIndex.from_product([["Quantiles"], [alpha]])
else:
# multiply variables with all alpha values
expected = pd.MultiIndex.from_product([y_train.columns, [alpha]])
found = pred_quantiles.columns.to_flat_index()
assert all(expected == found)
if isinstance(alpha, list):
# sorts the columns that correspond to alpha values
pred_quantiles = pred_quantiles.reindex(
columns=pred_quantiles.columns.reindex(sorted(alpha), level=1)[0]
)
# check if values are monotonically increasing
for var in pred_quantiles.columns.levels[0]:
for index in range(len(pred_quantiles.index)):
assert pred_quantiles[var].iloc[index].is_monotonic_increasing
@pytest.mark.parametrize(
"alpha", TEST_ALPHAS, ids=[f"alpha={a}" for a in TEST_ALPHAS]
)
@pytest.mark.parametrize(
"fh_int_oos", TEST_OOS_FHS, ids=[f"fh={fh}" for fh in TEST_OOS_FHS]
)
def test_predict_quantiles(self, estimator_instance, n_columns, fh_int_oos, alpha):
"""Check prediction quantiles returned by predict.
Arguments
---------
Forecaster: BaseEstimator class descendant, forecaster to test
fh: ForecastingHorizon, fh at which to test prediction
alpha: float, alpha at which to make prediction intervals
Raises
------
AssertionError - if Forecaster test instance has "capability:pred_int"
and pred. int are not returned correctly when asking predict for them
AssertionError - if Forecaster test instance does not have "capability:pred_int"
and no NotImplementedError is raised when asking predict for pred.int
"""
y_train = _make_series(n_columns=n_columns)
estimator_instance.fit(y_train, fh=fh_int_oos)
try:
quantiles = estimator_instance.predict_quantiles(fh=fh_int_oos, alpha=alpha)
self._check_predict_quantiles(quantiles, y_train, fh_int_oos, alpha)
except NotImplementedError:
pass
def test_pred_int_tag(self, estimator_instance):
"""Checks whether the capability:pred_int tag is correctly set.
Arguments
---------
estimator_instance : instance of BaseForecaster
Raises
------
ValueError - if capability:pred_int is True, but neither
predict_interval nor predict_quantiles have implemented content
this can be by direct implementation of _predict_interval/_predict_quantiles
or by defaulting to each other and/or _predict_proba
"""
f = estimator_instance
# we skip the _DelegatedForecaster, since it implements delegation methods
# which may look like the method is implemented, but in fact it is not
if isinstance(f, _DelegatedForecaster):
return None
# check which methods are implemented
implements_interval = f._has_implementation_of("_predict_interval")
implements_quantiles = f._has_implementation_of("_predict_quantiles")
implements_proba = f._has_implementation_of("_predict_proba")
pred_int_works = implements_interval or implements_quantiles or implements_proba
if not pred_int_works and f.get_class_tag("capability:pred_int", False):
raise ValueError(
f"{type(f).__name__} does not implement probabilistic forecasting, "
'but "capability:pred_int" flag has been set to True incorrectly. '
'The flag "capability:pred_int" should instead be set to False.'
)
if pred_int_works and not f.get_class_tag("capability:pred_int", False):
raise ValueError(
f"{type(f).__name__} does implement probabilistic forecasting, "
'but "capability:pred_int" flag has been set to False incorrectly. '
'The flag "capability:pred_int" should instead be set to True.'
)
@pytest.mark.parametrize(
"fh_int_oos", TEST_OOS_FHS, ids=[f"fh={fh}" for fh in TEST_OOS_FHS]
)
def test_score(self, estimator_instance, n_columns, fh_int_oos):
"""Check score method."""
y = _make_series(n_columns=n_columns)
y_train, y_test = temporal_train_test_split(y)
estimator_instance.fit(y_train, fh=fh_int_oos)
y_pred = estimator_instance.predict()
fh_idx = check_fh(fh_int_oos).to_indexer() # get zero based index
expected = mean_absolute_percentage_error(
y_test.iloc[fh_idx], y_pred, symmetric=False
)
# compare expected score with actual score
actual = estimator_instance.score(y_test.iloc[fh_idx], fh=fh_int_oos)
assert actual == expected
@pytest.mark.parametrize(
"fh_int_oos", TEST_OOS_FHS, ids=[f"fh={fh}" for fh in TEST_OOS_FHS]
)
def test_update_predict_single(
self, estimator_instance, n_columns, fh_int_oos, update_params
):
"""Check correct time index of update-predict."""
y = _make_series(n_columns=n_columns)
y_train, y_test = temporal_train_test_split(y)
estimator_instance.fit(y_train, fh=fh_int_oos)
y_pred = estimator_instance.update_predict_single(
y_test, update_params=update_params
)
cutoff = get_cutoff(y_train, return_index=True)
_assert_correct_pred_time_index(y_pred.index, cutoff, fh_int_oos)
_assert_correct_columns(y_pred, y_train)
@pytest.mark.parametrize(
"fh_int_oos", TEST_OOS_FHS, ids=[f"fh={fh}" for fh in TEST_OOS_FHS]
)
@pytest.mark.parametrize("initial_window", TEST_WINDOW_LENGTHS_INT)
def test_update_predict_predicted_index(
self,
estimator_instance,
n_columns,
fh_int_oos,
step_length,
initial_window,
update_params,
):
"""Check predicted index in update_predict."""
y = _make_series(n_columns=n_columns, all_positive=True, index_type="datetime")
y_train, y_test = temporal_train_test_split(y)
cv = ExpandingWindowSplitter(
fh=fh_int_oos,
initial_window=initial_window,
step_length=step_length,
)
estimator_instance.fit(y_train, fh=fh_int_oos)
y_pred = estimator_instance.update_predict(
y_test, cv=cv, update_params=update_params
)
assert isinstance(y_pred, (pd.Series, pd.DataFrame))
expected = _get_expected_index_for_update_predict(
y_test, fh_int_oos, step_length, initial_window
)
actual = y_pred.index
np.testing.assert_array_equal(actual, expected)
def test__y_and_cutoff(self, estimator_instance, n_columns):
"""Check cutoff and _y."""
# check _y and cutoff is None after construction
f = estimator_instance
y = _make_series(n_columns=n_columns)
y_train, y_test = temporal_train_test_split(y, train_size=0.75)
# check that _y and cutoff are empty when estimator is constructed
assert f._y is None
assert f.cutoff is None
# check that _y and cutoff is updated during fit
f.fit(y_train, fh=FH0)
# assert isinstance(f._y, pd.Series)
        # action: uncomment the line above
        # why: it fails for multivariate forecasters because their _y is a DataFrame
# solution: look for a general solution for Series and DataFrames
assert len(f._y) > 0
assert f.cutoff == y_train.index[-1]
# check data pointers
np.testing.assert_array_equal(f._y.index, y_train.index)
# check that _y and cutoff is updated during update
f.update(y_test, update_params=False)
np.testing.assert_array_equal(
f._y.index, np.append(y_train.index, y_test.index)
)
assert f.cutoff == y_test.index[-1]
def test__y_when_refitting(self, estimator_instance, n_columns):
"""Test that _y is updated when forecaster is refitted."""
y_train = _make_series(n_columns=n_columns)
estimator_instance.fit(y_train, fh=FH0)
estimator_instance.fit(y_train[3:], fh=FH0)
        # using np.squeeze to make the test flexible to shape differences like
# (50,) and (50, 1)
assert np.all(np.squeeze(estimator_instance._y) == np.squeeze(y_train[3:]))
def test_fh_attribute(self, estimator_instance, n_columns):
"""Check fh attribute and error handling if two different fh are passed."""
f = estimator_instance
y_train = _make_series(n_columns=n_columns)
f.fit(y_train, fh=FH0)
np.testing.assert_array_equal(f.fh, FH0)
f.predict()
np.testing.assert_array_equal(f.fh, FH0)
f.predict(FH0)
np.testing.assert_array_equal(f.fh, FH0)
# if fh is not required in fit, test this again with fh passed late
if not f.get_tag("requires-fh-in-fit"):
f.fit(y_train)
f.predict(FH0)
np.testing.assert_array_equal(f.fh, FH0)
def test_fh_not_passed_error_handling(self, estimator_instance, n_columns):
"""Check that not passing fh in fit/predict raises correct error."""
f = estimator_instance
y_train = _make_series(n_columns=n_columns)
if f.get_tag("requires-fh-in-fit"):
# if fh required in fit, should raise error if not passed in fit
with pytest.raises(ValueError):
f.fit(y_train)
else:
# if fh not required in fit, should raise error if not passed until predict
f.fit(y_train)
with pytest.raises(ValueError):
f.predict()
def test_different_fh_in_fit_and_predict_error_handling(
self, estimator_instance, n_columns
):
"""Check that fh different in fit and predict raises correct error."""
f = estimator_instance
# if fh is not required in fit, can be overwritten, should not raise error
if not f.get_tag("requires-fh-in-fit"):
return None
y_train = _make_series(n_columns=n_columns)
f.fit(y_train, fh=FH0)
np.testing.assert_array_equal(f.fh, FH0)
# changing fh during predict should raise error
with pytest.raises(ValueError):
f.predict(fh=FH0 + 1)
def test_hierarchical_with_exogeneous(self, estimator_instance, n_columns):
"""Check that hierarchical forecasting works, also see bug #3961.
Arguments
---------
estimator_instance : instance of BaseForecaster
n_columns : number of columns, of the endogeneous data y_train
Raises
------
Exception - if fit/predict does not complete without error
AssertionError - if forecast is not expected mtype pd_multiindex_hier,
and does not have expected row and column indices
"""
from sktime.datatypes import check_is_mtype
from sktime.datatypes._utilities import get_window
from sktime.utils._testing.hierarchical import _make_hierarchical
y_train = _make_hierarchical(
hierarchy_levels=(2, 4),
n_columns=n_columns,
min_timepoints=22,
max_timepoints=22,
index_type="period",
)
X = _make_hierarchical(
hierarchy_levels=(2, 4),
n_columns=2,
min_timepoints=24,
max_timepoints=24,
index_type="period",
)
X.columns = ["foo", "bar"]
X_train = get_window(X, lag=2)
X_test = get_window(X, window_length=2)
fh = [1, 2]
estimator_instance.fit(y=y_train, X=X_train, fh=fh)
y_pred = estimator_instance.predict(X=X_test)
assert isinstance(y_pred, pd.DataFrame)
assert check_is_mtype(y_pred, "pd_multiindex_hier")
msg = (
"returned columns after predict are not as expected. "
f"expected: {y_train.columns}. Found: {y_pred.columns}"
)
assert np.all(y_pred.columns == y_train.columns), msg
# check consistency of forecast hierarchy with training data
# some forecasters add __total levels, e.g., ReconcilerForecaster
        # the "if" branch covers forecasters that add no such levels; the "else" branch covers those that do
if len(y_pred.index) == len(X_test.index):
# the indices should be equal iff no levels are added
assert np.all(y_pred.index == X_test.index)
else:
# if levels are added, all expected levels and times should be contained
assert set(X_test.index).issubset(y_pred.index)
|
[
"[email protected]"
] | |
084d8ca89f293bf5398b5ab07d7076af43a5fb8d
|
590a0c3a7254b8dac85ab18072dbf766aca7af93
|
/Python-Exercise-100/python-exercise-example07.py
|
01777ba168c7f8e9c5ee7615fd7642d9f407aaf6
|
[
"MIT"
] |
permissive
|
MiracleWong/PythonPractice
|
90c66d29a9cdf0200d3dbac946d05f12dd856e91
|
40aecd84045ad18f6aff95d5b8be8e352ca0a726
|
refs/heads/master
| 2021-08-15T17:19:51.543013 | 2021-06-15T03:59:51 | 2021-06-15T03:59:51 | 98,256,005 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 164 |
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Source: http://www.runoob.com/python/python-exercise-example7.html
a = [1, 2, 4, 5, 5, 6, 7, 7]
b = a[:]
print(b)
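# a[:] makes a shallow copy, so changing b afterwards leaves a untouched, e.g.
# b.append(8); print(a)  # still prints [1, 2, 4, 5, 5, 6, 7, 7]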
|
[
"[email protected]"
] | |
5d314b91eab30ca0734edabfe18f84b0b0ac2a17
|
9aab31e0a55d1f56c5e4eff383760f93cf7445ca
|
/RamseyNumber/classification/irrep_preloaded.py
|
fff97eaf5329ea2031f367a9a5aa6fecd051f6be
|
[] |
no_license
|
chngr/kakko
|
d6ecbe252dfed19e62e221116aea9e2ec696a1f6
|
92ab05ccda63d92a0f8c81df82b1f7d624dc03f6
|
refs/heads/master
| 2020-12-03T05:10:43.592407 | 2017-08-02T17:21:53 | 2017-08-02T17:21:53 | 95,740,495 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,491 |
py
|
# irrep.py
# weight_space_gen(): generates root spaces
# Input: cartan_basis -- list with Cartan basis set
# diag_mat_list -- list of diagonal matrices corresponding to Cartan basis
# (with corresponding indices)
# alg_dim -- dimension of overall Lie algebra
# Output: weight_space_list -- ker((rho(H_i) - a_{ij} * id)^{dim V}) for all i and j
def weight_space_gen(cartan_basis, diag_mat_list, alg_dim):
weight_space_list = []
mat_size = cartan_basis[0].ncols()
# for each element in Cartan basis
for i in range(len(cartan_basis)):
elem = cartan_basis[i]
cur_diag = diag_mat_list[i].diagonal()
sub_list = []
# for each eigenvalue
for eigenvalue in cur_diag:
cur_space = ((elem - eigenvalue * matrix.identity(mat_size))^alg_dim).kernel()
# add to list for given i and j
sub_list.append(cur_space)
# add sublist for given i to overall list
weight_space_list.append(sub_list)
return weight_space_list
# weight_space_decomp(): calculates root space decomposition
# Input: weight_space_list -- list with sublists: each sublist has root spaces for
# given element in Cartan basis
# Output: decomp_list -- list with spaces in root space decomposition
def weight_space_decomp(weight_space_list):
# max_index for tuple set of indices
max_index = len(weight_space_list[0]) - 1
# length of each tuple in tuple set of indices
basis_size = len(weight_space_list)
index_set = get_tuples(max_index,basis_size)
# direct_sum stores all of the intersections
to_direct_sum = []
# for each index
for index in index_set:
list_to_intersect = []
# pair index with each sublist
for i in range(len(index)):
cur_index = index[i]
list_to_intersect.append(weight_space_list[i][cur_index])
cur_intersection = intersect_spaces(list_to_intersect)
to_direct_sum.append(cur_intersection)
to_direct_sum = list(set(to_direct_sum))
for elem in to_direct_sum:
if elem.dimension() == 0:
to_direct_sum.remove(elem)
return to_direct_sum
# get_tuples(): generates all possible tuples from 0 to max_val, inclusive
# Input: max_val -- maximum value in tuple
# list_len -- length of each tuple
# Output: tuple_list -- list of all possible tuples within range
def get_tuples(max_val, list_len):
tuple_list = []
# perform recursion
if list_len > 1:
return tuple_helper(get_tuples(max_val,list_len-1),max_val)
# base case
else:
for i in range(max_val+1):
tuple_list.append([i])
return tuple_list
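# Small example of the enumeration above: get_tuples(1, 2) -> [[0, 0], [0, 1], [1, 0], [1, 1]]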
# tuple_helper(): helper function to perform recursion for get_tuples()
# Input: old_list -- list before current step of the recursion
# max_val -- maximum value in tuple
# Output: new_list -- list after current step of the recursion
def tuple_helper(old_list, max_val):
new_list = []
for i in range(len(old_list)):
cur_tuple = old_list[i]
for j in range(max_val+1):
new_cur_tuple = []
new_cur_tuple = cur_tuple + [j]
new_list.append(new_cur_tuple)
return new_list
# adjoint_rep(): computes adjoint representation matrices of
# Lie algebra
# Input: input_elems -- set of matrices to compute adjoint rep of
# basis -- compute with respect to this basis
# Output: ad -- list of adjoint representation matrices
def adjoint_rep(input_elems, basis):
basis_vec = []
ad = []
# find matrix of basis
for b in basis:
basis_vec.append(b.transpose().list())
basis_mat = matrix(QQ,basis_vec).transpose()
# find adjoint rep matrices
for mat_elem in input_elems:
mat_list = []
for basis_elem in basis:
bracket_vec = vector(QQ,bracket(mat_elem,basis_elem).transpose().list())
coords = basis_mat.solve_right(bracket_vec)
mat_list.append(coords.list())
adj_mat = matrix(QQ,mat_list).transpose()
ad.append(adj_mat)
return ad
# ------------------------------------------------------------------------------------------
from random import randint
# simultaneous_diag(): simultaneously diagonalizes a commuting basis set
# Input: basis -- commuting basis
# Output: P -- matrix P of D = P^{-1} * A * P that simultaneously diagonalizes
# diag_mat_list -- list of diagonalized matrices
def simultaneous_diag(basis):
valid_elem = False
# common P and unique D for each element in Cartan
P = None
diag_mat_list = []
# find element that diagonalizes the Cartan basis
while not valid_elem:
diag_mat_list = []
# compute a random element of the Cartan subalgebra
cartan_elem = compute_random_element(basis)
# diagonalize random element
D, P = cartan_elem.eigenmatrix_right()
# assume the diagonalization works
valid_elem = True
# check if diagonalizes all elements
for elem in basis:
cur_diag_mat = P.inverse() * elem * P
diag_mat_list.append(cur_diag_mat)
# check if each element is diagonalized
if not gap.IsDiagonalMat(cur_diag_mat):
valid_elem = False
break
return P, diag_mat_list
# compute_random_element(): computes random matrix element, random linear
# combination of basis vectors
# Input: basis -- basis of Lie algebra
# Output: random_elem -- random element of Lie algebra
def compute_random_element(basis):
mat_size = basis[0].ncols()
# choose coefficients from 1 to 100 inclusive
scaling = [randint(1,100) for p in range(len(basis))]
random_elem = matrix(QQ,mat_size)
for i in range(len(basis)):
random_elem = random_elem + scaling[i] * basis[i]
return random_elem
# extract_weights(): determines a list of weights
# Input: diag_mat_list -- set of diagonal matrices after simultaneously
# diagonalizing basis for the Cartan
# Output: weight_vec_list -- list of weights
def extract_weights(diag_mat_list):
# extract the diagonals from the diagonalized matrices
diag_vec_list = []
for elem in diag_mat_list:
diag_vec_list.append(elem.diagonal())
# dim_H is the dimension of Cartan subalgebra
# dim_V is the dimension of the entire space
dim_H = len(diag_vec_list)
dim_V = len(diag_vec_list[0])
weight_vec_list = []
# for ith index in each diagonal
for i in range(dim_V):
# for jth diagonal vector, create a vector across a common index
cur_vec = []
for j in range(dim_H):
cur_vec.append(diag_vec_list[j][i])
weight_vec_list.append(cur_vec)
return weight_vec_list
# highest_weight_gen(): determines direct sum of highest weight spaces
# Input: pos_root_vec -- set of positive root vectors
# Output: highest_weight_intersection -- direct sum of highest weight spaces
def highest_weight_gen(pos_root_vec):
spaces_to_intersect = []
for elem in pos_root_vec:
spaces_to_intersect.append(elem.right_kernel())
highest_weight_intersection = intersect_spaces(spaces_to_intersect)
return highest_weight_intersection
# intersect_spaces(): computes intersection of vector spaces in space_list
# Input: space_list -- list of vector spaces over common base ring
# Output: inter_space -- intersection of spaces
def intersect_spaces(space_list):
inter_space = space_list[0]
for space in space_list:
inter_space = inter_space.intersection(space)
return inter_space
# find_highest_weights(): finds the weights in weight_list which are highest weights
# Input: highest_weight_intersection -- intersection of the highest weight spaces
# weight_list -- list of all weights
# P -- matrix of simultaneous eigenvectors
# Output: highest_weights -- weights in weight_list which are highest weights
def find_highest_weights(highest_weight_intersection, weight_list, P):
highest_weights = []
col_list = P.columns()
for i in range(len(col_list)):
cur_weight_space = span([col_list[i]],QQ)
if highest_weight_intersection.intersection(cur_weight_space).dimension() != 0:
highest_weights.append(weight_list[i])
return highest_weights
# find_irreps(): finds the multiplicities of irreps
# Input: simple_roots -- list of simple roots
# highest_weights -- list of highest weights
# Output: irrep_dict -- dictionary mapping irrep identifier to frequency
def find_irreps(simple_roots, highest_weights):
# map from tuple to frequency
irrep_dict = {}
# build matrix of simple roots
simple_root_mat = matrix(QQ,simple_roots).transpose()
# solve for int coordinates of highest_weights wrt simple_root_mat
for elem in highest_weights:
coords = tuple(simple_root_mat.solve_right(vector(QQ,elem)))
if coords not in irrep_dict:
irrep_dict[coords] = 1
else:
irrep_dict[coords] += 1
return irrep_dict
# --------------------- MAIN SCRIPT ---------------------
# SL_3 Test
# e_1 = matrix([[0,1,0],[0,0,0],[0,0,0]])
# e_2 = matrix([[0,0,0],[1,0,0],[0,0,0]])
# e_3 = matrix([[0,0,0],[0,0,1],[0,0,0]])
# e_4 = matrix([[0,0,0],[0,0,0],[0,1,0]])
# gens = [e_1,e_2,e_3,e_4]
# SO_4 Test
# e_1 = matrix([[0,0,1,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]])
# e_2 = matrix([[0,0,0,0],[0,0,0,1],[0,0,0,0],[0,0,0,0]])
# e_3 = matrix([[0,0,0,0],[0,0,0,0],[1,0,0,0],[0,0,0,0]])
# e_4 = matrix([[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,1,0,0]])
# gens = [e_1,e_2,e_3,e_4]
# # P+1, P=6
# e = matrix([[0, 1, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0],[0, 0, 0, 2, 0, 0],[0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 7],[0, 0, 0, 0, 0, 0]])
# f = matrix([[0, 0, 0, 0, 0, 0],[1, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0],[0, 0, 1, 0, 0, 0],[0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 1, 0]])
# gens = [e,f]
# In GAP -- Compute:
# Lie algebra
# dimension of Lie algebra
# Cartan subalgebra
# basis for Cartan subalgebra
# root System for Lie algebra
# simple roots of Lie algebra
# positive root vectors of Lie algebra
# gens = [E,F]
# lie_alg = gap.LieAlgebra('Rationals',gens)
# alg_dim = gap.Dimension(lie_alg)
# cartan_alg = gap.CartanSubalgebra(lie_alg)
# cartan_basis = gap.BasisVectors(gap.Basis(cartan_alg))
# root_sys = gap.RootSystem(lie_alg)
# simple_roots = gap.SimpleSystem(root_sys)
# pos_root_vec = gap.PositiveRootVectors(root_sys)
# # convert from GAP to Sage format: cartan_basis
# sage_cartan_basis = []
# for elem in cartan_basis:
# sage_cartan_basis.append(matrix(QQ,elem))
# # convert from GAP to Sage format: pos_root_vec
# sage_pos_root_vec = []
# for elem in pos_root_vec:
# sage_pos_root_vec.append(matrix(QQ,elem))
# # convert from GAP to Sage format: simple_roots
# sage_simple_roots = []
# for elem in simple_roots:
# sage_simple_roots.append(list(elem))
# simultaneously diagonalize the Cartan basis
P, diag_mat_list = simultaneous_diag(sage_cartan_basis)
# extract the weights from the diagonalized matrices
weight_list = extract_weights(diag_mat_list)
# find the intersection of highest weight spaces
highest_weight_intersection = highest_weight_gen(sage_pos_root_vec)
# find the highest weights
highest_weights = find_highest_weights(highest_weight_intersection, weight_list, P)
# find coordinates of highest weights wrt simple roots
irrep_dict = find_irreps(sage_simple_roots, highest_weights)
|
[
"[email protected]"
] | |
bcb87b977ae9f3dda477d957cc6ee78f8f5cdf2e
|
fbf6fcd3720d1a5f1f01f91c7ecad68f1b296924
|
/tools/test_modules.py
|
85199d0138cfbbde70f10f93fa006cc06675053a
|
[
"MIT"
] |
permissive
|
uvavision/DrillDown
|
9602ddabd712d14df10e7026db3d7e62e7e4edba
|
ad0ef773b3af0859e48ea302f4f1d87215b26cef
|
refs/heads/master
| 2022-04-28T21:42:06.366515 | 2022-04-15T12:14:25 | 2022-04-15T12:14:25 | 214,220,415 | 11 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 14,358 |
py
|
#!/usr/bin/env python
import _init_paths
import os, sys, cv2, json
import math, PIL, cairo
import numpy as np
import pickle, random
import os.path as osp
from time import time
from config import get_config
from copy import deepcopy
from glob import glob
import matplotlib.pyplot as plt
from vocab import Vocabulary
from utils import *
#######################################################################
from modules.text_encoder import TextEncoder
from modules.region_encoder import RegionEncoder
from modules.image_encoder import ImageEncoder
from modules.context_encoder import ContextEncoder
#######################################################################
from modules.attention import Attention
from modules.tirg_rnn import TIRGRNN
from modules.grounding_loss import GroundingLoss
#######################################################################
from modules.image_model import ImageModel
from modules.region_model import RegionModel
from modules.paragraph_model import ParagraphModel
from modules.image_hred_model import ImageHREDModel
from modules.region_grounding_model import RegionGroundingModel
#######################################################################
import torch, torchtext
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from datasets.vg import vg
from datasets.loader import region_loader, region_collate_fn
from datasets.loader import caption_loader, caption_collate_fn
from datasets.loader import paragraph_loader, paragraph_collate_fn
def test_attention(config):
attention = Attention(config, config.attn_type, 1024, 1024)
h_s = torch.randn(7, 36, 1024)
h_t = torch.randn(7, 5, 1024)
m_s = torch.randn(7, 36).random_(0, 2)
context, scores = attention(h_t, h_s, m_s)
print(context.size(), scores.size())
def test_tirg_rnn(config):
net = TIRGRNN(config, config.n_feature_dim, config.n_feature_dim, config.n_rnn_layers, dropout=0.1)
input_var = np.random.randn(2, 3, config.n_feature_dim)
prev_hidden = np.random.randn(config.n_rnn_layers, 2, config.n_feature_dim)
input_var_th = torch.from_numpy(input_var).float()
prev_hidden_th = torch.from_numpy(prev_hidden).float()
last_layer_hiddens, last_step_hiddens = net(input_var_th, prev_hidden_th)
print('last_layer_hiddens.size()', last_layer_hiddens.size())
print('last_step_hiddens.size()', last_step_hiddens.size())
def test_region_encoder(config):
db = vg(config, 'test')
loaddb = region_loader(db)
loader = DataLoader(loaddb, batch_size=3*config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=region_collate_fn)
net = RegionEncoder(config)
for cnt, batched in enumerate(loader):
region_feats = batched['region_feats'].float()
region_clses = batched['region_clses'].long()
print('region_feats', region_feats.size())
print('region_clses', region_clses.size())
img_feats, masked_feats, mm = net(region_feats, region_clses)
print('img_feats', img_feats.size())
if config.subspace_alignment_mode > 0:
print('masked_feats', masked_feats.size())
print('mm', mm.size())
break
def test_image_encoder(config):
db = vg(config, 'test')
loaddb = caption_loader(db)
loader = DataLoader(loaddb, batch_size=3*config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=caption_collate_fn)
net = ImageEncoder(config)
for cnt, batched in enumerate(loader):
images = batched['images'].float()
print('images', images.size())
feats = net(images)
print('features', feats.size())
break
def test_text_encoder(config):
db = vg(config, 'test')
loaddb = region_loader(db)
loader = DataLoader(loaddb, batch_size=3*config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=region_collate_fn)
net = TextEncoder(config)
for cnt, batched in enumerate(loader):
sent_inds = batched['sent_inds'].long()
sent_msks = batched['sent_msks'].float()
bsize, slen, fsize = sent_inds.size()
print('sent_inds', sent_inds.size())
print('sent_msks', sent_msks.size())
f1, f2, h = net(sent_inds.view(bsize*slen, fsize), sent_msks.view(bsize*slen, fsize))
print(f1.size(), f2.size(), h.size())
break
def test_image_model(config):
db = vg(config, 'test')
loaddb = caption_loader(db)
loader = DataLoader(loaddb, batch_size=config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=caption_collate_fn)
net = ImageModel(config)
for cnt, batched in enumerate(loader):
images = batched['images'].float()
sent_inds = batched['sent_inds'].long()
sent_msks = batched['sent_msks'].long()
img_feats, txt_feats = net(sent_inds, sent_msks, None, images)
print('images', images.size())
print('img_feats', img_feats.size())
print('txt_feats', txt_feats.size())
break
def test_grounding_loss(config):
db = vg(config, 'test')
loaddb = region_loader(db)
loader = DataLoader(loaddb, batch_size=3*config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=region_collate_fn)
net = RegionModel(config)
criterion = GroundingLoss(config)
for cnt, batched in enumerate(loader):
scene_inds = batched['scene_inds'].long()[:config.batch_size]
sent_inds = batched['sent_inds'].long()[:config.batch_size]
sent_msks = batched['sent_msks'].long()[:config.batch_size]
region_feats = batched['region_feats'].float()[:config.batch_size]
region_clses = batched['region_clses'].long()[:config.batch_size]
region_masks = batched['region_masks'].float()[:config.batch_size]
src_region_feats = batched['region_feats'].float()[config.batch_size:2*config.batch_size]
src_region_clses = batched['region_clses'].long()[config.batch_size:2*config.batch_size]
src_region_masks = batched['region_masks'].float()[config.batch_size:2*config.batch_size]
img_feats, masked_feats, txt_feats, subspace_masks, sample_logits, sample_indices = \
net(scene_inds, sent_inds, sent_msks,
src_region_feats, src_region_clses, src_region_masks,
region_feats, region_clses, region_masks,
config.explore_mode)
masked_feats = img_feats
sim1 = criterion.compute_batch_mutual_similarity(masked_feats, region_masks, txt_feats)
sim2 = criterion.debug_compute_batch_mutual_similarity(masked_feats, region_masks, txt_feats)
print('sim1', sim1.size())
print('sim2', sim2.size())
print('diff', torch.sum(torch.abs(sim1-sim2)))
txt_masks = txt_feats.new_ones(txt_feats.size(0), txt_feats.size(1))
losses = criterion.forward_loss(masked_feats, region_masks, txt_feats, txt_masks, config.loss_reduction_mode)
print('losses', losses.size())
break
def test_paragraph_model(config):
db = vg(config, 'test')
loaddb = paragraph_loader(db)
loader = DataLoader(loaddb, batch_size=3*config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=paragraph_collate_fn)
net = ParagraphModel(config)
net.train()
for name, param in net.named_parameters():
print(name, param.size())
for cnt, batched in enumerate(loader):
start = time()
scene_inds = batched['scene_inds'].long()[:config.batch_size]
sent_inds = batched['sent_inds'].long()[:config.batch_size]
sent_msks = batched['sent_msks'].long()[:config.batch_size]
region_feats = batched['region_feats'].float()[:config.batch_size]
region_clses = batched['region_clses'].long()[:config.batch_size]
region_masks = batched['region_masks'].float()[:config.batch_size]
img_feats, txt_feats = net(sent_inds, sent_msks, region_feats, region_clses, region_masks)
losses = net.loss(img_feats, region_masks, txt_feats.unsqueeze(1))
print('losses', losses.size(), torch.mean(losses))
metrics, cache_results = net.evaluate(img_feats, region_masks, txt_feats.unsqueeze(1))
print('metrics', metrics)
print('sent_inds', sent_inds.size())
print('sent_msks', sent_msks.size())
print('region_feats', region_feats.size())
print('region_clses', region_clses.size())
print('region_masks', region_masks.size())
print('img_feats', img_feats.size())
print('txt_feats', txt_feats.size())
print('time:', time() - start)
break
def test_region_model(config):
db = vg(config, 'test')
loaddb = region_loader(db)
loader = DataLoader(loaddb, batch_size=3*config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=region_collate_fn)
net = RegionModel(config)
net.train()
for name, param in net.named_parameters():
print(name, param.size())
for cnt, batched in enumerate(loader):
start = time()
scene_inds = batched['scene_inds'].long()[:config.batch_size]
sent_inds = batched['sent_inds'].long()[:config.batch_size]
sent_msks = batched['sent_msks'].long()[:config.batch_size]
region_feats = batched['region_feats'].float()[:config.batch_size]
region_clses = batched['region_clses'].long()[:config.batch_size]
region_masks = batched['region_masks'].float()[:config.batch_size]
src_region_feats = batched['region_feats'].float()[config.batch_size:2*config.batch_size]
src_region_clses = batched['region_clses'].long()[config.batch_size:2*config.batch_size]
src_region_masks = batched['region_masks'].float()[config.batch_size:2*config.batch_size]
img_feats, masked_feats, txt_feats, subspace_masks, sample_logits, sample_indices = \
net(scene_inds, sent_inds, sent_msks,
src_region_feats, src_region_clses, src_region_masks,
region_feats, region_clses, region_masks,
config.explore_mode)
print('img_feats', img_feats.size())
print('txt_feats', txt_feats.size())
if config.subspace_alignment_mode > 0:
print('masked_feats', masked_feats.size())
print('subspace_masks', subspace_masks.size())
if config.instance_dim > 1:
print('sample_logits', sample_logits.size())
print('sample_indices', sample_indices.size())
print('time:', time() - start)
break
def test_image_hred_model(config):
db = vg(config, 'train')
loaddb = caption_loader(db)
loader = DataLoader(loaddb, batch_size=3*config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=caption_collate_fn)
net = ImageHREDModel(config)
net.train()
for name, param in net.named_parameters():
print(name, param.size())
for cnt, batched in enumerate(loader):
images = batched['images'].float()
sent_inds = batched['sent_inds'].long()
sent_msks = batched['sent_msks'].long()
img_feats, txt_feats = net(sent_inds, sent_msks, None, images)
print('images', images.size())
print('img_feats', img_feats.size())
print('txt_feats', txt_feats.size())
loss = net.forward_loss(img_feats, txt_feats)
print(loss)
metrics, caches = net.evaluate(img_feats, txt_feats)
print(metrics)
break
def test_region_grounding_model(config):
db = vg(config, 'test')
loaddb = region_loader(db)
loader = DataLoader(loaddb, batch_size=3*config.batch_size,
shuffle=True, num_workers=config.num_workers,
collate_fn=region_collate_fn)
net = RegionGroundingModel(config)
if config.pretrained is not None:
pretrained_path = osp.join(config.data_dir, 'caches/region_grounding_ckpts', config.pretrained+'.pkl')
states = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
net.load_state_dict(states['state_dict'], strict=False)
net.train()
for name, param in net.named_parameters():
print(name, param.size())
for cnt, batched in enumerate(loader):
scene_inds = batched['scene_inds'].long()
sent_inds = batched['sent_inds'].long()
sent_msks = batched['sent_msks'].long()
region_feats = batched['region_feats'].float()
region_clses = batched['region_clses'].long()
region_masks = batched['region_masks'].float()
img_feats, masked_feats, txt_feats, subspace_masks, sample_logits, sample_indices = \
net(scene_inds, sent_inds, sent_msks, None, None, None, region_feats, region_clses, region_masks, config.explore_mode)
if config.instance_dim > 1:
print(sample_indices[0])
# print('sample_logits', sample_logits.size())
# print('sample_indices', sample_indices.size())
txt_masks = txt_feats.new_ones(txt_feats.size(0), txt_feats.size(1))
losses = net.final_loss(img_feats, masked_feats, region_masks, txt_feats, txt_masks, sample_logits, sample_indices)
print('losses', losses.size(), torch.mean(losses))
if config.subspace_alignment_mode > 0:
metrics, cache_results = net.evaluate(masked_feats, region_masks, txt_feats)
else:
metrics, cache_results = net.evaluate(img_feats, region_masks, txt_feats)
print('metrics', metrics)
print('txt_feats', txt_feats.size())
print('img_feats', img_feats.size())
break
if __name__ == '__main__':
config, unparsed = get_config()
np.random.seed(config.seed)
random.seed(config.seed)
torch.manual_seed(config.seed)
    if config.cuda:
torch.cuda.manual_seed_all(config.seed)
prepare_directories(config)
# test_attention(config)
# test_softmax_rnn(config)
# test_image_model(config)
# test_region_model(config)
# test_region_grounding_model(config)
test_paragraph_model(config)
# test_image_hred_model(config)
# test_region_encoder(config)
# test_image_encoder(config)
# test_text_encoder(config)
# test_tirg_rnn(config)
# test_grounding_loss(config)
/scrapers/dizilab_scraper.py | repo: matt2005/salts | language: Python | license: none
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import scraper
import re
import urlparse
import urllib
from salts_lib import kodi
from salts_lib import dom_parser
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import FORCE_NO_MATCH
BASE_URL = 'http://dizilab.com'
class Dizilab_Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE])
@classmethod
def get_name(cls):
return 'Dizilab'
def resolve_link(self, link):
return link
def format_source_label(self, item):
label = '[%s] %s ' % (item['quality'], item['host'])
return label
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if source_url and source_url != FORCE_NO_MATCH:
url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(url, cache_limit=.5)
            for match in re.finditer(r'{\s*file\s*:\s*"([^"]+)', html):
stream_url = match.group(1)
if 'dizlab' in stream_url.lower():
continue
hoster = {'multi-part': False, 'host': self._get_direct_hostname(stream_url), 'class': self, 'quality': self._gv_get_quality(stream_url), 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
hosters.append(hoster)
return hosters
def get_url(self, video):
return super(Dizilab_Scraper, self)._default_get_url(video)
def _get_episode_url(self, show_url, video):
        episode_pattern = r'class="episode"\s+href="([^"]+/sezon-%s/bolum-%s)"' % (video.season, video.episode)
        title_pattern = r'class="episode-name"\s+href="(?P<url>[^"]+)">(?P<title>[^<]+)'
return super(Dizilab_Scraper, self)._default_get_episode_url(show_url, video, episode_pattern, title_pattern)
def search(self, video_type, title, year):
search_url = urlparse.urljoin(self.base_url, '/arsiv?limit=&tur=&orderby=&ulke=&order=&yil=&dizi_adi=')
search_url += urllib.quote_plus(title)
html = self._http_get(search_url, cache_limit=8)
results = []
for item in dom_parser.parse_dom(html, 'div', {'class': 'tv-series-single'}):
try:
url = re.search('href="([^"]+)', item).group(1)
except:
url = ''
try:
                match_year = re.search(r'<span>\s*(\d{4})\s*</span>', item).group(1)
except:
match_year = ''
try:
match_title = dom_parser.parse_dom(item, 'a', {'class': 'title'})
match_title = re.search('([^>]+)$', match_title[0]).group(1)
match_title = match_title.strip()
except:
match_title = ''
if url and match_title and (not year or not match_year or year == match_year):
result = {'url': self._pathify_url(url), 'title': match_title, 'year': ''}
results.append(result)
return results
/ecomm/account/migrations/0003_auto_20180402_1601.py | repo: derikkip96/efarm | language: Python | license: none
# Generated by Django 2.0.2 on 2018-04-02 13:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0002_auto_20180331_0212'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='image',
field=models.ImageField(blank=True, upload_to='upload'),
),
]
/your_project/your_package/migrations/0001_initial.py | repo: axiome-oss/dive-into-django-i18n | language: Python | license: none
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('description', models.TextField(null=True, blank=True)),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
),
]
/haystack/pipeline.py | repo: marjanhs/haystack | language: Python | license: Apache-2.0
import copy
import inspect
import logging
import os
import traceback
from abc import ABC
from copy import deepcopy
from pathlib import Path
from typing import List, Optional, Dict, Union, Any
import pickle
import urllib
from functools import wraps
try:
from ray import serve
import ray
except:
ray = None
serve = None
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TextClassificationPipeline
import networkx as nx
import yaml
from networkx import DiGraph
from networkx.drawing.nx_agraph import to_agraph
from haystack import BaseComponent
from haystack.generator.base import BaseGenerator
from haystack.reader.base import BaseReader
from haystack.retriever.base import BaseRetriever
from haystack.summarizer.base import BaseSummarizer
from haystack.translator.base import BaseTranslator
from haystack.knowledge_graph.base import BaseKnowledgeGraph
from haystack.graph_retriever.base import BaseGraphRetriever
logger = logging.getLogger(__name__)
class BasePipeline:
def run(self, **kwargs):
raise NotImplementedError
@classmethod
def load_from_yaml(cls, path: Path, pipeline_name: Optional[str] = None, overwrite_with_env_variables: bool = True):
"""
Load Pipeline from a YAML file defining the individual components and how they're tied together to form
a Pipeline. A single YAML can declare multiple Pipelines, in which case an explicit `pipeline_name` must
be passed.
Here's a sample configuration:
```yaml
| version: '0.8'
|
| components: # define all the building-blocks for Pipeline
| - name: MyReader # custom-name for the component; helpful for visualization & debugging
| type: FARMReader # Haystack Class name for the component
| params:
| no_ans_boost: -10
| model_name_or_path: deepset/roberta-base-squad2
| - name: MyESRetriever
| type: ElasticsearchRetriever
| params:
| document_store: MyDocumentStore # params can reference other components defined in the YAML
| custom_query: null
| - name: MyDocumentStore
| type: ElasticsearchDocumentStore
| params:
| index: haystack_test
|
| pipelines: # multiple Pipelines can be defined using the components from above
| - name: my_query_pipeline # a simple extractive-qa Pipeline
| nodes:
| - name: MyESRetriever
| inputs: [Query]
| - name: MyReader
| inputs: [MyESRetriever]
```
:param path: path of the YAML file.
:param pipeline_name: if the YAML contains multiple pipelines, the pipeline_name to load must be set.
:param overwrite_with_env_variables: Overwrite the YAML configuration with environment variables. For example,
to change index name param for an ElasticsearchDocumentStore, an env
variable 'MYDOCSTORE_PARAMS_INDEX=documents-2021' can be set. Note that an
`_` sign must be used to specify nested hierarchical properties.
"""
pipeline_config = cls._get_pipeline_config_from_yaml(path=path, pipeline_name=pipeline_name)
if pipeline_config["type"] == "Pipeline":
return Pipeline.load_from_yaml(
path=path, pipeline_name=pipeline_name, overwrite_with_env_variables=overwrite_with_env_variables
)
elif pipeline_config["type"] == "RayPipeline":
return RayPipeline.load_from_yaml(
path=path, pipeline_name=pipeline_name, overwrite_with_env_variables=overwrite_with_env_variables
)
else:
raise KeyError(f"Pipeline Type '{pipeline_config['type']}' is not a valid. The available types are"
f"'Pipeline' and 'RayPipeline'.")
@classmethod
def _get_pipeline_config_from_yaml(cls, path: Path, pipeline_name: Optional[str] = None):
"""
Get the definition of Pipeline from a given YAML. If the YAML contains more than one Pipeline,
then the pipeline_name must be supplied.
:param path: Path of Pipeline YAML file.
:param pipeline_name: name of the Pipeline.
"""
with open(path, "r", encoding='utf-8') as stream:
data = yaml.safe_load(stream)
if pipeline_name is None:
if len(data["pipelines"]) == 1:
pipeline_config = data["pipelines"][0]
else:
raise Exception("The YAML contains multiple pipelines. Please specify the pipeline name to load.")
else:
pipelines_in_yaml = list(filter(lambda p: p["name"] == pipeline_name, data["pipelines"]))
if not pipelines_in_yaml:
raise KeyError(f"Cannot find any pipeline with name '{pipeline_name}' declared in the YAML file.")
pipeline_config = pipelines_in_yaml[0]
return pipeline_config
@classmethod
def _read_yaml(cls, path: Path, pipeline_name: Optional[str], overwrite_with_env_variables: bool):
"""
Parse the YAML and return the full YAML config, pipeline_config, and definitions of all components.
:param path: path of the YAML file.
:param pipeline_name: if the YAML contains multiple pipelines, the pipeline_name to load must be set.
:param overwrite_with_env_variables: Overwrite the YAML configuration with environment variables. For example,
to change index name param for an ElasticsearchDocumentStore, an env
variable 'MYDOCSTORE_PARAMS_INDEX=documents-2021' can be set. Note that an
`_` sign must be used to specify nested hierarchical properties.
"""
with open(path, "r", encoding="utf-8") as stream:
data = yaml.safe_load(stream)
pipeline_config = cls._get_pipeline_config_from_yaml(path=path, pipeline_name=pipeline_name)
definitions = {} # definitions of each component from the YAML.
component_definitions = copy.deepcopy(data["components"])
for definition in component_definitions:
if overwrite_with_env_variables:
cls._overwrite_with_env_variables(definition)
name = definition.pop("name")
definitions[name] = definition
return data, pipeline_config, definitions
@classmethod
def _overwrite_with_env_variables(cls, definition: dict):
"""
Overwrite the YAML configuration with environment variables. For example, to change index name param for an
ElasticsearchDocumentStore, an env variable 'MYDOCSTORE_PARAMS_INDEX=documents-2021' can be set. Note that an
`_` sign must be used to specify nested hierarchical properties.
:param definition: a dictionary containing the YAML definition of a component.
"""
env_prefix = f"{definition['name']}_params_".upper()
for key, value in os.environ.items():
if key.startswith(env_prefix):
param_name = key.replace(env_prefix, "").lower()
definition["params"][param_name] = value
class Pipeline(BasePipeline):
"""
Pipeline brings together building blocks to build a complex search pipeline with Haystack & user-defined components.
Under-the-hood, a pipeline is represented as a directed acyclic graph of component nodes. It enables custom query
flows with options to branch queries(eg, extractive qa vs keyword match query), merge candidate documents for a
Reader from multiple Retrievers, or re-ranking of candidate documents.
"""
def __init__(self):
self.graph = DiGraph()
self.root_node = None
self.components: dict = {}
def add_node(self, component, name: str, inputs: List[str]):
"""
Add a new node to the pipeline.
:param component: The object to be called when the data is passed to the node. It can be a Haystack component
(like Retriever, Reader, or Generator) or a user-defined object that implements a run()
method to process incoming data from predecessor node.
:param name: The name for the node. It must not contain any dots.
:param inputs: A list of inputs to the node. If the predecessor node has a single outgoing edge, just the name
of node is sufficient. For instance, a 'ElasticsearchRetriever' node would always output a single
edge with a list of documents. It can be represented as ["ElasticsearchRetriever"].
In cases when the predecessor node has multiple outputs, e.g., a "QueryClassifier", the output
must be specified explicitly as "QueryClassifier.output_2".
"""
if self.root_node is None:
root_node = inputs[0]
if root_node in ["Query", "File"]:
self.root_node = root_node
self.graph.add_node(root_node, component=RootNode())
else:
raise KeyError(f"Root node '{root_node}' is invalid. Available options are 'Query' and 'File'.")
self.graph.add_node(name, component=component, inputs=inputs)
if len(self.graph.nodes) == 2: # first node added; connect with Root
assert len(inputs) == 1 and inputs[0].split(".")[0] == self.root_node, \
f"The '{name}' node can only input from {self.root_node}. " \
f"Set the 'inputs' parameter to ['{self.root_node}']"
self.graph.add_edge(self.root_node, name, label="output_1")
return
for i in inputs:
if "." in i:
[input_node_name, input_edge_name] = i.split(".")
assert "output_" in input_edge_name, f"'{input_edge_name}' is not a valid edge name."
outgoing_edges_input_node = self.graph.nodes[input_node_name]["component"].outgoing_edges
assert int(input_edge_name.split("_")[1]) <= outgoing_edges_input_node, (
f"Cannot connect '{input_edge_name}' from '{input_node_name}' as it only has "
f"{outgoing_edges_input_node} outgoing edge(s)."
)
else:
outgoing_edges_input_node = self.graph.nodes[i]["component"].outgoing_edges
assert outgoing_edges_input_node == 1, (
f"Adding an edge from {i} to {name} is ambiguous as {i} has {outgoing_edges_input_node} edges. "
f"Please specify the output explicitly."
)
input_node_name = i
input_edge_name = "output_1"
self.graph.add_edge(input_node_name, name, label=input_edge_name)
def get_node(self, name: str) -> Optional[BaseComponent]:
"""
Get a node from the Pipeline.
:param name: The name of the node.
"""
graph_node = self.graph.nodes.get(name)
component = graph_node["component"] if graph_node else None
return component
def set_node(self, name: str, component):
"""
Set the component for a node in the Pipeline.
:param name: The name of the node.
:param component: The component object to be set at the node.
"""
self.graph.nodes[name]["component"] = component
def run(self, **kwargs):
node_output = None
queue = {
self.root_node: {"root_node": self.root_node, **kwargs}
} # ordered dict with "node_id" -> "input" mapping that acts as a FIFO queue
i = 0 # the first item is popped off the queue unless it is a "join" node with unprocessed predecessors
while queue:
node_id = list(queue.keys())[i]
node_input = queue[node_id]
node_input["node_id"] = node_id
predecessors = set(nx.ancestors(self.graph, node_id))
if predecessors.isdisjoint(set(queue.keys())): # only execute if predecessor nodes are executed
try:
logger.debug(f"Running node `{node_id}` with input `{node_input}`")
node_output, stream_id = self.graph.nodes[node_id]["component"].run(**node_input)
except Exception as e:
tb = traceback.format_exc()
raise Exception(f"Exception while running node `{node_id}` with input `{node_input}`: {e}, full stack trace: {tb}")
queue.pop(node_id)
next_nodes = self.get_next_nodes(node_id, stream_id)
for n in next_nodes: # add successor nodes with corresponding inputs to the queue
if queue.get(n): # concatenate inputs if it's a join node
existing_input = queue[n]
if "inputs" not in existing_input.keys():
updated_input = {"inputs": [existing_input, node_output]}
else:
existing_input["inputs"].append(node_output)
updated_input = existing_input
queue[n] = updated_input
else:
queue[n] = node_output
i = 0
else:
i += 1 # attempt executing next node in the queue as current `node_id` has unprocessed predecessors
return node_output
def get_next_nodes(self, node_id: str, stream_id: str):
current_node_edges = self.graph.edges(node_id, data=True)
next_nodes = [
next_node
for _, next_node, data in current_node_edges
if not stream_id or data["label"] == stream_id or stream_id == "output_all"
]
return next_nodes
def draw(self, path: Path = Path("pipeline.png")):
"""
Create a Graphviz visualization of the pipeline.
:param path: the path to save the image.
"""
try:
import pygraphviz
except ImportError:
raise ImportError(f"Could not import `pygraphviz`. Please install via: \n"
f"pip install pygraphviz\n"
f"(You might need to run this first: apt install libgraphviz-dev graphviz )")
graphviz = to_agraph(self.graph)
graphviz.layout("dot")
graphviz.draw(path)
@classmethod
def load_from_yaml(cls, path: Path, pipeline_name: Optional[str] = None, overwrite_with_env_variables: bool = True):
"""
Load Pipeline from a YAML file defining the individual components and how they're tied together to form
a Pipeline. A single YAML can declare multiple Pipelines, in which case an explicit `pipeline_name` must
be passed.
Here's a sample configuration:
```yaml
| version: '0.8'
|
| components: # define all the building-blocks for Pipeline
| - name: MyReader # custom-name for the component; helpful for visualization & debugging
| type: FARMReader # Haystack Class name for the component
| params:
| no_ans_boost: -10
| model_name_or_path: deepset/roberta-base-squad2
| - name: MyESRetriever
| type: ElasticsearchRetriever
| params:
| document_store: MyDocumentStore # params can reference other components defined in the YAML
| custom_query: null
| - name: MyDocumentStore
| type: ElasticsearchDocumentStore
| params:
| index: haystack_test
|
| pipelines: # multiple Pipelines can be defined using the components from above
| - name: my_query_pipeline # a simple extractive-qa Pipeline
| nodes:
| - name: MyESRetriever
| inputs: [Query]
| - name: MyReader
| inputs: [MyESRetriever]
```
:param path: path of the YAML file.
:param pipeline_name: if the YAML contains multiple pipelines, the pipeline_name to load must be set.
:param overwrite_with_env_variables: Overwrite the YAML configuration with environment variables. For example,
to change index name param for an ElasticsearchDocumentStore, an env
variable 'MYDOCSTORE_PARAMS_INDEX=documents-2021' can be set. Note that an
`_` sign must be used to specify nested hierarchical properties.
"""
data, pipeline_config, definitions = cls._read_yaml(
path=path, pipeline_name=pipeline_name, overwrite_with_env_variables=overwrite_with_env_variables
)
pipeline = cls()
components: dict = {} # instances of component objects.
for node_config in pipeline_config["nodes"]:
name = node_config["name"]
component = cls._load_or_get_component(name=name, definitions=definitions, components=components)
pipeline.add_node(component=component, name=node_config["name"], inputs=node_config.get("inputs", []))
return pipeline
@classmethod
def _load_or_get_component(cls, name: str, definitions: dict, components: dict):
"""
Load a component from the definition or return if component object already present in `components` dict.
:param name: name of the component to load or get.
:param definitions: dict containing definitions of all components retrieved from the YAML.
:param components: dict containing component objects.
"""
try:
if name in components.keys(): # check if component is already loaded.
return components[name]
component_params = definitions[name].get("params", {})
component_type = definitions[name]["type"]
logger.debug(f"Loading component `{name}` of type `{definitions[name]['type']}`")
for key, value in component_params.items():
# Component params can reference to other components. For instance, a Retriever can reference a
# DocumentStore defined in the YAML. All references should be recursively resolved.
if isinstance(value, str) and value in definitions.keys(): # check if the param value is a reference to another component.
if value not in components.keys(): # check if the referenced component is already loaded.
cls._load_or_get_component(name=value, definitions=definitions, components=components)
component_params[key] = components[value] # substitute reference (string) with the component object.
instance = BaseComponent.load_from_args(component_type=component_type, **component_params)
components[name] = instance
except Exception as e:
raise Exception(f"Failed loading pipeline component '{name}': {e}")
return instance
def save_to_yaml(self, path: Path, return_defaults: bool = False):
"""
Save a YAML configuration for the Pipeline that can be used with `Pipeline.load_from_yaml()`.
:param path: path of the output YAML file.
:param return_defaults: whether to output parameters that have the default values.
"""
nodes = self.graph.nodes
pipeline_name = self.root_node.lower()
pipelines: dict = {pipeline_name: {"name": pipeline_name, "type": "Pipeline", "nodes": []}}
components = {}
for node in nodes:
if node == self.root_node:
continue
component_instance = self.graph.nodes.get(node)["component"]
component_type = component_instance.pipeline_config["type"]
component_params = component_instance.pipeline_config["params"]
components[node] = {"name": node, "type": component_type, "params": {}}
component_signature = inspect.signature(type(component_instance)).parameters
for key, value in component_params.items():
# A parameter for a Component could be another Component. For instance, a Retriever has
# the DocumentStore as a parameter.
# Component configs must be a dict with a "type" key. The "type" keys distinguishes between
# other parameters like "custom_mapping" that are dicts.
                # This currently only checks for the single-level nesting case, wherein "a Component has another
# Component as a parameter". For deeper nesting cases, this function should be made recursive.
if isinstance(value, dict) and "type" in value.keys(): # the parameter is a Component
components[node]["params"][key] = value["type"]
sub_component_signature = inspect.signature(BaseComponent.subclasses[value["type"]]).parameters
params = {
k: v for k, v in value["params"].items()
if sub_component_signature[k].default != v or return_defaults is True
}
components[value["type"]] = {"name": value["type"], "type": value["type"], "params": params}
else:
if component_signature[key].default != value or return_defaults is True:
components[node]["params"][key] = value
# create the Pipeline definition with how the Component are connected
pipelines[pipeline_name]["nodes"].append({"name": node, "inputs": list(self.graph.predecessors(node))})
config = {"components": list(components.values()), "pipelines": list(pipelines.values()), "version": "0.8"}
with open(path, 'w') as outfile:
yaml.dump(config, outfile, default_flow_style=False)
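def _example_build_custom_pipeline(retriever: BaseRetriever, reader: BaseReader) -> Pipeline:
    # Editor's sketch (not part of the original module): a minimal extractive-QA graph
    # wired by hand with add_node(). The caller supplies already-initialised Retriever
    # and Reader components; node names and run() parameters are illustrative only.
    pipe = Pipeline()
    pipe.add_node(component=retriever, name="Retriever", inputs=["Query"])
    pipe.add_node(component=reader, name="Reader", inputs=["Retriever"])
    # result = pipe.run(query="Who wrote Faust?", top_k_retriever=10, top_k_reader=5)
    return pipe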
class BaseStandardPipeline(ABC):
pipeline: Pipeline
def add_node(self, component, name: str, inputs: List[str]):
"""
Add a new node to the pipeline.
:param component: The object to be called when the data is passed to the node. It can be a Haystack component
(like Retriever, Reader, or Generator) or a user-defined object that implements a run()
method to process incoming data from predecessor node.
:param name: The name for the node. It must not contain any dots.
:param inputs: A list of inputs to the node. If the predecessor node has a single outgoing edge, just the name
of node is sufficient. For instance, a 'ElasticsearchRetriever' node would always output a single
edge with a list of documents. It can be represented as ["ElasticsearchRetriever"].
In cases when the predecessor node has multiple outputs, e.g., a "QueryClassifier", the output
must be specified explicitly as "QueryClassifier.output_2".
"""
self.pipeline.add_node(component=component, name=name, inputs=inputs)
def get_node(self, name: str):
"""
Get a node from the Pipeline.
:param name: The name of the node.
"""
component = self.pipeline.get_node(name)
return component
def set_node(self, name: str, component):
"""
Set the component for a node in the Pipeline.
:param name: The name of the node.
:param component: The component object to be set at the node.
"""
self.pipeline.set_node(name, component)
def draw(self, path: Path = Path("pipeline.png")):
"""
Create a Graphviz visualization of the pipeline.
:param path: the path to save the image.
"""
self.pipeline.draw(path)
class ExtractiveQAPipeline(BaseStandardPipeline):
def __init__(self, reader: BaseReader, retriever: BaseRetriever):
"""
Initialize a Pipeline for Extractive Question Answering.
:param reader: Reader instance
:param retriever: Retriever instance
"""
self.pipeline = Pipeline()
self.pipeline.add_node(component=retriever, name="Retriever", inputs=["Query"])
self.pipeline.add_node(component=reader, name="Reader", inputs=["Retriever"])
def run(self, query: str, filters: Optional[Dict] = None, top_k_retriever: int = 10, top_k_reader: int = 10):
output = self.pipeline.run(
query=query, filters=filters, top_k_retriever=top_k_retriever, top_k_reader=top_k_reader
)
return output
class DocumentSearchPipeline(BaseStandardPipeline):
def __init__(self, retriever: BaseRetriever):
"""
Initialize a Pipeline for semantic document search.
:param retriever: Retriever instance
"""
self.pipeline = Pipeline()
self.pipeline.add_node(component=retriever, name="Retriever", inputs=["Query"])
def run(self, query: str, filters: Optional[Dict] = None, top_k_retriever: Optional[int] = None):
output = self.pipeline.run(query=query, filters=filters, top_k_retriever=top_k_retriever)
document_dicts = [doc.to_dict() for doc in output["documents"]]
output["documents"] = document_dicts
return output
class GenerativeQAPipeline(BaseStandardPipeline):
def __init__(self, generator: BaseGenerator, retriever: BaseRetriever):
"""
Initialize a Pipeline for Generative Question Answering.
:param generator: Generator instance
:param retriever: Retriever instance
"""
self.pipeline = Pipeline()
self.pipeline.add_node(component=retriever, name="Retriever", inputs=["Query"])
self.pipeline.add_node(component=generator, name="Generator", inputs=["Retriever"])
def run(
self,
query: str,
filters: Optional[Dict] = None,
top_k_retriever: Optional[int] = None,
top_k_generator: Optional[int] = None
):
output = self.pipeline.run(
query=query, filters=filters, top_k_retriever=top_k_retriever, top_k_generator=top_k_generator
)
return output
class SearchSummarizationPipeline(BaseStandardPipeline):
def __init__(self, summarizer: BaseSummarizer, retriever: BaseRetriever):
"""
Initialize a Pipeline that retrieves documents for a query and then summarizes those documents.
:param summarizer: Summarizer instance
:param retriever: Retriever instance
"""
self.pipeline = Pipeline()
self.pipeline.add_node(component=retriever, name="Retriever", inputs=["Query"])
self.pipeline.add_node(component=summarizer, name="Summarizer", inputs=["Retriever"])
def run(
self,
query: str,
filters: Optional[Dict] = None,
top_k_retriever: Optional[int] = None,
generate_single_summary: Optional[bool] = None,
return_in_answer_format: bool = False,
):
"""
:param query: Your search query
:param filters:
:param top_k_retriever: Number of top docs the retriever should pass to the summarizer.
The higher this value, the slower your pipeline.
:param generate_single_summary: Whether to generate single summary from all retrieved docs (True) or one per doc (False).
:param return_in_answer_format: Whether the results should be returned as documents (False) or in the answer format used in other QA pipelines (True).
With the latter, you can use this pipeline as a "drop-in replacement" for other QA pipelines.
"""
output = self.pipeline.run(
query=query, filters=filters, top_k_retriever=top_k_retriever, generate_single_summary=generate_single_summary
)
# Convert to answer format to allow "drop-in replacement" for other QA pipelines
if return_in_answer_format:
results: Dict = {"query": query, "answers": []}
docs = deepcopy(output["documents"])
for doc in docs:
cur_answer = {
"query": query,
"answer": doc.text,
"document_id": doc.id,
"context": doc.meta.pop("context"),
"score": None,
"offset_start": None,
"offset_end": None,
"meta": doc.meta,
}
results["answers"].append(cur_answer)
else:
results = output
return results
class FAQPipeline(BaseStandardPipeline):
def __init__(self, retriever: BaseRetriever):
"""
Initialize a Pipeline for finding similar FAQs using semantic document search.
:param retriever: Retriever instance
"""
self.pipeline = Pipeline()
self.pipeline.add_node(component=retriever, name="Retriever", inputs=["Query"])
def run(self, query: str, filters: Optional[Dict] = None, top_k_retriever: Optional[int] = None):
output = self.pipeline.run(query=query, filters=filters, top_k_retriever=top_k_retriever)
documents = output["documents"]
results: Dict = {"query": query, "answers": []}
for doc in documents:
# TODO proper calibration of pseudo probabilities
cur_answer = {
"query": doc.text,
"answer": doc.meta["answer"],
"document_id": doc.id,
"context": doc.meta["answer"],
"score": doc.score,
"offset_start": 0,
"offset_end": len(doc.meta["answer"]),
"meta": doc.meta,
}
results["answers"].append(cur_answer)
return results
class TranslationWrapperPipeline(BaseStandardPipeline):
"""
Takes an existing search pipeline and adds one "input translation node" after the Query and one
"output translation" node just before returning the results
"""
def __init__(
self,
input_translator: BaseTranslator,
output_translator: BaseTranslator,
pipeline: BaseStandardPipeline
):
"""
Wrap a given `pipeline` with the `input_translator` and `output_translator`.
:param input_translator: A Translator node that shall translate the input query from language A to B
:param output_translator: A Translator node that shall translate the pipeline results from language B to A
:param pipeline: The pipeline object (e.g. ExtractiveQAPipeline) you want to "wrap".
Note that pipelines with split or merge nodes are currently not supported.
"""
self.pipeline = Pipeline()
self.pipeline.add_node(component=input_translator, name="InputTranslator", inputs=["Query"])
graph = pipeline.pipeline.graph
previous_node_name = ["InputTranslator"]
# Traverse in BFS
for node in graph.nodes:
if node == "Query":
continue
            # TODO: Does not work properly for Join Nodes and the Answer format
if graph.nodes[node]["inputs"] and len(graph.nodes[node]["inputs"]) > 1:
raise AttributeError("Split and merge nodes are not supported currently")
self.pipeline.add_node(name=node, component=graph.nodes[node]["component"], inputs=previous_node_name)
previous_node_name = [node]
self.pipeline.add_node(component=output_translator, name="OutputTranslator", inputs=previous_node_name)
def run(self, **kwargs):
output = self.pipeline.run(**kwargs)
return output
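def _example_wrap_with_translation(input_translator: BaseTranslator,
                                   output_translator: BaseTranslator,
                                   qa_pipeline: BaseStandardPipeline) -> TranslationWrapperPipeline:
    # Editor's sketch (not part of the original module): wrapping an existing query
    # pipeline so queries are translated before retrieval and the results are
    # translated back afterwards. Concrete Translator models are supplied by the
    # caller and are not assumed here.
    wrapped = TranslationWrapperPipeline(input_translator=input_translator,
                                         output_translator=output_translator,
                                         pipeline=qa_pipeline)
    # wrapped.run(query="Wer hat Faust geschrieben?", top_k_retriever=10, top_k_reader=5)
    return wrapped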
class QuestionGenerationPipeline(BaseStandardPipeline):
"""
A simple pipeline that takes documents as input and generates
questions that it thinks can be answered by the documents.
"""
def __init__(self, question_generator):
self.pipeline = Pipeline()
self.pipeline.add_node(component=question_generator, name="QuestionGenerator", inputs=["Query"])
def run(self, documents, **kwargs):
kwargs["documents"] = documents
output = self.pipeline.run(**kwargs)
return output
class RetrieverQuestionGenerationPipeline(BaseStandardPipeline):
"""
A simple pipeline that takes a query as input, performs retrieval, and then generates
questions that it thinks can be answered by the retrieved documents.
"""
def __init__(self, retriever, question_generator):
self.pipeline = Pipeline()
self.pipeline.add_node(component=retriever, name="Retriever", inputs=["Query"])
self.pipeline.add_node(component=question_generator, name="Question Generator", inputs=["Retriever"])
def run(self, query, **kwargs):
kwargs["query"] = query
output = self.pipeline.run(**kwargs)
return output
class QuestionAnswerGenerationPipeline(BaseStandardPipeline):
"""
This is a pipeline which takes a document as input, generates questions that the model thinks can be answered by
    this document, and then performs question answering on these questions using that single document.
"""
def __init__(self, question_generator, reader):
question_generator.run = self.formatting_wrapper(question_generator.run)
# Overwrite reader.run function so it can handle a batch of questions being passed on by the QuestionGenerator
reader.run = reader.run_batch
self.pipeline = Pipeline()
self.pipeline.add_node(component=question_generator, name="QuestionGenerator", inputs=["Query"])
self.pipeline.add_node(component=reader, name="Reader", inputs=["QuestionGenerator"])
# This is used to format the output of the QuestionGenerator so that its questions are ready to be answered by the reader
def formatting_wrapper(self, fn):
@wraps(fn)
def wrapper(*args, **kwargs):
output, output_stream = fn(*args, **kwargs)
questions = output["generated_questions"][0]["questions"]
documents = output["documents"]
query_doc_list = []
for q in questions:
query_doc_list.append({"queries": q, "docs": documents})
kwargs["query_doc_list"] = query_doc_list
return kwargs, output_stream
return wrapper
def run(self, document, **kwargs):
kwargs["documents"] = [document]
output = self.pipeline.run(**kwargs)
return output
class RootNode(BaseComponent):
"""
    RootNode feeds inputs (`query` or `file`) together with corresponding parameters to a Pipeline.
"""
outgoing_edges = 1
def run(self, **kwargs):
return kwargs, "output_1"
class SklearnQueryClassifier(BaseComponent):
"""
A node to classify an incoming query into one of two categories using a lightweight sklearn model. Depending on the result, the query flows to a different branch in your pipeline
and the further processing can be customized. You can define this by connecting the further pipeline to either `output_1` or `output_2` from this node.
Example:
```python
|pipe = Pipeline()
|pipe.add_node(component=SklearnQueryClassifier(), name="QueryClassifier", inputs=["Query"])
|pipe.add_node(component=elastic_retriever, name="ElasticRetriever", inputs=["QueryClassifier.output_2"])
|pipe.add_node(component=dpr_retriever, name="DPRRetriever", inputs=["QueryClassifier.output_1"])
|# Keyword queries will use the ElasticRetriever
|pipe.run("kubernetes aws")
|# Semantic queries (questions, statements, sentences ...) will leverage the DPR retriever
|pipe.run("How to manage kubernetes on aws")
```
Models:
Pass your own `Sklearn` binary classification model or use one of the following pretrained ones:
1) Keywords vs. Questions/Statements (Default)
query_classifier can be found [here](https://ext-models-haystack.s3.eu-central-1.amazonaws.com/gradboost_query_classifier/model.pickle)
query_vectorizer can be found [here](https://ext-models-haystack.s3.eu-central-1.amazonaws.com/gradboost_query_classifier/vectorizer.pickle)
output_1 => question/statement
output_2 => keyword query
[Readme](https://ext-models-haystack.s3.eu-central-1.amazonaws.com/gradboost_query_classifier/readme.txt)
2) Questions vs. Statements
query_classifier can be found [here](https://ext-models-haystack.s3.eu-central-1.amazonaws.com/gradboost_query_classifier_statements/model.pickle)
query_vectorizer can be found [here](https://ext-models-haystack.s3.eu-central-1.amazonaws.com/gradboost_query_classifier_statements/vectorizer.pickle)
output_1 => question
output_2 => statement
[Readme](https://ext-models-haystack.s3.eu-central-1.amazonaws.com/gradboost_query_classifier_statements/readme.txt)
See also the [tutorial](https://haystack.deepset.ai/docs/latest/tutorial11md) on pipelines.
"""
outgoing_edges = 2
def __init__(
self,
model_name_or_path: Union[
str, Any
] = "https://ext-models-haystack.s3.eu-central-1.amazonaws.com/gradboost_query_classifier/model.pickle",
vectorizer_name_or_path: Union[
str, Any
] = "https://ext-models-haystack.s3.eu-central-1.amazonaws.com/gradboost_query_classifier/vectorizer.pickle"
):
"""
:param model_name_or_path: Gradient boosting based binary classifier to classify between keyword vs statement/question
queries or statement vs question queries.
:param vectorizer_name_or_path: A ngram based Tfidf vectorizer for extracting features from query.
"""
if (
(not isinstance(model_name_or_path, Path))
and (not isinstance(model_name_or_path, str))
) or (
(not isinstance(vectorizer_name_or_path, Path))
and (not isinstance(vectorizer_name_or_path, str))
):
raise TypeError(
"model_name_or_path and vectorizer_name_or_path must either be of type Path or str"
)
# save init parameters to enable export of component config as YAML
self.set_config(model_name_or_path=model_name_or_path, vectorizer_name_or_path=vectorizer_name_or_path)
if isinstance(model_name_or_path, Path):
file_url = urllib.request.pathname2url(r"{}".format(model_name_or_path))
model_name_or_path = f"file:{file_url}"
if isinstance(vectorizer_name_or_path, Path):
file_url = urllib.request.pathname2url(r"{}".format(vectorizer_name_or_path))
vectorizer_name_or_path = f"file:{file_url}"
self.model = pickle.load(urllib.request.urlopen(model_name_or_path))
self.vectorizer = pickle.load(urllib.request.urlopen(vectorizer_name_or_path))
def run(self, **kwargs):
query_vector = self.vectorizer.transform([kwargs["query"]])
is_question: bool = self.model.predict(query_vector)[0]
if is_question:
return (kwargs, "output_1")
else:
return (kwargs, "output_2")
class TransformersQueryClassifier(BaseComponent):
"""
A node to classify an incoming query into one of two categories using a (small) BERT transformer model. Depending on the result, the query flows to a different branch in your pipeline
and the further processing can be customized. You can define this by connecting the further pipeline to either `output_1` or `output_2` from this node.
Example:
```python
|pipe = Pipeline()
|pipe.add_node(component=TransformersQueryClassifier(), name="QueryClassifier", inputs=["Query"])
|pipe.add_node(component=elastic_retriever, name="ElasticRetriever", inputs=["QueryClassifier.output_2"])
|pipe.add_node(component=dpr_retriever, name="DPRRetriever", inputs=["QueryClassifier.output_1"])
|# Keyword queries will use the ElasticRetriever
|pipe.run("kubernetes aws")
|# Semantic queries (questions, statements, sentences ...) will leverage the DPR retriever
|pipe.run("How to manage kubernetes on aws")
```
Models:
Pass your own `Transformer` binary classification model from file/huggingface or use one of the following pretrained ones hosted on Huggingface:
1) Keywords vs. Questions/Statements (Default)
model_name_or_path="shahrukhx01/bert-mini-finetune-question-detection"
output_1 => question/statement
output_2 => keyword query
[Readme](https://ext-models-haystack.s3.eu-central-1.amazonaws.com/gradboost_query_classifier/readme.txt)
2) Questions vs. Statements
`model_name_or_path`="shahrukhx01/question-vs-statement-classifier"
output_1 => question
output_2 => statement
[Readme](https://ext-models-haystack.s3.eu-central-1.amazonaws.com/gradboost_query_classifier_statements/readme.txt)
See also the [tutorial](https://haystack.deepset.ai/docs/latest/tutorial11md) on pipelines.
"""
outgoing_edges = 2
def __init__(
self,
model_name_or_path: Union[
Path, str
] = "shahrukhx01/bert-mini-finetune-question-detection"
):
"""
        :param model_name_or_path: Transformer-based, fine-tuned mini BERT model for query classification
"""
# save init parameters to enable export of component config as YAML
self.set_config(model_name_or_path=model_name_or_path)
model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
self.query_classification_pipeline = TextClassificationPipeline(
model=model, tokenizer=tokenizer
)
def run(self, **kwargs):
is_question: bool = (
self.query_classification_pipeline(kwargs["query"])[0]["label"] == "LABEL_1"
)
if is_question:
return (kwargs, "output_1")
else:
return (kwargs, "output_2")
class JoinDocuments(BaseComponent):
"""
A node to join documents outputted by multiple retriever nodes.
The node allows multiple join modes:
* concatenate: combine the documents from multiple nodes. Any duplicate documents are discarded.
* merge: merge scores of documents from multiple nodes. Optionally, each input score can be given a different
`weight` & a `top_k` limit can be set. This mode can also be used for "reranking" retrieved documents.
"""
outgoing_edges = 1
def __init__(
self, join_mode: str = "concatenate", weights: Optional[List[float]] = None, top_k_join: Optional[int] = None
):
"""
:param join_mode: `concatenate` to combine documents from multiple retrievers or `merge` to aggregate scores of
individual documents.
        :param weights: A node-wise list (its length must equal the number of input nodes) of weights for
adjusting document scores when using the `merge` join_mode. By default, equal weight is given
to each retriever score. This param is not compatible with the `concatenate` join_mode.
:param top_k_join: Limit documents to top_k based on the resulting scores of the join.
"""
assert join_mode in ["concatenate", "merge"], f"JoinDocuments node does not support '{join_mode}' join_mode."
assert not (
weights is not None and join_mode == "concatenate"
), "Weights are not compatible with 'concatenate' join_mode."
# save init parameters to enable export of component config as YAML
self.set_config(join_mode=join_mode, weights=weights, top_k_join=top_k_join)
self.join_mode = join_mode
self.weights = [float(i)/sum(weights) for i in weights] if weights else None
self.top_k_join = top_k_join
def run(self, **kwargs):
inputs = kwargs["inputs"]
if self.join_mode == "concatenate":
document_map = {}
for input_from_node in inputs:
for doc in input_from_node["documents"]:
document_map[doc.id] = doc
elif self.join_mode == "merge":
document_map = {}
if self.weights:
weights = self.weights
else:
weights = [1/len(inputs)] * len(inputs)
for input_from_node, weight in zip(inputs, weights):
for doc in input_from_node["documents"]:
if document_map.get(doc.id): # document already exists; update score
document_map[doc.id].score += doc.score * weight
else: # add the document in map
document_map[doc.id] = deepcopy(doc)
document_map[doc.id].score *= weight
else:
raise Exception(f"Invalid join_mode: {self.join_mode}")
documents = sorted(document_map.values(), key=lambda d: d.score, reverse=True)
if self.top_k_join:
documents = documents[: self.top_k_join]
output = {"query": inputs[0]["query"], "documents": documents, "labels": inputs[0].get("labels", None)}
return output, "output_1"
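def _example_hybrid_retrieval_pipeline(sparse_retriever: BaseRetriever,
                                       dense_retriever: BaseRetriever) -> Pipeline:
    # Editor's sketch (not part of the original module): two retrievers feeding a
    # JoinDocuments node in "merge" mode so their scores are combined, with the dense
    # retriever weighted higher. Node names, weights and top_k_join are illustrative
    # assumptions, not values mandated by the module.
    pipe = Pipeline()
    pipe.add_node(component=sparse_retriever, name="SparseRetriever", inputs=["Query"])
    pipe.add_node(component=dense_retriever, name="DenseRetriever", inputs=["Query"])
    pipe.add_node(component=JoinDocuments(join_mode="merge", weights=[0.3, 0.7], top_k_join=10),
                  name="JoinResults", inputs=["SparseRetriever", "DenseRetriever"])
    # pipe.run(query="how to manage kubernetes on aws", top_k_retriever=20)
    return pipe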
class RayPipeline(Pipeline):
"""
Ray (https://ray.io) is a framework for distributed computing.
    With Ray, the Pipeline nodes can be distributed across a cluster of machines.
    This allows scaling individual nodes. For instance, an extractive QA Pipeline can run multiple replicas
    of the Reader while keeping a single instance of the Retriever. It also enables efficient resource
    utilization, as the load can be split across GPU and CPU machines.
In the current implementation, a Ray Pipeline can only be created with a YAML Pipeline config.
>>> from haystack.pipeline import RayPipeline
>>> pipeline = RayPipeline.load_from_yaml(path="my_pipelines.yaml", pipeline_name="my_query_pipeline")
>>> pipeline.run(query="What is the capital of Germany?")
By default, RayPipelines creates an instance of RayServe locally. To connect to an existing Ray instance,
set the `address` parameter when creating RayPipeline instance.
"""
    def __init__(self, address: Optional[str] = None, **kwargs):
"""
:param address: The IP address for the Ray cluster. If set to None, a local Ray instance is started.
:param kwargs: Optional parameters for initializing Ray.
"""
ray.init(address=address, **kwargs)
serve.start()
super().__init__()
@classmethod
def load_from_yaml(
cls,
path: Path, pipeline_name: Optional[str] = None,
overwrite_with_env_variables: bool = True,
address: Optional[str] = None,
**kwargs,
):
"""
Load Pipeline from a YAML file defining the individual components and how they're tied together to form
a Pipeline. A single YAML can declare multiple Pipelines, in which case an explicit `pipeline_name` must
be passed.
Here's a sample configuration:
```yaml
| version: '0.8'
|
| components: # define all the building-blocks for Pipeline
| - name: MyReader # custom-name for the component; helpful for visualization & debugging
| type: FARMReader # Haystack Class name for the component
| params:
| no_ans_boost: -10
| model_name_or_path: deepset/roberta-base-squad2
| - name: MyESRetriever
| type: ElasticsearchRetriever
| params:
| document_store: MyDocumentStore # params can reference other components defined in the YAML
| custom_query: null
| - name: MyDocumentStore
| type: ElasticsearchDocumentStore
| params:
| index: haystack_test
|
| pipelines: # multiple Pipelines can be defined using the components from above
| - name: my_query_pipeline # a simple extractive-qa Pipeline
| nodes:
| - name: MyESRetriever
| inputs: [Query]
| - name: MyReader
| inputs: [MyESRetriever]
```
:param path: path of the YAML file.
:param pipeline_name: if the YAML contains multiple pipelines, the pipeline_name to load must be set.
:param overwrite_with_env_variables: Overwrite the YAML configuration with environment variables. For example,
to change index name param for an ElasticsearchDocumentStore, an env
variable 'MYDOCSTORE_PARAMS_INDEX=documents-2021' can be set. Note that an
`_` sign must be used to specify nested hierarchical properties.
:param address: The IP address for the Ray cluster. If set to None, a local Ray instance is started.
"""
data, pipeline_config, definitions = cls._read_yaml(
path=path, pipeline_name=pipeline_name, overwrite_with_env_variables=overwrite_with_env_variables
)
pipeline = cls(address=address, **kwargs)
for node_config in pipeline_config["nodes"]:
if pipeline.root_node is None:
root_node = node_config["inputs"][0]
if root_node in ["Query", "File"]:
pipeline.root_node = root_node
handle = cls._create_ray_deployment(component_name=root_node, pipeline_config=data)
pipeline._add_ray_deployment_in_graph(handle=handle, name=root_node, outgoing_edges=1, inputs=[])
else:
raise KeyError(f"Root node '{root_node}' is invalid. Available options are 'Query' and 'File'.")
name = node_config["name"]
component_type = definitions[name]["type"]
component_class = BaseComponent.get_subclass(component_type)
replicas = next(comp for comp in data["components"] if comp["name"] == name).get("replicas", 1)
handle = cls._create_ray_deployment(component_name=name, pipeline_config=data, replicas=replicas)
pipeline._add_ray_deployment_in_graph(
handle=handle,
name=name,
outgoing_edges=component_class.outgoing_edges,
inputs=node_config.get("inputs", []),
)
return pipeline
@classmethod
def _create_ray_deployment(cls, component_name: str, pipeline_config: dict, replicas: int = 1):
"""
Create a Ray Deployment for the Component.
:param component_name: Class name of the Haystack Component.
:param pipeline_config: The Pipeline config YAML parsed as a dict.
:param replicas: By default, a single replica of the component is created. It can be
configured by setting `replicas` parameter in the Pipeline YAML.
"""
RayDeployment = serve.deployment(_RayDeploymentWrapper, name=component_name, num_replicas=replicas)
RayDeployment.deploy(pipeline_config, component_name)
handle = RayDeployment.get_handle()
return handle
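    # For reference (illustrative, not in the original file): the replica count is read from the
    # component's entry in the pipeline YAML, e.g. a sketch like the following would create two
    # replicas of the reader via the `num_replicas` argument passed to serve.deployment above:
    #
    #   components:
    #     - name: MyReader
    #       type: FARMReader
    #       replicas: 2
    #       params:
    #         model_name_or_path: deepset/roberta-base-squad2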
def run(self, **kwargs):
has_next_node = True
current_node_id = self.root_node
input_dict = {"root_node": self.root_node, **kwargs}
output_dict = None
while has_next_node:
output_dict, stream_id = ray.get(self.graph.nodes[current_node_id]["component"].remote(**input_dict))
input_dict = output_dict
next_nodes = self.get_next_nodes(current_node_id, stream_id)
if len(next_nodes) > 1:
join_node_id = list(nx.neighbors(self.graph, next_nodes[0]))[0]
if set(self.graph.predecessors(join_node_id)) != set(next_nodes):
raise NotImplementedError(
"The current pipeline does not support multiple levels of parallel nodes."
)
inputs_for_join_node = {"inputs": []}
for n_id in next_nodes:
                    # components in this graph are Ray Serve handles, so invoke them via .remote()
                    output = ray.get(self.graph.nodes[n_id]["component"].remote(**input_dict))
inputs_for_join_node["inputs"].append(output)
input_dict = inputs_for_join_node
current_node_id = join_node_id
elif len(next_nodes) == 1:
current_node_id = next_nodes[0]
else:
has_next_node = False
return output_dict
def add_node(self, component, name: str, inputs: List[str]):
raise NotImplementedError(
"The current implementation of RayPipeline only supports loading Pipelines from a YAML file."
)
def _add_ray_deployment_in_graph(self, handle, name: str, outgoing_edges: int, inputs: List[str]):
"""
Add the Ray deployment handle in the Pipeline Graph.
        :param handle: Ray deployment `handle` to add in the Pipeline Graph. The handle allows calling a Ray deployment
from Python: https://docs.ray.io/en/master/serve/package-ref.html#servehandle-api.
:param name: The name for the node. It must not contain any dots.
:param inputs: A list of inputs to the node. If the predecessor node has a single outgoing edge, just the name
                       of the node is sufficient. For instance, an 'ElasticsearchRetriever' node would always output a single
edge with a list of documents. It can be represented as ["ElasticsearchRetriever"].
In cases when the predecessor node has multiple outputs, e.g., a "QueryClassifier", the output
must be specified explicitly as "QueryClassifier.output_2".
"""
self.graph.add_node(name, component=handle, inputs=inputs, outgoing_edges=outgoing_edges)
if len(self.graph.nodes) == 2: # first node added; connect with Root
self.graph.add_edge(self.root_node, name, label="output_1")
return
for i in inputs:
if "." in i:
[input_node_name, input_edge_name] = i.split(".")
assert "output_" in input_edge_name, f"'{input_edge_name}' is not a valid edge name."
                # Ray handles don't expose outgoing_edges, so read it from the node attribute stored above
                outgoing_edges_input_node = self.graph.nodes[input_node_name]["outgoing_edges"]
assert int(input_edge_name.split("_")[1]) <= outgoing_edges_input_node, (
f"Cannot connect '{input_edge_name}' from '{input_node_name}' as it only has "
f"{outgoing_edges_input_node} outgoing edge(s)."
)
else:
outgoing_edges_input_node = self.graph.nodes[i]["outgoing_edges"]
assert outgoing_edges_input_node == 1, (
f"Adding an edge from {i} to {name} is ambiguous as {i} has {outgoing_edges_input_node} edges. "
f"Please specify the output explicitly."
)
input_node_name = i
input_edge_name = "output_1"
self.graph.add_edge(input_node_name, name, label=input_edge_name)
class _RayDeploymentWrapper:
"""
Ray Serve supports calling of __init__ methods on the Classes to create "deployment" instances.
    In the case of Haystack, some Components like Retrievers have complex init methods that need objects
like Document Stores.
This wrapper class encapsulates the initialization of Components. Given a Component Class
name, it creates an instance using the YAML Pipeline config.
"""
node: BaseComponent
def __init__(self, pipeline_config: dict, component_name: str):
"""
Create an instance of Component.
:param pipeline_config: Pipeline YAML parsed as a dict.
:param component_name: Component Class name.
"""
if component_name in ["Query", "File"]:
self.node = RootNode()
else:
self.node = BaseComponent.load_from_pipeline_config(pipeline_config, component_name)
def __call__(self, *args, **kwargs):
"""
Ray calls this method which is then re-directed to the corresponding component's run().
"""
return self.node.run(*args, **kwargs)
class Docs2Answers(BaseComponent):
outgoing_edges = 1
def __init__(self):
self.set_config()
def run(self, query, documents, **kwargs):
# conversion from Document -> Answer
answers = []
for doc in documents:
# For FAQ style QA use cases
if "answer" in doc.meta:
cur_answer = {
"query": doc.text,
"answer": doc.meta["answer"],
"document_id": doc.id,
"context": doc.meta["answer"],
"score": doc.score,
"offset_start": 0,
"offset_end": len(doc.meta["answer"]),
"meta": doc.meta,
}
else:
# Regular docs
cur_answer = {
"query": None,
"answer": None,
"document_id": doc.id,
"context": doc.text,
"score": doc.score,
"offset_start": None,
"offset_end": None,
"meta": doc.meta,
}
answers.append(cur_answer)
output = {"query": query, "answers": answers}
# Pass also the other incoming kwargs so that future nodes still have access to it
output.update(**kwargs)
return output, "output_1"
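# Usage sketch (illustrative addition, not part of the original file): Docs2Answers is typically placed
# after a retriever so that plain Documents are exposed in the answer format. Component names below are
# assumptions, and `Pipeline` refers to the class defined earlier in this module:
#
#   pipe = Pipeline()
#   pipe.add_node(component=retriever, name="Retriever", inputs=["Query"])
#   pipe.add_node(component=Docs2Answers(), name="Docs2Answers", inputs=["Retriever"])
#   res = pipe.run(query="What is the capital of France?")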
|
[
"[email protected]"
] | |
8ebeb25ae069db43b23b35eea9b3cb49e7564d1c
|
d4e1b610db981020019a10af1fc90311cc0900d6
|
/students/ReemAlqaysi/lesson06/test_mailroom.py
|
af851981a3cb52f99e0b0734f1d64f3604772217
|
[] |
no_license
|
InduKolli/SP_Online_PY210
|
c9c7b52b6ac6be3f10c210cebe74b4564f35b989
|
49589778454c1549a12fd6f8bc2e44e022b86b72
|
refs/heads/master
| 2020-06-11T16:40:49.368669 | 2019-11-11T03:17:54 | 2019-11-11T03:17:54 | 193,431,588 | 1 | 0 | null | 2019-06-24T04:06:29 | 2019-06-24T04:06:29 | null |
UTF-8
|
Python
| false | false | 2,046 |
py
|
#!/usr/bin/env python3
import mailroom
import os
donor_list = {
"Jan Balard": [600.00,250.00],
"Joe McHennry": [1500.00,1500.00],
"Jeff Hansen": [450.00,150.00],
"Scott Newman": [100.00,5000.00],
"Rabi Das": [500.00,950.00]
}
def test_send_letter_text():
letter = '''\n\nDear Reem Alqaysi:\n Thank you for your donation of $222, we appriciate your support to our service. \n MailRoom Team\n'''
assert mailroom.thank_you_text('Reem Alqaysi',222) == letter
def test_new_donor():
fullname = 'Reem Alqaysi'
mailroom.add_name(fullname)
assert fullname in donor_list
#assert donor_list == {'Jan Balard': [600.0, 250.0], 'Joe McHennry': [1500.0, 1500.0], 'Jeff Hansen': [450.0, 150.0], 'Scott Newman': [100.0, 5000.0], 'Rabi Das': [500.0, 950.0], 'Reem Alqaysi': []}
def test_update_donor():
fullname = 'Rabi Das'
mailroom.add_name(fullname)
assert fullname in donor_list
def test_add_amount():
fullname = 'Reem Alqaysi'
amount = 222
mailroom.add_amount(fullname,amount)
    assert donor_list[fullname][-1] == amount
def test_create_report():
report = \
f'Donor Name | Total Given |Num Gifts |Average Gift \n\
------------------------------------------------------------------------------------------\n\
Scott Newman $ 5100.0 2 $ 2550.0\n\
Jeff Hansen $ 600.0 2 $ 300.0\n\
Rabi Das $ 1450.0 2 $ 725.0\n\
Jan Balard $ 850.0 2 $ 425.0\n\
Joe McHennry $ 3000.0 2 $ 1500.0\n'
assert mailroom.create_report() == report
def test_create_report_file():
mailroom.letter_to_all()
for name in donor_list:
filename = name.replace(' ', '_').replace(',', '') + ".txt"
filename = filename.lower()
assert os.path.isfile(filename) is True
|
[
"[email protected]"
] | |
7f4cb87cab420060f0713c8c91401f606532723a
|
b26c0b0d767f62325fb3963118698e5c77819c70
|
/Rice Python/Rice Rocks (no animation).py
|
c441c42cf385f97d4c47b119bfa31f318d65ec60
|
[] |
no_license
|
alecmchiu/MOOCs
|
8336ad3ed52262ce543ed0a817252362041900c9
|
f87549d19f304b64df8ad51387aa8252062676fd
|
refs/heads/master
| 2021-01-12T01:31:48.061261 | 2017-08-18T02:59:06 | 2017-08-18T02:59:06 | 78,399,530 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,259 |
py
|
# implementation of Spaceship - program template for RiceRocks
import simplegui
import math
import random
# globals for user interface
WIDTH = 800
HEIGHT = 600
score = 0
lives = 3
time = 0
started = False
class ImageInfo:
def __init__(self, center, size, radius = 0, lifespan = None, animated = False):
self.center = center
self.size = size
self.radius = radius
if lifespan:
self.lifespan = lifespan
else:
self.lifespan = float('inf')
self.animated = animated
def get_center(self):
return self.center
def get_size(self):
return self.size
def get_radius(self):
return self.radius
def get_lifespan(self):
return self.lifespan
def get_animated(self):
return self.animated
# art assets created by Kim Lathrop, may be freely re-used in non-commercial projects, please credit Kim
# debris images - debris1_brown.png, debris2_brown.png, debris3_brown.png, debris4_brown.png
# debris1_blue.png, debris2_blue.png, debris3_blue.png, debris4_blue.png, debris_blend.png
debris_info = ImageInfo([320, 240], [640, 480])
debris_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/debris2_blue.png")
# nebula images - nebula_brown.png, nebula_blue.png
nebula_info = ImageInfo([400, 300], [800, 600])
nebula_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/nebula_blue.f2014.png")
# splash image
splash_info = ImageInfo([200, 150], [400, 300])
splash_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/splash.png")
# ship image
ship_info = ImageInfo([45, 45], [90, 90], 35)
ship_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/double_ship.png")
# missile image - shot1.png, shot2.png, shot3.png
missile_info = ImageInfo([5,5], [10, 10], 3, 50)
missile_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/shot2.png")
# asteroid images - asteroid_blue.png, asteroid_brown.png, asteroid_blend.png
asteroid_info = ImageInfo([45, 45], [90, 90], 40)
asteroid_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/asteroid_blue.png")
# animated explosion - explosion_orange.png, explosion_blue.png, explosion_blue2.png, explosion_alpha.png
explosion_info = ImageInfo([64, 64], [128, 128], 17, 24, True)
explosion_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/explosion_alpha.png")
# sound assets purchased from sounddogs.com, please do not redistribute
# .ogg versions of sounds are also available, just replace .mp3 by .ogg
soundtrack = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/soundtrack.mp3")
missile_sound = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/missile.mp3")
missile_sound.set_volume(.5)
ship_thrust_sound = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/thrust.mp3")
explosion_sound = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/explosion.mp3")
# helper functions to handle transformations
def angle_to_vector(ang):
return [math.cos(ang), math.sin(ang)]
def dist(p, q):
return math.sqrt((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2)
def process_sprite_group(a_set, canvas):
copy = set(a_set)
for each in a_set:
if (each.update()):
copy.remove(each)
else:
each.draw(canvas)
a_set.intersection_update(copy)
def group_collide(group, other_object):
original = len(group)
group_copy = set(group)
for each in group:
if (each.collide(other_object)):
group_copy.remove(each)
group.intersection_update(group_copy)
if (len(group) < original):
return True
else:
return False
def group_group_collide(group1,group2):
copy = set(group1)
collisions = 0
for each in group1:
if(group_collide(group2, each)):
collisions += 1
copy.discard(each)
group1.intersection_update(copy)
return collisions
# Ship class
class Ship:
def __init__(self, pos, vel, angle, image, info):
self.pos = [pos[0], pos[1]]
self.vel = [vel[0], vel[1]]
self.thrust = False
self.angle = angle
self.angle_vel = 0
self.image = image
self.image_center = info.get_center()
self.image_size = info.get_size()
self.radius = info.get_radius()
def draw(self,canvas):
if self.thrust:
canvas.draw_image(self.image, [self.image_center[0] + self.image_size[0], self.image_center[1]] , self.image_size,
self.pos, self.image_size, self.angle)
else:
canvas.draw_image(self.image, self.image_center, self.image_size,
self.pos, self.image_size, self.angle)
# canvas.draw_circle(self.pos, self.radius, 1, "White", "White")
def update(self):
# update angle
self.angle += self.angle_vel
# update position
self.pos[0] = (self.pos[0] + self.vel[0]) % WIDTH
self.pos[1] = (self.pos[1] + self.vel[1]) % HEIGHT
# update velocity
if self.thrust:
acc = angle_to_vector(self.angle)
self.vel[0] += acc[0] * .1
self.vel[1] += acc[1] * .1
self.vel[0] *= .99
self.vel[1] *= .99
def set_thrust(self, on):
self.thrust = on
if on:
ship_thrust_sound.rewind()
ship_thrust_sound.play()
else:
ship_thrust_sound.pause()
def increment_angle_vel(self):
self.angle_vel += .05
def decrement_angle_vel(self):
self.angle_vel -= .05
def shoot(self):
global missile_group
forward = angle_to_vector(self.angle)
missile_pos = [self.pos[0] + self.radius * forward[0], self.pos[1] + self.radius * forward[1]]
missile_vel = [self.vel[0] + 6 * forward[0], self.vel[1] + 6 * forward[1]]
a_missile = Sprite(missile_pos, missile_vel, self.angle, 0, missile_image, missile_info, missile_sound)
missile_group.add(a_missile)
def get_position(self):
return self.pos
def get_radius(self):
return self.radius
# Sprite class
class Sprite:
def __init__(self, pos, vel, ang, ang_vel, image, info, sound = None):
self.pos = [pos[0],pos[1]]
self.vel = [vel[0],vel[1]]
self.angle = ang
self.angle_vel = ang_vel
self.image = image
self.image_center = info.get_center()
self.image_size = info.get_size()
self.radius = info.get_radius()
self.lifespan = info.get_lifespan()
self.animated = info.get_animated()
self.age = 0
if sound:
sound.rewind()
sound.play()
def draw(self, canvas):
canvas.draw_image(self.image, self.image_center, self.image_size,
self.pos, self.image_size, self.angle)
def update(self):
# update angle
self.angle += self.angle_vel
# update position
self.pos[0] = (self.pos[0] + self.vel[0]) % WIDTH
self.pos[1] = (self.pos[1] + self.vel[1]) % HEIGHT
#update age
self.age += 1
if (self.age < self.lifespan):
return False
else:
return True
def get_position(self):
return self.pos
def get_radius(self):
return self.radius
def collide(self, other_object):
distance = dist(self.pos,other_object.get_position())
collision_distance = self.radius + other_object.get_radius()
if (distance < collision_distance):
return True
else:
return False
# key handlers to control ship
def keydown(key):
if key == simplegui.KEY_MAP['left']:
my_ship.decrement_angle_vel()
elif key == simplegui.KEY_MAP['right']:
my_ship.increment_angle_vel()
elif key == simplegui.KEY_MAP['up']:
my_ship.set_thrust(True)
elif key == simplegui.KEY_MAP['space']:
my_ship.shoot()
def keyup(key):
if key == simplegui.KEY_MAP['left']:
my_ship.increment_angle_vel()
elif key == simplegui.KEY_MAP['right']:
my_ship.decrement_angle_vel()
elif key == simplegui.KEY_MAP['up']:
my_ship.set_thrust(False)
# mouseclick handlers that reset UI and conditions whether splash image is drawn
def click(pos):
global started, timer, lives
center = [WIDTH / 2, HEIGHT / 2]
size = splash_info.get_size()
inwidth = (center[0] - size[0] / 2) < pos[0] < (center[0] + size[0] / 2)
inheight = (center[1] - size[1] / 2) < pos[1] < (center[1] + size[1] / 2)
if (not started) and inwidth and inheight:
started = True
timer.start()
lives = 3
soundtrack.play()
def draw(canvas):
global time, started, lives, score, timer, rock_group
    # animate background
time += 1
wtime = (time / 4) % WIDTH
center = debris_info.get_center()
size = debris_info.get_size()
canvas.draw_image(nebula_image, nebula_info.get_center(), nebula_info.get_size(), [WIDTH / 2, HEIGHT / 2], [WIDTH, HEIGHT])
canvas.draw_image(debris_image, center, size, (wtime - WIDTH / 2, HEIGHT / 2), (WIDTH, HEIGHT))
canvas.draw_image(debris_image, center, size, (wtime + WIDTH / 2, HEIGHT / 2), (WIDTH, HEIGHT))
# draw UI
canvas.draw_text("Lives", [50, 50], 22, "White")
canvas.draw_text("Score", [680, 50], 22, "White")
canvas.draw_text(str(lives), [50, 80], 22, "White")
canvas.draw_text(str(score), [680, 80], 22, "White")
# draw ship and sprites
my_ship.draw(canvas)
# update ship and sprites
my_ship.update()
#process rocks and missiles
process_sprite_group(rock_group, canvas)
process_sprite_group(missile_group, canvas)
#collisions
if (group_collide(rock_group, my_ship)):
lives -= 1
score += group_group_collide(rock_group, missile_group)
if (lives == 0):
started = False
rock_group = set()
timer.stop()
soundtrack.pause()
soundtrack.rewind()
time = 0
# draw splash screen if not started
if not started:
canvas.draw_image(splash_image, splash_info.get_center(),
splash_info.get_size(), [WIDTH / 2, HEIGHT / 2],
splash_info.get_size())
# timer handler that spawns a rock
def rock_spawner():
global rock_group, my_ship, time
rock_pos = [random.randrange(0, WIDTH), random.randrange(0, HEIGHT)]
rock_vel = [0.01*time*(random.random() * .6 - .3), 0.01*time*(random.random() * .6 - .3)]
rock_avel = random.random() * .2 - .1
a_rock = Sprite(rock_pos, rock_vel, 0, rock_avel, asteroid_image, asteroid_info)
if (len(rock_group) <= 12):
if (dist(my_ship.get_position(),a_rock.get_position()) > my_ship.get_radius()+a_rock.get_radius()):
rock_group.add(a_rock)
# initialize stuff
frame = simplegui.create_frame("Asteroids", WIDTH, HEIGHT)
# initialize ship and two sprites
my_ship = Ship([WIDTH / 2, HEIGHT / 2], [0, 0], 0, ship_image, ship_info)
rock_group = set()
missile_group = set()
# register handlers
frame.set_keyup_handler(keyup)
frame.set_keydown_handler(keydown)
frame.set_mouseclick_handler(click)
frame.set_draw_handler(draw)
timer = simplegui.create_timer(1000.0, rock_spawner)
# get things rolling
frame.start()
|
[
"[email protected]"
] | |
31bda42177c67668b02106a2e58888a61630ed09
|
99e1a15d8f605be456f17608843c309dd8a3260f
|
/src/Battle/Attack/Steps/Test/suite.py
|
a11d3df523d7d71da56074941becf66d934c86c9
|
[] |
no_license
|
sgtnourry/Pokemon-Project
|
e53604096dcba939efca358e4177374bffcf0b38
|
3931eee5fd04e18bb1738a0b27a4c6979dc4db01
|
refs/heads/master
| 2021-01-17T23:02:25.910738 | 2014-04-12T17:46:27 | 2014-04-12T17:46:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,034 |
py
|
import unittest
from Battle.Attack.Steps.Test.remove_pp_step_test import suite as remove_pp_step_suite
from Battle.Attack.Steps.Test.handle_miss_effects_step_test import suite as handle_miss_effects_step_suite
from Battle.Attack.Steps.Test.handle_contact_step_test import suite as handle_contact_step_suite
from Battle.Attack.Steps.Test.effects_step_test import suite as effects_step_suite
from Battle.Attack.Steps.Test.damage_step_test import suite as damage_step_suite
from Battle.Attack.Steps.Test.announcement_step_test import suite as announcement_step_suite
from Battle.Attack.Steps.Test.hit_step_test import suite as hit_step_suite
from Battle.Attack.Steps.Test.precondition_step_test import suite as precondition_step_suite
suites = [precondition_step_suite,
hit_step_suite,
announcement_step_suite,
damage_step_suite,
effects_step_suite,
handle_contact_step_suite,
handle_miss_effects_step_suite,
remove_pp_step_suite]
suite = unittest.TestSuite(suites)
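# Illustrative addition: the combined suite can be executed directly with the standard unittest runner.
if __name__ == "__main__":
    unittest.TextTestRunner(verbosity=2).run(suite)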
|
[
"[email protected]"
] | |
74a70cddec3707af88424f902a735dd471053666
|
7ed05e81c563b8931bdf232daf88d466bb06d698
|
/polls/admin.py
|
896bfe8b3f74c75e466c660292ed8b4b3f4afc85
|
[] |
no_license
|
chetansurwade/poller
|
c940ffc8bd19b6a5ee671322c8d2483a53170ee9
|
77657f248a3ba856e89b432593b41eaa7f455e7f
|
refs/heads/master
| 2020-09-25T22:29:36.609327 | 2019-12-05T15:17:39 | 2019-12-05T15:17:39 | 226,101,472 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 555 |
py
|
from django.contrib import admin
from .models import Question, Choice
admin.site.site_header = "Poller Admin"
admin.site.site_title = "Poller Admin Area"
admin.site.index_title = "Welcome to the Poller admin area"
class ChoiceInline(admin.TabularInline):
model = Choice
extra = 3
class QuestionAdmin(admin.ModelAdmin):
fieldsets = [(None, {'fields': ['question_text']}),
('Date Information', {'fields': ['pub_date'], 'classes': ['collapse']}), ]
inlines = [ChoiceInline]
admin.site.register(Question, QuestionAdmin)
|
[
"[email protected]"
] | |
6843646e4bfc8dd6d189f4981122d415672c1403
|
8937c4d452c98699610923f76a395a2247f576df
|
/preprocess/crop.py
|
5b05cb13ad998812b4d8e78a1b99878b47e16046
|
[] |
no_license
|
mistycheney/MouseBrainAtlas
|
812b204af06ed303f3c12d5c81edef50c8d9d1ed
|
bffbaa1ede9297084e64fc197716e63d5cb54275
|
refs/heads/master
| 2020-04-11T13:44:09.632311 | 2018-11-20T22:32:15 | 2018-11-20T22:32:15 | 20,377,173 | 3 | 9 | null | 2017-03-15T19:39:27 | 2014-06-01T12:42:08 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 3,884 |
py
|
#! /usr/bin/env python
import os
import argparse
import sys
import time
import numpy as np
from multiprocess import Pool
sys.path.append(os.path.join(os.environ['REPO_DIR'], 'utilities'))
from utilities2015 import *
from metadata import *
from data_manager import *
from learning_utilities import *
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='')
parser.add_argument("stack", type=str, help="Brain name")
parser.add_argument("versions", type=str, help="json encoded str list")
parser.add_argument("resolutions", type=str, help="json encoded str list")
parser.add_argument("prep_in", type=str, help="")
parser.add_argument("prep_out", type=str, help="")
parser.add_argument("input_crop_json", type=str, help="")
parser.add_argument("output_crop_json", type=str, help="")
parser.add_argument("n_jobs", type=int, help="", default=1)
args = parser.parse_args()
versions = json.loads(args.versions)
if isinstance(versions, str):
versions = [versions]
else:
assert isinstance(versions, list), "Argument versions must be str or str list."
resolutions = json.loads(args.resolutions)
if isinstance(resolutions, str):
resolutions = [resolutions]
else:
assert isinstance(resolutions, list), "Argument resolutions must be str or str list."
n_jobs = args.n_jobs
def crop(stack, img_name, version, resol, x,y,w,h):
input_fp = DataManager.get_image_filepath_v2(stack=stack, prep_id=5, resol=resol, version=version, fn=img_name)
output_fp = DataManager.get_image_filepath_v2(stack=stack, fn=img_name, prep_id=2, version=version, resol=resol)
img = imread(input_fp)
save_data(img[y:y+h, x:x+w], output_fp)
# NOTE: x_tb/y_tb/w_tb/h_tb (the thumbnail-level crop box) were never defined in this script even
# though a crop JSON path is accepted above. The block below is an assumed fix; the key names used
# for the JSON layout are a guess and may need to match the actual file format.
import json
with open(args.input_crop_json) as f:
    _cropbox = json.load(f)
x_tb, y_tb, w_tb, h_tb = _cropbox['x'], _cropbox['y'], _cropbox['w'], _cropbox['h']
for version in versions:
for resol in resolutions:
if resol == 'raw':
x = x_tb * 32
y = y_tb * 32
w = w_tb * 32
h = h_tb * 32
elif resol == 'thumbnail':
x = x_tb
y = y_tb
w = w_tb
h = h_tb
else:
raise
# input_dir = DataManager.get_image_dir_v2(stack=stack, prep_id=5, version=version, resol='raw')
out_dir = DataManager.get_image_dir_v2(stack=stack, prep_id=2, resol=resol, version=version)
print 'out_dir:', out_dir
# script = os.path.join(REPO_DIR, 'preprocess', 'warp_crop_IM_v3.py')
# ! rm -rf {out_dir}
create_if_not_exists(out_dir)
t = time.time()
        pool = Pool(n_jobs)  # honor the n_jobs argument parsed above instead of a hard-coded worker count
_ = pool.map(lambda img_name: crop(stack=stack, img_name=img_name, version=version, resol=resol,
x=x, y=y, w=w, h=h),
metadata_cache['valid_filenames'][stack])
pool.close()
pool.join()
# for img_name in metadata_cache['valid_filenames'][stack]:
# f(stack=stack, img_name=img_name, version=version, resol=resol,
# x=x, y=y, w=w, h=h)
# run_distributed('convert \"%%(input_fp)s\" -crop %(w)dx%(h)d+%(x)d+%(y)d \"%%(output_fp)s\"' % \
# {'w':w_raw, 'h':h_raw, 'x':x_raw, 'y':y_raw},
# kwargs_list=[{'input_fp': DataManager.get_image_filepath_v2(stack=stack, prep_id=5, resol='raw', version=version, fn=img_name),
# 'output_fp': DataManager.get_image_filepath_v2(stack=stack, fn=img_name, prep_id=2, version=version, resol='raw')}
# for img_name in metadata_cache['valid_filenames'][stack]],
# # for img_name in ['CHATM3_slide35_2018_02_17-S1']],
# argument_type='single',
# jobs_per_node=1,
# local_only=True)
# wait_qsub_complete()
print 'done in', time.time() - t, 'seconds' # 1500s
|
[
"[email protected]"
] | |
a7c3a8dc9de426e13429cbc87ae0f7f5de87a5fb
|
fd69c5d94b20161a9f4dd6c39c7f61289d16b603
|
/replics/errors.py
|
5723c0af9a6ce486a6ef14acd1059d553960bf6c
|
[] |
no_license
|
k-t-l-h/AIS-2
|
57785a284eed9f460551c69a77d297be19dcc6c8
|
560f4de6271fa26e2bdff1d685722a158f4eca57
|
refs/heads/main
| 2023-02-02T23:08:53.580104 | 2020-12-26T04:31:06 | 2020-12-26T04:31:06 | 320,883,945 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 542 |
py
|
# Reply strings are kept in Russian since the bot answers in Russian; English glosses are added below.
SORRY = ["Извини, я пока не понимаю, что ты говоришь",
         "Оу, я тебя не совсем понимаю, можешь перефразировать?",
         "Извини, я пока не очень хорошо умею разбирать слова. Можешь повторить?"]
# English: "Sorry, I don't understand what you're saying yet", "Oh, I don't quite understand you,
# can you rephrase?", "Sorry, I'm not very good at making out words yet. Can you repeat that?"
ALL = ["Что я могу сделать для тебя?", "Чем я могу помочь?", "Что сегодня делаем?", "Я пришел помочь, что мне сделать?"]
# English: "What can I do for you?", "How can I help?", "What are we doing today?", "I came to help, what should I do?"
|
[
"[email protected]"
] | |
b81fcd5e3a4bced2bbf26ad772ff6291dd4a369c
|
40a441c075fdb63a5b30f9baa7d3e5165070c034
|
/trained_model.py
|
1fa8e983e420f1ce49702cf3b7b85a38d2e62812
|
[] |
no_license
|
nanditashekar/Food-Classifier-Tool
|
aef8a8a92056118f11eacab3ebb7b63948f1ea30
|
e7025b9dd99771a6b8b06ebb588da8a2a7f2bfb7
|
refs/heads/master
| 2022-11-22T06:29:30.607387 | 2020-07-27T16:07:02 | 2020-07-27T16:07:02 | 282,947,275 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,142 |
py
|
# -*- coding: utf-8 -*-
"""Model_Demo_File.ipynb
Created by Aravind R Krishnan
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1BRvmIlk4lgc-UMRxssbJtJxRk1h4bAdE
"""
#Loading the model and testing
from keras.models import load_model
from keras.preprocessing import image
import numpy as np
import matplotlib.pyplot as plt
model = load_model('MINI_PROJECT_MODEL_FINAL.h5')
def pred(path):
test = image.load_img(path, target_size =(256,256))
test = image.img_to_array(test)
plt.imshow(test, cmap='gray')
plt.show()
test = np.expand_dims(test, axis=0)
result = model.predict(test)
if result[0][0] == 1:
print("CUPCAKES!")
elif result[0][1] == 1:
print("DUMPLINGS")
elif result[0][2] == 1:
print("FRENCH FRIES")
elif result[0][3] == 1:
print("FRIED RICE")
else:
print("PIZZA!")
def demo():
flag=1
while flag:
print("Input File Path of Image: ")
filepath=input()
pred(filepath)
print("Enter 0 to Quit, else 1")
        flag = int(input())  # cast to int so that entering 0 actually exits (input() returns a str, which is always truthy)
demo()
|
[
"[email protected]"
] | |
a72473ebf4f825bee83939c8f6354360345830ee
|
1781eeb99cb758106f3a41a6aab96c4108c3bffd
|
/ParserTranscript.py
|
6e8ae6169dc4e4507392a3dd762cc3256f694668
|
[] |
no_license
|
Ilhyon/Scripts
|
10015163647c2204c93d0da4d58224a116863a1d
|
496b6eb589501aa8e84ef25720d465bda2eb305f
|
refs/heads/master
| 2021-07-13T16:26:28.576512 | 2020-07-09T18:41:27 | 2020-07-09T18:41:27 | 159,869,935 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,828 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-:
import os
import argparse
import numpy as np
import pandas as pd
from pprint import pprint
def readTr(filename):
dico = {}
with open(filename) as f: # file opening
content = f.read()
lines = content.split('\n')
for l in lines:
if l :
w = l.split('|')
if w[3] == '1':
w[3] = '+'
else:
w[3] = '-'
chrStrand = w[2]+'|'+w[3]
if chrStrand not in dico:
dico[chrStrand] = {}
exon = w[5].split(';')
for e in exon:
if e not in dico[chrStrand]:
dico[chrStrand][e] = []
dico[chrStrand][e].append(w[0])
return dico
def main(path):
trAll = path + 'HS_transcript_unspliced_All.txt'
files = ['kunv', 'sinv', 'zikv', 'yvf']
dicoAllTr = readTr(trAll)
for v in files:
newF = []
with open(path+v+'_RI1New.csv') as f: # file opening
content = f.read()
lines = content.split('\n')
            for l in lines:
                if not l:  # skip empty lines (e.g. a trailing newline), as readTr() does above
                    continue
                tr1 = []
                tr2 = []
                w = l.split('\t')
if w[2] == '-':
E1E = str(int(w[9])+1)
E1S = str(int(w[10]))
E2E = str(int(w[11])+1)
E2S = str(int(w[12]))
chrStrand = w[3]+'|'+w[2]
if E1S+'-'+E1E in dicoAllTr[chrStrand]:
tr1 = dicoAllTr[chrStrand][ E1S+'-'+E1E ]
else:
print('tr1')
print(E1S+'-'+E1E)
if E2S+'-'+E2E in dicoAllTr[chrStrand]:
tr2 = dicoAllTr[chrStrand][ E2S+'-'+E2E ]
else:
print('tr2')
print(E2S+'-'+E2E)
if tr1 and tr2:
commonTr = list(set(tr1).intersection(tr2))
else:
commonTr = []
w.extend(commonTr)
w = '\t'.join(w)
newF.append(w)
else:
E1S = str(int(w[9])+1)
E1E = str(int(w[10]))
E2S = str(int(w[11])+1)
E2E = str(int(w[12]))
chrStrand = w[3]+'|'+w[2]
if E1S+'-'+E1E in dicoAllTr[chrStrand]:
tr1 = dicoAllTr[chrStrand][ E1S+'-'+E1E ]
else:
print('tr1')
print(E1S+'-'+E1E)
if E2S+'-'+E2E in dicoAllTr[chrStrand]:
tr2 = dicoAllTr[chrStrand][ E2S+'-'+E2E ]
else:
print('tr2')
print(E2S+'-'+E2E)
if tr1 and tr2:
commonTr = list(set(tr1).intersection(tr2))
else:
commonTr = []
w.extend(commonTr)
w = '\t'.join(w)
newF.append(w)
outputF = open(path+v+'_RI1TESTtranscript.csv', "w")
outputF.write( 'Location\tGeneSymbol\tStrand\tchr\tStartEvent\tEndEvent\tStartpG4\tEndpG4\tpG4Sequence\tE1S\tE1E\tE2S\tE2E\tTr\n' )
outputF.write( '\n'.join(newF) )
outputF.close()
def build_arg_parser():
parser = argparse.ArgumentParser(description = 'generateRandom')
GITDIR = os.getcwd()+'/'
parser.add_argument ('-p', '--path', default = GITDIR)
return parser
if __name__ == '__main__':
parser = build_arg_parser()
arg = parser.parse_args()
path = arg.path
main(path)
|
[
"[email protected]"
] | |
641393e4ba73eb019ef8abc5d60bcf52802b1b08
|
b82efae8184e01630e0befb2be675cbcec254758
|
/src/GraphGP.py
|
1a3daddddffb4d1351f884553595eff014a03f1b
|
[] |
no_license
|
tankred-saanum/Cognitive-maps-for-rewards
|
9ba16e3252c1c4698b719d017cc4d4e9a262802b
|
1ebb133af8e3a37bec4863ee38b233f1c15c4edd
|
refs/heads/main
| 2023-04-07T03:28:04.269511 | 2023-01-16T20:29:54 | 2023-01-16T20:29:54 | 371,415,219 | 4 | 3 | null | 2023-01-16T20:29:30 | 2021-05-27T15:08:34 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 8,842 |
py
|
import matplotlib
from matplotlib import pyplot as plt
import networkx as nx
import numpy as np
import copy
import scipy
from scipy.optimize import minimize
#from scipy import minimize
from MonsterPrior import MonsterPrior
import pickle
class LaplacianGP():
''' A GP model which computes the kernel function over a graph based on the graph Laplacian. However,
you can also pass this object a covariance matrix, accompanied by a set of training indices and rewards,
and it will use those observations to condition its predictions when calling the mean function.
Example:
gp = LaplacianGP()
gp.set_training_data(training_idx, y)
gp.set_covariance(K)
mu = gp.mean()
Here K is the kernel matrix for all output points
This object also contains methods for maximizing the marginal likelihood of the data using gradient descent (scipy.optimize integration).
This works both for the RBF kernel, as well as the diffusion kernel, if the object is given a graph Laplacian.
'''
def train(self, graph, observed_nodes, y, alpha = 1):
'''
graph: This is a networkx graph object, or something that inherits from it.
observed_nodes: an array of integers indexing the nodes whose values were observed
y: an array of outcome values
alpha: the lengthscale parameter
'''
self.L = nx.normalized_laplacian_matrix(graph).todense()
self.training_idx = observed_nodes
self.y = y
self.alpha = alpha
self.sigma = 0.01
self.__K(self.L, self.alpha)
def __K(self, L, alpha):
''' A method which creates the 3 kernel matrices needed to compute the posterior mean and
covariance using the exponential of the graph laplacian weighted by negative alpha. Note that
it is assumed that the conditioning points are included in the set of evaluation points (self.K)'''
# the full covariance matrix
self.K = scipy.linalg.expm(-alpha * L)
# the matrix which will contain the covariance between all training points
self.K_obs = np.zeros((len(self.training_idx), len(self.training_idx)))
# first get the rows of the observed points
K_obs_rows = self.K[self.training_idx]
# fill in with the corresponding values at the indices of the observed points
for i, arr in enumerate(K_obs_rows):
self.K_obs[i] = arr[self.training_idx]
# create matrix containing covariance between all input points and all observed points
self.K_input_obs = np.zeros((len(self.K), len(self.training_idx)))
# fill in with the values of indices of observations
for i in range(len(self.K)):
self.K_input_obs[i] = self.K[i][self.training_idx]
def mean(self, sigma=0.01, jitter = 0.0000001):
''' computes the posterior mean function '''
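        # Standard GP posterior mean as implemented below: mu_* = K_{*,obs} (K_obs + sigma*I)^{-1} y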
self.inv_K = np.linalg.inv(self.K_obs + (sigma*np.eye(len(self.K_obs))))
return self.K_input_obs @ (self.inv_K) @ self.y
def covariance(self, sigma = 0.1):
''' computes the posterior covariance '''
return self.K - (self.K_input_obs @ np.linalg.inv(self.K_obs + sigma * np.eye(len(self.K_obs))) @ self.K_input_obs.T)
def get_prior_covariance(self):
''' Getter for the kernel matrix'''
return self.K
def set_training_data(self, training_idx, y):
''' Set training data for the GP'''
self.training_idx = training_idx
self.y = y
def set_covariance(self, covariance_matrix):
''' This method allows one to set the full covariance matrix needed to arbitrary matrices
(i.e. the matrix isn't computed from the graph Laplacian). This is useful if the covariance
one wishes to use is already known for instance'''
self.K = covariance_matrix
# the matrix which will contain the covariance between all training points
self.K_obs = np.zeros((len(self.training_idx), len(self.training_idx)))
# first get the rows of the observed points
K_obs_rows = self.K[self.training_idx]
# fill in with the corresponding values at the indices of the observed points
for i, arr in enumerate(K_obs_rows):
self.K_obs[i] = arr[self.training_idx]
self.K_input_obs = np.zeros((len(self.K), len(self.training_idx)))
# fill in with the values of indices of observations
for i in range(len(self.K)):
self.K_input_obs[i] = self.K[i][self.training_idx]
def RBF(self, X1, X2, var = 1, l = 1):
''' Computes the RBF similarity between two n x m matrices, where n is
the number of observations, and m is the number of feature dimensions'''
sqdist = np.sum(X1**2, 1).reshape(-1, 1) + np.sum(X2**2, 1) - 2 * np.dot(X1, X2.T)
return var**2 * np.exp(-0.5 / l**2 * sqdist)
def assign_inputs(self, X):
'''Convenience function for nll minimization'''
if len(list(X.shape)) == 1:
self.X = X.reshape(-1, 1)
else:
self.X = X
def nll(self, theta):
''' This function is adapted from Martin Krasser's tutorial on GP regression,
using a Cholesky decomposition as a more numerically stable method for getting
the negative log likelihood, introduced in Rasmussen and Williams'''
l = theta[0]
noise = theta[1]
K = self.RBF(self.X, self.X, var=noise, l=l)
K = K + ((noise**2) *np.eye(len(self.y)))
L = np.linalg.cholesky(K)
S1 = scipy.linalg.solve_triangular(L, self.y, lower=True)
S2 = scipy.linalg.solve_triangular(L.T, self.y, lower=False)
return np.sum(np.log(np.diagonal(L))) + \
0.5 * self.y.dot(S2) + \
0.5 * len(self.training_idx) * np.log(2*np.pi)
def set_laplacian_matrix(self, L):
self.L = L
def nll_diffusion_kernel(self, theta):
''' Performs nll minimization with scipy on a diffusion kernel'''
l = theta[0]
noise = 0.01 ## add jitter
self.__K(self.L, l)
K_ = self.K_obs.copy()
K_ = K_ + ((noise**2)*np.eye(len(self.y)))
try:
L = np.linalg.cholesky(K_)
# L = scipy.linalg.cholesky(K_)
except np.linalg.LinAlgError as err:
print("Warning: Cholesky didn't work - trying to remove negative eigenvalues and reconstruct using Eigendecomposition")
# print(l)
eig_v, eig_vec = np.linalg.eig(K_)
eig_v[eig_v < 0] = -eig_v[eig_v < 0]
lam = np.eye(len(K_))
np.fill_diagonal(lam, eig_v)
K_ = eig_vec @ lam @ np.linalg.inv(eig_vec + (np.eye(len(eig_vec))*0.000000001))
try:
L = np.linalg.cholesky(K_)
except np.linalg.LinAlgError:
raise np.linalg.LinAlgError("Could not compute Cholesky decomposition after removing negative eigenvalues")
S1 = scipy.linalg.solve_triangular(L, self.y, lower=True)
S2 = scipy.linalg.solve_triangular(L.T, self.y, lower=False)
return np.sum(np.log(np.diagonal(L))) + \
0.5 * self.y.dot(S2) + \
0.5 * len(self.training_idx) * np.log(2*np.pi)
def evaluate_nll(self, noise=0.01):
        ''' This one is better suited if you just want the nll of the GP's current kernel.
        A small noise term (default 0.01) is added for numerical stability.'''
K_ = self.K_obs.copy()
K_ += ((noise**2)*np.eye(len(self.y)))
L = np.linalg.cholesky(K_)
S1 = scipy.linalg.solve_triangular(L, self.y, lower=True)
S2 = scipy.linalg.solve_triangular(L.T, self.y, lower=False)
return np.sum(np.log(np.diagonal(L))) + \
0.5 * self.y.dot(S2) + \
0.5 * len(self.training_idx) * np.log(2*np.pi)
def minimize_nll(self, X, X_train):
''' Minimize nll function to be called when the kernel is RBF'''
self.assign_inputs(X_train)
l = np.random.uniform(0.01, 4)
n = np.random.uniform(0.0001, 1)
output = minimize(self.nll, [l, n], bounds=((1e-5, None), (1e-5, None)),
method='L-BFGS-B')
l, n = output.x
if len(list(X.shape)) == 1:
X = X.reshape(-1, 1)
else:
X = X
return self.RBF(X, X, var=n, l=l), l, n
def minimize_nll_diffusion(self):
''' Minimize nll function to be called when the kernel is a diffusion kernel'''
l = np.random.uniform(0.01, 4)
try:
output = minimize(self.nll_diffusion_kernel, [l], bounds=((1e-5, None), ),
method='L-BFGS-B')
except np.linalg.LinAlgError:
print("Could not compute cholesky - lengthscale is set to 1")
return 1
l = output.x
return l
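# Usage sketch (illustrative addition, not part of the original file); the graph and observations are
# made-up examples:
#
#   G = nx.grid_graph([5, 5])
#   gp = LaplacianGP()
#   gp.train(G, observed_nodes=[0, 3, 7], y=np.array([1.0, 0.2, 0.5]))
#   alpha = gp.minimize_nll_diffusion()   # fit the diffusion lengthscale by marginal likelihood
#   mu = gp.mean()                        # posterior mean over all nodes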
|
[
"[email protected]"
] | |
30f858dd902db2be0d5101090796c8980b6e4b42
|
d990f320b549916aea7ae9f7349e5445d472a61e
|
/replay_buffer.py
|
c867c91d31d0269f53f6b8e8cf052c0a62931090
|
[
"MIT"
] |
permissive
|
alleboudy/navigation-drl
|
d88ac83bb72824f2bfc18aebd6aacea7bf12415e
|
091ae4ffb028288dc4f0464c8109a2b54cab8250
|
refs/heads/main
| 2023-04-12T20:15:39.204542 | 2021-05-04T21:49:20 | 2021-05-04T21:49:20 | 363,675,615 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,942 |
py
|
import torch
import numpy as np
import random
from collections import namedtuple, deque
# Note: `device` was used below but never defined in this file; in the original project layout it
# usually lives in the agent module. Defining it here keeps the buffer self-contained.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self.seed = random.seed(seed)
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
return (states, actions, rewards, next_states, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory)
|
[
"[email protected]"
] | |
61d30e685f5062f0bd16062b1d190bee3ea93ccf
|
5c4c8fcf39d83c3ba9031825115f7416f474ecfd
|
/Paxel/wsgi.py
|
430007cb764f6c7f483a7190f91bfd4b2a87d076
|
[] |
no_license
|
SergioParraC/Paxel-Django
|
0fc42cec94c3c142fd06bf4cbbb550f1786c6c1a
|
25e9501902151b1b7ded45c1abf9282a5c1c0dd9
|
refs/heads/master
| 2023-03-11T09:41:55.248734 | 2021-02-25T21:08:10 | 2021-02-25T21:08:10 | 328,280,984 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 387 |
py
|
"""
WSGI config for Paxel project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Paxel.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
d0c47516027d338f264dbded0c03ad00d6542d82
|
17bd49682f7236956f0681c7126a11f8981503fe
|
/conftest.py
|
a8f4dd7cfa3dbf3a34bd1384bbd9fb8cec552a97
|
[] |
no_license
|
saferq/TZ_tenzor
|
d7104a30a91a6da3242a4be8d9a1e21410b66952
|
42e07f32682776ae91986e48f82b546c21451cc0
|
refs/heads/main
| 2023-08-06T01:52:45.279315 | 2021-09-30T06:04:26 | 2021-09-30T06:04:26 | 411,941,523 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 163 |
py
|
import pytest
from selenium import webdriver
@pytest.fixture(scope="session")
def browser():
driver = webdriver.Firefox()
yield driver
driver.quit()
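# Example of how a test module would consume this fixture (illustrative only; the URL and the
# expected title are assumptions):
#
#   def test_open_start_page(browser):
#       browser.get("https://example.com")
#       assert "Example" in browser.title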
|
[
"[email protected]"
] | |
7c4b4221e5c0374176572d6f71f5c551f817f379
|
0c08a15045b24b56bdb42dff5cf210f9bee6827f
|
/family_album/images/models.py
|
d5b5c4f36766d7947af2bbdb671029aa4607d9dd
|
[
"MIT"
] |
permissive
|
squadran2003/family-album
|
205d6f4a7256e466506d796d7da37a0eeff65fe3
|
eae75987e4786255269ecee2482d715ae2229db2
|
refs/heads/master
| 2022-12-05T00:19:29.629432 | 2019-01-20T13:10:22 | 2019-01-20T13:10:22 | 165,837,569 | 0 | 0 |
MIT
| 2022-11-22T03:23:44 | 2019-01-15T11:15:38 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,199 |
py
|
from django.utils import timezone
from PIL import Image as img
from io import BytesIO
from django.core.files.uploadedfile import InMemoryUploadedFile
import sys
from django.db import models
from django.contrib.auth.models import User
class Image(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
description = models.TextField()
image = models.ImageField(upload_to='pictures')
created_at = models.DateTimeField(default=timezone.now)
class Meta:
ordering = ('-created_at',)
    def save(self, *args, **kwargs):  # accept Django's save() arguments and pass them through below
# Opening the uploaded image
im = img.open(self.image)
output = BytesIO()
# Resize/modify the image
im = im.resize((400, 300))
# after modifications, save it to the output
im.save(output, format='JPEG', quality=100)
output.seek(0)
# change the imagefield value to be the newley modifed image value
self.image = InMemoryUploadedFile(
output, 'ImageField',
"%s.jpeg" % self.image.name.split('.')[0],
'jpeg', sys.getsizeof(output), None
)
        super(Image, self).save(*args, **kwargs)
def __str__(self):
return self.description
|
[
"[email protected]"
] | |
7fcc061464f4b66349e06e3ed825d4fc3e207c07
|
9b9a5ae297558d87e871e052d3d2e2c582e17ec4
|
/COW_PROJECT/テストコード/Beysian/gibbs_sampling_main.py
|
dc4c1c8950674625557baf35504f929a5515cde6
|
[] |
no_license
|
vijaydairyf/cow_python
|
9b7632915db1685b6fd2813db9d4310a54d5600b
|
8e07845c4527e753e405da708a010a8c2ca7c425
|
refs/heads/master
| 2021-01-09T17:52:07.500578 | 2020-02-11T07:51:02 | 2020-02-11T07:51:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,049 |
py
|
import numpy as np
import math
import matplotlib.pyplot as plt
import pdb # for debugging
# custom classes
import myClass.plotting as plotting
import myClass.mixed_model as mixed_model
def create_artificial_poissondata(lam, num):
""" テスト用のデータセットを作成する
Parameter
lam : ポアソン分布のλパラメータ (1次元)
num : データ生成個数 """
X = np.random.poisson(lam, num) # ndarray
return X
def create_artificial_gaussiandata(mu, cov, num):
""" テスト用のデータセットを作成する
Parameter
mu : ガウス分布の平均パラメータ (多次元)
cov : ガウス分布の分散共分散行列パラメータ
num : : データ生成個数 """
X = np.random.multivariate_normal(mu, cov, num) # ndarray
return X
def extract_data(X, S, k):
""" Sの結果からk番目のクラスタに所属するデータをXから抽出する """
N = len(X.T)
new_X = []
for n in range(N):
if (S[k, n] == 1):
new_X.append(X[:,n])
return new_X
def poisson_mixed_model_test():
""" 1次元の入力データをポアソン混合モデルを用いてクラスタリングする """
# 多峰性の1次元データ点を生成
X1 = create_artificial_poissondata(20, 1000)
X2 = create_artificial_poissondata(50, 750)
X = np.hstack((X1, X2)) # 2つのndarrayを結合
np.random.shuffle(X) # データをシャッフル
X = np.array([X]) # データの2次元化
# データを可視化
plotter = plotting.PlotUtility()
plotter.hist_plot([X1,X2], 20, color=None) # ヒストグラムを表示,正解で色分け
# ポアソン混合モデルのパラメータの設定
lambda_vector = np.array([30, 40])
pi_vector = np.array([0.5, 0.5])
alpha_vector = np.array([1, 1])
max_iterater = 50
    # clustering by Gibbs sampling
    a_0, b_0 = 1, 1
    poisson_model = mixed_model.PoissonMixedModel(lambda_vector, pi_vector, alpha_vector, max_iterater)
    result = poisson_model.gibbs_sample(X, a_0, b_0)
    # estimate probabilities for new inputs
    new_X = np.array([np.arange(1,100)])
    prob_matrix = poisson_model.predict(new_X)
    # visualize the clustering result
X1 = extract_data(X, result, 0)
X2 = extract_data(X, result, 1)
plotter2 = plotting.PlotUtility()
plotter2.hist_plot([X1,X2], 20, color=None)
plotter_prob = plotting.PlotUtility()
prob1, prob2 = prob_matrix[0,:], prob_matrix[1,:]
plotter_prob.scatter_plot(new_X, prob1, [0 for _ in range(len(new_X))])
plotter_prob.scatter_plot(new_X, prob2, [1 for _ in range(len(new_X))])
    # show the plots
plotter.show()
plotter2.show()
plotter_prob.show()
def gaussian_mixed_model_test():
    # generate multi-modal 2-dimensional data points
    X1 = create_artificial_gaussiandata(np.array([30, 40]), np.array([[100, 25], [25, 100]]), 1100)
    X2 = create_artificial_gaussiandata(np.array([70, 20]), np.array([[150, 75], [75, 150]]), 900)
    X = np.concatenate([X1, X2], 0) # concatenate the two ndarrays
    np.random.shuffle(X) # shuffle the data
    X = X.T
    # visualize the data
plotter = plotting.PlotUtility()
plotter.scatter_plot(X1[:,0], X1[:,1], [1 for _ in range(len(X1))])
plotter.scatter_plot(X2[:,0], X2[:,1], [2 for _ in range(len(X2))])
    # set the parameters of the Gaussian mixture model
mu_vectors = [np.array([30, 50]), np.array([70, 50])]
cov_matrixes = [np.array([[110, 45], [45, 110]]), np.array([[130, 55], [55, 130]])]
pi_vector = np.array([0.6, 0.4])
alpha_vector = np.array([1, 1])
max_iterater = 10
    # clustering by Gibbs sampling
    gaussian_model = mixed_model.GaussianMixedModel(cov_matrixes, mu_vectors, pi_vector, alpha_vector, max_iterater)
    result = gaussian_model.gibbs_sample(X, np.array([[50, 50]]).T, 1, 3, np.array([[1, 0], [0, 1]]))
    # estimate probabilities for new inputs
    new_X = np.arange(1,101, 2)
    new_Y = np.arange(1,101, 2)
    grid_X, grid_Y = np.meshgrid(new_X, new_Y)
    new_X = np.array([grid_X.ravel(), grid_Y.ravel()])
    prob_matrix = gaussian_model.predict(new_X)
    # visualize the clustering result
X1 = np.array(extract_data(X, result, 0))
X2 = np.array(extract_data(X, result, 1))
plotter2 = plotting.PlotUtility()
plotter2.scatter_plot(X1[:,0], X1[:,1], [1 for _ in range(len(X1))])
plotter2.scatter_plot(X2[:,0], X2[:,1], [2 for _ in range(len(X2))])
plotter_prob = plotting.PlotUtility3D()
prob1, prob2 = prob_matrix[0], prob_matrix[1]
plotter_prob.plot_surface(grid_X, grid_Y, prob1.reshape([50, 50]), c=1)
plotter_prob.plot_surface(grid_X, grid_Y, prob2.reshape([50, 50]), c=2)
    # show the plots
plotter.show()
plotter2.show()
plotter_prob.show()
if __name__ == '__main__':
#poisson_mixed_model_test()
gaussian_mixed_model_test()
|
[
"[email protected]"
] | |
b8cf141fea4b1a22938b4d48884f5fa6a015aed3
|
8be847caa7b226c7530a530a719a6987feacf7fb
|
/large_app/python/auth0.py
|
5a027e14dbb6f3c93af41684fdee5aa6c67522e5
|
[
"MIT"
] |
permissive
|
sahilGupta89/large_flask_app
|
91af1a6fc32d6d9b9903720d132773ae5e8d18a7
|
e1ab54431bb935c02186f586d9246b741d9f2d33
|
refs/heads/master
| 2023-05-29T16:51:46.599875 | 2020-11-08T11:10:35 | 2020-11-08T11:10:35 | 213,057,891 | 0 | 0 |
MIT
| 2023-05-01T21:37:35 | 2019-10-05T19:19:37 |
Python
|
UTF-8
|
Python
| false | false | 8,356 |
py
|
from dataclasses import dataclass
from datetime import datetime, timedelta
import logging
from urllib.parse import urljoin
from jose import jwt
import requests
import env
from jwks import jwks
log = logging.getLogger(__name__)
def auth0_url(path=""):
return urljoin(f"https://{env.AUTH0_DOMAIN}/", path)
@dataclass
class TokenResult:
access_token: dict
id_token: dict
result: dict
@property
def subject(self) -> str:
return self.access_token["sub"]
@property
def expires(self) -> datetime:
return datetime.utcfromtimestamp(self.access_token["exp"])
def is_expired(self) -> bool:
return datetime.utcnow() > self.expires
@property
def token_type(self) -> str:
return self.result["token_type"]
@property
def access_token_value(self) -> str:
return self.result["access_token"]
def token_from_username_password(username, password) -> TokenResult:
r = requests.post(
auth0_url("oauth/token"),
json={
"grant_type": "password",
"username": username,
"password": password,
"audience": env.AUTH0_API_AUDIENCE,
"client_id": env.AUTH0_CLIENT_ID,
"scope": "openid",
"client_secret": env.AUTH0_CLIENT_SECRET,
},
)
if r.status_code == 403:
raise AuthError(r.json(), 401, reauth=True)
parse_status_code(r)
return _oauth_token_to_token_result(r.json())
def token_info_from_client_credentials(client_id, client_secret) -> dict:
r = requests.post(
auth0_url("oauth/token"),
json={
"grant_type": "client_credentials",
"client_id": client_id,
"client_secret": client_secret,
"audience": env.AUTH0_ZEAPI_AUDIENCE,
},
)
r.raise_for_status()
token_info = r.json()
log.info("Credentials login result: %s", token_info)
return token_info
def token_result_from_client_credentials(
client_id, client_secret
) -> TokenResult:
token_info = token_info_from_client_credentials(client_id, client_secret)
return TokenResult(
access_token=parse_it(
token_info["access_token"], env.AUTH0_ZEAPI_AUDIENCE
),
id_token={},
result=token_info,
)
def _oauth_token_to_token_result(
token_info: dict, audience=env.AUTH0_API_AUDIENCE
) -> TokenResult:
assert "access_token" in token_info
return TokenResult(
        # use the `audience` argument instead of always re-reading env.AUTH0_API_AUDIENCE
        access_token=parse_it(
            token_info["access_token"], audience
        ),
id_token=parse_it(token_info["id_token"], env.AUTH0_CLIENT_ID),
result=token_info,
)
def token_from_header_value(token, audience=env.AUTH0_API_AUDIENCE) -> dict:
return parse_it(token, audience)
def token_result_from_header_value(
token, audience=env.AUTH0_API_AUDIENCE
) -> TokenResult:
return TokenResult(
access_token=token_from_header_value(token, audience),
id_token={},
result={"access_token": token},
)
def get_userinfo(token) -> dict:
return requests.get(
auth0_url("userinfo"), headers={"Authorization": f"Bearer {token}"}
).json()
def parse_it(token, audience) -> dict:
unverified_header = jwt.get_unverified_header(token)
rsa_key = {}
for key in jwks["keys"]:
if key["kid"] == unverified_header["kid"]:
rsa_key = {
"kty": key["kty"],
"kid": key["kid"],
"use": key["use"],
"n": key["n"],
"e": key["e"],
}
if rsa_key:
try:
payload = jwt.decode(
token,
rsa_key,
algorithms=env.AUTH0_ALGORITHMS,
audience=audience,
issuer=auth0_url(),
)
except jwt.ExpiredSignatureError:
raise AuthError(
{"code": "token_expired", "description": "token is expired"},
401,
)
except jwt.JWTClaimsError as claims_error:
raise AuthError(
{
"code": "invalid_claims",
"description": "incorrect claims,"
"please check the audience and issuer",
},
401,
) from claims_error
except Exception:
raise AuthError(
{
"code": "invalid_header",
"description": "Unable to parse authentication" " token.",
},
401,
)
return payload
raise AuthError(
{
"code": "invalid_header",
"description": "Unable to find appropriate key",
},
401,
)
class ManagementAPI(object):
def __init__(self):
self.grant_type = "client_credentials"
self._current_access_token = None
self._api_base = auth0_url("api/v2/")
self._users_api_url = urljoin(self._api_base, "users")
def _access_token(self):
if self._current_access_token:
            # renew only when the cached token will expire within the next 30 minutes
            # (the original condition triggered a renewal on effectively every call)
            renew_after = self._current_access_token.expires - timedelta(
                minutes=30
            )
            if renew_after < datetime.utcnow():
log.debug(
"ManagementAPI token expires soon(%s). Renewing",
self._current_access_token.expires,
)
self._renew()
else:
self._renew()
return self._current_access_token
def _renew(self):
res = requests.post(
auth0_url("oauth/token"),
json=dict(
grant_type=self.grant_type,
client_id=env.AUTH0_CLIENT_ID,
client_secret=env.AUTH0_CLIENT_SECRET,
audience=self._api_base,
),
)
if res.status_code > 299:
log.warning(
"Failed to get token for management api: %r", res.content
)
parse_status_code(res)
token_info = res.json()
self._current_access_token = TokenResult(
access_token=parse_it(token_info["access_token"], self._api_base),
id_token={},
result=token_info,
)
def _headers(self):
token = self._access_token()
return {
"Authorization": f"{token.token_type} {token.access_token_value}"
}
def create_user(self, user, password: str):
res = requests.post(
self._users_api_url,
json={
"email": user.email,
"password": password,
"connection": env.AUTH0_UP_CONNECTION_NAME,
"user_metadata": user.dump(),
},
headers=self._headers(),
)
if res.status_code > 299:
log.warning(
"Got %r",
res.content,
extra={
"auth0_create_user_context": {
"user_id": user.id,
"email": user.email,
"name": user.name,
}
},
)
parse_status_code(res)
return res.json()
def get_userinfo(self, sub: str):
res = requests.get(
urljoin(self._users_api_url.rstrip("/") + "/", sub),
headers=self._headers(),
)
parse_status_code(res)
userinfo_result = res.json()
        # Paper over the main difference between id_token and userinfo
userinfo_result.setdefault("sub", userinfo_result.get("user_id"))
return userinfo_result
class AuthError(Exception):
def __init__(self, error, status_code, reauth=False):
self.error = error
self.status_code = status_code
self.reauth = reauth
def parse_status_code(res):
if res.status_code in (409, 400, 429): # duplicate user
raise AuthError(error=res.json(), status_code=res.status_code)
res.raise_for_status()
def request_bearer_token(request) -> str:
header = request.headers.get("authorization", "")
if not header.lower().startswith("bearer"):
return None
_, header_token = header.split(" ", 1)
return header_token
management_api = ManagementAPI()
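# Usage sketch (illustrative addition, not part of the original file): validating an incoming
# request's bearer token and looking up the caller.
#
#   token_value = request_bearer_token(request)            # None if no "Authorization: Bearer ..." header
#   if token_value:
#       token = token_result_from_header_value(token_value)
#       userinfo = management_api.get_userinfo(token.subject)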
|
[
"[email protected]"
] | |
c03744b393ec5f98ff295969921ddf3de80aecaf
|
9c52998e7d92640b82284e7e85bf69205fc94d73
|
/SeleniumLearningFiles/SeleniumLearning01/webdrivertest/web04.py
|
ec6aa9036031cb6a57f01829bff64e05c5c91ab3
|
[] |
no_license
|
github653224/GitProjects_SeleniumLearing
|
b0c57d27fa48b0cd7475f8d8e8b19c57160e65fc
|
818b573a3b0f18def98610e59e3c0c6500a675bc
|
refs/heads/master
| 2021-07-20T05:54:46.392948 | 2017-10-27T12:53:41 | 2017-10-27T12:53:41 | 107,764,014 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 473 |
py
|
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
import time
from random import randint
verify = randint(1000, 9999)
print(u"Generated random number: %d " % verify)
number = input("Please enter the random number: ")
print(number)
number = int(number)
if number == verify:
    print("Login successful!!")
elif number == 132741:
    print("Login successful!!")  # hard-coded bypass value kept from the original script
else:
    print("Incorrect input")
|
[
"[email protected]"
] | |
8375cedfd57bf1a7dd0794d23b840cd0ffe5bb75
|
6f7495631dcf2d8ad1e878f8492ffc686691d50a
|
/day03/ex03/ColorFilter.py
|
37bff11b9302a956184f017affb0d8cde2999409
|
[] |
no_license
|
mli42/python_bootcamp
|
0e0012f611902c0be40ea4933d17255652465501
|
4e71ec20b12676016514875ee96d15dafb177718
|
refs/heads/main
| 2022-12-11T00:55:44.880734 | 2022-09-16T15:13:16 | 2022-09-16T15:14:13 | 233,590,858 | 3 | 2 | null | 2022-12-08T13:07:05 | 2020-01-13T12:30:49 |
Python
|
UTF-8
|
Python
| false | false | 6,240 |
py
|
# **************************************************************************** #
# #
# ::: :::::::: #
# ColorFilter.py :+: :+: :+: #
# +:+ +:+ +:+ #
# By: mli <[email protected]> +#+ +:+ +#+ #
# +#+#+#+#+#+ +#+ #
# Created: 2020/11/24 22:42:30 by mli #+# #+# #
# Updated: 2022/03/12 23:30:33 by mli ### ########.fr #
# #
# **************************************************************************** #
import numpy as np
from copy import deepcopy
from ImageProcessor import ImageProcessor
class ColorFilter:
def __guard_ndarray(funct):
def inner(*args, **kwargs):
array = args[0]
if not (isinstance(array, np.ndarray) and
('float' in str(array.dtype) or 'int' in str(array.dtype))):
return None
try:
return_value = funct(*args, **kwargs)
except:
return None
return return_value
return (inner)
@staticmethod
@__guard_ndarray
def invert(array: np.ndarray) -> np.ndarray:
res = 1 - array
res[..., 3:] = array[..., 3:]
return res
@staticmethod
@__guard_ndarray
def to_blue(array: np.ndarray) -> np.ndarray:
res = np.zeros(array.shape)
res[..., 2:] = array[..., 2:]
return res
@staticmethod
@__guard_ndarray
def to_green(array: np.ndarray) -> np.ndarray:
res = deepcopy(array)
res[..., :3:2] = res[..., :3:2] * 0
return res
@staticmethod
@__guard_ndarray
def to_red(array: np.ndarray) -> np.ndarray:
only_blue_green = ColorFilter.to_blue(array) + ColorFilter.to_green(array)
res = array - only_blue_green
res[..., 3:] = array[..., 3:]
return res
@staticmethod
@__guard_ndarray
def to_celluloid(array: np.ndarray) -> np.ndarray:
bounds = np.linspace(array.min(), array.max(), 5)
res = array.copy()
lower_bound = bounds[0]
for upper_bound in bounds[1:]:
mask = (res[..., :3] > lower_bound) & (res[..., :3] < upper_bound)
res[..., :3][mask] = lower_bound
lower_bound = upper_bound
return res
@staticmethod
def __guard_grayscale(filter: str, **kwargs) -> bool:
weights = kwargs.pop('weights', None)
hasWeights = weights is not None
if (
(len(kwargs) != 0) or
(filter not in ['m', 'mean', 'w', 'weight']) or
(filter in ['m', 'mean'] and hasWeights) or
(filter in ['w', 'weight'] and (
not isinstance(weights, list) or
len(weights) != 3 or
not all([isinstance(obj, float) and obj >= 0 for obj in weights]) or
np.sum(weights) != 1.
))
):
return False
return True
@staticmethod
@__guard_ndarray
def to_grayscale(array: np.ndarray, filter: str, **kwargs) -> np.ndarray:
if ColorFilter.__guard_grayscale(filter, **kwargs) is False:
return None
weights = kwargs.get('weights')
res = None
if (filter in ['m', 'mean']):
mono = np.sum(array[..., :3], axis=2, keepdims=True) / 3
res = np.dstack((np.tile(mono, 3), array[..., 3:]))
elif (filter in ['w', 'weight']):
mono = np.sum(array[..., :3] * weights, axis=2, keepdims=True)
res = np.dstack((np.tile(mono, 3), array[..., 3:]))
return res
def main():
imgProc = ImageProcessor()
cfilter = ColorFilter()
elon = imgProc.load("../resources/elon.png")
def display_img(array):
if array is None:
print('Array is None')
return
imgProc.display(array)
def launch_filters(img):
if img is None:
print('Img is None')
return
base_ope = ('Base img', lambda x: x, [], {})
arr = [
base_ope,
('Inverted', cfilter.invert, [], {}),
('To blue', cfilter.to_blue, [], {}),
('To green', cfilter.to_green, [], {}),
('To red', cfilter.to_red, [], {}),
('To celluloid', cfilter.to_celluloid, [], {}),
('To grayscale m', cfilter.to_grayscale, ['m'], {}),
('To grayscale mean', cfilter.to_grayscale, ['mean'], {}),
('To grayscale w', cfilter.to_grayscale, ['w'], {'weights': [.2, .3, .5]}),
('To grayscale weight', cfilter.to_grayscale, ['weight'], {'weights': [.6, .2, .2]}),
base_ope
]
for label, fct, args, kwargs in arr:
print(label)
display_img(fct(img, *args, **kwargs))
def grayscale_err(img):
arr = [
('Args err', ['hey'], {'weights': [.8, .1, .1]}),
('Kwargs err', ['m'], {'hey': 123}),
('Weight value', ['m'], {'weights': 123}),
('Mean with weight', ['m'], {'weights': [.8, .1, .1]}),
('Weight tuple', ['w'], {'weights': (.8, .1, .1)}),
('Weight intruder', ['w'], {'weights': [1., 2., 'a']}),
('Too much float', ['w'], {'weights': [.8, .1, .1, .0]}),
('Too high float', ['w'], {'weights': [.8, .1, .2]}),
('Too much kwargs', ['w'], {'weights': [.8, .1, .1], 'hey': 'a'}),
('Negativ float', ['w'], {'weights': [.8, -.1, .3]}),
]
for label, args, kwargs in arr:
print(label, end=': ')
display_img(cfilter.to_grayscale(img, *args, **kwargs))
print('Trying with Elon')
launch_filters(elon)
print('Trying with inverted Elon')
launch_filters(cfilter.invert(elon))
print('Check grayscale guardian')
grayscale_err(elon)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
d0594ba180ac2eb8f8df3854ae9e4fd1f3cf86e6
|
e2b4c4dc7b9ad43e5e06d050eccd43ebf98d76c3
|
/snap_plugin/v1/pub_proc_arg.py
|
c6486d5adc3ed1562e447aa52d1182f141293507
|
[
"Apache-2.0"
] |
permissive
|
intelsdi-x/snap-plugin-lib-py
|
4bcf7d6c665f85285af83271380f23413b23082e
|
24b08eb5feaeb64d7c6e25781abe3b8ce2fa9277
|
refs/heads/master
| 2022-11-12T11:31:11.420061 | 2022-11-07T23:11:16 | 2022-11-07T23:11:16 | 69,615,435 | 5 | 16 | null | 2017-08-28T13:38:17 | 2016-09-29T23:16:25 |
Python
|
UTF-8
|
Python
| false | false | 1,282 |
py
|
# -*- coding: utf-8 -*-
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .plugin_pb2 import PubProcArg
class _PubProcArg(object):
def __init__(self, metrics=[], **kwargs):
self._pb = PubProcArg(Metrics=[m.pb for m in metrics])
if "config" in kwargs:
self._pb.Config.MergeFrom(kwargs.get("config").pb)
@property
def pb(self):
return self._pb
class _ProcessArg(_PubProcArg):
def __init__(self, metrics=[], **kwargs):
super(_ProcessArg, self).__init__(metrics=metrics, **kwargs)
class _PublishArg(_PubProcArg):
def __init__(self, metrics=None, **kwargs):
super(_PublishArg, self).__init__(metrics=metrics, **kwargs)
|
[
"[email protected]"
] | |
04dd25f2e360e6a0b81d6329398e7373d37c3db2
|
ff801544b1979442b886d2d1eaf8480e7d6b0d24
|
/main.py
|
20bae383952351920f5e31df5cc21b3dcc2b56c3
|
[] |
no_license
|
BLimmie/OctoGAN
|
7d420cd223ea0dd77dd0dfa1827f12fcd32e9dec
|
38bb4d76eb8dea22278da2d496b712c171be080f
|
refs/heads/master
| 2021-05-11T02:11:55.498819 | 2018-01-21T17:34:58 | 2018-01-21T17:34:58 | 118,352,908 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,747 |
py
|
from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', required=True, help='cifar10 | lsun | imagenet | folder | lfw | fake')
parser.add_argument('--dataroot', required=True, help='path to dataset')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)
parser.add_argument('--batchSize', type=int, default=64, help='input batch size')
parser.add_argument('--imageSize', type=int, default=128, help='the height / width of the input image to network')
parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--niter', type=int, default=150, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')
parser.add_argument('--manualSeed', type=int, help='manual seed')
opt = parser.parse_args()
print(opt)
try:
os.makedirs(opt.outf)
except OSError:
pass
if opt.manualSeed is None:
opt.manualSeed = random.randint(1, 10000)
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.cuda:
torch.cuda.manual_seed_all(opt.manualSeed)
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
if opt.dataset in ['imagenet', 'folder', 'lfw']:
# folder dataset
dataset = dset.ImageFolder(root=opt.dataroot,
transform=transforms.Compose([
transforms.Scale(opt.imageSize),
transforms.CenterCrop(opt.imageSize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
elif opt.dataset == 'lsun':
dataset = dset.LSUN(db_path=opt.dataroot, classes=['bedroom_train'],
transform=transforms.Compose([
transforms.Scale(opt.imageSize),
transforms.CenterCrop(opt.imageSize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
elif opt.dataset == 'cifar10':
dataset = dset.CIFAR10(root=opt.dataroot, download=True,
transform=transforms.Compose([
transforms.Scale(opt.imageSize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
elif opt.dataset == 'fake':
dataset = dset.FakeData(image_size=(3, opt.imageSize, opt.imageSize),
transform=transforms.ToTensor())
assert dataset
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
shuffle=True, num_workers=int(opt.workers))
ngpu = int(opt.ngpu)
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
nc = 3
# custom weights initialization called on netG and netD
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
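# The initialization above follows the DCGAN convention: conv weights drawn from
# N(0.0, 0.02), batch-norm scale from N(1.0, 0.02) with zero bias. It is applied
# recursively via Module.apply() (see netG.apply(weights_init) / netD.apply(weights_init) below).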
class _netG(nn.Module):
def __init__(self, ngpu):
super(_netG, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input is Z, going into a convolution
nn.ConvTranspose2d( nz, ngf * 16, 4, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 16),
nn.ReLU(True),
            # state size. (ngf*16) x 4 x 4
nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True),
            # state size. (ngf*8) x 8 x 8
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True),
            # state size. (ngf*4) x 16 x 16
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
            # state size. (ngf*2) x 32 x 32
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
            # state size. (ngf) x 64 x 64
nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),
nn.Tanh()
            # state size. (nc) x 128 x 128
)
def forward(self, input):
if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output
netG = _netG(ngpu)
netG.apply(weights_init)
if opt.netG != '':
netG.load_state_dict(torch.load(opt.netG))
print(netG)
class _netD(nn.Module):
def __init__(self, ngpu):
super(_netD, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
            # input is (nc) x 128 x 128
nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 64 x 64
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 32 x 32
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 16 x 16
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 8 x 8
nn.Conv2d(ndf * 8, ndf * 16, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 16),
nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*16) x 4 x 4
nn.Conv2d(ndf * 16, 1, 4, 1, 0, bias=False),
nn.Sigmoid()
)
def forward(self, input):
if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output.view(-1, 1).squeeze(1)
netD = _netD(ngpu)
netD.apply(weights_init)
if opt.netD != '':
netD.load_state_dict(torch.load(opt.netD))
print(netD)
criterion = nn.BCELoss()
input = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
label = torch.FloatTensor(opt.batchSize)
real_label = 1
fake_label = 0
if opt.cuda:
netD.cuda()
netG.cuda()
criterion.cuda()
input, label = input.cuda(), label.cuda()
noise, fixed_noise = noise.cuda(), fixed_noise.cuda()
fixed_noise = Variable(fixed_noise)
# setup optimizer
optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
for epoch in range(opt.niter):
for i, data in enumerate(dataloader, 0):
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
# train with real
netD.zero_grad()
real_cpu, _ = data
batch_size = real_cpu.size(0)
if opt.cuda:
real_cpu = real_cpu.cuda()
input.resize_as_(real_cpu).copy_(real_cpu)
label.resize_(batch_size).fill_(real_label)
inputv = Variable(input)
labelv = Variable(label)
output = netD(inputv)
errD_real = criterion(output, labelv)
errD_real.backward()
D_x = output.data.mean()
# train with fake
noise.resize_(batch_size, nz, 1, 1).normal_(0, 1)
noisev = Variable(noise)
fake = netG(noisev)
labelv = Variable(label.fill_(fake_label))
output = netD(fake.detach())
errD_fake = criterion(output, labelv)
errD_fake.backward()
D_G_z1 = output.data.mean()
errD = errD_real + errD_fake
optimizerD.step()
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
netG.zero_grad()
labelv = Variable(label.fill_(real_label)) # fake labels are real for generator cost
output = netD(fake)
errG = criterion(output, labelv)
errG.backward()
D_G_z2 = output.data.mean()
optimizerG.step()
print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
% (epoch, opt.niter, i, len(dataloader),
errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))
if i % 100 == 0:
vutils.save_image(real_cpu,
'%s/real_samples.png' % opt.outf,
normalize=True)
fake = netG(fixed_noise)
vutils.save_image(fake.data,
'%s/fake_samples_epoch_%03d.png' % (opt.outf, epoch),
normalize=True)
# do checkpointing
torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (opt.outf, epoch))
torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (opt.outf, epoch))
|
[
"[email protected]"
] | |
821a36d24596e0ac1a7bce97e1a3d9b9992c271f
|
03043b715d2e177dd3ba93078463ce79c33173dc
|
/NI_DAQmx/models/NI_PXIe_6535.py
|
ffdfbaabce93ed1ea32f606174fc1da92d542ec7
|
[] |
no_license
|
labscript-suite-bitbucket-archive/cavitylab-labscript_devices--forked-from--labscript_suite-labscript_devices
|
2efc068eb35ca70e1eecab9c7fec7991fd596c9c
|
e665d3ee0ce1cfd7fb7cd5c6cc4d783528bc4935
|
refs/heads/master
| 2020-12-27T02:35:41.710162 | 2019-12-06T20:57:48 | 2019-12-06T20:57:48 | 253,143,395 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,629 |
py
|
#####################################################################
# #
# /NI_DAQmx/models/_subclass_template.py #
# #
# Copyright 2018, Christopher Billington #
# #
# This file is part of the module labscript_devices, in the #
# labscript suite (see http://labscriptsuite.org), and is #
# licensed under the Simplified BSD License. See the license.txt #
# file in the root of the project for the full license. #
# #
#####################################################################
#####################################################################
# WARNING #
# #
# This file is auto-generated, any modifications may be #
# overwritten. See README.txt in this folder for details #
# #
#####################################################################
from __future__ import division, unicode_literals, print_function, absolute_import
from labscript_utils import PY2
if PY2:
str = unicode
from labscript_devices.NI_DAQmx.labscript_devices import NI_DAQmx
CAPABILITIES = {
'AI_range': None,
'AI_start_delay': None,
'AO_range': None,
'max_AI_multi_chan_rate': None,
'max_AI_single_chan_rate': None,
'max_AO_sample_rate': None,
'max_DO_sample_rate': 10000000.0,
'min_semiperiod_measurement': None,
'num_AI': 0,
'num_AO': 0,
'num_CI': 0,
'ports': {
'port0': {'num_lines': 8, 'supports_buffered': True},
'port1': {'num_lines': 8, 'supports_buffered': True},
'port2': {'num_lines': 8, 'supports_buffered': True},
'port3': {'num_lines': 8, 'supports_buffered': True},
'port4': {'num_lines': 6, 'supports_buffered': False},
},
'supports_buffered_AO': False,
'supports_buffered_DO': True,
'supports_semiperiod_measurement': False,
}
class NI_PXIe_6535(NI_DAQmx):
description = 'NI-PXIe-6535'
def __init__(self, *args, **kwargs):
# Any provided kwargs take precedent over capabilities
combined_kwargs = CAPABILITIES.copy()
combined_kwargs.update(kwargs)
NI_DAQmx.__init__(self, *args, **combined_kwargs)
|
[
"[email protected]"
] | |
c3ce6f4907c56922e923d921e78478a4fe44f176
|
ce73050565ebdec828919f339e81da54b5fd7fcf
|
/GeneralProblems/DynamicArray.py
|
cb9487aadfc557076f184d6d7d48c600069796c3
|
[] |
no_license
|
VaibhavDesai/Algorithms
|
b4b1ad6a13a32cfe16abb4174a672841d45628e2
|
32f43f0c4b28eb4aa2b6142ff962fc322ac796b0
|
refs/heads/master
| 2020-12-30T13:28:11.729137 | 2017-10-02T08:02:30 | 2017-10-02T08:02:30 | 91,217,973 | 1 | 0 | null | 2017-05-19T16:52:25 | 2017-05-14T03:41:20 |
Python
|
UTF-8
|
Python
| false | false | 231 |
py
|
firstIn = [int(x) for x in input().split()]
n = firstIn[0]
q = firstIn[1]
def calDy(inputList, n):
    if inputList[0] == 1:
        pass  # TODO: handle query type 1 (implementation missing)
ans = []
for i in range(q):
    ans.append(calDy([int(x) for x in input().split()], n))
|
[
"[email protected]"
] | |
b32507222fde3f24d7b8b4d925485d3b237f7ea4
|
6e1fe9ac115c8404e61e880375af685fb09696f1
|
/__main__.py
|
439817a9148425e5eb50db57a8a891ffa5ec19d4
|
[
"MIT"
] |
permissive
|
ValentinKolb/scon
|
01ab016775df71bd767c92ab26b1db03ef8912ac
|
c4a6646a0815d0c8ef9fa2505f7afb7ac68c3c2c
|
refs/heads/main
| 2023-08-28T04:16:21.075881 | 2021-11-03T20:37:28 | 2021-11-03T20:37:28 | 399,600,661 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,112 |
py
|
#!/usr/bin/env python3
# This script configures ssh for new hosts
# Author: Valentin Kolb
# Version: 1.1
# License: MIT
import os
import subprocess
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import List, Union
import re
import argparse
from prompt_toolkit import PromptSession, HTML, print_formatted_text
from prompt_toolkit.completion import NestedCompleter
from prompt_toolkit.shortcuts import clear
from prompt_toolkit.styles import Style
#########################
# DEFAULT CONFIGURATION #
#########################
DEFAULT_USER = "admin"
DEFAULT_PORT = 22
CONFIG_FILE = str(Path.home()) + "/.ssh/config"
SSH_KEY_DIR = str(Path.home()) + "/.ssh/keys"
#########################
# END DEFAULTS #
#########################
def bottom_toolbar():
return HTML('SSH Wizard - type <b>help</b> to list all commands')
def stderr(text, end="\n"):
"""
prints error msg
"""
print_formatted_text(text, file=sys.stderr, end=end)
session = PromptSession(
bottom_toolbar=bottom_toolbar,
complete_while_typing=True
)
style = Style.from_dict({
'cmd': '#ff0066',
'hlp': '#44ff00 italic',
})
REVERSED = u"\u001b[7m"
RESET = u"\u001b[0m"
FNULL = open(os.devnull, 'w')
SSH_KEY_FILE_REGEX = r"Host +(?P<ID>.+)\n\tHostname +(?P<hostname>\S+)\n\tUser +(?P<user>\S+)\n\tPort +(?P<port>\d+)\n\tIdentityFile +(?P<key_file>\S+)\n?"
@dataclass(frozen=True)
class SSHConfig:
ID: str
hostname: str
user: str
port: int
key_file: str
def file_to_dataclass(file: str) -> List[SSHConfig]:
"""
    reads an ssh config file and parses it into a list of dataclasses
:param file: the ssh config file
:return: an array of dataclasses
"""
with open(file) as file:
content = file.read()
results = []
for match in re.finditer(pattern=SSH_KEY_FILE_REGEX, string=content):
results.append(
SSHConfig(
ID=match.group("ID"),
hostname=match.group("hostname"),
user=match.group("user"),
port=int(match.group("port")),
key_file=match.group("key_file")
)
)
return results
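# Illustrative round-trip (host values are made up): dataclass_to_file below writes
# tab-indented blocks of the form
#   Host web1
#       Hostname web1.example.com
#       User admin
#       Port 22
#       IdentityFile /home/user/.ssh/keys/web1_example_com
# and SSH_KEY_FILE_REGEX parses such a block back into
#   SSHConfig(ID='web1', hostname='web1.example.com', user='admin', port=22,
#             key_file='/home/user/.ssh/keys/web1_example_com')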
def dataclass_to_file(file: str, data: List[SSHConfig]):
"""
writes the ssh config file
:param file: the path of the file
:param data: the data to be written
"""
with open(file, mode="w") as file:
for config in data:
file.write(
f'Host {config.ID}\n' +
f'\tHostname {config.hostname}\n' +
f'\tUser {config.user}\n' +
f'\tPort {config.port}\n' +
f'\tIdentityFile {config.key_file}\n\n'
)
def yes(prompt="[Y/n]"):
"""
asks user yes or no question, yes is default
:param prompt: the prompt for the user
:return: true if answer was yes
"""
while True:
_in = session.prompt(prompt).strip().lower()
if _in in ["y", "yes", ""]:
return True
elif _in in ["n", "no"]:
return False
def list_config():
"""
this will print all currently configured hosts
"""
hosts = file_to_dataclass(CONFIG_FILE)
i = max(len(h.ID) for h in hosts)
j = max(len(h.hostname) + 1 + len(h.user) for h in hosts)
print(f'{"identifier".upper().ljust(i)} | HOST')
print("=" * (i + j + 3))
for host in hosts:
print(f'{host.ID.ljust(i, ".")} | {(host.user + "@" + host.hostname).ljust(j, ".")}')
print(f"\nUsage: 'ssh <identifier>' (eg: ssh {hosts[0].ID})")
def add_host():
# domain name
hostname = session.prompt("Enter the domain name. (e.g. host.example.com): ").strip().lower()
    ID = hostname.split(".", 1)[0]
ID = session.prompt(
f"Enter an alias of the host (usage: ssh <alias>) [{ID}]: ") or ID
# check if host is up
if not subprocess.run(["ping", "-c", "1", "-i", "0.5", hostname],
stdout=FNULL,
stderr=subprocess.STDOUT).returncode == 0:
stderr(f"{hostname} can't be reached, do want to continue anyway? [Y/n] ", end="")
if not yes(prompt=""):
stderr("... aborting")
return
# user name
user = session.prompt(f"please enter the user [{DEFAULT_USER}]: ").strip() or DEFAULT_USER
# port
port = int(session.prompt(f"please enter the port [{DEFAULT_PORT}]: ").strip() or 22)
# check for existing configuration
hosts = file_to_dataclass(CONFIG_FILE)
if any(hostname == h.hostname for h in hosts):
stderr(f"There is already a configuration for the host {hostname}, do you want to overwrite it? [Y/n] ", end="")
if not yes(prompt=""):
stderr("... aborting")
return
else:
hosts = [h for h in hosts if h.hostname != hostname]
# generate public and private key
print("generating keys ...")
subprocess.run(["mkdir", "-p", SSH_KEY_DIR])
key_file = f'{SSH_KEY_DIR}/{hostname.replace(".", "_")}'
if os.path.exists(key_file):
os.remove(key_file)
os.remove(f'{key_file}.pub')
subprocess.run(["ssh-keygen", "-t", "ed25519", "-C", f"'key for {hostname}'", "-f", key_file, "-q"])
new_config_data = SSHConfig(
ID=ID,
hostname=hostname,
user=user,
port=port,
key_file=key_file
)
with open(f'{key_file}.pub') as file:
public_key = file.read().strip()
dataclass_to_file(CONFIG_FILE, hosts + [new_config_data])
print("... wizard done.")
print()
print(f'PUBLIC KEY: {REVERSED}{public_key}{RESET}')
print()
print("To connect to the VM follow these steps:")
print(f"\t1. copy the public key to the cloud-init drive of the VM. "
f"\n\t this can be done in proxmox")
print(f"\t2. run {REVERSED}ssh {ID}{RESET} to connect to the VM")
def configure(cmd: List[str]):
"""
change the default values of this script
"""
if cmd[0] == "show":
print("Configured values for this script:")
print(f" DEFAULT-USER : {DEFAULT_USER}")
print(f" DEFAULT-PORT : {DEFAULT_PORT}")
print(f" CONFIG-FILE : {CONFIG_FILE}")
print(f" SSH-KEY-DIR : {SSH_KEY_DIR}")
elif cmd[0] == "set" and len(cmd) == 3:
if cmd[1] == "DEFAULT-USER":
...
elif cmd[1] == "DEFAULT-PORT":
...
elif cmd[1] == "CONFIG-FILE":
...
elif cmd[1] == "SSH-KEY-DIR":
...
else:
stderr(f"Invalid cmd for 'configure: {' '.join(cmd)}")
if __name__ == '__main__':
while True:
hosts = file_to_dataclass(CONFIG_FILE)
completer = NestedCompleter.from_nested_dict({
'ssh ': {host.ID for host in hosts},
'remove ': {host.ID for host in hosts},
'add': None,
'list': None,
'help': None,
'exit': None,
'clear': None,
'configure': {
"show", "set"
}
})
try:
text: str = session.prompt(message=">>> ",
completer=completer)
except KeyboardInterrupt:
stderr(HTML("Enter <b>exit</b> to exit the shell or press <b>CTRL-D</b>."))
continue
except EOFError:
stderr("... exiting")
exit(-1)
if text.startswith("ssh"):
cmd = text.split(" ")
try:
result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
if result.stdout:
print(result.stdout)
if result.stderr:
stderr(result.stderr)
except KeyboardInterrupt:
stderr(" Keyboard Interrupt!")
elif text.startswith("remove"):
...
elif text.startswith("add"):
...
elif text.startswith("list"):
list_config()
elif text.startswith("help"):
help_text = {
'ssh <alias>': "Connect to a ssh host by it's alias.",
'remove <alias>': "Remove an ssh host from the config.",
'add': "Run wizard to add a new ssh host.",
'list': "List all ssh hosts.",
'help': "Print this help.",
'exit': "Exit the shell.",
'clear': "Clears the screen.",
'configure [show | set ..]': "Show and change the default values of the wizard."
}
width = max(len(s) for s in help_text)
for cmd in help_text:
print(f'{cmd.ljust(width)} : {help_text[cmd]}')
elif text.startswith("exit"):
break
elif text.startswith("configure"):
_, *cmd = text.split(" ")
configure(cmd)
elif text.startswith("clear"):
clear()
else:
print_formatted_text(HTML(f"Unknown Command: {text}\nEnter <b>help</b> for a list of all commands."))
|
[
"[email protected]"
] | |
e770ee03f163f76ae10f97c7f4917e3649348a06
|
01799c12f6f18573cb132c6706c4d2fd7c56aadc
|
/billings/billing/venv/Scripts/pip3-script.py
|
ce92d9b3396739ad519f1ed29ab68109aff0f4a4
|
[] |
no_license
|
MyPrivatePlace/billing
|
2d1a2ef0fde83ac98c8b1b75ac56ed1b17c27116
|
5bd2ffccaac3863a5909699c70f89ddd363dd184
|
refs/heads/master
| 2020-03-28T10:42:29.653496 | 2018-10-31T19:54:23 | 2018-10-31T19:54:23 | 148,136,514 | 0 | 0 | null | 2018-09-10T10:39:43 | 2018-09-10T10:09:08 | null |
UTF-8
|
Python
| false | false | 395 |
py
|
#!C:\Projects\billings\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
|
[
"[email protected]"
] | |
3cc871344d6720297182aaba7b29ac5e814f33b7
|
2b4e7f8dcf3296bdb33b29b44a83650f5bfab8e1
|
/common/content.py
|
43a8c8ab1da8f1697d3f2ef0dd1ec2649a9305f4
|
[] |
no_license
|
bp72/asd
|
9e42e88f6fe18abfcce52be646649aab11946aaf
|
a687dfba154b2682c521d5a4ee329ef13c84c5a7
|
refs/heads/master
| 2016-09-10T12:42:37.485619 | 2015-06-22T17:50:27 | 2015-06-22T17:50:27 | 37,869,546 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,031 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'bp'
__version__ = (0, 0, 1)
from fields import MD5Field, FilenameField
################################################################################
class File(object):
"""
    File object
>>> with open('./filename.txt', 'w') as f:
... f.write('1')
... f.close()
>>> a = File('filename.txt', 'c4ca4238a0b923820dcc509a6f75849b')
>>> a.filename
'filename.txt'
>>> a.md5sum
'c4ca4238a0b923820dcc509a6f75849b'
>>> a.filepath()
'./filename.txt'
>>> import os
>>> os.unlink('./filename.txt')
"""
md5sum = MD5Field()
filename = FilenameField()
def __init__(self, filename, md5, root=None):
self.root = root or '.'
self.filename = filename
self.md5sum = md5
def filepath(self):
return '{}/{}'.format(self.root, self.filename)
# end of class File
################################################################################
|
[
"[email protected]"
] | |
1884b26999b578c08e920c4f7f1ae2e648715491
|
174d1c8465550eeb356a698e370828c4854ac883
|
/chapter04/qt04_QTextEdit.py
|
1afeb7d0415818bda0b65def2e78652ca439d518
|
[] |
no_license
|
Junkiwang/PyQtUI
|
a34876da8fc65b546f7e5348eaad7b9c1e54321d
|
d93a793d18c4bfc117ca374ae28a2a71631c2121
|
refs/heads/master
| 2020-03-18T23:45:13.314811 | 2018-07-09T05:58:13 | 2018-07-09T05:58:13 | 135,425,386 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,449 |
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Junki
from PyQt5.QtWidgets import QApplication, QTextEdit, QWidget, QVBoxLayout, QPushButton
import sys
class textEditDemo(QWidget):
def __init__(self, parent=None):
super(textEditDemo, self).__init__(parent)
        self.setWindowTitle('QTextEdit例子')  # "QTextEdit example"
        self.resize(300, 300)
        self.textEdit = QTextEdit()
        self.btnPress0 = QPushButton('获取输入内容')  # "Get the entered content"
        self.btnPress1 = QPushButton('显示文本')  # "Show plain text"
        self.btnPress2 = QPushButton('显示Html')  # "Show HTML"
layout = QVBoxLayout()
layout.addWidget(self.textEdit)
layout.addWidget(self.btnPress0)
layout.addWidget(self.btnPress1)
layout.addWidget(self.btnPress2)
self.setLayout(layout)
self.btnPress0.clicked.connect(self.getText)
self.btnPress1.clicked.connect(self.btnPress1_Clicked)
self.btnPress2.clicked.connect(self.btnPress2_Clicked)
def getText(self):
        print('获取到文本框中的输入内容:%s' % self.textEdit.toPlainText())  # "content entered in the text box: %s"
    def btnPress1_Clicked(self):
        self.textEdit.setPlainText('Hello PyQt5!\n单击按钮。')  # "Click the button."
    def btnPress2_Clicked(self):
        self.textEdit.setHtml('<font color="red" size="6"><red>Hello PyQt5!<br>单击按钮。</red></font>')  # "Click the button."
if __name__ == '__main__':
app = QApplication(sys.argv)
win = textEditDemo()
win.show()
sys.exit(app.exec_())
|
[
"[email protected]"
] | |
e57b674fc4450a28f95cfb01f1c0395260b4adec
|
3ae12bedf5c32d91fe148d49cfa0cfb59651e43e
|
/backend/users/admin.py
|
71f60e56d93c75c186127f3a31f3e6620af645ac
|
[] |
no_license
|
aminuolawale/personal_store
|
cb3aa4a09b5392d4cd7d400c44787d8ae4fab9ec
|
9ae2da507140430af519f27edc23340948db9e55
|
refs/heads/master
| 2023-01-03T12:01:35.291757 | 2020-11-06T21:45:25 | 2020-11-06T21:45:25 | 308,445,011 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 123 |
py
|
from django.contrib import admin
from .models import User, Address
admin.site.register(User)
admin.site.register(Address)
|
[
"[email protected]"
] | |
bf055d3d9a0f6250e6e0336a5e27ccf9328377c7
|
0a118de91d880058dd2b9301d81ffa3ffd17514a
|
/benchmarking/smartseq2/merge_picard_metrics/merge_picard_mets.py
|
a39d568b22a0d409d3946b10422bf79c73dfc4ec
|
[] |
no_license
|
garyluu/skylab
|
9b15aee18f1240122331eef6de8cc04e8212bf81
|
319d0ac57654d14056669dc836f894d482891dbc
|
refs/heads/master
| 2020-03-13T08:51:55.944993 | 2018-05-24T13:42:59 | 2018-05-24T13:42:59 | 131,052,488 | 0 | 4 | null | 2018-04-25T19:13:26 | 2018-04-25T19:13:25 | null |
UTF-8
|
Python
| false | false | 4,167 |
py
|
from crimson import picard
import pandas as pd
import numpy as np
from google.cloud import storage
import json
from os.path import basename
import sys
import requests
import argparse
def retrieve_workflow_outputs(cromwell_uuid, output_name):
# load cromwell credential
logins = json.load(open('/usr/secrets/broad-dsde-mint-dev-cromwell.json'))
metadata_url = "https://cromwell.mint-dev.broadinstitute.org/api/workflows/v1/" + cromwell_uuid + "/metadata?expandSubWorkflows=false"
r = requests.get(
metadata_url,
auth=(logins['cromwell_username'], logins['cromwell_password']))
data = r.json()
# load output files
files = data['outputs'][output_name]
return (files)
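# Assumed shape of the Cromwell response (paths are illustrative):
# data['outputs'][output_name] is a list of GCS paths such as
#   ["gs://broad-dsde-mint-dev-cromwell-execution/.../SRR123.alignment_summary_metrics", ...]
# merge_picard_metrics below strips the bucket prefix and downloads each blob.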
def merge_picard_metrics(files, metric_name):
"""
    The pipeline outputs Picard QC metrics at the single cell/sample level.
    This function is called to merge/aggregate QC metrics by metric type and then merge multiple QC measurements
    into a single matrix file. In this file, each column is a sample/cell and each row is a QC metric.
    :param files: metric files from pipeline outputs
    :param metric_name: metric name with workflow name and subworkflow name as prefix, such as 'run_pipelines.RunStarPipeline.alignment_summary_metrics'
"""
# set up auth
client = storage.Client()
bucket = client.get_bucket('broad-dsde-mint-dev-cromwell-execution')
# load cromwell credential
logins = json.load(open('/usr/secrets/broad-dsde-mint-dev-cromwell.json'))
# initial output
mets = {}
for kk in range(0, len(files)):
fc = files[kk]
fc = fc.replace('gs://broad-dsde-mint-dev-cromwell-execution/', '')
blob = bucket.get_blob(fc)
met_name = basename(fc)
# sample name is prefix of file name
sample_name = met_name.split('.')[0]
with open(met_name, 'wb') as file_obj:
blob.download_to_file(file_obj)
# use picard package parse out picard output, a json file is returned
parsed = picard.parse(met_name)
class_name = parsed['metrics']['class']
        # Alignment metrics return multiple lines; only output the PAIRED reads (third) line
if class_name == "picard.analysis.AlignmentSummaryMetrics":
## only parse out pair reads
met = parsed['metrics']['contents'][2]
        # sometimes (very rarely) insert size metrics also return multi-line results that include TANDEM repeats, but we only output the first line.
elif class_name == "picard.analysis.InsertSizeMetrics":
            # if the element count is less than 21, it means the insert size metrics returned multi-line results.
if len(parsed['metrics']['contents']) < 21:
met = parsed['metrics']['contents'][0]
else:
met = parsed['metrics']['contents']
else:
# other metrics(so far) only return one line results.
met = parsed['metrics']['contents']
mets[sample_name] = met
merged = pd.DataFrame.from_dict(mets)
return merged
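# The merged frame has one column per sample and one row per Picard metric field,
# e.g. (metric names and values are illustrative):
#                  sampleA   sampleB
#   PF_READS        1000.0    1200.0
#   PCT_PF_READS       1.0       1.0
# because pd.DataFrame.from_dict(mets) uses the dict keys (sample names) as columns.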
def run_merge_metrics(cromwell_uuid, metric_name, output_name):
"""
    call functions to merge metrics and write them to one output file
    :param cromwell_uuid: cromwell workflow uuid
    :param metric_name: a Picard metric name
    :param output_name: the output csv file name
"""
metfiles = retrieve_workflow_outputs(cromwell_uuid, metric_name)
metrics_matrix = merge_picard_metrics(metfiles, metric_name)
metrics_matrix.to_csv(output_name)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-u",
"--cromwell_uuid",
dest="cromwell_uuid",
required=True,
help="The uuid of workflow")
parser.add_argument(
"-m",
"--metrics_name",
dest="met_name",
required=True,
help="The list of Picard metrics class names")
parser.add_argument(
"-o",
"--output_name",
dest="output_name",
required=True,
help="The output file name")
args = parser.parse_args()
run_merge_metrics(args.cromwell_uuid, args.met_name, args.output_name)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
402bc890c5f10dde4ade6ceda9b8d76f67c850f4
|
843d8d6bcba5ceff4f289b9566a6594d8984308d
|
/Week_3/lab-code-simplicity-efficiency/your-code/challenge-1.py
|
a4c913ff1da118ef30a143fa02097131421afc0b
|
[] |
no_license
|
GuillemGodayol/Ironhack_Data_Labs
|
df6e1db00ca3c4370b26f25a06aa9d4fdcd1a821
|
56275959d276d3ef9542efb8c287aa16876d45fa
|
refs/heads/master
| 2020-11-26T16:34:07.971756 | 2019-12-19T21:25:01 | 2019-12-19T21:25:01 | 229,141,062 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,910 |
py
|
"""
This is a dumb calculator that can add and subtract whole numbers from zero to five.
When you run the code, you are prompted to enter two numbers (in the form of English
word instead of number) and the operator sign (also in the form of English word).
The code will perform the calculation and give the result if your input is what it
expects.
The code is very long and messy. Refactor it according to what you have learned about
code simplicity and efficiency.
"""
from num2word import word
print('Welcome to this calculator!')
print('It can add and subtract whole numbers from zero to five')
a = input('Please choose your first number (zero to five): ')
b = input('What do you want to do? plus or minus: ')
c = input('Please choose your second number (zero to five): ')
# I create a diccionary with the different inputs we can have for numbers and its corresponding integer
numbers = {'zero':0, 'one':1, 'two':2, 'three':3, 'four':4, 'five':5, '0':0, '1':1, '2':2, '3':3, '4':4, '5':5}
# I create two lists with the different inputs we can have for operators
op_plus = ['plus', '+']
op_minus =['minus','-']
if a not in numbers or c not in numbers or (b not in op_plus and b not in op_minus): # I check if any of the 3 inputs is wrong
print("I am not able to answer this question. Check your input.")
elif b in op_plus: # if b is a plus, I add a + c
print(word(numbers[a]), 'plus', word(numbers[c]), 'equals',word(numbers[a] + numbers[c]))
else: # else, I substract a - c
if numbers[a] >= numbers[c]:
print(word(numbers[a]), 'minus', word(numbers[c]), 'equals',word(numbers[a] - numbers[c]))
else:
print(word(numbers[a]), 'minus', word(numbers[c]), 'equals negative', word(-(numbers[a] - numbers[c])))
print("Thanks for using this calculator, goodbye :)")
|
[
"[email protected]"
] | |
b42c9a05e876a611b682a0b70a86878e4a80aebb
|
27426683a9af095c4bbbf9bb6f2dce68a49b8302
|
/stacked_generalization.py
|
d19bff9deaba6a8bad04eaedd0a34bd231abbd48
|
[] |
no_license
|
chetanmehra/stacked_generalization-1
|
aae8bcdedd05e59d93063f5058f3c9f875b9bf5b
|
5eab38bcd9cebf0f37f52fb58b4793b85e8f0b1e
|
refs/heads/master
| 2021-06-01T00:22:58.495122 | 2016-05-09T11:31:03 | 2016-05-09T11:31:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 437 |
py
|
from sklearn.cross_validation import StratifiedKFold
import numpy
class StackedGeneralization:
def __init__(self, n_folds, train_data, train_target, test_data):
self.n_folds = n_folds
self.train_data = train_data
self.train_target = train_target
self.test_data = test_data
self.n_classes = len(numpy.unique(train_target))
self.skf = StratifiedKFold(y=train_target, n_folds=n_folds)
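        # Note: this uses the pre-0.18 scikit-learn API. A sketch of the modern
        # equivalent (an assumption, not part of the original code) would be:
        #   from sklearn.model_selection import StratifiedKFold
        #   skf = StratifiedKFold(n_splits=n_folds)
        #   for train_idx, test_idx in skf.split(train_data, train_target): ...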
|
[
"[email protected]"
] | |
a2116f849321bb09ca0351c79ae1a80cf17d6dec
|
588396f66a5c0fbfcf1d2af44386c8f4dca95abf
|
/sanjiaoxing.py
|
c045ef04118103c5a2613365e5f8cf7601af0c9d
|
[] |
no_license
|
yuki9965/PAT_python
|
219dc4deedf097bbb41b325f538f8a5bb806104d
|
5a7ad358d9beaeb9e4c47a4026248cd5d2268b5b
|
refs/heads/master
| 2021-05-04T18:41:35.403984 | 2017-10-06T05:19:18 | 2017-10-06T05:19:18 | 105,956,338 | 1 | 0 | null | 2017-10-06T01:15:10 | 2017-10-06T01:15:10 | null |
UTF-8
|
Python
| false | false | 325 |
py
|
#-*- coding=utf-8 -*-
__author__ = 'Yaicky'
sides = map(int, raw_input().strip().split())
sides.sort()
longside = (sides[2])**2
shortsides = (sides[0])**2 + (sides[1])**2
if longside > shortsides:
    print (u"钝角三角形")  # "obtuse triangle"
elif shortsides > longside:
    print (u"锐角三角形")  # "acute triangle"
else:
    print(u"直角三角形")  # "right triangle"
|
[
"[email protected]"
] | |
387635873635283c5290831c6f2104f6d7e1fed8
|
aeb2f0bb7b01f87a1b6c65b88b216bed47025fe5
|
/experiment/ex_025_predict.py
|
db89c037080c832fffa5c1b6a6ffee69035c39e7
|
[] |
no_license
|
kurupical/riiid
|
7e68239cd50243fbb734bf433d60ebd7469cb180
|
7bab580ce03d03873748a6afc91092c11871465f
|
refs/heads/master
| 2023-03-30T04:15:54.109815 | 2021-04-04T01:20:33 | 2021-04-04T01:20:33 | 302,828,112 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,041 |
py
|
from datetime import datetime as dt
from feature_engineering.feature_factory import \
FeatureFactoryManager, \
TargetEncoder, \
CountEncoder, \
MeanAggregator, \
TagsSeparator, \
UserLevelEncoder, \
NUniqueEncoder, \
ShiftDiffEncoder
import pandas as pd
import glob
import os
import tqdm
import lightgbm as lgb
import pickle
import riiideducation
import numpy as np
from logging import Logger, StreamHandler, Formatter
import shutil
import time
import warnings
warnings.filterwarnings("ignore")
model_dir = "../output/ex_025/20201022082802"
data_types_dict = {
'row_id': 'int64',
'timestamp': 'int64',
'user_id': 'int32',
'content_id': 'int16',
'content_type_id': 'int8',
'task_container_id': 'int16',
'user_answer': 'int8',
'answered_correctly': 'int8',
}
prior_columns = ["prior_group_responses", "prior_group_answers_correct"]
def get_logger():
formatter = Formatter("%(asctime)s|%(levelname)s| %(message)s")
logger = Logger(name="log")
handler = StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def run(debug,
model_dir,
kaggle=False):
if kaggle:
files_dir = "/kaggle/input/riiid-split10/*.pickle"
else:
files_dir = "../input/riiid-test-answer-prediction/split10_base/*.pickle"
logger = get_logger()
# environment
env = riiideducation.make_env()
df_question = pd.read_csv("../input/riiid-test-answer-prediction/questions.csv",
dtype={"bundle_id": "int32",
"question_id": "int32",
"correct_answer": "int8",
"part": "int8"})
df_lecture = pd.read_csv("../input/riiid-test-answer-prediction/lectures.csv",
dtype={"lecture_id": "int32",
"tag": "int16",
"part": "int8"})
# model loading
models = []
for model_path in glob.glob(f"{model_dir}/*model*.pickle"):
with open(model_path, "rb") as f:
models.append(pickle.load(f))
# data preprocessing
logger = get_logger()
feature_factory_dict = {}
feature_factory_dict["tags"] = {
"TagsSeparator": TagsSeparator()
}
for column in ["content_id", "user_id", "content_type_id", "prior_question_had_explanation",
"tags1", "tags2", "tags3", "tags4", "tags5", "tags6",
("user_id", "content_type_id"), ("user_id", "prior_question_had_explanation")]:
is_partial_fit = column == "content_id"
is_onebyone = "content_id" in column
if type(column) == str:
feature_factory_dict[column] = {
"CountEncoder": CountEncoder(column=column, onebyone=is_onebyone),
"TargetEncoder": TargetEncoder(column=column, is_partial_fit=is_partial_fit, onebyone=is_onebyone)
}
else:
feature_factory_dict[column] = {
"CountEncoder": CountEncoder(column=list(column), onebyone=is_onebyone),
"TargetEncoder": TargetEncoder(column=list(column), is_partial_fit=is_partial_fit, onebyone=is_onebyone)
}
for column in ["part", ("user_id", "tag"), ("user_id", "part"), ("content_type_id", "part")]:
if type(column) == str:
feature_factory_dict[column] = {
"CountEncoder": CountEncoder(column=column)
}
else:
feature_factory_dict[column] = {
"CountEncoder": CountEncoder(column=list(column))
}
feature_factory_dict["user_id"]["MeanAggregatorTimestamp"] = MeanAggregator(column="user_id",
agg_column="timestamp",
remove_now=False)
feature_factory_dict["user_id"]["MeanAggregatorPriorQuestionElapsedTime"] = MeanAggregator(column="user_id",
agg_column="prior_question_elapsed_time",
remove_now=True)
feature_factory_dict["user_id"]["ShiftDiffEncoder"] = ShiftDiffEncoder(groupby="user_id",
column="timestamp")
feature_factory_dict["content_id"]["MeanAggregatorPriorQuestionElapsedTime"] = MeanAggregator(column="content_id",
agg_column="prior_question_elapsed_time",
remove_now=True)
feature_factory_manager = FeatureFactoryManager(feature_factory_dict=feature_factory_dict,
logger=logger)
for model_id, fname in enumerate(glob.glob(files_dir)):
logger.info(f"loading... {fname}")
df = pd.read_pickle(fname)
df["answered_correctly"] = df["answered_correctly"].replace(-1, np.nan)
df["prior_question_had_explanation"] = df["prior_question_had_explanation"].fillna(-1).astype("int8")
if debug:
df = df.head(1000)
df = pd.concat([pd.merge(df[df["content_type_id"] == 0], df_question,
how="left", left_on="content_id", right_on="question_id"),
pd.merge(df[df["content_type_id"] == 1], df_lecture,
how="left", left_on="content_id", right_on="lecture_id")]).sort_values(["user_id", "timestamp"])
feature_factory_manager.fit(df, is_first_fit=True)
iter_test = env.iter_test()
df_test_prev = pd.DataFrame()
df_test_prev1 = pd.DataFrame()
answered_correctlies = []
user_answers = []
i = 0
t = time.time()
for (df_test, df_sample_prediction) in iter_test:
i += 1
logger.info(f"[time: {int(time.time() - t)}iteration {i}: data_length: {len(df_test)}")
# 前回のデータ更新
if len(df_test_prev) > 0: # 初回のみパスするためのif
answered_correctly = df_test.iloc[0]["prior_group_answers_correct"]
answered_correctly = [int(x) for x in answered_correctly.replace("[", "").replace("'", "").replace("]", "").replace(" ", "").split(",")]
user_answer = df_test.iloc[0]["prior_group_responses"]
user_answer = [int(x) for x in user_answer.replace("[", "").replace("'", "").replace("]", "").replace(" ", "").split(",")]
answered_correctlies.extend(answered_correctly)
user_answers.extend(user_answer)
df_test_prev1["answered_correctly"] = answered_correctly
df_test_prev1["user_answer"] = user_answer
df_test_prev1["answered_correctly"] = df_test_prev1["answered_correctly"].replace(-1, np.nan)
df_test_prev1["prior_question_had_explanation"] = \
df_test_prev1["prior_question_had_explanation"].fillna(-1).astype("int8")
feature_factory_manager.fit(df_test_prev1, partial_predict_mode=True, onebyone_mode=True)
df_test_prev1 = pd.DataFrame()
if debug:
update_record = 50
else:
update_record = 150
# update1
if len(df_test_prev) > update_record:
df_test_prev["answered_correctly"] = answered_correctlies
df_test_prev["user_answer"] = user_answers
# df_test_prev = df_test_prev.drop(prior_columns, axis=1)
df_test_prev["answered_correctly"] = df_test_prev["answered_correctly"].replace(-1, np.nan)
df_test_prev["prior_question_had_explanation"] = df_test_prev["prior_question_had_explanation"].fillna(-1).astype("int8")
feature_factory_manager.fit(df_test_prev, partial_predict_mode=True, onebyone_mode=False)
df_test_prev = pd.DataFrame()
answered_correctlies = []
user_answers = []
        # fetch and compute features for the current batch
# logger.info(f"[time: {int(time.time() - t)}dataload")
logger.info(f"merge... ")
w_df1 = pd.merge(df_test[df_test["content_type_id"] == 0], df_question, how="left", left_on="content_id",
right_on="question_id")
w_df2 = pd.merge(df_test[df_test["content_type_id"] == 1], df_lecture, how="left", left_on="content_id",
right_on="lecture_id")
df_test = pd.concat([w_df1, w_df2]).sort_values(["user_id", "timestamp"])
df_test["tag"] = df_test["tag"].fillna(-1)
df_test["correct_answer"] = df_test["correct_answer"].fillna(-1)
df_test["bundle_id"] = df_test["bundle_id"].fillna(-1)
logger.info(f"transform... ")
df_test["prior_question_had_explanation"] = df_test["prior_question_had_explanation"].astype("float16").fillna(-1).astype("int8")
df = feature_factory_manager.partial_predict(df_test)
df.columns = [x.replace(" ", "_") for x in df.columns]
logger.info(f"other... ")
# predict
predicts = []
cols = models[0].feature_name()
for model in models:
predicts.append(model.predict(df[cols]))
df["answered_correctly"] = np.array(predicts).transpose().mean(axis=1)
df_sample_prediction = pd.merge(df_sample_prediction[["row_id"]],
df[["row_id", "answered_correctly"]],
how="inner")
env.predict(df_sample_prediction)
df_test_prev = df_test_prev.append(df[cols + ["user_id", "tags"]])
df_test_prev1 = df[cols + ["user_id", "tags"]]
if i < 5:
df_test_prev.to_csv(f"{i}.csv")
if __name__ == "__main__":
run(debug=True,
model_dir=model_dir)
|
[
"[email protected]"
] | |
0702087eed1caf59c86a54c11a4482b18f7b120e
|
b0346d8d798a8534fb2e1c0f1f98b4038e23d1ba
|
/Modetool/wsgi.py
|
7e2c4b744a0f08c2f3c78b30af8c415c12c9cb53
|
[] |
no_license
|
pavelcerny/modetool
|
ed1237f1ac54b617eed7161341ab640e52190fe3
|
ba5379e6b2604e1c1b0c5a84fec01ab0ef4e5e41
|
refs/heads/master
| 2020-03-29T12:36:41.111251 | 2018-09-23T08:30:26 | 2018-09-23T08:30:26 | 149,908,494 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 394 |
py
|
"""
WSGI config for Modetool project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Modetool.settings")
application = get_wsgi_application()
|
[
"[email protected]"
] | |
1234f26b6c4eeb7584ae2a210bca4db698d88a26
|
e5712ee7ff8e013b33d0ee236252909997429b15
|
/Python/Sets/No Idea.py
|
7378798bad44140fa629cac23a0e92ac26634898
|
[] |
no_license
|
shubhamkatore/HackerRank
|
fdb031b2875eebcf63b0f7dc5c996f8f80fc42ac
|
11b75a356987d3aa63901413994bffb8d33b50bb
|
refs/heads/master
| 2021-05-05T06:10:47.537066 | 2018-06-24T06:41:12 | 2018-06-24T06:41:12 | 118,781,433 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 223 |
py
|
n,m=map(int,input().split(' '))
narr=map(int,input().split(' '))
a=set(map(int,input().split(' ')))
b=set(map(int,input().split(' ')))
ha=0
for i in narr:
if i in a:
ha+=1
if i in b:
ha-=1
print(ha)
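# Worked example derived from the logic above (values are illustrative):
# narr = [1, 5, 3], a = {3, 1}, b = {5, 7}
# -> 1 scores +1, 5 scores -1, 3 scores +1, so the printed happiness is 1.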
|
[
"[email protected]"
] | |
e66e93413063fb93740bd8dbb7b6721fabef46c9
|
22adb6a4cbd88a5d5e8b006b07fbdd03a23dca97
|
/update_scheduler.py
|
945c39766368bcc821432e3d79db6b9ded1f8f97
|
[] |
no_license
|
shatteroff/flask_CU_price_checker
|
71719bf6865a0775923909f43a67af8cb0c74f22
|
a285cd70905d95ec452cdb68acf14705e3011cef
|
refs/heads/master
| 2022-12-14T08:52:41.408014 | 2020-12-30T09:30:42 | 2020-12-30T09:30:42 | 241,875,724 | 0 | 0 | null | 2022-07-06T20:29:15 | 2020-02-20T12:14:07 |
Python
|
UTF-8
|
Python
| false | false | 738 |
py
|
import datetime
from apscheduler.schedulers.blocking import BlockingScheduler
from config import Config
from redis_helper import RedisHelper
scheduler = BlockingScheduler()
redis_helper = RedisHelper()
@scheduler.scheduled_job('cron', misfire_grace_time=3000, hour=Config.hour_for_update, minute=Config.minute_for_update)
def update_prices():
print(f'{datetime.datetime.now()}\tUpdate started')
conn = Config.conn
redis_helper.update_date()
redis_helper.load_prices(conn)
redis_helper.add_product(conn)
conn.close()
print(f'{datetime.datetime.now()}\tUpdate ended')
@scheduler.scheduled_job('interval', minutes=5)
def timed_job():
print('Test scheduler is run every 5 minutes.')
scheduler.start()
|
[
"[email protected]"
] | |
37e0fb4dbe4d99d999a4a4ff25c33d7f504d8fc8
|
ab574f7511fa15e5ea50a26f26e3e38f7e33505a
|
/win_2018/scipy/special/_ufuncs_cxx.py
|
65fc513447b7d344b151f7ba228174ebe12f7257
|
[] |
no_license
|
zclongpop123/maya_python_packages
|
49d6b340512a2580bc8c14ae6281ca3f57017acd
|
4dd4a48c41749443ac16053d20aec04e9d2db202
|
refs/heads/master
| 2021-11-30T01:49:41.846727 | 2021-11-17T01:47:08 | 2021-11-17T01:47:08 | 49,186,909 | 16 | 9 | null | 2017-03-07T00:13:41 | 2016-01-07T06:48:35 |
Python
|
UTF-8
|
Python
| false | false | 288 |
py
|
def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__, '_ufuncs_cxx.pyd')
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
|
[
"[email protected]"
] | |
139a60ffd6e82195e835f691c53c0f317ab5a8d9
|
acf7457d3a799cb9bff12686d2d616688bcd4b5b
|
/packages/python/plotly/plotly/validators/heatmap/_yperiod.py
|
6496c7ed1592b867d1b2a5946e177c084910c381
|
[
"MIT"
] |
permissive
|
plotly/plotly.py
|
f4f61639f08160f16195efc95b5901dc5a937346
|
975a704074f01c078e0fdfa32bdf17130bf89e69
|
refs/heads/master
| 2023-09-06T06:15:08.340035 | 2023-08-24T12:28:14 | 2023-08-24T12:28:14 | 14,579,099 | 14,751 | 2,989 |
MIT
| 2023-09-08T19:55:32 | 2013-11-21T05:53:08 |
Python
|
UTF-8
|
Python
| false | false | 470 |
py
|
import _plotly_utils.basevalidators
class YperiodValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="yperiod", parent_name="heatmap", **kwargs):
super(YperiodValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {"ytype": "scaled"}),
**kwargs,
)
|
[
"[email protected]"
] | |
41f2df2137a227386f0dece011dcf1d628037fd7
|
ad544b38ec09828cda1b1918f407975bc79bf976
|
/missioncontrol/mc/mc/views.py
|
82f5e002d54b800f164e42ee9229c4612ff2bd76
|
[] |
no_license
|
mattvenn/earth-to-mars
|
6de13606f3f8087da40e8ed0543a03e0093c25fb
|
c2b0064ef87c3d095d231587ee3ef48b00360bfd
|
refs/heads/master
| 2021-01-10T07:29:17.557441 | 2016-03-17T16:34:42 | 2016-03-17T16:34:42 | 45,628,116 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,754 |
py
|
from mc import app
from mc import db
from sqlalchemy.exc import IntegrityError
import datetime
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash, jsonify, make_response, send_file
from contextlib import closing
from flask_admin.contrib.sqla import ModelView
import time
from wtforms import TextAreaField, TextField, IntegerField, FloatField, SelectField, PasswordField
from wtforms import validators
from flask_wtf import Form
from flask_wtf.file import FileField, FileAllowed, FileRequired
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from mc.models import Teams, School, Sample, Answers, Questions, GroupGraph, Photo, Panorama
from graphing import submit_graph, update_group_graph, get_group_graph_name
from werkzeug import secure_filename
import os
class SecureView(ModelView):
def is_accessible(self):
if 'logged_in' in session.keys():
return True
def inaccessible_callback(self, name, **kwargs):
# redirect to login page if user doesn't have access
return redirect(url_for('login', next=request.url))
@app.teardown_appcontext
def shutdown_session(exception=None):
db.session.remove()
# tested
def get_teams():
return Teams.query.all()
class LoginForm(Form):
username = TextField('Username', [validators.Required()])
password = PasswordField('Password', [validators.Required()])
def validate(self):
rv = Form.validate(self)
if not rv:
return False
if self.username.data != app.config['USERNAME']:
self.username.errors.append('Unknown username')
return False
if self.password.data != app.config['PASSWORD']:
self.password.errors.append('bad password')
return False
return True
class AnswerForm(Form):
team = QuerySelectField(query_factory=get_teams, allow_blank=True, blank_text=u'Please choose')
answer = TextAreaField('Answer', [validators.Required()])
def validate(self):
rv = Form.validate(self)
if not rv:
return False
if not self.team.data:
self.team.errors.append('choose a team')
return False
self.answer = Answers(None, self.answer.data, self.team.data)
return True
class PhotoForm(Form):
team = QuerySelectField(query_factory=get_teams, allow_blank=True, blank_text=u'Please choose')
maxx = app.config['MAX_X']
maxy = app.config['MAX_Y']
x = IntegerField('X', [validators.NumberRange(min=0, max=maxx - 1)])
y = IntegerField('Y', [validators.NumberRange(min=0, max=maxy - 1)])
photo = FileField('Image', validators=[
FileRequired(message="you must choose a photo"),
FileAllowed(['jpg', 'png'], message='only images allowed')
])
def validate(self):
rv = Form.validate(self)
if not rv:
return False
if not self.team.data:
self.team.errors.append('choose a team')
return False
return True
class SampleForm(Form):
team = QuerySelectField(query_factory=get_teams, allow_blank=True, blank_text=u'Please choose')
types = app.config['SAMPLE_TYPES']
methane = FloatField('Methane', [validators.NumberRange(min=types['methane']['min'], max=types['methane']['max'])])
temperature = FloatField('Temperature', [validators.NumberRange(min=types['temperature']['min'], max=types['temperature']['max'])])
humidity = FloatField('Humidity', [validators.NumberRange(min=types['humidity']['min'], max=types['humidity']['max'])])
maxx = app.config['MAX_X']
maxy = app.config['MAX_Y']
x = IntegerField('X', [validators.NumberRange(min=0, max=maxx - 1)])
y = IntegerField('Y', [validators.NumberRange(min=0, max=maxy - 1)])
def validate(self):
rv = Form.validate(self)
if not rv:
return False
if not self.team.data:
self.team.errors.append('choose a team')
return False
if Sample.query.filter(Sample.x == self.x.data, Sample.y == self.y.data, Sample.team == self.team.data).first():
self.team.errors.append('your team already uploaded this sample')
return False
return True
# tested
def add_school_point(points=1):
school = School.query.order_by(School.timestamp.desc()).first()
if school is not None:
school.points += points
db.session.commit()
# tested
def get_group_id():
try:
group_id = GroupGraph.query.all()[-1].id
except IndexError:
group_id = 0
return group_id
# tested
@app.route('/')
def mission_control():
school = School.query.order_by(School.timestamp.desc()).first()
now = datetime.datetime.now()
end_hour = app.config['END_HOUR']
end_min = app.config['END_MIN']
end_time = datetime.datetime.now().replace(hour=end_hour,minute=end_min,second=0)
delta = end_time - now
mins = delta.total_seconds() / 60
hours = mins / 60
mins = mins % 60
secs = delta.total_seconds() % 60
time_info = { 'now': now.strftime('%H:%M'), 'left': '%02d:%02d' % (hours, mins) }
pan = Panorama.query.first()
pan_info = { 'name': pan.get_pan_name(), 'num': pan.get_num_photos() }
return render_template('mission_control.html', school_info=school, time_info=time_info, pan_info=pan_info, group_id=get_group_id())
# tested
@app.route('/show/samples')
def show_samples():
samples = Sample.query.all()
return render_template('show_samples.html', samples=samples)
# tested
@app.route('/show/graph/<type>')
def show_group_graph(type):
return render_template('show_group_graph.html', type=type, group_id=get_group_id())
# tested
@app.route('/upload/sample', methods=['GET', 'POST'])
def add_sample():
form = SampleForm()
if form.validate_on_submit():
sample = Sample()
form.populate_obj(sample)
db.session.add(sample)
db.session.commit()
add_school_point()
submit_graph(sample) # make a graph
#update_group_graph(form.sample)
flash('sample logged')
return render_template('sample_submitted.html', sample=sample)
return render_template('add_sample.html', form=form)
class InvalidUsage(Exception):
status_code = 400
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
def make_csv(head, list):
import StringIO
import csv
si = StringIO.StringIO()
cw = csv.writer(si)
cw.writerow(head)
for i in list:
cw.writerow(i.get_csv())
return si
def make_csv_response(head, list, name):
si = make_csv(head, list)
response = make_response(si.getvalue())
response.headers["Content-Disposition"] = "attachment; filename=%s" % name
return response
@app.route('/api/questions')
def api_get_questions():
questions = Questions.query.all()
head = Questions.get_csv_head()
return make_csv_response(head, questions,'questions.csv')
@app.route('/api/answers')
def api_get_answers():
answers = Answers.query.all()
head = Answers.get_csv_head()
return make_csv_response(head, answers,'answers.csv')
# build an archive of all the cool data and zip it
@app.route('/api/zipped-data')
def zipped_data():
import zipfile
import io
import json
memory_file = io.BytesIO()
with zipfile.ZipFile(memory_file, 'w') as zf:
for name in app.config['SAMPLE_TYPES'].keys():
graph_name = get_group_graph_name(name, get_group_id())
zf.write(graph_name, name + '.png')
answers = Answers.query.all()
head = Answers.get_csv_head()
answers_csv = make_csv(head, answers)
zf.writestr('answers.csv', answers_csv.getvalue())
questions = Questions.query.all()
head = Questions.get_csv_head()
questions_csv = make_csv(head, questions)
zf.writestr('questions.csv', questions_csv.getvalue())
samples = Sample.query.all()
data = { 'samples' : [sample.serialise() for sample in samples]}
zf.writestr('samples.json', json.dumps(data))
memory_file.seek(0)
return send_file(memory_file, attachment_filename='missioncontrol.zip', as_attachment=True)
# tested
@app.route('/api/team/<name>')
def api_get_team_by_name(name):
name = name.lower()
teams = get_teams()
for team in teams:
if team.name.lower() == name:
return jsonify(team.serialise())
raise InvalidUsage("no team of that name found")
# tested
@app.route('/api/samples')
def api_get_all_samples():
samples = Sample.query.all()
data = { 'samples' : [sample.serialise() for sample in samples]}
return jsonify(data)
# tested
@app.route('/api/sample/<int:sample_id>')
def api_get_sample(sample_id):
sample = Sample.query.get(sample_id)
if not sample:
raise InvalidUsage("no sample of that id found")
return jsonify(sample.serialise())
# tested
@app.route('/api/sample', methods=['POST'])
def api_add_sample():
if not request.json:
raise InvalidUsage("json needed")
form = SampleForm(data = request.get_json())
form.csrf_enabled = False
if not form.validate():
raise InvalidUsage("invalid data", payload=form.errors)
sample = Sample()
form.populate_obj(sample)
db.session.add(sample)
db.session.commit()
#update_group_graph(form.sample)
add_school_point()
return jsonify(sample.serialise()), 201
# tested
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
session['logged_in'] = True
flash('You were logged in')
return redirect('/admin')
return render_template('login.html', form=form)
# tested
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash('You were logged out')
return redirect('/admin')
# tested
@app.route('/answers/<int:question_id>')
def answers(question_id):
question = Questions.query.get(question_id)
return render_template('answer.html', question=question)
# tested
@app.route('/questions/<int:question_id>', methods=['GET', 'POST'])
def questions(question_id):
form = AnswerForm()
question = Questions.query.get(question_id)
if form.validate_on_submit():
form.answer.question = question
db.session.add(form.answer)
db.session.commit()
add_school_point(10)
flash('answer logged')
return redirect(url_for('answers', question_id=question_id))
return render_template('question.html', question=question, form=form)
@app.route('/upload/photo', methods=['GET', 'POST'])
def add_photo():
form = PhotoForm()
if form.validate_on_submit():
filename = secure_filename(form.photo.data.filename)
form.photo.data.save(os.path.join(app.static_folder, 'photos', filename))
photo = Photo()
form.populate_obj(photo)
photo.image_path = filename
db.session.add(photo)
db.session.commit()
pan = Panorama.query.first()
pan.add_to_panorama(photo)
add_school_point()
return render_template('photo_submitted.html', photo=photo)
return render_template('add_photo.html', form=form)
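# Illustrative request for the /api/sample endpoint above (not part of the original app).
# Field names come from SampleForm; the exact value expected for "team" (e.g. a Team
# primary key) and the host/port are assumptions based on Flask defaults.
#
# curl -X POST http://localhost:5000/api/sample \
#      -H "Content-Type: application/json" \
#      -d '{"team": "1", "methane": 1.2, "temperature": 21.5, "humidity": 45.0, "x": 3, "y": 4}'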
|
[
"[email protected]"
] | |
07216bcd55a48955b32cea2c65be6627df8648d9
|
56ff870edec243b9b4b6d54e15fd95f741a9bd33
|
/settings_dev.py
|
c49d68ea5358f1c59db2320d72f631b35990dca6
|
[
"Apache-2.0"
] |
permissive
|
mushkevych/grazer
|
2a0357c33448fadc6e91528098e0eabf74bc3cd1
|
37254a550eeaaa8125bb1a643d493bcaa785fb25
|
refs/heads/master
| 2016-09-15T20:03:30.653432 | 2015-05-05T06:00:19 | 2015-05-05T06:00:19 | 31,232,304 | 0 | 1 | null | 2015-02-24T00:00:08 | 2015-02-23T22:05:11 |
Python
|
UTF-8
|
Python
| false | false | 594 |
py
|
settings = dict(
# created with: sudo rabbitmqctl add_vhost /hadoop
# set permissions with: sudo rabbitmqctl set_permissions -p /hadoop guest ".*" ".*" ".*"
mq_host='rabbitmq.yourdomain.com',
mq_user_id='MQ_USER',
mq_password='MQ_PASSWORD',
mq_vhost='/grazer',
mq_port=5672,
aws_redshift_host='REDSHIFT_HOST.redshift.amazonaws.com',
aws_redshift_db='DB_NAME',
aws_redshift_user='DB_USER',
aws_redshift_password='DB_PASSWORD',
aws_redshift_port=5439,
mq_timeout_sec=10.0,
aws_redshift_grazer_suffix='_test',
csv_bulk_threshold=64,
)
|
[
"[email protected]"
] | |
41527e638d93cfffa7419214e8a19a547c0222fc
|
7c0cffba0b0e37daee3cf33d3750e1c8a89d1822
|
/Controller/control.py
|
c4c437dd392a25382a5c2fc191f5ec90304aeb1b
|
[] |
no_license
|
ShanghaitechGeekPie/IFTHEN
|
47f0e9ebf51a65ed16ea130139e2a8cc9ff900e9
|
c67b5c925d91553a5e07a9dee84bb8af419b5827
|
refs/heads/master
| 2021-01-18T18:11:42.077635 | 2016-10-15T04:17:24 | 2016-10-15T04:17:24 | 59,354,507 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,190 |
py
|
# Python 3.4.3 #
from apscheduler.schedulers.blocking import BlockingScheduler
from logic.models import Logic, API  # the API model is assumed to live alongside Logic; it is used below but was not imported
import django
import json
import requests
import time
def excute():
commands = Logic.objects.all()
for command in commands:
time_present = time.time()
query = json.loads(command.Q)  # Django model instances use attribute access, not subscripting
action = json.loads(command.A)
time_interval = command.T
time_stamp = command.TimeStamp
if (time_present - time_stamp) % time_interval >= 5:
continue
i = 0
while (i + 4 < len(query)):
API1 = API.objects.get(id = query[i]['API'])
API2 = API.objects.get(id = query[i + 2]['API'])
tmp1 = requests.get(API1.provider.baseurl + API1.slug, data = query[i]['args']).text  # compare response bodies, not Response objects
tmp2 = requests.get(API2.provider.baseurl + API2.slug, data = query[i + 2]['args']).text
if API1.retu in ['int', 'float']:
flag = eval(tmp1 + query[i + 1] + tmp2)
else:
if query[i + 1] == '=':
flag = (tmp1 == tmp2)
else:
flag = (tmp1 != tmp2)
if flag == False:
continue
i = i + 4
API1 = API.objects.get(id = action['API'])
requests.get(API1.provider.baseurl + API1.slug)
sched = BlockingScheduler()
sched.add_job(excute, 'interval', seconds = 5)
sched.start()
|
[
"[email protected]"
] | |
6140826c1e42e213c230cc67aa4e7a4aa67603fd
|
81e87227fb6eee0c6c00608d3913f6c5fb951b41
|
/project_1/task_1.py
|
a6ed401a518727661b498183be37886a29ead373
|
[] |
no_license
|
pierwiastekzminusjeden/Graph-Theory-Course
|
e43b7e8b7dba0945360b09873aa300d778da3638
|
6c95575b3bea397d1b8ad9aeb29d23280dab4a71
|
refs/heads/master
| 2020-03-11T15:35:00.953471 | 2018-07-11T18:52:38 | 2018-07-11T18:52:38 | 130,088,484 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,126 |
py
|
#!/usr/bin/env python3
#############################
#@author Karolina Mizera
#@author Krystian Molenda
#@author Marcin Miś
#############################
#import sys
#sys.path.append('$(src)') #add path to project_11/src or being all files in the same catalog is required
from list import List
from adjmatrix import AdjMatrix
from incidencematrix import IncidenceMatrix
from adjMatrixFile import SaveToFile
import convert
from draw import draw_graph
#Enter first matrix
print('''Import matrix from file.
A - Adjacency Matrix
I - Incidence Matrix
L - Adjacency List
other - exit''')
#@key representation flag
key = input(" ")
fileName = input("Enter file name: ") #enter name of data file. File must be in the same catalog. Examples in catalog /data
if (key in 'AIL') and (fileName != ''):
if key == 'A':
adjMatrix = AdjMatrix
adjMatrix.createAdjMatrixFromFile(adjMatrix,fileName)
elif key == 'I':
incMatrix = IncidenceMatrix
incMatrix.createIncMatrixFromFile(incMatrix,fileName)
elif key == 'L':
_list = List
_list.createListFromFile(_list, fileName)
print(" ")
#conversions
while key in 'AIL' :
if key == 'A':
draw_graph(adjMatrix, 'zad1Graph.png')
print('''Convert representation:
AI - Adjacency Matrix to Incidence Matrix
AL - Adjency Matrix to Adjency List
x - exit''')
key = input(" ")
if key == 'AI':
incMatrix = convert.fromAdjMatrixtoIncidenceMatrix(adjMatrix)
print(incMatrix.matrix)
key = 'I'
elif key == 'AL':
incMatrix = convert.fromAdjMatrixtoIncidenceMatrix(adjMatrix)
_list = convert.fromIncidenceMatrixtoList(incMatrix)
print(_list.matrix)
key = 'L'
elif key == 'I':
print('''Convert representation:
IL - Incidence Matrix to Adjency List
IA - Incidence Matrix to Adjency Matrix
x - exit ''')
key = input(" ")
if key == 'IL':
_list = convert.fromIncidenceMatrixtoList(incMatrix)
print(_list.matrix)
key = 'L'
elif key == 'IA':
_list = convert.fromIncidenceMatrixtoList(incMatrix)
adjMatrix = convert.fromListToAdjMatrix(_list)
print(adjMatrix.matrix)
key = 'A'
elif key == 'L':
print('''Convert representation:
LA - Adjacency List to Adjency Matrix
LI - Adjency List to Incidence Matrix
x - exit''')
key = input(" ")
if key == 'LA':
adjMatrix = convert.fromListToAdjMatrix(_list)
print(adjMatrix.matrix)
key = 'A'
elif key == 'LI':
adjMatrix = convert.fromListToAdjMatrix(_list)
incMatrix = convert.fromAdjMatrixtoIncidenceMatrix(adjMatrix)
print(incMatrix.matrix)
key = 'I'
|
[
"[email protected]"
] | |
4dade9f8a38ec5174c7440af316e5d916ab2f049
|
488a2817b9c55856d367a37fc1d029ebf335f3c7
|
/crawling/cheogajip_scraping.py
|
f6b266219af8026669233763ba9606d556772031
|
[] |
no_license
|
qudals55/chicken-store-visualization
|
18d518df0ad99f10e5d593742d585e0e1e40dcfb
|
d8ac96afc0ae4bdc53fd282f29854b8ff04f0b8e
|
refs/heads/master
| 2020-04-30T21:17:40.395764 | 2019-03-22T07:13:37 | 2019-03-22T07:13:37 | 177,090,052 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,192 |
py
|
import sys
import csv
import re
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from bs4 import BeautifulSoup
def address(state, city) :
return ({ '경기' : '경기도',
'서울' : '서울특별시',
'서울시' : '서울특별시',
'인천' : '인천광역시',
'인천시' : '인천광역시',
'제주' : '제주특별자치도',
'전남' : '전라남도',
'전북' : '전라북도',
'경북' : '경상북도',
'경남' : '경상남도',
'부산' : '부산광역시',
'울산' : '울산광역시',
'대구' : '대구광역시',
'충북' : '충청북도',
'충남' : '충청남도',
'세종시' : '세종특별자치시',
'세종' : '세종특별자치시',
'대전' : '대전광역시',
'강원' : '강원도',
'광주' : '광주광역시',
}.get(state, state), city)
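# Illustrative example of the mapping above (not part of the original scraper):
# address('서울', '강남구') returns ('서울특별시', '강남구'); unknown prefixes pass through unchanged.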
def main():
driver = webdriver.PhantomJS()
idx = 1
f = open('cheogajip.csv', 'w', encoding='utf-8', newline='')
wr = csv.writer(f, delimiter=',')
wr.writerow(['매장이름', '시도정보', '시군구정보', '매장주소'])
while idx <= 105:
driver.get("http://www.cheogajip.co.kr/bbs/board.php?bo_table=store&page=" + str(idx))
html = driver.page_source
soup = BeautifulSoup(html, 'html.parser')
chickens = soup.select('#fboardlist > div > table > tbody > tr')
for chicken in chickens :
shopName = chicken.select('td[class=td_date]')[1].text
shopAdd = chicken.select_one('td[class=td_subject]').text
shopAdd = re.sub('\n', '', shopAdd)
shopAddSplit = shopAdd.split()
state, city = address(shopAddSplit[0], shopAddSplit[1])
wr.writerow([shopName, state, city, shopAdd])
idx = idx + 1
f.close()
print('end')
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
0ab0e2bee34871966bf2bcc9d4aeefec6b1a9287
|
0196ff82d8022ae81aa7e5d6f0797aa746e40a08
|
/huobi_crawler.py
|
5f3bce850fd40654dd7db5e2624f5d6ca32fa605
|
[] |
no_license
|
Sungbin17/coin_exchange
|
85d691c954f5e58087c7504c5b11451658a3e604
|
4fdf0ffa5d180fac6726516a261fc359f7888c5a
|
refs/heads/master
| 2020-03-18T22:08:28.442186 | 2018-06-07T09:01:11 | 2018-06-07T09:01:11 | 135,327,506 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,172 |
py
|
import urllib.request, json
from urllib.request import Request, urlopen
huobi_symbol_api = 'https://api.huobipro.com/v1/common/symbols'
response = Request(huobi_symbol_api, headers={'User-Agent': 'Mozilla/5.0'})
data = json.loads(urlopen(response).read())
data = data.get('data')
print(type(data))
['BTC', 'BCH', 'ETH', 'ETC', 'LTC', 'EOS', 'XRP', 'OMG', 'DASH', 'ZEC', 'ADA', 'STEEM', 'IOTA', 'SOC', 'CTXC', 'ACT', 'BTM', 'BTS', 'ONT', 'IOST', 'HT', 'TRX', 'DTA', 'NEO', 'QTUM', 'SMT', 'ELA', 'VEN', 'THETA', 'SNT', 'ZIL', 'XEM', 'NAS', 'RUFF', 'HSR', 'LET', 'MDS', 'STORJ', 'ELF', 'ITC', 'CVC', 'GNT', 'BCH', 'ETH', 'LTC', 'ETC', 'EOS', 'OMG', 'XRP', 'DASH', 'ZEC', 'ADA', 'STEEM', 'IOTA', 'POLY', 'KAN', 'LBA', 'WAN', 'BFT', 'BTM', 'ONT', 'IOST', 'HT', 'TRX', 'SMT', 'ELA', 'WICC', 'OCN', 'ZLA', 'ABT', 'MTX', 'NAS', 'VEN', 'DTA', 'NEO', 'WAX', 'BTS', 'ZIL', 'THETA', 'CTXC', 'SRN', 'XEM', 'ICX', 'DGD', 'CHAT', 'WPR', 'LUN', 'SWFTC', 'SNT', 'MEET', 'YEE', 'ELF', 'LET', 'QTUM', 'LSK', 'ITC', 'SOC', 'QASH', 'MDS', 'EKO', 'TOPC', 'MTN', 'ACT', 'HSR', 'STK', 'STORJ', 'GNX', 'DBC', 'SNC', 'CMT', 'TNB', 'RUFF', 'QUN', 'ZRX', 'KNC', 'BLZ', 'PROPY', 'RPX', 'APPC', 'AIDOC', 'POWR', 'CVC', 'PAY', 'QSP', 'DAT', 'RDN', 'MCO', 'RCN', 'MANA', 'UTK', 'TNT', 'GAS', 'BAT', 'OST', 'LINK', 'GNT', 'MTL', 'EVX', 'REQ', 'ADX', 'AST', 'ENG', 'SALT', 'EDU', 'BIFI', 'BCX', 'BCD', 'SBTC', 'BTG', 'EOS', 'OMG', 'IOTA', 'ADA', 'STEEM', 'POLY', 'KAN', 'LBA', 'WAN', 'BFT', 'ZRX', 'AST', 'KNC', 'ONT', 'HT', 'BTM', 'IOST', 'SMT', 'ELA', 'TRX', 'ABT', 'NAS', 'OCN', 'WICC', 'ZIL', 'CTXC', 'ZLA', 'WPR', 'DTA', 'MTX', 'THETA', 'SRN', 'VEN', 'BTS', 'WAX', 'HSR', 'ICX', 'MTN', 'ACT', 'BLZ', 'QASH', 'RUFF', 'CMT', 'ELF', 'MEET', 'SOC', 'QTUM', 'ITC', 'SWFTC', 'YEE', 'LSK', 'LUN', 'LET', 'GNX', 'CHAT', 'EKO', 'TOPC', 'DGD', 'STK', 'MDS', 'DBC', 'SNC', 'PAY', 'QUN', 'AIDOC', 'TNB', 'APPC', 'RDN', 'UTK', 'POWR', 'BAT', 'PROPY', 'MANA', 'REQ', 'CVC', 'QSP', 'EVX', 'DAT', 'MCO', 'GNT', 'GAS', 'OST', 'LINK', 'RCN', 'TNT', 'ENG', 'SALT', 'ADX', 'EDU']
base_currency_list = []
for base_currency in data:
base_currency_list.append(base_currency.get('base-currency').upper())
print(base_currency_list)
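# Illustrative sketch of the shape of each entry in `data`, based only on how it is read
# above (any field other than 'base-currency' is an assumption):
# {'base-currency': 'btc', 'quote-currency': 'usdt', ...}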
|
[
"[email protected]"
] | |
9c6a07dcfbdf352a591d9e7fe0d53f19f2b65bf9
|
c486c7bfe16804a8fd28b2f8d833b44df1a0f553
|
/topi/python/topi/x86/conv3d_transpose.py
|
ad035d34c3a13e715a1247ed4ba5c11825a4df4f
|
[
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
TexasInstruments/tvm
|
9ef8ebc5825030e595ea8a667387ea430dd92259
|
c78ea878a05e262a30c3ffa250c1479a695ecf33
|
refs/heads/dev
| 2023-08-03T19:59:53.639979 | 2020-06-15T22:29:11 | 2020-06-18T03:22:39 | 225,893,305 | 14 | 3 |
Apache-2.0
| 2020-07-08T14:34:47 | 2019-12-04T15:02:32 |
Python
|
UTF-8
|
Python
| false | false | 2,238 |
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
# pylint: disable=no-value-for-parameter
"""Conv3D Transpose schedule on x86"""
from tvm import te
from ..util import traverse_inline
from .. import nn
from .conv3d import conv3d_ncdhw, schedule_conv3d_ncdhw
def conv3d_transpose_ncdhw(data, kernel, strides, padding, out_dtype):
data_pad, kernel_transform = \
nn.conv3d_transpose_ncdhw_preprocess(data, kernel, strides, padding, out_dtype)
# reuse conv3d_ncdhw implementation
return conv3d_ncdhw(data_pad, kernel_transform, (1, 1, 1),
(0, 0, 0), (1, 1, 1), out_dtype)
def schedule_conv3d_transpose_ncdhw(outs):
"""Create schedule for tensors"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = schedule_conv3d_ncdhw(outs)
def _callback(op):
if 'unpack_ncdhwc' in op.tag:
conv_out = op.input_tensors[0]
# retrieve data
data_vec = conv_out.op.input_tensors[0]
data_pad = data_vec.op.input_tensors[0]
data_dilate = data_pad.op.input_tensors[0]
s[data_dilate].compute_inline()
s[data_pad].compute_inline()
# retrieve kernel
kernel_vec = conv_out.op.input_tensors[1]
kernel_transform = kernel_vec.op.input_tensors[0]
s[kernel_transform].compute_inline()
traverse_inline(s, outs[0].op, _callback)
return s
|
[
"[email protected]"
] | |
6d1a9a8a9639cc6ec0093c2eb0ba511f0654f894
|
4a9ed707b3b9adffd3e2f98c39040cede7dc0cc8
|
/garage/envs/mujoco/gather/ant_gather_env.py
|
7c0e3c54faf07ce45971d590b3efea02eb491053
|
[
"MIT"
] |
permissive
|
flyers/garage
|
f0c568bd850a0770a0f13d6c550318338049a462
|
745dff67d6777b78c5faaf2f2bfafcaf6f71d575
|
refs/heads/master
| 2020-04-15T15:38:42.500998 | 2019-01-29T11:56:29 | 2019-01-29T11:56:29 | 164,802,583 | 0 | 0 |
MIT
| 2019-01-29T12:11:13 | 2019-01-09T06:28:48 |
Python
|
UTF-8
|
Python
| false | false | 161 |
py
|
from garage.envs.mujoco import AntEnv
from garage.envs.mujoco.gather import GatherEnv
class AntGatherEnv(GatherEnv):
MODEL_CLASS = AntEnv
ORI_IND = 6
|
[
"[email protected]"
] | |
d978aee1a03ddbd4eec8a61a6d7792586dbbeb14
|
a25aa09af984d08084a395f9b6df427d3756f11a
|
/35.Search Insert Position.py
|
39611cdd7879d9f73747e131d4d9446fec4691dc
|
[] |
no_license
|
luyihsien/leetcodepy
|
31971e851a4ae77942a5d9e3ff07faea6e504c66
|
a54bd09f4b28f106196a6cd8a0f9c056bcd237e6
|
refs/heads/master
| 2020-05-19T13:21:57.854086 | 2019-10-16T14:23:00 | 2019-10-16T14:23:00 | 185,037,569 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 724 |
py
|
''''
class Solution:
def searchInsert(self, nums: List[int], target: int) -> int:
'''
class Solution:
def searchInsert(self, nums, target):
if len(nums)==0:
return 0
for i in range(len(nums)):
if nums[i]==target:
return i
for i in range(1,len(nums)):
if nums[i]>target and nums[i-1]<target:
return i
if max(nums)<target:
return len(nums)
if min(nums)>target:
return 0
'''
Accepted
Details
Runtime: 52 ms, beats 90.74% of Python3 submissions for Search Insert Position
Memory usage: 13.5 MB, beats 96.03% of Python3 submissions for Search Insert Position
'''
|
[
"[email protected]"
] | |
5cd7a65e1435a46c2cb3ade49bcdca5022026d27
|
0e461c3ca52347efe1df6d7bf4dc9754a1a60bc9
|
/send_text.py
|
86ce81b32de0ab9867834519f07bec56065df80c
|
[] |
no_license
|
nena6/Udacitiy-Programming_foundations_with_Python
|
ebb92837ca7cd002d84b290a7bae6fa55031630c
|
c06a5d32835b603d2fc82dec7e0bec80fdd77226
|
refs/heads/master
| 2021-08-31T19:06:04.076417 | 2017-12-15T13:43:33 | 2017-12-15T13:43:33 | 113,049,865 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 402 |
py
|
from twilio.rest import Client
# Your Account SID from twilio.com/console
account_sid = "ACc7c6527d71af857207a258a1f0ffeb5e"
# Your Auth Token from twilio.com/console
auth_token = "85b43dbae62be16d3831e23cdda59bb0"
client = Client(account_sid, auth_token)
message = client.messages.create(
to="+385913653829",
from_="+12568264529",
body="Hello from the other side!")
print(message.sid)
|
[
"[email protected]"
] | |
165063736ccff5a78e51a0ed056d596280d583b3
|
532a912beca7dc986d2f3ff34fb22edd692932f0
|
/deploy.py
|
cef1301b10c0ac8cd26827be8c47d552f8b4aa27
|
[] |
no_license
|
aGHz/aptgregator
|
ce1539feaeb9bd2cf607a1fea334b415028b7cc4
|
2abed7bebd88e1ad4de2b60b4d5cf668e8d907e8
|
refs/heads/master
| 2021-01-23T03:12:58.027835 | 2014-04-08T01:11:27 | 2014-04-08T01:11:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,939 |
py
|
#!/bin/python
import getopt
import os
import subprocess
import sys
def syntax():
print """Generate instructions to deploy this new installation of aptgregator
After review, the output can be run manually or piped through sh
Syntax:
python deploy.py [restart] [options]
Options:
--flow Initializes git-flow and pulls branch develop if remote is set
--venv Sets up a new virtualenv, installs packages
--nginx= The path to Nginx sites-enabled, will symlink app's nginx.conf
Leave blank for a sensible default, i.e. '--nginx='
--auto= user[:group] under which the Paste process should run at boot
If absent, app will not be set up for starting on boot
If group is absent, it is assumed to match the user
Will also start the app right after deployment
Probably pointless without --nginx
restart Reconfigures the app and restarts it
--nginx When used after restart, will also restart Nginx
Only needed when the Nginx configuration changed
Examples:
Typical activation of a fresh WebCore template setup
python deploy.py --venv
Typical for development, running builtin server without Nginx our autostart
python deploy.py --flow --venv
Typical for production environments
python deploy.py --venv --auto=`id -nu`:`id -ng` --nginx
After making changes to the Python code
python deploy.py restart
"""
def restart(nginx):
pass
def flow():
try:
branches = subprocess.check_output(['git', 'branch'], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
return [
"", "# " + '-' * 72,
"# WARNING: This is not a git repository",
"# " + '-' * 72,
"",
]
if 'develop' in branches:
return [
"", "# " + '-' * 72,
"# WARNING: --flow requested but git-flow already installed",
"# " + '-' * 72,
"",
]
out = [
"", "# " + '-' * 72,
"# Initialize git-flow",
"# " + '-' * 72,
"git flow init",
"git checkout develop", # Possibly redundant
"",
]
try:
remotes = subprocess.check_output(['git', 'remote'], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
remotes = ''
if 'origin' in remotes:
out += [
"# Set the proper upstream for branch develop",
"git branch --set-upstream develop origin/develop",
"git pull",
"git submodule update --init --recursive", # Possibly redundant
"",
]
return out
def venv():
out = [
"", "# " + '-' * 72,
"# Initialize virtualenv",
"# " + '-' * 72,
"virtualenv --no-site-packages --distribute .",
". bin/activate",
"",
"# Install dependencies",
"pip install -r etc/packages.pip",
"python src/setup.py develop",
"cd src && python setup.py develop && cd ..",
"",
]
return out
def nginx(path, linux):
out = []
if not path:
if linux:
path = '/etc/nginx/sites-enabled'
else:
path = '/usr/local/etc/nginx/sites-enabled'
if not os.path.isdir(path):
out = [
"", "# " + '-' * 72,
"# ERROR: Nginx config not found: {0}".format(path),
"# " + '-' * 72,
"",
]
out += [
"", "# " + '-' * 72,
"# Sym-link to the Nginx config from the proper location",
"# " + '-' * 72,
"{0}ln -s /Users/tek/src/aptgregator/etc/nginx.conf {1}".format('sudo ' if linux else '', os.path.join(path, 'aptgregator')),
"",
]
out += ["# Reload the Nginx config"]
if linux:
out += ["sudo /etc/init.d/nginx reload"]
else:
out += ["nginx -s reload"]
out += [""]
return out
def auto(user_group, linux):
[user, group] = (user_group + ':' + user_group).split(':')[:2] # trick to make group=user if absent
out = [
"", "# " + '-' * 72,
"# Configure initd.sh with user {user}:{group}".format(user=user, group=group),
"# " + '-' * 72,
"sed -i '' 's|__user__|{user}|' bin/initd.sh".format(user=user),
"sed -i '' 's|__group__|{group}|' bin/initd.sh".format(group=group),
"",
]
if linux:
out += [
"# Sym-link to the init.d script from the proper location",
"sudo ln -s /Users/tek/src/aptgregator/bin/initd.sh /etc/init.d/aptgregator",
"sudo update-rc.d aptgregator defaults",
"",
"echo",
"echo " + '-' * 80,
"echo ' To no longer start on boot, run:'",
"echo ' sudo /etc/init.d/aptgregator stop'",
"echo ' sudo update-rc.d -f aptgregator remove'",
"echo " + '-' * 80,
"echo",
"",
]
else:
out += [
"# Sym-link to the LaunchAgent plist from the proper location",
"ln -s /Users/tek/src/aptgregator/bin/launchAgent.plist ~/Library/LaunchAgents/com.aptgregator.tek.production.plist",
"launchctl load ~/Library/LaunchAgents/com.aptgregator.tek.production.plist",
"echo",
"echo " + '-' * 80,
"echo ' To no longer start on boot, run:'",
"echo ' launchctl stop com.aptgregator.tek.production'",
"echo ' launchctl remove com.aptgregator.tek.production'",
"echo ' rm ~/Library/LaunchAgents/com.aptgregator.tek.production.plist'",
"echo " + '-' * 80,
"echo",
"",
]
return out
def start(opt, linux):
out = []
if '--auto' in opt and '--nginx' not in opt:
out += [
"", "# " + '-' * 72,
"# WARNING: --auto set without --nginx",
"# The production server will start but FastCGI will not be served by Nginx",
"# This is potentially okay if it was specifically intended",
"# " + '-' * 72,
"",
]
if '--auto' in opt:
out += [
"", "# " + '-' * 72,
"# Start the production server",
"# " + '-' * 72,
"echo",
"echo " + '-' * 80,
"echo ' Starting production server'",
]
if linux:
out += [
"echo ' sudo /etc/init.d/aptgregator start'",
"sudo /etc/init.d/aptgregator start",
]
else:
out += [
"echo ' launchctl start com.aptgregator.tek.production'",
"launchctl start com.aptgregator.tek.production",
]
out += [
"echo " + '-' * 80,
"",
]
out += [
"", "# " + '-' * 72,
"# Server instructions",
"# " + '-' * 72,
"echo",
"echo " + '-' * 80,
"echo ' To run the local development server:'",
"echo ' ./etc/local.ini'",
]
if '--auto' in opt:
out += [
"echo " + '-' * 80,
"echo ' To control the local production server:'",
]
if linux:
out += ["echo ' sudo /etc/init.d/aptgregator start|stop|restart'"]
else:
out += ["echo ' launchctl start|stop com.aptgregator.tek.production'"]
out += [
"echo " + '-' * 80,
"echo",
"",
]
return out
def main(argv):
linux = sys.platform.startswith('linux')
if '--nginx' in argv:
# Silly getopt fix for potentially empty option
argv[argv.index('--nginx')] = '--nginx='
opt = getopt.getopt(argv, 'h', [
'venv',
'flow',
'auto=',
'nginx=',
'help',
])
argv = opt[1]
opt = dict(opt[0])
if '-h' in opt or '--help' in opt or (len(opt) == 0 and len(argv) == 0):
syntax()
return 1
if 'restart' in argv:
restart('--nginx' in argv)
return 1
out = [
"",
"cd /Users/tek/src/aptgregator",
]
if '--flow' in opt:
out += flow()
if '--venv' in opt:
out += venv()
if '--nginx' in opt:
out += nginx(opt['--nginx'], linux)
if '--auto' in opt:
out += auto(opt['--auto'], linux)
out += start(opt, linux)
out += [
"",
"# " + '-' * 72,
"# ",
"# If the script is correct, run the following to deploy:",
"# ",
"# python {0}".format(' '.join(sys.argv) + ' | sh'),
"# ",
"# " + '-' * 72,
"",
]
print "\n".join(out)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
[
"[email protected]"
] | |
32b5c6c58b4c8eeaa2951f17ab0bf0380b2b5467
|
a92b6ed6ba2091e4d4ec9613c6f6affe6e655c40
|
/main.py
|
b3135588610a604ee17520ff6956c0d1e5caabfe
|
[] |
no_license
|
rushali09/Python-Coffee-Machine
|
f3f8770449fb42772ab970f6a52eb43250f856b9
|
572a3b45b414ba8723f972de500fe98d7e9bfcf3
|
refs/heads/main
| 2023-02-17T15:56:41.170337 | 2021-01-21T08:07:39 | 2021-01-21T08:07:39 | 331,557,917 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,594 |
py
|
MENU = {
"espresso": {
"ingredients": {
"water": 50,
"coffee": 18,
},
"cost": 1.5,
},
"latte": {
"ingredients": {
"water": 200,
"milk": 150,
"coffee": 24,
},
"cost": 2.5,
},
"cappuccino": {
"ingredients": {
"water": 250,
"milk": 100,
"coffee": 24,
},
"cost": 3.0,
}
}
profit = 0
resources = {
"water": 300,
"milk": 200,
"coffee": 100,
}
def is_resource_sufficient(user_ordered_ingredients):
"""Returns True when ingredients are sufficient, False when ingredients are insufficient"""
for item in user_ordered_ingredients:
if user_ordered_ingredients[item] > resources[item]:
print(f"Sorry, there is not enough {item}")
return False
return True
def process_coins():
"""Returns the total calculated from coins inserted"""
print("Please insert coins")
total = int(input("How many quarters?: "))* 0.25
total += int(input("How many dimes?: "))* 0.1
total += int(input("How many nickles?: "))* 0.05
total += int(input("How many pennies?: "))* 0.01
return total
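# Worked example (illustrative): 4 quarters + 2 dimes + 1 nickel + 0 pennies
# gives 4*0.25 + 2*0.10 + 1*0.05 + 0*0.01 = 1.25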
def is_transaction_successful(money_received, drink_cost):
"""Returns True when payment is sufficient and False when money received by user is insufficient"""
if money_received >= drink_cost:
change = round(money_received - drink_cost, 2)
print(f"Here is ${change} in change")
global profit
profit += drink_cost
return True
else:
print("Sorry, there is not enough money. Money Refunded")
return False
def make_coffee(drink_name, order_ingredients):
"""deduct the required ingredients from the resources"""
for item in order_ingredients:
resources[item] -= order_ingredients[item]
print(f"Here is your {drink_name} ☕")
hello_kitty = True
while hello_kitty:
choice = input("What would you like? (espresso/latte/cappuccino): ")
if choice == "off":
hello_kitty = False
elif choice == "report":
print(f"Water: {resources['water']}ml")
print(f"Milk: {resources['milk']}ml")
print(f"Coffee: {resources['coffee']}g")
print(f"Money: ${profit}")
else:
drink = MENU[choice]
if is_resource_sufficient(drink["ingredients"]):
payment = process_coins()
if is_transaction_successful(payment, drink["cost"]):
make_coffee(choice, drink["ingredients"])
|
[
"[email protected]"
] | |
341cddee35f5b6e4b78500da685d57d1aaee67e7
|
47ee13dce0907de438461ea7e33832a09f1ba362
|
/corpus/c4bf475a-19a9-11de-ba4e-3babc36f5e84/solution/python/test
|
d33d6575b8e97b88cf40da8de6cfc8937109eb57
|
[] |
no_license
|
Marta81/tapperdan
|
1c6624b12d33a0a0fc7906c11c8c0de88d0d3e05
|
d9d27f47ea378ad04ea0f91ce82b699b1e1d8f5d
|
refs/heads/master
| 2021-01-18T20:42:09.957943 | 2009-03-26T03:18:02 | 2009-03-26T03:18:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 46 |
#!/usr/bin/env python
print "Hello, World!"
|
[
"[email protected]"
] | ||
713a24a7ccdd51e993b29e4b2f542ce44c4723f6
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03448/s790400785.py
|
17c0ac19efb39097ef60a9bdde7f5b5bfd5d9764
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 337 |
py
|
def resolve():
A = int(input())
B = int(input())
C = int(input())
X = int(input())
ans = []
for a in range(A + 1):
for b in range(B + 1):
c = (X - 500 * a - 100 * b) / 50
if c <= C and c >= 0:
ans.append((a, b, c))
print((len(set(ans))))
return
resolve()
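# Illustrative run (assumed input, not from the original source): entering
# A=1, B=1, C=2, X=100 counts (a=0, b=0, c=2) and (a=0, b=1, c=0), so it prints 2.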
|
[
"[email protected]"
] | |
807c48c6962ab4fd329836f97eaeb05bb435f2bf
|
d93b5c753ac9c9309d946cc8cfde005027fc1859
|
/No6_1.py
|
82c1e33002a93e0d5c1e77e851c0cd200b024e6b
|
[] |
no_license
|
injoinD0913/Python-case
|
12e0d53ee493e748d51240666f8bb699c21fbbb3
|
13f2cdebf815aaf0367bde1372f7720a792b6d36
|
refs/heads/master
| 2020-09-07T10:17:47.884970 | 2019-11-15T15:55:58 | 2019-11-15T15:55:58 | 220,750,132 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 669 |
py
|
# _*_ coding:utf-8 _*_
# Development team:
# Developer: Administrator
# Created: 2019/10/12 20:34
# File name: No6_1.py
# IDE: PyCharm
# Problem: the Fibonacci sequence.
# Analysis: the Fibonacci sequence is the series 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, ...
# It can be defined recursively:
# F0 = 0 (n=0)
# F1 = 1 (n=1)
# Fn = F[n - 1] + F[n - 2] (n >= 2)
# Print the requested number of Fibonacci terms
i = int(input())
def fib(n):
if n == 1:
return [1]
if n == 2:
return [1, 1]
fibs = [1, 1]
for i in range(2, n):
fibs.append(fibs[-1] + fibs[-2])
return fibs
print(fib(i))
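# Illustrative: with input 5 the program prints fib(5) == [1, 1, 2, 3, 5]
# (this implementation starts the sequence at F1 = F2 = 1 rather than F0 = 0).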
|
[
"[email protected]"
] | |
c8dd76f68361f90919bc5ca4d3b4e315a3f3ab89
|
fe752040ed8552246e465d4259a73579acf1b623
|
/drift.py
|
35b4acfa8de452bebd0dfbeb10a4c4adf4c33903
|
[] |
no_license
|
abdifatah87/imt3003
|
2d119c4868fd868de02f78b5716430a38f73f6b4
|
28c471032944fbbd78fcf18b483a2b91b308bd39
|
refs/heads/master
| 2020-12-13T06:53:04.286139 | 2020-01-26T17:34:50 | 2020-01-26T17:34:50 | 234,341,227 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 685 |
py
|
import os
from openstack import connection
conn = connection.Connection(auth_url= "https://api.skyhigh.iik.ntnu.no:8774/v2.1",
project_name=str(os.getenv("OS_PROJECT_NAME")),
username=str(os.getenv("OS_USERNAME")),
password=str(os.getenv("OS_PASSWORD")),
user_domain_id=str(os.getenv("OS_USER_DOMAIN_NAME")),
project_domain_id=str(os.getenv("OS_PROJECT_DOMAIN_ID"))
)
def list_servers(connection):
print("list servers:")
for server in connection.compute.servers():
print(server)
list_servers(conn)
|
[
"[email protected]"
] | |
164f7e179ec264ee49337f55cfdcec1944421c2b
|
685e1a25f56109de935d1ad443372d3fff8a2264
|
/lesson8/main.py
|
852514b91d0a45e92292f03dc3c701221fcd5b92
|
[] |
no_license
|
osydorchuk/ITEA2
|
8a8afdcfc08aa96aae3182ff19bc9b173d043a67
|
7e64e9d9843017413705367c1e742c3f83b76d14
|
refs/heads/master
| 2020-06-24T16:38:15.625652 | 2019-09-07T13:58:24 | 2019-09-07T13:58:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 129 |
py
|
print(__name__)
print(globals())
print(locals())
def check_locals():
a = 0
b ="q"
print(locals())
check_locals()
|
[
"[email protected]"
] | |
b1b504761ef386bea3c5ec22159ec1973a0ac635
|
d4c47276c8fbd15240aa228eda04ee8e338caf02
|
/Python/Python Lesson/Second/Lesson9/Sample8.py
|
447d9972d35e1c1f96525406233e419f925a3a61
|
[] |
no_license
|
developer579/Practice
|
a745384450172fb327913c130303ab76492096f1
|
54084468af83afcc44530e757800c8c3678147c1
|
refs/heads/main
| 2023-05-06T01:36:06.222554 | 2021-06-02T07:04:03 | 2021-06-02T07:04:03 | 324,312,009 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 365 |
py
|
import re
ptr = ["TXT","TXT..",".TXT","..TXT"]
str = ["TXT","TXTT","TXTTT","TTXT","TTTXT"]
for valueptr in ptr:
print("------")
pattern = re.compile(valueptr)
for valuestr in str:
res = pattern.search(valuestr)
if res is not None:
m = "o"
else:
m = "x"
mrs = "(pattern)" + valueptr + "(string)" + valuestr + "(match)" + m
print(mrs)
|
[
"[email protected]"
] | |
80fc4b38b7dff6b4f630a8e31f713c5c9b512f3c
|
53163d4129930426c2d7aa650cb1b638d1347d21
|
/lxmert/lxmert/src/tasks/nlvr2_model.py
|
ef93474403461f18461d1da85fb8877b6f6b5364
|
[
"MIT"
] |
permissive
|
fdsig/Transformer-MM-Explainability
|
5e4d9d0c927afd0316311259fc318b325d74628e
|
accc4dd3491d321948e826079ce85f61bb02e0a6
|
refs/heads/main
| 2023-09-03T01:21:27.188260 | 2021-11-17T23:56:49 | 2021-11-17T23:56:49 | 433,759,755 | 1 | 0 |
MIT
| 2021-12-01T09:20:31 | 2021-12-01T09:20:31 | null |
UTF-8
|
Python
| false | false | 1,773 |
py
|
# coding=utf-8
# Copyleft 2019 project LXRT.
import torch.nn as nn
from lxrt.modeling import GeLU, BertLayerNorm
from lxrt.entry import LXRTEncoder
from param import args
class NLVR2Model(nn.Module):
def __init__(self):
super().__init__()
self.lxrt_encoder = LXRTEncoder(
args,
max_seq_length=20
)
self.hid_dim = hid_dim = self.lxrt_encoder.dim
self.logit_fc = nn.Sequential(
nn.Linear(hid_dim * 2, hid_dim * 2),
GeLU(),
BertLayerNorm(hid_dim * 2, eps=1e-12),
nn.Linear(hid_dim * 2, 2)
)
self.logit_fc.apply(self.lxrt_encoder.model.init_bert_weights)
def forward(self, feat, pos, sent):
"""
:param feat: b, 2, o, f
:param pos: b, 2, o, 4
:param sent: b, (string)
:param leng: b, (numpy, int)
:return:
"""
# Pairing images and sentences:
# The input of NLVR2 is two images and one sentence. In batch level, they are saved as
# [ [img0_0, img0_1], [img1_0, img1_1], ...] and [sent0, sent1, ...]
# Here, we flat them to
# feat/pos = [ img0_0, img0_1, img1_0, img1_1, ...]
# sent = [ sent0, sent0, sent1, sent1, ...]
sent = sum(zip(sent, sent), ())
batch_size, img_num, obj_num, feat_size = feat.size()
assert img_num == 2 and obj_num == 36 and feat_size == 2048
feat = feat.view(batch_size * 2, obj_num, feat_size)
pos = pos.view(batch_size * 2, obj_num, 4)
# Extract feature --> Concat
x = self.lxrt_encoder(sent, (feat, pos))
x = x.view(-1, self.hid_dim*2)
# Compute logit of answers
logit = self.logit_fc(x)
return logit
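# Illustrative shapes only (the LXRT encoder, `args` and pretrained weights are assumed
# to be configured elsewhere): forward() expects
#   feat: (batch, 2, 36, 2048), pos: (batch, 2, 36, 4), sent: list of `batch` strings,
# and returns a (batch, 2) logit tensor.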
|
[
"[email protected]"
] | |
0c00cb5df809def448fd1c5f50e41d957f662365
|
e6e3e22f4111e7a9a1c3c8f719a4a00f1a76e36b
|
/ConnectedComp.py
|
3be7256728c3e817679d9c6afafe0a3f9929cadd
|
[] |
no_license
|
GiuliaLovati/Tesy
|
656553b383633c1426abbae7f3da483dd152e238
|
3bb50bfea37c3b0316a479453d629e839aa9a4c4
|
refs/heads/master
| 2022-12-12T00:53:36.020812 | 2020-09-11T17:01:03 | 2020-09-11T17:01:03 | 211,265,687 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,822 |
py
|
import cv2 as cv
import numpy as np
def imshow_components(image, threshold=70):
img = cv.threshold(image, threshold, 255, cv.THRESH_BINARY)[1] # ensure binary
num_labels, labels = cv.connectedComponents(img)
# Map component labels to hue val
label_hue = np.uint8(179*labels/np.max(labels)) #each label gets a different hue
blank_ch = 255*np.ones_like(label_hue)
labeled_img = cv.merge([label_hue, blank_ch, blank_ch]) #each element of the output array will be a concatenation of the elements of the input arrays
# cvt to BGR for display
labeled_img = cv.cvtColor(labeled_img, cv.COLOR_HSV2BGR)
# set bg label to black
labeled_img[label_hue==0] = 0
return labeled_img
#cv.imshow('labeled.png', labeled_img)
#cv.waitKey()
def connected_components_for_binaryimg(img):
num_labels, labels = cv.connectedComponents(img)
# Map component labels to hue val
label_hue = np.uint8(179*labels/np.max(labels))
blank_ch = 255*np.ones_like(label_hue)
#print (blank_ch)
labeled_img = cv.merge([label_hue, blank_ch, blank_ch])
# cvt to BGR for display
labeled_img = cv.cvtColor(labeled_img, cv.COLOR_HSV2BGR)
# set bg label to black
labeled_img[label_hue==0] = 0
return labeled_img
#OPERATIONS ON FOUND COMPONENTS:
def equallabels(labels_im, number): #equal to find 5° column of cv.connectedComponentsWithStats for a specific row (number)
numlist=[]
for i in range(labels_im.shape[0]):
for j in range(labels_im.shape[1]):
if labels_im[i][j] == number:
numlist.append(labels_im[i][j])
else:
pass
return len(numlist)
def concompmean(image,thr): #returns np.mean(stats[:,4])
lens=[]
img = cv.threshold(image, thr, 255, cv.THRESH_BINARY)[1]
num_labels, labels_im = cv.connectedComponents(img)
for k in range(num_labels):
newlen = equallabels(labels_im, k)
lens.append(newlen)
print (lens)
return (np.mean(lens))
def selection(image, thr=70): #selection of connected components with pixel area > certain value (valuemean)
img = cv.threshold(image, thr, 255, cv.THRESH_BINARY)[1]
num_labels, labels_im, stats, centroids = cv.connectedComponentsWithStats(img)
#print (stats.shape)
#n° stats rows: n° of connected components
#5th column of stats: number of pixels of that connected component
#other stats columns describe the box that contains each component
areas = stats[:,4]
areas1 = areas.tolist()
valuemean = np.mean(areas1)
print ('Total number of connected components:', len(areas1))
print ('Average area of connected components:', valuemean)
bigareasindex = []
bigareas = []
for i in areas1:
if i>=valuemean:
bigareasindex.append(areas1.index(i))
bigareas.append(i)
print ('Labels of connected components with pixel area higher than average:', bigareasindex) #index 0 : background
print ('Number of pixels of each selected area:', bigareas)
print('')
bigareasarray = np.array([bigareasindex, bigareas]).T
print (bigareasarray)
return bigareasindex, bigareas, bigareasarray
def differentSelection(image, thr=70, number=1): #selection of connected components with pixel area > certain value (valuemean) +/- number times standard deviation
img = cv.threshold(image, thr, 255, cv.THRESH_BINARY)[1]
num_labels, labels_im, stats, centroids = cv.connectedComponentsWithStats(img)
#print (stats.shape)
#n° stats rows: n° of connected components
#5th column of stats: number of pixels of that connected component
#other stats columns describe the box that contains each component
areas = stats[:,4]
areas1 = areas.tolist()
valuemean = np.mean(areas1)
standarddev = np.std(areas1)
print ('Total number of connected components:', len(areas1))
print ('Average area of connected components:', valuemean)
print ('Areas standard deviation:', standarddev)
bigareasindex = []
bigareas = []
for i in areas1:
if i>=(valuemean - (number*standarddev)):
bigareasindex.append(areas1.index(i))
bigareas.append(i)
print ('Labels of selected connected components:', bigareasindex) #index 0 : background
print ('Number of pixels of each selected area:', bigareas)
print('')
bigareasarray = np.array([bigareasindex, bigareas]).T
print (bigareasarray)
return bigareasindex, bigareas, bigareasarray
def newimgbigcomponents(image, bigareasindex, thr=70): #new array image with only the components having area[pixel]> average area of all components
img = cv.threshold(image, thr, 255, cv.THRESH_BINARY)[1]
new= np.zeros_like(img,dtype='int32')
num_labels, labels_im = cv.connectedComponents(img)
hue = range(0, 255, int(255/len(bigareasindex))) #set new colors for the selected components in range(0,255)
for i in range(len(bigareasindex)):
#new += np.where(labels_im == bigareasindex[i], labels_im, 0) #gives problems showing components with label>255
new += np.where(labels_im == bigareasindex[i], hue[i], 0) #selected components are mantained with a new label in range(0,255)
print ('New label for', bigareasindex[i], 'component:', hue[i])
return new, hue
#FINDING EDGES
def FindingUpperEdges(newimg, huenewimg):
edges = np.zeros_like(newimg)
upperlimitx = []
upperlimity = []
for i in range(newimg.shape[1]):
column = newimg[:,i]
colist = column.tolist()
for j in huenewimg[1:]:
try:
print ('column', i, 'upper edge at:', colist.index(j), ', with label:', j)
#if in the i-column, pixels with label equal to one of the selected components are present,
#it finds the index (row) of the first one with that label
edges[colist.index(j)][i] = j
upperlimitx.append(colist.index(j))
upperlimity.append(i)
except ValueError:
pass
return edges, upperlimitx, upperlimity
def FindingLowerEdges(newimg, huenewimg, edges):
lowerlimitx = []
lowerlimity = []
for i in range(newimg.shape[1]):
column = newimg[:,i]
colist = list(reversed(column)) #reversing the column in order to find the last pixel with one of the selected label value
for j in huenewimg[1:]:
try:
print ('column', i, 'lower edge at:', colist.index(j), '(not reversed value), right reversed value:', newimg.shape[0]-colist.index(j), ', with label:', j)
lowerlimitx.append(newimg.shape[0]-colist.index(j))
lowerlimity.append(i)
if colist.index(j) == 0 : #useful if there is a component that ends beyond image limit
edges[newimg.shape[0]-colist.index(j)-1][i] = j #reversing again
else:
edges[newimg.shape[0]-colist.index(j)][i] = j #reversing again
except ValueError:
pass
return edges, lowerlimitx, lowerlimity
#THICKNESS CALCULATION
def Thickness(upperlimity, upperlimitx, lowerlimity, lowerlimitx): #Thickness in pixels
deltacolumn = np.zeros_like(upperlimity)
delta = np.zeros_like(upperlimity)
for i in range(len(upperlimity)):
for j in range(len(lowerlimity)):
if i == j:
delta[i] = lowerlimitx[j] - upperlimitx[i]
deltacolumn[i] = upperlimity[i]
return deltacolumn, delta
#Conversion function has 3 possible arguments: delta, a thickness array in pixels for each column of the selected connected components
#Data type specification: defaults to US data (important for pixel-to-second conversion), specify "ITA" for Italian data
#Value for dielectric const.: defaults to eps = 3.15 from Putzig et al. 2009, typical of pure water ice. For Grima et al. 2009 it is 3.1
def Conversion(delta, datatype = "USA", eps = 3.15):
c = 299792.458 #km/s
if datatype == "USA":
convpx = 0.0375*10**(-6) #US data, MROSH_2001: https://pds.nasa.gov/ds-view/pds/viewProfile.jsp?dsid=MRO-M-SHARAD-5-RADARGRAM-V1
elif datatype == "ITA":
convpx = 0.075*10**(-6) #from 4.3.2.6 TIME ALIGNMENT OF ECHOES paragraph of rdrsis (italian data)
else:
print ('uncorrect datatype, try "USA" or "ITA" ')
deltasec = delta*convpx
print('Thickness [sec]', deltasec)
print('Maximum thickness [microsec]', (deltasec*10**6).max())
deltakm = (deltasec*c)/(2*eps**(0.5))
deltam = deltakm*1000
print ('Thickness [m]:', deltam)
print ('Maximum thickness [m]:', deltam.max())
print ('Average thickness [m]:', deltam.mean())
return deltasec, deltakm, deltam
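# Minimal usage sketch (not part of the original module): the input file name and the
# threshold are assumptions; the calls simply chain the helpers defined above in order.
radargram = cv.imread('radargram.png', cv.IMREAD_GRAYSCALE) # assumed grayscale radargram image
idx, areas, _ = selection(radargram, thr=70) # keep components larger than the mean area
new, hue = newimgbigcomponents(radargram, idx, thr=70) # relabel only the selected components
edges, upx, upy = FindingUpperEdges(new, hue) # first labelled pixel in each column
edges, lox, loy = FindingLowerEdges(new, hue, edges) # last labelled pixel in each column
cols, delta = Thickness(upy, upx, loy, lox) # per-column thickness in pixels
deltasec, deltakm, deltam = Conversion(delta, datatype="USA", eps=3.15) # convert to seconds / km / m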
|
[
"[email protected]"
] | |
9464793a12fd15b36cf79f711c7308ed8e638665
|
e56ad8a3c8b34bed3c5ff0f168beb4ceec19b8bc
|
/test.py
|
3bdc36b350229988e79d2b89c8c32aac239b247f
|
[] |
no_license
|
YoungseogChung/angry_turtle
|
77ba732008abf7433e21a39dc145d9ffde8284cb
|
8d9288c030de3d40d8554aad688a80082ce095c7
|
refs/heads/master
| 2020-05-21T00:57:01.277698 | 2019-05-09T20:08:23 | 2019-05-09T20:08:23 | 185,842,247 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 881 |
py
|
import turtle
import random
import math
player = turtle.Turtle()
player.color("blue")
player.shape("turtle")
player.penup()
player.speed(0)
screen = player.getscreen()
a1 = turtle.Turtle()
a1.color("red")
a1.shape("circle")
a1.penup()
a1.speed(0)
a1.goto(random.randint(-300, 300), random.randint(-300, 300))
a2 = turtle.Turtle()
a2.color("red")
a2.shape("circle")
a2.penup()
a2.speed(0)
a2.goto(random.randint(-300, 300), random.randint(-300, 300))
def turnleft():
player.left(30) # rotate 30 degrees to the left
def turnright():
player.right(30) # rotate 30 degrees to the right
def play():
player.forward(2) # move forward 2 pixels
a1.forward(2)
a2.forward(2)
screen.ontimer(play, 10) # call play() again after 10 ms
screen.onkeypress(turnleft, "Left")
screen.onkeypress(turnright, "Right")
screen.listen()
turtle.done()
# screen.ontimer(play, 10)
|
[
"[email protected]"
] | |
8b22af7888df6c2ed8a9604c7b942d3091b1ae42
|
0039e09b2c18efad98a0c51995b68c9c22582ed0
|
/portfollio/migrations/0010_auto_20200327_1914.py
|
dc3138a3efdf84c6ef75038c142e7b9bfa0314bd
|
[] |
no_license
|
aishmn/base_app
|
b72dee7d4ebea2efbd64208c2e4dfbf6a2085779
|
1fde6cd9c95ccf2ada0cf5b802c11f49d3a75048
|
refs/heads/master
| 2021-05-17T02:58:18.861534 | 2020-03-27T16:35:43 | 2020-03-27T16:35:43 | 250,587,235 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 595 |
py
|
# Generated by Django 3.0.4 on 2020-03-27 13:29
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('portfollio', '0009_blog_category'),
]
operations = [
migrations.AddField(
model_name='blog',
name='creation_date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='blog',
name='slug',
field=models.SlugField(blank=True, null=True),
),
]
|
[
"[email protected]"
] | |
6176590b086fa51c97cf9f07166346416c151b32
|
c1a8dd3a5379caa8124ff0c20f4a0b775874c614
|
/venv/bin/pip3
|
0c0400dbeb62afdbd7d795b71041e7d20d471cef
|
[] |
no_license
|
ssm5/illini
|
25a40833be60c125cf91485d78aaa0506bf3b5c9
|
9ca880e9603790e16b7439ece54502884a2a171d
|
refs/heads/master
| 2021-08-15T03:48:12.666900 | 2017-11-17T08:16:55 | 2017-11-17T08:16:55 | 108,466,970 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 251 |
#!/Users/johnqian/Documents/College/CS196/illini/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"[email protected]"
] | ||
7f1173e8bb1f003e5a7f5f407b9c460188d6b251
|
20406108a91d05b5e05a16fa17329b68d8cbfc7c
|
/src/mario_maze/settings.py
|
7374af9a22637d9afd5737f2054d705de0181241
|
[] |
no_license
|
Olena-Mordas/mario-maze_be
|
d85f81022f66c7c699e5db11cf187451d96d68a0
|
dc2426793149f81ec275ee64ea3d4344e3fa5c99
|
refs/heads/master
| 2023-04-11T02:32:26.307974 | 2021-04-29T14:49:48 | 2021-04-29T14:49:48 | 359,937,585 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,557 |
py
|
"""
Django settings for mario_maze project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-m(bu0w2sl%kzj@&$r+0*b@)gq)zb#@ld&3pq_&5mx=yq+%&*kl'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'api',
'corsheaders'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware'
]
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = (
'http://localhost:4200',
)
ROOT_URLCONF = 'mario_maze.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mario_maze.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
REST_FRAMEWORK = {'DEFAULT_SCHEMA_CLASS':
'rest_framework.schemas.coreapi.AutoSchema',
}
|
[
"[email protected]"
] |