# File: hatfieldcmr/ingest/name.py (repo: bcgov/nr_rfc_processing, license: Apache-2.0)
"""
Contains data ingest related functions
"""
import re
import os.path
from dateutil.parser import parse as dateparser
import typing
from typing import Dict
import cmr
from hatfieldcmr.ingest.file_type import MODISBlobType
MODIS_NAME = "modis-terra"
TITLE_PATTERN_STRING = r"\w+:([\w]+\.[\w]+):\w+"
TITLE_PATTERN = re.compile(TITLE_PATTERN_STRING)
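# e.g. a granule title like "SC:MOD10A1.006:123456789" (hypothetical sample) would
# put "MOD10A1.006" in the capture group that is used as the product name below.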
GRANULE_TITLE_KEY = 'title'
GRANULE_TIME_KEY = 'time_start'
GRANULE_NAME_KEY = 'producer_granule_id'
def format_object_name(meta: Dict, object_name: str) -> str:
"""
Parameters
----------
    meta: Dict
Single Granule metadata JSON response from CMR
object_name: str
Name of object (ex. hdf file, xml file)
Returns
----------
str
Object name for granule.
If insufficient information is available, empty string is returned.
"""
default_value = ""
if meta is None:
return default_value
folder_prefix = ""
try:
folder_prefix = format_object_prefix(meta)
except ValueError:
return ''
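    # Note: this also creates a matching local directory tree for the prefix
    # (exist_ok=True keeps it from failing when the directory already exists).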
os.makedirs(folder_prefix, exist_ok=True)
return f"{folder_prefix}/{object_name}"
def format_object_prefix(meta: Dict):
"""Helper function to generate 'folder prefix' of the bucket object
"""
if not ((GRANULE_TITLE_KEY in meta) and (GRANULE_TIME_KEY in meta) and
(GRANULE_NAME_KEY in meta)):
raise ValueError('granule does not have required keys', meta)
title = meta.get(GRANULE_TITLE_KEY, "")
m = TITLE_PATTERN.match(title)
if m is None:
        raise ValueError('granule does not have well formatted title', title)
product_name = m.groups()[0]
date_string = dateparser(meta.get("time_start")).strftime('%Y.%m.%d')
folder_prefix = format_object_prefix_helper(product_name, date_string)
# f"{MODIS_NAME}/{product_name}/{date_string}"
return folder_prefix
def format_object_prefix_helper(product_name: str, date_string: str):
return f"{MODIS_NAME}/{product_name}/{date_string}"
class BlobPathMetadata:
def __init__(self, product_name: str, date_string: str):
self.product_name = product_name
self.product_name_without_version = product_name[:7].lower()
self.date_string = date_string
self.date = dateparser(date_string)
@staticmethod
def parse(prefix_or_full_name: str):
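        # e.g. "modis-terra/MOD10A1.006/2021.02.08/<file>" (hypothetical prefix)
        # splits into product_name "MOD10A1.006" and date_string "2021.02.08".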
parts = prefix_or_full_name.split(r'/')
if (len(parts) >= 3):
product_name = parts[1]
date_string = parts[2]
return BlobPathMetadata(product_name, date_string)
return None
class MODISFileNameParser:
THUMBNAIL_RE = re.compile(r"BROWSE\.([\w\.]+)\.\d+\.jpg")
@classmethod
def identify_file_type(cls, name: str):
basename = os.path.basename(name)
if ('BROWSE' in basename):
return MODISBlobType.THUMBNAIL
elif ('.hdf.xml' in basename):
return MODISBlobType.METADATA_XML
elif ('.hdf_meta.json' in basename):
return MODISBlobType.METADATA_JSON
elif ('.hdf' in basename):
return MODISBlobType.DATA_HDF
elif ('.tif.aux.xml' in basename):
return MODISBlobType.GEOTIFF_XML
elif ('.tif' in basename):
return MODISBlobType.GEOTIFF
else:
print(f'unknown file name {name}')
return ''
@classmethod
def extract_blob_id(cls, name: str, file_type: MODISBlobType = None):
if file_type is None:
file_type = cls.identify_file_type(name)
if file_type == MODISBlobType.THUMBNAIL:
return cls._extract_blob_id_thumbnail(name)
elif file_type == MODISBlobType.METADATA_XML:
return cls._extract_basename_from_file(name, '.hdf.xml')
elif file_type == MODISBlobType.METADATA_JSON:
return cls._extract_basename_from_file(name, '.hdf_meta.json')
elif file_type == MODISBlobType.DATA_HDF:
return cls._extract_basename_from_file(name, '.hdf')
elif file_type == MODISBlobType.GEOTIFF:
return cls._extract_basename_from_file(name, '.tif')
elif file_type == MODISBlobType.GEOTIFF_XML:
return cls._extract_basename_from_file(name, '.tif.aux.xml')
return ''
@classmethod
def _extract_blob_id_thumbnail(cls, name: str) -> str:
basename = os.path.basename(name)
m = cls.THUMBNAIL_RE.match(basename)
if m is None:
return ''
blob_id = m.groups()[0]
name_includes_dir = len(name.split(r'/')) >= 4
if (name_includes_dir):
product_name_doesnt_match_blob_prefix = cls._check_thumbnail_product_inconsistency(
name, blob_id)
if (product_name_doesnt_match_blob_prefix):
blob_id = cls._fix_thumbnail_product_name_inconsistency(
name, blob_id)
return blob_id
@classmethod
def _check_thumbnail_product_inconsistency(cls, name: str, blob_id: str):
full_name_product_name, blob_id_product_name = cls._extract_product_names(
name, blob_id)
return full_name_product_name != blob_id_product_name
@classmethod
def _fix_thumbnail_product_name_inconsistency(cls, name: str,
blob_id: str):
full_name_product_name, blob_id_product_name = cls._extract_product_names(
name, blob_id)
return blob_id.replace(blob_id_product_name, full_name_product_name)
@classmethod
def _extract_product_names(cls, name: str, blob_id: str):
product_name_with_version = name.split(r'/')[1]
full_name_product_name = product_name_with_version[:7]
blob_id_product_name = blob_id[:7]
return full_name_product_name, blob_id_product_name
@classmethod
def _extract_basename_from_file(cls, name: str, extension: str) -> str:
basename = os.path.basename(name).strip()
extension_len = len(extension)
if (len(basename) > extension_len
and basename[-extension_len:] == extension):
return basename[:-extension_len]
return ''
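

# Illustrative usage sketch, exercising MODISFileNameParser on a hypothetical
# MODIS-style object name (the file name below is an assumption, not real data).
if __name__ == "__main__":
    _example = "modis-terra/MOD10A1.006/2021.02.08/MOD10A1.A2021039.h10v03.006.hdf"
    print(MODISFileNameParser.identify_file_type(_example))  # expected: MODISBlobType.DATA_HDF
    print(MODISFileNameParser.extract_blob_id(_example))     # expected: "MOD10A1.A2021039.h10v03.006"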

# File: twittersmash/management/commands/smash_stuff.py (repo: mrkurt/twittersmash, license: MIT)
import twitter
import datetime
import feedparser
import re
import string
from django.core.management.base import BaseCommand
from optparse import make_option
from twittersmash.models import Feed, TwitterAccount, Message
import pytz
from pytz import timezone
central = timezone('US/Central')
utc = pytz.utc
# Parses the "Tweet Format" in Twitter RSS feeds
twit_re = re.compile(r'^(?P<username>\S+): (?P<message>.*)$')
# Parses out hashtags
tag_pat = r'\#([A-Za-z0-9]+)'
tag_re = re.compile(tag_pat)
class Command(BaseCommand):
help = "Loops through feeds and determines if messages need to be sent to any twitter accounts"
option_list = BaseCommand.option_list + (
make_option('--dryrun', '-D', action='store_true', dest='dryrun', default=False,
help='Go through the motions but commit nothing to Twitter'),
make_option('--quiet', '-q', action='store_true', dest='quiet', default=False,
            help='Don\'t print anything to console'),
make_option('--debug', '-d', action='store_true', dest='debug', default=False,
help='Return debugging information'),
)
def handle(self, *args, **options):
# Get list of TwitterAccounts
quiet = options.get('quiet')
entries_pulled = 0
accounts_skipped = 0
accounts_ready = 0
entries_tweeted = 0
feeds_pulled = 0
messages_added = 0
feeds_checked = 0
messages_sent = []
accounts = TwitterAccount.objects.all().filter(active=True)
for account in accounts:
api = twitter.Api(username=account.username, password=account.password)
if not quiet:
print "Checking %s" % (account,)
feed_list = account.feeds.all()
for f in feed_list:
feeds_checked += 1
if not quiet:
print " - %s" % (f,)
# Get list of feeds whose last_update + polling_rate is less than now
if f.last_checked == None or f.last_checked + \
datetime.timedelta(minutes=f.polling_rate) < datetime.datetime.now():
accounts_ready += 1
# Update timestamp
f.last_checked = datetime.datetime.now()
f.save()
if not quiet:
print " * Pulling feed"
# Pull each feed
d = feedparser.parse(f.url)
feeds_pulled += 1
# Loop through feed
d.entries.reverse()
for entry in d['entries']:
entries_pulled += 1
guid = entry.id
tweeted = entry.updated_parsed
message = entry.title
# TODO: Should probably consider moving
# to dateutil here
tweeted_dt = datetime.datetime(
tweeted[0],
tweeted[1],
tweeted[2],
tweeted[3],
tweeted[4],
tweeted[5],
tzinfo=None
)
tweeted_dt_cst = central.localize(tweeted_dt)
tweeted_dt_utc = tweeted_dt_cst.astimezone(utc)
tweeted_dt = datetime.datetime(
tweeted_dt_utc.utctimetuple()[0],
tweeted_dt_utc.utctimetuple()[1],
tweeted_dt_utc.utctimetuple()[2],
tweeted_dt_utc.utctimetuple()[3],
tweeted_dt_utc.utctimetuple()[4],
tweeted_dt_utc.utctimetuple()[5],
)
msg, created = Message.objects.get_or_create(
guid=guid,
twitter_account=account,
defaults={
'feed': f,
'tweeted': tweeted_dt,
'message': message,
'twitter_account': account,
})
send_to_twitter = False
if created:
messages_added += 1
send_to_twitter, message = self.process_messages(
account=account,
source_feed = f,
message=message,
created=tweeted_dt_utc,
options=options
)
if send_to_twitter:
try:
if not options.get('dryrun'):
status = api.PostUpdate(message[:139])
if not quiet:
print " * Sent to Twitter: '%s' (%s)" % (message, keyword,)
else:
if not quiet:
print " * Dry run: '%s' (%s)" % (message, keyword,)
entries_tweeted += 1
msg.sent_to_twitter = True
msg.save()
                            except Exception as e:
if not quiet:
print " - Failed to send to twitter (%s)" % (e,)
else:
if not quiet:
print " * Checked within the last %s minutes" % (f.polling_rate)
accounts_skipped += 1
if options.get('debug'):
return {
'entries_pulled': entries_pulled,
'accounts_skipped': accounts_skipped,
'accounts_ready': accounts_ready,
'entries_tweeted': entries_tweeted,
'feeds_pulled': feeds_pulled,
'messages_added': messages_added,
'feeds_checked': feeds_checked,
}
def process_messages(self, account, source_feed, message, created, options):
"""
This method determines whether or not a message should be sent
to Twitter. If needed, filters and munging are applied as well.
`account` - A Twitter account instance
`message` - The text of a single tweet
`created` - The date/time at which a Tweet was Tweeted
`options` - A dict of options, the only values used here are 'quiet'
to suppress output.
"""
send_to_twitter = False
quiet = options.get('quiet')
reply_re = re.compile(r'\@%s' % account.username)
# Prepare keywords
keywords = account.philter.lower().strip().split(',')
keywords = map(string.strip, keywords)
if keywords == ['']: keywords = []
# Prep minimum DT
if account.minimum_datetime:
# Stored value here is UTC
min_dt = utc.localize(account.minimum_datetime)
else:
min_dt = None
# Wasn't already in the db
if min_dt and created <= min_dt:
if not quiet:
print " * Skipped because of time restrictions"
else:
            # Remove usernames if needed
if twit_re.search(message) and not account.prepend_names:
message = twit_re.search(message).groups()[1]
if account.prepend_names:
message = "@" + message
# Check to see if this message contains any of the keywords
if keywords:
for keyword in keywords:
if keyword in message.lower():
send_to_twitter = True
break
else:
send_to_twitter = False
# Check to see if the message was directed at this account
if account.philter_replies:
if reply_re.search(message):
send_to_twitter = True
message = reply_re.sub('', message).strip()
if account.strip_tags:
if not quiet:
print " * Removing tags"
message = tag_re.sub('', message)
if account.append_tags:
m = re.findall(tag_pat, message)
if m:
# remove each hashtag
for match in m:
message = tag_re.sub('', message)
# clean up whitespace
message = message.strip()
# append each tag to message
for match in m:
message += " #%s" % (match,)
if account.append_initials and source_feed.initials:
message += " ^%s" % source_feed.initials
# Clean up whitespace
message = message.strip()
# Remove double spaces left from replacements
            message = message.replace('  ', ' ')
        return send_to_twitter, message
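
# Hedged illustration of the filtering above: assuming an account whose philter is
# "python, django", with prepend_names, strip_tags and append_tags off and no minimum
# datetime, a feed entry titled "someuser: shipping the django release" would have the
# "someuser:" prefix stripped, match the "django" keyword, and be returned from
# process_messages() as (True, "shipping the django release").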

# File: tests/test_lib.py (repo: bluefloyd00/snowflet, license: MIT)
import os
import unittest
from snowflet.lib import read_sql
from snowflet.lib import logging_config
from snowflet.lib import extract_args
from snowflet.lib import apply_kwargs
from snowflet.lib import strip_table
from snowflet.lib import extract_tables_from_query
from snowflet.lib import add_database_id_prefix
from snowflet.lib import is_table
from snowflet.lib import add_table_prefix_to_sql
class StringFunctions(unittest.TestCase):
""" Test """
def test_strip_table(self):
""" Test """
self.assertEqual(
strip_table(table_name='"db"."schema"."table"'),
'"db.schema.table"',
"strip_table: wrong table name"
)
def test_extract_tables_from_query(self):
""" Test """
self.assertEqual(
extract_tables_from_query(sql_query=""" select a,b,c from "db"."schema"."table" and db.schema.table not "schema"."table" """),
[ '"db"."schema"."table"', 'db.schema.table' ],
"does not extract the tables properly"
)
class TableFunctions(unittest.TestCase):
""" Test """
def test_is_table(self):
self.assertTrue(
is_table( word='"db"."test"."table1"' ,sql=""" select a.* from "db"."test"."table1" a left join db.test.table2 b on a.id=b.id left join db."test".table3 c on b.id = c.id """),
"select: ok"
)
self.assertTrue(
is_table( word='"db"."test"."table4"' ,sql=""" create table "db"."test"."table4" as select a.* from "db"."test"."table1" a left join db.test.table2 b on a.id=b.id left join db."test".table3 c on b.id = c.id """),
"create - select: ok"
)
def test_add_table_prefix_to_sql(self):
self.assertEqual(
add_table_prefix_to_sql(
sql=""" select a.* from "db1"."test"."table1" a left join db2.test.table2 b on a.id=b.id left join db3."test".table3 c on b.id = c.id """,
prefix="CLONE_1003"
),
""" select a.* from "CLONE_1003_DB1"."TEST"."TABLE1" a left join "CLONE_1003_DB2".TEST.TABLE2 b on a.id=b.id left join "CLONE_1003_DB3"."TEST".TABLE3 c on b.id = c.id """,
"add_table_prefix_to_sql: ok"
)
# def test_extract_tables(self):
# self.assertEqual(
# extract_tables(""" select a.* from "db"."test"."table1" and db.test.table2 and db."test".table3 """),
# ["db.test.table1", "db.test.table2", "db.test.table3"],
# "multiple tables, mix double quotes and not"
# )
# self.assertEqual(
# extract_tables(""" select a.* from "db"."test"."table1" and db.test.table2 and db."test".table1 """),
# ["db.test.table1", "db.test.table2"],
# "returned unique values"
# )
class ReadSql(unittest.TestCase):
""" Test """
def test_class_read_sql_file(self):
""" Test """
sql = read_sql(
file="tests/sql/read_sql.sql",
param1="type",
param2="300",
param3="shipped_date",
param4='trying'
)
# self.assertEqual(
# sql,
# 'select type, shipped_date from "DB_TEST"."SCHEMA_TEST"."TABLE1" where amount > 300',
# "read_sql unit test"
# )
sql = read_sql(
file="tests/sql/read_sql.sql"
)
self.assertTrue(
sql == 'select {param1}, {param3} from "DB_TEST"."SCHEMA_TEST"."TABLE1" where amount > {param2}',
"read_sql file unit test no opt parameters"
)
with self.assertRaises(KeyError):
read_sql(
file="tests/sql/read_sql.sql",
database_id='something'
)
def test_class_read_sql_query(self):
""" Test """
sql = read_sql(
query='select {param1}, {param3} from "db_test"."schema_test"."table1" where amount > {param2}',
param1="type",
param2="300",
param3="shipped_date",
param4='trying'
)
self.assertEqual(
sql,
'select type, shipped_date from "DB_TEST"."SCHEMA_TEST"."TABLE1" where amount > 300',
"read_sql unit test"
)
sql = read_sql(
file="tests/sql/read_sql.sql"
)
self.assertTrue(
sql == 'select {param1}, {param3} from "DB_TEST"."SCHEMA_TEST"."TABLE1" where amount > {param2}',
"read_sql query unit test no opt parameters"
)
with self.assertRaises(KeyError):
read_sql(
file="tests/sql/read_sql.sql",
database_id='something'
)
class FunctionsInLib(unittest.TestCase):
"""
Unittest class for lib functions
"""
def test_extract_args_1_param(self):
content = [
{
"table_desc": "table1",
"create_table": {
"table_id": "table1",
"dataset_id": "test",
"file": "tests/sql/table1.sql"
},
"pk": ["col1", "col2"],
"mock_data": "sql/table1_mocked.sql"
},
{
"table_desc": "table2",
"create_table": {
"table_id": "table2",
"dataset_id": "test",
"file": "tests/sql/table2.sql"
},
"pk": ["col1"],
"mock_data": "sql/table1_mocked.sql"
}
]
self.assertEqual(
extract_args(content, "pk"),
[["col1", "col2"], ["col1"]],
"extracted ok"
)
self.assertEqual(
extract_args(content, "create_table"),
[
{
"table_id": "table1",
"dataset_id": "test",
"file": "tests/sql/table1.sql"
},
{
"table_id": "table2",
"dataset_id": "test",
"file": "tests/sql/table2.sql"
}
],
"extracted ok"
)
def test_add_database_id_prefix(self):
self.yaml = {
"desc": "test",
"tables":
[
{
"table_desc": "table1",
"create_table": {
"table_id": "table1",
"database_id": "test",
},
},
{
"table_desc": "table2",
"create_table": {
"table_id": "table2",
"database_id": "test",
},
}
]
}
add_database_id_prefix(
self.yaml,
prefix='1234'
)
self.assertEqual(
self.yaml
,
{
"desc": "test",
"tables":
[
{
"table_desc": "table1",
"create_table": {
"table_id": "table1",
"database_id": "1234_test",
},
},
{
"table_desc": "table2",
"create_table": {
"table_id": "table2",
"database_id": "1234_test",
},
}
]
},
"prefix properly added to database"
)
if __name__ == "__main__":
logging_config()
unittest.main()

# File: etl/parsers/etw/Microsoft_Windows_AssignedAccess.py (repo: IMULMUL/etl-parser, license: Apache-2.0)
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-AssignedAccess
GUID : 8530db6e-51c0-43d6-9d02-a8c2088526cd
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
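# Each class below is registered via @declare for provider GUID
# 8530db6e-51c0-43d6-9d02-a8c2088526cd and a specific event id/version; its
# `pattern` Struct describes how that event's user-data payload is laid out.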
@declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=10001, version=0)
class Microsoft_Windows_AssignedAccess_10001_0(Etw):
pattern = Struct(
"SID" / WString
)
@declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=10002, version=0)
class Microsoft_Windows_AssignedAccess_10002_0(Etw):
pattern = Struct(
"SID" / WString
)
@declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=10003, version=0)
class Microsoft_Windows_AssignedAccess_10003_0(Etw):
pattern = Struct(
"SID" / WString
)
@declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=10004, version=0)
class Microsoft_Windows_AssignedAccess_10004_0(Etw):
pattern = Struct(
"AppID" / WString
)
@declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=10010, version=0)
class Microsoft_Windows_AssignedAccess_10010_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul
)
@declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=10020, version=0)
class Microsoft_Windows_AssignedAccess_10020_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul
)
@declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=20000, version=0)
class Microsoft_Windows_AssignedAccess_20000_0(Etw):
pattern = Struct(
"SID" / WString,
"UserName" / WString,
"AppID" / WString,
"AppName" / WString
)
@declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=30000, version=0)
class Microsoft_Windows_AssignedAccess_30000_0(Etw):
pattern = Struct(
"File" / CString,
"LineNumber" / Int32ul,
"ErrorCode" / Int32ul,
"ErrorCodeExpanded" / Int32sl
)
@declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=31000, version=0)
class Microsoft_Windows_AssignedAccess_31000_0(Etw):
pattern = Struct(
"ErrorCode" / Int32sl
)
@declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=31001, version=0)
class Microsoft_Windows_AssignedAccess_31001_0(Etw):
pattern = Struct(
"ErrorCode" / Int32sl
)
@declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=31002, version=0)
class Microsoft_Windows_AssignedAccess_31002_0(Etw):
pattern = Struct(
"Custom" / WString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=32000, version=0)
class Microsoft_Windows_AssignedAccess_32000_0(Etw):
pattern = Struct(
"Custom" / WString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=33000, version=0)
class Microsoft_Windows_AssignedAccess_33000_0(Etw):
pattern = Struct(
"Custom" / WString,
"ErrorCode" / Int32ul
)

# File: smarcambuddy/take_a_photo.py (repo: senaranya/SmartCamBuddy, license: MIT)
from capture_image import CaptureImage
if __name__ == '__main__':
"""
This can be directly used from CLI
e.g.: source /home/pi/.smartcambuddy_venv/bin/activate
python smarcambuddy/take_a_photo.py
"""
CaptureImage.trigger()

# File: oyster/celeryconfig.py (repo: jamesturk/oyster, license: BSD-3-Clause)
from oyster.conf import settings
CELERY_IMPORTS = ['oyster.tasks'] + list(settings.CELERY_TASK_MODULES)
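
# Illustrative note (hypothetical value): if oyster.conf.settings defined
# CELERY_TASK_MODULES = ('oyster.ext.foo',), CELERY_IMPORTS would become
# ['oyster.tasks', 'oyster.ext.foo'].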

# File: annotation/black_action/fu.py (repo: windfall-shogi/feature-annotation, license: MIT)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sonnet as snt
import tensorflow as tf
from .drop_mask import make_drop_mask1
from .promotion_mask import make_promotion_mask
from ..boolean_board.black import select_black_fu_board, select_non_black_board
from ..boolean_board.empty import select_empty_board
from ..direction import Direction
from ..piece import Piece
__author__ = 'Yasuhiro'
__date__ = '2018/2/22'
class BlackFuFileLayer(snt.AbstractModule):
def __init__(self, data_format, name='black_fu_file'):
super().__init__(name=name)
self.data_format = data_format
def _build(self, board):
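        # A black FU (pawn) may not be dropped on a file that already holds an
        # unpromoted black FU (the "nifu" rule), so a file is marked available
        # only when no black FU is present anywhere on it.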
fu_board = select_black_fu_board(board=board)
axis = -1 if self.data_format == 'NCHW' else -2
flag = tf.reduce_any(fu_board, axis=axis, keep_dims=True)
flag = tf.logical_not(flag)
repeat_count = [1, 1, 1, 1]
repeat_count[axis] = 9
available_map = tf.tile(flag, repeat_count)
return available_map
class BlackFuDropLayer(snt.AbstractModule):
def __init__(self, data_format, name='black_fu_drop'):
super().__init__(name=name)
self.data_format = data_format
def _build(self, board, black_hand, available_square):
fu_available_file = BlackFuFileLayer(
data_format=self.data_format
)(board)
fu_available_area = make_drop_mask1(data_format=self.data_format)
empty_square = select_empty_board(board=board)
available = tf.logical_and(
            # Files where a FU can be dropped, restricted to ranks 2-9
tf.logical_and(fu_available_file, fu_available_area),
tf.logical_and(
                # Empty squares
empty_square,
                # Whether there is a FU in hand
tf.reshape(
tf.greater_equal(black_hand[:, Piece.BLACK_FU], 1),
[-1, 1, 1, 1]
)
)
)
        # Whether the drop is still valid while in check
available = tf.logical_and(available, available_square)
return available
class BlackFuMoveLayer(snt.AbstractModule):
def __init__(self, data_format, name='black_fu_move'):
super().__init__(name=name)
self.data_format = data_format
def _build(self, board, fu_effect):
non_black_mask = select_non_black_board(board=board)
movable_effect = tf.logical_and(fu_effect[Direction.UP],
non_black_mask)
available_mask = make_drop_mask1(data_format=self.data_format)
non_promoting_effect = {
Direction.UP: tf.logical_and(movable_effect, available_mask)
}
promotion_mask = make_promotion_mask(
direction=Direction.UP, data_format=self.data_format, step_size=1
)
promoting_effect = {
Direction.UP: tf.logical_and(movable_effect, promotion_mask)
}
return non_promoting_effect, promoting_effect

# File: tests/bdd/test_fixures.py (repo: othercodes/connect-devops-testing-library, license: Apache-2.0)
from connect.devops_testing.bdd.fixtures import use_connect_request_dispatcher, use_connect_request_builder
from connect.devops_testing.request import Builder, Dispatcher
def test_should_successfully_initialize_request_builder_in_behave_context(behave_context):
use_connect_request_builder(behave_context)
assert isinstance(behave_context.builder, Builder)
def test_should_successfully_initialize_request_dispatcher_in_behave_context(behave_context):
use_connect_request_dispatcher(behave_context, use_specs=False)
assert isinstance(behave_context.connect, Dispatcher)
assert behave_context.request == {}

# File: kwueBackend/kwue/apps.py (repo: bounswe/bounswe2016group4, license: Apache-2.0)
from django.apps import AppConfig
class KwueConfig(AppConfig):
name = 'kwue'

# File: src/commands/refactor/convert_to_arrow_function.py (repo: PranjalPansuriya/JavaScriptEnhancements, license: MIT)
import sublime, sublime_plugin
import os, traceback
from ...libs import util
from ...libs import FlowCLI
class JavascriptEnhancementsRefactorConvertToArrowFunctionCommand(sublime_plugin.TextCommand):
def run(self, edit, **args):
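        # Converts the classic function expression enclosing the current selection
        # into an arrow function, e.g. (illustrative):
        #   items.map(function (x) { return x * 2; })  ->  items.map((x) => { return x * 2; })
        # The Flow AST supplies the FunctionExpression ranges; the "function" keyword
        # (and any name) is stripped and " => " is inserted in front of the body block.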
view = self.view
selection = view.sel()[0]
flow_cli = FlowCLI(view)
result = flow_cli.ast()
if result[0]:
body = result[1]["body"]
items = util.nested_lookup("type", ["FunctionExpression"], body)
for item in items:
region = sublime.Region(int(item["range"][0]), int(item["range"][1]))
if region.contains(selection):
text = view.substr(region)
if not text.startswith("function"):
return
index_begin_parameter = 8
text = text[index_begin_parameter:].lstrip()
while text[0] != "(" and len(text) > 0:
text = text[1:].lstrip()
block_statement_region = sublime.Region(int(item["body"]["range"][0]), int(item["body"]["range"][1]))
block_statement = view.substr(block_statement_region)
index = text.index(block_statement)
while text[index - 1] == " " and index - 1 >= 0:
text = text[0:index - 1] + text[index:]
index = index - 1
text = text[0:index] + " => " + text[index:]
view.replace(edit, region, text)
break
else:
            sublime.error_message("Cannot convert the function. Some problems occurred.")
def is_enabled(self, **args) :
view = self.view
if not util.selection_in_js_scope(view) :
return False
selection = view.sel()[0]
scope = view.scope_name(selection.begin()).strip()
if "meta.block.js" in scope:
region_scope = util.get_region_scope_last_match(view, scope, selection, "meta.block.js")
else:
region_scope = util.get_region_scope_last_match(view, scope, selection, "meta.group.braces.curly.js")
if not region_scope:
return False
return True
def is_visible(self, **args) :
view = self.view
if not util.selection_in_js_scope(view) :
return False
selection = view.sel()[0]
scope = view.scope_name(selection.begin()).strip()
if "meta.block.js" in scope:
region_scope = util.get_region_scope_last_match(view, scope, selection, "meta.block.js")
else:
region_scope = util.get_region_scope_last_match(view, scope, selection, "meta.group.braces.curly.js")
if not region_scope:
return False
return True | 31.544304 | 111 | 0.634831 | 2,386 | 0.957464 | 0 | 0 | 0 | 0 | 0 | 0 | 263 | 0.105538 |
c0dde640a5c1e4f5414f4bf02dfa8a2f03ee959e | 33,374 | py | Python | apps/xdmac/xdmac_memory_transfer/firmware/at91bootstrap_sam_a5d2_xult.X/scripts/mpconfig/mpconfig.py | Techdisc/Microchip | ea8391c689c4badbe2f9ac5181e21bbd5d9d1e54 | [
"0BSD"
]
| 82 | 2015-02-05T10:29:59.000Z | 2022-03-09T22:13:56.000Z | apps/xdmac/xdmac_memory_transfer/firmware/at91bootstrap_sam_a5d2_xult.X/scripts/mpconfig/mpconfig.py | Techdisc/Microchip | ea8391c689c4badbe2f9ac5181e21bbd5d9d1e54 | [
"0BSD"
]
| 128 | 2015-01-05T00:56:17.000Z | 2022-03-03T19:06:11.000Z | apps/xdmac/xdmac_memory_transfer/firmware/at91bootstrap_sam_a5d2_xult.X/scripts/mpconfig/mpconfig.py | Techdisc/Microchip | ea8391c689c4badbe2f9ac5181e21bbd5d9d1e54 | [
"0BSD"
]
| 219 | 2015-01-01T11:27:14.000Z | 2022-03-25T08:33:54.000Z | import os
import sys
import base64
import fnmatch
from kconfiglib import Kconfig, expr_value, Symbol, Choice, MENU, COMMENT, BOOL, STRING, INT, HEX
from java.awt import BorderLayout, Dimension, FlowLayout
from java.awt.event import ActionListener, MouseEvent
from javax.swing import BorderFactory, BoxLayout, ImageIcon, JButton, JCheckBox, JFileChooser, JFrame, JLabel, JPanel, JRadioButton, JScrollPane, JSplitPane, JTextArea, JTextField, JTree
from javax.swing.event import ChangeEvent, DocumentListener, TreeExpansionListener, TreeSelectionListener, CellEditorListener
from javax.swing.tree import DefaultTreeModel, DefaultMutableTreeNode, DefaultTreeCellRenderer, TreeCellEditor, TreePath
from events import addActionListener
# For icons in code
from org.python.core.util import StringUtil
if 'knodeinfo' in sys.modules:
del sys.modules["knodeinfo"]
from knodeinfo import getNodeInfoString, getNodeName, setKConfig
class PrintLogger():
def info(self, log_string):
print(log_string)
log = PrintLogger()
# If True, use GIF image data embedded in this file instead of separate GIF
# files. See _load_images().
_USE_EMBEDDED_IMAGES = True
def _load_images():
# Loads GIF images, creating the global _*_img ImageIcon variables.
# Base64-encoded images embedded in this script are used if
# _USE_EMBEDDED_IMAGES is True, and separate image files in the same
# directory as the script otherwise.
#
# Using a global variable indirectly prevents the image from being
# garbage-collected. Passing an image to a Tkinter function isn't enough to
# keep it alive.
def load_image(name, data):
var_name = "_{}_img".format(name)
if _USE_EMBEDDED_IMAGES:
globals()[var_name] = ImageIcon(StringUtil.toBytes(base64.b64decode(data)))
else:
            globals()[var_name] = ImageIcon(
                os.path.join(os.path.dirname(__file__), name + ".gif"))
# Note: Base64 data can be put on the clipboard with
# $ base64 -w0 foo.gif | xclip
load_image("icon", "R0lGODlhMAAwAPEDAAAAAADQAO7u7v///yH5BAUKAAMALAAAAAAwADAAAAL/nI+gy+2Pokyv2jazuZxryQjiSJZmyXxHeLbumH6sEATvW8OLNtf5bfLZRLFITzgEipDJ4mYxYv6A0ubuqYhWk66tVTE4enHer7jcKvt0LLUw6P45lvEprT6c0+v7OBuqhYdHohcoqIbSAHc4ljhDwrh1UlgSydRCWWlp5wiYZvmSuSh4IzrqV6p4cwhkCsmY+nhK6uJ6t1mrOhuJqfu6+WYiCiwl7HtLjNSZZZis/MeM7NY3TaRKS40ooDeoiVqIultsrav92bi9c3a5KkkOsOJZpSS99m4k/0zPng4Gks9JSbB+8DIcoQfnjwpZCHv5W+ip4aQrKrB0uOikYhiMCBw1/uPoQUMBADs=")
load_image("n_bool", "R0lGODdhEAAQAPAAAAgICP///ywAAAAAEAAQAAACIISPacHtvp5kcb5qG85hZ2+BkyiRF8BBaEqtrKkqslEAADs=")
load_image("y_bool", "R0lGODdhEAAQAPEAAAgICADQAP///wAAACwAAAAAEAAQAAACMoSPacLtvlh4YrIYsst2cV19AvaVF9CUXBNJJoum7ymrsKuCnhiupIWjSSjAFuWhSCIKADs=")
load_image("n_tri", "R0lGODlhEAAQAPD/AAEBAf///yH5BAUKAAIALAAAAAAQABAAAAInlI+pBrAKQnCPSUlXvFhznlkfeGwjKZhnJ65h6nrfi6h0st2QXikFADs=")
load_image("m_tri", "R0lGODlhEAAQAPEDAAEBAeQMuv///wAAACH5BAUKAAMALAAAAAAQABAAAAI5nI+pBrAWAhPCjYhiAJQCnWmdoElHGVBoiK5M21ofXFpXRIrgiecqxkuNciZIhNOZFRNI24PhfEoLADs=")
load_image("y_tri", "R0lGODlhEAAQAPEDAAICAgDQAP///wAAACH5BAUKAAMALAAAAAAQABAAAAI0nI+pBrAYBhDCRRUypfmergmgZ4xjMpmaw2zmxk7cCB+pWiVqp4MzDwn9FhGZ5WFjIZeGAgA7")
load_image("m_my", "R0lGODlhEAAQAPEDAAAAAOQMuv///wAAACH5BAUKAAMALAAAAAAQABAAAAI5nIGpxiAPI2ghxFinq/ZygQhc94zgZopmOLYf67anGr+oZdp02emfV5n9MEHN5QhqICETxkABbQ4KADs=")
load_image("y_my", "R0lGODlhEAAQAPH/AAAAAADQAAPRA////yH5BAUKAAQALAAAAAAQABAAAAM+SArcrhCMSSuIM9Q8rxxBWIXawIBkmWonupLd565Um9G1PIs59fKmzw8WnAlusBYR2SEIN6DmAmqBLBxYSAIAOw==")
load_image("n_locked", "R0lGODlhEAAQAPABAAAAAP///yH5BAUKAAEALAAAAAAQABAAAAIgjB8AyKwN04pu0vMutpqqz4Hih4ydlnUpyl2r23pxUAAAOw==")
load_image("m_locked", "R0lGODlhEAAQAPD/AAAAAOQMuiH5BAUKAAIALAAAAAAQABAAAAIylC8AyKwN04ohnGcqqlZmfXDWI26iInZoyiore05walolV39ftxsYHgL9QBBMBGFEFAAAOw==")
load_image("y_locked", "R0lGODlhEAAQAPD/AAAAAADQACH5BAUKAAIALAAAAAAQABAAAAIylC8AyKzNgnlCtoDTwvZwrHydIYpQmR3KWq4uK74IOnp0HQPmnD3cOVlUIAgKsShkFAAAOw==")
load_image("not_selected", "R0lGODlhEAAQAPD/AAAAAP///yH5BAUKAAIALAAAAAAQABAAAAIrlA2px6IBw2IpWglOvTYhzmUbGD3kNZ5QqrKn2YrqigCxZoMelU6No9gdCgA7")
load_image("selected", "R0lGODlhEAAQAPD/AAAAAP///yH5BAUKAAIALAAAAAAQABAAAAIzlA2px6IBw2IpWglOvTah/kTZhimASJomiqonlLov1qptHTsgKSEzh9H8QI0QzNPwmRoFADs=")
load_image("edit", "R0lGODlhEAAQAPIFAAAAAKOLAMuuEPvXCvrxvgAAAAAAAAAAACH5BAUKAAUALAAAAAAQABAAAANCWLqw/gqMBp8cszJxcwVC2FEOEIAi5kVBi3IqWZhuCGMyfdpj2e4pnK+WAshmvxeAcETWlsxPkkBtsqBMa8TIBSQAADs=")
class NodeType():
"""Used to determine what GUI control to use in the visual tree."""
_unknown = 0
_radio = 1
_bool = 2
_tri = 3
_text = 4
_menu = 5
_comment = 6
nodeType = _unknown
def __init__(self, t):
self.nodeType = t
def isType(self, t_list):
return self.nodeType in t_list
def getType(self):
return self.nodeType
class TreeNodeData(object):
"""These are the data objects that goes into the tree data model."""
def __init__ (self, node, tree):
"""Create a TreeNodeData object
Parameters
----------
node : Kconfig.MenuNode
The Kconfiglib node object that this tree node visualizes.
tree : KConfigTree
The tree this node object belongs to. Needed for sending events to the tree.
"""
self.knode = node
self.tree = tree
self.expanded = False
def getNodeType(self):
"""Returns the node type"""
item = self.knode.item
if item == MENU:
return NodeType(NodeType._menu)
if item == COMMENT:
return NodeType(NodeType._comment)
if not item.orig_type:
return NodeType(NodeType._unknown)
if item.orig_type in (STRING, INT, HEX):
return NodeType(NodeType._text)
# BOOL or TRISTATE
if isinstance(item, Symbol) and item.choice:
# Choice symbol in y-mode choice
return NodeType(NodeType._radio)
if len(item.assignable) <= 1:
# Pinned to a single value
if isinstance(item, Choice):
return NodeType(NodeType._menu)
if item.type == BOOL:
return NodeType(NodeType._bool)
if item.assignable == (1, 2):
return NodeType(NodeType._tri)
return NodeType(NodeType._tri)
def getText(self):
"""Return the text to display on the tree node"""
if self.knode and self.knode.prompt:
return self.knode.prompt[0]
return getNodeName(self.knode).strip()
def getValue(self):
"""Returns a string-type value, used for STRING, INT, HEX node types."""
if self.knode.item == MENU or self.knode.item == COMMENT:
return None
return self.knode.item.str_value
def getTriValue(self):
"""Returns a boolean or tristate value. A bool checkbox has the values 0 and 2,
while a tristate has 0, 1 and 2. 0 = False/N, 1 = Module/M, 2 = True/Y"""
if self.knode.item == MENU or self.knode.item == COMMENT:
return None
# log.info(self.getText(), str(self.knode.item.tri_value)))
return self.knode.item.tri_value
def setValue(self, val):
"""Set a string value. Can be a text string, or an integer (or hex) encoded as a string."""
# log.info("TreeNodeData.setValue " + self.getText() + " " + str(val) + " was " + self.getValue())
self.knode.item.set_value(val)
self.tree.updateTree()
def setTriValue(self, n):
"""Set a tristate or bool value. 0 = False/N, 1 = Module/M, 2 = True/Y"""
# log.info("TreeNodeData.setTriValue", self.getText(), n)
self.knode.item.set_value(n)
self.tree.updateTree()
def getVisible(self):
"""Return the visibility state of the node."""
return TreeNodeData.isVisible(self.knode)
@staticmethod
def isVisible(node):
"""Return the visibility state of the node passed as an argument."""
return node.prompt and expr_value(node.prompt[1]) and not \
(node.item == MENU and not expr_value(node.visibility))
def isExpanded(self):
return self.expanded
def setExpanded(self, expanded):
self.expanded = expanded
def search(self, searchString, invisibleMatch):
"""Search all text related to this node for searchString.
If it matches, it will tag the node as a search match.
If invisibleMatch = False and the node is not visible, the search match will be False.
The search match result (bool) is returned."""
if self.getVisible() > 0 or invisibleMatch:
infoText = self.getText()
searchString = "*" + searchString + "*"
self.searchMatch = fnmatch.fnmatch(infoText.lower(), searchString.lower())
else:
self.searchMatch = False
return self.searchMatch
def setSearchMatch(self, match):
"""Tags the node with a search match"""
self.searchMatch = match
def isSearchMatch(self):
return self.searchMatch
def toString(self):
return self.getText() + " = " + str(self.getValue())
class TristateCheckBox(JCheckBox):
"""Custom tristate checkbox implementation."""
serialVersionUID = 1
triState = 0
_load_images()
selected = _y_tri_img
unselected = _n_tri_img
halfselected = _m_tri_img
def __init__(self, eventHandler = None):
"""Creates a TristateCheckBox object
Arguments
---------
eventHandler : ActionListener
If supplied, the event handler will be called when
the tristate checkbox state changes.
"""
JCheckBox.__init__(self)
if eventHandler:
addActionListener(self, eventHandler)
addActionListener(self, self.actionPerformed)
def paint(self, g):
"""Called when the tree needs to paint the checkbox icon."""
if self.triState == 2:
self.setIcon(self.selected)
elif self.triState == 1:
self.setIcon(self.halfselected)
else:
self.setIcon(self.unselected)
JCheckBox.paint(self, g)
def getTriState(self):
"""Return the tristate value (0, 1 or 2)."""
return self.triState
def setTriState(self, tri):
"""Set tristate value (0, 1 or 2)."""
self.triState = tri
def actionPerformed(self, e):
"""Increments the checkbox value when clicked"""
# log.info("actionPerformed()")
tcb = e.getSource()
newVal = (tcb.getTriState() + 1) % 3
tcb.setTriState(newVal)
class CustomCellRenderer(DefaultTreeCellRenderer):
"""Renders the various tree controls (checkbox, tristate checkbox, string values etc.)"""
def __init__(self):
DefaultTreeCellRenderer.__init__(self)
flowLayout = FlowLayout(FlowLayout.LEFT, 0, 0)
self.cbPanel = JPanel(flowLayout)
self.cb = JCheckBox()
self.cb.setBackground(None)
self.cbPanel.add(self.cb)
self.cbLabel = JLabel()
self.cbPanel.add(self.cbLabel)
self.tcbPanel = JPanel(flowLayout)
self.tcb = TristateCheckBox()
self.tcb.setBackground(None)
self.tcbPanel.add(self.tcb)
self.tcbLabel = JLabel()
self.tcbPanel.add(self.tcbLabel)
self.rbPanel = JPanel(flowLayout)
self.rb = JRadioButton()
self.rb.setBackground(None)
self.rbPanel.add(self.rb)
self.rbLabel = JLabel()
self.rbPanel.add(self.rbLabel)
def getTreeCellRendererComponent(self, tree, value, selected, expanded, leaf, row, hasFocus):
"""Return a swing control appropriate for the node type of the supplied value"""
if isinstance(value, DefaultMutableTreeNode):
nodeData = value.getUserObject()
if isinstance(nodeData, TreeNodeData):
t = nodeData.getNodeType()
isEnabled = nodeData.getVisible() > 0
# Boolean checkbox
if t.isType([NodeType._bool]):
self.cbLabel.setText(nodeData.getText())
self.cb.setEnabled(isEnabled)
self.cbLabel.setEnabled(isEnabled)
if nodeData.getTriValue() == 0:
self.cb.setSelected(False)
else:
self.cb.setSelected(True)
control = self.cbPanel
# Tristate chekcbox
elif t.isType([NodeType._tri]):
control = self.tcbPanel
self.tcbLabel.setText(nodeData.getText())
self.tcb.setEnabled(isEnabled)
self.tcbLabel.setEnabled(isEnabled)
self.tcb.setTriState(nodeData.getTriValue())
# Radio button
elif t.isType([NodeType._radio]):
self.rbLabel.setText(nodeData.getText())
self.rb.setEnabled(isEnabled)
self.rbLabel.setEnabled(isEnabled)
if nodeData.getTriValue() == 0:
self.rb.setSelected(False)
else:
self.rb.setSelected(True)
control = self.rbPanel
# Text field
elif t.isType([NodeType._text]):
control = DefaultTreeCellRenderer.getTreeCellRendererComponent(self, tree, value, selected, expanded, leaf, row, hasFocus)
control.setText(nodeData.getText() + ": " + str(nodeData.getValue()))
# Default tree cell (a node with an icon and a label)
else:
control = DefaultTreeCellRenderer.getTreeCellRendererComponent(self, tree, value, selected, expanded, leaf, row, hasFocus)
control.setText(nodeData.getText())
self.setColors(control, nodeData, selected) # Background color for the tree item
# log.info("getTreeCellRendererComponent", t.getType(), isEnabled, "'" + nodeData.getText() + "'")
control.setEnabled(isEnabled)
return control
# log.info("Warning: getTreeCellRendererComponent() fallthrough", nodeData)
return DefaultTreeCellRenderer.getTreeCellRendererComponent(self, tree, value, selected, expanded, leaf, row, hasFocus)
def setColors(self, control, data, selected):
"""Set background color fot the tree item."""
if selected:
control.setForeground(self.getTextSelectionColor())
control.setBackground(self.getBackgroundSelectionColor())
else:
control.setForeground(self.getTextNonSelectionColor())
control.setBackground(self.getBackgroundNonSelectionColor())
class CustomCellEditor(TreeCellEditor, ActionListener):
"""Renders the various tree edit controls (checkbox, tristate checkbox, text box etc.)"""
def __init__(self, tree):
TreeCellEditor.__init__(self)
self.editor = None
self.tree = tree
flowLayout = FlowLayout(FlowLayout.LEFT, 0, 0)
self.cbPanel = JPanel(flowLayout)
self.cb = JCheckBox(actionPerformed = self.checked)
self.cbPanel.add(self.cb)
self.cbLabel = JLabel()
self.cbPanel.add(self.cbLabel)
self.tcbPanel = JPanel(flowLayout)
self.tcb = TristateCheckBox(self.checked)
self.tcbPanel.add(self.tcb)
self.tcbLabel = JLabel()
self.tcbPanel.add(self.tcbLabel)
self.rbPanel = JPanel(flowLayout)
self.rb = JRadioButton(actionPerformed = self.checked)
self.rbPanel.add(self.rb)
self.rbLabel = JLabel()
self.rbPanel.add(self.rbLabel)
self.tfPanel = JPanel(flowLayout)
self.tfLabel = JLabel()
self.tfPanel.add(self.tfLabel)
self.tf = JTextField()
self.tf.setColumns(12)
self.tf.addActionListener(self)
self.tfPanel.add(self.tf)
def addCellEditorListener(self, l):
"""Register for edit events"""
self.listener = l
def isCellEditable(self, event):
if event != None and isinstance(event.getSource(), JTree) and isinstance(event, MouseEvent):
tree = event.getSource()
path = tree.getPathForLocation(event.getX(), event.getY())
userData = path.getLastPathComponent().getUserObject()
if isinstance(userData, TreeNodeData) and (not userData.getNodeType().isType([NodeType._comment, NodeType._menu])) and (userData.getVisible() > 0):
return True
return False
def shouldSelectCell(self, event):
# log.info("shouldSelectCell")
return True
def cancelCellEditing(self):
# log.info("Cancel editing, please!")
# super(CustomCellEditor, self).cancelCellEditing()
pass
def stopCellEditing(self):
# log.info("stopCellEditing")
if self.nodeData.getNodeType().isType([NodeType._text]):
# log.info("stopCellEditing for sure!")
self.nodeData.setValue(str(self.tf.getText()))
return True
def getTreeCellEditorComponent(self, tree, value, selected, expanded, leaf, row):
"""Return a swing edit control appropriate for the node type of the supplied value"""
self.nodeData = self.getNodeUserData(value)
if self.nodeData:
text = self.nodeData.getText()
t = self.nodeData.getNodeType()
# Boolean checkbox
if t.isType([NodeType._bool]):
self.editor = self.cbPanel
self.cbLabel.setText(text)
if self.nodeData.getTriValue() > 0:
self.cb.setSelected(True)
else:
self.cb.setSelected(False)
# Tristate checkbox
elif t.isType([NodeType._tri]):
# log.info("getTreeCellEditorComponent tristate")
self.editor = self.tcbPanel
self.tcbLabel.setText(text)
self.tcb.setTriState(self.nodeData.getTriValue())
# Radio button
elif t.isType([NodeType._radio]):
self.editor = self.rbPanel
self.rbLabel.setText(text)
if self.nodeData.getTriValue() > 0:
self.rb.setSelected(True)
else:
self.rb.setSelected(False)
# Text field
elif t.isType([NodeType._text]):
self.editor = self.tfPanel
self.tfLabel.setText(str(self.nodeData.getText()) + ":")
self.tf.setText(str(self.nodeData.getValue()))
else:
self.editor = self.tcb
self.editor.setText(text)
return self.editor
def getNodeUserData(self, value):
"""Gets the TreeNodeData from the tree node"""
if isinstance(value, DefaultMutableTreeNode):
nodeData = value.getUserObject()
if isinstance(nodeData, TreeNodeData):
return nodeData
return None
def getCellEditorValue(self):
newNode = TreeNodeData(self.nodeData.knode, self.tree)
if isinstance(self.editor, JTextField):
newNode.setValue(str(self.editor.getText()))
return newNode
def checked(self, e):
"""Updates the node data when a checkbox has been clicked"""
control = e.getSource()
if isinstance(control, TristateCheckBox):
# log.info("tristate checked")
self.nodeData.setTriValue(control.getTriState())
else:
# log.info("checkbox checked")
if control.isSelected():
self.nodeData.setValue(2)
else:
self.nodeData.setValue(0)
def actionPerformed(self, event):
""" ENTER pressed in text field, stop editing."""
tf = event.getSource()
self.listener.editingStopped(ChangeEvent(tf))
class KConfigTree(JTree, CellEditorListener):
"""Custom Swing JTree based tree that visualizes a KConfig configuration.
    The full KConfig menu structure is put into a shadow tree model. From the shadow model,
    a real model is built (createUpdatedModel), where hidden nodes are not included. This updated model
    is what the tree uses to visualize the configuration menu.
    Both the shadow and the updated model share the same TreeNodeData objects with KConfig data.
The expanded state and search result state is kept in the TreeNodeData.
"""
shadowModel = None
isUpdating = False
showAll = False
isSearching = False
def __init__(self, kconf):
self.setCellRenderer(CustomCellRenderer())
self.setCellEditor(CustomCellEditor(self))
self.createKconfShadowModel(kconf)
self.setModel(self.createUpdatedModel())
self.expandRow(0)
self.setEditable(True)
self.setRootVisible(False)
self.setShowsRootHandles(True)
self.setRowHeight(0)
self.addTreeExpansionListener(KConfigTreeExpansionListener())
self.getCellEditor().addCellEditorListener(self)
def editingCanceled(self, event):
"""From CellEditorListener """
# log.info("editingCanceled", self.cellEditor.getCellEditorValue())
pass
def editingStopped(self, event):
"""From CellEditorListener."""
# log.info("editingStopped", self.cellEditor.getCellEditorValue())
self.stopEditing()
def createKconfShadowModel(self, kconf):
"""Create the one and only shadow data model"""
rootNode = DefaultMutableTreeNode(kconf.mainmenu_text)
self.addNodes(rootNode, kconf.top_node.list)
self.shadowModel = DefaultTreeModel(rootNode)
def addNodes(self, parent, node):
"""Recursively traverse the KConfig structure and add to the shadow model"""
while node:
newUiNode = DefaultMutableTreeNode(TreeNodeData(node, self))
parent.add(newUiNode)
if node.list:
self.addNodes(newUiNode, node.list)
node = node.next
def createUpdatedModel(self):
"""When the user does any changes in the tree, the underlaying kconfig structure
will change. Nodes may change visibility and value. The tree control cannot hide nodes,
so a new datamodel must be generated that does not include invisible nodes."""
shadowTreeRoot = self.shadowModel.getRoot()
rootNode = DefaultMutableTreeNode("Root")
self.addVisibleNodes(rootNode, shadowTreeRoot)
return DefaultTreeModel(rootNode)
def addVisibleNodes(self, visibleParent, shadowParent):
"""Adds visible nodes from the shadow tree model to the update tree model.
If there is an active search operation, only search matches will be added.
If showAll is set, all nodes are added regardless of visibility."""
childrenEnum = shadowParent.children()
while childrenEnum.hasMoreElements():
shadowChild = childrenEnum.nextElement()
if shadowChild.getUserObject().getVisible() > 0 or self.showAll:
if not self.isSearching or shadowChild.getUserObject().isSearchMatch():
visibleChild = DefaultMutableTreeNode(shadowChild.getUserObject())
visibleParent.add(visibleChild)
if shadowChild.getChildCount() > 0:
self.addVisibleNodes(visibleChild, shadowChild)
def isPathEditable(self, path):
comp = path.getLastPathComponent()
if isinstance(comp, DefaultMutableTreeNode):
nodeData = comp.getUserObject()
if isinstance(nodeData, TreeNodeData):
return True
return False
def updateTree(self):
"""Call to create a new updated tree model"""
if not self.isUpdating:
# log.info("updateTree()")
self.isUpdating = True
self.setModel(self.createUpdatedModel())
self.updateExpandedState(self.getModel().getRoot())
self.isUpdating = False
def updateExpandedState(self, parent):
"""Scan through the whole tree and expand the tree node
if the node data has the expanded field set to True."""
childrenEnum = parent.children()
while childrenEnum.hasMoreElements():
child = childrenEnum.nextElement()
if child.getUserObject().isExpanded():
self.expandPath(TreePath(child.getPath()))
if child.getChildCount() > 0:
self.updateExpandedState(child)
def setShowAll(self, show):
self.showAll = show
self.updateTree()
def doSearch(self, searchText):
"""Perform a search in the data model with the supplied text."""
if len(searchText) > 0:
self.isSearching = True
self.doSearchBranch(self.shadowModel.getRoot(), searchText)
else:
self.isSearching = False
self.updateTree()
def doSearchBranch(self, shadowParent, searchText):
"""Traverse the tree model searching for the search text"""
match = False
childrenEnum = shadowParent.children()
while childrenEnum.hasMoreElements():
shadowChild = childrenEnum.nextElement()
if shadowChild.getUserObject().search(searchText, self.showAll):
match = True
if shadowChild.getChildCount() > 0:
if self.doSearchBranch(shadowChild, searchText):
shadowChild.getUserObject().setSearchMatch(True)
match = True
return match
class KConfigTreeExpansionListener(TreeExpansionListener):
"""Listener for tree expand/collapse events. Used for storing the expand state
in the node data, so that a new updated tree's branches can be expanded the same way
as in the old tree."""
def treeExpanded(self, e):
if not e.getPath().getLastPathComponent() == e.getSource().getModel().getRoot():
e.getPath().getLastPathComponent().getUserObject().setExpanded(True)
def treeCollapsed(self, e):
if not e.getPath().getLastPathComponent() == e.getSource().getModel().getRoot():
e.getPath().getLastPathComponent().getUserObject().setExpanded(False)
class MPConfig(TreeSelectionListener):
"""The MPConfig component initializes the KConfig library with the requested configuration,
and buildst the GUI, consisting of a "Load" and a "Save as" buttons, a search field, "show all"
checkbox, tree view and information text view."""
def __init__(self, kconfig_file = "Kconfig", config_file=".config", systemLogger = None):
"""[summary]
Parameters
----------
kconfig_file : string (default: "Kconfig")
The Kconfig configuration file
config_file : string (default: ".config")
The save file which will be used for loading and saving the settings
systemLogger (default: None)
A system logger object. If None then print statements are used for logging.
"""
global log
if systemLogger:
log = systemLogger
# Load Kconfig configuration files
self.kconfig = Kconfig(kconfig_file)
setKConfig(self.kconfig)
if os.path.isfile(config_file):
log.info(self.kconfig.load_config(config_file))
elif os.path.isfile(".config"):
log.info(self.kconfig.load_config(".config"))
self.tree = KConfigTree(self.kconfig)
self.tree.addTreeSelectionListener(self.treeSelectionChanged)
jTreeSP = JScrollPane(self.tree)
self.jta = JTextArea()
self.jta.setEditable(False)
jTextSP = JScrollPane(self.jta)
toolPanel = JPanel()
toolPanel.setLayout(BoxLayout(toolPanel, BoxLayout.X_AXIS))
toolPanel.setBorder(BorderFactory.createEmptyBorder(2, 0, 2, 0))
toolPanel.add(JLabel("Search: "))
jSearchPanel = JPanel()
jSearchPanel.setLayout(BoxLayout(jSearchPanel, BoxLayout.X_AXIS))
self.jSearchField = JTextField()
jSearchPanel.setBackground(self.jSearchField.getBackground())
jSearchPanel.setBorder(self.jSearchField.getBorder())
self.jSearchField.setBorder(None)
self.jSearchField.getDocument().addDocumentListener(SearchListener(self.tree))
jSearchPanel.add(self.jSearchField)
clearSearchButton = JButton(u'\u00d7', actionPerformed = self.clearSearch)
d = clearSearchButton.getPreferredSize()
clearSearchButton.setPreferredSize(Dimension(d.height, d.height))
clearSearchButton.setBackground(self.jSearchField.getBackground())
clearSearchButton.setBorder(None)
clearSearchButton.setOpaque(False)
clearSearchButton.setContentAreaFilled(False)
clearSearchButton.setFocusPainted(False)
jSearchPanel.add(clearSearchButton)
toolPanel.add(jSearchPanel)
self.showAllCheckBox = JCheckBox("Show all", actionPerformed = self.OnShowAllCheck)
toolPanel.add(self.showAllCheckBox)
splitPane = JSplitPane(JSplitPane.VERTICAL_SPLIT, jTreeSP, jTextSP)
splitPane.setOneTouchExpandable(True)
splitPane.setDividerLocation(300)
treePanel = JPanel(BorderLayout())
treePanel.add(toolPanel, BorderLayout.NORTH)
treePanel.add(splitPane, BorderLayout.CENTER)
loadSavePanel = JPanel()
loadSavePanel.setLayout(BoxLayout(loadSavePanel, BoxLayout.X_AXIS))
loadSavePanel.add(JButton("Load", actionPerformed=self.loadConfigDialog))
loadSavePanel.add(JButton("Save as", actionPerformed=self.writeConfigDialog))
self.rootPanel = JPanel()
self.rootPanel.setLayout(BorderLayout())
self.rootPanel.add(loadSavePanel, BorderLayout.PAGE_START)
self.rootPanel.add(treePanel, BorderLayout.CENTER)
def clearSearch(self, event):
self.jSearchField.setText("")
def OnShowAllCheck(self, event):
self.tree.setShowAll(self.showAllCheckBox.isSelected())
self.tree.doSearch(self.jSearchField.getText()) # Must repeat the search if one is active
def treeSelectionChanged(self, event):
"""When the user selects a new node in the tree, show info about the selected node
in the info text area below the tree."""
path = event.getNewLeadSelectionPath()
if path:
comp = path.getLastPathComponent()
if isinstance(comp, DefaultMutableTreeNode):
nodeData = comp.getUserObject()
if isinstance(nodeData, TreeNodeData):
self.jta.setText(getNodeInfoString(nodeData.knode))
self.jta.setCaretPosition(0)
def getPane(self):
"""Return the panel containing all the other components that is set up in __init__()."""
return self.rootPanel
def writeConfig(self, fileName):
"""Write the current configuration to the file specified."""
self.kconfig.write_config(fileName) # Save full configuration
#self.kconfig.write_min_config(fileName) # Save minimal configuration
def loadConfig(self, fileName):
"""Load configuration settings from the file specified."""
if os.path.isfile(fileName):
log.info(self.kconfig.load_config(fileName))
self.tree.createKconfShadowModel(self.kconfig)
self.tree.updateTree()
def writeConfigDialog(self, e):
"""Open a file dialog to save configuration"""
fileChooser = JFileChooser(os.getcwd())
retval = fileChooser.showSaveDialog(None)
if retval == JFileChooser.APPROVE_OPTION:
f = fileChooser.getSelectedFile()
self.writeConfig(f.getPath())
def loadConfigDialog(self, e):
"""Open a file dialog to select configuration to load"""
fileChooser = JFileChooser(os.getcwd())
retval = fileChooser.showOpenDialog(None)
if retval == JFileChooser.APPROVE_OPTION:
f = fileChooser.getSelectedFile()
log.info("Selected file: " + f.getPath())
self.loadConfig(f.getPath())
class SearchListener(DocumentListener):
"""Triggered when the user adds or removes characters in the search text field."""
def __init__(self, tree):
self.tree = tree
def changedUpdate(self, e):
doc = e.getDocument()
searchText = doc.getText(0, doc.getLength())
self.tree.doSearch(searchText)
def insertUpdate(self, e):
self.changedUpdate(e)
def removeUpdate(self, e):
self.changedUpdate(e)
if __name__ == "__main__":
# Set default .config file or load it from argv
if len(sys.argv) == 2:
# Specify "Kconfig"
mpconfig = MPConfig(sys.argv[1])
else:
# Specify "Kconfig" and ".config"
mpconfig = MPConfig(sys.argv[1], sys.argv[2])
jframe = JFrame("MPLAB X Kconfig Editor")
jframe.getContentPane().add(mpconfig.getPane())
jframe.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE)
jframe.setSize(500, 800)
jframe.setVisible(True)
| 39.921053 | 442 | 0.651825 | 28,437 | 0.85207 | 0 | 0 | 251 | 0.007521 | 0 | 0 | 9,687 | 0.290256 |
c0df3129d8955fe3aa993705e4ac485becb1c9ed | 3,495 | py | Python | functional_test/test_sqlite.py | penguinolog/sqlalchemy_jsonfield | 552bc52af2f2e9c4ebe85928070e2b1b42d9a5d8 | [
"Apache-2.0"
]
| 17 | 2017-05-19T14:03:15.000Z | 2022-01-16T08:33:34.000Z | functional_test/test_sqlite.py | penguinolog/sqlalchemy_jsonfield | 552bc52af2f2e9c4ebe85928070e2b1b42d9a5d8 | [
"Apache-2.0"
]
| 5 | 2018-08-01T09:55:48.000Z | 2020-07-06T08:54:00.000Z | functional_test/test_sqlite.py | penguinolog/sqlalchemy_jsonfield | 552bc52af2f2e9c4ebe85928070e2b1b42d9a5d8 | [
"Apache-2.0"
]
| 2 | 2018-08-01T09:47:40.000Z | 2020-07-05T15:31:17.000Z | # coding=utf-8
# pylint: disable=missing-docstring, unused-argument
import os.path
import sqlite3
import tempfile
import unittest
import sqlalchemy.ext.declarative
import sqlalchemy.orm
try:
# noinspection PyPackageRequirements
import ujson as json
except ImportError:
import json
import sqlalchemy_jsonfield
# Path to test database
db_path = os.path.join(tempfile.gettempdir(), "test.sqlite3")
# Table name
table_name = "create_test"
# DB Base class
Base = sqlalchemy.ext.declarative.declarative_base()
# Model
class ExampleTable(Base):
__tablename__ = table_name
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
row_name = sqlalchemy.Column(sqlalchemy.Unicode(64), unique=True)
json_record = sqlalchemy.Column(sqlalchemy_jsonfield.JSONField(), nullable=False)
class SQLIteTests(unittest.TestCase):
def setUp(self): # type: () -> None
if os.path.exists(db_path):
os.remove(db_path)
engine = sqlalchemy.create_engine("sqlite:///{}".format(db_path), echo=False)
Base.metadata.create_all(engine)
# noinspection PyPep8Naming
Session = sqlalchemy.orm.sessionmaker(engine)
self.session = Session()
def test_create(self): # type: () -> None
"""Check column type"""
# noinspection PyArgumentList
with sqlite3.connect(database="file:{}?mode=ro".format(db_path), uri=True) as conn:
conn.row_factory = sqlite3.Row
c = conn.cursor()
c.execute("PRAGMA TABLE_INFO({})".format(table_name))
collected = c.fetchall()
result = [dict(col) for col in collected]
columns = {info["name"]: info for info in result}
json_record = columns["json_record"]
self.assertIn(
json_record["type"],
("TEXT", "JSON"),
"Unexpected column type: received: {!s}, expected: TEXT|JSON".format(json_record["type"]),
)
def test_operate(self): # type: () -> None
"""Check column data operation"""
test_dict = {"key": "value"}
test_list = ["item0", "item1"]
# fill table
with self.session.transaction:
self.session.add_all(
[
ExampleTable(row_name="dict_record", json_record=test_dict),
ExampleTable(row_name="list_record", json_record=test_list),
]
)
# Validate backward check
dict_record = self.session.query(ExampleTable).filter(ExampleTable.row_name == "dict_record").first()
list_record = self.session.query(ExampleTable).filter(ExampleTable.row_name == "list_record").first()
self.assertEqual(
dict_record.json_record,
test_dict,
"Dict was changed: {!r} -> {!r}".format(test_dict, dict_record.json_record),
)
self.assertEqual(
list_record.json_record, test_list, "List changed {!r} -> {!r}".format(test_list, list_record.json_record)
)
# Low level
# noinspection PyArgumentList
with sqlite3.connect(database="file:{}?mode=ro".format(db_path), uri=True) as conn:
c = conn.cursor()
c.execute("SELECT row_name, json_record FROM {tbl}".format(tbl=table_name))
result = dict(c.fetchall())
self.assertEqual(result["dict_record"], json.dumps(test_dict))
self.assertEqual(result["list_record"], json.dumps(test_list))
| 30.391304 | 118 | 0.631187 | 2,957 | 0.846066 | 0 | 0 | 0 | 0 | 0 | 0 | 808 | 0.231187 |
c0e271f6251b995e55557dc615aa3e3800aa1106 | 587 | py | Python | demos/cyk_test.py | wannaphong/pycfg | ffa67958ed1c3deb73cadb3969ac086336fb1269 | [
"MIT"
]
| 8 | 2017-12-18T08:51:27.000Z | 2020-11-26T02:21:06.000Z | demos/cyk_test.py | wannaphong/pycfg | ffa67958ed1c3deb73cadb3969ac086336fb1269 | [
"MIT"
]
| 1 | 2020-01-09T15:41:09.000Z | 2020-01-09T15:41:09.000Z | demos/cyk_test.py | wannaphong/pycfg | ffa67958ed1c3deb73cadb3969ac086336fb1269 | [
"MIT"
]
| 6 | 2017-06-12T16:58:40.000Z | 2019-11-27T06:55:07.000Z | '''Run Example 4.8 from Aho & Ullman p. 315-316, printing the steps to stdout.
'''
from cfg import aho_ullman, core
import sys
CFG = core.ContextFreeGrammar
G = CFG('''
S -> AA | AS | b
A -> SA | AS | a
''')
w = map(core.Terminal, 'abaab')
print 'G:'
print G
print
print 'w =', ''.join(map(str, w))
print
T = aho_ullman.cocke_younger_kasami_algorithm(G, w, out=sys.stdout, check=False)
print 'T:'
print aho_ullman.parse_table_str(T)
print
parse = aho_ullman.left_parse_from_parse_table(G, w, T, check=False)
tree = aho_ullman.LeftParse(G, parse).tree()
print 'Parse tree:', tree
| 18.935484 | 80 | 0.691652 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 158 | 0.269165 |
c0e326802c17cadedbdcc95d716b27c009b7245b | 495 | py | Python | generic_op/pool_op.py | cap-lab/MidapSim | 4f92a9f9413c29d7e1f37e863cce90ebdde8b420 | [
"MIT"
]
| 2 | 2021-03-28T16:19:06.000Z | 2022-02-26T08:58:33.000Z | generic_op/pool_op.py | cap-lab/MidapSim | 4f92a9f9413c29d7e1f37e863cce90ebdde8b420 | [
"MIT"
]
| null | null | null | generic_op/pool_op.py | cap-lab/MidapSim | 4f92a9f9413c29d7e1f37e863cce90ebdde8b420 | [
"MIT"
]
| 1 | 2021-02-22T08:44:20.000Z | 2021-02-22T08:44:20.000Z | from .convpool_op_base import ConvPoolOpBase
class PoolOp(ConvPoolOpBase):
def __init__(
self,
op_type='Pool',
pool_type=None,
global_pooling=False,
**kwargs
):
super(PoolOp, self).__init__(op_type=op_type, **kwargs)
self.global_pooling = global_pooling
if pool_type is not None:
self.type = pool_type
def flip_operation(self):
self.pad_r, self.pad_l = self.pad_l, self.pad_r
| 26.052632 | 63 | 0.60202 | 447 | 0.90303 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.012121 |
c0e6f4156c765d61d6562b34e46e4af7e1c03fe7 | 1,958 | py | Python | cegs_portal/search/json_templates/v1/tests/test_search_results.py | ReddyLab/cegs-portal | a83703a3557167be328c24bfb866b6aa019ba059 | [
"MIT"
]
| null | null | null | cegs_portal/search/json_templates/v1/tests/test_search_results.py | ReddyLab/cegs-portal | a83703a3557167be328c24bfb866b6aa019ba059 | [
"MIT"
]
| null | null | null | cegs_portal/search/json_templates/v1/tests/test_search_results.py | ReddyLab/cegs-portal | a83703a3557167be328c24bfb866b6aa019ba059 | [
"MIT"
]
| null | null | null | import pytest
from django.db.models import Manager
from cegs_portal.search.json_templates.v1.dna_region import dnaregions
from cegs_portal.search.json_templates.v1.search_results import (
search_results as sr_json,
)
from cegs_portal.search.models import DNARegion, Facet
from cegs_portal.search.models.utils import ChromosomeLocation
pytestmark = pytest.mark.django_db
def test_search_results(regions: list[DNARegion], facets: Manager[Facet]):
search_results = {
"loc_search": {
"location": ChromosomeLocation("chr1", 10_000, 15_000),
"assembly": "GRCh37",
},
"dhss": regions,
"facets": facets,
}
assert sr_json(search_results) == {
"location": {
"assembly": search_results["loc_search"]["assembly"],
"chromosome": search_results["loc_search"]["location"].chromo,
"start": search_results["loc_search"]["location"].range.lower,
"end": search_results["loc_search"]["location"].range.upper,
},
"regions": dnaregions(search_results["dhss"]),
"facets": [
{"name": f.name, "description": f.description, "values": [value.value for value in f.values.all()]}
for f in search_results["facets"].all()
],
}
assert sr_json(search_results, json_format="genoverse") == {
"location": {
"assembly": search_results["loc_search"]["assembly"],
"chromosome": search_results["loc_search"]["location"].chromo,
"start": search_results["loc_search"]["location"].range.lower,
"end": search_results["loc_search"]["location"].range.upper,
},
"regions": dnaregions(search_results["dhss"], json_format="genoverse"),
"facets": [
{"name": f.name, "description": f.description, "values": [value.value for value in f.values.all()]}
for f in search_results["facets"].all()
],
}
| 38.392157 | 111 | 0.62666 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 462 | 0.235955 |
c0ebac4ab996b305d4af158e61ede7d45f0985a2 | 5,002 | py | Python | models/special_tensors.py | LaudateCorpus1/learning-compressible-subspaces | 94db8191f5f4d32c1e86834284fcf9f89e4d445b | [
"AML"
]
| 6 | 2021-11-02T23:10:05.000Z | 2021-11-26T06:46:21.000Z | models/special_tensors.py | LaudateCorpus1/learning-compressible-subspaces | 94db8191f5f4d32c1e86834284fcf9f89e4d445b | [
"AML"
]
| null | null | null | models/special_tensors.py | LaudateCorpus1/learning-compressible-subspaces | 94db8191f5f4d32c1e86834284fcf9f89e4d445b | [
"AML"
]
| 2 | 2021-12-02T00:06:41.000Z | 2022-03-26T11:33:04.000Z | #
# For licensing see accompanying LICENSE file.
# Copyright (C) 2021 Apple Inc. All Rights Reserved.
#
"""Utility functions to tag tensors with metadata.
The metadata remains with the tensor under torch operations that don't change
the values, e.g. .clone(), .contiguous(), .permute(), etc.
"""
import collections
import copy
from typing import Any
from typing import Optional
import numpy as np
import torch
QuantizeAffineParams2 = collections.namedtuple(
"QuantizeAffineParams", ["scale", "zero_point", "num_bits"]
)
class _SpecialTensor(torch.Tensor):
"""This class denotes special tensors.
It isn't intended to be used directly, but serves as a helper for tagging
tensors with metadata.
It subclasses torch.Tensor so that isinstance(t, torch.Tensor) returns True
for special tensors. It forbids some of the methods of torch.Tensor, and
overrides a few methods used to create other tensors, to ensure the result
is still special.
"""
_metadata = None
def __getattribute__(self, attr: str) -> Any:
# Disallow new_zeros, new_ones, new_full, etc.
if "new_" in attr:
raise AttributeError(
"Invalid attr {!r} for special tensors".format(attr)
)
return super().__getattribute__(attr)
def detach(self) -> "_SpecialTensor":
ret = super().detach()
ret.__class__ = _SpecialTensor
ret._metadata = self._metadata
return ret
@property
def data(self) -> "_SpecialTensor":
ret = super().data
ret.__class__ = _SpecialTensor
ret._metadata = self._metadata
return ret
def clone(self) -> "_SpecialTensor":
ret = super().clone()
ret.__class__ = _SpecialTensor
ret._metadata = self._metadata
return ret
def cuda(
self, device: Optional[torch.device] = None, non_blocking: bool = False
) -> "_SpecialTensor":
ret = super().cuda()
ret.__class__ = _SpecialTensor
ret._metadata = self._metadata
return ret
def contiguous(self) -> "_SpecialTensor":
ret = super().contiguous()
ret.__class__ = _SpecialTensor
ret._metadata = self._metadata
return ret
def view(self, *args, **kwargs) -> "_SpecialTensor":
ret = super().view(*args, **kwargs)
ret.__class__ = _SpecialTensor
ret._metadata = self._metadata
return ret
def permute(self, *args, **kwargs) -> "_SpecialTensor":
ret = super().permute(*args, **kwargs)
ret.__class__ = _SpecialTensor
ret._metadata = self._metadata
return ret
def __getitem__(self, *args, **kwargs) -> "_SpecialTensor":
ret = super().__getitem__(*args, **kwargs)
ret.__class__ = _SpecialTensor
ret._metadata = self._metadata
return ret
def __copy__(self) -> "_SpecialTensor":
ret = copy.copy(super())
ret.__class__ = _SpecialTensor
ret._metadata = self._metadata
return ret
def _check_type(tensor: torch.Tensor) -> None:
given_type = type(tensor)
if not issubclass(given_type, torch.Tensor):
raise TypeError("invalid type {!r}".format(given_type))
def tag_with_metadata(tensor: torch.Tensor, metadata: Any) -> None:
"""Tag a metadata to a tensor."""
_check_type(tensor)
tensor.__class__ = _SpecialTensor
tensor._metadata = metadata
RepresentibleByQuantizeAffine = collections.namedtuple(
"RepresentibleByQuantizeAffine", ["quant_params"]
)
def mark_quantize_affine(
tensor: torch.Tensor,
scale: float,
zero_point: int,
dtype: np.dtype = np.uint8,
) -> None:
"""Mark a tensor as quantized with affine.
See //xnorai/training/pytorch/extensions/functions:quantize_affine for more
info on this method of quantization.
    The tensor itself can be a floating point Tensor. However, its values must
    be representable with @scale and @zero_point. This function, for performance
    reasons, does not validate that the tensor is really quantizable as it
    claims to be.
Arguments:
tensor (torch.Tensor): The tensor to be marked as affine-quantizable
Tensor.
scale (float): the scale (from quantization parameters).
zero_point (int): The zero_point (from quantization parameters).
dtype (numpy.dtype): Type of tensor when quantized (this is usually
numpy.uint8, which is used for Q8). A ValueError will be thrown if
the input dtype is not one of the following:
{numpy.uint8, numpy.int32}.
"""
allowed_dtypes = [np.uint8, np.int32]
if dtype not in allowed_dtypes:
raise ValueError(
"Provided dtype ({}) is not supported. Please use: {}".format(
dtype, allowed_dtypes
)
)
quant_params = QuantizeAffineParams2(scale, zero_point, dtype)
tag_with_metadata(tensor, RepresentibleByQuantizeAffine(quant_params))
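# Minimal usage sketch (illustrative, not part of the original module): tag a
# tensor and check that the metadata survives value-preserving torch operations.
# The scale/zero_point values below are arbitrary example numbers.
if __name__ == "__main__":
    t = torch.zeros(2, 3)
    mark_quantize_affine(t, scale=0.5, zero_point=0, dtype=np.uint8)
    tagged = t.clone().permute(1, 0).contiguous()
    assert isinstance(tagged, _SpecialTensor)
    print(tagged._metadata)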
| 31.859873 | 80 | 0.658137 | 2,507 | 0.5012 | 0 | 0 | 173 | 0.034586 | 0 | 0 | 2,072 | 0.414234 |
c0f028af70b526fd95e136fc02b10d25bfdd263a | 2,705 | py | Python | porodynhe_example2d.py | sfepy/example_largedef_porodyn | 4116abc7daed195eee15277b2bd564cec3762ac6 | [
"MIT"
]
| null | null | null | porodynhe_example2d.py | sfepy/example_largedef_porodyn | 4116abc7daed195eee15277b2bd564cec3762ac6 | [
"MIT"
]
| null | null | null | porodynhe_example2d.py | sfepy/example_largedef_porodyn | 4116abc7daed195eee15277b2bd564cec3762ac6 | [
"MIT"
]
| null | null | null | # Rohan E., Lukeš V.
# Modeling large-deforming fluid-saturated porous media using
# an Eulerian incremental formulation.
# Advances in Engineering Software, 113:84-95, 2017,
# https://doi.org/10.1016/j.advengsoft.2016.11.003
#
# Run simulation:
#
# ./simple.py example_largedef_porodyn-1/porodynhe_example2d.py
#
# The results are stored in `example_largedef_porodyn-1/results`.
#
import numpy as nm
from porodyn_engine import incremental_algorithm,\
fc_fce, mat_fce, def_problem
import os.path as osp
wdir = osp.dirname(__file__)
def define():
params = {
'mesh_file': 'rect_16x16.vtk',
'mat_store_elem': 75, # element for which material data are stored
'u_store_node': 272, # node for which displacement is stored
'p_store_node': 144, # node for which pressure is stored
'dim': 2, # problem dimension
'dt': 0.01, # time step
't_end': 2.0, # end time
'force': 4e6, # applied force
'save_step': True, # save results in each time step?
'init_mode': False, # calculate initial state?
}
material_params = {
'param': {
'B': nm.eye(params['dim']),
'g': 9.81, # gravitational acceleration
},
'solid': {
'Phi': 0.58, # volume fraction
'lam': 8.4e6, # Lame coefficient
'mu': 5.6e6, # Lame coefficient
'rho': 2700, # density
},
'fluid': {
'kappa': 1e-1, # permeability parameter
'beta': 0.8, # permeability parameter
'rho': 1000, # density
'Kf': 2.2e10, # bulk modulus
},
}
regions = {
'Omega': 'all',
'Left': ('vertices in (x < 0.001)', 'facet'),
'Right': ('vertices in (x > 9.999)', 'facet'),
'Bottom': ('vertices in (y < 0.001)', 'facet'),
'Top_r': ('vertices in (y > 9.999) & (x > 4.999)', 'facet'),
'Top_l': ('vertices in (y > 9.999) & (x < 5.001)', 'facet'),
'ForceRegion': ('copy r.Top_r', 'facet'),
}
ebcs = {
'Fixed_Left_u': ('Left', {'u.0': 0.0}),
'Fixed_Right_u': ('Right', {'u.0': 0.0}),
'Fixed_Bottom_u': ('Bottom', {'u.1': 0.0}),
'Fixed_Top_p': ('Top_l', {'p.0': 0.0}),
}
###############################################
options = {
'output_dir': osp.join(wdir, 'results'),
'parametric_hook': 'incremental_algorithm',
}
filename_mesh = params['mesh_file']
materials, functions, fields, variables, equations, solvers = \
def_problem(params['dt'], params['force'])
return locals()
| 31.823529 | 76 | 0.528651 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,460 | 0.539542 |
c0f0b3100db352f07b237c204da41ba3ca9a0b70 | 260 | py | Python | mailcheck/__init__.py | shacker/django-mailcheck | 878dd21dcd599bd3761e225ba0c2717af458c000 | [
"BSD-3-Clause"
]
| 1 | 2019-05-24T12:40:49.000Z | 2019-05-24T12:40:49.000Z | mailcheck/__init__.py | shacker/django-mailcheck | 878dd21dcd599bd3761e225ba0c2717af458c000 | [
"BSD-3-Clause"
]
| null | null | null | mailcheck/__init__.py | shacker/django-mailcheck | 878dd21dcd599bd3761e225ba0c2717af458c000 | [
"BSD-3-Clause"
]
| null | null | null | """
Pluggable Django email backend for capturing outbound mail for QA/review purposes.
"""
__version__ = "1.0"
__author__ = "Scot Hacker"
__email__ = "[email protected]"
__url__ = "https://github.com/shacker/django-mailcheck"
__license__ = "BSD License"
| 23.636364 | 82 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 189 | 0.726923 |
c0f1710109fd0bcc8c80d8dbd1890e68264eb994 | 4,923 | py | Python | nistapttools/histogram_functions.py | bcaplins/NIST_APT_TOOLS | 80c25498e8b069b8ee289a2d09c76c932c054cea | [
"Unlicense"
]
| null | null | null | nistapttools/histogram_functions.py | bcaplins/NIST_APT_TOOLS | 80c25498e8b069b8ee289a2d09c76c932c054cea | [
"Unlicense"
]
| null | null | null | nistapttools/histogram_functions.py | bcaplins/NIST_APT_TOOLS | 80c25498e8b069b8ee289a2d09c76c932c054cea | [
"Unlicense"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 28 13:41:03 2019
@author: bwc
"""
import numpy as np
def bin_dat(dat, bin_width=0.001, user_roi=[], isBinAligned=False, isDensity=False):
    """Histogram `dat` with the given bin width over an optional ROI.

    Returns a tuple of (bin centers, counts).
    """
user_roi = np.asarray(user_roi)
roi_supp = (user_roi.size == 2)
# Get roi
if isBinAligned and roi_supp:
lower = np.floor(np.min(user_roi)/bin_width)*bin_width
upper = np.ceil(np.max(user_roi)/bin_width)*bin_width
roi = np.array([lower, upper])
elif isBinAligned and (not roi_supp):
lower = np.floor(np.min(dat)/bin_width)*bin_width
upper = np.ceil(np.max(dat)/bin_width)*bin_width
roi = np.array([lower, upper])
elif (not isBinAligned) and roi_supp:
roi = user_roi
else: # (not isBinAligned) and (not roi_supp):
roi = np.array([np.min(dat), np.max(dat)])
num_bins = int(np.rint((roi[1]/bin_width-roi[0]/bin_width)))
histo = np.histogram(dat,range=(roi[0], roi[1]),bins=num_bins,density=isDensity)
xs = (histo[1][1:]+histo[1][0:-1])/2
ys = histo[0]
return (xs,ys)
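# Example usage (illustrative; `epos` here stands for a loaded APT event array):
#   centers, counts = bin_dat(epos['m2q'], bin_width=0.01, user_roi=[0, 100])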
def edges_to_centers(*edges):
"""
Convert bin edges to bin centers
Parameters
----------
*edges : bin edges
Returns
-------
centers : list of bin centers
"""
centers = []
for es in edges:
centers.append((es[0:-1]+es[1:])/2)
return centers
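# Example (illustrative): centers of two edge arrays at once.
#   xc, yc = edges_to_centers(np.arange(0, 5), np.arange(0, 11, 2))
#   xc -> [0.5, 1.5, 2.5, 3.5], yc -> [1., 3., 5., 7., 9.]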
def corrhist(epos):
    """Build a 2D ion-correlation histogram from the multi-hit events in epos."""
dat = epos['tof']
roi = [0, 5000]
delta = 1
# dat = epos['m2q']
# roi = [0, 100]
# delta = .1
#
# MF = np.mean(epos['tof']/np.sqrt(epos['m2q']))
# dat = np.sqrt(epos['m2q'])*MF
# roi = [0, np.sqrt(250)*MF]
# delta = .001*MF
##
N = int(np.ceil((roi[1]-roi[0])/delta))
corrhist = np.zeros([N,N], dtype=int)
multi_idxs = np.where(epos['ipp']>1)[0]
for multi_idx in multi_idxs:
n_hits = epos['ipp'][multi_idx]
cluster = dat[multi_idx:multi_idx+n_hits]
idx1 = -1
idx2 = -1
for i in range(n_hits):
for j in range(i+1,n_hits):
idx1 = int(np.floor(cluster[i]/delta))
idx2 = int(np.floor(cluster[j]/delta))
if idx1 < N and idx2 < N:
corrhist[idx1,idx2] += 1
return corrhist+corrhist.T-np.diag(np.diag(corrhist))
def dummy(epos):
    """Scratch/demo routine: voltage- and bowl-correct the ToF data in epos and
    plot the pairwise correlation histograms for each correction stage."""
    import matplotlib.pyplot as plt
    # Voltage and bowl correct ToF data
    from voltage_and_bowl import do_voltage_and_bowl
    p_volt = np.array([])
    p_bowl = np.array([])
    tof_corr, p_volt, p_bowl = do_voltage_and_bowl(epos, p_volt, p_bowl)
epos_vb = epos.copy()
epos_vb['tof'] = tof_corr.copy()
import voltage_and_bowl
tof_vcorr = voltage_and_bowl.mod_full_voltage_correction(p_volt,epos['tof'],epos['v_dc'])
epos_v = epos.copy()
epos_v['tof'] = tof_vcorr.copy()
tof_bcorr = voltage_and_bowl.mod_geometric_bowl_correction(p_bowl,epos['tof'],epos['x_det'],epos['y_det'])
epos_b = epos.copy()
epos_b['tof'] = tof_bcorr.copy()
ROI = [0, None]
    ch = corrhist(epos)
fig1 = plt.figure(num=1)
plt.clf()
plt.imshow(np.log2(1+ch))
plt.title('raw')
fig1.gca().set_xlim(ROI[0],ROI[1])
fig1.gca().set_ylim(ROI[0],ROI[1])
    ch = corrhist(epos_v)
fig2 = plt.figure(num=2)
plt.clf()
plt.imshow(np.log2(1+ch))
plt.title('volt')
fig2.gca().set_xlim(ROI[0],ROI[1])
fig2.gca().set_ylim(ROI[0],ROI[1])
    ch = corrhist(epos_b)
fig3 = plt.figure(num=3)
plt.clf()
plt.imshow(np.log2(1+ch))
plt.title('bowl')
fig3.gca().set_xlim(ROI[0],ROI[1])
fig3.gca().set_ylim(ROI[0],ROI[1])
    ch = corrhist(epos_vb)
fig4 = plt.figure(num=4)
plt.clf()
plt.imshow(np.log10(1+ch))
plt.title('v+b')
# fig4.gca().set_xlim(ROI[0],ROI[1])
# fig4.gca().set_ylim(ROI[0],ROI[1])
idxs = np.where(epos['ipp'] == 2)[0]
fig5 = plt.figure(num=5)
plt.clf()
dts = np.abs(tof_corr[idxs]-tof_corr[idxs+1])
plt.hist(dts,bins=np.arange(0,2000,.5),label='deltaT')
plt.hist(tof_corr[np.r_[idxs,idxs+1]],bins=np.arange(0,2000,.5),label='since t0')
fig66 = plt.figure(num=66)
plt.clf()
dts = np.abs(tof_corr[idxs]-tof_corr[idxs+1])
# sus = np.sqrt(tof_corr[idxs]**2+tof_corr[idxs+1]**2)
# sus = np.fmax(tof_corr[idxs],tof_corr[idxs+1])
sus = (tof_corr[idxs]+tof_corr[idxs+1])/np.sqrt(2)
plt.plot(sus,dts,'.',ms=1,alpha=1)
# fig66.gca().axis('equal')
fig66.gca().set_xlim(0,7000)
fig66.gca().set_ylim(-100, 800)
return
| 24.615 | 110 | 0.549868 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 850 | 0.172659 |
c0f3c763fc8fb9b275792346291c6e8ea034e967 | 1,138 | py | Python | bage_utils/inspect_util.py | bage79/nlp4kor | 016a20270774325579fc816a0364fb1695e60b51 | [
"MIT"
]
| 60 | 2017-04-26T04:43:45.000Z | 2021-11-08T13:01:11.000Z | bage_utils/inspect_util.py | bage79/nlp4kor | 016a20270774325579fc816a0364fb1695e60b51 | [
"MIT"
]
| null | null | null | bage_utils/inspect_util.py | bage79/nlp4kor | 016a20270774325579fc816a0364fb1695e60b51 | [
"MIT"
]
| 17 | 2017-05-21T17:27:20.000Z | 2021-01-16T22:35:44.000Z | import inspect # http://docs.python.org/2/library/inspect.html
from pprint import pprint
from bage_utils.dict_util import DictUtil # @UnusedImport
class InspectUtil(object):
@staticmethod
def summary():
frame = inspect.stack()[1]
d = {'file': frame[1], 'line': frame[2], 'function': frame[3], 'code': frame[4]}
return d
@staticmethod
def all():
frame = inspect.stack()[1]
d = {}
for key in dir(frame[0]):
d[key] = getattr(frame[0], key)
return DictUtil.sort_by_key(d)
@staticmethod
def locals():
frame = inspect.stack()[1]
d = {}
for key in frame[0].f_locals:
d[key] = frame[0].f_locals[key]
return DictUtil.sort_by_key(d)
@staticmethod
def globals():
frame = inspect.stack()[1]
d = {}
for key in frame[0].f_globals:
d[key] = frame[0].f_globals[key]
return DictUtil.sort_by_key(d)
def __test():
pprint(InspectUtil.summary())
pprint(InspectUtil.locals())
if __name__ == '__main__':
pprint(InspectUtil.summary())
# __test()
| 24.212766 | 88 | 0.579086 | 824 | 0.724077 | 0 | 0 | 775 | 0.681019 | 0 | 0 | 110 | 0.096661 |
c0f597adef0dae65ff023459c242c4d6daf5a5e9 | 2,812 | py | Python | kod/zeropython/zerocon/seriesdefinition.py | piotrmaslanka/zero | 4a4e8341b06032b7ade6d7a56d9ae33dd1a5f47d | [
"BSD-3-Clause"
]
| null | null | null | kod/zeropython/zerocon/seriesdefinition.py | piotrmaslanka/zero | 4a4e8341b06032b7ade6d7a56d9ae33dd1a5f47d | [
"BSD-3-Clause"
]
| null | null | null | kod/zeropython/zerocon/seriesdefinition.py | piotrmaslanka/zero | 4a4e8341b06032b7ade6d7a56d9ae33dd1a5f47d | [
"BSD-3-Clause"
]
| null | null | null | import struct
class SeriesDefinition(object):
def __init__(self, seriesname, replicacount, generation, autotrim, recordsize, options, tombstonedon):
self._seriesname = seriesname
self._replicacount = replicacount
self._generation = generation
self._autotrim = autotrim
self._recordsize = recordsize
self._options = options
self._tombstonedon = tombstonedon
self._intp = None
@property
def seriesname(self):
return self._seriesname
@seriesname.setter
def seriesname(self, value):
self._seriesname = value
self._intp = None
@property
def replicacount(self):
return self._replicacount
@replicacount.setter
def replicacount(self, value):
self._replicacount = value
self._intp = None
@property
def generation(self):
return self._generation
@generation.setter
def generation(self, value):
self._generation = int(value)
self._intp = None
@property
def recordsize(self):
return self._recordsize
@recordsize.setter
def recordsize(self, value):
self._recordsize = int(value)
self._intp = None
@property
def options(self):
return self._options
@options.setter
def options(self, value):
self._options = value
self._intp = None
@property
def autotrim(self):
return self._autotrim
@autotrim.setter
def autotrim(self, value):
self._autotrim = value
self._intp = None
@property
def tombstonedon(self):
return self._tombstonedon
@tombstonedon.setter
def tombstonedon(self, value):
self._tombstonedon = long(value)
self._intp = None
def __precompileINTP(self):
a = struct.pack('>iiqqqh', self._replicacount, self._recordsize, self._generation,
self._autotrim, self._tombstonedon, len(self._options))
b = self._options + struct.pack('>h', len(self._seriesname)) + self._seriesname
self._intp = str(a+b)
def toINTP(self):
if self._intp == None:
self.__precompileINTP()
return self._intp
def _lengthInBytes(self):
return len(self.toINTP())
@staticmethod
def fromINTP(dat):
repc, recs, genr, autr, tombs, lenopt = struct.unpack('>iiqqqh', dat[:34])
options = dat[34:34+lenopt]
lennam, = struct.unpack('>h', dat[34+lenopt:34+lenopt+2])
nam = dat[34+lenopt+2:34+lenopt+2+lennam]
if len(nam) != lennam:
raise Exception
return SeriesDefinition(nam, repc, genr, autr, recs, options, tombs)
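# Illustrative round-trip sketch (values are arbitrary, not from the original
# file): serialize a definition to its INTP byte form and parse it back.
if __name__ == '__main__':
    sd = SeriesDefinition('temps', 3, 1, 0, 8, '', 0)
    rt = SeriesDefinition.fromINTP(sd.toINTP())
    assert rt.seriesname == 'temps' and rt.recordsize == 8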
| 29.291667 | 106 | 0.599218 | 2,791 | 0.992532 | 0 | 0 | 1,658 | 0.589616 | 0 | 0 | 26 | 0.009246 |
c0f5fb0852a3f468b938572e90b83ad69c9f9511 | 3,914 | py | Python | Common.py | DongDong-123/zgg_active | 7b7304bc9391e1d370052087d4ad2e6d05db670c | [
"Apache-2.0"
]
| null | null | null | Common.py | DongDong-123/zgg_active | 7b7304bc9391e1d370052087d4ad2e6d05db670c | [
"Apache-2.0"
]
| null | null | null | Common.py | DongDong-123/zgg_active | 7b7304bc9391e1d370052087d4ad2e6d05db670c | [
"Apache-2.0"
]
| null | null | null | import os
import random
import time
import xlwt
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from front_login import *
from readConfig import ReadConfig
from db import DbOperate
from selenium.webdriver.chrome.options import Options
from mysqldb import connect
chrome_options = Options()
chrome_options.add_argument('--headless')
driver = webdriver.Chrome(chrome_options=chrome_options)
# driver = webdriver.Chrome()
driver.maximize_window()
driver.get(ReadConfig().get_root_url())
driver.get(ReadConfig().get_root_url())
class Common(object):
def __init__(self):
self.driver = driver
        # Excel report writer setup
self.row = 0
self.workbook = xlwt.Workbook(encoding='utf-8')
self.booksheet = self.workbook.add_sheet('Sheet1')
        self.timetemp = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())  # timestamp used to name the saved Excel report files
        # number of cases per order
self.number = 1
self.report_path = ReadConfig().save_report()
self.windows = None
self.screen_path = ReadConfig().save_screen()
    # increase the number of cases
def number_add(self):
if self.number > 1:
for i in range(self.number):
self.driver.find_element_by_xpath("//a[@class='add']").click()
else:
self.driver.find_element_by_xpath("//a[@class='add']").click()
    # decrease the number of cases back to 1
def number_minus(self):
while self.number > 1:
self.driver.find_element_by_xpath("//a[@class='jian']").click()
    # save to the database
def save_to_mysql(self, parm):
code = 0
if isinstance(parm, list):
parm.append(code)
else:
parm = list(parm)
parm.append(code)
res_code = connect(parm)
print("存储状态", res_code)
# 执行下单
def execute_function(self, callback):
try:
eval("self.{}()".format(callback))
except Exception as e:
print("错误信息:", e)
self.write_error_log(callback)
time.sleep(0.5)
self.write_error_log(str(e))
def write_error_log(self, info):
error_log_path = os.path.join(self.report_path,
"error_log_{}.log".format(time.strftime("%Y-%m-%d", time.localtime())))
with open(error_log_path, "a", encoding="utf-8") as f:
f.write("{}: ".format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) + info + "\n")
    # normalize the price string
def process_price(self, price):
if "¥" in price:
price = price.replace("¥", '')
return price
    # close extra browser windows
def closed_windows(self, num):
self.windows = self.driver.window_handles
for n in range(num + 1, len(self.windows)):
self.driver.switch_to.window(self.windows[n])
self.driver.close()
self.windows = self.driver.window_handles
self.driver.switch_to.window(self.windows[num])
    # save order info to the Excel report
def excel_number(self, infos):
        # write the case name and case number
if infos:
n = 0
for info in infos:
self.booksheet.write(self.row, n, info)
self.booksheet.col(n).width = 300 * 28
n += 1
path = os.path.join(self.report_path, "report_{}.xls".format(self.timetemp))
self.workbook.save(path)
    # take a screenshot of the given window
def qr_shotscreen(self, windows_handle, name):
current_window = self.driver.current_window_handle
if current_window != windows_handle:
self.driver.switch_to.window(windows_handle)
path = self.screen_path
self.driver.save_screenshot(path + self.timetemp + name + ".png")
print("截图成功")
self.driver.switch_to.window(current_window)
else:
path = self.screen_path
            self.driver.save_screenshot(path + self.timetemp + name + ".png")
            print("screenshot saved")
| 32.347107 | 109 | 0.601175 | 3,481 | 0.851933 | 0 | 0 | 0 | 0 | 0 | 0 | 546 | 0.133627 |
c0f64387b72b3f7cb9554217c9f76926a2cb5bad | 5,367 | py | Python | dashboard_generator.py | vgm236/exec-dash | 5c446849ffc0ced5ec6c286d87603afa280f6017 | [
"MIT"
]
| 1 | 2019-06-20T03:14:22.000Z | 2019-06-20T03:14:22.000Z | dashboard_generator.py | vgm236/exec-dash | 5c446849ffc0ced5ec6c286d87603afa280f6017 | [
"MIT"
]
| null | null | null | dashboard_generator.py | vgm236/exec-dash | 5c446849ffc0ced5ec6c286d87603afa280f6017 | [
"MIT"
]
| null | null | null | # dashboard_generator.py
import os.path # helps to save in a different folder
import pandas as pd
import itertools
import locale # from https://stackoverflow.com/Questions/320929/currency-formatting-in-python
from os import listdir
from os.path import isfile, join
#for chart generation
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# FILES PATH
save_path = 'C:/Users/Owner/Desktop/NYU-MBA/Programming/Files/monthly-sales/data'
# INTRODUCTION
print("Select one month to report")
print("---------------------------------------------------------------------")
# LISTING FILES (sorted and in a proper list)
onlyfiles = [f for f in listdir(save_path) if isfile(join(save_path, f))] #https://stackoverflow.com/questions/3207219/how-do-i-list-all-files-of-a-directory
onlyfiles.sort()
print(*onlyfiles, sep = "\n") #https://www.geeksforgeeks.org/print-lists-in-python-4-different-ways/
print("---------------------------------------------------------------------")
# REPORT SELECTION
selected_year = input("Please input a year (Example 2018 -- for Year): ")
selected_month = input("Please input a month (Example 01 -- for January): ")
# FILE SELECTED
file_name = "sales-" + selected_year + selected_month + ".csv"
# OPENING SPECIFIC FILE
find_file = os.path.join(save_path, file_name) #find the file
if not os.path.exists(find_file): # exit if the file does not exist
print("---------------------------------------------------------------------")
print("\n")
print("The file selected do not exist. Please try again")
print("\n")
print("---------------------------------------------------------------------")
exit()
stats = pd.read_csv(find_file)
# PERFORMING THE SUM
total_sales = stats["sales price"].sum()
# FORMATTING TOTAL SALES
locale.setlocale( locale.LC_ALL, '' )
total_sales_format = locale.currency(total_sales, grouping= True)
print("---------------------------------------------------------------------")
# SALES REPORT DATE
if selected_month == "01":
month_name = "JANUARY"
if selected_month == "02":
month_name = "FEBRUARY"
if selected_month == "03":
month_name = "MARCH"
if selected_month == "04":
month_name = "APRIL"
if selected_month == "05":
month_name = "MAY"
if selected_month == "06":
month_name = "JUNE"
if selected_month == "07":
month_name = "JULY"
if selected_month == "08":
month_name = "AUGUST"
if selected_month == "09":
month_name = "SEPTEMBER"
if selected_month == "10":
month_name = "OCTOBER"
if selected_month == "11":
month_name = "NOVEMBER"
if selected_month == "12":
month_name = "DECEMBER"
print("SALES REPORT " + "(" + month_name + " " + selected_year + ")")
# PRINTING TOTAL SALES
print("TOTAL SALES: " + (total_sales_format))
print("\n")
# TOP SELLING PRODUCTS
product_totals = stats.groupby(["product"]).sum()
product_totals = product_totals.sort_values("sales price", ascending=False)
top_sellers = []
rank = 1
for i, row in product_totals.iterrows():
d = {"rank": rank, "name": row.name, "monthly_sales": row["sales price"]}
top_sellers.append(d)
rank = rank + 1
def to_usd(my_price):
return "${0:,.2f}".format(my_price)
print("TOP SELLING PRODUCTS:")
for d in top_sellers:
locale.setlocale( locale.LC_ALL, '' )
print(" " + str(d["rank"]) + ") " + d["name"] +
": " + to_usd(d["monthly_sales"]))
print("\n")
print("---------------------------------------------------------------------")
print("\n")
print("GENERATING BAR CHART...")
print("\n")
print("---------------------------------------------------------------------")
### PRINT BAR CHART
# the next two list comprehensions pull the product names and monthly sales out of the list of dictionaries
x = [p["name"] for p in top_sellers] ## VERY IMPORTANT
y = [p["monthly_sales"] for p in top_sellers] ## VERY IMPORTANT
#sorting in the correct order
x.reverse()
y.reverse()
# create the figure and axes objects
fig, ax = plt.subplots() # enables us to further customize the figure and/or the axes
#formatting chart
usd_formatter = ticker.FormatStrFormatter('$%1.0f')
ax.xaxis.set_major_formatter(usd_formatter)
# CHART GENERATION
plt.barh(x, y)
plt.title("TOP-SELLING PRODUCTS " + "(" + month_name + " " + selected_year + ")") # AXIS TITLES
plt.xlabel('Sales (USD)') # AXIS TITLES
plt.ylabel("Product") # AXIS TITLES
# formatting numbers
for i, v in enumerate(y):
ax.text(v, i, usd_formatter(v), color='black', fontweight='bold')
#https://matplotlib.org/users/colors.html
#https://matplotlib.org/3.1.0/gallery/pyplots/text_commands.html#sphx-glr-gallery-pyplots-text-commands-py
plt.tight_layout() # ensures all areas of the chart are visible by default (fixes labels getting cut off)
plt.show()
exit()
## FULL SOLUTION PROVIDED BY THE PROFESSOR
# # this section needs to come before the chart construction
# fig, ax = plt.subplots() # enables us to further customize the figure and/or the axes
# usd_formatter = ticker.FormatStrFormatter('$%1.0f')
# ax.xaxis.set_major_formatter(usd_formatter)
#
# # chart construction
# plt.barh(sorted_products, sorted_sales)
# plt.title(chart_title)
# plt.ylabel("Product")
# plt.xlabel("Monthly Sales (USD)")
#
# plt.tight_layout() # ensures all areas of the chart are visible by default (fixes labels getting cut off)
# plt.show() | 27.80829 | 157 | 0.63406 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,989 | 0.556922 |
c0f84e0c95d431aa5ccd03662827d19008ac7c6c | 2,235 | py | Python | product_spider/spiders/medicalisotopes_spider.py | Pandaaaa906/product_spider | cc7f865f53fd3ed68f4869be3ba917c8373dfcf2 | [
"MIT"
]
| null | null | null | product_spider/spiders/medicalisotopes_spider.py | Pandaaaa906/product_spider | cc7f865f53fd3ed68f4869be3ba917c8373dfcf2 | [
"MIT"
]
| null | null | null | product_spider/spiders/medicalisotopes_spider.py | Pandaaaa906/product_spider | cc7f865f53fd3ed68f4869be3ba917c8373dfcf2 | [
"MIT"
]
| null | null | null | from urllib.parse import urljoin
from scrapy import Request
from product_spider.items import RawData
from product_spider.utils.functions import strip
from product_spider.utils.spider_mixin import BaseSpider
class MedicalIsotopesSpider(BaseSpider):
name = "medicalisotopes"
base_url = "https://www.medicalisotopes.com/"
start_urls = ['https://www.medicalisotopes.com/productsbycategories.php', ]
def parse(self, response):
a_nodes = response.xpath('//div[contains(@class, "main-content")]//a')
for a in a_nodes:
parent = a.xpath('./text()').get()
url = a.xpath('./@href').get()
yield Request(urljoin(self.base_url, url), callback=self.parse_list, meta={'parent': parent})
def parse_list(self, response):
rel_urls = response.xpath('//td[2]/a/@href').getall()
parent = response.meta.get('parent')
for rel_url in rel_urls:
yield Request(urljoin(self.base_url, rel_url), callback=self.parse_detail, meta={'parent': parent})
next_page = response.xpath('//a[@class="c-page"]/following-sibling::a[text()!="NEXT"]/@href').get()
if next_page:
yield Request(urljoin(self.base_url, next_page), callback=self.parse_list, meta={'parent': parent})
def parse_detail(self, response):
tmp = '//td[contains(text(), {!r})]/following-sibling::td//text()'
package = strip(response.xpath('normalize-space(//td/table//td[1]/text())').get())
d = {
'brand': 'medicalisotopes',
'parent': response.meta.get('parent'),
'cat_no': strip(response.xpath(tmp.format("Catalog Number:")).get()),
'en_name': strip(response.xpath('//th[contains(text(), "Product:")]/following-sibling::th/text()').get()),
'cas': strip(response.xpath(tmp.format("CAS Number:")).get()),
'mf': strip(''.join(response.xpath(tmp.format("Formula:")).getall())),
'mw': strip(response.xpath(tmp.format("Molecular Weight:")).get()),
'info3': package and package.rstrip('\xa0='),
'info4': strip(response.xpath('//td/table//td[2]/text()').get()),
'prd_url': response.url,
}
yield RawData(**d)
| 46.5625 | 118 | 0.619239 | 2,023 | 0.905145 | 1,806 | 0.808054 | 0 | 0 | 0 | 0 | 641 | 0.286801 |
c0f8d44852a7507fedd1ad9b67823c5e5f22423a | 117 | py | Python | fnexchange/core/config.py | dnif-archive/fnExchange | d75431b37da3193447b919b4be2e0104266156f1 | [
"Apache-2.0"
]
| 1 | 2017-07-19T22:13:54.000Z | 2017-07-19T22:13:54.000Z | fnexchange/core/config.py | dnif/fnExchange | d75431b37da3193447b919b4be2e0104266156f1 | [
"Apache-2.0"
]
| 1 | 2021-03-25T21:27:21.000Z | 2021-03-25T21:27:21.000Z | fnexchange/core/config.py | dnif-archive/fnExchange | d75431b37da3193447b919b4be2e0104266156f1 | [
"Apache-2.0"
]
| 1 | 2021-07-07T18:55:19.000Z | 2021-07-07T18:55:19.000Z | import yaml
def read_config(path):
with open(path, 'r') as f:
conf = yaml.safe_load(f)
return conf
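# Illustrative usage (the file name here is just an example):
#   conf = read_config("fnexchange.yml")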
| 14.625 | 32 | 0.615385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.025641 |
c0fa6359178c8344bf4c2736824c78112998b09b | 511 | py | Python | books_and_notes/professional_courses/operating_system/sources/ucore_os_lab/docs/ucore实验指导书/lab2/rename.py | gxw1/review_the_national_post-graduate_entrance_examination | 8812779a7a4ce185a531d120562d5194b697c0c9 | [
"MIT"
]
| 640 | 2019-03-30T11:32:43.000Z | 2022-03-31T14:05:18.000Z | books_and_notes/professional_courses/operating_system/sources/ucore_os_lab/docs/ucore实验指导书/lab2/rename.py | yyzVegst/review_the_national_post-graduate_entrance_examination | 8812779a7a4ce185a531d120562d5194b697c0c9 | [
"MIT"
]
| 6 | 2019-07-22T01:57:24.000Z | 2022-01-20T15:03:16.000Z | books_and_notes/professional_courses/operating_system/sources/ucore_os_lab/docs/ucore实验指导书/lab2/rename.py | yyzVegst/review_the_national_post-graduate_entrance_examination | 8812779a7a4ce185a531d120562d5194b697c0c9 | [
"MIT"
]
| 212 | 2019-04-10T02:31:50.000Z | 2022-03-30T02:32:47.000Z | #!/usr/bin/python
import os
lines = [line for line in open("hehe.txt")]
for line in lines:
i = 0
for c in line:
if (c != '_' and not (c >= '0' and c <= '9')):
break
i+=1
cmd = "mv " + line[0:i].strip() + line[i+5:].strip() + " lab2_" + line[0:i].strip() + line[i+5:].strip()
print cmd
os.system(cmd)
    continue
    # NOTE: the rest of this loop body is unreachable because of the
    # unconditional continue above.
    index = line.find("_lab2_")
num = line[0 : index + 1]
value = line[index + 6 : ]
nn = "lab2_" + num + value
cmd = "mv 3_" + line.strip() + " " + nn
#print cmd
os.system(cmd)
| 19.653846 | 106 | 0.547945 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 84 | 0.164384 |
c0fa6b58d78457006cba2d731fe207bcc18728f5 | 9,819 | py | Python | smiles_parsers/Smarts.py | UnixJunkie/frowns | 427e4c11a8a4dbe865828d18221899478497795e | [
"BSD-3-Clause"
]
| null | null | null | smiles_parsers/Smarts.py | UnixJunkie/frowns | 427e4c11a8a4dbe865828d18221899478497795e | [
"BSD-3-Clause"
]
| null | null | null | smiles_parsers/Smarts.py | UnixJunkie/frowns | 427e4c11a8a4dbe865828d18221899478497795e | [
"BSD-3-Clause"
]
| null | null | null | #!/usr/bin/enb python
import string, re
import Handler
#######################
# Define some regular expressions inside a quoted string
# then turn the string into the actual data structure.
# (I found it was easiest to understand when done this way.)
definitions = r"""
# These are the atomic symbols Daylight allows outside of []s
# See "atom_class" for names like "a" and "A"
raw_atom Cl|Br|[cnospBCNOFPSI*]
# For atoms inside of []s
open_bracket \[
close_bracket \]
# See "element_modifiers" for the patterns for element names
# charges, chiralities, H count, etc.
# [235U]
weight \d+
# [#6]
atomic_number #\d+
# [!C]
atom_not !
# & is highest (an "and")
# , is next (an "or")
# ; is lowest (an "and")
# [n&H] [n,H] [c,h;H1]
atom_binary [&,;]
# C.C
dot \.
# - single bond (aliphatic)
# / directional single bond "up"
# \ directional single bond "down"
# /? directional bond "up or unspecified"
# \? directional bond "down or unspecified"
# = double bond
# # triple bond
# : aromatic bond
# ~ any bond (wildcard)
# @ any ring bond
bond [/\\]\??|[=#:~@-]
# *!:* -- not aromatic
bond_not !
# *@;!:* -- same as !:
bond_binary [&;,]
# (C).(C)
open_zero \(
# C(C)
open_branch \(
# [$(*C);$(*CC)]
open_recursive_smarts \$\(
# special cased because it closes open_zero, open_branch, and
# recursive_smarts
close_parens \)
# Ring closures, 1, %5 %99 (and even %00 for what it's worth)
closure \d|%\d\d?
"""
#######################
# Turn the above string into key/value pairs where the
# values are the compiled regular expressions.
info = {}
for line in string.split(definitions, "\n"):
line = string.strip(line)
if not line or line[:1] == "#":
continue
name, pattern = string.split(line)
info[name] = re.compile(pattern)
del line, name, pattern
info["atom_class"] = re.compile(r"""
(?P<raw_aromatic>a)| # Not really sure what these mean
(?P<raw_b_unknown>b)|
(?P<raw_f_unknown>f)|
(?P<raw_h_unknown>h)|
(?P<raw_i_unknown>i)|
(?P<raw_r_unknown>r)|
(?P<raw_aliphatic>A)|
(?P<raw_R_unknown>R)
""", re.X)
# 'H' is used for the hydrogen count, so those searches require a
# special recursive SMARTS definition. Eg, for deuterium or tritium
# [$([2H]),$([3H])]
# This is implemented as a special-case hack. Note: if there's
# an error in the parse string in this section then the error
# location will point to the start of this term, not at the
# character that really caused the error. Can be fixed with an
# 'error_' like I did for the SMILES -- not needed for now. XXX
hydrogen_term_fields = [
"open_recursive_smarts",
"open_bracket",
"weight",
"element",
"positive_count",
"positive_symbols",
"negative_count",
"negative_symbols",
"close_bracket",
"close_recursive_smarts",
]
info["hydrogen_term"] = re.compile(r"""
(?P<open_recursive_smarts>\$\()
(?P<open_bracket>\[)
(?P<weight>\d+)? # optional molecular weight [2H]
(?P<element>H) # Must be a hydrogen
( # optional charge
(?P<positive_count>\+\d+)| # +3
(?P<positive_symbols>\++)| # ++
(?P<negative_count>\-\d+)| # -2
(?P<negative_symbols>\-+)| # ---
)?
(?P<close_bracket>\])
(?P<close_recursive_smarts>\))
""", re.X)
element_symbols_pattern = \
r"C[laroudsemf]?|Os?|N[eaibdpos]?|S[icernbmg]?|P[drmtboau]?|" \
r"H[eofgas]|c|n|o|s|p|A[lrsgutcm]|B[eraik]?|Dy|E[urs]|F[erm]?|" \
r"G[aed]|I[nr]?|Kr?|L[iaur]|M[gnodt]|R[buhenaf]|T[icebmalh]|" \
r"U|V|W|Xe|Yb?|Z[nr]|\*"
info["element_modifier"] = re.compile(r"""
(?P<element>
# This does *not* contain H. Hydrogen searches must be done
# with a special recursive SMARTS. On the other hand, it does
# include the lower case aromatic names.
""" + element_symbols_pattern + r"""
)|
(?P<aromatic>a)| # aromatic
(?P<aliphatic>A)| # Aliphatic
(?P<degree>D\d+)| # Degree<n>
(?P<total_hcount>H\d*)| # total Hydrogen count<n> (defaults to 1)
(?P<imp_hcount>h\d*)| # implicit hydrogen count<n> (defaults to 1)
(?P<ring_membership>R\d*)| # in <n> Rings (no n means any rings)
(?P<ring_size>r\d*)| # in a ring of size <n> (no n means any rings)
(?P<valence>v\d+)| # total bond order of <n>
(?P<connectivity>X\d+)| # <n> total connections
(?P<positive_count>\+\d+)| # +2 +3
(?P<positive_symbols>\++)| # + ++ +++
(?P<negative_count>\-\d+)| # -1 -4
(?P<negative_symbols>\-+)| # -- - -------
# XXX What about chiral_count?
(?P<chiral_named> # The optional '?' means "or unspecified"
@TH[12]\??| # @TH1 @TH2?
@AL[12]\??| # @AL2?
@SP[123]\??| # @SP3 @SP1?
@TB(1[0-9]?|20?|[3-9])\??| # @TH{1 through 20}
@OH(1[0-9]?|2[0-9]?|30?|[4-9])\?? # @OH{1 through 30}
)|
(?P<chiral_symbols>@@?\??) # @ (anticlockwise) or @@ (clockwise)
""", re.X)
# The ')' closes three different open parens. This maps from the
# previous open state to the appropriate close state.
close_parens_states = {
"open_branch": "close_branch",
"open_recursive_smarts": "close_recursive_smarts",
"open_zero": "close_zero",
}
#### Some helpful definitions to reduce clutter and complication
# Possible transitions from the start node. Also visited after
# a '.' disconnect or in a recursive SMARTS.
expecting_start = ("raw_atom", "atom_class", "open_bracket", "open_zero")
# Looking for node definition, like "C" or "a" or "["
expecting_atom = ("raw_atom", "atom_class", "open_bracket")
# Inside of []s: 235U, #6, R, $([2H]), $(*=C), !
expecting_element_start = ("weight", "atomic_number",
"element_modifier", "hydrogen_term",
"open_recursive_smarts", "atom_not")
# the ';' in [n;H1] or the ']' at the end
expecting_element_end = ("atom_binary", "close_bracket")
# All bonds start with a '!' or one of the bond symbols
expecting_bond_start = ("bond", "bond_not")
expecting_raw_term = expecting_atom + expecting_bond_start + \
("close_parens", "open_branch", "dot", "closure")
expecting_modifier = ("element_modifier", "open_recursive_smarts")
table = {
"start": expecting_start,
# (C).(R).[U].([$(*)])
"open_zero": ("raw_atom", "atom_class", "open_bracket"),
# as well as (CC(C))
"close_zero": ("dot", "close_parens"),
# A raw term are the things like 'C', '[U]', '%10', '.', '(', '!#'
"raw_atom": expecting_raw_term,
# An atom_class is a non-specific atom term, like 'A' or 'r'
"atom_class": expecting_raw_term,
# the []s
"open_bracket": expecting_element_start,
"close_bracket": expecting_raw_term,
# Yes, '[!!!!C]' is legal, according to the docs, but it isn't
# supported by the parser, unless you optimze it.
"atom_not": expecting_element_start,
"atom_binary": expecting_element_start,
# "14N", "14a", ...
# Note that weight can only be set once so it isn't a modifier
# Also, "14#6" isn't legal (tested against the toolkit)
"weight": expecting_modifier,
# "#6R2" or "#8," or "#7]"
# The atomic_number can only be set once so it isn't a modifier
"atomic_number": expecting_modifier + expecting_element_end,
# All of these are type of modifiers
"element_modifier": expecting_modifier + expecting_element_end,
"hydrogen_term": expecting_modifier + expecting_element_end,
"close_recursive_smarts": expecting_modifier + expecting_element_end,
# This it the recursive part -- goes back to the beginning
"open_recursive_smarts": expecting_start,
# C=C, C1CCC=1, C~-C, C=(C)C, C=,-C
"bond": expecting_atom + ("closure", "bond", "open_branch",
"bond_binary"),
# C!!=C
"bond_not": expecting_bond_start,
# C=,-C
"bond_binary": expecting_bond_start,
"closure": expecting_raw_term,
"close_branch": expecting_raw_term,
"open_branch": expecting_atom + expecting_bond_start + ("dot",),
# After a "." we can start all over again
"dot": expecting_start,
}
def tokenize(s, handler = Handler.TokenHandler()):
expected = table["start"]
parens_stack = []
n = len(s)
i = 0
handler.begin()
while i < n:
for state in expected:
m = info[state].match(s, i)
if m:
break
else:
handler.error("Unknown character", i, s[i:])
return
if close_parens_states.has_key(state):
parens_stack.append(state)
elif state == "close_parens":
try:
state = close_parens_states[parens_stack.pop()]
except IndexError:
# Too many close parens
handler.error("Too many ')'", i, s[i:])
return
d = m.groupdict()
if d and state == "hydrogen_term":
# Special case the hydrogen term
for field in hydrogen_term_fields:
if d[field] is not None:
handler.add_token(field, i, d[field])
#print " --> New state:", state
else:
name = state
if d:
# There should only be one match
for name, v in d.items():
if v is not None:
break
handler.add_token(name, i, m.group(0))
expected = table[state]
i = m.end(0)
handler.end()
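# Minimal usage sketch (not part of the original module): run the tokenizer on a
# small SMARTS pattern with the default Handler.TokenHandler, which is assumed
# here to simply record/report each (token name, position, text) it receives.
if __name__ == "__main__":
    tokenize("[C;H1]=O")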
| 31.776699 | 76 | 0.575517 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,703 | 0.682656 |
c0fc8fa48988775dbfa087aacb17d54064d6211e | 2,915 | py | Python | visartm/settings_default.py | alexeyev/visartm | d19e193b3c084d7f355a45b966c8bb2ebb6fa366 | [
"BSD-3-Clause"
]
| 1 | 2020-10-01T10:11:21.000Z | 2020-10-01T10:11:21.000Z | visartm/settings_default.py | alexeyev/visartm | d19e193b3c084d7f355a45b966c8bb2ebb6fa366 | [
"BSD-3-Clause"
]
| null | null | null | visartm/settings_default.py | alexeyev/visartm | d19e193b3c084d7f355a45b966c8bb2ebb6fa366 | [
"BSD-3-Clause"
]
| null | null | null | # This is default settings for VisARTM for local usage
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA_DIR = os.path.join(BASE_DIR, "data")
SECRET_KEY = 'yj_fhwf$-8ws1%a_vl5c0lf($#ke@c3+lu3l-f733k(j-!q*57'
DEBUG = True
ALLOWED_HOSTS = ["127.0.0.1"]
THREADING = True
REGISTRATION_CLOSED = False
DEFAULT_FROM_EMAIL = '[email protected]'
SERVER_EMAIL = '[email protected]'
EMAIL_HOST = 'smtp.yandex.ru'
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = ''
EMAIL_PORT = 587
EMAIL_USE_TLS = True
INSTALLED_APPS = [
'test_without_migrations',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'datasets',
'visual',
'models',
'assessment',
'research',
'tools',
'accounts'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'visartm.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'visartm.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'visartm.sqlite',
}
}
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': (
'django.contrib.auth.password_validation.'
'UserAttributeSimilarityValidator'),
},
{
'NAME': (
'django.contrib.auth.password_validation.'
'MinimumLengthValidator'),
},
{
'NAME': (
'django.contrib.auth.password_validation.'
'CommonPasswordValidator'),
},
{
'NAME': (
'django.contrib.auth.password_validation.'
'NumericPasswordValidator'),
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
| 23.699187 | 71 | 0.646312 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,620 | 0.555746 |
c0fe21b15a59a46814a6a24b71ed4f6e93699049 | 8,402 | py | Python | pyvizio/const.py | jezzab/pyvizio | 8086f9e5aac49d1d99ade02684ca35c05e03a7eb | [
"MIT"
]
| 72 | 2017-08-08T19:32:12.000Z | 2022-03-18T03:18:41.000Z | pyvizio/const.py | raman325/pyvizio | 9cf45fcc9b409caf223a38d8f79c775742ab4127 | [
"MIT"
]
| 48 | 2017-09-16T16:37:54.000Z | 2022-01-23T20:43:42.000Z | pyvizio/const.py | ConnectionMaster/pyvizio | 0fe4558557917509d3da3bb24f9221f15ba901ce | [
"MIT"
]
| 42 | 2017-09-04T22:59:21.000Z | 2022-03-18T03:18:30.000Z | """pyvizio constants."""
DEVICE_CLASS_SPEAKER = "speaker"
DEVICE_CLASS_TV = "tv"
DEVICE_CLASS_CRAVE360 = "crave360"
DEFAULT_DEVICE_ID = "pyvizio"
DEFAULT_DEVICE_CLASS = DEVICE_CLASS_TV
DEFAULT_DEVICE_NAME = "Python Vizio"
DEFAULT_PORTS = [7345, 9000]
DEFAULT_TIMEOUT = 5
MAX_VOLUME = {DEVICE_CLASS_TV: 100, DEVICE_CLASS_SPEAKER: 31, DEVICE_CLASS_CRAVE360: 100}
# Current Input when app is active
INPUT_APPS = ["SMARTCAST", "CAST"]
# App name returned when it is not in app dictionary
UNKNOWN_APP = "_UNKNOWN_APP"
NO_APP_RUNNING = "_NO_APP_RUNNING"
SMARTCAST_HOME = "SmartCast Home"
APP_CAST = "Cast"
# NAME_SPACE values that appear to be equivalent
EQUIVALENT_NAME_SPACES = (2, 4)
APP_HOME = {
"name": SMARTCAST_HOME,
"country": ["*"],
"config": [
{
"NAME_SPACE": 4,
"APP_ID": "1",
"MESSAGE": "http://127.0.0.1:12345/scfs/sctv/main.html",
}
],
}
# No longer needed but kept around in case the external source for APPS is unavailable
APPS = [
{
"name": "Prime Video",
"country": ["*"],
"id": ["33"],
"config": [
{
"APP_ID": "4",
"NAME_SPACE": 4,
"MESSAGE": "https://atv-ext.amazon.com/blast-app-hosting/html5/index.html?deviceTypeID=A3OI4IHTNZQWDD",
},
{"NAME_SPACE": 2, "APP_ID": "4", "MESSAGE": "None"},
],
},
{
"name": "CBS All Access",
"country": ["usa"],
"id": ["9"],
"config": [{"NAME_SPACE": 2, "APP_ID": "37", "MESSAGE": "None"}],
},
{
"name": "CBS News",
"country": ["usa", "can"],
"id": ["56"],
"config": [{"NAME_SPACE": 2, "APP_ID": "42", "MESSAGE": "None"}],
},
{
"name": "Crackle",
"country": ["usa"],
"id": ["8"],
"config": [{"NAME_SPACE": 2, "APP_ID": "5", "MESSAGE": "None"}],
},
{
"name": "Curiosity Stream",
"country": ["usa", "can"],
"id": ["37"],
"config": [{"NAME_SPACE": 2, "APP_ID": "12", "MESSAGE": "None"}],
},
{
"name": "Fandango Now",
"country": ["usa"],
"id": ["24"],
"config": [{"NAME_SPACE": 2, "APP_ID": "7", "MESSAGE": "None"}],
},
{
"name": "FilmRise",
"country": ["usa"],
"id": ["47"],
"config": [{"NAME_SPACE": 2, "APP_ID": "24", "MESSAGE": "None"}],
},
{
"name": "Flixfling",
"country": ["*"],
"id": ["49"],
"config": [{"NAME_SPACE": 2, "APP_ID": "36", "MESSAGE": "None"}],
},
{
"name": "Haystack TV",
"country": ["usa", "can"],
"id": ["35"],
"config": [
{
"NAME_SPACE": 0,
"APP_ID": "898AF734",
"MESSAGE": '{"CAST_NAMESPACE":"urn:x-cast:com.google.cast.media","CAST_MESSAGE":{"type":"LOAD","media":{},"autoplay":true,"currentTime":0,"customData":{"platform":"sctv"}}}',
}
],
},
{
"name": "Hulu",
"country": ["usa"],
"id": ["19"],
"config": [
{
"APP_ID": "3",
"NAME_SPACE": 4,
"MESSAGE": "https://viziosmartcast.app.hulu.com/livingroom/viziosmartcast/1/index.html#initialize",
},
{"NAME_SPACE": 2, "APP_ID": "3", "MESSAGE": "None"},
],
},
{
"name": "iHeartRadio",
"country": ["usa"],
"id": ["11"],
"config": [{"NAME_SPACE": 2, "APP_ID": "6", "MESSAGE": "None"}],
},
{
"name": "NBC",
"country": ["usa"],
"id": ["43"],
"config": [{"NAME_SPACE": 2, "APP_ID": "10", "MESSAGE": "None"}],
},
{
"name": "Netflix",
"country": ["*"],
"id": ["34"],
"config": [{"NAME_SPACE": 3, "APP_ID": "1", "MESSAGE": "None"}],
},
{
"name": "Plex",
"country": ["usa", "can"],
"id": ["40"],
"config": [
{
"APP_ID": "9",
"NAME_SPACE": 4,
"MESSAGE": "https://plex.tv/web/tv/vizio-smartcast",
},
{"NAME_SPACE": 2, "APP_ID": "9", "MESSAGE": "None"},
],
},
{
"name": "Pluto TV",
"country": ["usa"],
"id": ["12"],
"config": [
{"APP_ID": "65", "NAME_SPACE": 4, "MESSAGE": "https://smartcast.pluto.tv"},
{
"NAME_SPACE": 0,
"APP_ID": "E6F74C01",
"MESSAGE": '{"CAST_NAMESPACE":"urn:x-cast:tv.pluto","CAST_MESSAGE":{"command":"initializePlayback","channel":"","episode":"","time":0}}',
},
],
},
{
"name": "RedBox",
"country": ["usa"],
"id": ["55"],
"config": [{"NAME_SPACE": 2, "APP_ID": "41", "MESSAGE": "None"}],
},
{
"name": "TasteIt",
"country": ["*"],
"id": ["52"],
"config": [{"NAME_SPACE": 2, "APP_ID": "26", "MESSAGE": "None"}],
},
{
"name": "Toon Goggles",
"country": ["usa", "can"],
"id": ["46"],
"config": [{"NAME_SPACE": 2, "APP_ID": "21", "MESSAGE": "None"}],
},
{
"name": "Vudu",
"country": ["usa"],
"id": ["6"],
"config": [
{
"APP_ID": "31",
"NAME_SPACE": 4,
"MESSAGE": "https://my.vudu.com/castReceiver/index.html?launch-source=app-icon",
}
],
},
{
"name": "XUMO",
"country": ["usa"],
"id": ["27"],
"config": [
{
"NAME_SPACE": 0,
"APP_ID": "36E1EA1F",
"MESSAGE": '{"CAST_NAMESPACE":"urn:x-cast:com.google.cast.media","CAST_MESSAGE":{"type":"LOAD","media":{},"autoplay":true,"currentTime":0,"customData":{}}}',
}
],
},
{
"name": "YouTubeTV",
"country": ["usa", "mexico"],
"id": ["45"],
"config": [{"NAME_SPACE": 5, "APP_ID": "3", "MESSAGE": "None"}],
},
{
"name": "YouTube",
"country": ["*"],
"id": ["44"],
"config": [{"NAME_SPACE": 5, "APP_ID": "1", "MESSAGE": "None"}],
},
{
"name": "Baeble",
"country": ["usa"],
"id": ["39"],
"config": [{"NAME_SPACE": 2, "APP_ID": "11", "MESSAGE": "None"}],
},
{
"name": "DAZN",
"country": ["usa", "can"],
"id": ["57"],
"config": [{"NAME_SPACE": 2, "APP_ID": "34", "MESSAGE": "None"}],
},
{
"name": "FitFusion by Jillian Michaels",
"country": ["usa", "can"],
"id": ["54"],
"config": [{"NAME_SPACE": 2, "APP_ID": "39", "MESSAGE": "None"}],
},
{
"name": "Newsy",
"country": ["usa", "can"],
"id": ["38"],
"config": [{"NAME_SPACE": 2, "APP_ID": "15", "MESSAGE": "None"}],
},
{
"name": "Cocoro TV",
"country": ["usa", "can"],
"id": ["63"],
"config": [{"NAME_SPACE": 2, "APP_ID": "55", "MESSAGE": "None"}],
},
{
"name": "ConTV",
"country": ["usa", "can"],
"id": ["41"],
"config": [{"NAME_SPACE": 2, "APP_ID": "18", "MESSAGE": "None"}],
},
{
"name": "Dove Channel",
"country": ["usa", "can"],
"id": ["42"],
"config": [{"NAME_SPACE": 2, "APP_ID": "16", "MESSAGE": "None"}],
},
{
"name": "Love Destination",
"country": ["*"],
"id": ["64"],
"config": [{"NAME_SPACE": 2, "APP_ID": "57", "MESSAGE": "None"}],
},
{
"name": "WatchFree",
"country": ["usa"],
"id": ["48"],
"config": [{"NAME_SPACE": 2, "APP_ID": "22", "MESSAGE": "None"}],
},
{
"name": "AsianCrush",
"country": ["usa", "can"],
"id": ["50"],
"config": [
{
"NAME_SPACE": 2,
"APP_ID": "27",
"MESSAGE": "https://html5.asiancrush.com/?ua=viziosmartcast",
}
],
},
{
"name": "Disney+",
"country": ["usa"],
"id": ["51"],
"config": [
{
"NAME_SPACE": 4,
"APP_ID": "75",
"MESSAGE": "https://cd-dmgz.bamgrid.com/bbd/vizio_tv/index.html",
}
],
},
]
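
# Illustrative helpers (not part of the original pyvizio API; names below are
# assumptions added for demonstration): they show how the APPS records above can
# be searched either by display name or by an (APP_ID, NAME_SPACE) pair.
def find_app_by_name(name):
    """Return the first APPS entry whose name matches (case-insensitive), else None."""
    lowered = name.casefold()
    return next((app for app in APPS if app["name"].casefold() == lowered), None)


def find_app_by_config(app_id, name_space):
    """Return the APPS entry containing a config with the given APP_ID and NAME_SPACE."""
    for app in APPS:
        for config in app.get("config", []):
            if config["APP_ID"] == app_id and config["NAME_SPACE"] == name_space:
                return app
    return None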

# ----- etl/steps/data/garden/owid/latest/covid.py (repo: c1x1x00xxPentium/etl, license: MIT) -----
#
# covid19.py
# owid/latest/covid
#
from owid.catalog.meta import License, Source
import datetime as dt
import pandas as pd
from owid.catalog import Dataset, Table
from etl.helpers import downloaded
MEGAFILE_URL = "https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv"
def run(dest_dir: str) -> None:
d = create_dataset(dest_dir)
with downloaded(MEGAFILE_URL) as filename:
df = pd.read_csv(filename)
df["date"] = pd.to_datetime(df.date)
for col in ["iso_code", "continent", "location"]:
df[col] = df[col].astype("category")
df.set_index(["iso_code", "date"], inplace=True)
t = Table(df)
t.metadata.short_name = "covid"
d.add(t)
def create_dataset(dest_dir: str) -> Dataset:
d = Dataset.create_empty(dest_dir)
d.metadata.short_name = "covid19"
d.metadata.namespace = "owid"
d.metadata.sources = [
Source(
name="Multiple sources via Our World In Data",
description="Our complete COVID-19 dataset maintained by Our World in Data. We will update it daily throughout the duration of the COVID-19 pandemic.",
url="https://github.com/owid/covid-19-data/tree/master/public/data",
source_data_url="https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/covid-19-data.csv",
owid_data_url="https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/covid-19-data.csv",
date_accessed=str(dt.date.today()),
publication_date=str(dt.date.today()),
publication_year=dt.date.today().year,
)
]
d.metadata.licenses = [
License(
name="Other (Attribution)",
url="https://github.com/owid/covid-19-data/tree/master/public/data#license",
)
]
d.save()
return d
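
# Illustrative invocation (an assumption, not part of the original step): in the
# owid/etl framework `run` is normally called by the ETL runner, which supplies
# `dest_dir`. Running this directly downloads the full OWID COVID-19 CSV.
if __name__ == "__main__":
    run("data/garden/owid/latest/covid")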

# ----- tests/integration_tests/test_suassystem_interop_client.py (repo: liyu711/SUAS, license: MIT) -----
import unittest
import interop
from SUASSystem import InteropClientConverter
from SUASSystem import Location
class SDATestCase(unittest.TestCase):
def setUp(self):
self.interop_client = InteropClientConverter()
def test_submit_target(self):
compiled_target_info = {
"latitude" : 38,
"longitude" : -77,
"orientation" : "s",
"shape" : "circle",
"background_color" : "white",
"alphanumeric" : "ABC",
"alphanumeric_color" : "black",
}
target_image_relative_path = "tests/images/target.PNG"
target_id = self.interop_client.post_standard_target(compiled_target_info, target_image_relative_path)
def test_submit_position(self):
"""
Test POST of position data
"""
self.interop_client.post_telemetry(Location(38, 76, 100), 350.0)
def test_get_obstacles(self):
"""
Test GET of obstacles
"""
self.interop_client.get_obstacles()
def test_get_active_mission(self):
"""
Test GET of active mission
"""
self.interop_client.get_active_mission()
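
# Not part of the original file: an assumed convenience guard so the module can
# also be run directly with `python test_suassystem_interop_client.py` instead of
# only through a test runner.
if __name__ == '__main__':
    unittest.main()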

# ----- xlremed/dataset/docred.py (repo: osainz59/XLREMed, license: Apache-2.0) -----
import os
import json
#import fire
from collections import defaultdict
from pprint import pprint
from itertools import product
from .dataset import Dataset
class DocRED(Dataset):
def __init__(self, path):
super(DocRED, self).__init__(name='DocRED')
self.path = path
self._init()
self.train_data = None
self.val_data = None
self.test_data = None
def _init(self):
self.rel_info = json.load(open(os.path.join(self.path, 'rel_info.json')))
self.rel2id = {v: i for i, v in enumerate(self.rel_info.keys())}
self.train_path = os.path.join(self.path, 'train_annotated.json')
self.train_dist_path = os.path.join(self.path, 'train_distant.json')
self.dev_path = os.path.join(self.path, 'dev.json')
self.test_path = os.path.join(self.path, 'test.json')
def _read_instances(self, path, labels=False):
with open(path, 'rt') as in_file:
data = json.load(in_file)
output = []
for i, instance in enumerate(data):
text = ""
sentences_lenghts = []
l = 0
for sent in instance['sents']:
sentences_lenghts.append(l)
l += len(sent)
text += " " + " ".join(sent)
entities = []
ent2id = defaultdict(list)
for i, ent in enumerate(instance['vertexSet']):
idx = f"#{i}"
for elem in ent:
entities.append( (idx, elem['name'], sentences_lenghts[elem['sent_id']] + elem['pos'][0],
sentences_lenghts[elem['sent_id']] + elem['pos'][1], elem['type']) )
ent2id[f"{elem['sent_id']}#{i}"].append(len(entities) - 1)
if labels:
relation_facts = []
for label in instance['labels']:
heads, tails = [], []
for evidence in label['evidence']:
for h in ent2id.get(f"{evidence}#{label['h']}", []):
heads.append(h)
for t in ent2id.get(f"{evidence}#{label['t']}", []):
tails.append(t)
for head, tail in product(heads, tails):
relation_facts.append( (self.rel2id[label['r']], head, tail) )
text = self.tokenizer.encode(text)
output.append( (text, entities) if not labels else (text, entities, relation_facts) )
return output
def get_train(self):
if not self.train_data:
self.train_data = self._read_instances(self.train_path, labels=True)
return self.train_data
def get_val(self):
if not self.val_data:
self.val_data = self._read_instances(self.dev_path, labels=True)
return self.val_data
def get_test(self):
if not self.test_data:
self.test_data = self._read_instances(self.test_path, labels=False)
return self.test_data
def test():
dataset = DocRED('data/DocRED')
for instance in dataset.get_train():
pprint(instance)
break
if __name__ == "__main__":
#fire.Fire(test)
    test()

# ----- models/account.py (repo: Tingesplatform/tokenomics, license: MIT) -----
import os
class Account:
ACCOUNTS_STORAGE = {}
"""Ethereum account"""
def __init__(self):
self.address = "0x" + os.urandom(20).hex()
self.ACCOUNTS_STORAGE[self.address] = self
def __str__(self):
return f'Account {self.address}'
__repr__ = __str__
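
# Illustrative usage (not in the original module): every Account registers itself
# in the shared ACCOUNTS_STORAGE dict, keyed by its randomly generated address.
if __name__ == "__main__":
    acct = Account()
    print(acct)                                      # e.g. "Account 0x..."
    print(acct.address in Account.ACCOUNTS_STORAGE)  # True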

# ----- swyft/networks/classifier.py (repo: undark-lab/swyft, license: MIT) -----
from abc import ABC, abstractmethod
from typing import Dict, Hashable, Tuple
import torch
import torch.nn as nn
import swyft
import swyft.utils
from swyft.networks.channelized import ResidualNetWithChannel
from swyft.networks.standardization import (
OnlineDictStandardizingLayer,
OnlineStandardizingLayer,
)
from swyft.types import Array, MarginalIndex, ObsShapeType
class HeadTailClassifier(ABC):
"""Abstract class which ensures that child classifier networks will function with swyft"""
@abstractmethod
def head(self, observation: Dict[Hashable, torch.Tensor]) -> torch.Tensor:
"""convert the observation into a tensor of features
Args:
observation: observation type
Returns:
a tensor of features which can be utilized by tail
"""
pass
@abstractmethod
def tail(self, features: torch.Tensor, parameters: torch.Tensor) -> torch.Tensor:
"""finish the forward pass using features computed by head
Args:
features: output of head
parameters: the parameters normally given to forward pass
Returns:
the same output as `forward(observation, parameters)`
"""
pass
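
# Illustrative sketch (not part of swyft): the head/tail split exists so the
# expensive feature extraction can run once per observation while the cheap tail
# is evaluated for many parameter batches. `classifier` is assumed to be any
# object implementing the HeadTailClassifier interface.
def _example_amortized_evaluation(classifier, observation, parameter_batches):
    features = classifier.head(observation)           # computed once
    return [classifier.tail(features, parameters)     # reused for every batch
            for parameters in parameter_batches]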
class ObservationTransform(nn.Module):
def __init__(
self,
observation_key: Hashable,
observation_shapes: ObsShapeType,
online_z_score: bool,
) -> None:
super().__init__()
self.observation_key = observation_key
self.observation_shapes = observation_shapes
self.flatten = nn.Flatten()
if online_z_score:
self.online_z_score = OnlineDictStandardizingLayer(self.observation_shapes)
else:
self.online_z_score = nn.Identity()
def forward(self, observation: Dict[Hashable, torch.Tensor]) -> torch.Tensor:
z_scored_observation = self.online_z_score(observation)
return self.flatten(z_scored_observation[self.observation_key]) # B, O
@property
def n_features(self) -> int:
with torch.no_grad():
fabricated_observation = {
key: torch.rand(2, *shape)
for key, shape in self.observation_shapes.items()
}
_, n_features = self.forward(fabricated_observation).shape
return n_features
class ParameterTransform(nn.Module):
def __init__(
self, n_parameters: int, marginal_indices: MarginalIndex, online_z_score: bool
) -> None:
super().__init__()
self.register_buffer(
"marginal_indices",
torch.tensor(swyft.utils.tupleize_marginal_indices(marginal_indices)),
)
self.n_parameters = torch.Size([n_parameters])
if online_z_score:
self.online_z_score = OnlineStandardizingLayer(self.n_parameters)
else:
self.online_z_score = nn.Identity()
def forward(self, parameters: torch.Tensor) -> torch.Tensor:
parameters = self.online_z_score(parameters)
return self.get_marginal_block(parameters, self.marginal_indices) # B, M, P
@property
def marginal_block_shape(self) -> Tuple[int, int]:
return self.get_marginal_block_shape(self.marginal_indices)
@staticmethod
def is_marginal_block_possible(marginal_indices: MarginalIndex) -> bool:
marginal_indices = swyft.utils.tupleize_marginal_indices(marginal_indices)
return [len(marginal_indices[0]) == len(mi) for mi in marginal_indices]
@classmethod
def get_marginal_block_shape(
cls, marginal_indices: MarginalIndex
) -> Tuple[int, int]:
marginal_indices = swyft.utils.tupleize_marginal_indices(marginal_indices)
assert cls.is_marginal_block_possible(
marginal_indices
), f"Each tuple in {marginal_indices} must have the same length."
return len(marginal_indices), len(marginal_indices[0])
@classmethod
def get_marginal_block(
cls, parameters: Array, marginal_indices: MarginalIndex
) -> torch.Tensor:
depth = swyft.utils.depth(marginal_indices)
tuple_marginal_indices = swyft.utils.tupleize_marginal_indices(marginal_indices)
assert cls.is_marginal_block_possible(
tuple_marginal_indices
), f"Each tuple in {tuple_marginal_indices} must have the same length."
if depth in [0, 1, 2]:
return torch.stack(
[parameters[..., mi] for mi in tuple_marginal_indices], dim=1
)
else:
raise ValueError(
f"{marginal_indices} must be of the form (a) 2, (b) [2, 3], (c) [2, [1, 3]], or (d) [[0, 1], [1, 2]]."
)
class MarginalClassifier(nn.Module):
def __init__(
self,
n_marginals: int,
n_combined_features: int,
hidden_features: int,
num_blocks: int,
dropout_probability: float = 0.0,
use_batch_norm: bool = True,
) -> None:
super().__init__()
self.n_marginals = n_marginals
self.n_combined_features = n_combined_features
self.net = ResidualNetWithChannel(
channels=self.n_marginals,
in_features=self.n_combined_features,
out_features=1,
hidden_features=hidden_features,
num_blocks=num_blocks,
dropout_probability=dropout_probability,
use_batch_norm=use_batch_norm,
)
def forward(
self, features: torch.Tensor, marginal_block: torch.Tensor
) -> torch.Tensor:
fb = features.unsqueeze(1).expand(-1, self.n_marginals, -1) # B, M, O
combined = torch.cat([fb, marginal_block], dim=2) # B, M, O + P
return self.net(combined).squeeze(-1) # B, M
class Network(nn.Module, HeadTailClassifier):
def __init__(
self,
observation_transform: nn.Module,
parameter_transform: nn.Module,
marginal_classifier: nn.Module,
) -> None:
super().__init__()
self.observation_transform = observation_transform
self.parameter_transform = parameter_transform
self.marginal_classifier = marginal_classifier
def forward(
self, observation: Dict[Hashable, torch.Tensor], parameters: torch.Tensor
) -> torch.Tensor:
features = self.observation_transform(observation) # B, O
marginal_block = self.parameter_transform(parameters) # B, M, P
return self.marginal_classifier(features, marginal_block) # B, M
def head(self, observation: Dict[Hashable, torch.Tensor]) -> torch.Tensor:
return self.observation_transform(observation) # B, O
def tail(self, features: torch.Tensor, parameters: torch.Tensor) -> torch.Tensor:
marginal_block = self.parameter_transform(parameters) # B, M, P
return self.marginal_classifier(features, marginal_block) # B, M
def get_marginal_classifier(
observation_key: Hashable,
marginal_indices: MarginalIndex,
observation_shapes: ObsShapeType,
n_parameters: int,
hidden_features: int,
num_blocks: int,
observation_online_z_score: bool = True,
parameter_online_z_score: bool = True,
) -> nn.Module:
observation_transform = ObservationTransform(
observation_key, observation_shapes, online_z_score=observation_online_z_score
)
n_observation_features = observation_transform.n_features
parameter_transform = ParameterTransform(
n_parameters, marginal_indices, online_z_score=parameter_online_z_score
)
n_marginals, n_block_parameters = parameter_transform.marginal_block_shape
marginal_classifier = MarginalClassifier(
n_marginals,
n_observation_features + n_block_parameters,
hidden_features=hidden_features,
num_blocks=num_blocks,
)
return Network(
observation_transform,
parameter_transform,
marginal_classifier,
)
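
# Illustrative usage sketch (the key name, shapes and sizes are assumptions, not
# from the original file): builds a classifier for a flat 10-dimensional
# observation stored under key "x", three model parameters, and the three
# one-dimensional marginals 0, 1 and 2.
def _example_build_network():
    network = get_marginal_classifier(
        observation_key="x",
        marginal_indices=[0, 1, 2],
        observation_shapes={"x": (10,)},
        n_parameters=3,
        hidden_features=64,
        num_blocks=2,
    )
    observation = {"x": torch.rand(8, 10)}  # batch of 8 observations
    parameters = torch.rand(8, 3)           # matching batch of parameters
    return network(observation, parameters)  # per-marginal logits of shape (8, 3)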
if __name__ == "__main__":
pass

# ----- output/models/nist_data/list_pkg/date/schema_instance/nistschema_sv_iv_list_date_min_length_1_xsd/__init__.py (repo: tefra/xsdata-w3c-tests, license: MIT) -----
from output.models.nist_data.list_pkg.date.schema_instance.nistschema_sv_iv_list_date_min_length_1_xsd.nistschema_sv_iv_list_date_min_length_1 import NistschemaSvIvListDateMinLength1
__all__ = [
"NistschemaSvIvListDateMinLength1",
]

# ----- mstrio/distribution_services/contact/contact.py (repo: czyzq/mstrio-py, license: Apache-2.0) -----
from collections import defaultdict
from enum import auto
from typing import Iterable, List, Optional, TYPE_CHECKING, Union
from mstrio import config
from mstrio.api import contacts
from mstrio.distribution_services.contact_group import ContactGroup
from mstrio.distribution_services.device import Device
from mstrio.utils.entity import auto_match_args_entity, DeleteMixin, EntityBase
from mstrio.utils.enum_helper import AutoName
from mstrio.utils.helper import (
camel_to_snake, delete_none_values, Dictable, fetch_objects, get_objects_id
)
from mstrio.users_and_groups.user import User
if TYPE_CHECKING:
from mstrio.connection import Connection
class ContactDeliveryType(AutoName):
EMAIL = auto()
FILE = auto()
PRINTER = auto()
FTP = auto()
MOBILE_ANDROID = auto()
MOBILE_IPHONE = auto()
MOBILE_IPAD = auto()
UNSUPPORTED = auto()
class ContactAddress(Dictable):
"""Representation of contact address object
Attributes:
id: id of contact address, optional
name: contact address' name
physical_address: physical address of contact
delivery_type: object of type ContactDeliveryType
is_default: specifies if address is default, optional,
default value: False
device: instance of Device or string (containing device's id),
if device is a string, connection is required
connection: instance of Connection, optional,
is required if device is string
"""
_FROM_DICT_MAP = {'delivery_type': ContactDeliveryType, 'device': Device.from_dict}
def __init__(self, name: str, physical_address: str,
delivery_type: Union[ContactDeliveryType, str], device: Union['Device', str],
id: Optional[str] = None, is_default: bool = False,
connection: Optional['Connection'] = None):
self.id = id
self.name = name
self.physical_address = physical_address
self.is_default = is_default
self.delivery_type = delivery_type if isinstance(
delivery_type, ContactDeliveryType) else ContactDeliveryType(delivery_type)
if isinstance(device, Device):
self.device = device
else:
if not connection:
raise ValueError('Argument: connection is required if device is a string')
self.device = Device(connection, id=device)
def __repr__(self) -> str:
param_dict = auto_match_args_entity(self.__init__, self, exclude=['self'],
include_defaults=False)
params = [
f"{param}={self.delivery_type}"
if param == 'delivery_type' else f'{param}={repr(value)}'
for param, value in param_dict.items()
]
formatted_params = ', '.join(params)
return f'ContactAddress({formatted_params})'
def to_dict(self, camel_case=True) -> dict:
result = {
'name': self.name,
'id': self.id,
'physicalAddress': self.physical_address,
'deliveryType': self.delivery_type.value,
'deviceId': self.device.id,
'deviceName': self.device.name,
'isDefault': self.is_default
}
return result if camel_case else camel_to_snake(result)
@classmethod
def from_dict(cls, source, connection, to_snake_case=True) -> 'ContactAddress':
source = source.copy()
device_id = source.pop('deviceId')
device_name = source.pop('deviceName')
source['device'] = {'id': device_id, 'name': device_name}
return super().from_dict(source, connection, to_snake_case)
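
# Illustrative construction (the names and device ID are placeholders, not real
# objects): a ContactAddress can be built either from a Device instance or from a
# device ID string, in which case a Connection must be supplied so the Device can
# be looked up.
def _example_contact_address(connection: 'Connection') -> 'ContactAddress':
    return ContactAddress(
        name="work email",
        physical_address="jane.doe@example.com",
        delivery_type=ContactDeliveryType.EMAIL,
        device="1234567890ABCDEF",  # placeholder device ID -> requires `connection`
        connection=connection,
    )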
def list_contacts(connection: 'Connection', to_dictionary: bool = False,
limit: Optional[int] = None, **filters) -> Union[List['Contact'], List[dict]]:
"""Get all contacts as list of Contact objects or dictionaries.
Optionally filter the contacts by specifying filters.
Args:
connection: MicroStrategy connection object
to_dictionary: If True returns a list of contact dicts,
otherwise returns a list of contact objects
limit: limit the number of elements returned. If `None` (default), all
objects are returned.
**filters: Available filter parameters:
['id', 'name', 'description', 'enabled']
"""
return Contact._list_contacts(
connection=connection,
to_dictionary=to_dictionary,
limit=limit,
**filters
)
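
# Illustrative usage (the connection setup and filter values are assumptions):
# fetch only enabled contacts as dictionaries, limited to the first 10 matches.
def _example_list_enabled_contacts(connection: 'Connection'):
    return list_contacts(connection, to_dictionary=True, limit=10, enabled=True)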
class Contact(EntityBase, DeleteMixin):
"""Object representation of Microstrategy Contact object
Attributes:
name: contact's name
id: contact's id
description: contact's description
enabled: specifies if a contact is enabled
linked_user: user linked to contact, instance of User
contact_addresses: list of contact's addresses,
instances of ContactAddress
memberships: list of Contact Groups that the contact belongs to
connection: instance of Connection class, represents connection
to MicroStrategy Intelligence Server
"""
_FROM_DICT_MAP = {
**EntityBase._FROM_DICT_MAP,
'linked_user': User.from_dict,
'contact_addresses': [ContactAddress.from_dict],
'memberships': [ContactGroup.from_dict],
}
_API_GETTERS = {
('id', 'name', 'description', 'enabled', 'linked_user', 'memberships',
'contact_addresses'): contacts.get_contact
}
_API_DELETE = staticmethod(contacts.delete_contact)
_API_PATCH = {
('name', 'description', 'enabled', 'linked_user', 'contact_addresses', 'memberships'):
(contacts.update_contact, 'put')
}
_PATCH_PATH_TYPES = {
'name': str,
'description': str,
'enabled': bool,
'linked_user': dict,
'contact_addresses': list,
'memberships': list
}
def __init__(self, connection: 'Connection', id: Optional[str] = None,
name: Optional[str] = None):
"""Initialize Contact object by passing id or name.
When `id` is provided, name is omitted.
Args:
connection: MicroStrategy connection object
id: ID of Contact
name: name of Contact
"""
if id is None and name is None:
raise ValueError("Please specify either 'id' or 'name' parameter in the constructor.")
if id is None:
result = self._list_contacts(
connection=connection,
name=name,
to_dictionary=True
)
if result:
object_data = result[0]
object_data['connection'] = connection
self._init_variables(**object_data)
else:
raise ValueError(f"There is no Contact named: '{name}'")
else:
super().__init__(connection, id)
def _init_variables(self, **kwargs) -> None:
super()._init_variables(**kwargs)
self.description = kwargs.get('description')
self.enabled = kwargs.get('enabled')
linked_user = kwargs.get("linked_user")
self.linked_user = User.from_dict(linked_user, self.connection) if linked_user else None
addresses = kwargs.get('contact_addresses')
self.contact_addresses = [
ContactAddress.from_dict(address, self.connection)
for address in addresses
] if addresses else None
memberships = kwargs.get('memberships')
self.memberships = [
ContactGroup.from_dict(m, self.connection)
for m in memberships
] if memberships else None
@classmethod
def create(cls, connection: 'Connection', name: str, linked_user: Union['User', str],
contact_addresses: Iterable[Union['ContactAddress', dict]],
description: Optional[str] = None, enabled: bool = True) -> 'Contact':
"""Create a new contact.
Args:
connection: MicroStrategy connection object
returned by `connection.Connection()`
name: contact name
linked_user: user linked to contact
contact_addresses: list of contact addresses
description: description of contact
enabled: specifies if contact should be enabled
Returns:
Contact object
"""
body = {
'name': name,
'description': description,
'enabled': enabled,
'linkedUser': {
'id': get_objects_id(linked_user, User)
},
'contactAddresses': [
address.to_dict() if isinstance(address, ContactAddress) else address
for address in contact_addresses
],
}
body = delete_none_values(body)
response = contacts.create_contact(connection, body).json()
if config.verbose:
print(
f"Successfully created contact named: '{name}' with ID: '{response['id']}'"
)
return cls.from_dict(source=response, connection=connection)
def alter(self, name: Optional[str] = None, description: Optional[str] = None,
enabled: Optional[bool] = None, linked_user: Optional[Union['User', str]] = None,
contact_addresses: Optional[Iterable[Union['ContactAddress', dict]]] = None):
"""Update properties of a contact
Args:
name: name of a contact
description: description of a contact
enabled: specifies if a contact is enabled
linked_user: an object of class User linked to the contact
contact_addresses: list of contact addresses
"""
linked_user = {'id': get_objects_id(linked_user, User)} if linked_user else None
func = self.alter
args = func.__code__.co_varnames[:func.__code__.co_argcount]
defaults = func.__defaults__ # type: ignore
defaults_dict = dict(zip(args[-len(defaults):], defaults)) if defaults else {}
local = locals()
properties = defaultdict(dict)
for property_key in defaults_dict:
if local[property_key] is not None:
properties[property_key] = local[property_key]
self._alter_properties(**properties)
@classmethod
def _list_contacts(cls, connection: 'Connection', to_dictionary: bool = False,
limit: Optional[int] = None, **filters
) -> Union[List['Contact'], List[dict]]:
"""Get all contacts as list of Contact objects or dictionaries.
Optionally filter the contacts by specifying filters.
Args:
connection: MicroStrategy connection object
to_dictionary: If True returns a list of contact dicts,
otherwise returns a list of contact objects
limit: limit the number of elements returned. If `None` (default),
all objects are returned.
**filters: Available filter parameters:
['id', 'name', 'description', 'enabled']
"""
objects = fetch_objects(
connection=connection,
api=contacts.get_contacts,
limit=limit,
filters=filters,
dict_unpack_value='contacts'
)
if to_dictionary:
return objects
return [
cls.from_dict(source=obj, connection=connection)
for obj in objects
]
def add_to_contact_group(self, contact_group: Union['ContactGroup', str]):
"""Add to ContactGroup
Args:
contact_group: contact group to which add this contact
"""
if isinstance(contact_group, str):
contact_group = ContactGroup(self.connection, id=contact_group)
contact_group.add_members([self])
self.fetch()
def remove_from_contact_group(self, contact_group: Union['ContactGroup', str]):
"""Remove from ContactGroup
Args:
contact_group: contact group from which to remove this contact
"""
if isinstance(contact_group, str):
contact_group = ContactGroup(self.connection, id=contact_group)
contact_group.remove_members([self])
self.fetch()

# ----- sources/t04/t04ej15.py (repo: workready/pythonbasic, license: MIT) -----
# Iterator built from a generator function
def fib():
prev, curr = 0, 1
while True:
yield curr
prev, curr = curr, prev + curr
f = fib()
# We step through our iterator by calling next(). Inside a for loop, iter(f) is called automatically
print(0, end=' ')
for n in range(16):
    print(next(f), end=' ')

# ----- setup.py (repo: snower/sevent, license: MIT) -----
#!/usr/bin/env python
import os
import sys
import platform
from setuptools import setup, Extension
if platform.system() != 'Windows' and platform.python_implementation() == "CPython":
ext_modules = [Extension('sevent/cbuffer', sources=['sevent/cbuffer.c'])]
else:
ext_modules = []
if os.path.exists("README.md"):
if sys.version_info[0] >= 3:
with open("README.md", encoding="utf-8") as fp:
long_description = fp.read()
else:
with open("README.md") as fp:
long_description = fp.read()
else:
long_description = ''
setup(
name='sevent',
version='0.4.6',
packages=['sevent', 'sevent.impl', 'sevent.coroutines', 'sevent.helpers'],
ext_modules=ext_modules,
package_data={
'': ['README.md'],
},
install_requires=[
'dnslib>=0.9.7',
'greenlet>=0.4.2',
],
author='snower',
author_email='[email protected]',
url='https://github.com/snower/sevent',
license='MIT',
description='lightweight event loop',
long_description=long_description,
long_description_content_type="text/markdown",
)

# ----- pyOCD/target/target_lpc1768.py (repo: karlp/pyOCD, license: Apache-2.0) -----
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cortex_m import CortexM
class LPC1768(CortexM):
def __init__(self, transport):
CortexM.__init__(self, transport)
self.auto_increment_page_size = 0x1000
def reset(self):
# halt processor
self.halt()
# not remap 0x0000-0x0020 to anything but the flash
self.writeMemory(0x400FC040, 1)
CortexM.reset(self)
def resetStopOnReset(self):
# halt processor
self.halt()
# not remap 0x0000-0x0020 to anything but the flash
self.writeMemory(0x400FC040, 1)
CortexM.resetStopOnReset(self)

# ----- CORNISH_B-CNN_Trainer_And_Optimiser.py (repo: s-neilson/CORNISH-CNN-classifier, license: MIT) -----
import os
import warnings
warnings.simplefilter("ignore")
import csv
import numpy
import hyperopt
from hyperopt import Trials,tpe,hp,fmin
from keras.utils import to_categorical
import pickle
from loadConfiguration import Configuration
from objectCreation import createImagedObjects
from trainBCNN import runOptimisingTrial,runTrial
from createModelPerformancePlots import createAccuracyPlots,createConfusionMatricies
from getObjectHierarchyLabels import getObjectHierarchyLabels
def main():
#Below various configurations are loaded.
inputConfiguration=Configuration(os.getcwd()+"/configurations/inputConfiguration.txt","=")
trainSingleModel=inputConfiguration.getConfigurationValue("trainSingleModel","bool")
filePrefix=inputConfiguration.getConfigurationValue("filePrefix","raw")
allowedFileSuffixes=inputConfiguration.getConfigurationValue("useFileSuffix","raw")
allowedFileSuffixes=[allowedFileSuffixes] if(type(allowedFileSuffixes)==str) else allowedFileSuffixes
desiredImageSize=inputConfiguration.getConfigurationValue("desiredImageSize","int")
dataFolder=os.getcwd()+inputConfiguration.getConfigurationValue("dataFolder","raw")
contigiousEqualAreaRejectionThreshold=inputConfiguration.getConfigurationValue("contigiousEqualAreaRejectionThreshold","int")
objectLeafLabelTotalQuantity=inputConfiguration.getConfigurationValue("objectLeafLabelTotalQuantity","int")
transformedObjectImageRemovalChance=inputConfiguration.getConfigurationValue("transformedObjectImageRemovalChance","float")
objectTypeLabels=inputConfiguration.getConfigurationValue("allowedObjectType","raw")
#A map between object types and their corresponding label lists is created.
objectTypeLabelDictionary={i[0]:tuple(i[1:]) for i in objectTypeLabels}
objectTypePossibleLabelSets,objectHierarchyDepth=getObjectHierarchyLabels(list(objectTypeLabelDictionary.values()))
#The loaded input configuration is printed.
print("Input configuration loaded:")
print( "Will train a single model" if(trainSingleModel) else " Will optimise hyperparameters")
print(" Will use the following file suffixes:")
for currentSuffix in allowedFileSuffixes:
print(" "+currentSuffix)
print(" Image size: "+str(desiredImageSize)+" pixels")
print(" Main image folder: "+dataFolder)
print(" Contigious colour area rejection threshold: "+("Disabled" if(contigiousEqualAreaRejectionThreshold is None) else str(contigiousEqualAreaRejectionThreshold)))
print(" Minimum objects per object category to load/create: "+str(objectLeafLabelTotalQuantity))
print(" Chance of a individual image being removed from an augmented image: "+str(transformedObjectImageRemovalChance))
print(" Labels at each level in the object type heirarchy:")
for i in range(0,objectHierarchyDepth):
print(" Level "+str(i)+":")
for j in objectTypePossibleLabelSets[i]:
print(" "+j)
trainingConfiguration=Configuration(os.getcwd()+"/configurations/trainingConfiguration.txt","=")
batchSize=trainingConfiguration.getConfigurationValue("batchSize","int")
epochNumber=trainingConfiguration.getConfigurationValue("epochNumber","int")
trainingLossWeight=trainingConfiguration.getConfigurationValue("trainingLossWeight","float")
validationFraction=trainingConfiguration.getConfigurationValue("validationFraction","float")
outputFilePrefix=trainingConfiguration.getConfigurationValue("outputFilePrefix","raw")
hyperparameterOptimisationMaximumEvaluations=trainingConfiguration.getConfigurationValue("hyperparameterOptimisationMaximumEvaluations","int")
dropoutFraction=trainingConfiguration.getConfigurationValue("dropoutFraction","float")
convolutionLayersPerBlock=trainingConfiguration.getConfigurationValue("convolutionLayersPerBlock","int")
extraFirstBlock=trainingConfiguration.getConfigurationValue("extraFirstBlock","bool")
initalLayerFilterCount=trainingConfiguration.getConfigurationValue("initalLayerFilterCount","int")
filterCountBlockMultiplicativeFactor=trainingConfiguration.getConfigurationValue("filterCountBlockMultiplicativeFactor","float")
initalLayerKernalSize=trainingConfiguration.getConfigurationValue("initalLayerKernalSize","int")
kernalSizeBlockMultiplicitiveFactor=trainingConfiguration.getConfigurationValue("kernalSizeBlockMultiplicitiveFactor","float")
learningRate=trainingConfiguration.getConfigurationValue("learningRate","float")
gpuQuantity=trainingConfiguration.getConfigurationValue("gpuQuantity","int")
earlyStoppingMinDelta=trainingConfiguration.getConfigurationValue("earlyStoppingMinDelta","float")
earlyStoppingPatience=trainingConfiguration.getConfigurationValue("earlyStoppingPatience","int")
#the loaded training configuration is printed.
print("\n")
print("Training configuration loaded:")
print(" Batch size: "+str(batchSize))
print(" Epochs trained per level in hierarchy: "+str(epochNumber))
print(" Current hierarchy level training loss weight: "+str(trainingLossWeight))
print(" Validation object fraction: "+str(validationFraction))
print(" Output file prefix: "+outputFilePrefix)
if(trainSingleModel):
print(" The following parameters will be used for training the model:")
print(" dropoutFraction: "+str(dropoutFraction))
print(" convolutionLayersPerBlock: "+str(convolutionLayersPerBlock))
print(" extraFirstBlock: "+str(extraFirstBlock))
print(" initalLayerFilterCount: "+str(initalLayerFilterCount))
print(" filterCountBlockMultiplicativeFactor: "+str(filterCountBlockMultiplicativeFactor))
print(" initalLayerKernalSize: "+str(initalLayerKernalSize))
print(" kernalSizeBlockMultiplicitiveFactor: "+str(kernalSizeBlockMultiplicitiveFactor))
print(" learningRate: "+str(learningRate))
else:
print(" Maximum number of hyperparameter optimisation evaluations: "+str(hyperparameterOptimisationMaximumEvaluations))
print(" Number of GPUs to use for training: "+str(gpuQuantity))
print(" Early stopping minimum loss delta: "+str(earlyStoppingMinDelta))
print(" Early stopping patience: "+str(earlyStoppingPatience))
hyperparameterLimitsConfiguration=Configuration(os.getcwd()+"/configurations/hyperparameterLimitsConfiguration.txt","=")
minimumDropoutFraction=hyperparameterLimitsConfiguration.getConfigurationValue("minimumDropoutFraction","float")
maximumDropoutFraction=hyperparameterLimitsConfiguration.getConfigurationValue("maximumDropoutFraction","float")
possibleConvolutionLayersPerBlock=hyperparameterLimitsConfiguration.getConfigurationValue("possibleConvolutionLayersPerBlock","int")
possibleInitalLayerFilterCount=hyperparameterLimitsConfiguration.getConfigurationValue("possibleInitalLayerFilterCount","int")
possibleFilterCountBlockMultiplicativeFactor=hyperparameterLimitsConfiguration.getConfigurationValue("possibleFilterCountBlockMultiplicativeFactor","float")
possibleInitalLayerKernalSize=hyperparameterLimitsConfiguration.getConfigurationValue("possibleInitalLayerKernalSize","int")
possibleKernalSizeBlockMultiplicitiveFactor=hyperparameterLimitsConfiguration.getConfigurationValue("possibleKernalSizeBlockMultiplicitiveFactor","float")
minimumLearningRate=hyperparameterLimitsConfiguration.getConfigurationValue("minimumLearningRate","float")
maximumLearningRate=hyperparameterLimitsConfiguration.getConfigurationValue("maximumLearningRate","float")
if(not trainSingleModel):
print("\n")
print(" Hyperparameters will be optimised through the following ranges:")
print(" dropoutFraction: "+str(minimumDropoutFraction)+"-"+str(maximumDropoutFraction))
print(" convolutionLayersPerBlock: "+str(possibleConvolutionLayersPerBlock))
print(" extraFirstBlock: True or False")
print(" initalLayerFilterCount: "+str(possibleInitalLayerFilterCount))
print(" filterCountBlockMultiplicativeFactor: "+str(possibleFilterCountBlockMultiplicativeFactor))
print(" initalLayerKernalSize: "+str(possibleInitalLayerKernalSize))
print(" kernalSizeBlockMultiplicitiveFactor: "+str(possibleKernalSizeBlockMultiplicitiveFactor))
print(" learningRate: "+str(minimumLearningRate)+"-"+str(maximumLearningRate))
trainObjects,validationObjects=createImagedObjects(dataFolder,objectTypeLabelDictionary,desiredImageSize,contigiousEqualAreaRejectionThreshold,
filePrefix,allowedFileSuffixes,validationFraction,objectLeafLabelTotalQuantity,transformedObjectImageRemovalChance)
#Data from the loaded/created ImagedObjects is turned into a format that can be used in the neural network.
numpy.random.shuffle(trainObjects)
numpy.random.shuffle(validationObjects)
trainObjectImageList=[currentObject.imageData for currentObject in trainObjects]
trainObjectIntegerLabelList=[[None for j in range(0,len(trainObjects))]for i in range(0,objectHierarchyDepth)]
validationObjectImageList=[currentObject.imageData for currentObject in validationObjects]
validationObjectIntegerLabelList=[[None for j in range(0,len(validationObjects))]for i in range(0,objectHierarchyDepth)]
#Creates a list that contains the labels for each object represented as integers instead of strings.
for i in range(0,objectHierarchyDepth):
trainObjectIntegerLabelList[i]=[(objectTypePossibleLabelSets[i]).index(currentObject.label[i]) for currentObject in trainObjects]
validationObjectIntegerLabelList[i]=[(objectTypePossibleLabelSets[i]).index(currentObject.label[i]) for currentObject in validationObjects]
#The labels are one-hot encoded for each level in the object heirarchy.
trainLabels=[to_categorical(trainObjectIntegerLabelList[i],len(objectTypePossibleLabelSets[i])) for i in range(0,objectHierarchyDepth)]
validationLabels=[to_categorical(validationObjectIntegerLabelList[i],len(objectTypePossibleLabelSets[i])) for i in range(0,objectHierarchyDepth)]
#The above data is put into a form that can be used by the model.
xTrain=numpy.zeros(shape=(len(trainObjectImageList),)+trainObjectImageList[0].shape)
xValidation=numpy.zeros(shape=(len(validationObjectImageList),)+validationObjectImageList[0].shape)
for currentIndex,currentImageData in enumerate(trainObjectImageList):
xTrain[currentIndex,:,:,:]=currentImageData
for currentIndex,currentImageData in enumerate(validationObjectImageList):
xValidation[currentIndex,:,:,:]=currentImageData
#Each output of the model is accociated with a set of labels.
outputLayerNames=["out"+str(i+1) for i in range(0,objectHierarchyDepth)] #Each output layer is labeled sequentially from the output closest to the input layer.
yTrain=dict(zip(outputLayerNames,trainLabels))
yValidation=dict(zip(outputLayerNames,validationLabels))
nonOptimisingModelParameters=validationObjectImageList[0].shape,outputLayerNames,objectTypePossibleLabelSets,gpuQuantity
nonOptimisingTrainParameters=xTrain,xValidation,yTrain,yValidation,batchSize,epochNumber,trainingLossWeight,earlyStoppingMinDelta,earlyStoppingPatience
nonOptimisingF1Parameters=validationObjectIntegerLabelList,objectHierarchyDepth
if(trainSingleModel): #For training a single model with specified hyperparameters.
modelHyperparameters=[dropoutFraction,convolutionLayersPerBlock,extraFirstBlock,initalLayerFilterCount,filterCountBlockMultiplicativeFactor,initalLayerKernalSize,kernalSizeBlockMultiplicitiveFactor,learningRate]
result=runTrial(modelHyperparameters,nonOptimisingModelParameters,nonOptimisingTrainParameters,nonOptimisingF1Parameters)
outputModelFileName=outputFilePrefix+"TrainedModel.h5"
outputModelHistoryFileName=outputFilePrefix+"TrainingHistory.npy"
print("\n")
print("Saving model file at location "+os.getcwd()+"/"+outputModelFileName)
print("Saving model training history file at location "+os.getcwd()+"/"+outputModelHistoryFileName)
result[0].save(outputModelFileName) #The model is saved.
numpy.save(outputModelHistoryFileName,result[1]) #The training history is saved.
outputConfusionMatriciesFilePath=outputFilePrefix+"ConfusionMatricies.png"
print("\n")
print("Creating accuracy plots, will be saved in folder "+os.getcwd()+" as .png files with the prefix "+outputFilePrefix+"AccuracyPlot_")
createAccuracyPlots(result[1],outputLayerNames,outputFilePrefix)
print("Creating confusion matricies, plot will be saved at location: "+os.getcwd()+"/"+outputConfusionMatriciesFilePath)
createConfusionMatricies(model=result[0],testObjects=validationObjects,testObjectImageList=validationObjectImageList,
imageSaveFilePath=outputConfusionMatriciesFilePath,objectHierarchyLabels=objectTypePossibleLabelSets)
else: #For hyperparameter optimisation
rotLambda=lambda parameters:runOptimisingTrial(parameters,nonOptimisingModelParameters,nonOptimisingTrainParameters,nonOptimisingF1Parameters)
space=[hp.uniform("dropoutFraction",minimumDropoutFraction,maximumDropoutFraction),
hp.choice("convolutionLayersPerBlock",possibleConvolutionLayersPerBlock),
hp.choice("extraFirstBlock",[True,False]),
hp.choice("initalLayerFilterCount",possibleInitalLayerFilterCount),
hp.choice("filterCountBlockMultiplicativeFactor",possibleFilterCountBlockMultiplicativeFactor),
hp.choice("initalLayerKernalSize",possibleInitalLayerKernalSize),
hp.choice("kernalSizeBlockMultiplicitiveFactor",possibleKernalSizeBlockMultiplicitiveFactor),
hp.uniform("learningRate",minimumLearningRate,maximumLearningRate)]
trials=Trials()
bestResults=fmin(rotLambda,space=space,algo=tpe.suggest,max_evals=hyperparameterOptimisationMaximumEvaluations,trials=trials)
optimisedHyperparameters=hyperopt.space_eval(space,bestResults)
print("\n")
print("Optimised hyperparameters: ",optimisedHyperparameters)
outputOptimisedHyperparameterFileName=outputFilePrefix+"OptimisedHyperparameters.txt"
print("Saving optimised hyperparmeters at location: "+os.getcwd()+"/"+outputOptimisedHyperparameterFileName)
outputOptimisedHyperparameterFile=open(outputOptimisedHyperparameterFileName,"w")
outputOptimisedHyperparameterFileWriter=csv.writer(outputOptimisedHyperparameterFile,delimiter="=")
outputOptimisedHyperparameterFileWriter.writerow(["dropoutFraction",optimisedHyperparameters[0]])
outputOptimisedHyperparameterFileWriter.writerow(["convolutionLayersPerBlock",optimisedHyperparameters[1]])
outputOptimisedHyperparameterFileWriter.writerow(["extraFirstBlock",optimisedHyperparameters[2]])
outputOptimisedHyperparameterFileWriter.writerow(["initalLayerFilterCount",optimisedHyperparameters[3]])
outputOptimisedHyperparameterFileWriter.writerow(["filterCountBlockMultiplicativeFactor",optimisedHyperparameters[4]])
outputOptimisedHyperparameterFileWriter.writerow(["initalLayerKernalSize",optimisedHyperparameters[5]])
outputOptimisedHyperparameterFileWriter.writerow(["kernalSizeBlockMultiplicitiveFactor",optimisedHyperparameters[6]])
outputOptimisedHyperparameterFileWriter.writerow(["learningRate",optimisedHyperparameters[7]])
outputOptimisedHyperparameterFile.close()
print("\n")
outputTrialsFileName=outputFilePrefix+"TrainingTrials.p"
print("Saving trials pickle file at location "+os.getcwd()+"/"+outputTrialsFileName)
pickle.dump(trials,open(outputTrialsFileName,"wb"))
main()

# ----- core/systems/cart_pole.py (repo: ivandariojr/core, license: MIT) -----
from torch import cat, cos, float64, sin, stack, tensor
from torch.nn import Module, Parameter
from core.dynamics import RoboticDynamics
class CartPole(RoboticDynamics, Module):
def __init__(self, m_c, m_p, l, g=9.81):
RoboticDynamics.__init__(self, 2, 1)
Module.__init__(self)
self.params = Parameter(tensor([m_c, m_p, l, g], dtype=float64))
def D(self, q):
m_c, m_p, l, _ = self.params
_, theta = q
return stack(
(stack([m_c + m_p, m_p * l * cos(theta)]),
stack([m_p * l * cos(theta), m_p * (l ** 2)])))
def C(self, q, q_dot):
_, m_p, l, _ = self.params
z = tensor(0, dtype=float64)
_, theta = q
_, theta_dot = q_dot
return stack((stack([z, -m_p * l * theta_dot * sin(theta)]),
stack([z, z])))
def U(self, q):
_, m_p, l, g = self.params
_, theta = q
return m_p * g * l * cos(theta)
def G(self, q):
_, m_p, l, g = self.params
_, theta = q
z = tensor(0, dtype=float64)
return stack([z, -m_p * g * l * sin(theta)])
def B(self, q):
return tensor([[1], [0]], dtype=float64)
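
# Illustrative note (not part of the original file): the methods above are the
# terms of the standard manipulator equation
#     D(q) q_ddot + C(q, q_dot) q_dot + G(q) = B(q) u
# for a cart-pole with cart mass m_c, pole mass m_p, pole length l and gravity g,
# where q = (cart position, pole angle). A minimal numerical check with assumed
# parameter values:
def _example_cart_pole_terms():
    cart_pole = CartPole(m_c=1.0, m_p=0.1, l=0.5)
    q = tensor([0.0, 0.1], dtype=float64)      # cart position, pole angle
    q_dot = tensor([0.0, 0.0], dtype=float64)  # corresponding velocities
    return cart_pole.D(q), cart_pole.C(q, q_dot), cart_pole.G(q), cart_pole.B(q)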

# ----- export.py (repo: nicolasm/lastfm-export, license: MIT) -----
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#######################################################################
# This script imports your Last.fm listening history #
# inside a MySQL or Sqlite database. #
# #
# Copyright (c) 2015-2020, Nicolas Meier #
#######################################################################
import json
import logging
import sys
from lfmconf.lfmconf import get_lastfm_conf
from lfmdb import lfmdb
from stats.stats import LastfmStats, recent_tracks, \
retrieve_total_json_tracks_from_db
from queries.inserts import get_query_insert_json_track
logging.basicConfig(
level=logging.INFO,
format=f'%(asctime)s %(levelname)s %(message)s'
)
conf = get_lastfm_conf()
user = conf['lastfm']['service']['username']
api_key = conf['lastfm']['service']['apiKey']
lastfm_stats = LastfmStats.get_lastfm_stats(user, api_key)
total_pages = lastfm_stats.nb_delta_pages()
total_plays_in_db = lastfm_stats.nb_json_tracks_in_db
logging.info('Nb page to get: %d' % total_pages)
if total_pages == 0:
logging.info('Nothing to update!')
sys.exit(1)
all_pages = []
for page_num in range(total_pages, 0, -1):
logging.info('Page %d of %d' % (page_num, total_pages))
page = recent_tracks(user, api_key, page_num)
while page.get('recenttracks') is None:
logging.info('has no tracks. Retrying!')
page = recent_tracks(user, api_key, page_num)
all_pages.append(page)
# Iterate through all pages
num_pages = len(all_pages)
for page_num, page in enumerate(all_pages):
logging.info('Page %d of %d' % (page_num + 1, num_pages))
tracks = page['recenttracks']['track']
# Remove the "nowplaying" track if found.
if tracks[0].get('@attr'):
if tracks[0]['@attr']['nowplaying'] == 'true':
tracks.pop(0)
# Get only the missing tracks.
if page_num == 0:
        logging.info('First page')
nb_plays = lastfm_stats.nb_plays_for_first_page()
tracks = tracks[0: nb_plays]
logging.info('Getting %d plays' % nb_plays)
# On each page, iterate through all tracks
num_tracks = len(tracks)
json_tracks = []
for track_num, track in enumerate(reversed(tracks)):
logging.info('Track %d of %d' % (track_num + 1, num_tracks))
json_tracks.append(json.dumps(track))
try:
lfmdb.insert_many(get_query_insert_json_track(), json_tracks)
except Exception:
sys.exit(1)
logging.info('Done! %d rows in table json_track.' % retrieve_total_json_tracks_from_db())

# ----- final_project/code/src/data_attention.py (repo: jschmidtnj/cs584, license: MIT) -----
#!/usr/bin/env python3
"""
data file
read in data
"""
from typing import Tuple, Any
import pandas as pd
import tensorflow as tf
from loguru import logger
from utils import file_path_relative
import numpy as np
from transformers import DistilBertTokenizer
NUM_ROWS_TRAIN: int = 15000
TEST_RATIO: float = 0.2
def _run_encode(texts: np.array, tokenizer: Any, maxlen: int = 512):
"""
Encoder for encoding the text into sequence of integers for transformer Input
"""
logger.info('encode')
encodings = tokenizer(
texts.tolist(),
return_token_type_ids=False,
padding='max_length',
truncation=True,
max_length=maxlen
)
return np.array(encodings['input_ids'])
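
# Illustrative usage (the sample texts and maxlen are assumptions; the first call
# downloads the pretrained tokenizer): encode a couple of comments into
# fixed-length arrays of token IDs.
def _example_encode():
    tokenizer = DistilBertTokenizer.from_pretrained(
        'distilbert-base-multilingual-cased')
    sample = np.array(['first comment', 'second comment'])
    return _run_encode(sample, tokenizer, maxlen=192)  # -> shape (2, 192)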
def read_data_attention(strategy: tf.distribute.TPUStrategy,
max_len: int,
) -> Tuple[np.array, np.array, np.array, np.array, tf.data.Dataset, tf.data.Dataset, tf.data.Dataset, int]:
"""
    Read and encode the train/validation/test data for the attention models
"""
logger.info('reading data for attention models')
# batch with number of tpu's
batch_size = 16 * strategy.num_replicas_in_sync
auto = tf.data.experimental.AUTOTUNE
# First load the tokenizer
tokenizer = DistilBertTokenizer.from_pretrained(
'distilbert-base-multilingual-cased')
train = pd.read_csv(file_path_relative('jigsaw-toxic-comment-train.csv'))
valid = pd.read_csv(file_path_relative('validation.csv'))
test = pd.read_csv(file_path_relative('test.csv'))
x_train = _run_encode(train['comment_text'].astype(str),
tokenizer, maxlen=max_len)
x_valid = _run_encode(valid['comment_text'].astype(str),
tokenizer, maxlen=max_len)
x_test = _run_encode(test['content'].astype(
str), tokenizer, maxlen=max_len)
y_train = train['toxic'].values
y_valid = valid['toxic'].values
train_dataset = (
tf.data.Dataset
.from_tensor_slices((x_train, y_train))
.repeat()
.shuffle(2048)
.batch(batch_size)
.prefetch(auto)
)
valid_dataset = (
tf.data.Dataset
.from_tensor_slices((x_valid, y_valid))
.batch(batch_size)
.cache()
.prefetch(auto)
)
test_dataset = (
tf.data.Dataset
.from_tensor_slices(x_test)
.batch(batch_size)
)
# return all datasets
return x_train, x_valid, y_train, y_valid, train_dataset, valid_dataset, \
test_dataset, batch_size
if __name__ == '__main__':
raise RuntimeError('cannot run data attention on its own')
| 27.260417 | 131 | 0.649599 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 527 | 0.201376 |
2390d6b6f9cc47e9e8fe109965cc9769d5444907 | 2,875 | py | Python | applications/WindEngineeringApplication/tests/test_WindEngineeringApplication.py | clazaro/Kratos | b947b82c90dfcbf13d60511427f85990d36b90be | [
"BSD-4-Clause"
]
| null | null | null | applications/WindEngineeringApplication/tests/test_WindEngineeringApplication.py | clazaro/Kratos | b947b82c90dfcbf13d60511427f85990d36b90be | [
"BSD-4-Clause"
]
| null | null | null | applications/WindEngineeringApplication/tests/test_WindEngineeringApplication.py | clazaro/Kratos | b947b82c90dfcbf13d60511427f85990d36b90be | [
"BSD-4-Clause"
]
| null | null | null | # Kratos imports
import KratosMultiphysics
import KratosMultiphysics.KratosUnittest as UnitTest
from KratosMultiphysics.WindEngineeringApplication.test_suite import SuiteFlags, TestSuite
import run_cpp_tests
# STL imports
import pathlib
class TestLoader(UnitTest.TestLoader):
@property
def suiteClass(self):
return TestSuite
def AssembleTestSuites(enable_mpi=False):
""" Populates the test suites to run.
    Populates the test suites to run. At least, it should populate the suites:
    "small", "nightly" and "all"
Return
------
suites: A dictionary of suites
The set of suites with its test_cases added.
"""
static_suites = UnitTest.KratosSuites
# Test cases will be organized into lists first, then loaded into their
# corresponding suites all at once
local_cases = {}
for key in static_suites.keys():
local_cases[key] = []
# Glob all test cases in this application
this_directory = pathlib.Path(__file__).absolute().parent
test_loader = TestLoader()
all_tests = test_loader.discover(this_directory)
# Sort globbed test cases into lists based on their suite flags
# flags correspond to entries in KratosUnittest.TestSuites
# (small, nightly, all, validation)
#
# Cases with the 'mpi' flag are added to mpi suites as well as their corresponding normal suites.
# Cases with the 'mpi_only' flag are not added to normal suites.
for test_case in all_tests:
suite_flags = set(test_case.suite_flags)
# Check whether the test case has a flag for mpi
mpi = SuiteFlags.MPI in suite_flags
mpi_only = SuiteFlags.MPI_ONLY in suite_flags
# Don't add the test if its mpi-exclusive and mpi is not enabled
if (not enable_mpi) and mpi_only:
continue
# Remove mpi flags
if mpi:
suite_flags.remove(SuiteFlags.MPI)
if mpi_only:
suite_flags.remove(SuiteFlags.MPI_ONLY)
# Add case to the corresponding suites
for suite_flag in suite_flags:
local_cases[suite_flag.name.lower()].append(test_case)
if mpi or mpi_only:
local_cases["mpi_" + suite_flag.name.lower()].append(test_case)
# Put test in 'all' if it isn't already there
if not (SuiteFlags.ALL in suite_flags):
if not mpi_only:
local_cases["all"].append(test_case)
if mpi or mpi_only:
local_cases["mpi_all"].append(test_case)
# Load all sorted cases into the global suites
for suite_name, test_cases in local_cases.items():
static_suites[suite_name].addTests(test_cases)
return static_suites
def Run(enable_mpi=False):
UnitTest.runTests(AssembleTestSuites(enable_mpi=enable_mpi))
if __name__ == "__main__":
Run(enable_mpi=False) | 31.25 | 103 | 0.681391 | 103 | 0.035826 | 0 | 0 | 60 | 0.02087 | 0 | 0 | 1,060 | 0.368696 |
2392d8b12f4d624279784bb31763afce245b714a | 5,739 | py | Python | src/cadorsfeed/fpr.py | kurtraschke/cadors-parse | 67ac398ef318562dcbd7c60ef7c0d91e7980111a | [
"MIT"
]
| 1 | 2018-01-05T12:54:13.000Z | 2018-01-05T12:54:13.000Z | src/cadorsfeed/fpr.py | kurtraschke/cadors-parse | 67ac398ef318562dcbd7c60ef7c0d91e7980111a | [
"MIT"
]
| null | null | null | src/cadorsfeed/fpr.py | kurtraschke/cadors-parse | 67ac398ef318562dcbd7c60ef7c0d91e7980111a | [
"MIT"
]
| null | null | null | import re
import uuid
from copy import deepcopy
from datetime import datetime
from lxml import etree
from lxml.html import xhtml_to_html
from geoalchemy import WKTSpatialElement
from geolucidate.functions import _cleanup, _convert
from geolucidate.parser import parser_re
from cadorsfeed import db
from cadorsfeed.models import DailyReport, CadorsReport, ReportCategory
from cadorsfeed.models import Aircraft, NarrativePart, Location, LocationRef
from cadorsfeed.cadorslib.xpath_functions import extensions
from cadorsfeed.cadorslib.narrative import process_narrative, normalize_ns
from cadorsfeed.cadorslib.locations import LocationStore
from cadorsfeed.aerodb import aerodromes_re, lookup
NSMAP = {'h': 'http://www.w3.org/1999/xhtml',
'pyf': 'urn:uuid:fb23f64b-3c54-4009-b64d-cc411bd446dd',
'a': 'http://www.w3.org/2005/Atom',
'geo': 'http://www.w3.org/2003/01/geo/wgs84_pos#',
'aero':'urn:uuid:1469bf5a-50a9-4c9b-813c-af19f9d6824d'}
def make_datetime(date, time):
if time is None:
time = "0000 Z"
return datetime.strptime(date + " " + time, "%Y-%m-%d %H%M Z")
def clean_html(tree):
mytree = deepcopy(tree)
for elem in mytree.iter():
for attr, val in elem.attrib.iteritems():
if attr.startswith('{'):
del elem.attrib[attr]
xhtml_to_html(mytree)
return etree.tostring(normalize_ns(mytree), method="html",
encoding=unicode)
def format_parsed_report(parsed_report):
report = CadorsReport.query.get(
parsed_report['cadors_number']) or CadorsReport(uuid=uuid.uuid4())
parsed_report['timestamp'] = make_datetime(parsed_report['date'],
parsed_report['time'])
del parsed_report['date']
del parsed_report['time']
primary_locations = set()
other_locations = set()
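    # Primary locations come from the report's own location fields (tclid and free-text location);
    # additional locations are mined from the narrative text further below.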
if parsed_report['tclid'] != '':
#try to do a db lookup
data = lookup(parsed_report['tclid'])
if data is not None:
primary_locations.add(data)
if parsed_report['location'] != '':
location = parsed_report['location']
#Apply geolucidate and the aerodromes RE
match = aerodromes_re.get_icao_re.search(location)
if match:
data = lookup(match.group())
primary_locations.add(data)
match = parser_re.search(location)
if match:
(latitude, longitude) = _convert(*_cleanup(match.groupdict()))
location = make_location(latitude, longitude)
location.name = match.group()
primary_locations.add(location)
for narrative_part in parsed_report['narrative']:
narrative_tree = process_narrative(narrative_part['narrative_text'])
narrative_part['narrative_html'] = clean_html(narrative_tree)
narrative_part['narrative_xml'] = etree.tostring(narrative_tree,
method="xml",
encoding=unicode)
#do the location extraction here
#parse out geolinks
elements = narrative_tree.xpath(
"//*[@class='geolink' and @geo:lat and @geo:long]",
namespaces=NSMAP)
for element in elements:
longitude = element.attrib[
'{http://www.w3.org/2003/01/geo/wgs84_pos#}long']
latitude = element.attrib[
'{http://www.w3.org/2003/01/geo/wgs84_pos#}lat']
name = element.attrib['title']
location = make_location(latitude, longitude)
location.name = name
other_locations.add(location)
#parse out aerodrome links
elements = narrative_tree.xpath(
"//*[@class='aerolink' and @aero:code]",
namespaces=NSMAP)
for element in elements:
code = element.attrib[
'{urn:uuid:1469bf5a-50a9-4c9b-813c-af19f9d6824d}code']
other_locations.add(lookup(code))
for aircraft_part in parsed_report['aircraft']:
if aircraft_part['flight_number'] is not None:
match = re.match("([A-Z]{2,4})([0-9]{1,4})M?",
aircraft_part['flight_number'])
if match:
aircraft_part['flight_number_operator'] = match.group(1)
aircraft_part['flight_number_flight'] = match.group(2)
report.categories = []
report.aircraft = []
report.narrative_parts = []
report.locations = []
for category in parsed_report['categories']:
report.categories.append(ReportCategory(text=category))
del parsed_report['categories']
for aircraft_part in parsed_report['aircraft']:
report.aircraft.append(Aircraft(**aircraft_part))
del parsed_report['aircraft']
for narrative_part in parsed_report['narrative']:
report.narrative_parts.append(NarrativePart(**narrative_part))
del parsed_report['narrative']
for location in primary_locations:
locref = LocationRef(report=report, location=location,
primary=True)
db.session.add(locref)
other_locations -= primary_locations
for location in other_locations:
locref = LocationRef(report=report, location=location,
primary=False)
db.session.add(locref)
for key, value in parsed_report.iteritems():
setattr(report, key, value)
return report
def make_location(latitude, longitude):
wkt = "POINT(%s %s)" % (longitude,
latitude)
point = WKTSpatialElement(wkt)
location = Location(location=point)
return location
| 34.781818 | 76 | 0.626067 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 990 | 0.172504 |
2393234a05bc91425b68a33c2343314b4950796f | 1,016 | py | Python | sstcam_sandbox/d191122_dc_tf/generate_pedestal.py | watsonjj/CHECLabPySB | 91330d3a6f510a392f635bd7f4abd2f77871322c | [
"BSD-3-Clause"
]
| null | null | null | sstcam_sandbox/d191122_dc_tf/generate_pedestal.py | watsonjj/CHECLabPySB | 91330d3a6f510a392f635bd7f4abd2f77871322c | [
"BSD-3-Clause"
]
| null | null | null | sstcam_sandbox/d191122_dc_tf/generate_pedestal.py | watsonjj/CHECLabPySB | 91330d3a6f510a392f635bd7f4abd2f77871322c | [
"BSD-3-Clause"
]
| 1 | 2021-03-30T09:46:56.000Z | 2021-03-30T09:46:56.000Z | from sstcam_sandbox import get_checs
from TargetCalibSB.pedestal import PedestalTargetCalib
from TargetCalibSB import get_cell_ids_for_waveform
from CHECLabPy.core.io import TIOReader
from tqdm import tqdm
from glob import glob
def process(path):
pedestal_path = path.replace("_r0.tio", "_ped.tcal")
reader = TIOReader(path)
pedestal = PedestalTargetCalib(
reader.n_pixels, reader.n_samples-32, reader.n_cells
)
desc = "Generating pedestal"
for wfs in tqdm(reader, total=reader.n_events, desc=desc):
if wfs.missing_packets:
continue
cells = get_cell_ids_for_waveform(wfs.first_cell_id, reader.n_samples, reader.n_cells)
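        # Drop the first 32 samples of each waveform and shift the first cell id to match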
wfs = wfs[:, 32:]
wfs.first_cell_id = cells[32]
pedestal.add_to_pedestal(wfs, wfs.first_cell_id)
pedestal.save_tcal(pedestal_path)
def main():
input_paths = glob(get_checs("d181203_erlangen/pedestal/*.tio"))
for path in input_paths:
process(path)
if __name__ == '__main__':
main()
| 29.028571 | 94 | 0.714567 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 84 | 0.082677 |
239448a88cf09613c57c9e9d85d7149c490e74ce | 740 | py | Python | Python/剑指offer53-I.py | 1050669722/LeetCode-Answers | c8f4d1ccaac09cda63b60d75144335347b06dc81 | [
"MIT"
]
| null | null | null | Python/剑指offer53-I.py | 1050669722/LeetCode-Answers | c8f4d1ccaac09cda63b60d75144335347b06dc81 | [
"MIT"
]
| null | null | null | Python/剑指offer53-I.py | 1050669722/LeetCode-Answers | c8f4d1ccaac09cda63b60d75144335347b06dc81 | [
"MIT"
]
| null | null | null | class Solution:
    def search(self, nums: List[int], target: int) -> int:
        # Binary search for one occurrence of target, then count all
        # occurrences by expanding to both sides of the match.
        i, j = 0, len(nums) - 1
        while i <= j:
            m = (i + j) // 2
            if nums[m] == target:
                count = 1
                k = m - 1
                while k >= 0 and nums[k] == target:
                    count += 1
                    k -= 1
                k = m + 1
                while k < len(nums) and nums[k] == target:
                    count += 1
                    k += 1
                return count
            elif nums[m] > target:
                j = m - 1
            else:
                i = m + 1
        return 0
| 32.173913 | 58 | 0.364865 | 739 | 0.998649 | 0 | 0 | 0 | 0 | 0 | 0 | 62 | 0.083784 |
23944dce2a5292a5f4dccddbeb6c459b0db88881 | 821 | py | Python | punk/aggregator/aggregateByDateTimeCategory.py | NewKnowledge/punk | 53007a38433023f9a9f5cf39786b1c5a28f1f996 | [
"MIT"
]
| 2 | 2017-08-23T16:58:01.000Z | 2020-07-03T01:53:34.000Z | punk/aggregator/aggregateByDateTimeCategory.py | NewKnowledge/punk | 53007a38433023f9a9f5cf39786b1c5a28f1f996 | [
"MIT"
]
| 11 | 2017-08-18T17:19:21.000Z | 2022-03-18T15:54:40.000Z | punk/aggregator/aggregateByDateTimeCategory.py | NewKnowledge/punk | 53007a38433023f9a9f5cf39786b1c5a28f1f996 | [
"MIT"
]
| 2 | 2017-09-11T19:38:04.000Z | 2020-05-28T00:58:05.000Z | import pandas as pd
from typing import List, NamedTuple
from .timeseries import agg_by_category_by_date
from primitive_interfaces.base import PrimitiveBase
class AggregateByDateTimeCategory(PrimitiveBase[pd.DataFrame, List[str]]):
__author__ = 'distil'
def __init__(self):
pass
def get_params(self) -> dict:
return {}
def set_params(self, params: dict) -> None:
self.params = params
    def get_call_metadata(self) -> dict:
return {}
def fit(self):
pass
def produce(self, inputs: pd.DataFrame, values: List[str] = [], groupby : List[str] = [],
datetime=None, intervals=None, aggregation=None):
return agg_by_category_by_date(inputs, datetime, values, groupby, interval=intervals,
agg=aggregation)
| 28.310345 | 94 | 0.65408 | 662 | 0.806334 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.009744 |
2394fb0077e12ccbcbd82c4f19bfaa2b70a81ecc | 150 | py | Python | eemeter/modeling/exceptions.py | SuccessionEcologicalServices/eemeter-1 | dc06f42dc64679a5d56771d6900169eef4eaf515 | [
"MIT"
]
| null | null | null | eemeter/modeling/exceptions.py | SuccessionEcologicalServices/eemeter-1 | dc06f42dc64679a5d56771d6900169eef4eaf515 | [
"MIT"
]
| 1 | 2018-06-14T04:24:49.000Z | 2018-06-14T04:24:49.000Z | eemeter/modeling/exceptions.py | SuccessionEcologicalServices/eemeter-1 | dc06f42dc64679a5d56771d6900169eef4eaf515 | [
"MIT"
]
| null | null | null | class DataSufficiencyException(Exception):
pass
class ModelFitException(Exception):
pass
class ModelPredictException(Exception):
pass
| 13.636364 | 42 | 0.773333 | 143 | 0.953333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2395f7861779117a4a46333dc411993e7c87448d | 2,723 | py | Python | algorithm/leetcode/2018-03-25.py | mhoonjeon/problemsolving | f47ff41b03ce406b26ea36be602c0aa14ac7ccf1 | [
"MIT"
]
| null | null | null | algorithm/leetcode/2018-03-25.py | mhoonjeon/problemsolving | f47ff41b03ce406b26ea36be602c0aa14ac7ccf1 | [
"MIT"
]
| null | null | null | algorithm/leetcode/2018-03-25.py | mhoonjeon/problemsolving | f47ff41b03ce406b26ea36be602c0aa14ac7ccf1 | [
"MIT"
]
| null | null | null | # 804. Unique Morse Code Words
class Solution:
def __init__(self):
self.morse_code = [".-", "-...", "-.-.", "-..", ".", "..-.", "--.",
"....", "..", ".---", "-.-", ".-..", "--", "-.",
"---", ".--.", "--.-", ".-.", "...", "-", "..-",
"...-", ".--", "-..-", "-.--", "--.."]
self.alphabets = "abcdefghijklmnopqrstuvwxyz"
self.alpha_morse = dict(zip(self.alphabets, self.morse_code))
def uniqueMorseRepresentations(self, words):
"""
:type words: List[str]
:rtype: int
"""
word_set = []
for word in words:
s = ""
for ch in word:
s += self.alpha_morse[ch]
word_set.append(s)
return len(list(set(word_set)))
""" https://leetcode.com/problems/unique-morse-code-words/discuss/120675/\
Easy-and-Concise-Solution-C++JavaPython
def uniqueMorseRepresentations(self, words):
d = [".-", "-...", "-.-.", "-..", ".", "..-.", "--.", "....", "..", ".---",
"-.-", ".-..", "--", "-.", "---", ".--.", "--.-", ".-.", "...", "-",
"..-", "...-", ".--", "-..-", "-.--", "--.."]
return len({''.join(d[ord(i) - ord('a')] for i in w) for w in words})
"""
# 771. Jewels and Stones, 98.33%
# https://leetcode.com/problems/jewels-and-stones/description/
def numJewelsInStones(self, J, S):
"""
:type J: str
:type S: str
:rtype: int
"""
count = 0
for jewel in J:
for stone in S:
if jewel == stone:
count += 1
return count
""" https://leetcode.com/problems/jewels-and-stones/discuss/113553/\
Easy-and-Concise-Solution-using-hash-set-C++JavaPython
def numJewelsInStones(self, J, S):
setJ = set(J)
return sum(s in setJ for s in S)
"""
# 806. Number of Lines To Write String
# https://leetcode.com/problems/number-of-lines-to-write-string/
def numberOfLines(self, widths, S):
"""
:type widths: List[int]
:type S: str
:rtype: List[int]
"""
lines = 1
line_width = 0
for ch in S:
index = ord(ch) - ord('a')
if line_width + widths[index] <= 100:
line_width += widths[index]
else:
lines += 1
line_width = widths[index]
return [lines, line_width]
""" https://leetcode.com/problems/number-of-lines-to-write-string/discuss/\
120666/Easy-Solution-6-lines-C++JavaPython
def numberOfcurs(self, widths, S):
res, cur = 1, 0
for i in S:
width = widths[ord(i) - ord('a')]
res += 1 if cur + width > 100 else 0
cur = width if cur + width > 100 else cur + width
return [res, cur]
"""
| 27.785714 | 79 | 0.480353 | 792 | 0.290856 | 0 | 0 | 0 | 0 | 0 | 0 | 1,646 | 0.60448 |
2396d1a675b3960ca8025853ba1b4a50d69159c9 | 19,779 | py | Python | pygna/block_model.py | Gee-3/pygna | 61f2128e918e423fef73d810e0c3af5761933096 | [
"MIT"
]
| 32 | 2019-07-11T22:58:14.000Z | 2022-03-04T19:34:55.000Z | pygna/block_model.py | Gee-3/pygna | 61f2128e918e423fef73d810e0c3af5761933096 | [
"MIT"
]
| 3 | 2021-05-24T14:03:13.000Z | 2022-01-07T03:47:32.000Z | pygna/block_model.py | Gee-3/pygna | 61f2128e918e423fef73d810e0c3af5761933096 | [
"MIT"
]
| 5 | 2019-07-24T09:38:07.000Z | 2021-12-30T09:20:20.000Z | import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
import logging
from pygna import output
from pygna.utils import YamlConfig
import pandas as pd
import random
import string
import seaborn as sns
import pygna.output as output
class BlockModel(object):
def __init__(self, block_model_matrix, n_nodes: int = 10, nodes_percentage: list = None):
"""
This class implements a block model reading and elaboration methods
:param block_model_matrix: the matrix to be used as block model
:param n_nodes: the number of nodes
:param nodes_percentage: the percentage of nodes to use for the calculations, passed through a list for example [0.5, 0.5]
"""
self.n_nodes = n_nodes
self.nodes = ["N" + str(i) for i in range(n_nodes)]
self.n_clusters = block_model_matrix.shape[0]
self.graph = nx.Graph()
self.bm = block_model_matrix
self.nodes_in_block = False
self.nodes_percentage = nodes_percentage
self.cluster_dict = {}
def set_nodes(self, nodes_names: list) -> None:
"""
        Set the node names of the block model
:param nodes_names: the names list
Example
_______
>>> p = 0.5
>>> n_nodes = 1000
>>> matrix = np.array([[1, 2], [3, 4]])
>>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p])
        >>> nodes = ["A", "B", "C"]
>>> bm.set_nodes(nodes)
"""
self.nodes = nodes_names
self.n_nodes = len(nodes_names)
def set_bm(self, block_model_matrix: pd.DataFrame) -> None:
"""
Change block model matrix used in the class
:param block_model_matrix: the block model matrix
Example
_______
>>> p = 0.5
>>> n_nodes = 1000
>>> matrix = np.array([[1, 2], [3, 4]])
>>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p])
>>> bmm = pd.DataFrame(mydata_matrix)
>>> bm.set_bm(bmm)
"""
if block_model_matrix.shape[0] == self.n_clusters:
self.bm = block_model_matrix
else:
logging.error("the block model is supposed to have %d clusters" % (self.n_clusters))
def set_nodes_in_block_percentage(self, nodes_percentage: list) -> None:
"""
Pass the percentage of nodes in each block as a list, for example [0.5, 0.5]
:param nodes_percentage: percentage of the nodes
Example
_______
>>> p = 0.5
>>> n_nodes = 1000
>>> matrix = np.array([[1, 2], [3, 4]])
>>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p])
>>> bm.set_nodes_in_block_percentage([0.5, 0.5])
"""
self.nodes_percentage = nodes_percentage
def set_nodes_in_block(self, nodes_in_block: int) -> None:
"""
        Set the number of nodes in the block model

        :param nodes_in_block: list with the number of nodes in each block
_______
>>> p = 0.5
>>> n_nodes = 1000
>>> matrix = np.array([[1, 2], [3, 4]])
>>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p])
>>> bm.set_nodes_in_block(1000)
"""
self.nodes_in_block = nodes_in_block
def create_graph(self) -> None:
"""
Create a graph from the parameters passed in the constructor of the class
Example
_______
>>> bm = BlockModel(np.array(config["BlockModel"]["matrix"]), n_nodes=config["BlockModel"]["n_nodes"], nodes_percentage=config["BlockModel"]["nodes_percentage"])
>>> bm.create_graph()
"""
reject = True
logging.info('Reject=' + str(reject))
while reject:
graph = generate_graph_from_sm(self.n_nodes, self.bm, self.nodes_in_block, self.nodes,
self.nodes_percentage)
LCC = max(nx.connected_components(graph), key=len)
reject = (len(LCC) != self.n_nodes)
logging.info('Reject=' + str(reject))
logging.info('Nodes: %d, in LCC: %d' % (self.n_nodes, len(LCC)))
self.graph = graph
def plot_graph(self, output_folder: str) -> None:
"""
Plot the block model graph
:param output_folder: the folder where to save the result
Example
_______
>>> p = 0.5
>>> n_nodes = 1000
>>> matrix = np.array([[1, 2], [3, 4]])
>>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p])
>>> bm.plot_graph("block_model_path.pdf")
"""
plot_bm_graph(self.graph, self.bm, output_folder=output_folder)
def write_network(self, output_file: str) -> None:
"""
Save the network on a given file
:param output_file: the output path where to save the results
Example
_______
>>> p = 0.5
>>> n_nodes = 1000
>>> matrix = np.array([[1, 2], [3, 4]])
>>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p])
>>> bm.write_network("network.tsv")
"""
self.network_file = output_file
logging.info("Network written on %s" % (output_file))
if output_file.endswith(".tsv"):
nx.write_edgelist(self.graph, output_file, data=False, delimiter="\t")
else:
logging.error("output file format unknown")
def write_cluster_genelist(self, output_file: str) -> None:
"""
Save the gene list to a GMT file
:param output_file: the output path where to save the results
Example
_______
>>> p = 0.5
>>> n_nodes = 1000
>>> matrix = np.array([[1, 2], [3, 4]])
>>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p])
>>> bm.write_cluster_genelist("genes.gmt")
"""
self.genelist_file = output_file
clusters = nx.get_node_attributes(self.graph, "cluster")
for i in set(clusters.values()):
c = "cluster_" + str(i)
self.cluster_dict[c] = {}
self.cluster_dict[c]["descriptor"] = "cluster"
self.cluster_dict[c]["genes"] = [str(j) for j in clusters.keys() if clusters[j] == i]
if output_file.endswith(".gmt"):
output.print_GMT(self.cluster_dict, self.genelist_file)
else:
logging.error("output file format unknown")
def generate_graph_from_sm(n_nodes: int, block_model: pd.DataFrame, nodes_in_block: list = False,
node_names: list = None, nodes_percentage: list = None) -> nx.Graph:
"""
    This function creates a graph with n_nodes vertices and a matrix block_model that describes the intra- and inter-block connectivity.
    The nodes_in_block parameter is a list that controls the number of nodes in each cluster
:param n_nodes: the number of nodes in the block model
:param block_model: the block model to elaborate
:param nodes_in_block: the list of nodes in the block model
:param node_names: the list of names in the block model
:param nodes_percentage: the percentage of nodes to use for the calculations, passed through a list for example [0.5, 0.5]
Example
_______
>>> bm = pd.DataFrame(mydata_matrix)
    >>> nodes = ["A", "B", "C"]
>>> graph = generate_graph_from_sm(n_nodes, bm, nodes_in_block, nodes, nodes_percentage)
"""
if not node_names:
node_names = range(n_nodes)
edges = []
G = nx.Graph()
if nodes_percentage:
cluster = np.random.choice(block_model.shape[0], size=n_nodes, p=nodes_percentage)
np.random.shuffle(cluster)
elif nodes_in_block:
list_temp = [nodes_in_block[i] * [i] for i in range(len(nodes_in_block))]
cluster = np.array([val for sublist in list_temp for val in sublist])
np.random.shuffle(cluster)
else:
# cluster is an array of random numbers corresponding to the cluster of each node
cluster = np.random.randint(block_model.shape[0], size=n_nodes)
for i in range(n_nodes):
G.add_node(node_names[i], cluster=cluster[i])
for i in range(n_nodes):
for j in range(i + 1, n_nodes):
if np.random.rand() < block_model[cluster[i], cluster[j]]:
edges.append((node_names[i], node_names[j]))
G.add_edges_from(edges)
return G
def plot_bm_graph(graph: nx.Graph, block_model: pd.DataFrame, output_folder: str = None) -> None:
"""
Save the graph on a file
:param graph: the graph with name of the nodes
:param block_model: the block model
:param output_folder: the folder where to save the file
Example
_______
>>> bm = pd.DataFrame(mydata_matrix)
>>> graph = nx.complete_graph(100)
>>> plot_bm_graph(graph, bm, output_folder="./results/")
"""
nodes = graph.nodes()
colors = ['#b15928', '#1f78b4', '#6a3d9a', '#33a02c', '#ff7f00']
cluster = nx.get_node_attributes(graph, 'cluster')
labels = [colors[cluster[n]] for n in nodes]
layout = nx.spring_layout(graph)
plt.figure(figsize=(13.5, 5))
plt.subplot(1, 3, 1)
nx.draw(graph, nodelist=nodes, pos=layout, node_color='#636363', node_size=50, edge_color='#bdbdbd')
plt.title("Observed network")
plt.subplot(1, 3, 2)
plt.imshow(block_model, cmap='OrRd', interpolation='nearest')
plt.title("Stochastic block matrix")
plt.subplot(1, 3, 3)
legend = []
for ix, c in enumerate(colors):
legend.append(mpatches.Patch(color=c, label='C%d' % ix))
nx.draw(graph, nodelist=nodes, pos=layout, node_color=labels, node_size=50, edge_color='#bdbdbd')
plt.legend(handles=legend, ncol=len(colors), mode="expand", borderaxespad=0)
plt.title("SB clustering")
plt.savefig(output_folder + 'block_model.pdf', bbox_inches='tight')
def generate_sbm_network(input_file: "yaml configuration file") -> None:
"""
This function generates a simulated network, using the block model matrix given as input and saves both the network and the cluster nodes.
All parameters must be specified in a yaml file.
This function allows to create network and geneset for any type of SBM
"""
ym = YamlConfig()
config = ym.load_config(input_file)
print(config)
bm = BlockModel(np.array(config["BlockModel"]["matrix"]), n_nodes=config["BlockModel"]["n_nodes"],
nodes_percentage=config["BlockModel"]["nodes_percentage"])
outpath = config["Simulations"]["output_folder"]
suffix = config["Simulations"]["suffix"]
for i in range(config["Simulations"]["n_simulated"]):
bm.create_graph()
bm.write_network(outpath + suffix + "_s_" + str(i) + "_network.tsv")
bm.write_cluster_genelist(outpath + suffix + "_s_" + str(i) + "_genes.gmt")
# bm.plot_graph(outpath+suffix+"_s_"+str(i))
def generate_sbm2_network(output_folder: 'folder where the simulations are saved',
prefix: 'prefix for the simulations' = 'sbm',
n_nodes: 'nodes in the network' = 1000,
theta0: 'probability of connection in the cluster' = '0.9,0.7,0.5,0.2',
percentage: 'percentage of nodes in cluster 0, use ratio 0.1 = 10 percent' = '0.1',
density: 'multiplicative parameter used to define network density' = '0.06,0.1,0.2',
n_simulations: 'number of simulated networks for each configuration' = 3
):
"""
This function generates the simulated networks and genesets using the stochastic block model with 2 BLOCKS as described in the paper. The output names are going to be prefix_t_<theta0>_p_<percentage>_d_<density>_s_<n_simulation>_network.tsv or _genes.gmt
One connected cluster while the rest of the network has the same probability of connection. SBM = d *[theta0, 1-theta0 1-theta0, 1-theta0]
The simulator checks for connectedness of the generated network, if the generated net is not connected, a new simulation is generated.
"""
teta_ii = [float(i) for i in theta0.replace(' ', '').split(',')]
percentages = [float(i) for i in percentage.replace(' ', '').split(',')]
density = [float(i) for i in density.replace(' ', '').split(',')]
n_simulated = int(n_simulations)
n_nodes = int(n_nodes)
for p in percentages:
for t in teta_ii:
for d in density:
matrix = np.array([[d * t, d * (1 - t)], [d * (1 - t), d * (1 - t)]])
bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p])
for i in range(n_simulated):
name = output_folder + prefix + "_t_" + str(t) + "_p_" + str(p) + "_d_" + str(d) + "_s_" + str(i)
bm.create_graph()
bm.write_network(name + "_network.tsv")
bm.write_cluster_genelist(name + "_genes.gmt")
def write_network(network, output_file):
network_file= output_file
logging.info("Network written on %s" %(output_file))
if output_file.endswith(".tsv"):
nx.write_edgelist(network, output_file, data=False, delimiter="\t")
else:
logging.error("output file format unknown")
def get_mix_genesets(gmt_diz,
tups = [('positive_0', 'positive_1'),
('positive_2', 'positive_3'),
('null_4', 'null_5'),
('null_6', 'null_7')],
perc = [4,6,10,12,88,90,94,96]):
diz = {}
for t in tups:
a = gmt_diz[t[0]]['genes']
b = gmt_diz[t[1]]['genes']
for p in perc:
name = t[0]+'_'+str(int(p))+'_'+t[1]+'_'+str(int(100-p))
aa = np.random.choice(a, int(len(a)/100*p), replace = False)
bb = np.random.choice(b, int(len(a)/100*int(100-p)), replace = False)
tot = []
for i in aa:
tot.append(i)
for i in bb:
tot.append(i)
diz[name]=tot
return(diz)
#########################################################################
####### COMMAND LINE FUNCTIONS ##########################################
#########################################################################
def generate_gna_sbm( output_tsv: 'output_network',
output_gmt: 'output geneset filename, this contains only the blocks',
output_gmt2: 'mixture output geneset filename, this contains the mixture blocks'=None,
N:'number of nodes in the network' = 1000,
block_size:'size of the first 8 blocks' = 50,
d:'baseline probability of connection, p0 in the paper' = 0.06,
fc_cis:'positive within-block scaling factor for the probability of connection, Mii = fc_cis * d (alpha parameter in the paper)' = 2.,
fc_trans:'positive between-block scaling factor for the probability of connection, (beta parameter in the paper)' = .5,
pi : 'percentage of block-i nodes for the genesets made of block-i and block-j. Use symmetrical values (5,95),use string comma separated' = '4,6,10,12,88,90,94,96',
descriptor='crosstalk_sbm',
sbm_matrix_figure: 'shows the blockmodel matrix' = None):
"""
This function generates benchmark network and geneset to test
the crosstalk between two blocks.
This function generates 4 blocks with d*fold change probability
and other 4 blocks with d probability.
    The crosstalk is set both between pairs of the first 4 blocks and between pairs of the other 4.
Make sure that 8*cluster_size < N
"""
clusters = 8
lc = N - (block_size*clusters)
if lc < 1:
logging.error('nodes are less than cluster groups')
d =float(d)
sizes = clusters*[block_size]
sizes.append(lc)
print(sizes)
probs = d*np.ones((9,9))
#pp = np.tril(d/100*(1+np.random.randn(ncluster+1,ncluster+1)))
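    # A: within-block connection probability of the four enriched blocks (alpha scaling of d)
    # B: between-block (crosstalk) probability of the paired blocks, between d and A depending on fc_trans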
A = fc_cis*d
B = d + fc_trans*(d*(fc_cis-1))
probs[0,1] = B
probs[2,3] = B
probs[1,0] = B
probs[3,2] = B
probs[4,5] = B
probs[6,7] = B
probs[5,4] = B
probs[7,6] = B
probs[0,0] = A
probs[1,1] = A
probs[2,2] = A
probs[3,3] = A
if type(sbm_matrix_figure)==str:
f,ax = plt.subplots(1)
sns.heatmap(probs, ax = ax, cmap = 'YlOrRd', annot=True)
f.savefig(sbm_matrix_figure)
ncycle = 0
k = 0
while (k<N):
g = nx.stochastic_block_model(sizes, probs)
g = max(nx.connected_component_subgraphs(g), key=len)
k = len(g)
ncycle +=1
if ncycle > 20:
logging.error('density is too low')
H = nx.relabel_nodes(g, lambda x:'n'+str(x))
gmt_diz = {}
nodes = list(H.nodes)
for p,l in enumerate(H.graph['partition'][:-1]):
if p<4:
name = 'positive_'+str(p)
else:
name = 'null_'+str(p)
ll = [nodes[i] for i in l]
gmt_diz[name]={}
gmt_diz[name]['genes']=ll
gmt_diz[name]['descriptor']=descriptor
if type(output_gmt2)==str:
perc = [float(i) for i in pi.split(',')]
logging.info('Generating mixes with perc = %s')
gmt_diz2={}
mix_dix = get_mix_genesets(gmt_diz, perc = perc)
for name,i in mix_dix.items():
gmt_diz2[name]={}
gmt_diz2[name]['genes']=i
gmt_diz2[name]['descriptor']=descriptor
output.print_GMT(gmt_diz2, output_gmt2)
write_network(H, output_tsv)
output.print_GMT(gmt_diz, output_gmt)
print('Generated'+output_tsv)
def generate_gnt_sbm( output_tsv: 'output network filename',
output_gmt: 'output geneset filename, this contains only the blocks',
N:'number of nodes in the network' = 1000,
block_size: 'size of the first 6 blocks'= 50,
d: 'baseline probability of connection, p0 in the paper' = 0.06,
fold_change:'positive within-block scaling factor for the probability of connection, Mii = fold_change * d (alpha parameter in the paper)' = 2.,
descriptor:'descriptor for the gmt file'='mixed_sbm'):
"""
This function generates 3 blocks with d*fold_change probability
and other 3 blocks with d probability.
Make sure that 6*cluster_size < N
"""
lc = N - (block_size*6)
if lc < 1:
logging.error('nodes are less than cluster groups')
d =float(d)
sizes = 6*[block_size]
sizes.append(lc)
print(sizes)
probs = d*np.ones((7,7))
#pp = np.tril(d/100*(1+np.random.randn(ncluster+1,ncluster+1)))
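    # Boost the within-block connection probability of the first three blocks (the 'positive' gene sets)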
probs[0,0]=fold_change*d
probs[1,1]=fold_change*d
probs[2,2]=fold_change*d
ncycle = 0
k = 0
while (k<N):
g = nx.stochastic_block_model(sizes, probs)
g = max(nx.connected_component_subgraphs(g), key=len)
k = len(g)
ncycle +=1
if ncycle > 20:
logging.error('density is too low')
H = nx.relabel_nodes(g, lambda x:'n'+str(x))
gmt_diz = {}
nodes = list(H.nodes)
for p,l in enumerate(H.graph['partition'][:-1]):
if p<3:
name = 'positive_'+str(p)
else:
name = 'null_'+str(p)
ll = [nodes[i] for i in l]
gmt_diz[name]={}
gmt_diz[name]['genes']=ll
gmt_diz[name]['descriptor']=descriptor
write_network(H, output_tsv)
output.print_GMT(gmt_diz, output_gmt)
| 36.027322 | 258 | 0.590323 | 6,252 | 0.316093 | 0 | 0 | 0 | 0 | 0 | 0 | 8,923 | 0.451135 |
23971993e9893cd5f385730b84276166fd285f88 | 184 | py | Python | printshops/apps.py | amid-africa/photoorder | 407cf58b3dbd3e2144a8533f489889295f946776 | [
"MIT"
]
| null | null | null | printshops/apps.py | amid-africa/photoorder | 407cf58b3dbd3e2144a8533f489889295f946776 | [
"MIT"
]
| null | null | null | printshops/apps.py | amid-africa/photoorder | 407cf58b3dbd3e2144a8533f489889295f946776 | [
"MIT"
]
| null | null | null | from django.apps import AppConfig
class PrintshopsConfig(AppConfig):
name = 'printshops'
""" Register our signals """
def ready(self):
import printshops.signals
| 18.4 | 34 | 0.684783 | 147 | 0.798913 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.217391 |
239787e50f7f31c3ea658751864cb26108816d6d | 275 | py | Python | ctfproblems/Hopper/10_who_is_digging/grader.py | milesmcc/pactf-2018 | cfd9d94a7b6828259220f52ab3c5893a28429c62 | [
"MIT"
]
| null | null | null | ctfproblems/Hopper/10_who_is_digging/grader.py | milesmcc/pactf-2018 | cfd9d94a7b6828259220f52ab3c5893a28429c62 | [
"MIT"
]
| null | null | null | ctfproblems/Hopper/10_who_is_digging/grader.py | milesmcc/pactf-2018 | cfd9d94a7b6828259220f52ab3c5893a28429c62 | [
"MIT"
]
| null | null | null | def grade(key, submission):
if submission.lower() == 'sea' or submission.lower() == 'the sea':
return True, "Yes! Miles learns Russian so he came up with the words that visually look same in both English and Russian."
else:
return False, "Nyet!"
| 45.833333 | 131 | 0.650909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 130 | 0.472727 |
2398b8c755adf06d3f7f1e5cae4d4aedb1f1899b | 443 | py | Python | class/lect/Lect-17/pd1.py | MikenzieAlasca/F21-1010 | a7c15b8d9bf84f316aa6921f6d8a588c513a22b8 | [
"MIT"
]
| 5 | 2021-09-09T21:08:14.000Z | 2021-12-14T02:30:52.000Z | class/lect/Lect-17/pd1.py | MikenzieAlasca/F21-1010 | a7c15b8d9bf84f316aa6921f6d8a588c513a22b8 | [
"MIT"
]
| null | null | null | class/lect/Lect-17/pd1.py | MikenzieAlasca/F21-1010 | a7c15b8d9bf84f316aa6921f6d8a588c513a22b8 | [
"MIT"
]
| 8 | 2021-09-09T17:46:07.000Z | 2022-02-08T22:41:35.000Z | import pandas as pd
people_dict = {
"weight": pd.Series([145, 182, 191],index=["joan", "bob", "mike"]),
"birthyear": pd.Series([2002, 2000, 1999], index=["bob", "joan", "mike"], name="year"),
"children": pd.Series([1, 2], index=["mike", "bob"]),
"hobby": pd.Series(["Rock Climbing", "Scuba Diving", "Sailing"], index=["joan", "bob", "mike"]),
}
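# The Series are aligned on their index labels when assembled into the DataFrame;
# entries missing for a label (e.g. 'children' for joan) become NaN.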
people = pd.DataFrame(people_dict)
print ( people )
| 31.642857 | 104 | 0.557562 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 142 | 0.320542 |
239de3aa205a8c68e33dedf541996817e27acfa5 | 3,440 | py | Python | virtualsmartcard-0.8/src/vpicc/virtualsmartcard/tests/SmartcardSAM_test.py | CMelas/foo | d7a34b24606c7b9ab04ea8c39a8b3716ca6255c1 | [
"MIT"
]
| 1 | 2021-11-09T12:01:56.000Z | 2021-11-09T12:01:56.000Z | virtualsmartcard-0.8/src/vpicc/virtualsmartcard/tests/SmartcardSAM_test.py | CMelas/foo | d7a34b24606c7b9ab04ea8c39a8b3716ca6255c1 | [
"MIT"
]
| null | null | null | virtualsmartcard-0.8/src/vpicc/virtualsmartcard/tests/SmartcardSAM_test.py | CMelas/foo | d7a34b24606c7b9ab04ea8c39a8b3716ca6255c1 | [
"MIT"
]
| null | null | null | #
# Copyright (C) 2014 Dominik Oepen
#
# This file is part of virtualsmartcard.
#
# virtualsmartcard is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# virtualsmartcard is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# virtualsmartcard. If not, see <http://www.gnu.org/licenses/>.
#
import unittest
from virtualsmartcard.SmartcardSAM import *
class TestSmartcardSAM(unittest.TestCase):
def setUp(self):
self.password = "DUMMYKEYDUMMYKEY"
self.myCard = SAM("1234", "1234567890")
self.secEnv = Security_Environment(None, self.myCard) # TODO: Set CRTs
self.secEnv.ht.algorithm = "SHA"
self.secEnv.ct.algorithm = "AES-CBC"
def test_incorrect_pin(self):
with self.assertRaises(SwError):
self.myCard.verify(0x00, 0x00, "5678")
def test_counter_decrement(self):
ctr1 = self.myCard.counter
try:
self.myCard.verify(0x00, 0x00, "3456")
except SwError as e:
pass
self.assertEquals(self.myCard.counter, ctr1 - 1)
def test_internal_authenticate(self):
sw, challenge = self.myCard.get_challenge(0x00, 0x00, "")
blocklen = vsCrypto.get_cipher_blocklen("DES3-ECB")
padded = vsCrypto.append_padding(blocklen, challenge)
sw, result_data = self.myCard.internal_authenticate(0x00, 0x00, padded)
self.assertEquals(sw, SW["NORMAL"])
def test_external_authenticate(self):
sw, challenge = self.myCard.get_challenge(0x00, 0x00, "")
blocklen = vsCrypto.get_cipher_blocklen("DES3-ECB")
padded = vsCrypto.append_padding(blocklen, challenge)
sw, result_data = self.myCard.internal_authenticate(0x00, 0x00, padded)
sw, result_data = self.myCard.external_authenticate(0x00, 0x00,
result_data)
self.assertEquals(sw, SW["NORMAL"])
def test_security_environment(self):
hash = self.secEnv.hash(0x90, 0x80, self.password)
# The API should be changed so that the hash function returns SW_NORMAL
self.secEnv.ct.key = hash[:16]
crypted = self.secEnv.encipher(0x00, 0x00,
self.password)
# The API should be changed so that encipher() returns SW_NORMAL
plain = self.secEnv.decipher(0x00, 0x00, crypted)
# The API should be changed so that decipher() returns SW_NORMAL
# self.assertEqual(plain, self.password)
# secEnv.decipher doesn't strip padding. Should it?
# should this really be secEnv.ct? probably rather secEnv.dst
self.secEnv.ct.algorithm = "RSA"
self.secEnv.dst.keylength = 1024
sw, pk = self.secEnv.generate_public_key_pair(0x00, 0x00, "")
self.assertEquals(sw, SW["NORMAL"])
if __name__ == "__main__":
unittest.main()
# CF = CryptoflexSE(None)
# print CF.generate_public_key_pair(0x00, 0x80, "\x01\x00\x01\x00")
# print MyCard._get_referenced_key(0x01)
| 40 | 79 | 0.670058 | 2,449 | 0.711919 | 0 | 0 | 0 | 0 | 0 | 0 | 1,338 | 0.388953 |
239ed9095bc55c203b6c4b8328d5c14492d59001 | 6,762 | py | Python | test/phagesExperiment/runTableCases.py | edsaac/bioparticle | 67e191329ef191fc539b290069524b42fbaf7e21 | [
"MIT"
]
| null | null | null | test/phagesExperiment/runTableCases.py | edsaac/bioparticle | 67e191329ef191fc539b290069524b42fbaf7e21 | [
"MIT"
]
| 1 | 2020-09-25T23:31:21.000Z | 2020-09-25T23:31:21.000Z | test/phagesExperiment/runTableCases.py | edsaac/VirusTransport_RxSandbox | 67e191329ef191fc539b290069524b42fbaf7e21 | [
"MIT"
]
| 1 | 2021-09-30T05:00:58.000Z | 2021-09-30T05:00:58.000Z | ###############################################################
# _ _ _ _ _
# | |__ (_) ___ _ __ __ _ _ __| |_(_) ___| | ___
# | '_ \| |/ _ \| '_ \ / _` | '__| __| |/ __| |/ _ \
# | |_) | | (_) | |_) | (_| | | | |_| | (__| | __/
# |_.__/|_|\___/| .__/ \__,_|_| \__|_|\___|_|\___|
# |_|
#
###############################################################
#
# $ python3 runTableCases.py [CASES.CSV] [TEMPLATE.IN] -run
#
# Where:
# - [CASES.CSV] path to csv file with the list of
# parameters and the corresponding tags
# - [TEMPLATE.IN] input file template for PFLOTRAN and
# the corresponding tags
# - [shouldRunPFLOTRAN = "-run"]
#
###############################################################
import numpy as np
import matplotlib.pyplot as plt
from pandas import read_csv
from os import system
import sys
## Global variables
ColumnLenght = 50.0
ConcentrationAtInlet = 1.66E-16
## Non-dimensional numbers
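# Damköhler II = L*L*K/(A*U): reaction vs. dispersion; Péclet = L/A: advection vs. dispersion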
def DaII(K,A,U,L=ColumnLenght):
return (L*L*K)/(A*U)
def Peclet(A,L=ColumnLenght):
return L/A
def plotResults(U,pH,IS,PV,kATT,kDET,dAq,dIm,alpha):
FILE = current_folder+"/pflotran-obs-0.tec"
textBoxpH = "pH = {:n}".format(pH)\
+ "\nIS = {:n}".format(IS)
textBoxKin = \
"$k_{\\rm att}$"+" = {:.4f}".format(kATT) + " h$^{-1}$" +"\n" + \
"$k_{\\rm det}$"+" = {:.4f}".format(kDET) + " h$^{-1}$" +"\n" + \
"$\lambda_{\\rm aq}$"+" = {:.4f}".format(dAq)+ " h$^{-1}$" +"\n" + \
"$\lambda_{\\rm im}$"+" = {:.4f}".format(dIm)+ " h$^{-1}$" +"\n" + \
"$\\alpha_{\\rm L}$"+" = {:.4f}".format(alpha)+ " cm "
textBoxDimensionless = "Damköhler(II) = $\\dfrac{\\rm reaction}{\\rm dispersion}$"+"\n" +\
"Da$^{\\rm att}$"+" = {:.1E}".format(DaII(kATT,alpha,U)) +"\n" +\
"Da$^{\\rm det}$"+" = {:.1E}".format(DaII(kDET,alpha,U)) +"\n" +\
"Da$^{\\rm λaq}$"+" = {:.1E}".format(DaII(dAq, alpha,U)) +"\n" +\
"Da$^{\\rm λim}$"+" = {:.1E}".format(DaII(dIm, alpha,U)) +"\n\n" +\
"Péclet = $\\dfrac{\\rm advection}{\\rm dispersion}$"+"\n" +\
"P$_{\\rm é}$"+" = {:.1E}".format(Peclet(alpha))
system("./miscellaneous/PFT2CSV.sh " + FILE)
#system("rm " + current_folder +"/*.out")
ObservationPoint = np.loadtxt(FILE,delimiter=",",skiprows=1)
Cnorm = ObservationPoint[:,1]/ConcentrationAtInlet
TimeInPoreVolumes = ObservationPoint[:,0] * U*24./(ColumnLenght)
Legend=["$\\dfrac{[V_{(aq)}]}{[V_{(aq)}]_0}$"]
plt.figure(figsize=(10,4),facecolor="white")
## Plot log-scale
ax1 = plt.subplot(1,2,1)
ax1.plot(TimeInPoreVolumes,Cnorm,c="purple",lw=3)
ax1.set_yscale("symlog",\
linthresh=1.0E-6,subs=[1,2,3,4,5,6,7,8,9])
ax1.set_ylim([-1.0E-7,1.15])
ax1.set_xlim([0,10])
ax1.set_xlabel("Pore Volume [$-$]",fontsize="large")
ax1.axvline(x=PV,ls="dotted",c="gray",lw=1)
ax1.axhspan(ymin=-1.0E-7,ymax=1.0E-6,facecolor="pink",alpha=0.2)
## Rate values
ax1.text(9.5,5.0E-5,textBoxKin,\
bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.5),\
horizontalalignment='right')
## Case pH/IS
ax1.text(9.0,1.0E-1,textBoxpH,\
bbox=dict(boxstyle='round', facecolor='purple', alpha=0.15),\
horizontalalignment='right')
## Plot linear-scale
ax2 = plt.subplot(1,2,2)
ax2.plot(TimeInPoreVolumes,Cnorm,c="purple",lw=3,label=Legend[0])
ax2.set_ylim([-1.0E-2,1.02])
ax2.set_xlim([0,10])
ax2.set_xlabel("Pore Volume [$-$]",fontsize="large")
ax2.axvline(x=PV,ls="dotted",c="gray",lw=1)
ax2.legend(fontsize="large",loc="upper right")
## Péclet and Damköhler numbers
ax2.text(9.5,0.1,textBoxDimensionless,\
bbox=dict(boxstyle='round', facecolor='purple', alpha=0.15),\
horizontalalignment='right')
plt.tight_layout()
FIGPATH = current_folder + "/" + "CASE_" + current_folder[7:10] + ".png"
#plt.show()
plt.savefig(FIGPATH,transparent=False)
## Tags dictionary for variables in input file
tagsReplaceable = {
"Porosity" : "<porosity>",
"DarcyVel" : "<darcyVel>", # q = u*porosity
"CleanTime" : "<elutionTime>", # t @ C0 = 0
"FinalTime" : "<endTime>", # @ 10 pore volumes
"AttachRate": "<katt>",
"DetachRate": "<kdet>",
"DecayAq" : "<decayAq>",
"DecayIm" : "<decayIm>",
"LongDisp" : "<longDisp>"
}
## Tags dictionary for other parameters
tagsAccesory = {
"FlowVel" : "poreWaterVel",
"PoreVol" : "poreVolume",
"pH" : "pH",
"IonicStr" : "IS"
}
## Path to PFLOTRAN executable
PFLOTRAN_path = "$PFLOTRAN_DIR/src/pflotran/pflotran "
## Table with the set of parameters
try:
parameters_file = str(sys.argv[1])
except IndexError:
sys.exit("Parameters file not defined :(")
setParameters = read_csv(parameters_file)
total_rows = setParameters.shape[0]
## Template for the PFLOTRAN input file
try:
template_file = str(sys.argv[2])
except IndexError:
sys.exit("Template file not found :(")
## Run cases?
try:
shouldRunPFLOTRAN = "-run" in str(sys.argv[3])
except IndexError:
shouldRunPFLOTRAN = False
## Delete previous cases
system("rm -rf CASE*")
## Row in the set of parameters table = case to be run
for i in range(total_rows):
#for i in range(1):
## Create a folder for the case
current_folder = "./CASE_" + "{0:03}".format(i+1)
system("mkdir " + current_folder)
## Copy template input file to folder
system("cp " + template_file + " " + current_folder+"/pflotran.in")
current_file = current_folder + "/pflotran.in"
## Replace tags for values in case
for current_tag in tagsReplaceable:
COMM = "sed -i 's/" + tagsReplaceable[current_tag] + "/"\
+'{:.3E}'.format(setParameters.loc[i,tagsReplaceable[current_tag]])\
+ "/g' " + current_file
system(COMM)
## Run PFLOTRAN in that case
if shouldRunPFLOTRAN:
#print(PFLOTRAN_path + "-pflotranin " + current_file)
system(PFLOTRAN_path + "-pflotranin " + current_file)
#system("python3 ./miscellaneous/organizeResults.py " + current_folder + "/pflotran-obs-0.tec -clean")
current_U = setParameters.loc[i,tagsAccesory["FlowVel"]]
current_pH = setParameters.loc[i,tagsAccesory["pH"]]
current_IS = setParameters.loc[i,tagsAccesory["IonicStr"]]
current_PV = setParameters.loc[i,tagsAccesory["PoreVol"]]
#Porosity = setParameters.loc[i,tagsReplaceable["Porosity"]]
#input("Press Enter to continue...")
plotResults(current_U,current_pH,current_IS,current_PV,\
setParameters.loc[i,tagsReplaceable["AttachRate"]],\
setParameters.loc[i,tagsReplaceable["DetachRate"]],\
setParameters.loc[i,tagsReplaceable["DecayAq"]],\
setParameters.loc[i,tagsReplaceable["DecayIm"]],\
setParameters.loc[i,tagsReplaceable["LongDisp"]])
#input("Press Enter to continue...")
system("rm -r pictures ; mkdir pictures")
system("cp CASE**/*.png ./pictures/") | 34.676923 | 106 | 0.603963 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,160 | 0.466834 |
239f83a7c0d314a200223629c25572a463600e23 | 593 | py | Python | mongo_list_temp.py | ScottStanton/mqtt_temp_mongo_web | 76d59910f132fea9724b86aebfcef04b61789b8d | [
"Unlicense"
]
| null | null | null | mongo_list_temp.py | ScottStanton/mqtt_temp_mongo_web | 76d59910f132fea9724b86aebfcef04b61789b8d | [
"Unlicense"
]
| null | null | null | mongo_list_temp.py | ScottStanton/mqtt_temp_mongo_web | 76d59910f132fea9724b86aebfcef04b61789b8d | [
"Unlicense"
]
| null | null | null | #!/usr/bin/python3
#
# This software is covered by The Unlicense license
#
import os, pymongo, sys
def print_mongo():
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["cpu_temperature"]
mycol = mydb["temps"]
#print(myclient.list_database_names())
for x in mycol.find():
print(x)
myclient.close()
def main():
print_mongo()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('Interrupted')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
| 17.969697 | 64 | 0.60371 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 184 | 0.310287 |
23a0582a156a5116f9a3e62beef47135533e30c9 | 203 | py | Python | tests/decisionreqdef/test_module.py | fasfoxcom/pycamunda | 6bbebe1db40ce9fb29a9d420366e6dca1892df7b | [
"MIT"
]
| null | null | null | tests/decisionreqdef/test_module.py | fasfoxcom/pycamunda | 6bbebe1db40ce9fb29a9d420366e6dca1892df7b | [
"MIT"
]
| null | null | null | tests/decisionreqdef/test_module.py | fasfoxcom/pycamunda | 6bbebe1db40ce9fb29a9d420366e6dca1892df7b | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
def test_all_contains_only_valid_names():
import pycamunda.decisionreqdef
for name in pycamunda.decisionreqdef.__all__:
getattr(pycamunda.decisionreqdef, name)
| 22.555556 | 49 | 0.73399 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.1133 |
23a2a97bb6db12d817c114dd0b13665cae319c12 | 2,185 | py | Python | second/pytorch/models/fusion.py | RickOnEarth/pointpillars_based_CLOCs | c6d4576a151540200dac2354b00dc4ecce6ee72d | [
"MIT"
]
| 2 | 2022-01-05T08:41:38.000Z | 2022-02-14T01:30:08.000Z | second/pytorch/models/fusion.py | RickOnEarth/pointpillars_based_CLOCs | c6d4576a151540200dac2354b00dc4ecce6ee72d | [
"MIT"
]
| 1 | 2022-03-28T03:23:36.000Z | 2022-03-28T03:23:36.000Z | second/pytorch/models/fusion.py | RickOnEarth/pointpillars_based_CLOCs | c6d4576a151540200dac2354b00dc4ecce6ee72d | [
"MIT"
]
| 2 | 2022-01-07T05:56:43.000Z | 2022-02-16T13:26:13.000Z | import time
import torch
from torch import nn
from torch.nn import functional as F
#import spconv
import torchplus
from torchplus.nn import Empty, GroupNorm, Sequential
from torchplus.ops.array_ops import gather_nd, scatter_nd
from torchplus.tools import change_default_args
import sys
if '/opt/ros/kinetic/lib/python2.7/dist-packages' in sys.path:
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
class fusion(nn.Module):
def __init__(self):
super(fusion, self).__init__()
self._total_time = 0.0
self._total_count = 0
self.name = 'fusion_layer'
self.corner_points_feature = Sequential(
nn.Conv2d(24,48,1),
nn.ReLU(),
nn.Conv2d(48,96,1),
nn.ReLU(),
nn.Conv2d(96,96,1),
nn.ReLU(),
nn.Conv2d(96,4,1),
)
self.fuse_2d_3d = Sequential(
nn.Conv2d(4,18,1),
nn.ReLU(),
nn.Conv2d(18,36,1),
nn.ReLU(),
nn.Conv2d(36,36,1),
nn.ReLU(),
nn.Conv2d(36,1,1),
)
self.maxpool = Sequential(
nn.MaxPool2d([200,1],1),
)
def forward(self,input_1,tensor_index):
torch.cuda.synchronize()
t1 = time.time()
flag = -1
        if tensor_index[0,0] == -1:  # no valid 2D/3D candidate pairs for this sample
out_1 = torch.zeros(1,200,107136,dtype = input_1.dtype,device = input_1.device)
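            # -9999999 acts as a large negative sentinel so empty entries never win the max-pooling below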
out_1[:,:,:] = -9999999
flag = 0
else:
            x = self.fuse_2d_3d(input_1)  # input shape example: [1, 4, 1, 193283], i.e. 4 channels over 1*193283 entries
out_1 = torch.zeros(1,200,107136,dtype = input_1.dtype,device = input_1.device)
out_1[:,:,:] = -9999999
out_1[:,tensor_index[:,0],tensor_index[:,1]] = x[0,:,0,:]
flag = 1
x = self.maxpool(out_1)
#x, _ = torch.max(out_1,1)
x = x.squeeze().reshape(1,-1,1)
torch.cuda.synchronize()
self._total_time += time.time() - t1
self._total_count += 1 #batch size = 1
#print("avg fusion time:", self._total_time/self._total_count*1000)
return x, flag
| 33.106061 | 92 | 0.556522 | 1,769 | 0.808132 | 0 | 0 | 0 | 0 | 0 | 0 | 296 | 0.135222 |
23a456677b9384e5a17f6de8dcdc1e93e2a745f9 | 3,001 | py | Python | pdf_lines_gluer.py | serge-sotnyk/pdf-lines-gluer | b44284a28e4bce377d683ab8d6f820e704c630cb | [
"MIT"
]
| 1 | 2021-04-16T13:05:20.000Z | 2021-04-16T13:05:20.000Z | pdf_lines_gluer.py | serge-sotnyk/pdf-lines-gluer | b44284a28e4bce377d683ab8d6f820e704c630cb | [
"MIT"
]
| null | null | null | pdf_lines_gluer.py | serge-sotnyk/pdf-lines-gluer | b44284a28e4bce377d683ab8d6f820e704c630cb | [
"MIT"
]
| 2 | 2019-06-24T06:45:46.000Z | 2019-06-28T19:43:20.000Z | import string
from typing import List, Dict
# inject code here #
def _mean_in_window(lines, i) -> float:
start = max(i - 5, 0)
finish = min(i + 5, len(lines) - 1)
sm, count = 0, 0
for n in range(start, finish):
sm += len(lines[n]) - 1 # minus one-char prefix
count += 1
return sm / max(count, 1)
def _last_char(line: str) -> str:
return ' ' if len(line) < 1 else line[-1]
def _last_char_features(l_char: str) -> Dict[str, object]:
res = {
'isalpha': l_char.isalpha(),
'isdigit': l_char.isdigit(),
'islower': l_char.islower(),
'punct': l_char if l_char in string.punctuation else ' ',
}
return res
def _first_chars(line: str) -> str:
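    """Encode the first two characters as classes: '0' for a digit, 'a' for a lowercase letter, 'A' for any other letter, else the character itself."""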
if len(line) < 1:
chars = ' '
elif len(line) < 2:
chars = line[0]
else:
chars = line[:2]
res = []
for c in chars:
if c.isdigit():
res.append('0')
elif c.isalpha():
res.append('a' if c.islower() else 'A')
else:
res.append(c)
return ''.join(res)
def _line_to_features(line: str, i: int, lines: List[str], annotated: bool) -> Dict[str, object]:
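    """Build the feature dict for one line: its own length and leading character classes, the mean length in a +/-5 line window, and the previous line's length and last-character features."""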
features = {}
this_len = len(line)
mean_len = _mean_in_window(lines, i)
if i > 0:
prev_len = len(lines[i-1]) - (1 if annotated else 0)
l_char = _last_char(lines[i-1])
else:
prev_len = 0
l_char = ' '
features.update(
{
'this_len': this_len,
'mean_len': mean_len,
'prev_len': prev_len,
'first_chars': _first_chars(line),
})
features.update(_last_char_features(l_char))
return features
def _featurize_text_with_annotation(text: str) -> (List[object], List[bool]):
lines = text.strip().splitlines()
x, y = [], []
for i, line in enumerate(lines):
y.append(line[0] == '+') # True, if line should be glued with previous
line = line[1:]
x.append(_line_to_features(line, i, lines, True))
return x, y
_HYPHEN_CHARS = {
'\u002D', # HYPHEN-MINUS
'\u00AD', # SOFT HYPHEN
'\u2010', # HYPHEN
'\u2011', # NON-BREAKING HYPHEN
}
def _preprocess_pdf(text: str, clf, v) -> str:
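    """Glue hard-wrapped lines back together: featurize every line, let the classifier decide whether it continues the previous one, and join accordingly (dropping a trailing hyphen, otherwise inserting a space)."""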
lines = [s.strip() for s in text.strip().splitlines()]
x = []
for i, line in enumerate(lines):
x.append(_line_to_features(line, i, lines, False))
if not x:
return ''
x_features = v.transform(x)
y_pred = clf.predict(x_features)
corrected_acc = []
for i, line in enumerate(lines):
line = line.strip()
if i == 0 or not y_pred[i]:
corrected_acc.append(line)
else:
prev_line = corrected_acc[-1]
if prev_line != '' and prev_line[-1] in _HYPHEN_CHARS:
corrected_acc[-1] = prev_line[:-1]
else:
corrected_acc[-1] += ' '
corrected_acc[-1] += line
corrected = '\n'.join(corrected_acc)
return corrected
| 26.557522 | 97 | 0.55115 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 290 | 0.096634 |
23a5398ab784fc5aa194816a75732cc159a8849f | 1,241 | py | Python | backend/thing/urls.py | thuong-lino/thing | e45d8f197896f4ab9b52dec0a85169396fff629a | [
"MIT"
]
| null | null | null | backend/thing/urls.py | thuong-lino/thing | e45d8f197896f4ab9b52dec0a85169396fff629a | [
"MIT"
]
| null | null | null | backend/thing/urls.py | thuong-lino/thing | e45d8f197896f4ab9b52dec0a85169396fff629a | [
"MIT"
]
| null | null | null | from django.conf.urls import include
from django.urls import path
from django.contrib import admin
from users.views import FacebookLogin
import django_js_reverse.views
from rest_framework.routers import DefaultRouter
from common.routes import routes as common_routes
router = DefaultRouter()
routes = common_routes
for route in routes:
router.register(route['regex'], route['viewset'],
basename=route['basename'])
urlpatterns = [
path("", include("common.urls"), name="common"),
path("assignments/", include("assignments.urls"), name='assignments'),
path('api-auth/', include('rest_framework.urls')),
path('rest-auth/', include('rest_auth.urls')),
path('rest-auth/registration/', include('rest_auth.registration.urls')),
path('rest-auth/facebook/', FacebookLogin.as_view(), name='fb_login'),
path("admin/", admin.site.urls, name="admin"),
path("jsreverse/", django_js_reverse.views.urls_js, name="js_reverse"),
path("api/", include(router.urls), name="api"),
path("api/assignments/", include("assignments.api.assignment.urls")),
path("api/grade-assignment/", include("assignments.api.graded-assignment.urls")),
path("api/", include("users.urls"), name="user"),
]
| 37.606061 | 85 | 0.706688 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 427 | 0.344077 |
23a5e45f9981098530b74e9239812e4a0d27fb21 | 7,302 | py | Python | core/dataset/data_loader.py | thuzhaowang/idn-solver | 7da29ce0b0bd7e76023e1cae56e3d186b324a394 | [
"MIT"
]
| 22 | 2021-10-11T02:31:52.000Z | 2022-02-23T08:06:14.000Z | core/dataset/data_loader.py | xubin1994/idn-solver | 6b5dcfd94f35cc118c5dee0f98401e4848e670e3 | [
"MIT"
]
| 4 | 2021-12-02T02:36:30.000Z | 2022-03-16T01:04:47.000Z | core/dataset/data_loader.py | xubin1994/idn-solver | 6b5dcfd94f35cc118c5dee0f98401e4848e670e3 | [
"MIT"
]
| 4 | 2022-01-20T03:12:23.000Z | 2022-03-16T00:08:54.000Z | import numpy as np
from path import Path
import random
import pickle
import torch
import os
import cv2
def load_as_float(path):
"""Loads image"""
im = cv2.imread(path)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB).astype(np.float32)
return im
class SequenceFolder(torch.utils.data.Dataset):
"""Creates a pickle file for ScanNet scene loading, and corresponding dataloader"""
def __init__(self, root, ttype, seed=None, seq_length=3, seq_gap=20, transform=None):
np.random.seed(seed)
random.seed(seed)
self.root = Path(root)
scene_list_path = ttype
self.scene_list_path = scene_list_path[:-4]
fold_root = 'scans_test_sample' if 'test' in ttype else 'scannet_nas'
#fold_root = 'scannet_nas'
scenes = [self.root/fold_root/folder[:-1] for folder in open(scene_list_path)]
self.ttype = ttype
self.scenes = sorted(scenes)
self.seq_gap = seq_gap
self.seq_length = seq_length
self.transform = transform
file_pickle = self.scene_list_path+ '_len_'+str(self.seq_length)+ '_gap_'+str(self.seq_gap)+'.pickle'
if os.path.exists(file_pickle):
with open(file_pickle, 'rb') as handle:
sequence_set = pickle.load(handle)
self.samples = sequence_set
else:
self.crawl_folders()
def crawl_folders(self):
sequence_set = []
isc = 0
cnt = 0
for scene in self.scenes:
#print(isc, len(self.scenes))
isc += 1
frames = os.listdir(os.path.join(scene, "color"))
frames = [int(os.path.splitext(frame)[0]) for frame in frames]
frames = sorted(frames)
intrinsics = np.genfromtxt(os.path.join(scene, "intrinsic", "intrinsic_depth.txt")).astype(np.float32).reshape((4, 4))[:3,:3]
# The index from scannet nas is already sampled
if len(frames) < (self.seq_gap // 20) * self.seq_length:
continue
cnt += len(frames)
end_idx = len(frames) * 20
path_split = scene.split('/')
for i in range(len(frames)):
idx = frames[i]
img = os.path.join(scene, "color", "%04d.jpg" % idx)
if 'test' in self.ttype:
depth = os.path.join(scene, "depth", "%04d.png" % idx)
# do not require normal when test
normal = ""
else:
depth = os.path.join(scene, "depth", "%04d.npy" % idx)
normal = os.path.join(scene, "normal", "%04d_normal.npy" % idx)
pose_tgt = np.loadtxt(os.path.join(scene, "pose", "%04d.txt" % idx))
do_nan_tgt = False
nan_pose_tgt = np.sum(np.isnan(pose_tgt) | np.isinf(pose_tgt))
if nan_pose_tgt>0:
do_nan_tgt = True
sample = {'intrinsics': intrinsics, 'tgt': img, 'tgt_depth': depth, 'tgt_normal': normal, 'ref_depths': [], 'ref_imgs': [], 'ref_poses': [], 'path': []}
sample['path'] = os.path.join(scene , img[:-4])
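                # pick (seq_length - 1) reference frame indices spaced seq_gap apart around
                # the target index, shifting the window when the target is too close to the
                # start or end of the sequence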
if idx < self.seq_gap:
shifts = list(range(idx,idx+(self.seq_length-1)*self.seq_gap+1,self.seq_gap))
shifts.remove(idx) #.pop(i)
elif idx >= end_idx - self.seq_gap:
shifts = list(range(idx,end_idx,self.seq_gap))
shifts = list(range(idx-(self.seq_length-1)*self.seq_gap,idx+1,self.seq_gap))
shifts.remove(idx)
else:
if self.seq_length%2 == 1:
demi_length = self.seq_length//2
if (idx>=demi_length*self.seq_gap) and (idx<end_idx- demi_length*self.seq_gap):
shifts = list(range(idx- (demi_length)*self.seq_gap, idx+(demi_length)*self.seq_gap+1,self.seq_gap))
elif idx<demi_length*self.seq_gap:
diff_demi = (demi_length-idx//self.seq_gap)
shifts = list(range(idx- (demi_length-diff_demi)*self.seq_gap, idx+(demi_length+diff_demi)*self.seq_gap+1,self.seq_gap))
elif idx>=end_idx- demi_length*self.seq_gap:
diff_demi = (demi_length-(end_idx-idx-1)//self.seq_gap)
shifts = list(range(idx- (demi_length+diff_demi)*self.seq_gap, idx+(demi_length-diff_demi)*self.seq_gap+1,self.seq_gap))
else:
print('Error')
shifts.remove(idx)
else:
#2 scenarios
demi_length = self.seq_length//2
if (idx >= demi_length*self.seq_gap) and (idx < end_idx- demi_length*self.seq_gap):
shifts = list(range(idx - demi_length*self.seq_gap, idx + (demi_length-1)*self.seq_gap+1, self.seq_gap))
elif idx < demi_length*self.seq_gap:
diff_demi = (demi_length-idx//self.seq_gap)
shifts = list(range(idx- (demi_length-diff_demi)*self.seq_gap, idx+(demi_length+diff_demi-1)*self.seq_gap+1,self.seq_gap))
elif idx>=end_idx- demi_length*self.seq_gap:
diff_demi = (demi_length-(end_idx-idx-1)//self.seq_gap)
shifts = list(range(idx- (demi_length+diff_demi-1)*self.seq_gap, idx+(demi_length-diff_demi)*self.seq_gap+1,self.seq_gap))
else:
print('Error')
shifts.remove(idx)
do_nan = False
try:
for j in shifts:
pose_src = np.loadtxt(os.path.join(scene, "pose", "%04d.txt" % j))
pose_rel = np.linalg.inv(pose_src) @ pose_tgt
pose = pose_rel[:3,:].reshape((1,3,4)).astype(np.float32)
sample['ref_poses'].append(pose)
sample['ref_imgs'].append(os.path.join(scene, "color", "%04d.jpg" % j))
if 'test' in self.ttype:
sample['ref_depths'].append(os.path.join(scene, "depth", "%04d.png" % j))
else:
sample['ref_depths'].append(os.path.join(scene, "depth", "%04d.npy" % j))
nan_pose = np.sum(np.isnan(pose)) + np.sum(np.isinf(pose))
if nan_pose>0:
do_nan = True
if not do_nan_tgt and not do_nan:
sequence_set.append(sample)
except:
continue
file_pickle = self.scene_list_path+ '_len_'+str(self.seq_length)+ '_gap_'+str(self.seq_gap)+'.pickle'
with open(file_pickle, 'wb') as handle:
pickle.dump(sequence_set, handle, protocol=pickle.HIGHEST_PROTOCOL)
self.samples = sequence_set
def __getitem__(self, index):
sample = self.samples[index]
tgt_img = load_as_float(sample['tgt'])
if 'test' in self.ttype:
tgt_depth = cv2.imread(sample['tgt_depth'],-1).astype(np.float32) / 1000.0
tgt_normal = np.tile(np.expand_dims(np.ones_like(tgt_depth), -1), (1,1,3))
else:
tgt_depth = np.load(sample['tgt_depth']).astype(np.float32) / 1000.0
tgt_normal = np.load(sample['tgt_normal']).astype(np.float32)
tgt_normal = 1.0 - tgt_normal * 2.0 # [-1, 1]
tgt_normal[:,:,2] = np.abs(tgt_normal[:,:,2]) * -1.0
ref_poses = sample['ref_poses']
ref_imgs = [load_as_float(ref_img) for ref_img in sample['ref_imgs']]
if 'test' in self.ttype:
ref_depths = [cv2.imread(depth_img,-1).astype(np.float32)/1000.0 for depth_img in sample['ref_depths']]
else:
ref_depths = [np.load(depth_img).astype(np.float32)/1000.0 for depth_img in sample['ref_depths']]
if self.transform is not None:
imgs, depths, normals, intrinsics = self.transform([tgt_img] + ref_imgs, [tgt_depth] + ref_depths, [tgt_normal], np.copy(sample['intrinsics']))
tgt_img = imgs[0]
tgt_depth = depths[0]
tgt_normal = normals[0]
ref_imgs = imgs[1:]
ref_depths = depths[1:]
else:
intrinsics = np.copy(sample['intrinsics'])
intrinsics_inv = np.linalg.inv(intrinsics)
return tgt_img, ref_imgs, tgt_normal, ref_poses, intrinsics, intrinsics_inv, tgt_depth, ref_depths
def __len__(self):
return len(self.samples)
| 38.840426 | 161 | 0.65037 | 7,026 | 0.962202 | 0 | 0 | 0 | 0 | 0 | 0 | 842 | 0.115311 |
23a6372b0029d78dd5def2146734771fbbe2bd48 | 1,632 | py | Python | server.py | hugoantunes/EchoServer | da4a6b8d8f4362e6770f767c8e75e80cac55d417 | [
"MIT"
]
| null | null | null | server.py | hugoantunes/EchoServer | da4a6b8d8f4362e6770f767c8e75e80cac55d417 | [
"MIT"
]
| null | null | null | server.py | hugoantunes/EchoServer | da4a6b8d8f4362e6770f767c8e75e80cac55d417 | [
"MIT"
]
| null | null | null | import Queue
import select
import socket
from conf import ADDRESS, BACKLOG, SIZE
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setblocking(0)
print 'starting up on %s port %s' % ADDRESS
server.bind(ADDRESS)
server.listen(BACKLOG)
inputs = [server]
outputs = []
message_queues = {}
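# select()-based event loop: accept new connections, queue any data received from a
# client, and echo it back once that client's socket is writable; sockets that raise
# an exceptional condition are closed.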
while inputs:
readable, writable, exceptional = select.select(inputs, outputs, inputs)
for s in readable:
if s is server:
connection, client_address = s.accept()
print 'new connection from', client_address
connection.setblocking(0)
inputs.append(connection)
message_queues[connection] = Queue.Queue()
else:
data = s.recv(SIZE)
if data:
print 'received from %s' % str(s.getpeername())
message_queues[s].put(data)
if s not in outputs:
outputs.append(s)
else:
print 'closing socket after reading no data'
inputs.remove(s)
s.close()
del message_queues[s]
for s in writable:
try:
next_msg = message_queues[s].get_nowait()
print 'sending to %s' % str(s.getpeername())
s.send(next_msg)
except Queue.Empty:
print 'output queue for', s.getpeername(), 'is empty'
outputs.remove(s)
for s in exceptional:
print 'handling exceptional condition for', s.getpeername()
inputs.remove(s)
if s in outputs:
outputs.remove(s)
s.close()
del message_queues[s]
| 27.661017 | 76 | 0.578431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 183 | 0.112132 |
23a7e7e53ed3f920173ee73d17e3e8afad1d765f | 3,813 | py | Python | glue.py | mkechagia/android-survey | a1649c0fb9476fcc9fdf586ecde9da9a9a0138aa | [
"Apache-2.0"
]
| 1 | 2022-01-26T08:14:24.000Z | 2022-01-26T08:14:24.000Z | glue.py | mkechagia/android-survey-tool | a1649c0fb9476fcc9fdf586ecde9da9a9a0138aa | [
"Apache-2.0"
]
| null | null | null | glue.py | mkechagia/android-survey-tool | a1649c0fb9476fcc9fdf586ecde9da9a9a0138aa | [
"Apache-2.0"
]
| null | null | null | import re
import copy
from collections import defaultdict
from string import Template
# initialize the dictionary for the methods with checked exceptions such as {fake method: real method}
method_dict_checked = {'deleteRecord' : 'delete', \
'editText' : 'setText_new', \
'insertData' : 'insert_new', \
'setLayout' : 'setContentView_new', \
'findViewId' : 'findViewById_new', \
'changeTextColor' : 'setTextColor_new', \
'getCursorString' : 'getString', \
'queryData' : 'query_new', \
'updateRecord' : 'update', \
'drawTxt' : 'drawText_new'}
# initialize the dictionary for the methods with unchecked exceptions such as {fake method: real method}
method_dict_unchecked = {'deleteRecord' : 'delete', \
'editText' : 'setText', \
'insertData' : 'insert', \
'setLayout' : 'setContentView', \
'findViewId' : 'findViewById', \
'changeTextColor' : 'setTextColor', \
'getCursorString' : 'getString', \
'queryData' : 'query', \
'updateRecord' : 'update', \
'drawTxt' : 'drawText'}
# answer_block is a dict of user's answers,
# i.e. answer_block = {'answer_1' : fake_answer}
# survey type refers to the different surveys
# (methods with checked exceptions Vs. methods with unchecked exceptions--documented and undocumented)
def glue_answer(filepath, answers, survey_type, email):
method_dict = set_dict(survey_type)
# open the file
filein = open(filepath)
# read it
src = Template(filein.read())
result = src.substitute(answers)
with open('static/%s-NoteEditor.java' % (email), 'w') as f:
f.write("%s" % result)
    # dictionary for answers with real Android API methods
real_answers = bind_method(answers, method_dict)
#do the substitution
result = src.substitute(real_answers)
return result
# Bind the answers' methods to the real Android API methods
# answers is a dict, i.e. answers = {'answer_1' : fake_answer}
# This function returns a dict of answers with real Android
# API methods, i.e. real_answers = {'answer_1' : real_answer}
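# e.g. with the "checked" dict, {'answer_1': 'editText("hi")'} is returned as
# {'answer_1': 'setText_new("hi")'}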
def bind_method(answers, method_dict):
real_answers = {}
a_keys = list(answers.keys())
m_keys = list(method_dict.keys())
# for each user answer
for k, l in enumerate(a_keys):
# get the value of the answer
an = answers.get(a_keys[k])
# for each fake method
for m, n in enumerate(m_keys):
# search for fake method in the answer
fake = m_keys[m]
if (re.search(fake, an)):
#print ("find fake :" + fake)
# get real method
real = method_dict.get(fake)
if (a_keys[k] not in list(real_answers.keys())):
real_answers[a_keys[k]] = re.sub(fake+'\(', real+'(', an)
break
    # copy through any answers that did not contain a fake method
for d, f in enumerate(a_keys):
if (a_keys[d] not in list(real_answers.keys())):
real_answers[a_keys[d]] = answers.get(a_keys[d])
return real_answers
def replace_methods(compiler_output, survey_type):
method_dict = set_dict(survey_type)
for fake, real in method_dict.items():
#compiler_output = compiler_output.replace(fake, real)
compiler_output = re.sub(real, fake, compiler_output)
    if re.search(r"\bsetTextColor\b\(\bcolors\b\)", compiler_output):
        compiler_output = re.sub(r"\bsetTextColor\b\(\bcolors\b\)", "changeTextColor(colors)", compiler_output)
# check for line numbers
#comp_output = remove_line_numbers(compiler_output)
return compiler_output
# dict depending on the survey type
def set_dict(survey_type):
if (survey_type == 'unchecked'):
return method_dict_unchecked
elif (survey_type == 'checked'):
return method_dict_checked
# replace line numbers with spaces
def remove_line_numbers(output):
out = ''
#.java:118
print ("Here is the output.")
print (output)
    #if re.search(r'\.java:\d+', output):
# print ("OKK")
out = re.sub(':[0-9]+', '', output)
return out
# vim: tabstop=8 noexpandtab shiftwidth=8 softtabstop=0
| 34.981651 | 104 | 0.710464 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,995 | 0.52321 |
23a85b9835619dae1db6fad9d342a22f09ccf61a | 272 | py | Python | solved_bronze/num10952.py | ilmntr/white_study | 51d69d122b07e9a0922dddb134bff4ec79077eb9 | [
"MIT"
]
| null | null | null | solved_bronze/num10952.py | ilmntr/white_study | 51d69d122b07e9a0922dddb134bff4ec79077eb9 | [
"MIT"
]
| null | null | null | solved_bronze/num10952.py | ilmntr/white_study | 51d69d122b07e9a0922dddb134bff4ec79077eb9 | [
"MIT"
]
| null | null | null | # a = 1
# b = 1
# while (not ((a==0) and (b==0))):
# a, b = map(int, input().split())
# print(a+b)
while True:
a, b = map(int, input().split())
if a == 0 and b == 0:
break
print(a+b) | 17 | 42 | 0.338235 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.404412 |
23a8d2f8440fc0f4ab166887414f385e16797422 | 381 | py | Python | mac.py | focusaurus/commander | 4d511c9211ec6afcb2614e7b24b287c7c833c853 | [
"MIT",
"Unlicense"
]
| 3 | 2015-10-12T21:32:37.000Z | 2021-09-16T16:51:03.000Z | mac.py | focusaurus/commander | 4d511c9211ec6afcb2614e7b24b287c7c833c853 | [
"MIT",
"Unlicense"
]
| null | null | null | mac.py | focusaurus/commander | 4d511c9211ec6afcb2614e7b24b287c7c833c853 | [
"MIT",
"Unlicense"
]
| null | null | null | from builtins import str
from .helpers import run
import logging
import subprocess
import functools
import types
logger = logging.getLogger("commander")
def maestro(scriptId):
"""Run a Keyboard Maestro script by ID (more robust) or name."""
run(
"""osascript -e 'tell application "Keyboard Maestro Engine" to """
"""do script "%s"'\n""" % scriptId
)
| 22.411765 | 74 | 0.67979 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 164 | 0.430446 |
23a8d8b1cd48f9fd55d3941e62fe86313bca756e | 764 | py | Python | planning_system/db/schema/views/finance/v_ui_finance.py | jehboyes/planning_system | a415f1408ef344732498d2ffb111dfd187b9b50f | [
"MIT"
]
| null | null | null | planning_system/db/schema/views/finance/v_ui_finance.py | jehboyes/planning_system | a415f1408ef344732498d2ffb111dfd187b9b50f | [
"MIT"
]
| null | null | null | planning_system/db/schema/views/finance/v_ui_finance.py | jehboyes/planning_system | a415f1408ef344732498d2ffb111dfd187b9b50f | [
"MIT"
]
| null | null | null | from planning_system.db.schema.views import _get_set_cols
def definition(session):
"""
Return UI view.
Complex view, which requires a dynamic pivot.
"""
pvt_list = _get_set_cols(session)
sql = f"""
SELECT costc, summary_code, summary, section, supersection, summary_order, sec_order, super_order, level, {pvt_list}
FROM (SELECT costc, summary_code, summary, section, supersection, summary_order, sec_order, super_order, level,
CAST(f_Set.acad_year as CHAR(4)) + ' ' + f_set.set_cat_id as finance_summary, amount as amount
FROM [v_mri_finance_grouped_subtotal] f INNER JOIN f_set ON f_set.set_id = f.set_id) p
PIVOT
(SUM(amount) FOR finance_summary in ({pvt_list})) as pvt
"""
return sql
| 38.2 | 120 | 0.700262 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 610 | 0.798429 |
23a8eaed478e43d59a2f6ba4f9c799fc00e83544 | 267 | py | Python | 1526_minIncrementsToReachTargetArray.py | stuti-rastogi/leetcode-python-solutions | 73593fe642a06a83cde974ba5e6de3a7b396ec84 | [
"MIT"
]
| 4 | 2018-07-24T08:36:42.000Z | 2019-08-25T17:48:47.000Z | 1526_minIncrementsToReachTargetArray.py | stuti-rastogi/leetcodesolutions | 73593fe642a06a83cde974ba5e6de3a7b396ec84 | [
"MIT"
]
| null | null | null | 1526_minIncrementsToReachTargetArray.py | stuti-rastogi/leetcodesolutions | 73593fe642a06a83cde974ba5e6de3a7b396ec84 | [
"MIT"
]
| null | null | null | class Solution:
def minNumberOperations(self, target: List[int]) -> int:
num_ops = target[0]
for i in range(1, len(target)):
diff = target[i]-target[i-1]
if diff > 0:
num_ops += diff
return num_ops
| 26.7 | 60 | 0.524345 | 266 | 0.996255 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
23abc12980cb0a7128b692a9097ad4b745fb655b | 756 | py | Python | python/torch_helpers/trace2jit.py | zhaohb/Forward | 08c7622090ce0cdd32fe5d0b462cb63258ce0a75 | [
"BSD-3-Clause"
]
| 1 | 2021-03-24T11:49:35.000Z | 2021-03-24T11:49:35.000Z | python/torch_helpers/trace2jit.py | zhaohb/Forward | 08c7622090ce0cdd32fe5d0b462cb63258ce0a75 | [
"BSD-3-Clause"
]
| null | null | null | python/torch_helpers/trace2jit.py | zhaohb/Forward | 08c7622090ce0cdd32fe5d0b462cb63258ce0a75 | [
"BSD-3-Clause"
]
| null | null | null | import torch
import torchvision.models as models
'''
Description:
convert torch module to JIT TracedModule.
Function description:
    Convert a torch model into a JIT TracedModule.
'''
def TracedModelFactory(file_name, traced_model):
traced_model.save(file_name)
traced_model = torch.jit.load(file_name)
print("filename : ", file_name)
print(traced_model.graph)
if __name__ == "__main__":
dummy_input = torch.randn(1, 3, 224, 224) # dummy_input is customized by user
model = models.resnet18(pretrained=True) # model is customized by user
model = model.cpu().eval()
traced_model = torch.jit.trace(model, dummy_input)
model_name = 'model_name' # model_name is customized by user
TracedModelFactory(model_name + '.pth', traced_model)
| 28 | 81 | 0.718254 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 270 | 0.346154 |
23ad1135866d4f8277494a12a0ed3be2f1311aa3 | 9,739 | py | Python | CppSimShared/Python/cppsimdata.py | silicon-vlsi-org/eda-sue2Plus | 83a2afa9c80308d5afe07a3fa0214d8412addb6d | [
"MIT"
]
| 1 | 2021-05-30T13:27:33.000Z | 2021-05-30T13:27:33.000Z | CppSimShared/Python/cppsimdata.py | silicon-vlsi-org/eda-sue2Plus | 83a2afa9c80308d5afe07a3fa0214d8412addb6d | [
"MIT"
]
| null | null | null | CppSimShared/Python/cppsimdata.py | silicon-vlsi-org/eda-sue2Plus | 83a2afa9c80308d5afe07a3fa0214d8412addb6d | [
"MIT"
]
| null | null | null | # cppsimdata.py
# written by Michael H. Perrott
# with minor modifications by Doug Pastorello to work with both Python 2.7 and Python 3.4
# available at www.cppsim.com as part of the CppSim package
# Copyright (c) 2013-2017 by Michael H. Perrott
# This file is disributed under the MIT license (see Copying file)
import ctypes as ct
import numpy as np
import sys
import os
import platform
import subprocess as sp
import contextlib
from scipy.signal import lfilter,welch
class CPPSIM_STORAGE_INFO(ct.Structure):
_fields_ = [
('filename',ct.c_char_p),
('num_sigs',ct.c_int),
('num_samples',ct.c_int)
]
class CppSimData(object):
def __init__(self, filename=None):
if filename != None:
self.storage_info = CPPSIM_STORAGE_INFO(filename.encode('UTF-8'),0,0)
else:
self.storage_info = CPPSIM_STORAGE_INFO('None'.encode('UTF-8'),0,0)
self.err_msg = ct.create_string_buffer(1000)
self.cur_sig_name = ct.create_string_buffer(1000)
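        # locate and load the platform-specific cppsimdata_lib shared library bundled with CppSim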
if sys.platform == 'darwin':
home_dir = os.getenv("HOME")
arch_val = platform.architecture()[0]
cppsimdata_lib_file = home_dir + '/CppSim/CppSimShared/Python/macosx/cppsimdata_lib.so'
elif sys.platform == 'win32':
cppsimsharedhome = os.getenv("CPPSIMSHAREDHOME")
if sys.maxsize == 2147483647:
cppsimdata_lib_file = cppsimsharedhome + '/Python/win32/cppsimdata_lib.dll'
else:
cppsimdata_lib_file = cppsimsharedhome + '/Python/win64/cppsimdata_lib.dll'
else:
cppsimsharedhome = os.getenv("CPPSIMSHAREDHOME")
arch_val = platform.architecture()[0]
if arch_val == '64bit':
cppsimdata_lib_file = cppsimsharedhome + '/Python/glnxa64/cppsimdata_lib.so'
else:
cppsimdata_lib_file = cppsimsharedhome + '/Python/glnx86/cppsimdata_lib.so'
self.cppsimdata_lib = ct.CDLL(cppsimdata_lib_file)
self.cppsimdata_lib.loadsig.argtypes = [ct.POINTER(CPPSIM_STORAGE_INFO), ct.c_char_p]
self.cppsimdata_lib.lssig.argtypes = [ct.POINTER(CPPSIM_STORAGE_INFO), ct.c_char_p, ct.c_char_p]
self.cppsimdata_lib.evalsig.argtypes = [ct.POINTER(CPPSIM_STORAGE_INFO), ct.c_char_p, ct.POINTER(ct.c_double), ct.c_char_p]
self.cppsimdata_lib.initialize()
if filename != None:
error_flag = self.cppsimdata_lib.loadsig(ct.byref(self.storage_info),self.err_msg)
if error_flag == 1:
print(self.err_msg.value.decode('UTF-8'))
sys.exit()
def __repr__(self):
return "File: '%s', num_samples = %d, num_sigs = %d"%(self.storage_info.filename, self.storage_info.num_samples, self.storage_info.num_sigs)
def loadsig(self,filename):
self.storage_info.filename = filename
error_flag = self.cppsimdata_lib.loadsig(ct.byref(self.storage_info),self.err_msg)
if error_flag == 1:
print(self.err_msg.value.decode('UTF-8'))
sys.exit()
def get_num_samples(self):
return self.storage_info.num_samples
def get_num_sigs(self):
return self.storage_info.num_sigs
def get_filename(self):
return self.storage_info.filename
def lssig(self,print_str_flag=None):
sig_list = []
self.cppsimdata_lib.reset_cur_sig_count()
for i in range(self.storage_info.num_sigs):
error_flag = self.cppsimdata_lib.lssig(ct.byref(self.storage_info),self.cur_sig_name, self.err_msg)
if error_flag == 1:
print(self.err_msg.value.decode('UTF-8'))
sys.exit()
if print_str_flag == 'print':
print('%d: %s' % (i,self.cur_sig_name.value.decode('UTF-8')))
sig_list.append(self.cur_sig_name.value.decode('UTF-8'))
return sig_list
def evalsig(self,sig_name):
# If the signal name is a string, convert it to a byte array for the interface
if (type(sig_name) is str):
sig_name = str.encode(sig_name)
sig_data = np.zeros(self.storage_info.num_samples)
error_flag = self.cppsimdata_lib.evalsig(ct.byref(self.storage_info), sig_name, sig_data.ctypes.data_as(ct.POINTER(ct.c_double)),self.err_msg)
if error_flag == 1:
print(self.err_msg.value.decode('UTF-8'))
sys.exit()
return sig_data
def cppsim_unbuffer_for_print(status, stream='stdout'):
newline_chars = ['\r', '\n', '\r\n']
stream = getattr(status, stream)
with contextlib.closing(stream):
while True:
out = []
last = stream.read(1)
if last == '' and status.poll() is not None:
break
while last not in newline_chars:
if last == '' and status.poll() is not None:
break
out.append(last)
last = stream.read(1)
out = ''.join(out)
yield out
def cppsim(sim_file="test.par"):
if sim_file.find('.par') < 0:
sim_file = sim_file + '.par'
cppsim_home = os.getenv('CppSimHome')
if cppsim_home == None:
cppsim_home = os.getenv('CPPSIMHOME')
if cppsim_home == None:
home = os.getenv('HOME')
if sys.platform == 'win32':
default_cppsim_home = "%s\\CppSim" % (home)
else:
default_cppsim_home = "%s/CppSim" % (home)
if os.path.isdir(default_cppsim_home):
cppsim_home = default_cppsim_home
else:
print('Error running cppsim from Python: environment variable')
print(' CPPSIMHOME is undefined')
cppsimshared_home = os.getenv('CppSimSharedHome')
if cppsimshared_home == None:
cppsimshared_home = os.getenv('CPPSIMSHAREDHOME')
if cppsimshared_home == None:
if sys.platform == 'win32':
default_cppsimshared_home = "%s\\CppSimShared" % (cppsim_home)
else:
default_cppsimshared_home = "%s/CppSimShared" % (cppsim_home)
if os.path.isdir(default_cppsimshared_home):
cppsimshared_home = default_cppsimshared_home
else:
print('Error running cppsim: environment variable')
print(' CPPSIMSHAREDHOME is undefined')
# print('cppsimhome: %s' % cppsim_home)
# print('cppsimsharedhome: %s' % cppsimshared_home)
cur_dir = os.getcwd()
if sys.platform == 'win32':
i = cur_dir.lower().find('\\simruns\\')
else:
i = cur_dir.lower().find('/simruns/')
if i < 0:
print('Error running cppsim: you need to run this Python script')
print(' in a directory of form:')
if sys.platform == 'win32':
print(' .....\\SimRuns\\Library_name\\Module_name')
else:
print(' ...../SimRuns/Library_name/Module_name')
print(' -> in this case, you ran in directory:')
print(' %s' % cur_dir)
sys.exit()
library_cell = cur_dir[i+9:1000]
if sys.platform == 'win32':
i = library_cell.find('\\')
else:
i = library_cell.find('/')
if i < 0:
print('Error running cppsim: you need to run this Python script')
print(' in a directory of form:')
print(' ...../SimRuns/Library_name/Module_name')
print(' -> in this case, you ran in directory:')
print(' %s' % cur_dir)
sys.exit()
library_name = library_cell[0:i]
cell_name = library_cell[i+1:1000]
print("Running CppSim on module '%s' (Lib:'%s'):" % (cell_name, library_name))
print("\n... netlisting ...\n")
if sys.platform == 'win32':
rp_base = '%s/Sue2/bin/win32/sue_cppsim_netlister' % (cppsimshared_home)
else:
rp_base = '%s/Sue2/bin/sue_cppsim_netlister' % (cppsimshared_home)
rp_arg1 = cell_name
rp_arg2 = '%s/Sue2/sue.lib' % (cppsim_home)
rp_arg3 = '%s/Netlist/netlist.cppsim' % (cppsim_home)
rp = [rp_base, rp_arg1, rp_arg2, rp_arg3]
status = sp.Popen(rp, stdout=sp.PIPE, stderr=sp.STDOUT, universal_newlines=True)
for line in cppsim_unbuffer_for_print(status):
print(line)
if status.returncode != 0:
print('************** ERROR: exited CppSim run prematurely! ****************')
sys.exit()
print('\n... running net2code ...\n')
if sys.platform == 'win32':
rp_base = '%s/bin/win32/net2code' % (cppsimshared_home)
else:
rp_base = '%s/bin/net2code' % (cppsimshared_home)
rp_arg1 = '-cpp'
rp_arg2 = sim_file
rp = [rp_base, rp_arg1, rp_arg2]
status = sp.Popen(rp, stdout=sp.PIPE, stderr=sp.STDOUT, universal_newlines=True)
for line in cppsim_unbuffer_for_print(status):
print(line)
if status.returncode != 0:
print('************** ERROR: exited CppSim run prematurely! ****************')
sys.exit()
print('... compiling ...\n')
if sys.platform == 'win32':
rp_base = '%s/msys/bin/make' % (cppsimshared_home)
else:
rp_base = 'make'
rp = [rp_base]
status = sp.Popen(rp, stdout=sp.PIPE, stderr=sp.STDOUT, universal_newlines=True)
for line in cppsim_unbuffer_for_print(status):
print(line)
if status.returncode != 0:
print('************** ERROR: exited CppSim run prematurely! ****************')
sys.exit()
# calculate phase noise: returns frequency (Hz) and spectral density (dBc/Hz)
def calc_pll_phasenoise(noiseout,Ts):
num_segments = 20;
window_length = np.floor(noiseout.size/num_segments)
Kv = 1.0
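    # accumulate the mean-removed frequency noise into phase with a discrete
    # integrator: phase[n] = phase[n-1] + Ts*2*pi*Kv*noiseout[n]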
phase = lfilter([Ts*2*np.pi*Kv],[1,-1],noiseout-np.mean(noiseout))
# calculate L(f)
f, Pxx = welch(phase,1/Ts,'hanning',2**16, None, None, 'constant', False, 'density',-1)
# In Matlab:
# [Pxx,f] = pwelch(phase,window_length,[],[],1/Ts,'twosided');
# [Pxx,f] = psd(sqrt(Ts)*phase,2^16,1/Ts,2^16,'mean');
Pxx_db = 10.0*np.log10(Pxx)
return f, Pxx_db
| 38.800797 | 149 | 0.629736 | 3,853 | 0.395626 | 580 | 0.059554 | 0 | 0 | 0 | 0 | 2,481 | 0.254749 |
23aebda5722243d52ce15ff9c4cb52dbd5434d9f | 1,217 | py | Python | waferscreen/data_io/exceptions.py | chw3k5/WaferScreen | c0ca7fe939fe7cd0b722b7d6129b148c03a7505c | [
"Apache-2.0"
]
| 1 | 2021-07-30T19:06:07.000Z | 2021-07-30T19:06:07.000Z | waferscreen/data_io/exceptions.py | chw3k5/WaferScreen | c0ca7fe939fe7cd0b722b7d6129b148c03a7505c | [
"Apache-2.0"
]
| 8 | 2021-04-22T20:47:48.000Z | 2021-07-30T19:06:01.000Z | waferscreen/data_io/exceptions.py | chw3k5/WaferScreen | c0ca7fe939fe7cd0b722b7d6129b148c03a7505c | [
"Apache-2.0"
]
| null | null | null | class ResProcessingError(Exception):
"""The base class for exceptions the occur during resonator processing."""
pass
class ResMinIsLeftMost(ResProcessingError):
    """Raised when the RWHM definition detects that the resonator's minimum is the left-most point"""
pass
class ResMinIsRightMost(ResProcessingError):
    """Raised when the RWHM definition detects that the resonator's minimum is the right-most point"""
pass
class FailedResFit(ResProcessingError):
    """Raised when the curve fit has a runtime error and the resonator fit fails to converge"""
pass
# Lambda processing
class LambdaProcessingError(Exception):
    """The base class for exceptions that occur during lambda curve fitting and processing."""
pass
class NoDataForCurveFit(LambdaProcessingError):
    """Empty lists, [], were returned for currentuA and/or freqGHz needed for lambda fitting"""
pass
class NotEnoughDataForCurveFit(LambdaProcessingError):
    """Curve fit for lambda fitting has more free parameters than data points"""
pass
class OptimalParametersNotFoundForCurveFit(LambdaProcessingError):
"""Optimal parameters not found: Number of calls to function has reached maxfev"""
pass
| 30.425 | 101 | 0.758422 | 1,175 | 0.965489 | 0 | 0 | 0 | 0 | 0 | 0 | 714 | 0.586689 |
23b3590bb9d68aac5032da0773011d5e1741a6b6 | 5,977 | py | Python | notify/handlers.py | marzocchi/iterm-notify | 5e587213ca89c0361a39c785fa4560fda275052f | [
"MIT"
]
| 28 | 2019-12-01T21:45:28.000Z | 2021-05-05T17:46:09.000Z | notify/handlers.py | marzocchi/iterm-notify | 5e587213ca89c0361a39c785fa4560fda275052f | [
"MIT"
]
| null | null | null | notify/handlers.py | marzocchi/iterm-notify | 5e587213ca89c0361a39c785fa4560fda275052f | [
"MIT"
]
| 2 | 2020-08-04T12:55:04.000Z | 2020-12-20T22:23:47.000Z | import logging
from datetime import datetime
from typing import List
from notify.backends import BackendFactory
from notify.commands import Command
from notify.config import Config, Stack
from notify.notifications import Factory, Notification
from notify.strategies import StrategyFactory
class MaintainConfig:
def __init__(self, stack: Stack,
success_template: Notification,
failure_template: Notification,
logger: logging.Logger):
self.__success_template = success_template
self.__failure_template = failure_template
self.__logger = logger
self.__configuration_stack = stack
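        # re-apply whichever configuration becomes current again when a nested scope is popped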
self.__configuration_stack.on_pop += self.__apply_on_pop
self.__apply_config(self.__configuration_stack.current)
def __apply_config(self, cfg: Config):
self.notifications_backend_handler(cfg.notifications_backend.name, *cfg.notifications_backend.args)
self.success_title_handler(cfg.success_title)
self.success_message_handler(cfg.success_message)
self.success_icon_handler(cfg.success_icon)
self.success_sound_handler(cfg.success_sound)
self.failure_title_handler(cfg.failure_title)
self.failure_message_handler(cfg.failure_message)
self.failure_icon_handler(cfg.failure_icon)
self.failure_sound_handler(cfg.failure_sound)
self.command_complete_timeout_handler(*cfg.notifications_strategy.args)
self.logging_name_handler(cfg.logger_name)
self.logging_level_handler(cfg.logger_level)
def __apply_on_pop(self):
self.__apply_config(self.__configuration_stack.current)
def notifications_backend_handler(self, name: str, *args):
selected_backend = self.__configuration_stack.notifications_backend.with_name(name, *args)
self.__configuration_stack.notifications_backend = selected_backend
def command_complete_timeout_handler(self, t: str):
selected_strategy = self.__configuration_stack.notifications_strategy.with_args(int(t))
self.__configuration_stack.notifications_strategy = selected_strategy
def success_title_handler(self, title: str):
self.__success_template = self.__success_template.with_title(title)
self.__configuration_stack.success_title = self.__success_template.title
def success_message_handler(self, message: str):
self.__success_template = self.__success_template.with_message(message)
self.__configuration_stack.success_message = self.__success_template.message
def success_icon_handler(self, icon: str):
self.__success_template = self.__success_template.with_icon(icon if icon != "" else None)
self.__configuration_stack.success_icon = self.__success_template.icon
def success_sound_handler(self, sound: str):
self.__success_template = self.__success_template.with_sound(sound if sound != "" else None)
self.__configuration_stack.success_sound = self.__success_template.sound
def failure_title_handler(self, title: str):
self.__failure_template = self.__failure_template.with_title(title)
self.__configuration_stack.failure_title = self.__failure_template.title
def failure_message_handler(self, message: str):
self.__failure_template = self.__failure_template.with_message(message)
self.__configuration_stack.failure_message = self.__failure_template.message
def failure_icon_handler(self, icon: str):
self.__failure_template = self.__failure_template.with_icon(icon if icon != "" else None)
self.__configuration_stack.failure_icon = self.__failure_template.icon
def failure_sound_handler(self, sound: str):
self.__failure_template = self.__failure_template.with_sound(sound if sound != "" else None)
self.__configuration_stack.failure_sound = self.__failure_template.sound
def logging_name_handler(self, new_name: str):
self.__logger.name = new_name
self.__configuration_stack.logger_name = self.__logger.name
def logging_level_handler(self, new_level: str):
self.__logger.setLevel(new_level)
self.__configuration_stack.logger_level = self.__logger.level
class Notify:
def __init__(self, stack: Stack,
notification_factory: Factory,
backend_factory: BackendFactory):
self.__stack = stack
self.__notification_factory = notification_factory
self.__backend_factory = backend_factory
self.__commands: list = []
def notify(self, message: str, title: str):
n = self.__notification_factory.create(message=message, title=title, success=True)
self.__backend_factory.create(self.__stack.notifications_backend).notify(n)
class NotifyCommandComplete:
def __init__(self, stack: Stack,
strategy_factory: StrategyFactory,
notification_factory: Factory,
backend_factory: BackendFactory):
self.__stack = stack
self.__strategy_factory = strategy_factory
self.__notification_factory = notification_factory
self.__backend_factory = backend_factory
self.__commands: List[Command] = []
def before_command(self, command_line: str):
self.__stack.push()
self.__commands.append(Command(datetime.now(), command_line))
def after_command(self, exit_code: str):
exit_code = int(exit_code)
if len(self.__commands) == 0:
raise RuntimeError("after_command without a command")
cmd = self.__commands.pop()
complete_cmd = cmd.complete(exit_code, datetime.now())
if self.__strategy_factory.create(self.__stack.current.notifications_strategy).should_notify(complete_cmd):
n = self.__notification_factory.from_command(complete_cmd)
self.__backend_factory.create(self.__stack.current.notifications_backend).notify(n)
self.__stack.pop()
| 42.091549 | 115 | 0.736657 | 5,678 | 0.949975 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.00686 |
23b5a122ef2746145b44e7be72e1b2d49508e86c | 254 | py | Python | submissions/Coomber/myLogic.py | omarmartinez97/aima-python | c8d5aa86382fb72e9ddec4938706599fee439bbb | [
"MIT"
]
| null | null | null | submissions/Coomber/myLogic.py | omarmartinez97/aima-python | c8d5aa86382fb72e9ddec4938706599fee439bbb | [
"MIT"
]
| null | null | null | submissions/Coomber/myLogic.py | omarmartinez97/aima-python | c8d5aa86382fb72e9ddec4938706599fee439bbb | [
"MIT"
]
| null | null | null | technology = {
'kb': '''
Oculus(rift)
HTC(vive)
VR(Zuck, rift)
VR(Gabe, vive)
(Oculus(O) & HTC(H)) ==> Dominates(H, O)
(VR(V)) ==> Technology(T)
''',
'queries':'''
VR(x)
Dominates(x, y)
''',
}
Examples = {
'technology': technology,
} | 14.111111 | 40 | 0.527559 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 189 | 0.744094 |
23b703b6cabe1962c636f6a2c79308b98c38c1a8 | 817 | py | Python | postag/resolver.py | MineRobber9000/postag | d905f44f8ba975df06cc21aaf1fc425cf53e039f | [
"MIT"
]
| null | null | null | postag/resolver.py | MineRobber9000/postag | d905f44f8ba975df06cc21aaf1fc425cf53e039f | [
"MIT"
]
| null | null | null | postag/resolver.py | MineRobber9000/postag | d905f44f8ba975df06cc21aaf1fc425cf53e039f | [
"MIT"
]
| null | null | null | class Resolver:
def __init__(self):
self.resolvers = []
def addResolver(self,res,priority):
self.resolvers.append(dict(resolver=res,priority=priority))
self.resolvers.sort(key=lambda x: x["priority"])
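	# resolvers are tried from highest to lowest priority: the list is kept sorted
	# ascending here and iterated in reverse by resolve()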
def resolve(self,name):
for r in [x["resolver"] for x in self.resolvers[::-1]]:
success,result = r(name)
if success:
return result
class GlobalVarResolver:
def __init__(self,globs):
self.globs = globs
def __call__(self,name):
if name in self.globs:
return True, self.globs[name]
elif name in dir(self.globs["__builtins__"]):
return True, getattr(self.globs["__builtins__"],name)
return False, None
class FunctionalMapping:
def __init__(self,get,set):
self.get = get
self.set = set
def __getitem__(self,k):
return self.get(k)
def __setitem__(self,k,v):
self.set(k,v)
| 26.354839 | 61 | 0.70869 | 812 | 0.99388 | 0 | 0 | 0 | 0 | 0 | 0 | 48 | 0.058752 |
23ba2ecb3b446799d3bd04447ada1a6c88421c82 | 8,113 | py | Python | sdk/python/feast/loaders/ingest.py | wzpy/feast | 06fe09b7047fe370cbf63555cec1ba820f1e7267 | [
"Apache-2.0"
]
| 1 | 2019-12-12T13:21:56.000Z | 2019-12-12T13:21:56.000Z | sdk/python/feast/loaders/ingest.py | wzpy/feast | 06fe09b7047fe370cbf63555cec1ba820f1e7267 | [
"Apache-2.0"
]
| null | null | null | sdk/python/feast/loaders/ingest.py | wzpy/feast | 06fe09b7047fe370cbf63555cec1ba820f1e7267 | [
"Apache-2.0"
]
| null | null | null | import logging
import multiprocessing
import os
import time
from functools import partial
from multiprocessing import Process, Queue, Pool
from typing import Iterable
import pandas as pd
import pyarrow as pa
from feast.feature_set import FeatureSet
from feast.type_map import convert_dict_to_proto_values
from feast.types.FeatureRow_pb2 import FeatureRow
from kafka import KafkaProducer
from tqdm import tqdm
from feast.constants import DATETIME_COLUMN
_logger = logging.getLogger(__name__)
GRPC_CONNECTION_TIMEOUT_DEFAULT = 3 # type: int
GRPC_CONNECTION_TIMEOUT_APPLY = 300 # type: int
FEAST_SERVING_URL_ENV_KEY = "FEAST_SERVING_URL" # type: str
FEAST_CORE_URL_ENV_KEY = "FEAST_CORE_URL" # type: str
BATCH_FEATURE_REQUEST_WAIT_TIME_SECONDS = 300
CPU_COUNT = os.cpu_count() # type: int
KAFKA_CHUNK_PRODUCTION_TIMEOUT = 120 # type: int
def _kafka_feature_row_producer(
feature_row_queue: Queue, row_count: int, brokers, topic, ctx: dict, pbar: tqdm
):
"""
Pushes Feature Rows to Kafka. Reads rows from a queue. Function will run
until total row_count is reached.
Args:
feature_row_queue: Queue containing feature rows.
row_count: Total row count to process
brokers: Broker to push to
topic: Topic to push to
ctx: Context dict used to communicate with primary process
pbar: Progress bar object
"""
# Callback for failed production to Kafka
def on_error(e):
# Save last exception
ctx["last_exception"] = e
# Increment error count
if "error_count" in ctx:
ctx["error_count"] += 1
else:
ctx["error_count"] = 1
# Callback for succeeded production to Kafka
def on_success(meta):
pbar.update()
producer = KafkaProducer(bootstrap_servers=brokers)
processed_rows = 0
# Loop through feature rows until all rows are processed
while processed_rows < row_count:
# Wait if queue is empty
if feature_row_queue.empty():
time.sleep(1)
producer.flush(timeout=KAFKA_CHUNK_PRODUCTION_TIMEOUT)
else:
while not feature_row_queue.empty():
row = feature_row_queue.get()
if row is not None:
# Push row to Kafka
producer.send(topic, row.SerializeToString()).add_callback(
on_success
).add_errback(on_error)
processed_rows += 1
# Force an occasional flush
if processed_rows % 10000 == 0:
producer.flush(timeout=KAFKA_CHUNK_PRODUCTION_TIMEOUT)
del row
pbar.refresh()
# Ensure that all rows are pushed
producer.flush(timeout=KAFKA_CHUNK_PRODUCTION_TIMEOUT)
# Using progress bar as counter is much faster than incrementing dict
ctx["success_count"] = pbar.n
pbar.close()
def _encode_pa_chunks(
tbl: pa.lib.Table,
fs: FeatureSet,
max_workers: int,
df_datetime_dtype: pd.DataFrame.dtypes,
chunk_size: int = 5000,
) -> Iterable[FeatureRow]:
"""
Generator function to encode rows in PyArrow table to FeatureRows by
breaking up the table into batches.
    Each batch will have its rows spread across a pool of workers to be
transformed into FeatureRow objects.
Args:
tbl: PyArrow table to be processed.
fs: FeatureSet describing PyArrow table.
max_workers: Maximum number of workers.
df_datetime_dtype: Pandas dtype of datetime column.
chunk_size: Maximum size of each chunk when PyArrow table is batched.
Returns:
Iterable FeatureRow object.
"""
pool = Pool(max_workers)
# Create a partial function with static non-iterable arguments
func = partial(
convert_dict_to_proto_values,
df_datetime_dtype=df_datetime_dtype,
feature_set=fs,
)
for batch in tbl.to_batches(max_chunksize=chunk_size):
m_df = batch.to_pandas()
results = pool.map_async(func, m_df.to_dict("records"))
yield from results.get()
pool.close()
pool.join()
return
def ingest_table_to_kafka(
feature_set: FeatureSet,
table: pa.lib.Table,
max_workers: int,
chunk_size: int = 5000,
disable_pbar: bool = False,
timeout: int = None,
) -> None:
"""
Ingest a PyArrow Table to a Kafka topic based for a Feature Set
Args:
feature_set: FeatureSet describing PyArrow table.
table: PyArrow table to be processed.
max_workers: Maximum number of workers.
chunk_size: Maximum size of each chunk when PyArrow table is batched.
disable_pbar: Flag to indicate if tqdm progress bar should be disabled.
timeout: Maximum time before method times out
"""
pbar = tqdm(unit="rows", total=table.num_rows, disable=disable_pbar)
# Use a small DataFrame to validate feature set schema
ref_df = table.to_batches(max_chunksize=100)[0].to_pandas()
df_datetime_dtype = ref_df[DATETIME_COLUMN].dtype
# Validate feature set schema
_validate_dataframe(ref_df, feature_set)
# Create queue through which encoding and production will coordinate
row_queue = Queue()
# Create a context object to send and receive information across processes
ctx = multiprocessing.Manager().dict(
{"success_count": 0, "error_count": 0, "last_exception": ""}
)
# Create producer to push feature rows to Kafka
ingestion_process = Process(
target=_kafka_feature_row_producer,
args=(
row_queue,
table.num_rows,
feature_set.get_kafka_source_brokers(),
feature_set.get_kafka_source_topic(),
ctx,
pbar,
),
)
try:
# Start ingestion process
print(
f"\n(ingest table to kafka) Ingestion started for {feature_set.name}:{feature_set.version}"
)
ingestion_process.start()
# Iterate over chunks in the table and return feature rows
for row in _encode_pa_chunks(
tbl=table,
fs=feature_set,
max_workers=max_workers,
chunk_size=chunk_size,
df_datetime_dtype=df_datetime_dtype,
):
# Push rows onto a queue for the production process to pick up
row_queue.put(row)
while row_queue.qsize() > chunk_size:
time.sleep(0.1)
row_queue.put(None)
except Exception as ex:
_logger.error(f"Exception occurred: {ex}")
finally:
# Wait for the Kafka production to complete
ingestion_process.join(timeout=timeout)
failed_message = (
""
if ctx["error_count"] == 0
else f"\nFail: {ctx['error_count']}/{table.num_rows}"
)
last_exception_message = (
""
if ctx["last_exception"] == ""
else f"\nLast exception:\n{ctx['last_exception']}"
)
print(
f"\nIngestion statistics:"
f"\nSuccess: {ctx['success_count']}/{table.num_rows}"
f"{failed_message}"
f"{last_exception_message}"
)
def _validate_dataframe(dataframe: pd.DataFrame, feature_set: FeatureSet):
"""
Validates a Pandas dataframe based on a feature set
Args:
dataframe: Pandas dataframe
feature_set: Feature Set instance
"""
if "datetime" not in dataframe.columns:
raise ValueError(
f'Dataframe does not contain entity "datetime" in columns {dataframe.columns}'
)
for entity in feature_set.entities:
if entity.name not in dataframe.columns:
raise ValueError(
f"Dataframe does not contain entity {entity.name} in columns {dataframe.columns}"
)
for feature in feature_set.features:
if feature.name not in dataframe.columns:
raise ValueError(
f"Dataframe does not contain feature {feature.name} in columns {dataframe.columns}"
)
| 31.815686 | 103 | 0.646986 | 0 | 0 | 1,236 | 0.152348 | 0 | 0 | 0 | 0 | 3,334 | 0.410945 |
23bb7ae2de638bcc64e1ae2469bf78db888b942c | 389 | py | Python | 1stRound/Easy/389 Find the Difference/Counter.py | ericchen12377/Leetcode-Algorithm-Python | eb58cd4f01d9b8006b7d1a725fc48910aad7f192 | [
"MIT"
]
| 2 | 2020-04-24T18:36:52.000Z | 2020-04-25T00:15:57.000Z | 1stRound/Easy/389 Find the Difference/Counter.py | ericchen12377/Leetcode-Algorithm-Python | eb58cd4f01d9b8006b7d1a725fc48910aad7f192 | [
"MIT"
]
| null | null | null | 1stRound/Easy/389 Find the Difference/Counter.py | ericchen12377/Leetcode-Algorithm-Python | eb58cd4f01d9b8006b7d1a725fc48910aad7f192 | [
"MIT"
]
| null | null | null | import collections
class Solution:
def findTheDifference(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
scount, tcount = collections.Counter(s), collections.Counter(t)
for t in tcount:
if tcount[t] > scount[t]:
return t
s = "abcd"
t = "abcde"
p = Solution()
print(p.findTheDifference(s,t)) | 24.3125 | 71 | 0.539846 | 300 | 0.771208 | 0 | 0 | 0 | 0 | 0 | 0 | 90 | 0.231362 |
23bbe8dfe70d77ea6c966fa54a0f12dbc414a437 | 16,580 | py | Python | sdk/python/pulumi_azure/lb/backend_address_pool_address.py | henriktao/pulumi-azure | f1cbcf100b42b916da36d8fe28be3a159abaf022 | [
"ECL-2.0",
"Apache-2.0"
]
| 109 | 2018-06-18T00:19:44.000Z | 2022-02-20T05:32:57.000Z | sdk/python/pulumi_azure/lb/backend_address_pool_address.py | henriktao/pulumi-azure | f1cbcf100b42b916da36d8fe28be3a159abaf022 | [
"ECL-2.0",
"Apache-2.0"
]
| 663 | 2018-06-18T21:08:46.000Z | 2022-03-31T20:10:11.000Z | sdk/python/pulumi_azure/lb/backend_address_pool_address.py | henriktao/pulumi-azure | f1cbcf100b42b916da36d8fe28be3a159abaf022 | [
"ECL-2.0",
"Apache-2.0"
]
| 41 | 2018-07-19T22:37:38.000Z | 2022-03-14T10:56:26.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['BackendAddressPoolAddressArgs', 'BackendAddressPoolAddress']
@pulumi.input_type
class BackendAddressPoolAddressArgs:
def __init__(__self__, *,
backend_address_pool_id: pulumi.Input[str],
ip_address: pulumi.Input[str],
virtual_network_id: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a BackendAddressPoolAddress resource.
:param pulumi.Input[str] backend_address_pool_id: The ID of the Backend Address Pool. Changing this forces a new Backend Address Pool Address to be created.
:param pulumi.Input[str] ip_address: The Static IP Address which should be allocated to this Backend Address Pool.
:param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network within which the Backend Address Pool should exist.
:param pulumi.Input[str] name: The name which should be used for this Backend Address Pool Address. Changing this forces a new Backend Address Pool Address to be created.
"""
pulumi.set(__self__, "backend_address_pool_id", backend_address_pool_id)
pulumi.set(__self__, "ip_address", ip_address)
pulumi.set(__self__, "virtual_network_id", virtual_network_id)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="backendAddressPoolId")
def backend_address_pool_id(self) -> pulumi.Input[str]:
"""
The ID of the Backend Address Pool. Changing this forces a new Backend Address Pool Address to be created.
"""
return pulumi.get(self, "backend_address_pool_id")
@backend_address_pool_id.setter
def backend_address_pool_id(self, value: pulumi.Input[str]):
pulumi.set(self, "backend_address_pool_id", value)
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> pulumi.Input[str]:
"""
The Static IP Address which should be allocated to this Backend Address Pool.
"""
return pulumi.get(self, "ip_address")
@ip_address.setter
def ip_address(self, value: pulumi.Input[str]):
pulumi.set(self, "ip_address", value)
@property
@pulumi.getter(name="virtualNetworkId")
def virtual_network_id(self) -> pulumi.Input[str]:
"""
The ID of the Virtual Network within which the Backend Address Pool should exist.
"""
return pulumi.get(self, "virtual_network_id")
@virtual_network_id.setter
def virtual_network_id(self, value: pulumi.Input[str]):
pulumi.set(self, "virtual_network_id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name which should be used for this Backend Address Pool Address. Changing this forces a new Backend Address Pool Address to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class _BackendAddressPoolAddressState:
def __init__(__self__, *,
backend_address_pool_id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
virtual_network_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering BackendAddressPoolAddress resources.
:param pulumi.Input[str] backend_address_pool_id: The ID of the Backend Address Pool. Changing this forces a new Backend Address Pool Address to be created.
:param pulumi.Input[str] ip_address: The Static IP Address which should be allocated to this Backend Address Pool.
:param pulumi.Input[str] name: The name which should be used for this Backend Address Pool Address. Changing this forces a new Backend Address Pool Address to be created.
:param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network within which the Backend Address Pool should exist.
"""
if backend_address_pool_id is not None:
pulumi.set(__self__, "backend_address_pool_id", backend_address_pool_id)
if ip_address is not None:
pulumi.set(__self__, "ip_address", ip_address)
if name is not None:
pulumi.set(__self__, "name", name)
if virtual_network_id is not None:
pulumi.set(__self__, "virtual_network_id", virtual_network_id)
@property
@pulumi.getter(name="backendAddressPoolId")
def backend_address_pool_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Backend Address Pool. Changing this forces a new Backend Address Pool Address to be created.
"""
return pulumi.get(self, "backend_address_pool_id")
@backend_address_pool_id.setter
def backend_address_pool_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backend_address_pool_id", value)
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> Optional[pulumi.Input[str]]:
"""
The Static IP Address which should be allocated to this Backend Address Pool.
"""
return pulumi.get(self, "ip_address")
@ip_address.setter
def ip_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ip_address", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name which should be used for this Backend Address Pool Address. Changing this forces a new Backend Address Pool Address to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="virtualNetworkId")
def virtual_network_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Virtual Network within which the Backend Address Pool should exist.
"""
return pulumi.get(self, "virtual_network_id")
@virtual_network_id.setter
def virtual_network_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "virtual_network_id", value)
class BackendAddressPoolAddress(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backend_address_pool_id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
virtual_network_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages a Backend Address within a Backend Address Pool.
> **Note:** Backend Addresses can only be added to a `Standard` SKU Load Balancer.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_virtual_network = azure.network.get_virtual_network(name="example-network",
resource_group_name="example-resources")
example_lb = azure.lb.get_lb(name="example-lb",
resource_group_name="example-resources")
example_backend_address_pool = azure.lb.get_backend_address_pool(name="first",
loadbalancer_id=example_lb.id)
example_backend_address_pool_address = azure.lb.BackendAddressPoolAddress("exampleBackendAddressPoolAddress",
backend_address_pool_id=example_backend_address_pool.id,
virtual_network_id=example_virtual_network.id,
ip_address="10.0.0.1")
```
## Import
Backend Address Pool Addresses can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:lb/backendAddressPoolAddress:BackendAddressPoolAddress example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/loadBalancers/loadBalancer1/backendAddressPools/backendAddressPool1/addresses/address1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] backend_address_pool_id: The ID of the Backend Address Pool. Changing this forces a new Backend Address Pool Address to be created.
:param pulumi.Input[str] ip_address: The Static IP Address which should be allocated to this Backend Address Pool.
:param pulumi.Input[str] name: The name which should be used for this Backend Address Pool Address. Changing this forces a new Backend Address Pool Address to be created.
:param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network within which the Backend Address Pool should exist.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: BackendAddressPoolAddressArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Backend Address within a Backend Address Pool.
> **Note:** Backend Addresses can only be added to a `Standard` SKU Load Balancer.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_virtual_network = azure.network.get_virtual_network(name="example-network",
resource_group_name="example-resources")
example_lb = azure.lb.get_lb(name="example-lb",
resource_group_name="example-resources")
example_backend_address_pool = azure.lb.get_backend_address_pool(name="first",
loadbalancer_id=example_lb.id)
example_backend_address_pool_address = azure.lb.BackendAddressPoolAddress("exampleBackendAddressPoolAddress",
backend_address_pool_id=example_backend_address_pool.id,
virtual_network_id=example_virtual_network.id,
ip_address="10.0.0.1")
```
## Import
Backend Address Pool Addresses can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:lb/backendAddressPoolAddress:BackendAddressPoolAddress example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/loadBalancers/loadBalancer1/backendAddressPools/backendAddressPool1/addresses/address1
```
:param str resource_name: The name of the resource.
:param BackendAddressPoolAddressArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(BackendAddressPoolAddressArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backend_address_pool_id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
virtual_network_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = BackendAddressPoolAddressArgs.__new__(BackendAddressPoolAddressArgs)
if backend_address_pool_id is None and not opts.urn:
raise TypeError("Missing required property 'backend_address_pool_id'")
__props__.__dict__["backend_address_pool_id"] = backend_address_pool_id
if ip_address is None and not opts.urn:
raise TypeError("Missing required property 'ip_address'")
__props__.__dict__["ip_address"] = ip_address
__props__.__dict__["name"] = name
if virtual_network_id is None and not opts.urn:
raise TypeError("Missing required property 'virtual_network_id'")
__props__.__dict__["virtual_network_id"] = virtual_network_id
super(BackendAddressPoolAddress, __self__).__init__(
'azure:lb/backendAddressPoolAddress:BackendAddressPoolAddress',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
backend_address_pool_id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
virtual_network_id: Optional[pulumi.Input[str]] = None) -> 'BackendAddressPoolAddress':
"""
Get an existing BackendAddressPoolAddress resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] backend_address_pool_id: The ID of the Backend Address Pool. Changing this forces a new Backend Address Pool Address to be created.
:param pulumi.Input[str] ip_address: The Static IP Address which should be allocated to this Backend Address Pool.
:param pulumi.Input[str] name: The name which should be used for this Backend Address Pool Address. Changing this forces a new Backend Address Pool Address to be created.
:param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network within which the Backend Address Pool should exist.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _BackendAddressPoolAddressState.__new__(_BackendAddressPoolAddressState)
__props__.__dict__["backend_address_pool_id"] = backend_address_pool_id
__props__.__dict__["ip_address"] = ip_address
__props__.__dict__["name"] = name
__props__.__dict__["virtual_network_id"] = virtual_network_id
return BackendAddressPoolAddress(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="backendAddressPoolId")
def backend_address_pool_id(self) -> pulumi.Output[str]:
"""
The ID of the Backend Address Pool. Changing this forces a new Backend Address Pool Address to be created.
"""
return pulumi.get(self, "backend_address_pool_id")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> pulumi.Output[str]:
"""
The Static IP Address which should be allocated to this Backend Address Pool.
"""
return pulumi.get(self, "ip_address")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name which should be used for this Backend Address Pool Address. Changing this forces a new Backend Address Pool Address to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="virtualNetworkId")
def virtual_network_id(self) -> pulumi.Output[str]:
"""
The ID of the Virtual Network within which the Backend Address Pool should exist.
"""
return pulumi.get(self, "virtual_network_id")
| 47.643678 | 282 | 0.68076 | 16,127 | 0.972678 | 0 | 0 | 13,739 | 0.828649 | 0 | 0 | 8,985 | 0.541918 |
23bd05e550888fff887e56ad22915b9704444c37 | 4,136 | py | Python | submission.py | Amar1729/Liked-Saved-Image-Downloader | 48c17d8cb0cdce3bf7ebab16729510be11f51013 | [
"MIT"
]
| 60 | 2015-12-04T20:11:23.000Z | 2019-03-17T20:00:56.000Z | submission.py | Amar1729/Liked-Saved-Image-Downloader | 48c17d8cb0cdce3bf7ebab16729510be11f51013 | [
"MIT"
]
| 68 | 2019-03-22T01:07:32.000Z | 2021-07-02T04:48:57.000Z | submission.py | Amar1729/Liked-Saved-Image-Downloader | 48c17d8cb0cdce3bf7ebab16729510be11f51013 | [
"MIT"
]
| 19 | 2015-09-15T17:30:29.000Z | 2019-03-17T18:05:30.000Z | # -*- coding: utf-8 -*-
import pickle
import os
# third-party imports
import jsonpickle
class Submission:
def __init__(self):
# Source is either Tumblr or Reddit
self.source = u''
self.title = u''
self.author = u''
self.subreddit = u''
self.subredditTitle = u''
self.body = u''
self.bodyUrl = u''
self.postUrl = u''
def getXML(self):
baseString = (u'\t<source>' + self.source + u'</source>\n'
+ u'\t<title>' + self.title + u'</title>\n'
+ u'\t<author>' + self.author + u'</author>\n'
+ u'\t<subreddit>' + self.subreddit + u'</subreddit>\n'
+ u'\t<subredditTitle>' + self.subredditTitle + u'</subredditTitle>\n'
+ u'\t<body>' + self.body + u'</body>\n'
+ u'\t<bodyUrl>' + self.bodyUrl + u'</bodyUrl>\n'
+ u'\t<postUrl>' + self.postUrl + u'</postUrl>\n')
return str(baseString)
def getHtml(self):
baseString = (u'\t<p>' + self.source + u'</p>\n'
+ u'\t<h2>' + self.title + u'</h2>\n'
+ u'\t<h3>' + self.author + u'</h3>\n'
+ u'\t<h4>' + self.subreddit + u'</h4>\n'
+ u'\t<h4>' + self.subredditTitle + u'</h4>\n'
+ u'\t<p>' + self.body + u'</p>\n'
# + u'\t<p>' + self.bodyUrl + u'</p>\n'
+ u'\t<a href=' + self.postUrl + u'/>Link</a><br /><br />\n')
return baseString
def getJson(self):
jsonpickle.set_preferred_backend('json')
jsonpickle.set_encoder_options('json', ensure_ascii=False, indent=4, separators=(',', ': '))
return jsonpickle.encode(self)
def getAsList(self):
return [self.source, self.title, self.author,
self.subreddit, self.subredditTitle,
self.body, self.bodyUrl, self.postUrl]
def initFromDict(self, dictEntry):
self.source = dictEntry['source']
self.title = dictEntry['title']
self.author = dictEntry['author']
self.subreddit = dictEntry['subreddit']
self.subredditTitle = dictEntry['subredditTitle']
self.body = dictEntry['body']
self.bodyUrl = dictEntry['bodyUrl']
self.postUrl = dictEntry['postUrl']
def getAsList_generator(submissions):
for submission in submissions:
yield submission.getAsList()
def writeOutSubmissionsAsJson(redditList, file):
file.write('{\n'.encode('utf8'))
for submission in redditList:
outputString = submission.getJson() + u',\n'
file.write(outputString.encode('utf8'))
file.write('}'.encode('utf8'))
def saveSubmissionsAsJson(submissions, fileName):
outputFile = open(fileName, 'wb')
writeOutSubmissionsAsJson(submissions, outputFile)
outputFile.close()
def writeOutSubmissionsAsHtml(redditList, file):
submissionsStr = ""
for submission in redditList:
submissionsStr += submission.getHtml() + u'\n'
htmlStructure = u"""<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Reddit Saved Comments</title>
</head>
<body>
{0}
</body>
</html>
""".format(submissionsStr)
file.write(htmlStructure.encode('utf8'))
def saveSubmissionsAsHtml(submissions, fileName):
outputFile = open(fileName, 'wb')
writeOutSubmissionsAsHtml(submissions, outputFile)
outputFile.close()
def writeOutSubmissionsAsXML(redditList, file):
for submission in redditList:
outputString = u'<submission>\n' + submission.getXML() + u'</submission>\n'
file.write(outputString.encode('utf8'))
def saveSubmissionsAsXML(submissions, fileName):
outputFile = open(fileName, 'wb')
writeOutSubmissionsAsXML(submissions, outputFile)
outputFile.close()
def writeCacheSubmissions(submissions, cacheFileName):
cacheFile = open(cacheFileName, 'wb')
pickle.dump(submissions, cacheFile)
def readCacheSubmissions(cacheFileName):
if os.path.exists(cacheFileName):
cacheFile = open(cacheFileName, 'rb')
submissions = pickle.load(cacheFile)
return submissions
else:
return []
| 30.637037 | 100 | 0.604691 | 2,175 | 0.52587 | 109 | 0.026354 | 0 | 0 | 0 | 0 | 880 | 0.212766 |
23bd7796ce5dbbe94cd644365987adb6f71698db | 191 | py | Python | mtga_event_prize_level.py | everybodyeverybody/mtga_earnings_calculator | 4be67e37299c122eba110eb07308426d8078c645 | [
"MIT"
]
| null | null | null | mtga_event_prize_level.py | everybodyeverybody/mtga_earnings_calculator | 4be67e37299c122eba110eb07308426d8078c645 | [
"MIT"
]
| null | null | null | mtga_event_prize_level.py | everybodyeverybody/mtga_earnings_calculator | 4be67e37299c122eba110eb07308426d8078c645 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3.7
from decimal import Decimal
from collections import namedtuple
EventPrizeLevel = namedtuple(
"EventPrizeLevel", ["packs", "gems", "gold"], defaults=[0, 0, 0],
)
| 23.875 | 69 | 0.712042 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.314136 |
23be2b4e5eb31b3f80e1bec885f51c83e38a6703 | 621 | py | Python | mundo_1/desafios/desafio_028.py | lvfds/Curso_Python3 | 1afb7706553a1d21d3d97e061144c5f019ca9391 | [
"MIT"
]
| null | null | null | mundo_1/desafios/desafio_028.py | lvfds/Curso_Python3 | 1afb7706553a1d21d3d97e061144c5f019ca9391 | [
"MIT"
]
| null | null | null | mundo_1/desafios/desafio_028.py | lvfds/Curso_Python3 | 1afb7706553a1d21d3d97e061144c5f019ca9391 | [
"MIT"
]
| null | null | null | """
Write a program that makes the computer "think" of an integer between 0 and 5,
and asks the user to try to guess which number the computer chose.
"""
from random import randint
numero_gerado_aleatoriamente = randint(0,5)
numero_digitado_pelo_usuario = int(input('Adivinhe qual número estou pensando, uma dica: é entre 0 e 5! '))
if numero_digitado_pelo_usuario == numero_gerado_aleatoriamente:
print(f'VOCÊ ACERTOU! O número que estava pensando era mesmo o {numero_gerado_aleatoriamente}!')
else:
print(f'Você errou! O número que pensei era {numero_gerado_aleatoriamente}')
| 38.8125 | 107 | 0.772947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 418 | 0.661392 |
23be77dcebe4a2a83f67827319e9327e25df75de | 1,699 | py | Python | exp/noise_features/models.py | WilliamCCHuang/GraphLIME | 0f89bd67865c0b4b5a93becbc03273e55c15fc68 | [
"MIT"
]
| 38 | 2020-06-07T14:44:11.000Z | 2022-03-08T06:19:49.000Z | exp/noise_features/models.py | WilliamCCHuang/GraphLIME | 0f89bd67865c0b4b5a93becbc03273e55c15fc68 | [
"MIT"
]
| 9 | 2020-10-22T02:38:01.000Z | 2022-03-15T09:53:30.000Z | exp/noise_features/models.py | WilliamCCHuang/GraphLIME | 0f89bd67865c0b4b5a93becbc03273e55c15fc68 | [
"MIT"
]
| 6 | 2021-03-04T21:32:34.000Z | 2021-12-24T05:58:35.000Z | import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv, GATConv
class GCN(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim, dropout=0.5):
super(GCN, self).__init__()
self.dropout = dropout
self.conv1 = GCNConv(input_dim, hidden_dim)
self.conv2 = GCNConv(hidden_dim, output_dim)
def forward(self, x, edge_index):
x = self.conv1(x, edge_index)
x = F.relu(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.conv2(x, edge_index)
return F.log_softmax(x, dim=1)
class GAT(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim, heads_1=8, heads_2=1, att_dropout=0.6, input_dropout=0.6):
super(GAT, self).__init__()
self.att_dropout = att_dropout
self.input_dropout = input_dropout
self.conv1 = GATConv(in_channels=input_dim,
out_channels=hidden_dim // heads_1,
heads=heads_1,
concat=True,
dropout=att_dropout)
self.conv2 = GATConv(in_channels=hidden_dim,
out_channels=output_dim,
heads=heads_2,
concat=False,
dropout=att_dropout)
def forward(self, x, edge_index):
x = F.dropout(x, p=self.input_dropout, training=self.training)
x = self.conv1(x, edge_index)
x = F.elu(x)
x = F.dropout(x, p=self.input_dropout, training=self.training)
x = self.conv2(x, edge_index)
return F.log_softmax(x, dim=1)
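# Illustrative usage (editor's sketch, not part of the original module).
# The dimensions and the tiny toy graph below are arbitrary placeholders.
if __name__ == '__main__':
    import torch
    x = torch.randn(4, 16)                                      # 4 nodes, 16 features each
    edge_index = torch.tensor([[0, 1, 2, 3],
                               [1, 0, 3, 2]], dtype=torch.long)  # 2 undirected edges
    model = GCN(input_dim=16, hidden_dim=32, output_dim=3)
    log_probs = model(x, edge_index)                             # log-softmax scores, shape (4, 3)
    print(log_probs.shape)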
| 33.313725 | 116 | 0.575633 | 1,590 | 0.935845 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
23c09d19f8336af168a12e16ec8d400bf72a904d | 7,740 | py | Python | nscl/nn/scene_graph/scene_graph.py | OolongQian/NSCL-PyTorch-Release | 4cf0a633ceeaa9d221d66e066ef7892c04cdf9eb | [
"MIT"
]
| null | null | null | nscl/nn/scene_graph/scene_graph.py | OolongQian/NSCL-PyTorch-Release | 4cf0a633ceeaa9d221d66e066ef7892c04cdf9eb | [
"MIT"
]
| null | null | null | nscl/nn/scene_graph/scene_graph.py | OolongQian/NSCL-PyTorch-Release | 4cf0a633ceeaa9d221d66e066ef7892c04cdf9eb | [
"MIT"
]
| null | null | null | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : scene_graph.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 07/19/2018
#
# This file is part of NSCL-PyTorch.
# Distributed under terms of the MIT license.
"""
Scene Graph generation.
"""
import os
import torch
import torch.nn as nn
import jactorch
import jactorch.nn as jacnn
from . import functional
DEBUG = bool(int(os.getenv('DEBUG_SCENE_GRAPH', 0)))
__all__ = ['SceneGraph']
class SceneGraph(nn.Module):
def __init__(self, feature_dim, output_dims, downsample_rate):
super().__init__()
self.pool_size = 7
self.feature_dim = feature_dim
self.output_dims = output_dims
self.downsample_rate = downsample_rate
self.object_roi_pool = jacnn.PrRoIPool2D(self.pool_size, self.pool_size, 1.0 / downsample_rate)
self.context_roi_pool = jacnn.PrRoIPool2D(self.pool_size, self.pool_size, 1.0 / downsample_rate)
self.relation_roi_pool = jacnn.PrRoIPool2D(self.pool_size, self.pool_size, 1.0 / downsample_rate)
if not DEBUG:
self.context_feature_extract = nn.Conv2d(feature_dim, feature_dim, 1)
self.relation_feature_extract = nn.Conv2d(feature_dim, feature_dim // 2 * 3, 1)
self.object_feature_fuse = nn.Conv2d(feature_dim * 2, output_dims[1], 1)
self.relation_feature_fuse = nn.Conv2d(feature_dim // 2 * 3 + output_dims[1] * 2, output_dims[2], 1)
self.object_feature_fc = nn.Sequential(nn.ReLU(True),
nn.Linear(output_dims[1] * self.pool_size ** 2, output_dims[1]))
self.relation_feature_fc = nn.Sequential(nn.ReLU(True),
nn.Linear(output_dims[2] * self.pool_size ** 2, output_dims[2]))
self.reset_parameters()
else:
def gen_replicate(n):
def rep(x):
return torch.cat([x for _ in range(n)], dim=1)
return rep
self.pool_size = 32
self.object_roi_pool = jacnn.PrRoIPool2D(32, 32, 1.0 / downsample_rate)
self.context_roi_pool = jacnn.PrRoIPool2D(32, 32, 1.0 / downsample_rate)
self.relation_roi_pool = jacnn.PrRoIPool2D(32, 32, 1.0 / downsample_rate)
self.context_feature_extract = gen_replicate(2)
self.relation_feature_extract = gen_replicate(3)
self.object_feature_fuse = jacnn.Identity()
self.relation_feature_fuse = jacnn.Identity()
def reset_parameters(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight.data)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight.data)
m.bias.data.zero_()
def forward(self, input, objects, objects_length):
"""qian: to thoroughly understand the meanings of object_features, context_features, relation_features,
i mean, the semantic meaning, i'd better go back to the paper itself."""
object_features = input # qian: (32, 256, 16, 24)
context_features = self.context_feature_extract(input) # qian: (32, 256, 16, 24)
relation_features = self.relation_feature_extract(input) # qian: (32, 384, 16, 24)
outputs = list()
objects_index = 0
for i in range(input.size(0)):
"""qian: iterate through every instance in the input batch."""
box = objects[objects_index:objects_index + objects_length[i].item()] # qian: (3, 4) [3 objects, 4 for bb].
objects_index += objects_length[i].item()
with torch.no_grad():
batch_ind = i + torch.zeros(box.size(0), 1, dtype=box.dtype, device=box.device)
# generate a "full-image" bounding box
image_h, image_w = input.size(2) * self.downsample_rate, input.size(3) * self.downsample_rate
image_box = torch.cat([
torch.zeros(box.size(0), 1, dtype=box.dtype, device=box.device),
torch.zeros(box.size(0), 1, dtype=box.dtype, device=box.device),
image_w + torch.zeros(box.size(0), 1, dtype=box.dtype, device=box.device),
image_h + torch.zeros(box.size(0), 1, dtype=box.dtype, device=box.device)
], dim=-1) # qian: this box contains the entire image.
# meshgrid to obtain the subject and object bounding boxes
"""qian: i don't perfectly understand the meaning of meshgrid,
but the idea is to obtain all the combinations of multiple bounding boxes (here is 2)."""
sub_id, obj_id = jactorch.meshgrid(torch.arange(box.size(0), dtype=torch.int64, device=box.device),
dim=0)
sub_id, obj_id = sub_id.contiguous().view(-1), obj_id.contiguous().view(-1)
sub_box, obj_box = jactorch.meshgrid(box, dim=0)
sub_box = sub_box.contiguous().view(box.size(0) ** 2, 4)
obj_box = obj_box.contiguous().view(box.size(0) ** 2, 4)
# union box
"""qian: union_box (9, 4), including all 9 possible bounding box pairs' union.
The union means the set union operation."""
union_box = functional.generate_union_box(sub_box, obj_box)
rel_batch_ind = i + torch.zeros(union_box.size(0), 1, dtype=box.dtype, device=box.device)
# intersection maps
# qian: (3, 1, 7, 7). crop the object ROI.
box_context_imap = functional.generate_intersection_map(box, image_box, self.pool_size)
# qian: (9, 1, 7, 7). crop ordered object ROI in each pair.
sub_union_imap = functional.generate_intersection_map(sub_box, union_box, self.pool_size)
# qian: (9, 1, 7, 7). crop ordered object ROI in each pair.
obj_union_imap = functional.generate_intersection_map(obj_box, union_box, self.pool_size)
this_context_features = self.context_roi_pool(context_features, torch.cat([batch_ind, image_box], dim=-1))
x, y = this_context_features.chunk(2, dim=1)
this_object_features = self.object_feature_fuse(torch.cat([
self.object_roi_pool(object_features, torch.cat([batch_ind, box], dim=-1)),
x, y * box_context_imap
], dim=1))
this_relation_features = self.relation_roi_pool(relation_features,
torch.cat([rel_batch_ind, union_box], dim=-1))
x, y, z = this_relation_features.chunk(3, dim=1)
this_relation_features = self.relation_feature_fuse(torch.cat([
this_object_features[sub_id], this_object_features[obj_id],
x, y * sub_union_imap, z * obj_union_imap
], dim=1))
if DEBUG:
outputs.append([
None,
this_object_features,
this_relation_features
])
else:
outputs.append([
None,
self._norm(self.object_feature_fc(this_object_features.view(box.size(0), -1))),
self._norm(
self.relation_feature_fc(this_relation_features.view(box.size(0) * box.size(0), -1)).view(
box.size(0), box.size(0), -1))
])
return outputs
def _norm(self, x):
return x / x.norm(2, dim=-1, keepdim=True)
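# Illustrative sketch (editor's addition): the pairwise subject/object enumeration that
# jactorch.meshgrid is used for in SceneGraph.forward above, written out with plain torch
# broadcasting for clarity.
if __name__ == '__main__':
    import torch
    n = 3                                                 # pretend there are 3 objects
    ids = torch.arange(n)
    sub_id = ids.view(-1, 1).expand(n, n).reshape(-1)     # 0 0 0 1 1 1 2 2 2
    obj_id = ids.view(1, -1).expand(n, n).reshape(-1)     # 0 1 2 0 1 2 0 1 2
    # every (sub_id[k], obj_id[k]) pair is one ordered subject/object combination,
    # matching the n*n relation features computed in forward().
    print(list(zip(sub_id.tolist(), obj_id.tolist())))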
| 47.484663 | 120 | 0.588889 | 7,271 | 0.939406 | 0 | 0 | 0 | 0 | 0 | 0 | 1,295 | 0.167313 |
23c12f3233981fe353e6b9f38266cba0ebd64146 | 789 | py | Python | roomcreator.py | ajaxalex5/MidYearProject | a399347cd8cb4b24cf1aeac4e11269a0a2109ddf | [
"MIT"
]
| null | null | null | roomcreator.py | ajaxalex5/MidYearProject | a399347cd8cb4b24cf1aeac4e11269a0a2109ddf | [
"MIT"
]
| null | null | null | roomcreator.py | ajaxalex5/MidYearProject | a399347cd8cb4b24cf1aeac4e11269a0a2109ddf | [
"MIT"
]
| null | null | null | class Room (object):
def __init__(self, name, xl, yl, layout):
self.name = str(name)
self.xl = int(xl)
self.yl = int(yl)
self.layout = layout
def load_room_file(file):
roomfile = open(file, "r")
roomlist = []
linelist = []
    for line in roomfile:
        # strip the trailing newline so the "STOP" sentinel comparison below can match
        linelist.append(line.rstrip('\n'))
while linelist[0] != "STOP":
temproomformat = []
for line in range(0, int(linelist[1])):
temproomformat.append([])
for tile in range(0, int(linelist[2])):
temproomformat[-1].append(linelist[3+line][tile])
roomlist.append(Room(linelist[0], int(linelist[1]), int(linelist[2]), temproomformat))
for x in range(4+int(linelist[2])):
del(linelist[0])
return roomlist | 24.65625 | 94 | 0.564005 | 178 | 0.225602 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.011407 |
23c22b89349457ba83481e99d719de420d9ae033 | 645 | py | Python | sitemapparser/base_data.py | frasy/site-map-parser | 7648b50a1e15777cf82a6916ef5cbb149c5e99df | [
"MIT"
]
| 1 | 2021-02-11T10:03:42.000Z | 2021-02-11T10:03:42.000Z | sitemapparser/base_data.py | frasy/site-map-parser | 7648b50a1e15777cf82a6916ef5cbb149c5e99df | [
"MIT"
]
| 2 | 2020-02-24T11:52:51.000Z | 2021-07-05T19:38:55.000Z | sitemapparser/base_data.py | frasy/site-map-parser | 7648b50a1e15777cf82a6916ef5cbb149c5e99df | [
"MIT"
]
| 4 | 2020-02-10T14:49:41.000Z | 2021-05-07T14:41:32.000Z | import re
from abc import ABCMeta
from dateutil import parser
class BaseData(metaclass=ABCMeta):
def __init__(self):
self._lastmod = None
self._loc = None
@property
def lastmod(self):
return self._lastmod
@lastmod.setter
def lastmod(self, value):
self._lastmod = parser.isoparse(value) if value is not None else None
@property
def loc(self):
return self._loc
@loc.setter
def loc(self, value):
value = str(value)
if not re.match('http[s]?://', value):
raise ValueError("{} does not match a url".format(value))
self._loc = value
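# Illustrative usage (editor's sketch, not part of the original package).
# `PageData` is a hypothetical subclass used only to exercise the setters above.
if __name__ == '__main__':
    class PageData(BaseData):
        pass

    page = PageData()
    page.loc = 'https://example.com/index.html'      # passes the http[s]:// check
    page.lastmod = '2020-01-17T00:00:00+00:00'        # parsed with dateutil's isoparse
    print(page.loc, page.lastmod)
    # page.loc = 'not-a-url'                          # would raise ValueError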
| 22.241379 | 77 | 0.615504 | 580 | 0.899225 | 0 | 0 | 444 | 0.688372 | 0 | 0 | 38 | 0.058915 |
23c2c0ad760da305cb104343e55a702bf05d28ce | 630 | py | Python | nomad/tests/core/test_shortest_path_solver.py | romilbhardwaj/nomad | c6a8289872bfd07d1aa0b913f0aee7a2fccd5bf1 | [
"MIT"
]
| 2 | 2019-02-06T19:47:48.000Z | 2019-10-30T07:30:14.000Z | nomad/tests/core/test_shortest_path_solver.py | romilbhardwaj/nomad | c6a8289872bfd07d1aa0b913f0aee7a2fccd5bf1 | [
"MIT"
]
| 6 | 2019-03-21T18:29:04.000Z | 2019-04-11T18:31:34.000Z | nomad/tests/core/test_shortest_path_solver.py | romilbhardwaj/nomad | c6a8289872bfd07d1aa0b913f0aee7a2fccd5bf1 | [
"MIT"
]
| null | null | null | import unittest
import networkx as nx
from core.placement.spsolver import DPShortestPathSolver
class TestShorthestPathSolverMethods(unittest.TestCase):
def setUp(self):
self.g1 = nx.read_weighted_edgelist('tests/test-graph_1.txt', create_using=nx.MultiDiGraph, nodetype=int)
def test_shortest_path(self):
u = 0
v = 3
k = 2
weight_shortest_path = 9
(weight, path) = DPShortestPathSolver.shortest_path(self.g1, u, v, k)
self.assertEqual(weight, weight_shortest_path)
self.assertEqual(path, [0, 2, 3])
if __name__ == '__main__':
unittest.main() | 30 | 113 | 0.680952 | 476 | 0.755556 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.053968 |
23c38e57ef816e8a8c15f2598a7fb8639340906e | 1,285 | py | Python | Leetcode/medium/integer-break.py | jen-sjen/data-structures-basics-leetcode | addac32974b16e0a37aa60c210ab7820b349b279 | [
"MIT"
]
| 6 | 2021-07-29T03:26:20.000Z | 2022-01-28T15:11:45.000Z | Leetcode/medium/integer-break.py | jen-sjen/data-structures-basics-leetcode | addac32974b16e0a37aa60c210ab7820b349b279 | [
"MIT"
]
| 2 | 2021-09-30T09:47:23.000Z | 2022-01-31T03:08:24.000Z | Leetcode/medium/integer-break.py | jen-sjen/data-structures-basics-leetcode | addac32974b16e0a37aa60c210ab7820b349b279 | [
"MIT"
]
| 5 | 2021-08-10T06:41:11.000Z | 2022-01-29T17:50:20.000Z | """
# INTEGER BREAK
Given a positive integer n, break it into the sum of at least two positive integers and maximize the product of those integers. Return the maximum product you can get.
Example 1:
Input: 2
Output: 1
Explanation: 2 = 1 + 1, 1 × 1 = 1.
Example 2:
Input: 10
Output: 36
Explanation: 10 = 3 + 3 + 4, 3 × 3 × 4 = 36.
Note: You may assume that n is not less than 2 and not larger than 58.
"""
class Solution:
def integerBreak(self, n: int) -> int:
if n == 0:
return 0
if n == 1:
return 1
if n == 2:
return 1
if n == 3:
return 2
        # 0 to 3 are special cases because they will produce a result less than their value. We can't use that lesser value in the other calculations
known = {0: 0, 1: 1, 2: 2, 3: 3}
return self.breakDown(n, known)
def breakDown(self, n, known):
if n in known:
return known[n]
else:
maximum = 0
for x in range(1, n // 2 + 1):
p1 = self.breakDown(x, known)
p2 = self.breakDown(n - x, known)
maximum = max(maximum, p1 * p2)
known[n] = maximum
return known[n]
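# Illustrative usage (editor's sketch, not part of the original solution); the
# expected outputs come from the examples in the docstring above.
if __name__ == '__main__':
    solver = Solution()
    print(solver.integerBreak(2))    # -> 1   (1 + 1, product 1)
    print(solver.integerBreak(10))   # -> 36  (3 + 3 + 4, product 36)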
| 26.770833 | 167 | 0.525292 | 864 | 0.670807 | 0 | 0 | 0 | 0 | 0 | 0 | 554 | 0.430124 |
23c54e92e07439e85887d70b0a443815fb516d17 | 1,112 | py | Python | setup.py | daien/camocomp | aa2c1b6dd2cfe1eb166047b52d75ade5b6b8b554 | [
"BSD-3-Clause"
]
| 27 | 2015-03-06T05:50:35.000Z | 2021-03-01T07:54:03.000Z | setup.py | daien/camocomp | aa2c1b6dd2cfe1eb166047b52d75ade5b6b8b554 | [
"BSD-3-Clause"
]
| 2 | 2015-02-05T14:59:07.000Z | 2016-02-19T00:18:52.000Z | setup.py | daien/camocomp | aa2c1b6dd2cfe1eb166047b52d75ade5b6b8b554 | [
"BSD-3-Clause"
]
| 13 | 2015-01-25T12:43:42.000Z | 2019-11-25T17:46:42.000Z | #!/usr/bin/env python
from distutils.core import setup
SHORT_DESCR = "CAmera MOtion COMPensation using image stiching techniques to generate stabilized videos"
try:
LONG_DESCR = open('README.rst').read()
except IOError:
LONG_DESCR = SHORT_DESCR
setup(
name='camocomp',
version='0.1',
author='Adrien Gaidon',
author_email='[email protected]',
keywords='camera motion compensation, video stabilization, stitching, opencv, hugin',
packages=['camocomp'],
url='http://pypi.python.org/pypi/camocomp/',
license='New BSD License',
description=SHORT_DESCR,
long_description=LONG_DESCR,
platforms=["Linux"],
requires=['numpy', 'ffmpeg', 'cv2', 'hsi'],
scripts=['scripts/camocomp_video'],
classifiers=[
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: POSIX :: Linux',
'Operating System :: Unix',
]
)
| 30.054054 | 104 | 0.660072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 633 | 0.569245 |
23c622f1dbca6b4b9f0f05bf93b50ad3b73a9109 | 408 | py | Python | qiang00_before_project/qiang02_the_template/q02_add_template_filter.py | 13528770807/flask_project | 2930db1d59763b155f758ad4061a70d413bfc34d | [
"MIT"
]
| null | null | null | qiang00_before_project/qiang02_the_template/q02_add_template_filter.py | 13528770807/flask_project | 2930db1d59763b155f758ad4061a70d413bfc34d | [
"MIT"
]
| null | null | null | qiang00_before_project/qiang02_the_template/q02_add_template_filter.py | 13528770807/flask_project | 2930db1d59763b155f758ad4061a70d413bfc34d | [
"MIT"
]
| null | null | null | from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index():
li = [1, 2, 3, 4, 5, 6, 7]
return render_template('filter.html', li=li)
@app.template_filter('li_rv2')  # register the filter: method 2 (decorator)
def li_reverse(li):
res = list(li)
res.reverse()
return res
# app.add_template_filter(li_reverse, 'li_rv')  # register the filter: method 1 (explicit call)
if __name__ == "__main__":
app.run(debug=True)
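# Illustrative note (editor's addition): templates/filter.html is not shown in this file;
# inside that Jinja2 template the registered filter would be applied roughly like this
# (hypothetical template snippet):
#
#   <ul>
#   {% for item in li | li_rv2 %}
#     <li>{{ item }}</li>
#   {% endfor %}
#   </ul>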
| 17 | 59 | 0.647059 | 0 | 0 | 0 | 0 | 239 | 0.543182 | 0 | 0 | 136 | 0.309091 |
23c6a65b4e2832bc68e0d04d1fcc2bd1ed8f0280 | 801 | py | Python | smps/rcmod.py | BenjiStomps/py-smps | c449bbfcd748203630bc0aecf2552c8d836f827c | [
"MIT"
]
| 16 | 2017-02-22T02:26:41.000Z | 2021-04-05T10:28:02.000Z | smps/rcmod.py | BenjiStomps/py-smps | c449bbfcd748203630bc0aecf2552c8d836f827c | [
"MIT"
]
| 22 | 2017-02-27T21:50:45.000Z | 2021-05-21T02:31:35.000Z | smps/rcmod.py | BenjiStomps/py-smps | c449bbfcd748203630bc0aecf2552c8d836f827c | [
"MIT"
]
| 8 | 2017-09-30T09:50:44.000Z | 2021-05-20T22:29:54.000Z | """"""
import matplotlib as mpl
__all__ = ["set"]
def set(tick_scale=1, rc=dict()):
"""
Control plot style and scaling using seaborn and the
matplotlib rcParams interface.
    :param tick_scale: A scalar number controlling the spacing
on tick marks, defaults to 1.
:type tick_scale: float
:param rc: Additional settings to pass to rcParams.
:type rc: dict
"""
rc_log_defaults = {
'xtick.major.size': 10. * tick_scale,
'xtick.minor.size': 6. * tick_scale,
'ytick.major.size': 10. * tick_scale,
'ytick.minor.size': 6. * tick_scale,
'xtick.color': '0.0',
'ytick.color': '0.0',
'axes.linewidth': 1.75,
'mathtext.default': 'regular'
}
mpl.rcParams.update(dict(rc_log_defaults, **rc))
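# Illustrative usage (editor's sketch, not part of the original module):
#
#   from smps import rcmod
#   rcmod.set(tick_scale=1.5, rc={'axes.linewidth': 2.0})   # larger ticks, thicker axes
#
# Any key accepted by matplotlib's rcParams can be passed through `rc`.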
| 26.7 | 62 | 0.601748 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 475 | 0.593009 |
23c7aeb8b7efffbb30d83d454153984dd31f2ff4 | 169 | py | Python | Chapter 04/Chap04_Example4.28.py | Anancha/Programming-Techniques-using-Python | e80c329d2a27383909d358741a5cab03cb22fd8b | [
"MIT"
]
| null | null | null | Chapter 04/Chap04_Example4.28.py | Anancha/Programming-Techniques-using-Python | e80c329d2a27383909d358741a5cab03cb22fd8b | [
"MIT"
]
| null | null | null | Chapter 04/Chap04_Example4.28.py | Anancha/Programming-Techniques-using-Python | e80c329d2a27383909d358741a5cab03cb22fd8b | [
"MIT"
]
| null | null | null | # M1
def mul1(a1):
return lambda b1:b1*a1
myresult = mul1(3)
print(myresult(7))
#M-2
mul = lambda a = 3: (lambda b: a*b)
myres = mul()
print(myres)      # note: this prints the inner lambda object itself, not a number
print(myres(7))
| 14.083333 | 35 | 0.633136 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.047337 |
23c87c4cfb4e5c6fd8c9ed42a1f9fee075d07137 | 414 | py | Python | tools/generate_taint_models/parameter.py | terrorizer1980/pyre-check | 16659c7f6f19f3c364ba3a56e6c582371a8ff348 | [
"MIT"
]
| 1 | 2020-08-08T16:01:55.000Z | 2020-08-08T16:01:55.000Z | tools/generate_taint_models/parameter.py | terrorizer1980/pyre-check | 16659c7f6f19f3c364ba3a56e6c582371a8ff348 | [
"MIT"
]
| 4 | 2022-02-15T02:42:33.000Z | 2022-02-28T01:30:07.000Z | tools/generate_taint_models/parameter.py | terrorizer1980/pyre-check | 16659c7f6f19f3c364ba3a56e6c582371a8ff348 | [
"MIT"
]
| 1 | 2020-11-22T12:08:51.000Z | 2020-11-22T12:08:51.000Z | from enum import Enum, auto
from typing import NamedTuple, Optional
class Parameter(NamedTuple):
class Kind(Enum):
ARG = auto()
VARARG = auto()
KWARG = auto()
name: str
annotation: Optional[str]
kind: Kind
def __eq__(self, other: "Parameter") -> bool:
if not isinstance(other, self.__class__):
return False
return self.name == other.name
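# Illustrative usage (editor's sketch, not part of the original tool):
if __name__ == "__main__":
    p1 = Parameter(name="request", annotation="HttpRequest", kind=Parameter.Kind.ARG)
    p2 = Parameter(name="request", annotation=None, kind=Parameter.Kind.VARARG)
    print(p1 == p2)   # True: the equality above only compares the parameter name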
| 21.789474 | 49 | 0.611111 | 343 | 0.828502 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.02657 |
23c9d0fc017e203c468d9f46add866be9898f0bd | 2,961 | py | Python | abqPython_SvM_3_SaveODB.py | jtipton2/abaqusSignedvM | 83f0577b6a3eab6d3c86a46ae110a94a7075981c | [
"BSD-3-Clause"
]
| 2 | 2022-03-16T13:50:21.000Z | 2022-03-27T15:14:09.000Z | abqPython_SvM_3_SaveODB.py | jtipton2/abaqusSignedvM | 83f0577b6a3eab6d3c86a46ae110a94a7075981c | [
"BSD-3-Clause"
]
| null | null | null | abqPython_SvM_3_SaveODB.py | jtipton2/abaqusSignedvM | 83f0577b6a3eab6d3c86a46ae110a94a7075981c | [
"BSD-3-Clause"
]
| 2 | 2021-07-18T03:10:12.000Z | 2022-03-27T15:14:11.000Z | # -*- coding: utf-8 -*-
import numpy as np
from odbAccess import *
from abaqusConstants import *
filename = 'Job-4e-SS-Pulse'
"""
LOAD DATA
===============================================================================
"""
results = np.load(filename + '.npz')
vonMisesMax = results['vonMisesMax'].transpose()
vonMisesMin = results['vonMisesMin'].transpose()
vonMisesStatic = results['vonMisesStatic'].transpose()
nodeNum = results['nodeNum'].transpose()
nodeCoord = results['nodeCoord']
# Sort nodeCoord on nodal values
nodeCoord = nodeCoord[nodeCoord[:,0].argsort()]
# Calculate Mean and Amplitude
vonMisesAmp = (vonMisesMax - vonMisesMin)/2
vonMisesMean = (vonMisesMax + vonMisesMin)/2
"""
LOAD ODB
===============================================================================
"""
odb = openOdb(filename+'.odb',readOnly=False)
# Get Instance
allInstances = (odb.rootAssembly.instances.keys())
odbInstance = odb.rootAssembly.instances[allInstances[-1]]
"""
FORMAT AND SAVE DATA TO ODB
===============================================================================
"""
vMNodes = np.ascontiguousarray(nodeNum, dtype=np.int32)
vMMax = np.ascontiguousarray(np.reshape(vonMisesMax,(-1,1)), dtype=np.float32)
vMMin = np.ascontiguousarray(np.reshape(vonMisesMin,(-1,1)), dtype=np.float32)
vMStatic = np.ascontiguousarray(np.reshape(vonMisesStatic,(-1,1)), dtype=np.float32)
vMMean = np.ascontiguousarray(np.reshape(vonMisesMean,(-1,1)), dtype=np.float32)
vMAmp = np.ascontiguousarray(np.reshape(vonMisesAmp,(-1,1)), dtype=np.float32)
newFieldOutputMax = odb.steps['Step-6-Response'].frames[-1].FieldOutput(name = 'vMMax', description = 'Max Signed von Mises', type = SCALAR)
newFieldOutputMax.addData(position=NODAL, instance = odbInstance, labels = vMNodes, data = vMMax.tolist())
newFieldOutputMin = odb.steps['Step-6-Response'].frames[-1].FieldOutput(name = 'vMMin', description = 'Min Signed von Mises', type = SCALAR)
newFieldOutputMin.addData(position=NODAL, instance = odbInstance, labels = vMNodes, data = vMMin.tolist())
newFieldOutputMStatic = odb.steps['Step-6-Response'].frames[-1].FieldOutput(name = 'vMStatic', description = 'Static Signed von Mises', type = SCALAR)
newFieldOutputMStatic.addData(position=NODAL, instance = odbInstance, labels = vMNodes, data = vMStatic.tolist())
newFieldOutputMean = odb.steps['Step-6-Response'].frames[-1].FieldOutput(name = 'vMMean', description = 'Signed von Mises Mean', type = SCALAR)
newFieldOutputMean.addData(position=NODAL, instance = odbInstance, labels = vMNodes, data = vMMean.tolist())
newFieldOutputAmp = odb.steps['Step-6-Response'].frames[-1].FieldOutput(name = 'vMAmp', description = 'Signed von Mises Amplitude', type = SCALAR)
newFieldOutputAmp.addData(position=NODAL, instance = odbInstance, labels = vMNodes, data = vMAmp.tolist())
"""
SAVE AND CLOSE
===============================================================================
"""
odb.save()
odb.close()
| 37.961538 | 150 | 0.660588 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 844 | 0.285039 |
23cbfc7fdcdcf980a0e3a9a727e48fece2483a0e | 7,014 | py | Python | ssh.py | unazed/Py-s-SH | c20d883f75f094c71386e62cbfa8197120c641fc | [
"MIT"
]
| null | null | null | ssh.py | unazed/Py-s-SH | c20d883f75f094c71386e62cbfa8197120c641fc | [
"MIT"
]
| null | null | null | ssh.py | unazed/Py-s-SH | c20d883f75f094c71386e62cbfa8197120c641fc | [
"MIT"
]
| null | null | null | """
SSH reimplementation in Python, made by Unazed Spectaculum under the MIT license
"""
import socket
import struct
class SSH(object):
"""
Abstracted interface for secure-shell protocol with underlying TCP structure
"""
def __init__(self, host_ip, hostname, host_port=22, version="SSH-2.0"):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind((host_ip, host_port))
self.version = version
self.hostname = hostname
self.qualified_name = "%s-%s\r\n" % (version, hostname)
def listen(self, backlog=1):
self.socket.listen(backlog)
def accept(self):
while 1:
client, info = self.socket.accept()
print("{*} %s connected." % info[0])
yield (client, info)
print("{*} %s disconnected." % info[0])
client.close()
def handle_connections(self):
for client, info in self.accept():
version_info = client.recv(128)
print("{*} Version Information: %s" % repr(version_info))
if not version_info.startswith(self.version):
print("{*} Client has incompatible versions.")
continue
client.send(self.qualified_name)
pkt_len, pdn_len, payload, _ = self.binary_packet_parse(client)
data = self.kexinit_packet_parse(payload, client)
@staticmethod
def kexinit_packet_parse(payload, sock):
SSH_MSG_KEXINIT = payload[0]
COOKIE = payload[1:17]
PAYLOAD = payload[17:]
KEX_ALGORITHMS_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0]
KEX_ALGORITHMS = PAYLOAD[4:4+KEX_ALGORITHMS_LENGTH]
PAYLOAD = PAYLOAD[4+KEX_ALGORITHMS_LENGTH:]
SERVER_HOST_KEY_ALGORITHMS_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0]
SERVER_HOST_KEY_ALGORITHMS = PAYLOAD[4:4+SERVER_HOST_KEY_ALGORITHMS_LENGTH].split(',')
PAYLOAD = PAYLOAD[4+SERVER_HOST_KEY_ALGORITHMS_LENGTH:]
ENCRYPTION_ALGORITHMS_CLIENT_TO_SERVER_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0]
ENCRYPTION_ALGORITHMS_CLIENT_TO_SERVER = PAYLOAD[4:4+ENCRYPTION_ALGORITHMS_CLIENT_TO_SERVER_LENGTH].split(',')
PAYLOAD = PAYLOAD[4+ENCRYPTION_ALGORITHMS_CLIENT_TO_SERVER_LENGTH:]
ENCRYPTION_ALGORITHMS_SERVER_TO_CLIENT_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0]
ENCRYPTION_ALGORITHMS_SERVER_TO_CLIENT = PAYLOAD[4:4+ENCRYPTION_ALGORITHMS_SERVER_TO_CLIENT_LENGTH].split(',')
PAYLOAD = PAYLOAD[4+ENCRYPTION_ALGORITHMS_SERVER_TO_CLIENT_LENGTH:]
MAC_ALGORITHMS_CLIENT_TO_SERVER_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0]
MAC_ALGORITHMS_CLIENT_TO_SERVER = PAYLOAD[4:4+MAC_ALGORITHMS_CLIENT_TO_SERVER_LENGTH].split(',')
PAYLOAD = PAYLOAD[4+MAC_ALGORITHMS_CLIENT_TO_SERVER_LENGTH:]
MAC_ALGORITHMS_SERVER_TO_CLIENT_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0]
MAC_ALGORITHMS_SERVER_TO_CLIENT = PAYLOAD[4:4+MAC_ALGORITHMS_SERVER_TO_CLIENT_LENGTH].split(',')
PAYLOAD = PAYLOAD[4+MAC_ALGORITHMS_SERVER_TO_CLIENT_LENGTH:]
COMPRESSION_ALGORITHMS_CLIENT_TO_SERVER_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0]
COMPRESSION_ALGORITHMS_CLIENT_TO_SERVER = PAYLOAD[4:4+COMPRESSION_ALGORITHMS_CLIENT_TO_SERVER_LENGTH].split(',')
PAYLOAD = PAYLOAD[4+COMPRESSION_ALGORITHMS_CLIENT_TO_SERVER_LENGTH:]
COMPRESSION_ALGORITHMS_SERVER_TO_CLIENT_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0]
COMPRESSION_ALGORITHMS_SERVER_TO_CLIENT = PAYLOAD[4:4+COMPRESSION_ALGORITHMS_SERVER_TO_CLIENT_LENGTH].split(',')
PAYLOAD = PAYLOAD[4+COMPRESSION_ALGORITHMS_SERVER_TO_CLIENT_LENGTH:]
LANGUAGES_CLIENT_TO_SERVER_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0]
LANGUAGES_CLIENT_TO_SERVER = PAYLOAD[4:4+LANGUAGES_CLIENT_TO_SERVER_LENGTH].split(',')
PAYLOAD = PAYLOAD[4+LANGUAGES_CLIENT_TO_SERVER_LENGTH:]
LANGUAGES_SERVER_TO_CLIENT_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0]
LANGUAGES_SERVER_TO_CLIENT = PAYLOAD[4:4+LANGUAGES_SERVER_TO_CLIENT_LENGTH].split(',')
PAYLOAD = PAYLOAD[4+LANGUAGES_SERVER_TO_CLIENT_LENGTH:]
FIRST_KEX_PACKET_FOLLOWS = bool(PAYLOAD[0])
PAYLOAD = PAYLOAD[1:]
RESERVED = struct.unpack("!l", PAYLOAD)
print("{*} SSH_MSG_KEXINIT = %r" % SSH_MSG_KEXINIT)
print("{*} Cookie = %r" % COOKIE)
print("{*} KEX_ALGORITHMS = %s" % KEX_ALGORITHMS)
print("{*} SERVER_HOST_KEY_ALGORITHMS = %s" % SERVER_HOST_KEY_ALGORITHMS)
print("{*} ENCRYPTION_ALGORITHMS_CLIENT_TO_SERVER = %s" % ENCRYPTION_ALGORITHMS_CLIENT_TO_SERVER)
print("{*} ENCRYPTION_ALGORITHMS_SERVER_TO_CLIENT = %s" % ENCRYPTION_ALGORITHMS_SERVER_TO_CLIENT)
print("{*} MAC_ALGORITHMS_CLIENT_TO_SERVER = %s" % MAC_ALGORITHMS_CLIENT_TO_SERVER)
print("{*} MAC_ALGORITHMS_SERVER_TO_CLIENT = %s" % MAC_ALGORITHMS_SERVER_TO_CLIENT)
print("{*} COMPRESSION_ALGORITHMS_CLIENT_TO_SERVER = %s" % COMPRESSION_ALGORITHMS_CLIENT_TO_SERVER)
print("{*} COMPRESSION_ALGORITHMS_SERVER_TO_CLIENT = %s" % COMPRESSION_ALGORITHMS_SERVER_TO_CLIENT)
print("{*} LANGUAGES_CLIENT_TO_SERVER = %s" % LANGUAGES_CLIENT_TO_SERVER)
print("{*} LANGUAGES_SERVER_TO_CLIENT = %s" % LANGUAGES_SERVER_TO_CLIENT)
print("{*} FIRST_KEX_PACKETS_FOLLOWS = %r" % FIRST_KEX_PACKET_FOLLOWS)
print("{*} RESERVED = %r" % RESERVED)
if FIRST_KEX_PACKET_FOLLOWS:
print("{*} Data = %r" % sock.recv(350000))
return (
SSH_MSG_KEXINIT,
COOKIE,
KEX_ALGORITHMS,
SERVER_HOST_KEY_ALGORITHMS,
ENCRYPTION_ALGORITHMS_CLIENT_TO_SERVER,
ENCRYPTION_ALGORITHMS_SERVER_TO_CLIENT,
MAC_ALGORITHMS_CLIENT_TO_SERVER,
MAC_ALGORITHMS_SERVER_TO_CLIENT,
COMPRESSION_ALGORITHMS_CLIENT_TO_SERVER,
COMPRESSION_ALGORITHMS_SERVER_TO_CLIENT,
LANGUAGES_CLIENT_TO_SERVER,
LANGUAGES_SERVER_TO_CLIENT,
FIRST_KEX_PACKET_FOLLOWS,
RESERVED # for error checking
)
@staticmethod
def namelist_create(lists):
pass
@staticmethod
def binary_packet_create(data):
PACKET_LENGTH = struct.pack("!l", len(data))
print("{*} PACKET_LENGTH = %r" % PACKET_LENGTH)
@staticmethod
def binary_packet_parse(sock):
PACKET_LENGTH = struct.unpack("!l", sock.recv(4))[0]
PADDING_LENGTH = struct.unpack("!b", sock.recv(1))[0]
PAYLOAD = sock.recv(PACKET_LENGTH-PADDING_LENGTH-1)
RANDOM_PADDING = sock.recv(PADDING_LENGTH+1)
print("{*} Packet length = %s" % PACKET_LENGTH)
print("{*} Pading length = %s" % PADDING_LENGTH)
print("{*} Padding = %r" % RANDOM_PADDING)
return (PACKET_LENGTH, PADDING_LENGTH, PAYLOAD, RANDOM_PADDING)
def close(self):
self.socket.close()
| 44.675159 | 120 | 0.681637 | 6,893 | 0.982749 | 243 | 0.034645 | 5,451 | 0.77716 | 0 | 0 | 1,033 | 0.147277 |
23ce177acd70b69372b2d3dd196d4ee81ee251d0 | 1,140 | py | Python | seriously/probably_prime.py | Mego/Seriously | 07b256e4f35f5efec3b01434300f9ccc551b1c3e | [
"MIT"
]
| 104 | 2015-11-02T00:08:32.000Z | 2022-02-17T23:17:14.000Z | seriously/probably_prime.py | Mego/Seriously | 07b256e4f35f5efec3b01434300f9ccc551b1c3e | [
"MIT"
]
| 68 | 2015-11-09T05:33:24.000Z | 2020-04-10T06:46:54.000Z | seriously/probably_prime.py | Mego/Seriously | 07b256e4f35f5efec3b01434300f9ccc551b1c3e | [
"MIT"
]
| 25 | 2015-11-19T05:34:09.000Z | 2021-07-20T13:54:03.000Z | import random
def find_spelling(n):
"""
Finds d, r s.t. n-1 = 2^r * d
"""
r = 0
d = n - 1
# divmod used for large numbers
quotient, remainder = divmod(d, 2)
# while we can still divide 2's into n-1...
while remainder != 1:
r += 1
d = quotient # previous quotient before we overwrite it
quotient, remainder = divmod(d, 2)
return r, d
def probably_prime(n, k=10):
"""
Miller-Rabin primality test
Input: n > 3
k: accuracy of test
Output: True if n is "probably prime", False if it is composite
    From pseudocode at https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
"""
if n == 2:
return True
if n % 2 == 0:
return False
r, d = find_spelling(n)
for check in range(k):
a = random.randint(2, n - 1)
x = pow(a, d, n) # a^d % n
if x == 1 or x == n - 1:
continue
for i in range(r):
x = pow(x, 2, n)
if x == n - 1:
break
else:
return False
return True | 24.782609 | 89 | 0.497368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 425 | 0.372807 |
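# Illustrative usage (editor's sketch, not part of the original module), exercising the
# two functions defined above:
if __name__ == "__main__":
    print(find_spelling(97))       # -> (5, 3), since 96 = 2**5 * 3
    print(probably_prime(97))      # True: Miller-Rabin never rejects a real prime
    print(probably_prime(91))      # False with overwhelming probability (91 = 7 * 13)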
23ce1db523427cb59d90dd66571f9536a6eda982 | 4,859 | py | Python | home/moz4r/Marty/marty_customInmoov.py | rv8flyboy/pyrobotlab | 4e04fb751614a5cb6044ea15dcfcf885db8be65a | [
"Apache-2.0"
]
| 63 | 2015-02-03T18:49:43.000Z | 2022-03-29T03:52:24.000Z | home/moz4r/Marty/marty_customInmoov.py | rv8flyboy/pyrobotlab | 4e04fb751614a5cb6044ea15dcfcf885db8be65a | [
"Apache-2.0"
]
| 16 | 2016-01-26T19:13:29.000Z | 2018-11-25T21:20:51.000Z | home/moz4r/Marty/marty_customInmoov.py | rv8flyboy/pyrobotlab | 4e04fb751614a5cb6044ea15dcfcf885db8be65a | [
"Apache-2.0"
]
| 151 | 2015-01-03T18:55:54.000Z | 2022-03-04T07:04:23.000Z | #MARTY I2C PI
#SCRIPT BASED ON MATS WORK
#SCRIPT PUSHED INSIDE inmoovCustom : https://github.com/MyRobotLab/inmoov/tree/master/InmoovScript
raspi = Runtime.createAndStart("RasPi","RasPi")
adaFruit16c = Runtime.createAndStart("AdaFruit16C","Adafruit16CServoDriver")
adaFruit16c.setController("RasPi","1","0x40")
#
# This part is common for both devices and creates two servo instances
# on port 3 and 8 on the Adafruit16CServoDriver
# Change the names of the servos and the pin numbers to your usage
cuisseDroite = Runtime.createAndStart("cuisseDroite", "Servo")
genouDroite = Runtime.createAndStart("genouDroite", "Servo")
chevilleDroite = Runtime.createAndStart("chevilleDroite", "Servo")
cuisseGauche = Runtime.createAndStart("cuisseGauche", "Servo")
genouGauche = Runtime.createAndStart("genouGauche", "Servo")
chevilleGauche = Runtime.createAndStart("chevilleGauche", "Servo")
eyes = Runtime.createAndStart("eyes", "Servo")
armLeft = Runtime.createAndStart("armLeft", "Servo")
armRight = Runtime.createAndStart("armRight", "Servo")
sleep(1)
ledBlue=14
ledRed=13
ledGreen=12
vitesse=80
cuisseDroiteRest=90
genouDroiteRest=90
chevilleDroiteRest=80
cuisseGaucheRest=97
genouGaucheRest=95
chevilleGaucheRest=90
armLeftRest=90
armRightRest=120
eyesRest=90
cuisseDroite.setRest(cuisseDroiteRest)
genouDroite.setRest(genouDroiteRest)
chevilleDroite.setRest(chevilleDroiteRest)
cuisseGauche.setRest(cuisseGaucheRest)
genouGauche.setRest(genouGaucheRest)
chevilleGauche.setRest(chevilleGaucheRest)
eyes.setRest(eyesRest)
eyes.map(0,180,66,100)
armLeft.setRest(armLeftRest)
armRight.setRest(armRightRest)
cuisseDroite.attach(adaFruit16c,0)
genouDroite.attach(adaFruit16c,1)
chevilleDroite.attach(adaFruit16c,2)
cuisseGauche.attach(adaFruit16c,4)
genouGauche.attach(adaFruit16c,5)
chevilleGauche.attach(adaFruit16c,15)
eyes.attach(adaFruit16c,8)
armLeft.attach(adaFruit16c,9)
armRight.attach(adaFruit16c,10)
eyes.setVelocity(-1)
armLeft.setVelocity(-1)
armRight.setVelocity(-1)
cuisseDroite.rest()
genouDroite.rest()
chevilleDroite.rest()
cuisseGauche.rest()
genouGauche.rest()
chevilleGauche.rest()
eyes.rest()
armLeft.rest()
armRight.rest()
sleep(2)
cuisseDroite.detach()
genouDroite.detach()
chevilleDroite.detach()
cuisseGauche.detach()
genouGauche.detach()
chevilleGauche.detach()
armLeft.detach()
armRight.detach()
def walk(step):
talkBlocking("D'accord, c'est parti !")
start(step)
talk("Je m'aichauffe")
cuisseDroite.attach()
genouDroite.attach()
chevilleDroite.attach()
cuisseGauche.attach()
genouGauche.attach()
chevilleGauche.attach()
genouGauche.attach()
armLeft.attach()
armRight.attach()
cuisseDroite.setVelocity(vitesse)
genouDroite.setVelocity(vitesse)
chevilleDroite.setVelocity(vitesse)
cuisseGauche.setVelocity(vitesse)
genouGauche.setVelocity(vitesse)
chevilleGauche.setVelocity(vitesse)
for i in range(1,step) :
armLeft.moveTo(50)
armRight.moveTo(50)
chevilleDroite.moveTo(chevilleDroiteRest+20)
chevilleGauche.moveTo(chevilleGaucheRest+30)
sleep(0.8)
cuisseGauche.moveTo(cuisseDroiteRest+40)
cuisseDroite.moveTo(chevilleDroiteRest-40)
sleep(0.8)
chevilleDroite.moveTo(chevilleDroiteRest-30)
chevilleGauche.moveTo(chevilleGaucheRest-20)
sleep(0.8)
cuisseGauche.moveTo(cuisseGaucheRest)
cuisseDroite.moveTo(chevilleDroiteRest)
armLeft.moveTo(90)
armRight.moveTo(90)
sleep(0.8)
cuisseDroite.detach()
genouDroite.detach()
chevilleDroite.detach()
cuisseGauche.detach()
genouGauche.detach()
chevilleGauche.detach()
eyes.detach()
def start(step):
sleep(5)
armLeft.attach()
armRight.attach()
armLeft.attach()
eyes.attach()
eyes.moveTo(180)
armRight.moveTo(0)
sleep(2)
eyes.moveTo(0)
armRight.moveTo(120)
sleep(1)
eyes.moveTo(180)
sleep(0)
eyes.moveTo(180)
sleep(2)
eyes.moveTo(0)
armRight.moveTo(armRightRest)
adaFruit16c.setPinValue(7,0)
adaFruit16c.setPinValue(ledGreen,0)
adaFruit16c.setPinValue(ledRed,0)
adaFruit16c.setPinValue(ledBlue,0)
def red():
adaFruit16c.setPinValue(7,0)
adaFruit16c.setPinValue(ledGreen,1)
adaFruit16c.setPinValue(ledRed,0)
adaFruit16c.setPinValue(ledBlue,1)
def blue():
adaFruit16c.setPinValue(7,0)
adaFruit16c.setPinValue(ledGreen,1)
adaFruit16c.setPinValue(ledRed,1)
adaFruit16c.setPinValue(ledBlue,0)
def green():
adaFruit16c.setPinValue(7,0)
adaFruit16c.setPinValue(ledGreen,0)
adaFruit16c.setPinValue(ledRed,1)
adaFruit16c.setPinValue(ledBlue,1)
def noLed():
adaFruit16c.setPinValue(ledGreen,0)
adaFruit16c.setPinValue(ledRed,0)
adaFruit16c.setPinValue(ledBlue,0)
adaFruit16c.setPinValue(7,1)
red()
sleep(1)
green()
sleep(1)
blue()
sleep(1)
noLed()
led = Runtime.start("led","Clock")
led.setInterval(100)
global i
i=0
def ledFunc(timedata):
global i
if i==0:
red()
i=1
else:
noLed()
i=0
led.setInterval(random.randint(10,100))
led.addListener("pulse", python.name, "ledFunc")
| 22.919811 | 98 | 0.787611 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 631 | 0.129862 |
23ce6753d608fd795d0aebbaec8257e2469df9e3 | 7,214 | py | Python | tabular_experiments_supp_mat.py | juliendelaunay35000/APE-Adapted_Post-Hoc_Explanations | 991c4cf6153fafef4200732a5ef8ac93f1175f27 | [
"MIT"
]
| null | null | null | tabular_experiments_supp_mat.py | juliendelaunay35000/APE-Adapted_Post-Hoc_Explanations | 991c4cf6153fafef4200732a5ef8ac93f1175f27 | [
"MIT"
]
| null | null | null | tabular_experiments_supp_mat.py | juliendelaunay35000/APE-Adapted_Post-Hoc_Explanations | 991c4cf6153fafef4200732a5ef8ac93f1175f27 | [
"MIT"
]
| null | null | null | from sklearn import tree, svm
from sklearn.neural_network import MLPClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier, VotingClassifier
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from sklearn.naive_bayes import GaussianNB
import matplotlib.pyplot as plt
import numpy as np
from generate_dataset import generate_dataset, preparing_dataset
from storeExperimentalInformations import store_experimental_informations, prepare_legends
import baseGraph
import ape_tabular
import warnings
import pickle
#from keras.models import Sequential
#from keras.layers import Dense
if __name__ == "__main__":
# Filter the warning from matplotlib
warnings.filterwarnings("ignore")
# Datasets used for the experiments
dataset_names = ["generate_circles", "generate_moons", "blood", "diabete", "generate_blobs"]# "compas", "adult", "titanic"
# array of the models used for the experiments
models = [GradientBoostingClassifier(n_estimators=20, learning_rate=1.0),
RandomForestClassifier(n_estimators=20),
#MLPClassifier(random_state=1, activation="logistic"),
VotingClassifier(estimators=[('lr', LogisticRegression()), ('gnb', GaussianNB()), ('rc', LogisticRegression())], voting="soft"),
MLPClassifier(random_state=1),
RidgeClassifier()]#,
#LogisticRegression(),
#tree.DecisionTreeClassifier(),
#Sequential(),
#models=[RidgeClassifier(), MLPClassifier(random_state=1)]
# Number of instances explained by each model on each dataset
max_instance_to_explain = 10
# Print explanation result
illustrative_example = False
""" All the variable necessaries for generating the graph results """
# Store results inside graph if set to True
graph = True
verbose = False
growing_sphere = False
if growing_sphere:
label_graph = "growing spheres "
growing_method = "GS"
else:
label_graph = ""
growing_method = "GF"
# Threshold for explanation method precision
threshold_interpretability = 0.99
linear_separability_index = 1
interpretability_name = ['ls', 'ls regression', 'ls raw data', 'ls extend']
#interpretability_name = ['ls log reg', 'ls raw data']
# Initialize all the variable needed to store the result in graph
for dataset_name in dataset_names:
if graph: experimental_informations = store_experimental_informations(len(models), len(interpretability_name), interpretability_name, len(models))
models_name = []
# Store dataset inside x and y (x data and y labels), with aditional information
x, y, class_names, regression, multiclass, continuous_features, categorical_features, \
categorical_values, categorical_names, transformations = generate_dataset(dataset_name)
for nb_model, model in enumerate(models):
model_name = type(model).__name__
if "MLP" in model_name and nb_model <=2 :
model_name += "logistic"
if growing_sphere:
filename = "./results/"+dataset_name+"/"+model_name+"/growing_spheres/"+str(threshold_interpretability)+"/sup_mat_"
filename_all = "./results/"+dataset_name+"/growing_spheres/"+str(threshold_interpretability)+"/sup_mat_"
else:
filename="./results/"+dataset_name+"/"+model_name+"/"+str(threshold_interpretability)+"/sup_mat_"
filename_all="./results/"+dataset_name+"/"+str(threshold_interpretability)+"/sup_mat_"
if graph: experimental_informations.initialize_per_models(filename)
models_name.append(model_name)
# Split the dataset inside train and test set (50% each set)
dataset, black_box, x_train, x_test, y_train, y_test = preparing_dataset(x, y, dataset_name, model)
print("###", model_name, "training on", dataset_name, "dataset.")
if 'Sequential' in model_name:
# Train a neural network classifier with 2 relu and a sigmoid activation function
black_box.add(Dense(12, input_dim=len(x_train[0]), activation='relu'))
black_box.add(Dense(8, activation='relu'))
black_box.add(Dense(1, activation='sigmoid'))
black_box.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
black_box.fit(x_train, y_train, epochs=50, batch_size=10)
def predict(x):
if x.shape[0] > 1:
return np.asarray([prediction[0] for prediction in black_box.predict_classes(x)])
return black_box.predict_classes(x)[0]
def score(x, y):
return sum(predict(x) == y)/len(y)
else:
black_box = black_box.fit(x_train, y_train)
predict = black_box.predict
score = black_box.score
print('### Accuracy:', score(x_test, y_test))
cnt = 0
explainer = ape_tabular.ApeTabularExplainer(x_train, class_names, predict, black_box.predict_proba,
continuous_features=continuous_features,
categorical_features=categorical_features, categorical_values=categorical_values,
feature_names=dataset.feature_names, categorical_names=categorical_names,
verbose=verbose, threshold_precision=threshold_interpretability,
linear_separability_index=linear_separability_index,
transformations=transformations)
for instance_to_explain in x_test:
if cnt == max_instance_to_explain:
break
print("### Instance number:", cnt + 1, "over", max_instance_to_explain)
print("### Models ", nb_model + 1, "over", len(models))
print("instance to explain:", instance_to_explain)
try:
precision, coverage, f2 = explainer.explain_instance(instance_to_explain,
growing_method=growing_method,
local_surrogate_experiment=True)
print("precision", precision)
print("coverage", coverage)
print("f2", f2)
if graph: experimental_informations.store_experiments_information_instance(precision, 'precision.csv', coverage, 'coverage.csv', f2, 'f2.csv')
cnt += 1
except Exception as inst:
print(inst)
if graph: experimental_informations.store_experiments_information(max_instance_to_explain, nb_model, 'precision.csv', 'coverage.csv', 'f2.csv', filename_all=filename_all)
| 59.131148 | 182 | 0.624896 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,590 | 0.220405 |
23ceb4be40ab14b96763eb535badca57463b0253 | 8,099 | py | Python | summarise_results.py | MDBAuth/EWR_tool | 5b05cf276822d97a38a32a5fc031209224a04fb3 | [
"CC0-1.0"
]
| 5 | 2021-03-17T00:33:53.000Z | 2022-03-07T18:16:25.000Z | summarise_results.py | MDBAuth/EWR_tool | 5b05cf276822d97a38a32a5fc031209224a04fb3 | [
"CC0-1.0"
]
| null | null | null | summarise_results.py | MDBAuth/EWR_tool | 5b05cf276822d97a38a32a5fc031209224a04fb3 | [
"CC0-1.0"
]
| 2 | 2022-01-14T03:50:10.000Z | 2022-02-14T00:45:56.000Z | import pandas as pd
import numpy as np
import data_inputs, evaluate_EWRs
#--------------------------------------------------------------------------------------------------
def sum_events(events):
'''returns a sum of events'''
return int(round(events.sum(), 0))
def get_frequency(events):
'''Returns the frequency of years they occur in'''
if events.count() == 0:
result = 0
else:
result = (int(events.sum())/int(events.count()))*100
return int(round(result, 0))
def get_average(input_events):
'''Returns overall average length of events'''
events = input_events.dropna()
if len(events) == 0:
result = 0
else:
result = round(sum(events)/len(events),1)
return result
def get_event_length(input_events, num_events):
events = input_events.dropna()
if num_events == 0:
EL = 0
else:
EL = round(sum(events)/num_events,1)
return EL
def count_exceedence(input_events, EWR_info):
events = input_events.copy(deep=True)
if EWR_info['max_inter-event'] == None:
return 'N/A'
else:
masking = events.isna()
events[masking] = ''
total = 0
for year in events.index:
if list(events[year]) != '':
count = len(events[year])
total = total + count
return int(total)
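# Illustrative example (editor's sketch, not part of the original script): how the
# helper statistics above behave on a small yearly pandas Series.
#
#   yearly = pd.Series([1, 0, 1, 1])          # EWR achieved in 3 of 4 years
#   sum_events(yearly)                         # -> 3
#   get_frequency(yearly)                      # -> 75   (3 of 4 years, as a percentage)
#   get_average(pd.Series([2.0, None, 4.0]))   # -> 3.0  (NaNs are dropped first)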
def initialise_summary_df_columns(input_dict):
'''Ingest a dictionary of ewr yearly results and a list of statistical tests to perform
initialises a dataframe with these as a multilevel heading and returns this'''
analysis = data_inputs.analysis()
column_list = []
list_of_arrays = []
for scenario, scenario_results in input_dict.items():
for sub_col in analysis:
column_list = tuple((scenario, sub_col))
list_of_arrays.append(column_list)
    array_of_arrays = tuple(list_of_arrays)
multi_col_df = pd.MultiIndex.from_tuples(array_of_arrays, names = ['scenario', 'type'])
return multi_col_df
def initialise_summary_df_rows(input_dict):
'''Ingests a dictionary of ewr yearly results
    pulls the location information and the associated ewrs at each location,
saves these as respective indexes and return the multi-level index'''
index_1 = list()
index_2 = list()
index_3 = list()
combined_index = list()
# Get unique col list:
for scenario, scenario_results in input_dict.items():
for site, site_results in scenario_results.items():
for PU in site_results:
site_list = []
for col in site_results[PU]:
if '_' in col:
all_parts = col.split('_')
remove_end = all_parts[:-1]
if len(remove_end) > 1:
EWR_code = '_'.join(remove_end)
else:
EWR_code = remove_end[0]
else:
EWR_code = col
if EWR_code in site_list:
continue
else:
site_list.append(EWR_code)
add_index = tuple((site, PU, EWR_code))
if add_index not in combined_index:
combined_index.append(add_index)
unique_index = tuple(combined_index)
multi_index = pd.MultiIndex.from_tuples(unique_index, names = ['gauge', 'planning unit', 'EWR'])
return multi_index
def allocate(df, add_this, idx, site, PU, EWR, scenario, category):
'''Save element to a location in the dataframe'''
df.loc[idx[[site], [PU], [EWR]], idx[scenario, category]] = add_this
return df
def summarise(input_dict):
'''Ingests a dictionary with ewr pass/fails
summarises these results and returns a single summary dataframe'''
PU_items = data_inputs.get_planning_unit_info()
EWR_table, see_notes_ewrs, undefined_ewrs, noThresh_df, no_duration, DSF_ewrs = data_inputs.get_EWR_table()
# Initialise dataframe with multi level column heading and multi-index:
multi_col_df = initialise_summary_df_columns(input_dict)
index = initialise_summary_df_rows(input_dict)
df = pd.DataFrame(index = index, columns=multi_col_df)
# Run the analysis and add the results to the dataframe created above:
for scenario, scenario_results in input_dict.items():
for site, site_results in scenario_results.items():
for PU in site_results:
for col in site_results[PU]:
all_parts = col.split('_')
remove_end = all_parts[:-1]
if len(remove_end) > 1:
EWR = '_'.join(remove_end)
else:
EWR = remove_end[0]
idx = pd.IndexSlice
if ('_eventYears' in col):
S = sum_events(site_results[PU][col])
df = allocate(df, S, idx, site, PU, EWR, scenario, 'Event years')
F = get_frequency(site_results[PU][col])
df = allocate(df, F, idx, site, PU, EWR, scenario, 'Frequency')
PU_num = PU_items['PlanningUnitID'].loc[PU_items[PU_items['PlanningUnitName'] == PU].index[0]]
EWR_info = evaluate_EWRs.get_EWRs(PU_num, site, EWR, EWR_table, None, ['TF'])
TF = EWR_info['frequency']
df = allocate(df, TF, idx, site, PU, EWR, scenario, 'Target frequency')
elif ('_numAchieved' in col):
S = sum_events(site_results[PU][col])
df = allocate(df, S, idx, site, PU, EWR, scenario, 'Achievement count')
ME = get_average(site_results[PU][col])
df = allocate(df, ME, idx, site, PU, EWR, scenario, 'Achievements per year')
elif ('_numEvents' in col):
S = sum_events(site_results[PU][col])
df = allocate(df, S, idx, site, PU, EWR, scenario, 'Event count')
ME = get_average(site_results[PU][col])
df = allocate(df, ME, idx, site, PU, EWR, scenario, 'Events per year')
elif ('_eventLength' in col):
EL = get_event_length(site_results[PU][col], S)
df = allocate(df, EL, idx, site, PU, EWR, scenario, 'Event length')
elif ('_totalEventDays' in col):
AD = get_average(site_results[PU][col])
df = allocate(df, AD, idx, site, PU, EWR, scenario, 'Threshold days')
elif ('daysBetweenEvents' in col):
PU_num = PU_items['PlanningUnitID'].loc[PU_items[PU_items['PlanningUnitName'] == PU].index[0]]
EWR_info = evaluate_EWRs.get_EWRs(PU_num, site, EWR, EWR_table, None, ['MIE'])
DB = count_exceedence(site_results[PU][col], EWR_info)
df = allocate(df, DB, idx, site, PU, EWR, scenario, 'Inter-event exceedence count')
# Also save the max inter-event period to the data summary for reference
EWR_info = evaluate_EWRs.get_EWRs(PU_num, site, EWR, EWR_table, None, ['MIE'])
MIE = EWR_info['max_inter-event']
df = allocate(df, MIE, idx, site, PU, EWR, scenario, 'Max inter event period (years)')
elif ('_missingDays' in col):
MD = sum_events(site_results[PU][col])
df = allocate(df, MD, idx, site, PU, EWR, scenario, 'No data days')
elif ('_totalPossibleDays' in col):
TD = sum_events(site_results[PU][col])
df = allocate(df, TD, idx, site, PU, EWR, scenario, 'Total days')
    return df
| 47.087209 | 118 | 0.548463 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,537 | 0.189777 |
23cf8e518be1c460ad577e7a202dfb564e60b6c9 | 247 | py | Python | os/excel and csv/save pandas to xlsx file.py | pydeveloper510/Python | 2e3cf5f9d132fbc6dd8c41a96166b6e879d86e0d | ["MIT"] | 3 | 2021-04-23T08:04:14.000Z | 2021-05-08T01:24:08.000Z | os/excel and csv/save pandas to xlsx file.py | pydeveloper510/Python | 2e3cf5f9d132fbc6dd8c41a96166b6e879d86e0d | ["MIT"] | null | null | null | os/excel and csv/save pandas to xlsx file.py | pydeveloper510/Python | 2e3cf5f9d132fbc6dd8c41a96166b6e879d86e0d | ["MIT"] | 1 | 2021-05-08T01:24:46.000Z | 2021-05-08T01:24:46.000Z |
import pandas as pd
writer = pd.ExcelWriter("data.xlsx", engine='xlsxwriter')
df.to_excel(writer, sheet_name='Sheet1', index=False)
# Get the xlsxwriter workbook and worksheet objects.
workbook = writer.book
worksheet = writer.sheets['Sheet1']
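# Hedged completion (assumption, not in the original snippet): `df` is taken to be an existing DataFrame,
# and the writer must be closed to actually write data.xlsx (older pandas versions use writer.save() instead).
writer.close()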
| 24.7 | 57 | 0.765182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.368421 |
23cf95b3c49a497e9b4fcecf5c43de957206031c | 1,564 | py | Python | setup.py | nitehawck/DevEnvManager | 425b0d621be577fe73f22b4641f7099eac65669e | ["MIT"] | 1 | 2016-05-16T23:13:47.000Z | 2016-05-16T23:13:47.000Z | setup.py | nitehawck/DevEnvManager | 425b0d621be577fe73f22b4641f7099eac65669e | ["MIT"] | 41 | 2016-01-22T00:56:14.000Z | 2016-05-12T14:38:37.000Z | setup.py | nitehawck/DevEnvManager | 425b0d621be577fe73f22b4641f7099eac65669e | ["MIT"] | null | null | null |
from setuptools import setup
with open('README.rst') as f:
readme = f.read()
setup(
name="dem",
version="0.0.8",
author="Ian Macaulay, Jeremy Opalach",
author_email="[email protected]",
url="http://www.github.com/nitehawck/dem",
description="An agnostic library/package manager for setting up a development project environment",
long_description=readme,
license="MIT License",
classifiers=[
'Development Status :: 3 - Alpha',
#'Development Status :: 4 - Beta',
#'Development Status :: 5 - Production / Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Build Tools',
],
packages=['dem', 'dem.dependency', 'dem.project'],
install_requires=[
'virtualenv',
'PyYaml',
'wget',
'gitpython'
],
tests_require=[
'pyfakefs',
'mock'
],
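    # console_scripts exposes `dem` as a command-line entry point that calls dem.__main__:main on install.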
entry_points={
'console_scripts': [
'dem = dem.__main__:main'
]
},
)
| 31.28 | 103 | 0.575448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 967 | 0.618286 |
23d1de5c4b1de87a253332547b768f99517edb24 | 326 | py | Python | lfs/core/admin.py | restless/django-lfs | 4058f9d45b416ef2e8c28a87856ea0f1550b523d | ["BSD-3-Clause"] | 1 | 2020-02-26T03:07:39.000Z | 2020-02-26T03:07:39.000Z | lfs/core/admin.py | mxins/django-lfs | bf42ed80ce0e1ec96db6ab985adcc614ea79dfc8 | ["BSD-3-Clause"] | null | null | null | lfs/core/admin.py | mxins/django-lfs | bf42ed80ce0e1ec96db6ab985adcc614ea79dfc8 | ["BSD-3-Clause"] | null | null | null |
# django imports
from django.contrib import admin
# lfs imports
from lfs.core.models import Action
from lfs.core.models import ActionGroup
from lfs.core.models import Shop
from lfs.core.models import Country
admin.site.register(Shop)
admin.site.register(Action)
admin.site.register(ActionGroup)
admin.site.register(Country)
| 23.285714 | 39 | 0.819018 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.088957 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.