Dataset schema (one record per source file, 39 columns):

| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k (nullable) |
| max_stars_repo_stars_event_min_datetime | string | length 24 (nullable) |
| max_stars_repo_stars_event_max_datetime | string | length 24 (nullable) |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k (nullable) |
| max_issues_repo_issues_event_min_datetime | string | length 24 (nullable) |
| max_issues_repo_issues_event_max_datetime | string | length 24 (nullable) |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k (nullable) |
| max_forks_repo_forks_event_min_datetime | string | length 24 (nullable) |
| max_forks_repo_forks_event_max_datetime | string | length 24 (nullable) |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |

Data rows follow, with fields separated by `|` in the column order above.
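A minimal sketch of how rows with this schema could be inspected programmatically (assuming the dump corresponds to a Hugging Face-style dataset; the dataset path below is a hypothetical placeholder, not the real name):

from datasets import load_dataset  # pip install datasets

# Stream the split so the large "content" fields (up to ~2 MB each) are not all held in memory.
ds = load_dataset("example-org/python-source-dump", split="train", streaming=True)
for row in ds:
    # Each row is a dict keyed by the column names listed above.
    print(row["max_stars_repo_path"], row["size"], row["score_documentation"])
    break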
3254729c0575b8bd980f42074c2cb939b0ad6cf0 | 1,382 | py | Python | problems/p012.py | 10jmellott/ProjectEuler | eb84d129bbc37ba10ad7814ad2138d81568e0085 | [
"Unlicense"
]
| null | null | null | problems/p012.py | 10jmellott/ProjectEuler | eb84d129bbc37ba10ad7814ad2138d81568e0085 | [
"Unlicense"
]
| null | null | null | problems/p012.py | 10jmellott/ProjectEuler | eb84d129bbc37ba10ad7814ad2138d81568e0085 | [
"Unlicense"
]
| null | null | null | """<a href="https://projecteuler.net/problem=12" class="title-custom-link">Highly divisible triangular number</a>
The sequence of triangle numbers is generated by adding the natural numbers. So the 7th triangle
number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28. The first ten terms would be:
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
Let us list the factors of the first seven triangle numbers:
1: 1
3: 1,3
6: 1,2,3,6
10: 1,2,5,10
15: 1,3,5,15
21: 1,3,7,21
28: 1,2,4,7,14,28
We can see that 28 is the first triangle number to have over five divisors.
What is the value of the first triangle number to have over five hundred divisors?
"""
from utils.oeis import triangular_numbers
from utils.fibonacci import trial_division
from utils.fibonacci import factors_to_dictionary
def main():
"""Solves this problem
Utilizes [A000005](http://oeis.org/A000005) which is solved via a
lemma to Euler's Totient Function
Returns:
Integer: Solution to this problem
"""
i = 1
divisors = 0
while divisors <= 500:
triangle = triangular_numbers(i)
prime_factors = trial_division(triangle)
prime_factors = factors_to_dictionary(prime_factors)
divisors = 1
for k, v in prime_factors.items():
divisors = divisors * (v + 1)
i = i + 1
return triangular_numbers(i - 1)
| 32.139535 | 113 | 0.664978 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 859 | 0.621563 |
3255418e552bf21eec558aa0897845fa6583a29c | 4,984 | py | Python | u3s2m1ass1-pt6/code/rpg_queries.py | LambdaTheda/lambdata-Unit3 | b44b20f2f3e28d2b17613660ddb562afe4825686 | [
"MIT"
]
| null | null | null | u3s2m1ass1-pt6/code/rpg_queries.py | LambdaTheda/lambdata-Unit3 | b44b20f2f3e28d2b17613660ddb562afe4825686 | [
"MIT"
]
| null | null | null | u3s2m1ass1-pt6/code/rpg_queries.py | LambdaTheda/lambdata-Unit3 | b44b20f2f3e28d2b17613660ddb562afe4825686 | [
"MIT"
]
| 1 | 2020-05-11T04:33:24.000Z | 2020-05-11T04:33:24.000Z | import sqlite3
import os
#DB_FILEPATH = "data/chinook.db"
DB_FILEPATH = os.path.join(os.path.dirname(__file__), "..", "data", "rpg_db.sqlite3")
conn = sqlite3.connect(DB_FILEPATH)
conn.row_factory = sqlite3.Row
print(type(conn)) #> <class 'sqlite3.Connection'>
curs = conn.cursor()
print(type(curs)) #> <class 'sqlite3.Cursor'>
query = """SELECT
count(DISTINCT character_id) as character_count
FROM charactercreator_character"""
# query1 = """SELECT
# count(DISTINCT character_ptr_id) as character_ptr_count
# FROM charactercreator_cleric"""
# query2 = """SELECT
# count(DISTINCT character_ptr_id) as character_ptr_count
# FROM charactercreator_fighter"""
# query3 = """SELECT
# count(DISTINCT character_ptr_id) as character_ptr_count
# FROM charactercreator_mage"""
# query4 = """SELECT
# count(DISTINCT character_ptr_id) as character_ptr_count
# FROM charactercreator_thief"""
queries_combined = """SELECT
count(distinct c.character_ptr_id) as total_clerics
,count(distinct f.character_ptr_id) as total_fighters
,count(distinct m.character_ptr_id) as total_mages
,count(distinct n.mage_ptr_id) as total_necromancers
,count(distinct t.character_ptr_id) as total_thieves
FROM charactercreator_character ccc
LEFT JOIN charactercreator_fighter f
ON ccc.character_id = f.character_ptr_id
LEFT JOIN charactercreator_cleric c
ON ccc.character_id= c.character_ptr_id
LEFT JOIN charactercreator_mage m
ON ccc.character_id = m.character_ptr_id
LEFT JOIN charactercreator_necromancer n
ON ccc.character_id = n.mage_ptr_id
LEFT JOIN charactercreator_thief t
ON ccc.character_id = t.character_ptr_id"""
query5 = """SELECT
count(DISTINCT item_id ) as total_item
FROM armory_item"""
query6 = """SELECT
count(DISTINCT item_ptr_id) as weapons
FROM armory_weapon"""
query7 = """SELECT
count(DISTINCT item_id) - count(DISTINCT item_ptr_id) as total_non_weapons
FROM armory_item, armory_weapon"""
query8 = """SELECT item_id
, count(DISTINCT item_id) as item
FROM charactercreator_character_inventory
GROUP BY character_id
LIMIT 20
"""
query9 = """SELECT cci.character_id
, count(DISTINCT aw.item_ptr_id) as number_of_weapons
FROM charactercreator_character_inventory as cci
LEFT JOIN armory_item as ai ON cci.item_id = ai.item_id
LEFT JOIN armory_weapon as aw ON ai.item_id = aw.item_ptr_id
GROUP BY character_id
LIMIT 20"""
query10 = """SELECT avg(total_items) as avg_items
FROM (
-- row per character = 302
SELECT
c.character_id
,c.name
--,ci.item_id
,count(distinct ci.item_id) as total_items
FROM charactercreator_character c
LEFT JOIN charactercreator_character_inventory ci
ON c.character_id = ci.character_id
GROUP BY c.character_id
) subz"""
query11 = """SELECT avg(weapon_count) as avg_weapon
FROM (
SELECT
cci.character_id
,count(DISTINCT aw.item_ptr_id) as weapon_count
FROM charactercreator_character_inventory cci
LEFT JOIN armory_item ai ON cci.item_id = ai.item_id
LEFT JOIN armory_weapon aw ON ai.item_id = aw.item_ptr_id
GROUP BY 1
) subz"""
print("----------")
result = curs.execute(query).fetchone()
print("RESULTS FOR CHARACTERCREATOR_CHARACTER", result)
print(result["character_count"])
# print("-------------")
# result1 = curs.execute(query1).fetchone()
# print("Results for charactercreator_cleric", result1)
# print(result1["character_ptr_count"])
# print("---------")
# result2 = curs.execute(query2).fetchone()
# print("Results for charactercreator_fighter", result2)
# print(result2["character_ptr_count"])
# print("---------")
# result3 = curs.execute(query3).fetchone()
# print("Results for charactercreator_mage", result3)
# print(result3["character_ptr_count"])
# print('--------')
# result4 = curs.execute(query4).fetchone()
# print("Results for charactercreator_thief", result4)
# print(result4["character_ptr_count"])
# print("-------------")
# result5 = curs.execute(query5).fetchone()
# print("Results for total Items", result5)
# print(result5["total_item"])
result_queries = curs.execute(queries_combined).fetchall()
print("Results of each specific subclass", result_queries)
result6 = curs.execute(query6).fetchone()
print("Results for total weapons", result6)
print(result6["weapons"])
print("---------")
result7 = curs.execute(query7).fetchone()
print("Results for total non weapons", result7)
print(result7["total_non_weapons"])
print("---------")
result8 = curs.execute(query8).fetchall()
for rw in result8:
print(rw[0], rw[1])
print("---------")
result9 = curs.execute(query9).fetchall()
for rw in result9:
print(rw['character_id'], rw['number_of_weapons'])
print("---------")
result10 = curs.execute(query10).fetchone()
print("Average item per character", result10)
print(result10["avg_items"])
print("---------")
result11= curs.execute(query11).fetchone()
print("Average weapon per character", result11)
print(result11["avg_weapon"])
print("---------") | 30.576687 | 85 | 0.731742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,926 | 0.787721 |
3256173ee4e9a424745cf36c9f1ac6cf9bf2bc08 | 7,872 | py | Python | tools/table.py | asterick/minimon.js | 4876544525eb1bfef1b81a12807e7ba37cdd4949 | [
"0BSD"
]
| 5 | 2019-04-25T00:19:56.000Z | 2020-09-02T01:24:40.000Z | tools/table.py | asterick/minimon.js | 4876544525eb1bfef1b81a12807e7ba37cdd4949 | [
"0BSD"
]
| 6 | 2020-05-23T23:17:59.000Z | 2022-02-17T21:50:46.000Z | tools/table.py | asterick/minimon.js | 4876544525eb1bfef1b81a12807e7ba37cdd4949 | [
"0BSD"
]
| null | null | null | #!/usr/bin/env python3
# ISC License
#
# Copyright (c) 2019, Bryon Vandiver
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from json import dumps
import os
import csv
CSV_LOCATION = os.path.join(os.path.abspath(os.path.dirname(__file__)), 's1c88.csv')
op0s, op1s, op2s = [None] * 0x100, [None] * 0x100, [None] * 0x100
CONDITIONS = {
'C': 'cpu.reg.flag.c',
'NC': '!cpu.reg.flag.c',
'Z': 'cpu.reg.flag.z',
'NZ': '!cpu.reg.flag.z',
'V': 'cpu.reg.flag.v',
'NV': '!cpu.reg.flag.v',
'M': 'cpu.reg.flag.n',
'P': '!cpu.reg.flag.n',
'LT': 'cpu.reg.flag.n != cpu.reg.flag.v',
'LE': '(cpu.reg.flag.n != cpu.reg.flag.v) || cpu.reg.flag.z',
'GT': '(cpu.reg.flag.n == cpu.reg.flag.v) && !cpu.reg.flag.z',
'GE': 'cpu.reg.flag.n == cpu.reg.flag.v',
'F0': 'cpu.reg.flag.f0',
'F1': 'cpu.reg.flag.f1',
'F2': 'cpu.reg.flag.f2',
'F3': 'cpu.reg.flag.f3',
'NF0': '!cpu.reg.flag.f0',
'NF1': '!cpu.reg.flag.f1',
'NF2': '!cpu.reg.flag.f2',
'NF3': '!cpu.reg.flag.f3',
}
ARGUMENTS = {
'A': (8, False, False, 'a'),
'B': (8, False, False, 'b'),
'L': (8, False, False, 'l'),
'H': (8, False, False, 'h'),
'BR': (8, False, False, 'br'),
'SC': (8, False, False, 'sc'),
'EP': (8, False, False, 'ep'),
'XP': (8, False, False, 'xp'),
'YP': (8, False, False, 'yp'),
'NB': (8, False, False, 'nb'),
'BA': (16, False, False, 'ba'),
'HL': (16, False, False, 'hl'),
'IX': (16, False, False, 'ix'),
'IY': (16, False, False, 'iy'),
'SP': (16, False, False, 'sp'),
'PC': (16, False, False, 'pc'),
'#nn': (8, True, False, 'imm8'),
'rr': (8, True, False, 'imm8'),
'#mmnn': (16, True, False, 'imm16'),
'qqrr': (16, True, False, 'imm16'),
'[kk]': (16, True, True, 'vect'), # Special
'[hhll]': (-1, True, True, 'ind16'),
'[HL]': (-1, True, True, 'absHL'),
'[IX]': (-1, True, True, 'absIX'),
'[IY]': (-1, True, True, 'absIY'),
'[BR:ll]': (-1, True, True, 'absBR'),
'[SP+dd]': (-1, True, True, 'indDSP'),
'[IX+dd]': (-1, True, True, 'indDIX'),
'[IY+dd]': (-1, True, True, 'indDIY'),
'[IX+L]': (-1, True, True, 'indIIX'),
'[IY+L]': (-1, True, True, 'indIIY'),
}
OPERATIONS = {
'INC': (8, 'ReadWrite'),
'DEC': (8, 'ReadWrite'),
'SLA': (8, 'ReadWrite'),
'SLL': (8, 'ReadWrite'),
'SRA': (8, 'ReadWrite'),
'SRL': (8, 'ReadWrite'),
'RL': (8, 'ReadWrite'),
'RLC': (8, 'ReadWrite'),
'RR': (8, 'ReadWrite'),
'RRC': (8, 'ReadWrite'),
'CPL': (8, 'ReadWrite'),
'NEG': (8, 'ReadWrite'),
'LD': (8, 'Write', 'Read'),
'ADD': (8, 'ReadWrite', 'Read'),
'ADC': (8, 'ReadWrite', 'Read'),
'SUB': (8, 'ReadWrite', 'Read'),
'SBC': (8, 'ReadWrite', 'Read'),
'AND': (8, 'ReadWrite', 'Read'),
'OR': (8, 'ReadWrite', 'Read'),
'XOR': (8, 'ReadWrite', 'Read'),
'CP': (8, 'Read', 'Read'),
'BIT': (8, 'Read', 'Read'),
'CALL': (16, 'Read'),
'CARS': (8, 'Read'),
'CARL': (16, 'Read'),
'JRS': (8, 'Read'),
'JRL': (16, 'Read'),
'JP': (8, 'Read'),
'INT': (8, 'Read'),
'RETE': (8,),
'PUSH': (-1, 'Read'),
'POP': (-1, 'Write'),
'EX': (-1, 'ReadWrite', 'ReadWrite'),
'SWAP': (8, 'ReadWrite')
}
def get_name(*args):
return "inst_%s" % '_'.join([arg.lower() for arg in args if arg])
def format_arg(i, siz, mem, ind, nam):
if mem:
return "data%i" % i
else:
return "cpu.reg.%s" % nam
def format(cycles, op, *args):
condition = None
cycles, skipped = [int(c) for c in cycles.split(",") * 2][:2]
if len(args) > 0 and args[0] in CONDITIONS:
condition, args = args[0], args[1:]
try:
ops = OPERATIONS[op]
args = [ARGUMENTS[arg] for arg in args if arg]
default_size, directions = ops[0], ops[1:]
if len(args) >= 1:
size = max(default_size, *[s for s, i, m, n in args])
else:
size = default_size
name = get_name(op, condition, *[n for s, i, m, n in args])
print ("static int %s(Machine::State& cpu) {" % name)
for i, (siz, mem, ind, nam) in enumerate(args):
if ind:
print ("\tconst auto addr%i = calc_%s(cpu);" % (i, nam))
safety = "" if "Write" in directions[i] else "const "
if "Read" in directions[i]:
print ("\t%suint%i_t data%i = cpu_read%s(cpu, addr%i);" % (safety, size, i, size, i))
else:
print ("\tuint%i_t data%i;" % (size, i))
elif mem:
print ("\tconst uint%i_t data%i = cpu_imm%i(cpu);" % (size, i, siz))
if condition:
print ("\tif (!(%s)) {" % CONDITIONS[condition])
print ("\t\tcpu.reg.cb = cpu.reg.nb;")
print ("\t\treturn %i;" % skipped)
print ("\t}")
print ("\top_%s%i(%s);" % (op.lower(), size, ', '.join(['cpu']+[format_arg(i, *a) for i, a in enumerate(args)])));
block = False
for i, (siz, mem, ind, nam) in enumerate(args):
if ind and "Write" in directions[i]:
print ("\tcpu_write%s(cpu, data%i, addr%i);" % (size, i, i))
if nam in ['sc', 'nb'] and "Write" in directions[i]:
block = True
if block or op == 'RETE':
print ("\treturn %i + inst_advance(cpu); // Block IRQs" % cycles)
else:
print ("\treturn %i;" % cycles)
print ("}\n")
return name
except:
name = get_name(op, condition, *args)
print ("int clock_%s(Machine::State& cpu) {" % name)
print ("\t%s(cpu);" % name)
print ("\treturn %i;" % cycles)
print ("}\n")
return "clock_%s" % name
# Generate switch table
def dump_table(instructions, indent):
for i, t in enumerate(instructions):
if not t:
continue
print ("%scase 0x%02X: return %s(cpu);" % (indent, i, t))
#print (i, t)
print ("%sdefault: return inst_undefined(cpu);" % indent)
with open(CSV_LOCATION, 'r') as csvfile:
spamreader = csv.reader(csvfile)
next(spamreader)
for row in spamreader:
code, cycles0, op0, arg0_1, arg0_2, cycles1, op1, arg1_1, arg1_2, cycles2, op2, arg2_1, arg2_2 = row
code = int(code, 16)
if op0 != 'undefined':
op0s[code] = format(cycles0, op0, arg0_1, arg0_2)
if op1 != 'undefined':
op1s[code] = format(cycles1, op1, arg1_1, arg1_2)
if op2 != 'undefined':
op2s[code] = format(cycles2, op2, arg2_1, arg2_2)
print ("int inst_advance(Machine::State& cpu) {")
print ("\tswitch (cpu_imm8(cpu)) {")
dump_table(op0s, '\t')
print ("\tcase 0xCE:")
print ("\t\tswitch (cpu_imm8(cpu)) {")
dump_table(op1s, '\t\t')
print ("\t\t}")
print ("\tcase 0xCF:")
print ("\t\tswitch (cpu_imm8(cpu)) {")
dump_table(op2s, '\t\t')
print ("\t\t}")
print ("\t}")
print ("}")
| 32.528926 | 123 | 0.506225 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,113 | 0.395452 |
325927f14aed5b03fe28e7161da22ac9db1b0f2b | 15,364 | py | Python | test_log.py | erkooi/desp_tools | 2bea2e44591ceeeb62cbfe163b4635a3157f6582 | [
"Apache-2.0"
]
| null | null | null | test_log.py | erkooi/desp_tools | 2bea2e44591ceeeb62cbfe163b4635a3157f6582 | [
"Apache-2.0"
]
| null | null | null | test_log.py | erkooi/desp_tools | 2bea2e44591ceeeb62cbfe163b4635a3157f6582 | [
"Apache-2.0"
]
| null | null | null | ###############################################################################
#
# Copyright (C) 2012
# ASTRON (Netherlands Institute for Radio Astronomy) <http://www.astron.nl/>
# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
"""Test logging utilities
* Provide logging with standardized prefixes:
. time : self, if notime = 0
. verbosity level : self, if noVLevel = 0
. test case ID : self, if noTestId = 0
. message text : argument msgString, the actual text to log
* All append_log statements that have a verbosity level equal to or lower than the
test case verbosity level will get logged.
* The logging gets output to the stdio and to a file if a file name is provided.
* It is also possible to append other files to the test logging file.
* Best practice is to use the following verbosity levels for the append_log
argument:
-v 0 Log test result
-v 1 Log test title
-v 2 Log errors
-v 3 Log info
-v 4 Log error details
-v 5 Log info details
-v 6 Log debug
-v 7 Log debug details
"""
################################################################################
# System imports
import sys
import time
import common as cm
################################################################################
# Functions
class Testlog:
V_RESULT = 0
V_TITLE = 1
V_ERRORS = 2
V_INFO = 3
V_ERROR_DETAILS = 4
V_INFO_DETAILS = 5
V_DEBUG = 6
V_DEBUG_DETAILS = 7
_logName=None
def __init__(self, verbosity=11, testId='', sectionId='', logName=None):
self.verbosity = verbosity # Verbosity threshold used by append_log() to decide whether to log the input string or not
self._testId = testId # Test ID that optionally gets used as prefix in append_log line
self._sectionId = sectionId # Section ID that optionally gets used as prefix in append_log line
self._logName = logName # Name for the file that will contain the append_log
if self._logName != None:
try:
self._logFile = open(self._logName,'w')
except IOError:
print('ERROR : Can not open log file %s' % self._logName)
def __del__(self):
if self._logName != None:
self.close_log()
def close_log(self):
if self._logName != None:
self._logFile.close()
# The testId can should remain fixed at __init__, but the user can change the sectionId during the execution
def set_section_id(self, sectionId):
self._sectionId = sectionId
def verbose_levels(self):
return "0=result; 1=title; 2=errors; 3=info; 4=error details; 5=info details; 6=debug; 7=debug details"
# Print the message string and append it to the test log file in the Testlog style
def append_log(self, vLevel, msgString, noTime=0, noVLevel=0, noTestId=0, noSectionId=0):
if vLevel <= self.verbosity:
txt = ''
if noTime == 0:
t = time.localtime()
txt = txt + '[%d:%02d:%02d %02d:%02d:%02d]' % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)
if noVLevel == 0:
txt = txt + ' - (%d) ' % vLevel
if noTestId == 0:
txt = txt + self._testId
if noSectionId == 0:
txt = txt + self._sectionId
txt = txt + msgString
print(txt)
#sys.stdout.flush()
if self._logName != None:
self._logFile.write(txt + '\n')
# Print the repeat message string at regular intervals and append it to the test log file in the Testlog style
def append_log_rep(self, vLevel, rep, nofRep, nofLog=5, noTime=0, noVLevel=0, noTestId=0, noSectionId=0):
if nofRep < nofLog:
logInterval = 1
else:
logInterval = nofRep//nofLog
if rep%logInterval==0 or rep==nofRep-1:
self.append_log(3, 'Rep-%d' % rep)
# Print the contents of an array to the test log file
def append_log_data(self, vLevel, prefixStr, data, radix='dec', dataWidth=8, nofColumns=16, rulers=False, noTime=0, noVLevel=0, noTestId=0, noSectionId=0):
if vLevel <= self.verbosity:
r = 0
columnWidth = dataWidth + 1 # use 1 space between columns
if rulers:
rowStr = 'Col:'
for i in range(nofColumns):
rowStr += '%*d' % (columnWidth, i)
self.append_log(vLevel, prefixStr + rowStr, noTime, noVLevel, noTestId, noSectionId)
self.append_log(vLevel, prefixStr + 'Row:', noTime, noVLevel, noTestId, noSectionId)
rowStr = prefixStr + ('%-4d' % r)
else:
rowStr = prefixStr
k = 0
# Make sure data is a list, otherwise the following fails
if cm.depth(data)==0:
data=cm.listify(data)
n = len(data)
for i in range(n):
if radix=='uns': rowStr += ' %*d' % (dataWidth, data[i])
if radix=='dec': rowStr += ' %*d' % (dataWidth, data[i])
if radix=='hex': rowStr += ' %0*x' % (dataWidth, data[i])
if k < nofColumns-1:
k = k + 1
else:
self.append_log(vLevel, prefixStr + rowStr, noTime, noVLevel, noTestId, noSectionId)
rowStr = prefixStr
r = r + 1
if rulers:
rowStr += ('%-4d' % r)
k = 0
if k!=0:
self.append_log(vLevel, prefixStr + rowStr, noTime, noVLevel, noTestId, noSectionId)
def data_to_string(self, data, dataWidth=4, dataLeft=False, fractionWidth=2, fractionExponent=False):
"""Print data to string with length dataWidth + 1 white space
Default print the data as %s string to support any type
If the data is float or complex then print it using fraction notation when
fractionExponent=False or using exponent notation when fractionExponent=True.
The fractionWidth specifies the width of the floating point value.
The data is printed left or right aligned dependent on dataLeft.
For all data types the returned data string has length dataWidth + 1 for a
white space such that it can be used as a fixed size element string when
printing a row of data on a line.
. data = the data, can be float complex or other e.g. int, string, tuple
. dataWidth = width of the printed data string
. dataLeft = when True then left align the data in the printed data string, else right align
. fractionWidth = width of the fraction in case of float data
. fractionExponent = when True print exponent in case of float data, else only print fraction
"""
if isinstance(data, float):
# Log in float format
if fractionExponent:
dataStr = '%.*e' % (fractionWidth, data) # log data as float with exponent
else:
dataStr = '%.*f' % (fractionWidth, data) # log data as float
elif isinstance(data, complex):
# Log in complex float format
if fractionExponent:
dataStr = '%.*e,' % (fractionWidth, data.real) # log data real part as float with exponent
dataStr += '%.*ej' % (fractionWidth, data.imag) # log data imag part as float with exponent
else:
dataStr = '%.*f,' % (fractionWidth, data.real) # log data real part as float
dataStr += '%.*fj' % (fractionWidth, data.imag) # log data imag part as float
else:
# Default log data as string
dataStr = '%s' % str(data) # the data can be any type that fits %s e.g. int, string, tuple
# the explicite conversion by str() is needed for tuple
# Left or right align the dataStr within dataWidth
if dataLeft:
dataStr = '%-*s ' % (dataWidth, dataStr)
else:
dataStr = '%*s ' % (dataWidth, dataStr)
return dataStr
def append_log_one_dimensional_list(self, vLevel, name, L, prefixStr='', dataWidth=4, dataLeft=False, fractionWidth=0, fractionExponent=False, colIndices=None):
"""Log list L[col] in one row with index labels
. vLevel = verbosity level
. name = name, title of the list
. L = the one dimensional list
. prefixStr = prefix string that is printed before every line, can e.g. be used for grep
. dataWidth = of data in column, see self.data_to_string
. dataLeft = of data in column, see self.data_to_string
. fractionWidth = of data in column, see self.data_to_string
. fractionExponent = of data in column, see self.data_to_string
. colIndices = when None then log counter index, else use index from list
Remarks:
. This append_log_one_dimensional_list is similar to using append_log_data with nofColumns=len(L)
. This append_log_one_dimensional_list is similar to append_log_two_dimensional_list with 1 row.
"""
if vLevel <= self.verbosity:
self.append_log(vLevel, '') # start with newline
self.append_log(vLevel, prefixStr + '%s:' % name)
nof_cols = len(L)
# Print row with column indices
if colIndices == None:
colIndices = list(range(nof_cols))
col_index_str = '. index : '
for col in colIndices:
col_index_str += '%*d ' % (dataWidth, col)
self.append_log(vLevel, prefixStr + col_index_str)
# Print row with data
line_str = '. value : '
uniqueL = cm.unique(L)
if len(uniqueL)==1:
line_str += 'all ' + self.data_to_string(uniqueL[0], dataWidth, dataLeft, fractionWidth, fractionExponent)
else:
for col in range(nof_cols):
line_str += self.data_to_string(L[col], dataWidth, dataLeft, fractionWidth, fractionExponent)
self.append_log(vLevel, prefixStr + '%s' % line_str)
self.append_log(vLevel, '') # end with newline
def append_log_two_dimensional_list(self, vLevel, name, A, prefixStr='', transpose=False, reverseCols=False, reverseRows=False,
dataWidth=4, dataLeft=False, fractionWidth=0, fractionExponent=False, colIndices=None, rowIndices=None):
"""
Log two dimensional list A[row][col] per row with index labels
. vLevel = verbosity level
. name = name, title of the list
. A = the two dimensional list
. prefixStr = prefix string that is printed before every line, can e.g. be used for grep
. transpose = when true transpose(A) to log rows as columns and columns as rows
. reverseCols = when true reverse the order of the columns
. reverseRows = when true reverse the order of the rows
. dataWidth = of data in column, see self.data_to_string
. dataLeft = of data in column, see self.data_to_string
. fractionWidth = of data in column, see self.data_to_string
. fractionExponent = of data in column, see self.data_to_string
. colIndices = when None then log counter index, else use index from list
. rowIndices = when None then log counter index, else use index from list (can be text index)
Remarks:
        . The example recipe for making a two dimensional list of the form A[rows][cols] is:
A = [], row=[], row.append(element) for all cols, A.append(row) for all rows
or use cm.create_multidimensional_list([Number of rows][Number of cols])
"""
if vLevel <= self.verbosity:
self.append_log(vLevel, '') # start with newline
self.append_log(vLevel, prefixStr + '%s:' % name)
if transpose:
#print name, transpose
A = cm.transpose(A)
if reverseRows:
A = cm.reverse_rows_ud(A)
if reverseCols:
A = cm.reverse_cols_lr(A)
nof_rows = len(A)
nof_cols = len(A[0])
self.append_log(vLevel, prefixStr + 'col :')
# Print row with column indices
if colIndices == None:
colIndices = list(range(nof_cols))
if rowIndices == None:
rowIndices = list(range(nof_rows))
rowIndexLength = 6 # default row_str prefix length
else:
rowIndexLength = 3 + len(str(rowIndices[-1])) # use last row index string for row_str prefix length
col_index_str = ' ' * rowIndexLength
for col in colIndices:
col_index_str += '%*d ' % (dataWidth, col)
self.append_log(vLevel, prefixStr + col_index_str)
self.append_log(vLevel, prefixStr + 'row :')
# For each row print row index and row with data
for ri,row in enumerate(rowIndices):
row_str = '%3s : ' % row # row index, log index as string to support also text index
uniqueRow = cm.unique(A[ri])
if len(uniqueRow)==1:
row_str += 'all ' + self.data_to_string(uniqueRow[0], dataWidth, dataLeft, fractionWidth, fractionExponent)
else:
for col in range(nof_cols):
row_str += self.data_to_string(A[ri][col], dataWidth, dataLeft, fractionWidth, fractionExponent)
self.append_log(vLevel, prefixStr + '%s' % row_str)
self.append_log(vLevel, '') # end with newline
# Read the contents of a file and append that to the test log file
def append_log_file(self, vLevel, fileName):
try:
appFile = open(fileName,'r')
self.append_log(vLevel,appFile.read(),1,1,1,1)
appFile.close()
except IOError:
self.append_log(vLevel,'ERROR : Can not open file %s' % fileName)
| 48.466877 | 164 | 0.570034 | 13,327 | 0.867417 | 0 | 0 | 0 | 0 | 0 | 0 | 7,437 | 0.484054 |
325b56ca169aa22d3b3e5e502acb535b1e7a8a46 | 868 | py | Python | subaudible/subparse.py | RobbieClarken/subaudible | f22bdec90693727b36eff426e96d6960387fb94d | [
"MIT"
]
| null | null | null | subaudible/subparse.py | RobbieClarken/subaudible | f22bdec90693727b36eff426e96d6960387fb94d | [
"MIT"
]
| null | null | null | subaudible/subparse.py | RobbieClarken/subaudible | f22bdec90693727b36eff426e96d6960387fb94d | [
"MIT"
]
| null | null | null | import re
def parse_srt(line_iter):
"""
Parses SubRip text into caption dicts.
Args:
line_iter: An iterator that yields lines of a SubRip file.
Yields:
dict: Caption dicts with `start`, `end` and `text` keys.
"""
line_iter = iter(line.rstrip('\r\n') for line in line_iter)
    while True:
        try:
            next(line_iter)  # Skip counter
        except StopIteration:
            return  # input exhausted; end the generator cleanly (required since PEP 479)
start, end = parse_time_line(next(line_iter))
text = '\n'.join(iter(line_iter.__next__, ''))
yield {'start': start, 'end': end, 'text': text}
def parse_time_line(line):
return (parse_time(time_str) for time_str in line.split('-->'))
def parse_time(time_str):
time_str = time_str.replace(',', '.')
match = re.search('(\d\d):(\d\d):(\d\d).(\d\d\d)', time_str)
h, m, s, ms = (int(s) for s in match.groups())
return 3600 * h + 60 * m + s + 1e-3 * ms
| 27.125 | 67 | 0.59447 | 0 | 0 | 527 | 0.607143 | 0 | 0 | 0 | 0 | 297 | 0.342166 |
325b89ab7374be326978f10a334f001191bd3ead | 1,971 | py | Python | application/models/basemodel.py | ahmedsadman/festive | e0e739f126de2e8368014398f5c928c410098da5 | [
"MIT"
]
| 2 | 2020-10-19T23:26:23.000Z | 2020-10-20T02:14:10.000Z | application/models/basemodel.py | ahmedsadman/fest-management-api | e0e739f126de2e8368014398f5c928c410098da5 | [
"MIT"
]
| null | null | null | application/models/basemodel.py | ahmedsadman/fest-management-api | e0e739f126de2e8368014398f5c928c410098da5 | [
"MIT"
]
| 1 | 2021-08-04T15:45:29.000Z | 2021-08-04T15:45:29.000Z | from sqlalchemy import func
from application import db
from application.helpers.error_handlers import ServerError
class BaseModel(db.Model):
__abstract__ = True
def save(self):
"""save the item to database"""
try:
db.session.add(self)
db.session.commit()
except Exception as e:
raise ServerError(message="Failed to save the item", error=e)
def delete(self):
"""delete the item from database"""
try:
db.session.delete(self)
db.session.commit()
except Exception as e:
raise ServerError(message="Deletion failed", error=e)
@classmethod
def find_by_id(cls, id):
return cls.query.filter_by(id=id).first()
@classmethod
def find_query(cls, _filter):
"""Build the query with the given level one filters (filters that has
direct match with entity attributes, not any nested relationship).
Returns 'query' object"""
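        # Example (model and field names hypothetical):
        #   SomeModel.find_query({"name": "x", "active": True})
        # adds filter(lower(SomeModel.name) == lower("x")) and filter(SomeModel.active == True).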
query = cls.query
exclude_lower = [int, bool]
for attr, value in _filter.items():
# func.lower doesn't work for INT/BOOL types in some production
# databases, so this should be properly handled
# ex: lower(event.id) won't work because event.id is INT type
# So the logic is, whenever the passed 'value' in this scope is
# INT, it means
# we don't need to lower anything. Just compare the vanilla value
_attr = getattr(cls, attr)
_attr = (
_attr if (type(value) in exclude_lower) else func.lower(_attr)
)
_value = (
value if (type(value) in exclude_lower) else func.lower(value)
)
query = query.filter(_attr == _value)
return query
@classmethod
def find(cls, _filter):
"""find all entities by given filter"""
return cls.find_query(_filter).all()
| 33.982759 | 78 | 0.597666 | 1,854 | 0.940639 | 0 | 0 | 1,297 | 0.658042 | 0 | 0 | 639 | 0.324201 |
325ca5543e9808ec6039d4cf69192bb2bde47b8f | 522 | py | Python | tests/core/resource_test_base.py | alteia-ai/alteia-python-sdk | 27ec7458334334ed6a1edae52cb25d5ce8734177 | [
"MIT"
]
| 11 | 2020-12-22T14:39:21.000Z | 2022-02-18T16:34:34.000Z | tests/core/resource_test_base.py | alteia-ai/alteia-python-sdk | 27ec7458334334ed6a1edae52cb25d5ce8734177 | [
"MIT"
]
| 1 | 2021-08-05T14:21:12.000Z | 2021-08-09T13:22:55.000Z | tests/core/resource_test_base.py | alteia-ai/alteia-python-sdk | 27ec7458334334ed6a1edae52cb25d5ce8734177 | [
"MIT"
]
| null | null | null | import os
from unittest.mock import patch
import alteia
from tests.alteiatest import AlteiaTestBase
class ResourcesTestBase(AlteiaTestBase):
@classmethod
def setUpClass(cls):
with patch('alteia.core.connection.token.TokenManager.renew_token') as mock:
mock.return_value = None
cls.sdk = alteia.SDK(config_path=cls.get_absolute_path("./config-test.json"))
@staticmethod
def get_absolute_path(file_path):
return os.path.join(os.path.dirname(__file__), file_path)
| 27.473684 | 89 | 0.726054 | 418 | 0.800766 | 0 | 0 | 366 | 0.701149 | 0 | 0 | 75 | 0.143678 |
325dd1dcfd3afeca98237f91ac72ec8dacd09a26 | 137 | py | Python | scripts/viterbi.py | Tereshchenkolab/digitize-ecg-cli | fa5a17c5390a11ce07e39e6a8eecb56ed38b16a1 | [
"MIT"
]
| 6 | 2021-06-12T08:20:33.000Z | 2022-03-01T15:32:35.000Z | scripts/viterbi.py | Tereshchenkolab/ecg-digitize | fa5a17c5390a11ce07e39e6a8eecb56ed38b16a1 | [
"MIT"
]
| null | null | null | scripts/viterbi.py | Tereshchenkolab/ecg-digitize | fa5a17c5390a11ce07e39e6a8eecb56ed38b16a1 | [
"MIT"
]
| null | null | null | from ecgdigitize.signal.extraction.viterbi import *
if __name__ == "__main__":
print(list(interpolate(Point(0,0), Point(5,5)))) | 27.4 | 56 | 0.70073 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.072993 |
325fc49ee449fcf77d594c853f23436486f7b300 | 2,711 | py | Python | tests/io/s3/test_s3_fetcher.py | ToucanToco/PeaKina | afaeec65d9b136d42331f140c3048d27bcddb6b1 | [
"BSD-3-Clause"
]
| null | null | null | tests/io/s3/test_s3_fetcher.py | ToucanToco/PeaKina | afaeec65d9b136d42331f140c3048d27bcddb6b1 | [
"BSD-3-Clause"
]
| null | null | null | tests/io/s3/test_s3_fetcher.py | ToucanToco/PeaKina | afaeec65d9b136d42331f140c3048d27bcddb6b1 | [
"BSD-3-Clause"
]
| null | null | null | from typing import Any, Dict
import boto3
import pytest
from s3fs import S3FileSystem
from peakina.io.s3.s3_fetcher import S3Fetcher
@pytest.fixture
def s3_fetcher(s3_endpoint_url):
return S3Fetcher(client_kwargs={"endpoint_url": s3_endpoint_url})
def test_s3_fetcher_open(s3_fetcher):
dirpath = "s3://accessKey1:verySecretKey1@mybucket"
filepath = f"{dirpath}/0_0.csv"
with s3_fetcher.open(filepath) as f:
assert f.read() == b"a,b\n0,0\n0,1"
def test_s3_fetcher_listdir(s3_fetcher, mocker):
s3_mtime_mock = mocker.patch("peakina.io.s3.s3_fetcher.s3_mtime")
dirpath = "s3://accessKey1:verySecretKey1@mybucket"
assert s3_fetcher.listdir(dirpath) == [
"0_0.csv",
"0_1.csv",
"mydir",
]
assert s3_fetcher.mtime(f"{dirpath}/0_0.csv") > 0
assert s3_fetcher.mtime(f"{dirpath}/mydir") is None
s3_mtime_mock.assert_not_called()
def test_s3_fetcher_mtime(s3_fetcher):
dirpath = "s3://accessKey1:verySecretKey1@mybucket"
filepath = f"{dirpath}/0_0.csv"
assert s3_fetcher.mtime(filepath) > 0
def test_s3_fetcher_open_retry(s3_fetcher, s3_endpoint_url, mocker):
session = boto3.session.Session()
s3_client = session.client(
service_name="s3",
aws_access_key_id="accessKey1",
aws_secret_access_key="verySecretKey1",
endpoint_url=s3_endpoint_url,
)
dirpath = "s3://accessKey1:verySecretKey1@mybucket"
filepath = f"{dirpath}/for_retry_0_0.csv"
s3_client.upload_file("tests/fixtures/for_retry_0_0.csv", "mybucket", "for_retry_0_0.csv")
class S3FileSystemThatFailsOpen(S3FileSystem): # type:ignore[misc]
def __init__(self, key: str, secret: str, client_kwargs: Dict[str, Any]) -> None:
super().__init__(key=key, secret=secret, client_kwargs=client_kwargs)
self.invalidated_cache = False
def open(self, path, mode="rb", block_size=None, cache_options=None, **kwargs):
if not self.invalidated_cache:
raise Exception("argh!")
return super().open(path, mode, block_size, cache_options, **kwargs)
def invalidate_cache(self, path=None):
self.invalidated_cache = True
mocker.patch("peakina.io.s3.s3_utils.s3fs.S3FileSystem", S3FileSystemThatFailsOpen)
logger_mock = mocker.patch("peakina.io.s3.s3_utils.logger")
with s3_fetcher.open(filepath) as f:
# ensure logger doesn't log credentials
logger_mock.warning.assert_called_once_with(
"could not open mybucket/for_retry_0_0.csv: argh!"
)
assert f.read() == b"a,b\n0,0\n0,1"
s3_client.delete_object(Bucket="mybucket", Key="tests/fixtures/for_retry_0_0.csv")
| 33.8875 | 94 | 0.693471 | 626 | 0.230911 | 0 | 0 | 118 | 0.043526 | 0 | 0 | 709 | 0.261527 |
3262d7cd59e5780cbf71323fcb7c77c193d6904e | 324 | py | Python | testemunhoweb/consulta/migrations/0002_auto_20191202_0219.py | danielcamilo13/testemunhoWEB | 46825e31123058fa6ee21e4e71e9e0bedde32bb4 | [
"bzip2-1.0.6"
]
| 1 | 2019-12-03T01:37:13.000Z | 2019-12-03T01:37:13.000Z | testemunhoweb/consulta/migrations/0002_auto_20191202_0219.py | danielcamilo13/testemunhoWEB | 46825e31123058fa6ee21e4e71e9e0bedde32bb4 | [
"bzip2-1.0.6"
]
| 11 | 2020-06-06T01:28:35.000Z | 2022-03-12T00:16:34.000Z | testemunhoweb/consulta/migrations/0002_auto_20191202_0219.py | danielcamilo13/testemunhoWEB | 46825e31123058fa6ee21e4e71e9e0bedde32bb4 | [
"bzip2-1.0.6"
]
| null | null | null | # Generated by Django 2.2.7 on 2019-12-02 05:19
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('consulta', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='generate',
new_name='consulta',
),
]
| 18 | 47 | 0.58642 | 239 | 0.737654 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.280864 |
32638416d54a115fde42bba19086c99e40948e61 | 802 | py | Python | backend/events/tests/test_views.py | trfoss/parrot | 2f120ee1ab82368f85b2b5a7f1c45afc26aa8963 | [
"BSD-2-Clause"
]
| 5 | 2019-02-25T02:24:51.000Z | 2019-04-21T00:56:43.000Z | backend/events/tests/test_views.py | trfoss/parrot | 2f120ee1ab82368f85b2b5a7f1c45afc26aa8963 | [
"BSD-2-Clause"
]
| 51 | 2019-02-06T03:36:27.000Z | 2021-06-10T21:11:24.000Z | backend/events/tests/test_views.py | trfoss/parrot | 2f120ee1ab82368f85b2b5a7f1c45afc26aa8963 | [
"BSD-2-Clause"
]
| 7 | 2019-02-06T04:37:10.000Z | 2019-03-28T07:52:26.000Z | """
backend/events/tests/test_views.py
Tests for the events page views. We use the test client. Read more at
https://docs.djangoproject.com/en/2.1/topics/testing/tools/
"""
import json
from django.test import TestCase
class EventsPageViewTests(TestCase):
"""Events page view tests for route /events/data
"""
fixtures = [
'event.json',
'team.json',
'teammember.json',
]
def test_events_data(self):
"""Test route /events/data
- it returns status code 200
- it returns a non-empty list
"""
response = self.client.get('/events/data')
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(isinstance(obj, list))
self.assertTrue(len(obj) > 0)
| 26.733333 | 69 | 0.63591 | 577 | 0.719451 | 0 | 0 | 0 | 0 | 0 | 0 | 400 | 0.498753 |
32649f15ad311acc51f598d331270d3f4fb588d6 | 497 | py | Python | instructors/lessons/practical_utils/examples/os-path-walk.py | mgadagin/PythonClass | 70b370362d75720b3fb0e1d6cc8158f9445e9708 | [
"MIT"
]
| 46 | 2017-09-27T20:19:36.000Z | 2020-12-08T10:07:19.000Z | instructors/lessons/practical_utils/examples/os-path-walk.py | mgadagin/PythonClass | 70b370362d75720b3fb0e1d6cc8158f9445e9708 | [
"MIT"
]
| 6 | 2018-01-09T08:07:37.000Z | 2020-09-07T12:25:13.000Z | instructors/lessons/practical_utils/examples/os-path-walk.py | mgadagin/PythonClass | 70b370362d75720b3fb0e1d6cc8158f9445e9708 | [
"MIT"
]
| 18 | 2017-10-10T02:06:51.000Z | 2019-12-01T10:18:13.000Z | import os
import os.path
def visit(arg, dirname, names):
print dirname, arg
for name in names:
subname = os.path.join(dirname, name)
if os.path.isdir(subname):
print ' %s/' % name
else:
print ' %s' % name
print
os.mkdir('example')
os.mkdir('example/one')
f = open('example/one/file.txt', 'wt')
f.write('contents')
f.close()
f = open('example/two.txt', 'wt')
f.write('contents')
f.close()
os.path.walk('example', visit, '(User data)') | 22.590909 | 45 | 0.591549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.249497 |
326587ea3dd2af6a3849b34225b40c151ddc17b4 | 532 | py | Python | tikplay/provider/tests/retriever_test.py | tietokilta-saato/tikplay | 8061451c21f06bd07129a8a42543ea86b7518d4a | [
"MIT"
]
| 2 | 2015-01-15T14:14:50.000Z | 2015-10-23T05:37:34.000Z | tikplay/provider/tests/retriever_test.py | tietokilta-saato/tikplay | 8061451c21f06bd07129a8a42543ea86b7518d4a | [
"MIT"
]
| 8 | 2015-01-12T10:27:27.000Z | 2015-05-11T12:05:03.000Z | tikplay/provider/tests/retriever_test.py | tietokilta-saato/tikplay | 8061451c21f06bd07129a8a42543ea86b7518d4a | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# Part of tikplay
# Yes, this is a bit of a non-test.
from nose.tools import *
from tikplay.provider.retriever import Retriever
class TestRetriever(object):
def __init__(self):
self.retriever = Retriever({})
@raises(NotImplementedError)
def test_handles(self):
self.retriever.handles_url("")
@raises(NotImplementedError)
def test_get(self):
self.retriever.get("")
def test_str(self):
assert str(self.retriever) == "URL retriever 'Unnamed retriever'" | 25.333333 | 73 | 0.682331 | 380 | 0.714286 | 0 | 0 | 178 | 0.334586 | 0 | 0 | 112 | 0.210526 |
3265c12d40cc56aa2b76c483dff904dc52c43391 | 11,333 | py | Python | myfunds/web/views/crypto/views.py | anzodev/myfunds | 9f6cda99f443cec064d15d7ff7780f297cbdfe10 | [
"MIT"
]
| null | null | null | myfunds/web/views/crypto/views.py | anzodev/myfunds | 9f6cda99f443cec064d15d7ff7780f297cbdfe10 | [
"MIT"
]
| null | null | null | myfunds/web/views/crypto/views.py | anzodev/myfunds | 9f6cda99f443cec064d15d7ff7780f297cbdfe10 | [
"MIT"
]
| null | null | null | import csv
import io
from datetime import datetime
import peewee as pw
from flask import Blueprint
from flask import g
from flask import make_response
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
from wtforms import Form
from wtforms import IntegerField
from wtforms import validators as vals
from myfunds.core.constants import CryptoDirection
from myfunds.core.models import CryptoActionLog
from myfunds.core.models import CryptoBalance
from myfunds.core.models import CryptoCurrency
from myfunds.core.models import CryptoTransaction
from myfunds.core.models import db_proxy
from myfunds.modules import cmc
from myfunds.web import ajax
from myfunds.web import auth
from myfunds.web import notify
from myfunds.web import utils
from myfunds.web.constants import DATETIME_FORMAT
from myfunds.web.forms import AddCryptoBalanceForm
from myfunds.web.forms import AddCyptoTransactionForm
from myfunds.web.forms import DeleteCryptoBalanceForm
from myfunds.web.forms import UpdateCryptoBalanceQuantityForm
USD_CODE = "USD"
USD_PRECISION = 2
CRYPTO_PRECISION = 8
bp = Blueprint("crypto", __name__, template_folder="templates")
@bp.route("/crypto")
@auth.login_required
def index():
currencies = CryptoCurrency.select().order_by(CryptoCurrency.symbol)
balances = (
CryptoBalance.select()
.join(CryptoCurrency)
.where(CryptoBalance.account == g.authorized_account)
.order_by(CryptoBalance.name, CryptoCurrency.symbol)
)
investments = (
CryptoTransaction.select(
pw.fn.COUNT(CryptoTransaction.id),
pw.fn.SUM(CryptoTransaction.amount),
)
.where(
(CryptoTransaction.account == g.authorized_account)
& (CryptoTransaction.direction == CryptoDirection.INVESTMENT)
)
.scalar(as_tuple=True)
)
if investments[1] is None:
investments = None
fixed_profit = (
CryptoTransaction.select(
pw.fn.COUNT(CryptoTransaction.id),
pw.fn.SUM(CryptoTransaction.amount),
)
.where(
(CryptoTransaction.account == g.authorized_account)
& (CryptoTransaction.direction == CryptoDirection.FIXED_PROFIT)
)
.scalar(as_tuple=True)
)
if fixed_profit[1] is None:
fixed_profit = None
amount_pattern = utils.make_amount_pattern(8)
return render_template(
"crypto/view.html",
currencies=currencies,
investments=investments,
fixed_profit=fixed_profit,
balances=balances,
amount_pattern=amount_pattern,
)
@bp.route("/crypto/balances/new", methods=["POST"])
@auth.login_required
def new_balance():
redirect_url = url_for("crypto.index")
form = AddCryptoBalanceForm(request.form)
utils.validate_form(form, redirect_url)
name = form.name.data
currency_id = form.currency_id.data
currency = CryptoCurrency.get_or_none(id=currency_id)
if currency is None:
notify.error("Currency not found.")
return redirect(redirect_url)
balance = CryptoBalance.create(
account=g.authorized_account,
currency=currency,
name=name,
quantity=0,
)
notify.info(f"New balance '{balance.name}' was created.")
return redirect(redirect_url)
@bp.route("/crypto/balances/delete", methods=["POST"])
@auth.login_required
def delete_balance():
redirect_url = url_for("crypto.index")
form = DeleteCryptoBalanceForm(request.form)
utils.validate_form(form, redirect_url)
balance_id = form.balance_id.data
balance = CryptoBalance.get_or_none(id=balance_id, account=g.authorized_account)
if balance is None:
notify.error("Balance not found.")
return redirect(redirect_url)
balance.delete_instance()
notify.info(f"Balance '{balance.name}' was deleted.")
return redirect(redirect_url)
@bp.route("/crypto/balances/update-quantity", methods=["POST"])
@auth.login_required
def update_quantity():
redirect_url = url_for("crypto.index")
form = UpdateCryptoBalanceQuantityForm(request.form)
form.quantity.validators.append(
vals.Regexp(utils.make_amount_pattern(CRYPTO_PRECISION))
)
utils.validate_form(form, redirect_url)
action = form.action.data
balance_id = form.balance_id.data
quantity = utils.amount_to_subunits(form.quantity.data, CRYPTO_PRECISION)
balance = CryptoBalance.get_or_none(id=balance_id, account=g.authorized_account)
if balance is None:
notify.error("Balance not found.")
return redirect(redirect_url)
quantity_before = balance.quantity
if action == "set":
balance.quantity = quantity
elif action == "add":
balance.quantity += quantity
else:
balance.quantity -= quantity
if balance.quantity < 0:
notify.error("Balance quantity can't be less then zero.")
return redirect(redirect_url)
with db_proxy.atomic():
CryptoActionLog.create(
account=g.authorized_account,
message=(
f"{action.capitalize()} {form.quantity.data} {balance.currency.symbol} "
f"for {balance.name} ({balance.id}), "
f"before: {utils.make_hrf_amount(quantity_before, CRYPTO_PRECISION)}, "
f"after: {utils.make_hrf_amount(balance.quantity, CRYPTO_PRECISION)}."
),
created_at=datetime.now(),
)
balance.save()
notify.info("Balance quantity was updated.")
return redirect(redirect_url)
@bp.route("/crypto/invest", methods=["POST"])
@auth.login_required
def invest():
redirect_url = url_for("crypto.index")
quantity_validator = vals.Regexp(utils.make_amount_pattern(CRYPTO_PRECISION))
price_validator = vals.Regexp(utils.make_amount_pattern(USD_PRECISION))
form = AddCyptoTransactionForm(request.form)
form.quantity.validators.append(quantity_validator)
form.price.validators.append(price_validator)
utils.validate_form(form, redirect_url)
currency_id = form.currency_id.data
quantity = form.quantity.data
price = form.price.data
amount = round(float(quantity) * float(price), USD_PRECISION)
currency = CryptoCurrency.get_or_none(id=currency_id)
if currency is None:
notify.error("Currency not found.")
return redirect(redirect_url)
with db_proxy.atomic():
creation_time = datetime.now()
CryptoTransaction.create(
account=g.authorized_account,
direction=CryptoDirection.INVESTMENT,
symbol=currency.symbol,
quantity=utils.amount_to_subunits(quantity, CRYPTO_PRECISION),
price=utils.amount_to_subunits(price, USD_PRECISION),
amount=utils.amount_to_subunits(amount, USD_PRECISION),
created_at=creation_time,
)
CryptoActionLog.create(
account=g.authorized_account,
message=(
f"Invest ${amount}, bought {quantity} {currency.symbol} by ${price}."
),
created_at=creation_time,
)
notify.info("New investment was added.")
return redirect(redirect_url)
@bp.route("/crypto/fix-profit", methods=["POST"])
@auth.login_required
def fix_profit():
redirect_url = url_for("crypto.index")
quantity_validator = vals.Regexp(utils.make_amount_pattern(CRYPTO_PRECISION))
price_validator = vals.Regexp(utils.make_amount_pattern(USD_PRECISION))
form = AddCyptoTransactionForm(request.form)
form.quantity.validators.append(quantity_validator)
form.price.validators.append(price_validator)
utils.validate_form(form, redirect_url)
currency_id = form.currency_id.data
quantity = form.quantity.data
price = form.price.data
amount = round(float(quantity) * float(price), USD_PRECISION)
currency = CryptoCurrency.get_or_none(id=currency_id)
if currency is None:
notify.error("Currency not found.")
return redirect(redirect_url)
with db_proxy.atomic():
creation_time = datetime.now()
CryptoTransaction.create(
account=g.authorized_account,
direction=CryptoDirection.FIXED_PROFIT,
symbol=currency.symbol,
quantity=utils.amount_to_subunits(quantity, CRYPTO_PRECISION),
price=utils.amount_to_subunits(price, USD_PRECISION),
amount=utils.amount_to_subunits(amount, USD_PRECISION),
created_at=creation_time,
)
CryptoActionLog.create(
account=g.authorized_account,
message=(
f"Fix profit ${amount}, sell {quantity} {currency.symbol} by ${price}."
),
created_at=creation_time,
)
notify.info("New profit fix was added.")
return redirect(redirect_url)
@bp.route("/ajax/balances-values")
@ajax.ajax_endpoint
@auth.login_required
def ajax_balances_values():
balances = (
CryptoBalance.select()
.join(CryptoCurrency)
.where(CryptoBalance.account == g.authorized_account)
)
currencies_ids = [i.currency.cmc_id for i in balances]
prices = cmc.fetch_prices(currencies_ids, USD_CODE)
data = {}
for b in balances:
price, amount = prices.get(b.currency.cmc_id), None
if price is not None:
amount = round(
float(utils.make_hrf_amount(b.quantity, CRYPTO_PRECISION)) * price,
USD_PRECISION,
)
data[int(b.id)] = {"price": price, "amount": amount}
return data
class ActionsFilterForm(Form):
offset = IntegerField(validators=[vals.Optional()])
limit = IntegerField(validators=[vals.Optional()])
@bp.route("/crypto/actions")
@auth.login_required
def actions():
filter_form = ActionsFilterForm(request.args)
utils.validate_form(filter_form, url_for("crypto.actions"), error_notify=None)
offset = filter_form.offset.data or 0
limit = filter_form.limit.data or 10
filters = {"offset": offset, "limit": limit}
limit_plus_one = limit + 1
query = (
CryptoActionLog.select()
.where(CryptoActionLog.account == g.authorized_account)
.order_by(CryptoActionLog.created_at.desc())
.offset(offset)
.limit(limit_plus_one)
)
actions = list(query)[:limit]
has_prev = offset > 0
has_next = len(query) == limit_plus_one
return render_template(
"crypto/actions.html",
filters=filters,
actions=actions,
has_prev=has_prev,
has_next=has_next,
)
@bp.route("/crypto/actions/export")
@auth.login_required
def export_actions():
actions = (
CryptoActionLog.select()
.where(CryptoActionLog.account == g.authorized_account)
.order_by(CryptoActionLog.created_at.desc())
)
buffer = io.StringIO()
csvwriter = csv.writer(buffer, delimiter=";", quoting=csv.QUOTE_ALL)
csvwriter.writerow(["Time", "Message"])
for i in actions.iterator():
csvwriter.writerow([i.created_at.strftime(DATETIME_FORMAT), i.message])
res = make_response(buffer.getvalue())
res.headers["Content-Disposition"] = "attachment; filename=actions.csv"
res.headers["Content-type"] = "text/csv"
return res
| 29.667539 | 88 | 0.682344 | 141 | 0.012442 | 0 | 0 | 9,970 | 0.879732 | 0 | 0 | 1,212 | 0.106944 |
32664ad5a10d717905dcb559f04579027da2c523 | 268 | py | Python | Python/InvertTree.py | lywc20/daily-programming | 78529e535aea5bda409e5a2a009274dca7011e29 | [
"MIT"
]
| null | null | null | Python/InvertTree.py | lywc20/daily-programming | 78529e535aea5bda409e5a2a009274dca7011e29 | [
"MIT"
]
| null | null | null | Python/InvertTree.py | lywc20/daily-programming | 78529e535aea5bda409e5a2a009274dca7011e29 | [
"MIT"
]
| null | null | null | class TreeNode:
def __init__(self,val):
self.left = None
self.right = None
        self.val = val
def invertTree(self,root):
if root:
root.left, root.right = self.invertTree(root.right), self.invertTree(root.left)
return root
| 24.363636 | 87 | 0.619403 | 118 | 0.440299 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
32665f5e99814a1ca419ee599a7bb327ba8ffbf0 | 9,115 | py | Python | src/modeci_mdf/interfaces/pytorch/mod_torch_builtins.py | 29riyasaxena/MDF | 476e6950d0f14f29463eb4f6e3be518dfb2160a5 | [
"Apache-2.0"
]
| 12 | 2021-01-18T20:38:21.000Z | 2022-03-29T15:01:10.000Z | src/modeci_mdf/interfaces/pytorch/mod_torch_builtins.py | 29riyasaxena/MDF | 476e6950d0f14f29463eb4f6e3be518dfb2160a5 | [
"Apache-2.0"
]
| 101 | 2020-12-14T15:23:07.000Z | 2022-03-31T17:06:19.000Z | src/modeci_mdf/interfaces/pytorch/mod_torch_builtins.py | 29riyasaxena/MDF | 476e6950d0f14f29463eb4f6e3be518dfb2160a5 | [
"Apache-2.0"
]
| 15 | 2020-12-04T22:37:14.000Z | 2022-03-31T09:48:03.000Z | """
Wrap commonly-used torch builtins in nn.Module subclass
for easier automatic construction of script
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class argmax(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A):
return torch.argmax(A)
class argmin(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A):
return torch.argmin(A)
class matmul(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A, B):
return torch.matmul(A, B.T)
class add(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A, B):
return torch.add(A, B)
class sin(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A):
return torch.sin(A)
class cos(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A):
return torch.cos(A)
class abs(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A):
return torch.abs(A)
class flatten(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A):
return torch.reshape(A, (1, -1))
class clip(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A, min_val, max_val):
return torch.clamp(A, min_val, max_val)
class shape(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A):
return torch.tensor(A.size()).to(torch.int64)
class det(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A):
return torch.det(A)
class And(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A, B):
return torch.logical_and(A > 0, B > 0)
class Or(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A, B):
return torch.logical_or(A > 0, B > 0)
class Xor(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A, B):
return torch.logical_xor(A > 0, B > 0)
class concat(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A, axis=0):
return torch.cat(A, axis)
class ceil(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A):
return torch.ceil(A)
class floor(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, A):
return torch.floor(A)
class bitshift(torch.nn.Module):
def __init__(self, DIR):
super().__init__()
self.dir = DIR
def forward(self, A, B):
if self.dir == "RIGHT":
return A.to(torch.int64) >> B.to(torch.int64)
else:
return A.to(torch.int64) << B.to(torch.int64)
class conv(torch.nn.Module):
def __init__(
self,
auto_pad="NOTSET",
kernel_shape=None,
group=1,
strides=[1, 1],
dilations=[1, 1],
pads=[0, 0, 0, 0],
):
super().__init__()
self.group = group
self.auto_pad = auto_pad
self.strides = tuple(strides)
self.dilations = tuple(dilations)
        self.kernel_shape = kernel_shape
        self.pads = tuple(pads)  # store the explicit pads; used when auto_pad == "NOTSET"
def forward(self, A, W, B=None):
        if self.auto_pad == "NOTSET":
            pass  # keep the explicit pads stored in __init__
elif self.auto_pad == "VALID":
self.pads = (0, 0, 0, 0)
elif self.auto_pad == "SAME_UPPER":
pad_dim1 = (
                torch.ceil(torch.tensor(A.shape[2]).to(torch.float32) / self.strides[0])
.to(torch.int64)
.item()
)
pad_dim2 = (
                torch.ceil(torch.tensor(A.shape[3]).to(torch.float32) / self.strides[1])
.to(torch.int64)
.item()
)
if pad_dim1 % 2 == 0 and pad_dim2 % 2 == 0:
self.pads = (pad_dim1 // 2, pad_dim1 // 2, pad_dim2 // 2, pad_dim2 // 2)
elif pad_dim1 % 2 == 0 and pad_dim2 % 2 != 0:
self.pads = (
pad_dim1 // 2,
pad_dim1 // 2,
pad_dim2 // 2,
pad_dim2 // 2 + 1,
)
elif pad_dim1 % 2 != 0 and pad_dim2 % 2 == 0:
self.pads = (
pad_dim1 // 2,
pad_dim1 // 2 + 1,
pad_dim2 // 2,
pad_dim2 // 2,
)
elif pad_dim1 % 2 != 0 and pad_dim2 % 2 != 0:
self.pads = (
pad_dim1 // 2,
pad_dim1 // 2 + 1,
pad_dim2 // 2,
pad_dim2 // 2 + 1,
)
elif self.auto_pad == "SAME_LOWER":
pad_dim1 = (
torch.ceil(torch.tensor(A.shape[2]).to(torch.float32) / strides[0])
.to(torch.int64)
.item()
)
pad_dim2 = (
torch.ceil(torch.tensor(A.shape[3]).to(torch.float32) / strides[1])
.to(torch.int64)
.item()
)
if pad_dim1 % 2 == 0 and pad_dim2 % 2 == 0:
self.pads = (pad_dim1 // 2, pad_dim1 // 2, pad_dim2 // 2, pad_dim2 // 2)
elif pad_dim1 % 2 == 0 and pad_dim2 % 2 != 0:
self.pads = (
pad_dim1 // 2,
pad_dim1 // 2,
pad_dim2 // 2 + 1,
pad_dim2 // 2,
)
elif pad_dim1 % 2 != 0 and pad_dim2 % 2 == 0:
self.pads = (
pad_dim1 // 2 + 1,
pad_dim1 // 2,
pad_dim2 // 2,
pad_dim2 / 2,
)
elif pad_dim1 % 2 != 0 and pad_dim2 % 2 != 0:
self.pads = (
pad_dim1 // 2 + 1,
pad_dim1 // 2,
pad_dim2 // 2 + 1,
pad_dim2 // 2,
)
A = F.pad(A, self.pads)
return F.conv2d(
A,
W,
bias=B,
stride=self.strides,
padding=self.pads,
dilation=self.dilations,
groups=self.group,
)
class elu(torch.nn.Module):
def __init__(self, alpha=1.0):
super().__init__()
self.alpha = alpha
def forward(self, A):
return nn.ELU(alpha=self.alpha)(A.to(torch.float32))
class hardsigmoid(torch.nn.Module):
def __init__(self, alpha=0.2, beta=0.5):
super().__init__()
self.alpha = alpha
self.beta = beta
def forward(self, A):
return torch.clamp(self.alpha * (A.to(torch.float32)) + self.beta, 0, 1)
class hardswish(torch.nn.Module):
def __init__(self):
super().__init__()
self.alpha = 1.0 / 6
self.beta = 0.5
def forward(self, A):
return A * torch.clamp(self.alpha * (A.to(torch.float32)) + self.beta, 0, 1)
class hardmax(torch.nn.Module):
def __init__(self, axis=-1):
super().__init__()
self.axis = axis
def forward(self, A):
A = A.to(torch.float32)
rank = A.shape
if self.axis < 0:
self.axis += len(rank)
tensor = torch.arange(rank[self.axis])
repeats = []
repeats.append(1)
for i, idx in enumerate(reversed(rank[: self.axis])):
repeats.append(1)
tensor = torch.stack([tensor] * idx)
for i, idx in enumerate(rank[self.axis + 1 :]):
repeats.append(idx)
tensor = tensor.unsqueeze(-1).repeat(repeats)
repeats[-1] = 1
# b = torch.stack([torch.stack([torch.arange(4)] * 3)] *2)
# print(tensor.shape)
max_values, _ = torch.max(A, dim=self.axis)
# print(max_values, max_values.shape)
# tensor = torch.reshape(tensor, tuple(rank))
tensor[A != torch.unsqueeze(max_values, dim=self.axis)] = rank[self.axis]
# print(b)
first_max, _ = torch.min(tensor, dim=self.axis)
one_hot = torch.nn.functional.one_hot(first_max, rank[self.axis])
return one_hot
class compress(torch.nn.Module):
def __init__(self, axis=None):
self.axis = axis
super().__init__()
def forward(self, A, B):
idx = (B.to(torch.bool) != 0).nonzero().reshape(-1)
if self.axis != None:
return torch.index_select(A, self.axis, idx)
else:
return torch.index_select(A.reshape(-1), 0, idx)
# TODO: Many more to be implemented
__all__ = [
"argmax",
"argmin",
"matmul",
"add",
"sin",
"cos",
"abs",
"flatten",
"clip",
"shape",
"det",
"And",
"Or",
"Xor",
"concat",
"ceil",
"floor",
"bitshift",
"conv",
"elu",
"hardsigmoid",
"hardswish",
"compress",
]
| 23.798956 | 88 | 0.501042 | 8,515 | 0.934174 | 0 | 0 | 0 | 0 | 0 | 0 | 528 | 0.057926 |
326698864c4df87b158debf66bd86b994c325aa0 | 8,068 | py | Python | taf/testlib/snmphelpers.py | stepanandr/taf | 75cb85861f8e9703bab7dc6195f3926b8394e3d0 | [
"Apache-2.0"
]
| 10 | 2016-12-16T00:05:58.000Z | 2018-10-30T17:48:25.000Z | taf/testlib/snmphelpers.py | stepanandr/taf | 75cb85861f8e9703bab7dc6195f3926b8394e3d0 | [
"Apache-2.0"
]
| 40 | 2017-01-04T23:07:05.000Z | 2018-04-16T19:52:02.000Z | taf/testlib/snmphelpers.py | stepanandr/taf | 75cb85861f8e9703bab7dc6195f3926b8394e3d0 | [
"Apache-2.0"
]
| 23 | 2016-12-30T05:03:53.000Z | 2020-04-01T08:40:24.000Z | # Copyright (c) 2011 - 2017, Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""``snmphelpers.py``
`SNMP specific helpers functions`
"""
import sys
import os
import shutil
import tarfile
from subprocess import Popen, PIPE
import pytest
import paramiko as paramiko
from . import helpers
from . import loggers
# create logger for module
def is_mibs_folder_empty(path):
"""Checks is MIBs folder empty of not.
Args:
path(str): path to MIBs folder
Returns:
bool: True if empty and False if not
Examples::
is_mibs_folder_empty()
"""
empty = True
if os.path.exists(path):
for file_n in os.listdir(path):
if 'ONS' in file_n or "ons" in file_n:
empty = False
return empty
def clear_mibs_folder(path):
"""Removes all ONS mibs from MIBS folder.
Args:
path(str): path to MIBs folder
Examples::
clear_mibs_folder()
"""
if os.path.exists(path):
shutil.rmtree(path)
def get_remote_file(hostname, port, username, password, remotepath, localpath):
"""Get remote file to local machine.
Args:
hostname(str): Remote IP-address
port(int): Remote SSH port
username(str): Remote host username for authentication
password(str): Remote host password for authentication
remotepath(str): Remote file to download location path
localpath(str): Local path to save remote file
Examples::
get_remote_file(host, port, username, password, tar_remotepath, tar_localpath)
"""
transport = paramiko.Transport((hostname, port))
transport.connect(username=username, password=password)
sftp = paramiko.SFTPClient.from_transport(transport)
try:
sftp.get(remotepath=remotepath, localpath=localpath)
finally:
sftp.close()
transport.close()
def untar_file(tar_path, untar_path):
"""Unpack tar file.
Args:
tar_path(str): Path to tar file
untar_path(str): Path where to unpack
Examples::
untar_file(tar_localpath, mib_path_txt)
"""
old_folder = os.path.join(untar_path, 'mibs')
if os.path.isfile(old_folder):
os.remove(old_folder)
tar = tarfile.open(tar_path)
tar.extractall(untar_path)
tar.close()
os.remove(tar_path)
def file_convert(mib_txt_path, mib_py_path):
"""Convert .txt MIB to .py.
Args:
mib_txt_path(str): Full path to .txt MIB.
mib_py_path(str): Full path to .py MIB
Examples::
file_convert(mib_txt_path, mib_py_path)
"""
mod_logger_snmp = loggers.module_logger(name=__name__)
# translate .txt mib into python format using 3rd party tools 'smidump'
smidump = Popen(['smidump', '-k', '-f', 'python', mib_txt_path], stdout=PIPE)
list_stdout = smidump.communicate()[0]
if len(list_stdout) == 0:
return "Fail"
# create tmp directory for filling MIBs dictionary
mib_path_tmp = os.path.join(mib_py_path, 'tmp')
if not os.path.exists(mib_path_tmp):
os.makedirs(mib_path_tmp)
# added tmp path into sys.path for imports converted MIB's
sys.path.append(mib_path_tmp)
# get file without extension
file_name = os.path.splitext(os.path.basename(mib_txt_path))[0]
# create .py name
temp_file_name = "{0}.py".format(file_name)
# create .tmp file path for imports
temp_file_path = os.path.join(mib_path_tmp, temp_file_name)
# save and import converted MIB's
with open(temp_file_path, "ab") as a:
a.write(list_stdout)
temp_module = __import__(os.path.splitext(os.path.basename(mib_txt_path))[0])
# update helpers.MIBS_DICT with MIB data
if "moduleName" in list(temp_module.MIB.keys()) and "nodes" in list(temp_module.MIB.keys()):
helpers.MIBS_DICT.update({temp_module.MIB["moduleName"]: list(temp_module.MIB["nodes"].keys())})
# clear tmp file path
sys.path.remove(mib_path_tmp)
os.remove(temp_file_path)
# translate MIB from .py into pysnmp format using 3rd party tools 'libsmi2pysnmp'
pipe = Popen(['libsmi2pysnmp', '--no-text'], stdout=PIPE, stdin=PIPE)
stdout = pipe.communicate(input=list_stdout)
# get MIB name from itself, add .py and save it.
mib_name = "{0}.py".format(temp_module.MIB["moduleName"])
mib_py_path = os.path.join(mib_py_path, mib_name)
mod_logger_snmp.debug("Convert %s to %s" % (file_name, temp_file_name))
with open(mib_py_path, 'a') as py_file:
for string in stdout:
if string is not None:
str_dict = string.decode('utf-8').split('\n')
for each_str in str_dict:
if "ModuleCompliance" in each_str:
if "ObjectGroup" in each_str:
py_file.write(each_str + '\n')
elif "Compliance)" in each_str:
pass
else:
py_file.write(each_str + '\n')
return mib_name
def convert_to_py(txt_dir_path, py_dir_path):
"""Converts .txt MIB's to .py.
Args:
txt_dir_path(str): Path to dir with .txt MIB's.
py_dir_path(str): Path to dir with .py MIB's
Examples::
convert_to_py(mib_path_tmp, mib_path)
"""
mod_logger_snmp = loggers.module_logger(name=__name__)
txt_dir_path = os.path.join(txt_dir_path, "MIB")
mod_logger_snmp.debug("Converts .txt MIB's to .py")
os.environ['SMIPATH'] = txt_dir_path
for mib in os.listdir(txt_dir_path):
mib_txt_path = os.path.join(txt_dir_path, mib)
retry_count = 3
retry = 1
while retry <= retry_count:
mib_py = file_convert(mib_txt_path, py_dir_path)
if mib_py not in os.listdir(py_dir_path):
mod_logger_snmp.debug("Converted MIB %s is not present at %s" % (mib, py_dir_path))
retry += 1
if retry > retry_count:
mod_logger_snmp.debug("Can not convert %s" % (mib, ))
else:
mod_logger_snmp.debug("Converted MIB %s is present at %s" % (mib, py_dir_path))
retry = retry_count + 1
shutil.rmtree(txt_dir_path)
shutil.rmtree(os.path.join(py_dir_path, "tmp"))
def create_mib_folder(config, path, env):
"""Creates MIB folder.
Args:
config(dict): Configuration dictionary.
path(str): Path to MIB folder.
env(Environment): Environment object.
Examples::
create_mib_folder()
"""
if config is None:
pytest.fail("UI settings not fount in environment configuration.")
host = config['host']
port = int(config['port'])
username = config['username']
password = config['password']
tar_folder = config['tar_remotepath']
tar_file = os.path.split(tar_folder)[1]
branch = env.env_prop['switchppVersion']
platform = getattr(getattr(env.switch[1], 'hw', None), 'snmp_path', None)
tar_remotepath = tar_folder.format(**locals())
if not os.path.exists(path):
os.makedirs(path)
tar_localpath = os.path.join(path, tar_file)
mib_path_tmp = os.path.join(path, 'tmp')
if not os.path.exists(mib_path_tmp):
os.makedirs(mib_path_tmp)
mib_path_txt = os.path.join(path, 'txt')
if not os.path.exists(mib_path_txt):
os.makedirs(mib_path_txt)
get_remote_file(host, port, username, password, tar_remotepath, tar_localpath)
untar_file(tar_localpath, mib_path_txt)
convert_to_py(mib_path_txt, path)
| 29.992565 | 104 | 0.649603 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,353 | 0.415592 |
3266f7d31cc045815dafabe76a68d2f3cebde4da | 6,843 | py | Python | cadence/apps/backend/views.py | BitLooter/Cadence | 3adbe51f042120f7154711a58a614ce0e8b3664b | [
"BSD-2-Clause"
]
| null | null | null | cadence/apps/backend/views.py | BitLooter/Cadence | 3adbe51f042120f7154711a58a614ce0e8b3664b | [
"BSD-2-Clause"
]
| null | null | null | cadence/apps/backend/views.py | BitLooter/Cadence | 3adbe51f042120f7154711a58a614ce0e8b3664b | [
"BSD-2-Clause"
]
| null | null | null | import json
import logging
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound
from django.core.exceptions import ObjectDoesNotExist
from django.views.decorators.csrf import csrf_exempt # See note below on saveplaylist
import models
# Set up logging
logger = logging.getLogger("apps")
# View function decorators
##########################
def log_request(f):
"""Records request info to the log file"""
def wrapper(*args, **kwargs):
request = args[0]
# Display simpler message if there are no view parameters
if kwargs == {}:
message = "{} request from {}".format(f.__name__, request.get_host())
elif "item_id" in kwargs:
message = "{} (#{}) request from {}".format(f.__name__, kwargs["item_id"], request.get_host())
else:
message = "{} {} request from {}".format(f.__name__, repr(kwargs), request.get_host())
logger.info(message)
return f(*args, **kwargs)
wrapper.__doc__ = f.__doc__
return wrapper
def handle_not_found(f):
"""
For views that request a specific object (e.g. a playlist), return a 404
page and log an error if the object was not found.
Assumes the object being looked for is passed as a kwarg named 'item_id'.
If this view does not fit this pattern, you will not be able to handle
404 errors for it with this decorator.
"""
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except ObjectDoesNotExist as e:
print e.message
error = "{} (#{})".format(e.message, kwargs["item_id"])
logger.error(error)
return HttpResponseNotFound(error, mimetype="text/plain")
wrapper.__doc__ = f.__doc__
return wrapper
# View functions
################
#TODO: check for external errors like database access problems
def playlists(request):
"""
Generic view for /data/playlists/, choosing a view function for the request type.
Saves a playlist or returns a list of them, depending on request type. A GET
request will return a list of available playlists in JSON format; a POST request
saves a new playlist to the server, using the POST data (also in JSON format).
Does not actually do anything itself, but rather calls the correct function for
the task.
"""
# If POST, we're saving a playlist
if request.method == "POST":
return saveplaylist(request)
# Otherwise, default behavior is to return a list of playlists
else:
return playlistlist(request)
@log_request
def playlistlist(request):
"""View method for list of playlists. Returns list of playlists in JSON."""
lists = models.Playlist.getPlaylistList()
return json_response(lists)
@log_request
def saveplaylist(request):
"""
View method for saving a playlist (POST).
Saves a new playlist to the database. Data is in JSON format, and is expected
to take the form of a dict with 'name' and 'tracks' fields, name being a
string and tracks being a list of track IDs. Example::
{
"name": "Top ten Tuvian throat singing rap singles"
"tracks": [553, 1490, 6643, 1186, 6689, 91, 642, 11, 853, 321]
}
"""
try:
info = json.loads(request.raw_post_data)
except ValueError:
response = HttpResponseBadRequest("Error: POST data is not valid JSON", mimetype="text/plain")
logger.exception("Not saving playlist from {}, invalid JSON in request - POST data: '{}'".format(request.get_host(), request.raw_post_data))
else:
# Sanity check on the data - name is a (unicode) string, tracks are all integers
if "name" not in info or "tracks" not in info:
logger.debug("data check")
response = HttpResponseBadRequest("Error: Not enough parameters were passed", mimetype="text/plain")
logger.error("Not saving playlist from {}, not enough information given - POST data: '{}'".format(request.get_host(), request.raw_post_data))
elif (type(info["name"]) != unicode) or not (all(type(t) == int for t in info["tracks"])):
response = HttpResponseBadRequest("Error: Given data is invalid", mimetype="text/plain")
logger.error("Not saving playlist from {}, given data is invalid - POST data: '{}'".format(request.get_host(), request.raw_post_data))
else:
newID = models.Playlist.savePlaylist(info["tracks"], info["name"])
# TODO: return information about the new playlist
response = HttpResponse(json.dumps("Playlist saved to {}".format(info["name"])), status=201, mimetype="text/plain")
response["Location"] = "/cadence/data/playlist/{}/".format(newID)
logger.info("Playlist from {} successfully saved as #{}".format(request.get_host(), newID))
return response
@handle_not_found
@log_request
def playlist_tracks(request, item_id):
"""View method for playlist tracklist. Returns playlist matching ID."""
return json_response(models.Playlist.getPlaylist(item_id))
@log_request
def media(request):
"""
View method for all media. Returns information on every track in the library.
Note that for very large libraries, this could produce a great amount of data
and load slowly on the client (not to mention "Holy crap Frank, how'd we go over
our data limit again this month?"). Therefore, this view may be disabled depending
on the current site settings.
"""
return json_response(models.Media.getFullLibrary())
@handle_not_found
@log_request
def media_details(request, item_id):
"""View method for details on a specific media item"""
return json_response(models.Media.getDetails(item_id))
@log_request
def albums(request):
"""View method for albums list. Returns list of albums in the library."""
return json_response(models.Album.getAlbums())
@handle_not_found
@log_request
def album_tracks(request, item_id):
"""View method for album tracklist. Returns media for album matching ID."""
return json_response(models.Album.getAlbumTracks(item_id))
@log_request
def artists(request):
"""View method for artists list. Returns list of artists in the library."""
return json_response(models.Artist.getArtists())
@handle_not_found
@log_request
def artist_tracks(request, item_id):
"""View method for artist tracklist. Returns media for artist matching ID."""
return json_response(models.Artist.getArtistTracks(item_id))
# Utility methods
#################
def json_response(output):
"""Returns an HTTP Response with the data in output as the content in JSON format"""
return HttpResponse(json.dumps(output), mimetype="application/json")
| 34.736041 | 153 | 0.672366 | 0 | 0 | 0 | 0 | 3,981 | 0.581762 | 0 | 0 | 3,447 | 0.503726 |
32675e661c420861aca3a72ce984ac5043cdeab4 | 2,868 | py | Python | elexon_api/utils.py | GiorgioBalestrieri/elexon_api_tool | 5b271e9d4a52dec5585a232833a699b8392ee6b0 | [
"MIT"
]
| 4 | 2019-06-07T11:14:46.000Z | 2021-04-01T14:15:14.000Z | elexon_api/utils.py | GiorgioBalestrieri/elexon_api_tool | 5b271e9d4a52dec5585a232833a699b8392ee6b0 | [
"MIT"
]
| null | null | null | elexon_api/utils.py | GiorgioBalestrieri/elexon_api_tool | 5b271e9d4a52dec5585a232833a699b8392ee6b0 | [
"MIT"
]
| 6 | 2019-02-28T20:24:26.000Z | 2021-03-30T18:08:23.000Z | import os
from pathlib import Path
import pandas as pd
from collections import defaultdict
from typing import Dict, List
from .config import REQUIRED_D, API_KEY_FILENAME
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def get_required_parameters(service_code: str) -> List[str]:
"""Get list of required parameters for service."""
return REQUIRED_D[service_code]
def _get_path_to_module() -> Path:
"""Get path to this module."""
return Path(os.path.realpath(__file__)).parent
def get_api_key_path(filename=API_KEY_FILENAME) -> Path:
"""Load api key."""
path_to_dir = _get_path_to_module()
return path_to_dir / filename
class ElexonAPIException(Exception):
pass
def extract_df(r_dict: dict) -> pd.DataFrame:
"""Extract DataFrame from dictionary.
Parameters
----------
r_dict
Obtained from response through xmltodict.
"""
r_body = r_dict['responseBody']
r_items_list = r_body['responseList']['item']
try:
df_items = pd.DataFrame(r_items_list)
except Exception as e:
logger.warning(f"Failed to create DataFrame.", exc_info=True)
try:
df_items = pd.DataFrame(r_items_list, index=[0])
except Exception as e:
logger.error("Failed to create DataFrame.")
raise e
return df_items
def extract_df_by_record_type(r_dict: dict) -> Dict[str,pd.DataFrame]:
content: List[dict] = r_dict['responseBody']['responseList']['item']
records_d = split_list_of_dicts(content, 'recordType')
return {k: pd.DataFrame(l) for k,l in records_d.items()}
def split_list_of_dicts(dict_list: List[dict], key: str) -> Dict[str,List[dict]]:
"""Split list of dictionaries into multiples lists based on a specific key.
Output lists are stored in a dicionary with the value used as key.
Example:
>>> dict_list = [
{
"recordType": "a",
"foo": 1,
"bar": 1,
},
{
"recordType": "b",
"foo": 2,
"bar": 2,
},
{
"recordType": "b",
"foo": 3,
"bar": 3,
}
]
>>> split_list_of_dicts(dict_list, 'recordType')
{
"a": [
{
"recordType": "a",
"foo": 1,
"bar": 1,
},
],
"b": [
{
"recordType": "b",
"foo": 2,
"bar": 2,
},
{
"recordType": "b",
"foo": 3,
"bar": 3,
}
]
}
]
"""
result = defaultdict(list)
for d in dict_list:
result[d[key]].append(d)
return result | 25.380531 | 81 | 0.540098 | 45 | 0.01569 | 0 | 0 | 0 | 0 | 0 | 0 | 1,396 | 0.48675 |
326881582afe0e7d4f36578fa52df6c3b487641d | 1,608 | py | Python | relative_connectivity_of_subgraphs.py | doberse/RRI | e2fdc085d8040efc230a25eec670dd6839cbf1f7 | [
"MIT"
]
| null | null | null | relative_connectivity_of_subgraphs.py | doberse/RRI | e2fdc085d8040efc230a25eec670dd6839cbf1f7 | [
"MIT"
]
| null | null | null | relative_connectivity_of_subgraphs.py | doberse/RRI | e2fdc085d8040efc230a25eec670dd6839cbf1f7 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
import networkx as nx
import pandas as pd
#Other nodes connected by one node
r=open('input_data/BC-related_RRI_network.txt')
ll=r.readlines()
r.close()
rna_pairs=[]
node_to_nodes={}
for l in ll:
ws=l.strip().split('\t')
qx=sorted(ws[0:2])
rna_pairs.append((qx[0],qx[1]))
for i in [0,1]:
if i==0:
j=1
else:
j=0
if qx[i] not in node_to_nodes:
node_to_nodes[qx[i]]=[qx[j]]
else:
node_to_nodes[qx[i]].append(qx[j])
#Dictionary of Node No.
r=open('input_data/RRI_node.csv')
r.readline()
no2node={}
for l in r:
ws=l.strip().split(',')
no2node[ws[0]]='~'.join(ws[1:7])
r.close()
#Sort nodes by node degree
node_degree={}
for k in node_to_nodes:
node_degree[k]=len(node_to_nodes[k])
df=pd.DataFrame(node_degree,index=['Degree'])
df=df.sort_values(by='Degree',axis=1,ascending=False)
nodes=df.columns.values
#Compute the relative conectivity of subgraphs
G=nx.Graph()
node_G=[]
w=open('RC_in_BC-related_RRI_network.csv','w')
w.write('Node,No.,Relative connectivity\n')
k=0
lim=len(nodes)
while k<lim:
node_key=nodes[k]
node_G.append(node_key)
G.add_node(node_key)#Add the node in subgraphs
for node in node_G:
if node in set(node_to_nodes[node_key]):
G.add_edge(node_key,node)#Add the edge in subgraphs
largest_components=max(nx.connected_components(G),key=len)
k+=1
w.write(no2node[node_key]+','+str(k)+','+str(len(largest_components)/float(len(node_G)))+'\n')
w.close()
| 26.360656 | 99 | 0.625622 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 382 | 0.237562 |
326bc9a28ede548053a0104238484ec204f3ccb0 | 1,518 | py | Python | macdaily/cmd/install.py | JarryShaw/MacDaily | 853b841dd1f1f7e6aae7bf2c305ff008bc76055c | [
"BSD-3-Clause"
]
| 10 | 2018-09-20T19:57:56.000Z | 2021-11-14T18:28:10.000Z | macdaily/cmd/install.py | JarryShaw/jsdaily | 3ca7aa7c75a12dc08ab44f78af2b089e1ed41d3d | [
"BSD-3-Clause"
]
| 2 | 2020-05-31T08:49:47.000Z | 2021-12-28T16:57:42.000Z | macdaily/cmd/install.py | JarryShaw/jsdaily | 3ca7aa7c75a12dc08ab44f78af2b089e1ed41d3d | [
"BSD-3-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
import abc
from macdaily.cls.command import Command
from macdaily.util.tools.print import print_info
class InstallCommand(Command):
@property
def cmd(self):
return 'install'
@property
def act(self):
return ('install', 'installed', 'installed')
@property
def job(self):
return ('installation', 'installation')
@property
def ignored(self):
return NotImplemented
@property
def notfound(self):
return NotImplemented
def _pkg_args(self, namespace):
"""Return if there's packages for main process."""
self._merge_packages(namespace)
self._parse_args(namespace)
self._pkgs = list()
self._fail = list()
return bool(self._packages)
def _run_proc(self):
self._pkgs = list()
self._fail = list()
for path in self._exec:
text = f'Using {self.name} executable {path!r}'
print_info(text, self._file, redirect=self._qflag)
self._var__temp_pkgs = self._packages # pylint: disable=attribute-defined-outside-init
if self._check_confirm(path):
self._proc_install(path)
else:
text = f'No {self.desc[1]} to install for executable {path!r}'
print_info(text, self._file, redirect=self._qflag)
self._proc_fixmissing(path)
self._proc_cleanup()
@abc.abstractmethod
def _proc_install(self, path):
pass
| 25.3 | 99 | 0.607378 | 1,388 | 0.914361 | 0 | 0 | 402 | 0.264822 | 0 | 0 | 284 | 0.187088 |
326dd27e7ff223645c2d0bf5d397fdea5ed20af2 | 2,632 | py | Python | src/piotr/cmdline/fs.py | orangecms/piotr | f892ce6eaaa08ea81eb01943a388b64fbf3ccc44 | [
"MIT"
]
| 47 | 2021-07-02T08:39:02.000Z | 2021-11-08T22:21:39.000Z | src/piotr/cmdline/fs.py | orangecms/piotr | f892ce6eaaa08ea81eb01943a388b64fbf3ccc44 | [
"MIT"
]
| 2 | 2021-07-08T09:25:30.000Z | 2021-07-12T10:06:51.000Z | src/piotr/cmdline/fs.py | orangecms/piotr | f892ce6eaaa08ea81eb01943a388b64fbf3ccc44 | [
"MIT"
]
| 5 | 2021-07-08T08:29:17.000Z | 2021-10-18T13:35:11.000Z | """
FS commandline module.
Allows to:
- list host filesystems
- remove a specific host filesystem
- add a specific host filesystem
"""
from os.path import basename
from piotr.cmdline import CmdlineModule, module, command
from piotr.user import UserDirectory as ud
from piotr.util import confirm
@module('fs', 'List, add, remove Piotr host filesystems')
class FsModule(CmdlineModule):
def __init__(self):
super().__init__()
@command('List available host filesystems')
def list(self, options):
"""
List available FSs.
"""
self.title(' Installed host filesystems:')
print('')
count = 0
for fs in ud.get().getHostFilesystems():
fs_line = (self.term.bold + '{fs:<40}' + self.term.normal + \
'{extra:<40}').format(
fs=' > %s' % fs['file'],
extra='(version {version}, platform: {platform}, cpu: {cpu} ({endian}), type: {fstype})'.format(
version=fs['version'],
platform=fs['platform'],
cpu=fs['cpu'],
fstype=fs['type'],
endian='little-endian' if fs['endian']=='little' else 'big-endian'
)
)
print(fs_line)
count += 1
print('')
print(' %d filesystem(s) available' % count)
print('')
@command('Remove a specific filesystem', ['fs name'])
def remove(self, options):
"""
Remove filesystem from our repository.
Expects options[0] to be the name of the target filesystem to remove.
"""
if len(options) >= 1:
# Ask for confirm
if confirm('Are you sure to remove this filesystem'):
# Remove kernel by name
if ud.get().removeHostFs(options[0]):
print('Filesystem %s successfully removed.' % options[0])
else:
self.error('An error occurred while removing host filesystem.')
else:
self.important(' You must provide a host filesystem name to remove.')
@command('Add a specific host filesystem', ['path'])
def add(self, options):
"""
Add kernel to our kernel repository.
"""
if len(options) >= 1:
if ud.get().addHostFs(options[0]):
print('Host filesystem successfully added to our registry.')
else:
self.error('An error occurred while importing host filesystem.')
else:
self.important(' You must provide a filesystem file to add.')
| 32.9 | 112 | 0.549392 | 2,275 | 0.864033 | 0 | 0 | 2,333 | 0.886062 | 0 | 0 | 1,165 | 0.442461 |
32717c3bd131867ffad78e96d71e4ee21ce9b1c6 | 61 | py | Python | mct_logging/src/mct_logging/__init__.py | iorodeo/mct | fa8b85f36533c9b1486ca4f6b0c40c3daa6f4e11 | [
"Apache-2.0"
]
| null | null | null | mct_logging/src/mct_logging/__init__.py | iorodeo/mct | fa8b85f36533c9b1486ca4f6b0c40c3daa6f4e11 | [
"Apache-2.0"
]
| null | null | null | mct_logging/src/mct_logging/__init__.py | iorodeo/mct | fa8b85f36533c9b1486ca4f6b0c40c3daa6f4e11 | [
"Apache-2.0"
]
| null | null | null | import tracking_pts_logger_master
import tracking_pts_logger
| 20.333333 | 33 | 0.934426 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
32723b5595559318393e20a40362e1d61e41c415 | 3,133 | py | Python | singlecellmultiomics/modularDemultiplexer/demultiplexModules/scartrace.py | zztin/SingleCellMultiOmics | d3035c33eb1375f0703cc49537417b755ad8a693 | [
"MIT"
]
| 17 | 2019-05-21T09:12:16.000Z | 2022-02-14T19:26:58.000Z | singlecellmultiomics/modularDemultiplexer/demultiplexModules/scartrace.py | zztin/SingleCellMultiOmics | d3035c33eb1375f0703cc49537417b755ad8a693 | [
"MIT"
]
| 70 | 2019-05-20T08:08:45.000Z | 2021-06-22T15:58:01.000Z | singlecellmultiomics/modularDemultiplexer/demultiplexModules/scartrace.py | zztin/SingleCellMultiOmics | d3035c33eb1375f0703cc49537417b755ad8a693 | [
"MIT"
]
| 7 | 2020-04-09T15:11:12.000Z | 2022-02-14T15:23:31.000Z | from singlecellmultiomics.modularDemultiplexer.baseDemultiplexMethods import UmiBarcodeDemuxMethod, NonMultiplexable
# ScarTrace
class ScartraceR1(UmiBarcodeDemuxMethod):
def __init__(self, barcodeFileParser, **kwargs):
self.barcodeFileAlias = 'scartrace'
UmiBarcodeDemuxMethod.__init__(
self,
umiRead=0,
umiStart=0,
umiLength=0,
barcodeRead=0,
barcodeStart=0,
barcodeLength=8,
barcodeFileAlias=self.barcodeFileAlias,
barcodeFileParser=barcodeFileParser,
**kwargs)
self.shortName = 'SCARC8R1'
self.longName = 'Scartrace, CB: 8bp'
self.description = '384 well format. Scar amplicon demultiplexing, cell barcode in read 1'
self.autoDetectable = True
class ScartraceR2(UmiBarcodeDemuxMethod):
def __init__(self, barcodeFileParser, **kwargs):
self.barcodeFileAlias = 'scartrace'
UmiBarcodeDemuxMethod.__init__(
self,
umiRead=0,
umiStart=0,
umiLength=0,
barcodeRead=1,
barcodeStart=0,
barcodeLength=8,
barcodeFileAlias=self.barcodeFileAlias,
barcodeFileParser=barcodeFileParser,
**kwargs)
self.shortName = 'SCARC8R2'
self.longName = 'Scartrace, CB: 8bp'
self.description = '384 well format. Scar amplicon demultiplexing, cell barcode in read 2'
self.autoDetectable = True
def demultiplex(self, records, **kwargs):
if kwargs.get(
'probe') and not records[0].sequence.startswith('CCTTGAACTTCTGGTTGTAG'):
raise NonMultiplexable
taggedRecords = UmiBarcodeDemuxMethod.demultiplex(
self, records, **kwargs)
return taggedRecords
class ScartraceR2RP4(UmiBarcodeDemuxMethod):
def __init__(self, barcodeFileParser, **kwargs):
self.barcodeFileAlias = 'scartrace'
UmiBarcodeDemuxMethod.__init__(
self,
umiRead=0,
umiStart=0,
umiLength=0,
barcodeRead=1,
barcodeStart=0,
barcodeLength=8,
barcodeFileAlias=self.barcodeFileAlias,
barcodeFileParser=barcodeFileParser,
random_primer_end=False,
random_primer_read=0,
random_primer_length=4,
**kwargs)
self.shortName = 'SCARC8R2R4'
self.longName = 'Scartrace, CB: 8bp, with 4bp random sequence in read 1'
self.description = '384 well format. Scar amplicon demultiplexing, cell barcode in read , 4bp random sequence in R1'
self.autoDetectable = True
def demultiplex(self, records, **kwargs):
if kwargs.get(
'probe') and not records[0].sequence[4:].startswith('CCTTGAACTTCTGGTTGTAG'):
raise NonMultiplexable
taggedRecords = UmiBarcodeDemuxMethod.demultiplex(
self, records, **kwargs)
return taggedRecords
| 34.054348 | 125 | 0.605171 | 2,976 | 0.949888 | 0 | 0 | 0 | 0 | 0 | 0 | 470 | 0.150016 |
3272a27a8fc6fa3c964e19b20bd692f8755a0dee | 6,151 | py | Python | tests/models.py | intellineers/django-bridger | ed097984a99df7da40a4d01bd00c56e3c6083056 | [
"BSD-3-Clause"
]
| 2 | 2020-03-17T00:53:23.000Z | 2020-07-16T07:00:33.000Z | tests/models.py | intellineers/django-bridger | ed097984a99df7da40a4d01bd00c56e3c6083056 | [
"BSD-3-Clause"
]
| 76 | 2019-12-05T01:15:57.000Z | 2021-09-07T16:47:27.000Z | tests/models.py | intellineers/django-bridger | ed097984a99df7da40a4d01bd00c56e3c6083056 | [
"BSD-3-Clause"
]
| 1 | 2020-02-05T15:09:47.000Z | 2020-02-05T15:09:47.000Z | from datetime import date, time
from django.contrib.postgres.fields import ArrayField
from django.db import models
from django.utils import timezone
from django_fsm import FSMField, transition
from rest_framework.reverse import reverse
from simple_history.models import HistoricalRecords
from bridger.buttons import ActionButton
from bridger.display import FieldSet, InstanceDisplay, Section
from bridger.enums import RequestType
from bridger.search import register as search_register
from bridger.tags import TagModelMixin
@search_register(endpoint="modeltest-list")
class ModelTest(TagModelMixin, models.Model):
@classmethod
def search_for_term(cls, search_term, request=None):
return (
cls.objects.all()
.annotate(
_search=models.functions.Concat(
models.F("char_field"), models.Value(" "), models.F("text_field"), output_field=models.CharField(),
)
)
.annotate(_repr=models.F("char_field"))
)
STATUS1 = "status1"
STATUS2 = "status2"
STATUS3 = "status3"
status_choices = ((STATUS1, "Status1"), (STATUS2, "Status2"), (STATUS3, "Status3"))
MOVE_BUTTON1 = ActionButton(
method=RequestType.PATCH,
icon="wb-icon-thumbs-up-full",
key="move1",
label="Move1",
action_label="Move1",
description_fields="<p>We will move1 this model.</p>",
instance_display=InstanceDisplay(sections=(
Section(fields=FieldSet(fields=("char_field", "integer_field"))),)
),
identifiers=("tests:modeltest",),
)
MOVE_BUTTON2 = ActionButton(
method=RequestType.PATCH,
icon="wb-icon-thumbs-up-full",
key="move2",
label="Move2",
action_label="Move2",
description_fields="<p>We will move2 this model.</p>",
instance_display=InstanceDisplay(sections=(
Section(fields=FieldSet(fields=("char_field", "integer_field"))),)
),
identifiers=("tests:modeltest",),
)
# Text
char_field = models.CharField(max_length=255, verbose_name="Char", help_text="This is the help text of a char field.",)
text_field = models.TextField(null=True, blank=True)
# Numbers
integer_field = models.IntegerField(verbose_name="Integer")
float_field = models.FloatField()
decimal_field = models.DecimalField(decimal_places=4, max_digits=7)
percent_field = models.FloatField()
# Date and Time
datetime_field = models.DateTimeField(verbose_name="DateTime")
datetime_field1 = models.DateTimeField(verbose_name="DateTime 1")
date_field = models.DateField()
time_field = models.TimeField()
# Boolean
boolean_field = models.BooleanField()
star_rating = models.PositiveIntegerField()
# Choice
choice_field = models.CharField(max_length=64, choices=(("a", "A"), ("b", "B")), default="a")
# Status
status_field = FSMField(default=STATUS1, choices=status_choices, verbose_name="Status")
# Files
image_field = models.ImageField(null=True, blank=True)
file_field = models.FileField(null=True, blank=True)
history = HistoricalRecords()
def get_tag_detail_endpoint(self):
return reverse("modeltest-detail", args=[self.id])
def get_tag_representation(self):
return self.char_field
@transition(
field=status_field, source=[STATUS1], target=STATUS2, custom={"_transition_button": MOVE_BUTTON1},
)
def move1(self):
"""Moves the model from Status1 to Status2"""
pass
@transition(
field=status_field, source=[STATUS1, STATUS2], target=STATUS3, custom={"_transition_button": MOVE_BUTTON2},
)
def move2(self):
"""Moves the model from Status1 or Status2 to Status3"""
pass
@classmethod
def get_endpoint_basename(cls):
return "modeltest"
@classmethod
def get_endpoint(cls):
return "modeltest-list"
@classmethod
def get_representation_endpoint(cls):
return "modeltestrepresentation-list"
@classmethod
def get_representation_value_key(cls):
return "id"
@classmethod
def get_representation_label_key(cls):
return "{{char_field}}"
class Meta:
verbose_name = "Test Model"
verbose_name_plural = "Test Models"
@search_register(endpoint="relatedmodeltest-list")
class RelatedModelTest(TagModelMixin, models.Model):
@classmethod
def search_for_term(cls, request=None):
return cls.objects.all().annotate(_search=models.F("char_field")).annotate(_repr=models.F("char_field"))
text_json = models.JSONField(default=list, blank=True, null=True)
text_markdown = models.TextField(default="")
model_test = models.ForeignKey(
to="tests.ModelTest",
related_name="related_models",
null=True,
blank=True,
on_delete=models.CASCADE,
verbose_name="Model Test",
)
model_tests = models.ManyToManyField(
to="tests.ModelTest", related_name="related_models_m2m", blank=True, verbose_name="Model Tests1",
)
char_field = models.CharField(max_length=255, verbose_name="Char")
list_field = ArrayField(base_field=models.CharField(max_length=255), null=True, blank=True, default=list)
history = HistoricalRecords()
def get_tag_detail_endpoint(self):
return reverse("relatedmodeltest-detail", args=[self.id])
def get_tag_representation(self):
return self.char_field
def __str__(self):
return self.char_field
@property
def upper_char_field(self):
return self.char_field.upper()
@classmethod
def get_endpoint_basename(cls):
return "relatedmodeltest"
@classmethod
def get_representation_endpoint(cls):
return "relatedmodeltestrepresentation-list"
@classmethod
def get_representation_value_key(cls):
return "id"
@classmethod
def get_representation_label_key(cls):
return "{{char_field}}"
class Meta:
verbose_name = "Related Model Test"
verbose_name_plural = "Related Model Tests"
| 31.22335 | 123 | 0.677451 | 5,524 | 0.898065 | 0 | 0 | 5,619 | 0.91351 | 0 | 0 | 1,070 | 0.173955 |
327639bba2a2aa36c47d30fbf67b64ee714db74b | 2,975 | py | Python | RNAstructure_Source/RNAstructure_python_interface/Error_handling.py | mayc2/PseudoKnot_research | 33e94b84435d87aff3d89dbad970c438ac173331 | [
"MIT"
]
| null | null | null | RNAstructure_Source/RNAstructure_python_interface/Error_handling.py | mayc2/PseudoKnot_research | 33e94b84435d87aff3d89dbad970c438ac173331 | [
"MIT"
]
| null | null | null | RNAstructure_Source/RNAstructure_python_interface/Error_handling.py | mayc2/PseudoKnot_research | 33e94b84435d87aff3d89dbad970c438ac173331 | [
"MIT"
]
| null | null | null | #automated error checking for RNAstructure python interface
from __future__ import print_function
import inspect
from functools import wraps
from collections import defaultdict
debug = False
class StructureError(Exception): pass
class RNAstructureInternalError(Exception):pass
lookup_exceptions = defaultdict(lambda:RuntimeError,
{ 1:IOError,
2:IOError,
3:IndexError,
4:IndexError,
5:EnvironmentError,
6:StructureError,
7:StructureError,
8:StructureError,
9:StructureError,
10:ValueError,
11:ValueError,
12:ValueError,
13:IOError,
14:RNAstructureInternalError,
15:ValueError,
16:ValueError,
17:ValueError,
18:ValueError,
19:ValueError,
20:ValueError,
21:RNAstructureInternalError,
22:RNAstructureInternalError,
23:ValueError,
24:ValueError,
25:ValueError,
26:ValueError
})
def check_for_errors(method):
@wraps(method)
def RNAstructure_error_checker(self,*args,**kwargs):
if debug: print ("checking for errors in %s" % method.__name__)
ret = method(self,*args,**kwargs)
error = self.GetErrorCode()
self.ResetError()
if error != 0:
raise lookup_exceptions[error]("Error in %s: " % method.__name__ +
self.GetErrorMessage(error))
return ret
return RNAstructure_error_checker
def check_for_init_errors(method):
@wraps(method)
def RNAstructure_error_checker(self,*args):
if debug: print ("checking for errors in %s" % method.__name__)
ret = method(self,*args)
error = self.GetErrorCode()
if error != 0:
raise RuntimeError("Error in call to %s.%s: " % (self.__name__,method.__name__) +
self.GetErrorMessage(error))
return ret
return RNAstructure_error_checker
def is_init(method):
result = inspect.ismethod(method) and method.__name__=="__init__"
if inspect.ismethod(method):
pass
return result
def not_excluded(method):
excluded = ["__repr__","__setattr__","__getattr__","__str__","__init__","<lambda>","swig_repr",
"GetErrorCode","GetErrorMessage","GetErrorMessageString","ResetError","fromFile","fromString"]
result = inspect.ismethod(method) and method.__name__ not in excluded
if inspect.ismethod(method):
if debug: print ("checking if", method.__name__ , "should be excluded: ",result)
return result
def decorate_methods(decorator,methodtype):
def decorate(cls):
for attr in inspect.getmembers(cls, methodtype):
if debug: print ("decorating %s!" % attr[0])
setattr(cls, attr[0], decorator(getattr(cls, attr[0])))
return cls
return decorate
| 35.416667 | 110 | 0.621176 | 84 | 0.028235 | 0 | 0 | 829 | 0.278655 | 0 | 0 | 379 | 0.127395 |
3276b79a61cf27161c545de376944d5851538c10 | 52,691 | py | Python | Src/si_figs.py | jomimc/FoldAsymCode | 1896e5768e738bb5d1921a3f4c8eaf7f66c06be9 | [
"MIT"
]
| 1 | 2020-10-07T14:24:06.000Z | 2020-10-07T14:24:06.000Z | Src/si_figs.py | jomimc/FoldAsymCode | 1896e5768e738bb5d1921a3f4c8eaf7f66c06be9 | [
"MIT"
]
| null | null | null | Src/si_figs.py | jomimc/FoldAsymCode | 1896e5768e738bb5d1921a3f4c8eaf7f66c06be9 | [
"MIT"
]
| null | null | null | from collections import defaultdict, Counter
from itertools import product, permutations
from glob import glob
import json
import os
from pathlib import Path
import pickle
import sqlite3
import string
import sys
import time
import matplotlib as mpl
from matplotlib import colors
from matplotlib import pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.lines import Line2D
import matplotlib.patches as mpatches
from multiprocessing import Pool
import numpy as np
import pandas as pd
from palettable.colorbrewer.qualitative import Paired_12
from palettable.colorbrewer.diverging import PuOr_5, RdYlGn_6, PuOr_10, RdBu_10
from palettable.scientific.diverging import Cork_10
from scipy.spatial import distance_matrix, ConvexHull, convex_hull_plot_2d
from scipy.stats import linregress, pearsonr, lognorm
import seaborn as sns
import svgutils.compose as sc
import asym_io
from asym_io import PATH_BASE, PATH_ASYM, PATH_ASYM_DATA
import asym_utils as utils
import folding_rate
import paper_figs
import structure
PATH_FIG = PATH_ASYM.joinpath("Figures")
PATH_FIG_DATA = PATH_FIG.joinpath("Data")
custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
c_helix = custom_cmap[2]
c_sheet = custom_cmap[10]
col = [c_helix, c_sheet, "#CB7CE6", "#79C726"]
####################################################################
### SI Figures
####################################################################
### FIG 1
def fig1(df, nx=3, ny=3, N=50):
    """SI Fig 1: secondary-structure probability vs. sequence distance from the
    N and C termini, for nine of the most populated SCOP folds."""
fig, ax = plt.subplots(nx,ny, figsize=(12,12))
ax = ax.reshape(ax.size)
fig.subplots_adjust(hspace=.5)
lbls = ['Helix', 'Sheet', 'Coil', 'Disorder']
cat = 'HS.D'
scop_desc = {row[1]:row[2] for row in pd.read_csv(PATH_BASE.joinpath('SCOP/scop-des-latest.txt')).itertuples()}
CF_count = sorted(df.CF.value_counts().items(), key=lambda x:x[1], reverse=True)[1:]
bold_idx = [0, 1, 2, 6, 8]
for i in range(nx*ny):
cf_id, count = CF_count[i]
countN, countC = utils.pdb_end_stats_disorder_N_C(df.loc[df.CF==cf_id], N=N, s1='SEQ_PDB2', s2='SS_PDB2')
base = np.zeros(len(countN['S']), dtype=float)
Yt = np.array([[sum(p.values()) for p in countN[s]] for s in cat]).sum(axis=0)
X = np.arange(base.size)
for j, s in enumerate(cat):
YN = np.array([sum(p.values()) for p in countN[s]])
YC = np.array([sum(p.values()) for p in countC[s]])
ax[i].plot(YN/Yt, '-', c=col[j], label=f"{s} N")
ax[i].plot(YC/Yt, ':', c=col[j], label=f"{s} C")
if i in bold_idx:
ax[i].set_title(f"{scop_desc[int(cf_id)][:40]}\nTotal sequences: {count}", fontweight='bold')
else:
ax[i].set_title(f"{scop_desc[int(cf_id)][:40]}\nTotal sequences: {count}")
ax[i].set_xlabel('Sequence distance from ends')
if not i%3:
ax[i].set_ylabel('Secondary\nstructure\nprobability')
handles = [Line2D([0], [0], ls=ls, c=c, label=l) for ls, c, l in zip(['-', '--'], ['k']*2, ['N', 'C'])] + \
[Line2D([0], [0], ls='-', c=c, label=l) for l, c in zip(lbls, col)]
ax[1].legend(handles=handles, bbox_to_anchor=(1.40, 1.45), frameon=False,
ncol=6, columnspacing=1.5, handlelength=2.0)
fig.savefig(PATH_FIG.joinpath("si1.pdf"), bbox_inches='tight')
####################################################################
### FIG 2
def fig2():
pfdb = asym_io.load_pfdb()
fig, ax = plt.subplots(1,2, figsize=(10,5))
fig.subplots_adjust(wspace=0.3)
X1 = np.log10(pfdb.loc[pfdb.use, 'L'])
X2 = np.log10(pfdb.loc[pfdb.use, 'CO'])
Y = pfdb.loc[pfdb.use, 'log_kf']
sns.regplot(X1, Y, ax=ax[0])
sns.regplot(X2, Y, ax=ax[1])
print(pearsonr(X1, Y))
print(pearsonr(X2, Y))
ax[0].set_ylabel(r'$\log_{10} k_f$')
ax[1].set_ylabel(r'$\log_{10} k_f$')
ax[0].set_xlabel(r'$\log_{10}$ Sequence Length')
ax[1].set_xlabel(r'$\log_{10}$ Contact Order')
fs = 14
for i, b in zip([0,1], list('ABCDEFGHI')):
ax[i].text( -0.10, 1.05, b, transform=ax[i].transAxes, fontsize=fs)
fig.savefig(PATH_FIG.joinpath("si2.pdf"), bbox_inches='tight')
####################################################################
### FIG 3
def fig3(pdb, Y='S_ASYM'):
LO = folding_rate.get_folding_translation_rates(pdb.copy(), which='lo')
HI = folding_rate.get_folding_translation_rates(pdb.copy(), which='hi')
fig, ax = plt.subplots()
lbls = ['Fit', r"$95\% CI$", r"$95\% CI$"]
for i, d in enumerate([pdb, LO, HI]):
print(f"{i}: frac R less than 0 = {utils.R_frac_1(d)}")
print(f"{i}: Euk frac (.1 < R < 10) = {utils.R_frac_2(d, k=5)}")
print(f"{i}: Prok frac (.1 < R < 10) = {utils.R_frac_2(d, k=10)}")
print(f"{i}: frac R faster than 'speed-limit' = {utils.R_frac_3(d)}")
print(f"{i}: frac R slower than 20 minutes = {utils.R_frac_4(d)}")
print()
sns.distplot(d['REL_RATE'], label=lbls[i], color=col[i])
ax.legend(loc='best', frameon=False)
ax.set_xlim(-6, 6)
ax.set_xlabel(r'$\log_{10}R$')
ax.set_ylabel('Density')
fig.savefig(PATH_FIG.joinpath("si3.pdf"), bbox_inches='tight')
####################################################################
### FIG 4
def fig4(pdb, Y='S_ASYM'):
    """SI Fig 4: asymmetry distributions and N-terminal enrichment across
    log10(R) deciles for the lower/upper 95% CI folding-rate fits, plus the
    fit-coefficient (A, B) maps."""
LO = folding_rate.get_folding_translation_rates(pdb.copy(), which='lo')
HI = folding_rate.get_folding_translation_rates(pdb.copy(), which='hi')
# For the results using only 2-state proteins...
# HI = folding_rate.get_folding_translation_rates(pdb.copy(), which='best', only2s=True)
fig = plt.figure(figsize=(8,10.5))
gs = GridSpec(5,12, wspace=0.5, hspace=0.0, height_ratios=[1,0.5,1,0.5,1.5])
ax = [fig.add_subplot(gs[i*2,j*4:(j+1)*4]) for i in [0,1] for j in [0,1,2]] + \
[fig.add_subplot(gs[4,:5]), fig.add_subplot(gs[4,7:])]
X = np.arange(10)
width = .35
ttls = [r'$\alpha$ Helix', r'$\beta$ Sheet']
lbls = [r'$E_{\alpha}$', r'$E_{\beta}$']
custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
c_helix = custom_cmap[0]
c_sheet = custom_cmap[12]
col = [c_helix, c_sheet]
bins = np.linspace(-0.20, 0.20, 80)
width = np.diff(bins[:2])
X = bins[:-1] + width * 0.5
mid = 39
sep = 0.05
for k, pdb in enumerate([LO, HI]):
quantiles = pdb['REL_RATE'].quantile(np.arange(0,1.1,.1)).values
pdb['quant'] = pdb['REL_RATE'].apply(lambda x: utils.assign_quantile(x, quantiles))
enrich_data = pickle.load(open(PATH_FIG_DATA.joinpath("fig3_enrich.pickle"), 'rb'))
for i, Y in enumerate(['H_ASYM', 'S_ASYM']):
for j in range(len(quantiles)-1):
hist, bins = np.histogram(pdb.loc[pdb.quant==j, Y], bins=bins)
hist = hist / hist.sum()
if i:
ax[k*3+i].bar(X[:mid], (hist/hist.sum())[:mid], width, bottom=[sep*j]*mid, color='grey', alpha=.5)
ax[k*3+i].bar(X[-mid:], (hist/hist.sum())[-mid:], width, bottom=[sep*j]*mid, color=col[i], alpha=.5)
else:
ax[k*3+i].bar(X[:mid], (hist/hist.sum())[:mid], width, bottom=[sep*j]*mid, color=col[i], alpha=.5)
ax[k*3+i].bar(X[-mid:], (hist/hist.sum())[-mid:], width, bottom=[sep*j]*mid, color='grey', alpha=.5)
ax[k*3+i].plot(X[:mid], (hist/hist.sum()+sep*j)[:mid], '-', c='k', alpha=.5)
ax[k*3+i].plot(X[-mid:], (hist/hist.sum()+sep*j)[-mid:], '-', c='k', alpha=.5)
mean = np.mean(enrich_data[Y[0]], axis=0)
lo = np.abs(mean - np.quantile(enrich_data[Y[0]], 0.025, axis=0))
hi = np.abs(mean - np.quantile(enrich_data[Y[0]], 0.975, axis=0))
ax[k*3+2].barh([sep*j+(i+.7)*sep/3 for j in range(10)], mean, sep/3, xerr=(lo, hi), color=col[i], ec='k', alpha=.5, label=lbls[i], error_kw={'lw':.8})
ax[k*3+2].plot([0,0], [-0.05, 0.5], '-', c='k', lw=.1)
for i in [0,2]:
ax[k*3+i].set_yticks(np.arange(len(quantiles))*sep)
ax[k*3+i].set_yticklabels([round(x,1) for x in quantiles])
for i in range(2):
ax[k*3+i].spines['top'].set_visible(False)
ax[k*3+i].spines['right'].set_visible(False)
for i in range(1,3):
ax[k*3+i].spines['left'].set_visible(False)
ax[k*3+i].spines['top'].set_visible(False)
for i in range(3):
ax[k*3+i].set_ylim(0-sep/4, (0.5+sep/4)*1.05)
ax[k*3+1].set_yticks([])
ax[k*3+2].yaxis.set_label_position('right')
ax[k*3+2].yaxis.tick_right()
ax[k*3+0].set_xlabel(r"asym$_{\alpha}$")
ax[k*3+1].set_xlabel(r"asym$_{\beta}$")
ax[k*3+0].set_ylabel(r'$\log_{10}R$')
ax[k*3+2].set_xlabel('N terminal\nEnrichment')
plot_metric_space(fig, ax[6:])
fs = 14
for i, b in zip([0,3,6], list('ABCDEFGHI')):
ax[i].text( -0.20, 1.05, b, transform=ax[i].transAxes, fontsize=fs)
fig.savefig(PATH_FIG.joinpath("si4.pdf"), bbox_inches='tight')
def get_ci_index(X, Y):
    """Return indices of samples whose X and Y values both lie within their
    marginal 95% intervals (the joint confidence box)."""
xlo = np.quantile(X, 0.025)
xhi = np.quantile(X, 0.975)
ylo = np.quantile(Y, 0.025)
yhi = np.quantile(Y, 0.975)
return np.where((X>=xlo)&(X<=xhi)&(Y>=ylo)&(Y<=yhi))[0]
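# Example (synthetic data): restrict a 2-D bootstrap cloud to its joint 95% box
# before computing its convex hull, e.g.
#   rng = np.random.default_rng(0)
#   cloud = rng.normal(size=(1000, 2))
#   inner = get_ci_index(cloud[:, 0], cloud[:, 1])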
def plot_hull(boot_fit, patt, ax='', c='k', lw=1):
    """Draw the convex hull of the bootstrapped fit parameters restricted to
    their joint 95% confidence region."""
idx = get_ci_index(*boot_fit[:,:2].T)
tmp = boot_fit[idx].copy()
hull = ConvexHull(np.array([boot_fit[idx,1], boot_fit[idx, 0]]).T)
for simplex in hull.simplices:
if not isinstance(ax, str):
ax.plot(tmp[simplex, 1], tmp[simplex, 0], patt, c=c, lw=lw)
else:
plt.plot(tmp[simplex, 1], tmp[simplex, 0], patt, c=c, lw=lw)
def plot_metric_space(fig, ax):
    """Contour maps of R_max over the folding-rate fit coefficients (A, B), with
    convex hulls of the bootstrapped fits and reference coefficient sets overlaid."""
fit = pickle.load(open(PATH_FIG_DATA.joinpath("boot_fit_met.pickle"), 'rb'))['AA']
boot_fit = pickle.load(open(PATH_FIG_DATA.joinpath("boot_fit_param.pickle"), 'rb'))
boot_fit_0 = pickle.load(open(PATH_FIG_DATA.joinpath("boot_fit_param_useall.pickle"), 'rb'))
X, Y = np.meshgrid(fit["c1"], fit["c2"])
cmap = colors.ListedColormap(sns.diverging_palette(230, 22, s=100, l=47, n=8))
bounds = np.linspace(-2, 2, 9)
norm = colors.BoundaryNorm(bounds, cmap.N)
im = []
ttls = ['Helices', 'Sheets']
for i in range(2):
im = ax[i].contourf(X, Y, fit['met'][:,:,i], bounds, cmap=cmap, vmin=-2, vmax=2, norm=norm)
cbar = fig.colorbar(im, ax=ax[i], fraction=0.046, pad=0.04, norm=norm, boundaries=bounds, ticks=bounds)
cbar.set_label(r"$R_{\mathregular{max}}$", labelpad=-5)
ax[i].set_xlabel('A')
ax[i].set_xlim(X.min(), X.max())
ax[i].set_ylabel('B')
ax[i].set_ylim(Y.max(), Y.min())
ax[i].invert_yaxis()
ax[i].set_aspect((np.max(X)-np.min(X))/(np.max(Y)-np.min(Y)))
ax[i].set_title(ttls[i])
col = ['k', '#79C726']
for i, boofi in enumerate([boot_fit, boot_fit_0]):
for j in range(2):
for bf, p in zip(boofi, ['-', ':']):
plot_hull(bf, p, ax[j], c=col[i])
c1 = [13.77, -6.07]
c1a = [11.36553036, -4.87716477]
c1b = [16.17819934, -7.27168306]
patt = ['*', 'o', 'o']
lbls = ['Fit', r"$95\% CI$", r"$95\% CI$"]
col = "#CB7CE6"
for i in range(2):
for coef, p, l in zip([c1, c1a, c1b], patt, lbls):
ax[i].plot([coef[0]], [coef[1]], p, label=l, fillstyle='none', ms=10, c=col, mew=2)
ax[i].legend(loc='best', frameon=False)
####################################################################
### FIG 5
def fig5():
fig, ax = plt.subplots(2,1)
fig.subplots_adjust(hspace=0.3)
bins = np.arange(0,620,20)
X = [bins[:-1] + np.diff(bins[:2])]
bins = np.arange(0,61,2.0)
X.append(bins[:-1] + np.diff(bins[:2]))
yellows = sns.diverging_palette(5, 55, s=95, l=77, n=13)
pinks = sns.diverging_palette(5, 55, s=70, l=52, n=13)
col = [yellows[12], pinks[0]]
col2 = [yellows[10], pinks[3]]
data = [pickle.load(open(PATH_FIG_DATA.joinpath(f"dom_{x}_dist_boot.pickle"), 'rb')) for x in ['aa', 'smco']]
for j in range(2):
for i in [1,2]:
MEAN, LO, HI = [np.array(x) for x in data[j][f"pos{i}"]]
ax[j].plot(X[j], MEAN, '--', c=col[i-1], label=f'position {i}')
ax[j].fill_between(X[j], LO, HI, color=col2[i-1], alpha=0.5)
ax[0].set_xlabel('Sequence Length')
ax[1].set_xlabel('Contact Order')
ax[0].set_ylabel('Density')
ax[1].set_ylabel('Density')
ax[0].legend(loc='upper right', frameon=False)
fig.savefig(PATH_FIG.joinpath("si5.pdf"), bbox_inches='tight')
####################################################################
### FIG 6
def fig6(X='REL_RATE', Y='S_ASYM'):
fig, ax = plt.subplots(1,2, figsize=(10,4))
fig.subplots_adjust(hspace=0.7, wspace=0.3)
sep = 0.40
col = Paired_12.hex_colors[5]
ttls = [f"Position {i}" for i in range(1,3)]
dom_pos_boot = pickle.load(open(PATH_FIG_DATA.joinpath("dom_pos_boot.pickle"), 'rb'))
custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
c_helix = custom_cmap[2]
c_sheet = custom_cmap[11]
col = [c_helix, c_sheet, "#CB7CE6", "#79C726"]
# ttls = ["Two-domain", "Three-domain"]
xlbls = [r'$E_{\alpha}$', r'$E_{\beta}$']
for i in range(2):
for j, (pos, dat) in enumerate(dom_pos_boot[2].items()):
quantiles = dat[0].mean(axis=0)
mean = dat[1][:,i,:].mean(axis=0)
lo = np.abs(np.quantile(dat[1][:,i,:], 0.025, axis=0) - mean)
hi = np.abs(np.quantile(dat[1][:,i,:], 0.975, axis=0) - mean)
ax[j].bar(np.arange(10)+(i+1)*sep, mean, sep, yerr=(lo, hi), color=col[i], label=xlbls[i], alpha=0.7, error_kw={'lw':.8})
ax[j].set_xticks(np.arange(len(quantiles)))
ax[j].set_xticklabels(np.round(quantiles, 1), rotation=90)
ax[i].spines['top'].set_visible(False)
ax[i].spines['right'].set_visible(False)
ax[i].set_title(ttls[i], loc='left')
ax[i].set_xlabel(r'$\log_{10}R$')
# ax[i,k].set_ylabel('N terminal\nEnrichment')
ax[i].set_ylabel("N Terminal Enrichment")
ax[0].legend(bbox_to_anchor=(1.17, 1.12), frameon=False, ncol=3)
fig.savefig(PATH_FIG.joinpath("si6.pdf"), bbox_inches='tight')
####################################################################
### FIG 7
def fig7(pdb, Y='D_ASYM'):
    """SI Fig 7: N-terminal enrichment of terminal disorder asymmetry across deciles
    of log10(R), sequence length and contact order, for all / eukaryotic / prokaryotic proteins."""
fig, ax = plt.subplots(3,3, figsize=(12,8))
fig.subplots_adjust(hspace=0.5, wspace=0.5)
sep = 0.05
col = Paired_12.hex_colors[7]
xlbls = [r'$\log_{10} R$', 'Sequence Length', 'Contact Order']
ttls = ['Full sample', 'Eukaryotes', 'Prokaryotes']
for k, df in enumerate([pdb, pdb.loc[pdb.k_trans==5], pdb.loc[pdb.k_trans==10]]):
for i, X in enumerate(['REL_RATE', 'AA_PDB', 'CO']):
quantiles = df[X].quantile(np.arange(0,1.1,.1)).values
df['quant'] = df[X].apply(lambda x: utils.assign_quantile(x, quantiles))
ratio = []
for j in range(len(quantiles)-1):
left = len(df.loc[(df.quant==j)&(df[Y]<0)]) / max(1, len(df.loc[(df.quant==j)]))
right = len(df.loc[(df.quant==j)&(df[Y]>0)]) / max(1, len(df.loc[(df.quant==j)]))
ratio.append((right - left))
# print(ratio)
ax[i,k].bar([sep*j+sep/2 for j in range(10)], ratio, sep/2, color=[col if r > 0 else 'grey' for r in ratio], alpha=.5)
ax[i,k].set_xticks(np.arange(len(quantiles))*sep)
if i == 1:
ax[i,k].set_xticklabels([int(x) for x in quantiles], rotation=90)
else:
ax[i,k].set_xticklabels([round(x,1) for x in quantiles], rotation=90)
ax[i,k].set_xlabel(xlbls[i])
ax[i,k].set_ylabel('N terminal\nEnrichment')
ax[0,k].set_title(ttls[k])
fig.savefig(PATH_FIG.joinpath("si7.pdf"), bbox_inches='tight')
####################################################################
### FIG 8
def fig8(df_pdb):
fig = plt.figure()
gs = GridSpec(2,1, wspace=0.0, height_ratios=[.5,1])
ax = [fig.add_subplot(gs[1,0]), fig.add_subplot(gs[0,0])]
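    # Speed-up of co-translational over strictly sequential (translate-then-fold)
    # folding, in units of the translation time, assuming 10**x = tau_fold / tau_trans:
    # (tau_trans + tau_fold) / max(tau_trans, tau_fold + tau_ribo).
    # Y takes tau_ribo = 0 and Y2 takes tau_ribo = 0.3 * tau_trans, matching the legend labels.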
X = np.arange(-3, 3, 0.01)
Y = np.array([(10**x + 1)/max(10**x, 1) for x in X])
Y2 = (1+10**X) / np.array([max(1, 10**x+30./100.) for x in X])
ax[0].plot(X, Y, '-', label=r"$\tau_{ribo}=0$")
ax[0].plot(X, Y2, ':', label=r"$\tau_{ribo}=0.3\tau_{trans}$")
lbls = ['1ILO', '2OT2', '3BID']
patt = ['o', 's', '^']
for l, p in zip(lbls, patt):
X, Y = np.load(PATH_FIG_DATA.joinpath(f"{l}.npy"))
ax[0].plot(X, Y, p, label=l, alpha=0.5, mec='k', ms=7)
ax[0].set_xlim(-2.3, 2.3)
ax[0].set_ylim(1, 2.05)
ax[0].set_xlabel(r'$\log_{10} R$')
ax[0].set_ylabel("Speed-up")
ax[0].spines['top'].set_visible(False)
ax[0].spines['right'].set_visible(False)
ax[0].legend(loc='upper right', frameon=False, bbox_to_anchor=(1.05, 1.00), ncol=1, labelspacing=.1)
fig8a(df_pdb, ax[1])
fig.savefig(PATH_FIG.joinpath("si8.pdf"), bbox_inches='tight')
def fig8a(df_pdb, ax):
    """Secondary-structure bar diagrams (runs of helix/sheet/coil from N to C)
    for the three example proteins shown in SI Fig 8."""
lbls = ['2OT2', '1ILO', '3BID']
idx = [98212, 19922, 127370]
SS = df_pdb.loc[idx, 'SS_PDB2'].values
custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
col_key = {'.':'grey', 'D':'grey', 'H':custom_cmap[3], 'S':custom_cmap[9]}
ec_key = {'.':'grey', 'D':'grey', 'H':custom_cmap[1], 'S':custom_cmap[11]}
wid_key = {'.':0.1, 'D':0.1, 'H':0.3, 'S':0.3}
lw_key = {'.':0.7, 'D':0.7, 'H':1.5, 'S':1.5}
for i, ss in enumerate(SS):
left = 0.
        for j, strand in enumerate(paper_figs.generate_strand(ss)):  # assumes generate_strand lives in the imported paper_figs module
s = strand[0]
ax.barh([i], [len(strand)], wid_key[s], left=[left], color=col_key[s], ec=ec_key[s], linewidth=lw_key[s])
left += len(strand) + 0.20
ax.annotate("N", xy=(-0.01, 1.0), xycoords='axes fraction')
ax.annotate("C", xy=(0.59, 1.0), xycoords='axes fraction')
for pos in ['left', 'right', 'top', 'bottom']:
ax.spines[pos].set_visible(False)
col = np.array(custom_cmap)[[3,9,1,11]]
ax.legend(handles=[mpatches.Patch(fc=c1, ec=c2, label=l) for c1, c2, l in zip(col[:2], col[2:], ['Helix', 'Sheet'])],
loc='upper right', frameon=False, ncol=1, bbox_to_anchor=(0.95, 1.10))
ax.set_xticks([])
ax.set_yticks(range(3))
ax.set_yticklabels(lbls)
ax.tick_params(axis='y', which='major', length=0, pad=10)
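# generate_strand (used above) is assumed to split a secondary-structure string into
# maximal runs of identical characters, e.g. 'HHH..SS' -> ['HHH', '..', 'SS'];
# a minimal equivalent sketch:
def _generate_strand_sketch(ss):
    from itertools import groupby
    for _, run in groupby(ss):
        yield ''.join(run)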
####################################################################
### FIG 9
def fig9(pdb, s='S'):
    """SI Fig 9: relative solvent accessibility (RSA) classes near the N and C
    termini for several subsets of proteins and secondary-structure types."""
pdb = pdb.loc[(pdb.USE_RSA)]
pdb = pdb.loc[(pdb.SS_PDB2.str.len()==pdb.RSA.apply(len))]
path = PATH_FIG_DATA.joinpath("RSA_quantiles.pickle")
if path.exists():
quantiles, euk_quantiles, prok_quantiles = pickle.load(open(path, 'rb'))
else:
quantiles = [np.quantile([x for y in pdb['RSA'] for x in y if np.isfinite(x)], x/3) for x in range(1,4)]
euk_quantiles = [np.quantile([x for y in pdb.loc[pdb.k_trans==5, 'RSA'] for x in y if np.isfinite(x)], x/3) for x in range(1,4)]
prok_quantiles = [np.quantile([x for y in pdb.loc[pdb.k_trans==10, 'RSA'] for x in y if np.isfinite(x)], x/3) for x in range(1,4)]
pickle.dump([quantiles, euk_quantiles, prok_quantiles], open(path, 'wb'))
print(quantiles)
# fig, ax = plt.subplots(4,3, figsize=(8,8))
# fig.subplots_adjust(wspace=0.5)
fig = plt.figure(figsize=(12,9))
gs = GridSpec(5,3, wspace=0.3, height_ratios=[1,1,1,1,1])
ax = [fig.add_subplot(gs[j,i]) for i in range(3) for j in [0,1]] + \
[fig.add_subplot(gs[j,i]) for i in range(3) for j in [3,4]]
print("All proteins, all SS")
fig9a(pdb['RSA'], pdb['SS_PDB2'], quantiles, ax[:2], s='SH.D')
print("euk proteins, all ss")
fig9a(pdb.loc[pdb.k_trans==5, 'RSA'], pdb.loc[pdb.k_trans==5, 'SS_PDB2'], euk_quantiles, ax[2:4], s='SH.D')
print("Prok proteins, all SS")
fig9a(pdb.loc[pdb.k_trans==10, 'RSA'], pdb.loc[pdb.k_trans==10, 'SS_PDB2'], prok_quantiles, ax[4:6], s='SH.D')
print("Euk proteins, only SHC")
fig9a(pdb.loc[pdb.k_trans==5, 'RSA'], pdb.loc[pdb.k_trans==5, 'SS_PDB2'], euk_quantiles, ax[6:8], s='SH.')
print("Euk proteins, only S")
fig9a(pdb.loc[pdb.k_trans==5, 'RSA'], pdb.loc[pdb.k_trans==5, 'SS_PDB2'], euk_quantiles, ax[8:10], s='S')
print("Prok proteins, only S")
fig9a(pdb.loc[pdb.k_trans==10, 'RSA'], pdb.loc[pdb.k_trans==10, 'SS_PDB2'], prok_quantiles, ax[10:12], s='S')
ttls = ['All proteins\nAll residues', 'Eukaryotic proteins\nAll residues', 'Prokaryotic proteins\nAll residues',
'Eukaryotic proteins\nHelix, sheet and coil', 'Eukaryotic proteins\nOnly Sheets', 'Prokaryotic proteins\nOnly Sheets']
col = np.array(list(Paired_12.hex_colors))[[0,2,4,6]]
lbls = ['Buried', 'Middle', 'Exposed']
ax[0].set_ylabel('Solvent accessibility\nprobability')
ax[1].set_ylabel('Solvent accessibility\nasymmetry\n$\\log_2 (N / C)$')
ax[6].set_ylabel('Solvent accessibility\nprobability')
ax[7].set_ylabel('Solvent accessibility\nasymmetry\n$\\log_2 (N / C)$')
handles = [Line2D([0], [0], ls=ls, c=c, label=l) for ls, c, l in zip(['-', '--'], ['k']*2, ['N', 'C'])] + \
[Line2D([0], [0], ls='-', c=c, label=l) for l, c in zip(lbls, col)]
ax[8].legend(handles=handles, bbox_to_anchor=(1.30, 1.85), frameon=False,
ncol=5, columnspacing=1.5, handlelength=2.0, labelspacing=2.0)
for i, a in enumerate(ax):
if i % 2:
ax[i].set_xticks(range(0, 60, 10))
ax[i].set_xlabel('Sequence distance from ends')
else:
ax[i].set_xticks([])
ax[i].set_title(ttls[i//2])
ax[i].set_xlim(0, 50)
fig.savefig(PATH_FIG.joinpath("si9.pdf"), bbox_inches='tight')
def fig9a(rsa_list, ss_list, quantiles, ax, s='S'):
cat = 'BME'
countN, countC = utils.sheets_rsa_seq_dist(rsa_list, ss_list, quantiles, ss_key=s)
col = np.array(list(Paired_12.hex_colors))[[0,2,4,6]]
base = np.zeros(len(countN[cat[0]]), dtype=float)
YtN = np.array(list(countN.values())).sum(axis=0)
YtC = np.array(list(countC.values())).sum(axis=0)
X = np.arange(base.size)
for i, s in enumerate(cat):
YN = countN[s]
YC = countC[s]
ax[0].plot(YN/YtN, '-', c=col[i], label=f"{s} N")
ax[0].plot(YC/YtC, ':', c=col[i], label=f"{s} C")
ax[1].plot(np.log2(YN/YC*YtC/YtN), '-', c=col[i], label=f"{s}")
print(s, np.round((np.sum(YN[:20]) / np.sum(YtN[:20])) / (np.sum(YC[:20]) / np.sum(YtC[:20])), 2))
ax[1].plot([0]*base.size, ':', c='k')
ax[0].set_ylim(0,1)
ax[1].set_ylim(-1,1)
for a in ax:
a.set_xlim(X[0], X[-1])
####################################################################
### FIG 10
def fig10(pdb):
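    """SI figure 10: analysis with folding rates predicted from an ACPro-based
    length fit: log10(R) distributions (fig10a), N-terminal enrichment bars
    (fig10b) and the log kf vs log L regressions for the ACPro and PFDB data
    sets; saves si10.pdf."""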
pfdb = asym_io.load_pfdb()
acpro = asym_io.load_acpro()
fig = plt.figure(figsize=(12,9))
gs = GridSpec(3,7, wspace=0.0, width_ratios=[5,0.2,5,0.4,3,1.0,6], height_ratios=[1,.3,1])
ax = [fig.add_subplot(gs[2,i*2]) for i in range(4)] + \
[fig.add_subplot(gs[0,0:3]), fig.add_subplot(gs[0,5:])]
# sns.distplot(pdb.ln_kf, ax=ax[5], label='PDB - PFDB fit', hist=False)
pdb = pdb.copy()
coef = folding_rate.linear_fit(np.log10(acpro['L']), acpro['log_kf']).params
pdb['ln_kf'] = folding_rate.pred_fold(np.log10(pdb.AA_PDB), coef)
pdb = utils.get_rel_rate(pdb)
fig10a(fig, ax[4])
fig10b(fig, ax[:4], pdb)
# sns.distplot(pdb.ln_kf, ax=ax[5], label='PDB - ACPro fit', hist=False)
# sns.distplot(pfdb.log_kf, ax=ax[5], label='PFDB data', kde=False, norm_hist=True)
# sns.distplot(acpro["ln kf"], ax=ax[5], label='KDB data', kde=False, norm_hist=True)
sns.regplot(np.log10(acpro['L']), acpro['log_kf'], label='ACPro data', scatter_kws={"alpha":0.5})
sns.regplot(np.log10(pfdb.loc[pfdb.use, 'L']), pfdb.loc[pfdb.use, 'log_kf'], label='PFDB data', scatter_kws={"alpha":0.5})
ax[5].legend(loc='best', frameon=False)
ax[5].set_xlabel(r"$\log_{10}L$")
ax[5].set_ylabel(r"$\log_{10}k_f$")
fs = 14
for i, b in zip([4,5,0,2,3], list('ABCDEFGHI')):
ax[i].text( -0.20, 1.16, b, transform=ax[i].transAxes, fontsize=fs)
fig.savefig(PATH_FIG.joinpath("si10.pdf"), bbox_inches='tight')
def fig10a(fig, ax):
Rdist_data = pickle.load(open(PATH_FIG_DATA.joinpath("R_dist_acpro.pickle"), 'rb'))
custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
c_helix = custom_cmap[2]
c_sheet = custom_cmap[10]
col = [c_helix, c_sheet, "#CB7CE6", "#79C726"]
lbls = ['All', 'Prokaryotes', 'Eukaryotes']
for i, k in enumerate(['All', 'Prok', 'Euk']):
ax.plot(Rdist_data['grid'], Rdist_data[k][0], '-', c=col[i], label=lbls[i])
ax.fill_between(Rdist_data['grid'], Rdist_data[k][1], Rdist_data[k][2], color=col[i], alpha=0.5)
ax.plot([0,0], [0, 0.60], ':', c='k', alpha=0.7)
ax.set_xlabel(r'$\log_{10} R$')
ax.set_ylabel('Density')
ax.set_xticks(np.arange(-6, 5, 2))
ax.set_xlim(-7, 2)
ax.set_ylim(0, 0.60)
ax.legend(loc='upper center', bbox_to_anchor=(0.55, 1.17), frameon=False, ncol=3, columnspacing=2)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
def fig10b(fig, ax, pdb, Y='S_ASYM'):
ft = 12
X = np.arange(10)
width = .35
ttls = [r'$\alpha$ Helix', r'$\beta$ Sheet']
lbls = [r'$E_{\alpha}$', r'$E_{\beta}$']
# col = np.array(Paired_12.hex_colors)[[1,5]]
custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
c_helix = custom_cmap[0]
c_sheet = custom_cmap[12]
col = [c_helix, c_sheet]
bins = np.linspace(-0.20, 0.20, 80)
width = np.diff(bins[:2])
X = bins[:-1] + width * 0.5
mid = 39
sep = 0.05
enrich_data = pickle.load(open(PATH_FIG_DATA.joinpath("fig3_enrich_acpro.pickle"), 'rb'))
quantiles = enrich_data['edges'].mean(axis=0)
for i, Y in enumerate(['H_ASYM', 'S_ASYM']):
for j in range(len(quantiles)-1):
hist, bins = np.histogram(pdb.loc[pdb.quant==j, Y], bins=bins)
hist = hist / hist.sum()
# total = len(pdb)/10
# left = len(pdb.loc[(pdb.quant==j)&(pdb[Y]<0)]) / total
# right = len(pdb.loc[(pdb.quant==j)&(pdb[Y]>0)]) / total
# print(Y, j, ''.join([f"{x:6.3f}" for x in [left, right, left/right, right / left]]))
if i:
ax[i].bar(X[:mid], (hist/hist.sum())[:mid], width, bottom=[sep*j]*mid, color='grey', alpha=.5)
ax[i].bar(X[-mid:], (hist/hist.sum())[-mid:], width, bottom=[sep*j]*mid, color=col[i], alpha=.5)
else:
ax[i].bar(X[:mid], (hist/hist.sum())[:mid], width, bottom=[sep*j]*mid, color=col[i], alpha=.5)
ax[i].bar(X[-mid:], (hist/hist.sum())[-mid:], width, bottom=[sep*j]*mid, color='grey', alpha=.5)
ax[i].plot(X[:mid], (hist/hist.sum()+sep*j)[:mid], '-', c='k', alpha=.5)
ax[i].plot(X[-mid:], (hist/hist.sum()+sep*j)[-mid:], '-', c='k', alpha=.5)
mean = np.mean(enrich_data[Y[0]], axis=0)
lo = np.abs(mean - np.quantile(enrich_data[Y[0]], 0.025, axis=0))
hi = np.abs(mean - np.quantile(enrich_data[Y[0]], 0.975, axis=0))
ax[2].barh([sep*j+(i+.7)*sep/3 for j in range(10)], mean, sep/3, xerr=(lo, hi), color=col[i], ec='k', alpha=.5, label=lbls[i], error_kw={'lw':.8})
ax[2].plot([0,0], [-0.05, 0.5], '-', c='k', lw=.1)
ax[0].set_yticks(np.arange(len(quantiles))*sep)
ax[0].set_yticklabels([round(x,1) for x in quantiles])
ax[2].legend(loc='upper center', ncol=2, columnspacing=1.5, frameon=False,
bbox_to_anchor=(0.52, 1.15))
for i, t in zip([0,1], ttls):
ax[i].set_title(t)
ax[i].set_xlim(-.15, .15)
ax[i].set_xticks([-.1, 0, .1])
for i in range(3):
ax[i].spines['top'].set_visible(False)
ax[i].spines['right'].set_visible(False)
ax[i].set_ylim(0-sep/4, 0.5+sep)
for i in [1,2]:
ax[i].spines['left'].set_visible(False)
ax[i].set_yticks([])
ax[0].set_xlabel(r"asym$_{\alpha}$")
ax[1].set_xlabel(r"asym$_{\beta}$")
ax[0].set_ylabel(r'$\log_{10}R$')
ax[2].set_xlabel('N terminal\nEnrichment')
pdb = pdb.loc[pdb.OC!='Viruses']
X = np.arange(10)
X = np.array([sep*j+(i+.7)*sep/3 for j in range(10)])
width = .175
ttls = ['Eukaryote ', 'Prokaryote ']
lbls = [r'$E_{\alpha}$', r'$E_{\beta}$']
custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
col = [custom_cmap[i] for i in [3, 9, 0, 12]]
paths = [f"fig3_enrich_{a}_acpro.pickle" for a in ['eukaryote', 'prokaryote']]
for i, path in enumerate(paths):
enrich_data = pickle.load(open(PATH_FIG_DATA.joinpath(path), 'rb'))
for j, Y in enumerate(['H_ASYM', 'S_ASYM']):
# adjust = (j - 1 + i*2)*width
adjust = (j*2 - 4.0 + i)*(sep/5)
mean = np.mean(enrich_data[Y[0]], axis=0)
lo = np.abs(mean - np.quantile(enrich_data[Y[0]], 0.025, axis=0))
hi = np.abs(mean - np.quantile(enrich_data[Y[0]], 0.975, axis=0))
print(i, Y, max(np.abs(mean)))
ax[3].barh(X+adjust, mean, sep/5.0, ec='k', xerr=(lo, hi), color=col[i*2+j],
label=ttls[i]+lbls[j], lw=0.001, error_kw={'lw':.2})
ax[3].plot([0,0], [-0.05, 0.5], '-', c='k', lw=.1)
ax[3].set_yticks(np.arange(len(quantiles))*sep)
ax[3].set_ylabel(r'$\log_{10} R$')
ax[3].set_yticklabels([round(x,1) for x in quantiles])
ax[3].set_xlabel('N terminal\nEnrichment')
ax[3].set_xlim(-.42, .42)
ax[3].set_ylim(0-sep/4, 0.5+sep)
ax[3].spines['top'].set_visible(False)
ax[3].spines['left'].set_visible(False)
handles = [mpatches.Patch([], [], color=col[j*2+i], label=ttls[j]+lbls[i]) for i in [0,1] for j in [1,0]]
ax[3].legend(handles=handles, bbox_to_anchor=(1.05, 1.25), frameon=False,
loc='upper right', ncol=2, columnspacing=1.0, handlelength=1.5)
ax[3].yaxis.set_label_position('right')
ax[3].yaxis.tick_right()
####################################################################
### FIG 11
def fig11(pdb, X='AA_PDB', Y='CO', w=.1, ax='', fig=''):
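    """SI figure 11: heat maps of mean helix and sheet asymmetry binned by
    sequence length and contact order, one row of panels per contact-order
    configuration loaded from pdb_config_CO.npy; saves si11.pdf."""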
if isinstance(ax, str):
fig, ax = plt.subplots(4,2, figsize=(9,12))
fig.subplots_adjust(wspace=0.0, hspace=0.65)
# ax = ax.reshape(ax.size)
pdb_CO = np.load(PATH_FIG_DATA.joinpath("pdb_config_CO.npy"))[:,:,0]
df = pdb.copy()
q = np.arange(w,1+w,w)
lbls = ['Helix', 'Sheet']
# cb_lbl = [r"$E_{\alpha}$", r"$E_{\beta}$"]
cb_lbl = [r"$asym_{\alpha}$", r"$asym_{\beta}$"]
vmax = 0.03
vmin = -vmax
for j, co in enumerate(pdb_CO.T):
df['CO'] = co
quant1 = [df[X].min()] + list(df[X].quantile(q).values)
quant2 = [df[Y].min()] + list(df[Y].quantile(q).values)
for i, Z in enumerate(['H_ASYM', 'S_ASYM']):
mean = []
for l1, h1 in zip(quant1[:-1], quant1[1:]):
for l2, h2 in zip(quant2[:-1], quant2[1:]):
samp = df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2), Z]
mean.append(samp.mean())
# left = len(df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2)&(df[Z]<0)])
# right = len(df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2)&(df[Z]>0)])
# tot = max(len(df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2)]), 1)
# mean.append((right - left)/tot)
cmap = sns.diverging_palette(230, 22, s=100, l=47, as_cmap=True)
norm = colors.BoundaryNorm([vmin, vmax], cmap.N)
bounds = np.linspace(vmin, vmax, 3)
im = ax[j,i].imshow(np.array(mean).reshape(q.size, q.size).T, cmap=cmap, vmin=vmin, vmax=vmax)
cbar = fig.colorbar(im, cmap=cmap, ticks=bounds, ax=ax[j,i], fraction=0.046, pad=0.04)
cbar.set_label(cb_lbl[i], labelpad=-5)
ax[j,i].set_title(lbls[i])
ax[j,i].set_xticks(np.arange(q.size+1)-0.5)
ax[j,i].set_yticks(np.arange(q.size+1)-0.5)
ax[j,i].set_xticklabels([int(x) for x in quant1], rotation=90)
ax[j,i].set_yticklabels([int(round(x,0)) for x in quant2])
for a in ax.ravel():
a.invert_yaxis()
a.set_xlabel('Sequence Length')
a.set_ylabel('Contact Order')
a.tick_params(axis='both', which='major', direction='in')
fs = 14
for i, b in zip(range(4), list('ABCDEFGHI')):
ax[i,0].text( -0.20, 1.16, b, transform=ax[i,0].transAxes, fontsize=fs)
fig.savefig(PATH_FIG.joinpath("si11.pdf"), bbox_inches='tight')
def fig12(pdb, X='REL_RATE', Y='S_ASYM', w=0.1):
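    """SI figure 12: sheet and helix asymmetry imbalance per log10(R) quantile,
    shown both as a probability difference and as a log2 ratio, for three
    asymmetry thresholds (0, 0.025, 0.05); saves si12.pdf."""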
fig = plt.figure(figsize=(8,12))
gs = GridSpec(3,2, wspace=0.4, hspace=0.5, width_ratios=[1,1])
ax_all = [[fig.add_subplot(gs[j,i]) for i in [0,1]] for j in range(3)]
custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
c_helix = custom_cmap[2]
c_sheet = custom_cmap[10]
col = [c_helix, c_sheet]
bins = np.linspace(-0.20, 0.20, 80)
width = np.diff(bins[:2])
mid = 39
sep = 0.05
lbls = ['Sheet', 'Helix']
quantiles = pdb[X].quantile(np.arange(0,1+w,w)).values
# print(np.round(quantiles, 2))
pdb['quant'] = pdb[X].apply(lambda x: utils.assign_quantile(x, quantiles))
# pdb['quant'] = np.random.choice(pdb['quant'], len(pdb), replace=False)
for ax, threshold in zip(ax_all, [0, 0.025, 0.05]):
print(f"threshold = {threshold}")
for i, Y in enumerate(['S_ASYM', 'H_ASYM']):
ratio1 = []
ratio2 = []
lefts = []
rights = []
for j in range(len(quantiles)-1):
hist, bins = np.histogram(pdb.loc[pdb.quant==j, Y], bins=bins)
hist = hist / hist.sum()
left = len(pdb.loc[(pdb.quant==j)&(pdb[Y]<-threshold)]) / max(len(pdb.loc[(pdb.quant==j)]), 1)
right = len(pdb.loc[(pdb.quant==j)&(pdb[Y]>threshold)]) / max(len(pdb.loc[(pdb.quant==j)]), 1)
lefts.append(left)
rights.append(right)
ratio1.append((right - left))
ratio2.append(np.log2(right / left))
print(Y, j, left, right)
xgrid = [sep*j+(i+1.0)*sep/3 for j in range(len(quantiles)-1)]
ax[0].barh(xgrid, ratio1, sep/3, color=col[i], alpha=.5)
ax[1].barh(xgrid, ratio2, sep/3, color=col[i], alpha=.5)
ax[0].set_xticks(np.arange(-0.3, 0.4, 0.1))
for a in ax:
a.set_yticks(np.arange(len(quantiles))*sep)
a.set_yticklabels([round(x,1) for x in quantiles])
a.plot([0,0], [-0.05, 0.5], '-', c='k', lw=.1)
a.spines['top'].set_visible(False)
a.spines['right'].set_visible(False)
a.set_ylim(0, 0.5)
a.set_ylabel(r'$\log_{10}R$')
ax[0].set_xlim(-0.35, 0.35)
ax[1].set_xlim(-1.50, 1.50)
ax[0].set_xlabel(r'$P(\mathregular{{asym}} \geq {0}) - P(\mathregular{{asym}} \leq -{0})$'.format(*[threshold]*2))
ax[1].set_xlabel(r'$\log_{{2}} \frac{{P(\mathregular{{asym}} \geq {0})}}{{P(\mathregular{{asym}} \leq -{0})}} $'.format(*[threshold]*2))
fig.savefig(PATH_FIG.joinpath("si12.pdf"), bbox_inches='tight')
def fig13(df, X='AA_PDB', Y='CO', w=.1, ax='', fig=''):
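    """SI figure 13: mean helix and sheet asymmetry binned by sequence length
    and contact order, together with the sample count per bin; saves si13.pdf."""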
if isinstance(ax, str):
fig, ax = plt.subplots(1,3, figsize=(15,4))
fig.subplots_adjust(wspace=0.5)
q = np.arange(w,1+w,w)
quant1 = [df[X].min()] + list(df[X].quantile(q).values)
quant2 = [df[Y].min()] + list(df[Y].quantile(q).values)
lbls = ['Helix', 'Sheet']
cb_lbl = [r"$asym_{\alpha}$", r"$asym_{\beta}$"]
vmax = 0.03
vmin = -vmax
count = []
for i, Z in enumerate(['H_ASYM', 'S_ASYM']):
mean = []
for l1, h1 in zip(quant1[:-1], quant1[1:]):
for l2, h2 in zip(quant2[:-1], quant2[1:]):
samp = df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2), Z]
mean.append(samp.mean())
# left = len(df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2)&(df[Z]<0)])
# right = len(df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2)&(df[Z]>0)])
# tot = max(len(df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2)]), 1)
# mean.append((right - left)/tot)
if not i:
count.append(len(samp))
# print(len(samp))
mean = np.array(mean).reshape(q.size, q.size)
count = np.array(count).reshape(q.size, q.size)
cmap = sns.diverging_palette(230, 22, s=100, l=47, as_cmap=True)
norm = colors.BoundaryNorm([vmin, vmax], cmap.N)
bounds = np.linspace(vmin, vmax, 3)
im = ax[i].imshow(mean.T, cmap=cmap, vmin=vmin, vmax=vmax)
cbar = fig.colorbar(im, cmap=cmap, ticks=bounds, ax=ax[i], fraction=0.046, pad=0.04)
cbar.set_label(cb_lbl[i], labelpad=-5)
ax[i].set_title(lbls[i])
ax[i].set_xticks(np.arange(q.size+1)-0.5)
ax[i].set_yticks(np.arange(q.size+1)-0.5)
ax[i].set_xticklabels([int(x) for x in quant1], rotation=90)
ax[i].set_yticklabels([int(round(x,0)) for x in quant2])
for i in [2]:
cmap = plt.cm.Greys
# norm = colors.BoundaryNorm([-.04, .04], cmap.N)
# bounds = np.linspace(-.04, .04, 5)
im = ax[i].imshow(np.array(count).reshape(q.size, q.size).T, cmap=cmap, vmin=0)
cbar = fig.colorbar(im, cmap=cmap, ax=ax[i], fraction=0.046, pad=0.04)
cbar.set_label('Count')
ax[i].set_title('Distribution')
ax[i].set_xticks(np.arange(q.size+1)-0.5)
ax[i].set_yticks(np.arange(q.size+1)-0.5)
ax[i].set_xticklabels([int(x) for x in quant1], rotation=90)
ax[i].set_yticklabels([int(round(x,0)) for x in quant2])
for a in ax:
a.invert_yaxis()
a.set_xlabel('Sequence Length')
a.set_ylabel('Contact Order')
a.tick_params(axis='both', which='major', direction='in')
fs = 14
for i, b in zip([0,1,2], list('ABCDEFGHI')):
ax[i].text( -0.20, 1.05, b, transform=ax[i].transAxes, fontsize=fs)
fig.savefig(PATH_FIG.joinpath("si13.pdf"), bbox_inches='tight')
def scop_ss():
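    """SI figure 14: secondary-structure probability and N/C asymmetry near the
    termini, computed from the SCOP-independent bootstrap data
    (pdb_scop_indep.pickle); saves si14.pdf."""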
fig, ax = plt.subplots(2,1)
cat = 'HS.D'
N = 50
X = np.arange(50)
Nboot, Cboot, asym, enrich_edges, enrich_vals = pickle.load(open(PATH_FIG_DATA.joinpath(f"pdb_scop_indep.pickle"), 'rb'))
data = [Nboot, Cboot, asym]
custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
c_helix = custom_cmap[2]
c_sheet = custom_cmap[10]
col = [c_helix, c_sheet, "#CB7CE6", "#79C726"]
lbls = ['Helix', 'Sheet', 'Coil', 'Disorder']
for j, s in enumerate(cat):
ax[0].plot(X, data[0][s]['mean']/4, '-', c=col[j], label=f"{s} N")
ax[0].fill_between(X, data[0][s]['hi']/4, data[0][s]['lo']/4, color="grey", label=f"{s} N", alpha=0.5)
ax[0].plot(X, data[1][s]['mean']/4, '--', c=col[j], label=f"{s} N")
ax[0].fill_between(X, data[1][s]['hi']/4, data[1][s]['lo']/4, color="grey", label=f"{s} N", alpha=0.2)
print(s, round(np.mean(data[2][s]['mean']), 2), round(np.mean(data[2][s]['mean'][:20]), 2), round(np.mean(data[2][s]['mean'][20:]), 2))
ax[1].plot(X, np.log2(data[2][s]['mean']), '-', c=col[j], label=lbls[j])
ax[1].fill_between(X, np.log2(data[2][s]['hi']), np.log2(data[2][s]['lo']), color="grey", label=f"{s} N", alpha=0.2)
ax[1].set_ylim(-1, 1.3)
ax[1].plot([0]*50, '-', c='k')
ax[1].set_yticks(np.arange(-1,1.5,0.5))
ax[0].set_ylim(0, 0.6)
ax[1].set_xlabel('Sequence distance from ends')
ax[0].set_ylabel('Secondary structure\nprobability')
ax[1].set_ylabel('Structural asymmetry\n$\\log_2 (N / C)$')
fs = 14
for i, b in zip([0,1], list('ABCDEFGHI')):
ax[i].text( -0.10, 1.05, b, transform=ax[i].transAxes, fontsize=fs)
fig.savefig(PATH_FIG.joinpath("si14.pdf"), bbox_inches='tight')
def percentage_asym(x):
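    """Convert a log2(N/C) asymmetry into a signed percentage difference.

    Worked examples (direct substitution into the formula below):
    x = 1 (N twice C) -> +100;  x = -1 (C twice N) -> -100;  x = 0 -> 0.
    """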
return np.sign(x) * 100*2**(abs(x)) - np.sign(x) * 100
def fig15():
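    """SI figure 15: secondary-structure probability and N/C asymmetry out to
    100 residues from the termini (pdb_ss_max_asym.pickle), plus per-20-residue
    percentage-asymmetry bars for helices and sheets; saves si15.pdf."""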
fig, ax = plt.subplots(3,1, figsize=(10,10))
cat = 'HS.D'
N = 100
X = np.arange(N)
Nboot, Cboot, asym, = pickle.load(open(PATH_FIG_DATA.joinpath(f"pdb_ss_max_asym.pickle"), 'rb'))
data = [Nboot, Cboot, asym]
custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
c_helix = custom_cmap[2]
c_sheet = custom_cmap[10]
col = [c_helix, c_sheet, "#CB7CE6", "#79C726"]
lbls = ['Helix', 'Sheet', 'Coil', 'Disorder']
X2 = np.arange(5)
for j, s in enumerate(cat):
ax[0].plot(X, data[0][s]['mean']/2, '-', c=col[j], label=f"{s} N")
ax[0].fill_between(X, data[0][s]['hi']/2, data[0][s]['lo']/2, color="grey", label=f"{s} N", alpha=0.5)
ax[0].plot(X, data[1][s]['mean']/2, '--', c=col[j], label=f"{s} N")
ax[0].fill_between(X, data[1][s]['hi']/2, data[1][s]['lo']/2, color="grey", label=f"{s} N", alpha=0.2)
for k in range(5):
print(s, round(np.mean(data[2][s]['mean']), 2), round(np.mean(data[2][s]['mean'][k*20:(k+1)*20]), 2))
ax[1].plot(X, np.log2(data[2][s]['mean']), '-', c=col[j], label=lbls[j])
ax[1].fill_between(X, np.log2(data[2][s]['hi']), np.log2(data[2][s]['lo']), color="grey", label=f"{s} N", alpha=0.2)
if s in 'HS':
Y2 = [percentage_asym(np.log2(data[2][s]['mean'])[k*20:(k+1)*20].mean()) for k in range(5)]
ax[2].bar(X2, Y2, 0.5, color=col[j], label=lbls[j], ec='k')
ax[1].set_ylim(-1.5, 2.0)
ax[1].plot([0]*100, '-', c='k')
ax[2].plot([0]*5, '-', c='k')
ax[1].set_yticks(np.arange(-1,2.5,0.5))
ax[0].set_ylim(0, 0.6)
ax[2].set_xticks(np.arange(5))
ax[2].set_xticklabels([f"{i*20} - {(i+1)*20}" for i in range(5)])
ax[0].set_xlabel('Sequence distance from ends')
ax[1].set_xlabel('Sequence distance from ends')
ax[2].set_xlabel('Sequence distance from ends')
ax[0].set_ylabel('Secondary structure\nprobability')
ax[1].set_ylabel('Structural asymmetry\n$\\log_2 (N / C)$')
ax[2].set_ylabel('Percentage asymmetry')
fs = 14
for i, b in zip([0,1,2], list('ABCDEFGHI')):
ax[i].text( -0.10, 1.05, b, transform=ax[i].transAxes, fontsize=fs)
fig.savefig(PATH_FIG.joinpath("si15.pdf"), bbox_inches='tight')
def oligomer(pdb, X='REL_RATE', Y='S_ASYM', w=0.1):
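    """Compare sheet and helix asymmetry imbalance across log10(R) quantiles for
    monomers (NPROT == 1) versus oligomers (NPROT > 1); saves si16.pdf and
    oligomers.png."""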
pdb = pdb.copy()
fig = plt.figure(figsize=(8,8))
gs = GridSpec(2,2, wspace=0.4, hspace=0.5, width_ratios=[1,1])
ax_all = [[fig.add_subplot(gs[j,i]) for i in [0,1]] for j in range(2)]
custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
c_helix = custom_cmap[2]
c_sheet = custom_cmap[10]
col = [c_helix, c_sheet]
bins = np.linspace(-0.20, 0.20, 80)
width = np.diff(bins[:2])
mid = 39
sep = 0.05
threshold = 0
lbls = [r'$E_{\beta}$', r'$E_{\alpha}$']
ttls = ['Monomers', 'Oligomers']
for ax, idx, ttl in zip(ax_all, [pdb.NPROT==1, pdb.NPROT>1], ttls):
quantiles = pdb.loc[idx, X].quantile(np.arange(0,1+w,w)).values
pdb['quant'] = pdb.loc[idx, X].apply(lambda x: utils.assign_quantile(x, quantiles))
for i, Y in enumerate(['S_ASYM', 'H_ASYM']):
ratio1 = []
ratio2 = []
lefts = []
rights = []
for j in range(len(quantiles)-1):
hist, bins = np.histogram(pdb.loc[(idx)&(pdb.quant==j), Y], bins=bins)
hist = hist / hist.sum()
left = len(pdb.loc[(idx)&(pdb.quant==j)&(pdb[Y]<-threshold)]) / max(len(pdb.loc[(idx)&(pdb.quant==j)]), 1)
right = len(pdb.loc[(idx)&(pdb.quant==j)&(pdb[Y]>threshold)]) / max(len(pdb.loc[(idx)&(pdb.quant==j)]), 1)
lefts.append(left)
rights.append(right)
ratio1.append((right - left))
ratio2.append(np.log2(right / left))
xgrid = [sep*j+(i+1.0)*sep/3 for j in range(len(quantiles)-1)]
ax[0].barh(xgrid, ratio1, sep/3, color=col[i], alpha=.5, label=lbls[i])
ax[1].barh(xgrid, ratio2, sep/3, color=col[i], alpha=.5)
ax[0].set_xticks(np.arange(-0.3, 0.4, 0.1))
for a in ax:
a.set_yticks(np.arange(len(quantiles))*sep)
a.set_yticklabels([round(x,1) for x in quantiles])
a.plot([0,0], [-0.05, 0.5], '-', c='k', lw=.1)
a.spines['top'].set_visible(False)
a.spines['right'].set_visible(False)
a.set_ylim(0, 0.5)
a.set_ylabel(r'$\log_{10}R$')
a.set_title(f"{ttl}, N={np.sum(idx)}")
ax[0].set_xlim(-0.35, 0.35)
ax[1].set_xlim(-1.50, 1.50)
ax[0].set_xlabel(r'$P(\mathregular{{asym}} \geq {0}) - P(\mathregular{{asym}} \leq -{0})$'.format(*[threshold]*2))
ax[1].set_xlabel(r'$\log_{{2}} \frac{{P(\mathregular{{asym}} \geq {0})}}{{P(\mathregular{{asym}} \leq -{0})}} $'.format(*[threshold]*2))
ax[0].legend(loc='upper center', ncol=2, columnspacing=3, frameon=False,
bbox_to_anchor=(1.20, 1.20))
fig.savefig(PATH_FIG.joinpath("si16.pdf"), bbox_inches='tight')
fig.savefig(PATH_FIG.joinpath("oligomers.png"), bbox_inches='tight')
def scop2(X='REL_RATE', Y='S_ASYM', w=0.1):
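    """N-terminal enrichment bars per quantile, computed from the
    SCOP-independent bootstrap data (pdb_scop_indep.pickle)."""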
fig, ax = plt.subplots(figsize=(10,6))
edges, data = pickle.load(open(PATH_FIG_DATA.joinpath("pdb_scop_indep.pickle"), 'rb'))[3:]
edges = edges[0]
sep = 0.05
    lbls = [r'$E_{\alpha}$', r'$E_{\beta}$']
    # helix / sheet bar colours, same palette as the other figure functions
    custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
    col = [custom_cmap[2], custom_cmap[10]]
for i, Y in enumerate(['H_ASYM', 'S_ASYM']):
mean = np.mean(data[:,i], axis=0)
lo = np.abs(mean - np.quantile(data[:,i], 0.025, axis=0))
hi = np.abs(mean - np.quantile(data[:,i], 0.975, axis=0))
ax.barh([sep*j+(i+.7)*sep/3 for j in range(10)], mean, sep/3, xerr=(lo, hi), color=col[i], ec='k', alpha=.5, label=lbls[i], error_kw={'lw':.8})
ax.plot([0,0], [-0.05, 0.5], '-', c='k', lw=.1)
ax.set_yticks(np.arange(len(edges))*sep)
ax.set_yticklabels([round(x,1) for x in edges])
ax.legend(loc='upper center', ncol=2, columnspacing=3, frameon=False,
bbox_to_anchor=(0.52, 1.06))
ax.set_xlim(-.38, .38)
ax.set_xticks(np.arange(-.3, .4, .1))
# To create this figure, you need to download the complete
# Human and E. coli proteomes at:
# https://alphafold.ebi.ac.uk/download
# and then change the code so that "base" points to the
# folder that contains the downloaded ".pdb" files
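# For example (illustrative layout only, adjust to your setup):
#   <PATH_BASE>/AlphaFold/Human/*.pdb
#   <PATH_BASE>/AlphaFold/Ecoli/*.pdb
# which matches base = PATH_BASE.joinpath(f"AlphaFold/{l}") in the function below.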
def disorder_proteome(N=100):
fig, ax = plt.subplots(1,2, figsize=(12,4))
lbls = ["Human", "Ecoli"]
ttls = ["Human", "E. coli"]
for i, l in enumerate(lbls):
path = PATH_FIG_DATA.joinpath(f"alphafold_{l}.npy")
if not path.exists():
base = PATH_BASE.joinpath(f"AlphaFold/{l}")
countN = np.zeros(N, float)
countC = np.zeros(N, float)
tot = np.zeros(N, float)
with Pool(50) as pool:
dis = list(pool.imap_unordered(utils.get_disorder_from_conf, base.glob("*pdb"), 10))
for d in dis:
n = min(int(len(d)/2), N)
countN[:n] = countN[:n] + d[:n]
countC[:n] = countC[:n] + d[-n:][::-1]
tot[:n] = tot[:n] + 1
fracN = countN / tot
fracC = countC / tot
np.save(path, np.array([fracN, fracC]))
else:
fracN, fracC = np.load(path)
ax[i].plot(np.arange(N)+1, fracN, '-', label='N')
ax[i].plot(np.arange(N)+1, fracC, '--', label='C')
ax[i].set_title(ttls[i])
ax[i].set_xlabel("Sequence distance from ends")
ax[i].set_ylabel("Disorder probability")
ax[i].set_ylim(0, 1)
ax[i].legend(loc='best', frameon=False)
fig.savefig(PATH_FIG.joinpath("si17.pdf"), bbox_inches='tight')
def kfold_vs_ss():
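    """SI figure 18: log10(kf) versus log10(L) regressions for each structural
    class in the PFDB data; saves si18.pdf."""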
pfdb = asym_io.load_pfdb()
fig, ax = plt.subplots(figsize=(8,8))
for c in pfdb.Class.unique():
X = np.log10(pfdb.loc[pfdb.Class==c, 'L'])
Y = pfdb.loc[pfdb.Class==c, 'log_kf']
sns.regplot(X, Y, label=c)
ax.set_xlabel(r"$\log_{10}$ Sequence Length")
ax.set_ylabel(r"$\log_{10} k_f$")
ax.legend(loc='best', frameon=False)
fig.savefig(PATH_FIG.joinpath("si18.pdf"), bbox_inches='tight')
def hbond_asym(pdb, Xl='REL_RATE', Y='hb_asym', w=0.1):
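    """SI figure 19: distribution of the asymmetry in mean hydrogen-bond length
    across log10(R) quantiles, with N-terminal enrichment bars; saves si19.pdf."""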
fig = plt.figure(figsize=(9,6))
gs = GridSpec(1,2, wspace=0.2, hspace=0.0, width_ratios=[1,.3])
ax = [fig.add_subplot(gs[i]) for i in [0,1]]
col = np.array(Paired_12.hex_colors)[[1,3]]
bins = np.linspace(-0.20, 0.20, 80)
width = np.diff(bins[:2])
X = bins[:-1] + width * 0.5
mid = 39
sep = 0.05
quantiles = pdb[Xl].quantile(np.arange(0,1+w,w)).values
ratio = []
lefts = []
rights = []
threshold = 0.00
for j in range(len(quantiles)-1):
hist, bins = np.histogram(pdb.loc[pdb.quant==j, Y], bins=bins)
hist = hist / hist.sum()
left = len(pdb.loc[(pdb.quant==j)&(pdb[Y]<-threshold)]) / max(len(pdb.loc[(pdb.quant==j)]), 1)
right = len(pdb.loc[(pdb.quant==j)&(pdb[Y]>threshold)]) / max(len(pdb.loc[(pdb.quant==j)]), 1)
lefts.append(left)
rights.append(right)
ratio.append((right - left))
ax[0].bar(X[:mid], (hist/hist.sum())[:mid], width, bottom=[sep*j]*mid, color='grey', alpha=.5)
ax[0].bar(X[-mid:], (hist/hist.sum())[-mid:], width, bottom=[sep*j]*mid, color=col[0], alpha=.5)
ax[0].plot(X[:mid], (hist/hist.sum()+sep*j)[:mid], '-', c='k', alpha=.5)
ax[0].plot(X[-mid:], (hist/hist.sum()+sep*j)[-mid:], '-', c='k', alpha=.5)
ax[0].set_yticks(np.arange(len(quantiles))*sep)
ax[0].set_yticklabels([round(x,1) for x in quantiles])
ax[1].barh([sep*j+sep/2 for j in range(len(quantiles)-1)], ratio, sep/2, color=[col[0] if r > 0 else 'grey' for r in ratio], alpha=.5)
ax[1].plot([0,0], [-0.05, 0.5], '-', c='k', lw=.1)
ax[0].spines['top'].set_visible(False)
ax[0].spines['right'].set_visible(False)
ax[1].spines['top'].set_visible(False)
ax[1].spines['right'].set_visible(False)
ax[1].spines['left'].set_visible(False)
ax[1].set_yticks([])
for a in ax:
a.set_ylim(0, 0.60)
ax[0].set_xlabel('Asymmetry in mean hydrogen bond length')
ax[0].set_ylabel(r'$\log_{10}R$')
ax[1].set_xlabel('N terminal enrichment')
fig.savefig(PATH_FIG.joinpath("si19.pdf"), bbox_inches='tight')
def hyd_asym(pdb, Xl='REL_RATE', Y='hyd_asym', w=0.1):
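    """SI figure 20: as hbond_asym, but for the asymmetry in mean
    hydrophobicity; saves si20.pdf."""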
fig = plt.figure(figsize=(9,6))
gs = GridSpec(1,2, wspace=0.2, hspace=0.0, width_ratios=[1,.3])
ax = [fig.add_subplot(gs[i]) for i in [0,1]]
col = np.array(Paired_12.hex_colors)[[1,3]]
bins = np.linspace(-4.5, 4.5, 80)
width = np.diff(bins[:2])
X = bins[:-1] + width * 0.5
mid = 39
sep = 0.05
quantiles = pdb[Xl].quantile(np.arange(0,1+w,w)).values
ratio = []
lefts = []
rights = []
threshold = 0.00
for j in range(len(quantiles)-1):
hist, bins = np.histogram(pdb.loc[pdb.quant==j, Y], bins=bins)
hist = hist / hist.sum()
left = len(pdb.loc[(pdb.quant==j)&(pdb[Y]<-threshold)]) / max(len(pdb.loc[(pdb.quant==j)]), 1)
right = len(pdb.loc[(pdb.quant==j)&(pdb[Y]>threshold)]) / max(len(pdb.loc[(pdb.quant==j)]), 1)
lefts.append(left)
rights.append(right)
ratio.append((right - left))
ax[0].bar(X[:mid], (hist/hist.sum())[:mid], width, bottom=[sep*j]*mid, color='grey', alpha=.5)
ax[0].bar(X[-mid:], (hist/hist.sum())[-mid:], width, bottom=[sep*j]*mid, color=col[0], alpha=.5)
ax[0].plot(X[:mid], (hist/hist.sum()+sep*j)[:mid], '-', c='k', alpha=.5)
ax[0].plot(X[-mid:], (hist/hist.sum()+sep*j)[-mid:], '-', c='k', alpha=.5)
ax[0].set_yticks(np.arange(len(quantiles))*sep)
ax[0].set_yticklabels([round(x,1) for x in quantiles])
ax[1].barh([sep*j+sep/2 for j in range(len(quantiles)-1)], ratio, sep/2, color=[col[0] if r > 0 else 'grey' for r in ratio], alpha=.5)
ax[1].plot([0,0], [-0.05, 0.5], '-', c='k', lw=.1)
ax[0].spines['top'].set_visible(False)
ax[0].spines['right'].set_visible(False)
ax[1].spines['top'].set_visible(False)
ax[1].spines['right'].set_visible(False)
ax[1].spines['left'].set_visible(False)
ax[1].set_yticks([])
for a in ax:
a.set_ylim(0, 0.60)
ax[0].set_xlabel('Asymmetry in mean hydrophobicity')
ax[0].set_ylabel(r'$\log_{10}R$')
ax[1].set_xlabel('N terminal enrichment')
fig.savefig(PATH_FIG.joinpath("si20.pdf"), bbox_inches='tight')
| 42.085463 | 162 | 0.557723 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,586 | 0.181929 |
327808782f63cb50deaafbd843fb0446afafa40c | 81 | py | Python | release_ce.py | BTW-Community/BTW-MCP | 4422e153525265029754dec222fc0c0064e03962 | [
"MIT"
]
| 2 | 2021-12-12T17:14:53.000Z | 2021-12-25T04:03:18.000Z | release_ce.py | BTW-Community/BTW-MCP | 4422e153525265029754dec222fc0c0064e03962 | [
"MIT"
]
| null | null | null | release_ce.py | BTW-Community/BTW-MCP | 4422e153525265029754dec222fc0c0064e03962 | [
"MIT"
]
| null | null | null | from btw_mcp import *
package_release("vanilla", "main", directory="ce_release") | 27 | 58 | 0.765432 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.333333 |
327857254668f20b13612c825f93043e95b1c5c9 | 3,449 | py | Python | test_beam_search.py | slegroux/slgBeam | 733049ad4a97f582bc169623941cfbdf3efea207 | [
"Apache-2.0"
]
| null | null | null | test_beam_search.py | slegroux/slgBeam | 733049ad4a97f582bc169623941cfbdf3efea207 | [
"Apache-2.0"
]
| null | null | null | test_beam_search.py | slegroux/slgBeam | 733049ad4a97f582bc169623941cfbdf3efea207 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
# (c) 2020 Sylvain Le Groux <[email protected]>
import pytest
from pytest import approx
import numpy as np
import torch
from IPython import embed
from beam_search import Tokenizer, Score, BeamSearch
@pytest.fixture(scope='module')
def data():
mat = torch.Tensor(np.genfromtxt('data/rnnOutput.csv',delimiter=';')[:,: -1])
# mat = mat.unsqueeze(0)
classes = ' !"#&\'()*+,-./0123456789:;?ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_'
mat_prob = np.array([[0.2, 0.0, 0.8],
[0.4, 0.0, 0.6]])
syms = 'ab-'
bs = BeamSearch(syms, mat_prob)
bs2 = BeamSearch(classes, mat)
data = {'probs': mat_prob, 'syms': syms, 'bs': bs, 'mat': mat, 'classes': classes, 'bs2': bs2}
return(data)
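# The fixture above builds a toy beam-search decoding problem: two time steps
# over the symbols 'a', 'b' and the blank '-' (last column of mat_prob), plus
# the real 100x80 RNN output matrix over 79 character classes for larger tests.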
def test_data(data):
assert data['probs'].shape == (2,3)
assert data['mat'].shape == (100, 80)
assert len(data['classes']) == 79
def test_tokenizer(data):
tok = Tokenizer(data['syms'])
assert(tok.char2int('b') == 1)
assert(tok.int2char(1) == 'b')
tok2 = Tokenizer(data['classes'])
assert(tok2.char2int('Y') == 51)
assert(tok2.int2char(51) == 'Y')
def test_score(data):
tok = Tokenizer(data['syms'])
score = Score(tok, data['probs'])
assert score(1,'-') == 0.6
tok2 = Tokenizer(data['classes'])
score = Score(tok2, data['mat'])
assert float(score(0,' ')) == approx(float(0.946499))
def test_init(data):
beam_search = data['bs']
b, nb, s_b, s_nb = beam_search.init_paths()
assert b == {''}
assert nb == {'a', 'b'}
assert s_b == {'-': 0.8}
assert s_nb == {'b': 0.0, 'a': 0.2}
bs2 = data['bs2']
b, nb, s_b, s_nb = bs2.init_paths()
assert b == {''}
# assert s_b == {'-': 0.8}
def test_prune(data):
bs = data['bs']
path_b, path_nb = bs.prune_paths({''}, {'a','b'}, {'-':0.2}, {'a': 0.1,'b': 0.3}, 2)
assert path_b == {''}
assert path_nb == {'b'}
print(bs.score_b, bs.score_nb)
def test_extend_blank(data):
bs = data['bs']
init_b, init_nb, init_s_b, init_s_nb = bs.init_paths()
print("init:", init_b, init_nb, init_s_b, init_s_nb)
# incidentally init global b & nb paths
path_b, path_nb = bs.prune_paths(init_b, init_nb,init_s_b, init_s_nb, 2)
print("Pruned: ", path_b, path_nb)
print(bs.score_b, bs.score_nb)
new_path_b, new_score_b = bs.extend_with_blank(path_b, path_nb, 1)
print(new_path_b, new_score_b)
def test_extend_syms(data):
bs = data['bs']
init_b, init_nb, init_s_b, init_s_nb = bs.init_paths()
print("init:", init_b, init_nb, init_s_b, init_s_nb)
# incidentally init global b & nb paths
path_b, path_nb = bs.prune_paths(init_b, init_nb,init_s_b, init_s_nb, 2)
print("Pruned: ", path_b, path_nb)
print(bs.score_b, bs.score_nb)
new_path_nb, new_score_nb = bs.extend_with_symbol(path_b, path_nb, 1)
print(new_path_nb, new_score_nb)
def test_merge(data):
bs = data['bs']
init_b, init_nb, init_s_b, init_s_nb = bs.init_paths()
path_b, path_nb = bs.prune_paths(init_b, init_nb,init_s_b, init_s_nb, 2)
new_path_b, new_score_b = bs.extend_with_blank(path_b, path_nb, 1)
new_path_nb, new_score_nb = bs.extend_with_symbol(path_b, path_nb, 1)
bs.merge_paths(new_path_b, new_path_nb, new_score_b, new_score_nb)
def test_decode(data):
bs = data['bs']
print("decoded: ", bs.decode(2))
bs2 = data['bs2']
print("decoded: ", bs2.decode(1))
| 33.813725 | 98 | 0.632647 | 0 | 0 | 0 | 0 | 540 | 0.156567 | 0 | 0 | 573 | 0.166135 |
327872875221fcfb18f1db81613c4a83884de390 | 3,404 | py | Python | src/main/python/hydra/kafkatest/maxrate_test.py | bopopescu/hydra | ec0793f8c1f49ceb93bf1f1a9789085b68d55f08 | [
"Apache-2.0"
]
| 10 | 2016-05-28T15:56:43.000Z | 2018-01-03T21:30:58.000Z | src/main/python/hydra/kafkatest/maxrate_test.py | bopopescu/hydra | ec0793f8c1f49ceb93bf1f1a9789085b68d55f08 | [
"Apache-2.0"
]
| 17 | 2016-06-06T22:15:28.000Z | 2020-07-22T20:28:12.000Z | src/main/python/hydra/kafkatest/maxrate_test.py | bopopescu/hydra | ec0793f8c1f49ceb93bf1f1a9789085b68d55f08 | [
"Apache-2.0"
]
| 5 | 2016-06-01T22:01:44.000Z | 2020-07-22T20:12:49.000Z | __author__ = 'annyz'
from pprint import pprint, pformat # NOQA
import logging
import os
import sys
from datetime import datetime
from hydra.lib import util
from hydra.kafkatest.runtest import RunTestKAFKA
from hydra.lib.boundary import Scanner
from optparse import OptionParser
l = util.createlogger('runSuitMaxRate', logging.INFO)
class RunSuitMaxRate(object):
def __init__(self, options):
l.info(" Starting Max Rate ....")
pwd = os.getcwd()
fname = 'kafkasuit.test.log'
ofile = open(pwd + '/' + fname, 'w')
ofile.truncate()
ofile.write('Starting at :' + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '\n')
# setattr(options, 'test_duration', 15)
setattr(options, 'msg_batch', 100)
setattr(options, 'msg_rate', 10000)
setattr(options, 'keep_running', False)
setattr(options, 'acks', 0)
setattr(options, 'linger_ms', 0)
setattr(options, 'consumer_max_buffer_size', 0)
self.first_test = None
# Parameters
client_set = [30, 60, 120, 240, 480, 960, 1920, 3840, 7680, 10000]
for client_count in client_set:
setattr(options, 'total_sub_apps', int(client_count / 10))
if not self.first_test:
runner = RunTestKAFKA(options, None)
self.first_test = runner
self.first_test.start_appserver()
else:
# Keep the old runner
# But rescale the app
runner.set_options(options)
runner.scale_sub_app()
if client_count < 50:
scanner = Scanner(runner.run, 30000)
elif client_count < 200:
scanner = Scanner(runner.run, 10000)
else:
scanner = Scanner(runner.run, 500)
(status, rate, drop) = scanner.find_max_rate()
l.info("Found for Client Count %d Max message Rate %d with drop %f" %
(client_count, rate, drop))
maxrate_drop = drop
maxrate_rate = rate
if True and maxrate_drop != 0:
l.info("Searching for no-drop rate")
scanner_drop = Scanner(runner.run, maxrate_rate / 2)
(status, step_cnt, nodrop, nodrop_rate) = scanner_drop.search(0.5, 0.01)
l.info("Found for Client Count %d Max message Rate %d with no drop (%f)" %
(client_count, nodrop_rate, nodrop))
else:
nodrop_rate = rate
# Delete all launched apps once the required drop is achieved for this set
runner.delete_all_launched_apps()
self.first_test.stop_appserver()
l.info("TestSuite Completed.")
sys.exit(0)
def Run(argv): # NOQA
usage = ('python %prog --c_pub --c_sub'
' --test_duration=<time to run test> --msg_batch=<msg burst batch before sleep>')
parser = OptionParser(description='kafka scale maxrate test master',
version="0.1", usage=usage)
parser.add_option("--test_duration", dest='test_duration', type='int', default=15)
parser.add_option("--msg_batch", dest='msg_batch', type='int', default=100)
parser.add_option("--config_file", dest='config_file', type='string', default='hydra.ini')
(options, args) = parser.parse_args()
RunSuitMaxRate(options)
return True
| 38.247191 | 94 | 0.595476 | 2,426 | 0.712691 | 0 | 0 | 0 | 0 | 0 | 0 | 821 | 0.241187 |
327a37a67a58b314caa95c02379bd85e44d7216f | 722 | py | Python | src/api/v1/villains/serializers.py | reiniervdwindt/power-ranger-api | 13ce639a7f5e9d4b106ce5f094c076db0aad398e | [
"MIT"
]
| null | null | null | src/api/v1/villains/serializers.py | reiniervdwindt/power-ranger-api | 13ce639a7f5e9d4b106ce5f094c076db0aad398e | [
"MIT"
]
| null | null | null | src/api/v1/villains/serializers.py | reiniervdwindt/power-ranger-api | 13ce639a7f5e9d4b106ce5f094c076db0aad398e | [
"MIT"
]
| null | null | null | from rest_framework import serializers
from series.models import Series
from villains.models import Villain
class VillainSeriesSerializer(serializers.ModelSerializer):
name = serializers.CharField()
class Meta(object):
fields = ('id', 'name',)
model = Series
class VillainDetailSerializer(serializers.ModelSerializer):
series = VillainSeriesSerializer(many=True)
class Meta(object):
fields = ('id', 'name', 'description', 'gender', 'type', 'homeworld', 'series',)
model = Villain
class VillainListSerializer(serializers.ModelSerializer):
class Meta(object):
fields = ('id', 'name', 'description', 'gender', 'type', 'homeworld',)
model = Villain
| 26.740741 | 88 | 0.688366 | 604 | 0.836565 | 0 | 0 | 0 | 0 | 0 | 0 | 114 | 0.157895 |
327a4fc033970cf2fec138ab6d2ea6fa9e580d97 | 1,574 | py | Python | map_report.py | porcpine1967/aoe2stats | 52965e437b8471753186ba1fc34cb773807eb496 | [
"MIT"
]
| null | null | null | map_report.py | porcpine1967/aoe2stats | 52965e437b8471753186ba1fc34cb773807eb496 | [
"MIT"
]
| null | null | null | map_report.py | porcpine1967/aoe2stats | 52965e437b8471753186ba1fc34cb773807eb496 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
""" Writes out map popularity of last two pools."""
from datetime import datetime, timedelta
from utils.map_pools import map_type_filter, pools
from utils.tools import execute_sql, last_time_breakpoint, map_name_lookup
SQL = """SELECT map_type, COUNT(*) as cnt
FROM matches
WHERE started BETWEEN {:0.0f} AND {:0.0f}
{}
AND team_size = {}
GROUP BY map_type
ORDER BY cnt DESC"""
def run():
""" Run the report."""
map_names = map_name_lookup()
weeks = pools()[-2:]
for size in (1, 2):
print("TEAM" if size > 1 else "1v1")
week_infos = []
for idx, week in enumerate(weeks):
week_info = []
year = int(week[:4])
month = int(week[4:6])
day = int(week[6:])
start = last_time_breakpoint(datetime(year, month, day))
end = start + timedelta(days=14)
sql = SQL.format(
start.timestamp(), end.timestamp(), map_type_filter(week, size), size
)
total = 0
for map_type, count in execute_sql(sql):
week_info.append((map_names[map_type], count,))
total += count
hold = []
for name, count in week_info:
hold.append("{:17}: {:4.1f}%".format(name, 100.0 * count / total))
week_infos.append(hold)
print("{:^24} {:^24}".format(*weeks))
for idx in range(len(week_infos[0])):
print("{} {}".format(week_infos[0][idx], week_infos[1][idx]))
if __name__ == "__main__":
run()
| 32.122449 | 85 | 0.560991 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 314 | 0.199492 |
327c981e0a47fcedcb62aea60362f8adb3c7ccec | 5,277 | py | Python | common/xrd-ui-tests-qautomate/pagemodel/ss_keys_and_cert_dlg_subject_dname.py | ria-ee/XTM | 6103f3f5bbba387b8b59b050c0c4f1fb2180fc37 | [
"MIT"
]
| 3 | 2018-03-15T14:22:50.000Z | 2021-11-08T10:30:35.000Z | common/xrd-ui-tests-qautomate/pagemodel/ss_keys_and_cert_dlg_subject_dname.py | ria-ee/XTM | 6103f3f5bbba387b8b59b050c0c4f1fb2180fc37 | [
"MIT"
]
| 11 | 2017-04-06T09:25:41.000Z | 2018-06-04T09:08:48.000Z | common/xrd-ui-tests-qautomate/pagemodel/ss_keys_and_cert_dlg_subject_dname.py | ria-ee/XTM | 6103f3f5bbba387b8b59b050c0c4f1fb2180fc37 | [
"MIT"
]
| 20 | 2017-03-14T07:21:58.000Z | 2019-05-21T09:26:30.000Z | # -*- coding: utf-8 -*-
# Example for using WebDriver object: driver = get_driver() e.g driver.current_url
from webframework import TESTDATA
from variables import strings
from selenium.webdriver.common.by import By
from webframework.extension.util.common_utils import *
from webframework.extension.util.webtimings import get_measurements
from webframework.extension.parsers.parameter_parser import get_parameter
from time import sleep
class Ss_keys_and_cert_dlg_subject_dname(CommonUtils):
"""
Pagemodel
Changelog:
* 27.07.2017
| Docstrings updated
"""
# Pagemodel timestamp: 20160928092046
# Pagemodel url: https://xroad-lxd-ss1.lxd:4000/keys
# Pagemodel area: (680, 330, 560, 307)
# Pagemodel screen resolution: (1920, 1080)
# Use project settings: True
# Used filters: id, css_selector, class_name, link_text, xpath
# Xpath type: xpath-position
# Create automated methods: True
# Depth of css path: 3
# Minimize css selector: True
# Use css pattern: False
# Allow non unique css pattern: False
# Pagemodel template: False
# Use testability: True
# testability attribute: data-name
# Use contains text in xpath: True
# Exclude dynamic table filter: True
# Row count: 5
# Element count: 20
# Big element filter width: 55
# Big element filter height: 40
# Not filtered elements: button, strong, select
# Canvas modeling: False
# Pagemodel type: normal
# Links found: 0
# Page model constants:
MENUBAR_MAXIMIZE = (By.XPATH, u'//div[8]/div[1]/div[1]/button[1]') # x: 1133 y: 336 width: 51 height: 49, tag: button, type: submit, name: None, form_id: keys, checkbox: , table_id: 4, href:
MENUBAR_CLOSE = (By.XPATH, u'//div[8]/div[1]/div[1]/button[2]') # x: 1184 y: 336 width: 51 height: 49, tag: button, type: submit, name: None, form_id: keys, checkbox: , table_id: 4, href:
NAME_C_UI_STATE_DISABLED_FI_TEXT = (By.CSS_SELECTOR, u'input[name="C"].ui-state-disabled') # x: 877 y: 410 width: 340 height: 33, tag: input, type: text, name: C, form_id: keys, checkbox: , table_id: 4, href:
NAME_O_GOFORE_TEXT = (By.CSS_SELECTOR, u'input[name="O"]') # x: 877 y: 453 width: 340 height: 33, tag: input, type: text, name: O, form_id: keys, checkbox: , table_id: 4, href:
NAME_SERIAL_NUMBER_UI_STATE_DISABLED_FI_COM_TEXT = (By.CSS_SELECTOR, u'input[name="serialNumber"].ui-state-disabled') # x: 877 y: 496 width: 340 height: 33, tag: input, type: text, name: serialNumber, form_id: keys, checkbox: , table_id: 4, href:
NAME_CN_1234_TEXT = (By.CSS_SELECTOR, u'input[name="CN"]') # x: 877 y: 539 width: 340 height: 33, tag: input, type: text, name: CN, form_id: keys, checkbox: , table_id: 4, href:
BUTTON_CANCEL = (By.XPATH, u'//div[9]/div[3]/div[1]/button[2]') # x: 1098 y: 590 width: 77 height: 37, tag: button, type: button, name: None, form_id: keys, checkbox: , table_id: 4, href:
BUTTON_OK = (By.XPATH, u'//div[9]/div[3]/div[1]/button[1]') # x: 1185 y: 590 width: 45 height: 37, tag: button, type: button, name: None, form_id: keys, checkbox: , table_id: 4, href:
def fill_input_values_keys_dname_sign(self, parameters=None):
"""
        Input the member name with a random suffix into the subject distinguished name (DName) organization (O) field
:param parameters: Test data section dictionary
**Test steps:**
* **Step 1:** :func:`~webframework.extension.util.common_utils.CommonUtils.input_text`, *self.NAME_O_GOFORE_TEXT*, *parameters['member_name'] + rword*
"""
import random, string
        rword = ''.join(random.choice(string.ascii_lowercase) for i in range(4))
self.input_text(self.NAME_O_GOFORE_TEXT, parameters['member_name'] + rword)
def submit_keys_dname(self):
"""
        Click the OK button to submit the DName dialog
**Test steps:**
* **Step 1:** :func:`~webframework.extension.util.common_utils.CommonUtils.click_element`, *self.BUTTON_OK*
* **Step 2:** :func:`~webframework.extension.util.common_utils.CommonUtils.wait_until_jquery_ajax_loaded`
"""
# AutoGen method submit form: keys
self.click_element(self.BUTTON_OK)
self.wait_until_jquery_ajax_loaded()
def fill_input_values_keys_dname_auth(self, parameters=None):
"""
        Input the member name with a random suffix into the subject DName organization (O) field and the server address into the CN field
:param parameters: Test data section dictionary
**Test steps:**
* **Step 1:** :func:`~webframework.extension.util.common_utils.CommonUtils.input_text`, *self.NAME_O_GOFORE_TEXT*, *parameters['member_name'] + rword*
* **Step 2:** :func:`~webframework.extension.util.common_utils.CommonUtils.input_text`, *self.NAME_CN_1234_TEXT*, *parameters['server_address']*
"""
# AutoGen methods form: keys
import random, string
        rword = ''.join(random.choice(string.ascii_lowercase) for i in range(4))
self.input_text(self.NAME_O_GOFORE_TEXT, parameters['member_name'] + rword)
# AutoGen methods form: keys
server_address = parameters['server_address']
if strings.server_environment_type() == strings.lxd_type_environment:
server_address = server_address.replace("user@", "")
self.input_text(self.NAME_CN_1234_TEXT, server_address)
| 53.30303 | 250 | 0.675005 | 4,840 | 0.917188 | 0 | 0 | 0 | 0 | 0 | 0 | 3,500 | 0.663256 |
327cb6d4121abb0fa5a0265759fdf829da140dce | 6,303 | py | Python | tempdb/postgres.py | runfalk/tempdb | a19f7568db1795025c9ec8adfd84a9544f9a6966 | [
"MIT"
]
| 2 | 2021-01-17T00:01:14.000Z | 2021-01-18T09:26:56.000Z | tempdb/postgres.py | runfalk/tempdb | a19f7568db1795025c9ec8adfd84a9544f9a6966 | [
"MIT"
]
| null | null | null | tempdb/postgres.py | runfalk/tempdb | a19f7568db1795025c9ec8adfd84a9544f9a6966 | [
"MIT"
]
| null | null | null | import getpass
import os
import platform
import psycopg2
import sys
import tempfile
from glob import glob
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT, quote_ident
from subprocess import check_output, PIPE, Popen
from time import sleep
from ._compat import ustr
from .utils import is_executable, Uri, Version
__all__ = [
"PostgresFactory",
"PostgresCluster",
]
class PostgresFactory(object):
def __init__(self, pg_bin_dir, superuser=None):
# Temporary value until the first time we request it
self._version = None
self.initdb = os.path.join(pg_bin_dir, "initdb")
if not is_executable(self.initdb):
raise ValueError(
"Unable to find initdb command in {}".format(pg_bin_dir)
)
self.postgres = os.path.join(pg_bin_dir, "postgres")
if not is_executable(self.postgres):
raise ValueError(
"Unable to find postgres command in {}".format(pg_bin_dir)
)
if superuser is None:
superuser = getpass.getuser()
self.superuser = superuser
@property
def version(self):
if self._version is None:
self._version = get_version(self.postgres)
return self._version
def init_cluster(self, data_dir=None):
"""
Create a postgres cluster that trusts all incoming connections.
This is great for testing, but a horrible idea for production usage.
        :param data_dir: Directory to create the cluster in. If omitted a
            temporary directory is created; otherwise the directory must
            already exist and be empty.
:return: Path to the created cluster that can be used by load_cluster()
"""
if data_dir is None:
data_dir = tempfile.mkdtemp()
# If the target directory is not empty we don't want to risk wiping it
if os.listdir(data_dir):
raise ValueError((
"The given data directory {} is not empty. A new cluster will "
"not be created."
).format(data_dir))
check_output([
self.initdb,
"-U", self.superuser,
"-A", "trust",
data_dir
])
return data_dir
def create_temporary_cluster(self):
data_dir = self.init_cluster()
# Since we know this database should never be loaded again we disable
# safe guards Postgres has to prevent data corruption
return self.load_cluster(
data_dir,
is_temporary=True,
fsync=False,
full_page_writes=False,
)
def load_cluster(self, data_dir, is_temporary=False, **params):
uri = Uri(
scheme="postgresql",
user=self.superuser,
host=data_dir,
params=params,
)
return PostgresCluster(self.postgres, uri, is_temporary)
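# Example usage (sketch only; the binary directory below is an assumption and
# depends on where PostgreSQL is installed locally):
#
#   factory = PostgresFactory("/usr/lib/postgresql/12/bin")
#   cluster = factory.create_temporary_cluster()
#   db = cluster.create_database("testdb")   # connect with psycopg2 via db.dsn
#   cluster.close()                          # temporary clusters are removed on close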
class PostgresCluster(object):
def __init__(self, postgres_bin, uri, is_temporary=False):
if uri.host is None or not uri.host.startswith("/"):
msg = "{!r} doesn't point to a UNIX socket directory"
raise ValueError(msg.format(uri))
self.uri = uri
self.is_temporary = is_temporary
self.returncode = None
cmd = [
postgres_bin,
"-D", uri.host,
"-k", uri.host,
"-c", "listen_addresses=",
]
# Add additional configuration from kwargs
for k, v in uri.params.items():
if isinstance(v, bool):
v = "on" if v else "off"
cmd.extend(["-c", "{}={}".format(k, v)])
# Start cluster
self.process = Popen(
cmd,
stdout=PIPE,
stderr=PIPE,
)
# Wait for a ".s.PGSQL.<id>" file to appear before continuing
while not glob(os.path.join(uri.host, ".s.PGSQL.*")):
sleep(0.1)
# Superuser connection
self.conn = psycopg2.connect(
ustr(self.uri.replace(database="postgres"))
)
self.conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
def __del__(self):
self.close()
def iter_databases(self):
with self.conn.cursor() as c:
default_databases = {"postgres", "template0", "template1"}
c.execute("SELECT datname FROM pg_database")
for name, in c:
if name not in default_databases:
yield name
def create_database(self, name, template=None):
if name in self.iter_databases():
raise KeyError("The database {!r} already exists".format(name))
with self.conn.cursor() as c:
sql = "CREATE DATABASE {}".format(quote_ident(name, c))
if template is not None:
sql += " TEMPLATE {}".format(quote_ident(template, c))
c.execute(sql)
return PostgresDatabase(self, self.uri.replace(database=name))
def get_database(self, name):
if name not in self.iter_databases():
raise KeyError("The database {!r} doesn't exist".format(name))
return PostgresDatabase(self, self.uri.replace(database=name))
def close(self):
if self.process is None:
return
        # Kill all connections except this control connection. This prevents
        # the server from waiting indefinitely for connections to close
with self.conn.cursor() as c:
c.execute("""
SELECT pg_terminate_backend(pid)
FROM pg_stat_activity
WHERE pid != pg_backend_pid()
""")
self.conn.close()
self.process.terminate()
self.returncode = self.process.wait()
# Remove temporary clusters when closing
if self.is_temporary:
for path, dirs, files in os.walk(self.uri.host, topdown=False):
for f in files:
os.remove(os.path.join(path, f))
for d in dirs:
os.rmdir(os.path.join(path, d))
os.rmdir(self.uri.host)
self.process = None
class PostgresDatabase(object):
def __init__(self, cluster, uri):
self.cluster = cluster
self.uri = uri
@property
def dsn(self):
return ustr(self.uri)
| 30.597087 | 79 | 0.58369 | 5,908 | 0.937331 | 300 | 0.047596 | 208 | 0.033 | 0 | 0 | 1,610 | 0.255434 |
327ee9780e46ebbfd9024596b22934ad7011175f | 426 | py | Python | nymph/modules/tool.py | smilelight/nymph | c8da2211f7a8f58d1c6d327b243e419ed9e64ead | [
"Apache-2.0"
]
| 1 | 2020-08-10T00:58:14.000Z | 2020-08-10T00:58:14.000Z | nymph/modules/tool.py | smilelight/nymph | c8da2211f7a8f58d1c6d327b243e419ed9e64ead | [
"Apache-2.0"
]
| null | null | null | nymph/modules/tool.py | smilelight/nymph | c8da2211f7a8f58d1c6d327b243e419ed9e64ead | [
"Apache-2.0"
]
| 1 | 2021-07-03T07:06:41.000Z | 2021-07-03T07:06:41.000Z | # -*- coding: utf-8 -*-
import pandas as pd
def save_dict_to_csv(dict_data: dict, csv_path: str):
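    """Save a dict of dicts to CSV via a pandas DataFrame.

    ``dict_data`` maps row keys to {column: value} dicts; every inner dict is
    assumed to share the keys of the first one. Illustrative call:

        save_dict_to_csv({"a": {"x": 1, "y": 2}, "b": {"x": 3, "y": 4}}, "out.csv")
    """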
indexes = list(dict_data.keys())
columns = list(list(dict_data.values())[0].keys())
data = []
for row in dict_data:
data.append([item for item in dict_data[row].values()])
pd_data = pd.DataFrame(data, index=indexes, columns=columns)
pd_data.to_csv(csv_path, encoding='utf8')
return pd_data
| 30.428571 | 64 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.068075 |
327fa5382ee48b811835bb16249bdcc124edd278 | 1,187 | py | Python | apps/core/serializers.py | jfterpstra/onepercentclub-site | 43e8e01ac4d3d1ffdd5959ebd048ce95bb2dba0e | [
"BSD-3-Clause"
]
| 7 | 2015-01-02T19:31:14.000Z | 2021-03-22T17:30:23.000Z | apps/core/serializers.py | jfterpstra/onepercentclub-site | 43e8e01ac4d3d1ffdd5959ebd048ce95bb2dba0e | [
"BSD-3-Clause"
]
| 1 | 2015-03-06T08:34:59.000Z | 2015-03-06T08:34:59.000Z | apps/core/serializers.py | jfterpstra/onepercentclub-site | 43e8e01ac4d3d1ffdd5959ebd048ce95bb2dba0e | [
"BSD-3-Clause"
]
| null | null | null | from rest_framework import serializers
from bluebottle.utils.model_dispatcher import get_donation_model
from bluebottle.bb_projects.serializers import ProjectPreviewSerializer as BaseProjectPreviewSerializer
from bluebottle.bb_accounts.serializers import UserPreviewSerializer
DONATION_MODEL = get_donation_model()
class ProjectSerializer(BaseProjectPreviewSerializer):
task_count = serializers.IntegerField(source='task_count')
owner = UserPreviewSerializer(source='owner')
partner = serializers.SlugRelatedField(slug_field='slug', source='partner_organization')
    class Meta(BaseProjectPreviewSerializer.Meta):
model = BaseProjectPreviewSerializer.Meta.model
fields = ('id', 'title', 'image', 'status', 'pitch', 'country', 'task_count', 'allow_overfunding',
'is_campaign', 'amount_asked', 'amount_donated', 'amount_needed', 'deadline', 'status', 'owner', 'partner')
class LatestDonationSerializer(serializers.ModelSerializer):
project = ProjectSerializer()
user = UserPreviewSerializer()
class Meta:
model = DONATION_MODEL
fields = ('id', 'project', 'fundraiser', 'user', 'created', 'anonymous', 'amount')
| 42.392857 | 125 | 0.754844 | 864 | 0.727885 | 0 | 0 | 0 | 0 | 0 | 0 | 271 | 0.228307 |
328065cc7a0c80c52a732c0213b03b1281db7d57 | 1,035 | py | Python | Python/rockpaperscissors/rockpaperscissors.py | rvrheenen/OpenKattis | 7fd59fcb54e86cdf10f56c580c218c62e584f391 | [
"MIT"
]
| 12 | 2016-10-03T20:43:43.000Z | 2021-06-12T17:18:42.000Z | Python/rockpaperscissors/rockpaperscissors.py | rvrheenen/OpenKattis | 7fd59fcb54e86cdf10f56c580c218c62e584f391 | [
"MIT"
]
| null | null | null | Python/rockpaperscissors/rockpaperscissors.py | rvrheenen/OpenKattis | 7fd59fcb54e86cdf10f56c580c218c62e584f391 | [
"MIT"
]
| 10 | 2017-11-14T19:56:37.000Z | 2021-02-02T07:39:57.000Z | # WORKS BUT ISN'T FAST ENOUGH
first_run = True
while(True):
inp = input().split()
if len(inp) == 1:
break
if first_run:
first_run = False
else:
print()
nPlayers, nGames = [int(x) for x in inp]
resultsW = [0] * nPlayers
resultsL = [0] * nPlayers
for i in range( int( ((nGames*nPlayers)*(nPlayers - 1)) / 2 ) ):
p1, p1move, p2, p2move = [int(x) if x.isdigit() else x for x in input().split()]
if p1move == p2move:
continue
if (p1move == "scissors" and p2move == "paper") or (p1move == "paper" and p2move == "rock") or (p1move == "rock" and p2move == "scissors"):
resultsW[p1-1] += 1
resultsL[p2-1] += 1
else:
resultsW[p2-1] += 1
resultsL[p1-1] += 1
for i in range(nPlayers):
w_plus_l = resultsL[i] + resultsW[i]
if w_plus_l == 0:
print("-")
else:
print("%.3f" % (resultsL[i] / w_plus_l))
print("\n\n\n\n\n\n\n")
print(resultsW) | 32.34375 | 147 | 0.510145 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.096618 |
3280c700cb467b6fd44a96a8f003a083cb2e0a5f | 9,460 | py | Python | monitorcontrol/monitor_control.py | klwlau/monitorcontrol | 92d07c7a93585de14551ba1f1dd8bb3a009c4842 | [
"MIT"
]
| null | null | null | monitorcontrol/monitor_control.py | klwlau/monitorcontrol | 92d07c7a93585de14551ba1f1dd8bb3a009c4842 | [
"MIT"
]
| null | null | null | monitorcontrol/monitor_control.py | klwlau/monitorcontrol | 92d07c7a93585de14551ba1f1dd8bb3a009c4842 | [
"MIT"
]
| null | null | null | ###############################################################################
# Copyright 2019 Alex M.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
from . import vcp
import sys
from typing import Type, List, Union, Iterable
class Monitor:
"""
A physical monitor attached to a Virtual Control Panel (VCP).
Generated with :py:meth:`get_monitors()` or
:py:meth:`iterate_monitors()`.
Args:
vcp: virtual control panel for the monitor
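
    Example (minimal sketch; assumes monitors are enumerated with
    ``get_monitors()`` as noted above)::

        for monitor in get_monitors():
            with monitor:
                monitor.luminance = 75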
"""
#: Power modes and their integer values.
POWER_MODES = {
"on": 0x01,
"standby": 0x02,
"suspend": 0x03,
"off_soft": 0x04,
"off_hard": 0x05,
}
def __init__(self, vcp: Type[vcp.VCP]):
self.vcp = vcp
self.code_maximum = {}
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def open(self):
"""
Opens the connection to the VCP.
Raises:
VCPError: failed to open VCP
"""
self.vcp.open()
def close(self):
"""
Closes the connection to the VCP.
Raises:
VCPError: failed to close VCP
"""
self.vcp.close()
def _get_code_maximum(self, code: Type[vcp.VCPCode]) -> int:
"""
Gets the maximum values for a given code, and caches in the
class dictionary if not already found.
Args:
code: feature code definition class
Returns:
maximum value for the given code
Raises:
TypeError: code is write only
"""
if not code.readable:
raise TypeError(f"code is not readable: {code.name}")
if code.value in self.code_maximum:
return self.code_maximum[code.value]
else:
_, maximum = self.vcp.get_vcp_feature(code.value)
self.code_maximum[code.value] = maximum
return maximum
def _set_vcp_feature(self, code: Type[vcp.VCPCode], value: int):
"""
Sets the value of a feature on the virtual control panel.
Args:
code: feature code definition class
value: feature value
Raises:
TypeError: code is ready only
ValueError: value is greater than the maximum allowable
VCPError: failed to get VCP feature
"""
if code.type == "ro":
raise TypeError(f"cannot write read-only code: {code.name}")
elif code.type == "rw":
maximum = self._get_code_maximum(code)
if value > maximum:
raise ValueError(
f"value of {value} exceeds code maximum of {maximum}"
)
self.vcp.set_vcp_feature(code.value, value)
def _get_vcp_feature(self, code: Type[vcp.VCPCode]) -> int:
"""
Gets the value of a feature from the virtual control panel.
Args:
code: feature code definition class
Returns:
current feature value
Raises:
TypeError: code is write only
VCPError: failed to get VCP feature
"""
if code.type == "wo":
raise TypeError(f"cannot read write-only code: {code.name}")
current, maximum = self.vcp.get_vcp_feature(code.value)
return current
@property
def luminance(self) -> int:
"""
Gets the monitors back-light luminance.
Returns:
current luminance value
Raises:
VCPError: failed to get luminance from the VCP
"""
code = vcp.get_vcp_code_definition("image_luminance")
return self._get_vcp_feature(code)
@luminance.setter
def luminance(self, value: int):
"""
Sets the monitors back-light luminance.
Args:
value: new luminance value (typically 0-100)
        Raises (not yet implemented or checked):
ValueError: luminance outside of valid range
VCPError: failed to set luminance in the VCP
"""
code = vcp.get_vcp_code_definition("image_luminance")
self._set_vcp_feature(code, value)
@property
def contrast(self) -> int:
"""
Gets the monitors contrast.
Returns:
current contrast value
Raises:
VCPError: failed to get contrast from the VCP
"""
code = vcp.get_vcp_code_definition("image_contrast")
return self._get_vcp_feature(code)
@contrast.setter
def contrast(self, value: int):
"""
Sets the monitors back-light contrast.
Args:
value: new contrast value (typically 0-100)
Raises:
ValueError: contrast outside of valid range
VCPError: failed to set contrast in the VCP
"""
code = vcp.get_vcp_code_definition("image_contrast")
self._set_vcp_feature(code, value)
@property
def power_mode(self) -> int:
"""
The monitor power mode.
When used as a getter this returns the integer value of the
monitor power mode.
When used as a setter an integer value or a power mode
string from :py:attr:`Monitor.POWER_MODES` may be used.
Raises:
VCPError: failed to get or set the power mode
ValueError: set power state outside of valid range
KeyError: set power mode string is invalid
"""
code = vcp.get_vcp_code_definition("display_power_mode")
return self._get_vcp_feature(code)
@power_mode.setter
def power_mode(self, value: Union[int, str]):
if isinstance(value, str):
mode_value = Monitor.POWER_MODES[value]
elif isinstance(value, int):
mode_value = value
else:
raise TypeError("unsupported mode type: " + repr(type(value)))
if mode_value not in Monitor.POWER_MODES.values():
raise ValueError(f"cannot set reserved mode value: {mode_value}")
code = vcp.get_vcp_code_definition("display_power_mode")
self._set_vcp_feature(code, mode_value)
def get_vcps() -> List[Type[vcp.VCP]]:
"""
Discovers virtual control panels.
This function should not be used directly in most cases, use
:py:meth:`get_monitors()` or :py:meth:`iterate_monitors()` to
get monitors with VCPs.
Returns:
List of VCPs in a closed state.
Raises:
NotImplementedError: not implemented for your operating system
VCPError: failed to list VCPs
"""
if sys.platform == "win32" or sys.platform.startswith("linux"):
return vcp.get_vcps()
else:
raise NotImplementedError(f"not implemented for {sys.platform}")
def get_monitors() -> List[Monitor]:
"""
Creates a list of all monitors.
Returns:
List of monitors in a closed state.
Raises:
NotImplementedError: not implemented for your operating system
VCPError: failed to list VCPs
Example:
Setting the power mode of all monitors to standby::
for monitor in get_monitors():
try:
monitor.open()
# put monitor in standby mode
monitor.power_mode = "standby"
except VCPError:
print("uh-oh")
raise
finally:
monitor.close()
Setting all monitors to the maximum brightness using the
context manager::
for monitor in get_monitors():
with monitor as m:
# set back-light luminance to 100%
m.luminance = 100
"""
return [Monitor(v) for v in get_vcps()]
def iterate_monitors() -> Iterable[Monitor]:
"""
Iterates through all monitors, opening and closing the VCP for
each monitor.
Yields:
Monitor in an open state.
Raises:
NotImplementedError: not implemented for this platform
VCPError: failed to list VCPs
Example:
Setting all monitors to the maximum brightness::
for monitor in iterate_monitors():
monitor.luminance = 100
"""
for v in get_vcps():
monitor = Monitor(v)
with monitor:
yield monitor
| 30.031746 | 79 | 0.595455 | 5,947 | 0.628647 | 567 | 0.059937 | 2,764 | 0.292178 | 0 | 0 | 6,268 | 0.662579 |
328135201e01cdb2208c77c5703c4b619db0d327 | 6,201 | py | Python | algorithms/vae.py | ENSP-AI-Mentoring/machine-learning-algorithms | d53d5342f79d08066e158228cab6240872f61f72 | [
"Apache-2.0"
]
| 1 | 2021-11-14T19:46:46.000Z | 2021-11-14T19:46:46.000Z | algorithms/vae.py | ENSP-AI-Mentoring/machine-learning-algorithms | d53d5342f79d08066e158228cab6240872f61f72 | [
"Apache-2.0"
]
| null | null | null | algorithms/vae.py | ENSP-AI-Mentoring/machine-learning-algorithms | d53d5342f79d08066e158228cab6240872f61f72 | [
"Apache-2.0"
]
| null | null | null | import numpy as np
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader
from tqdm import tqdm
class VAE:
def __init__(
self,
train_data,
test_data,
in_dim,
encoder_width,
decoder_width,
latent_dim,
device=None,
):
# device
self.name = "VAE"
if device is None:
            device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self.device = device
self.latent_dim = latent_dim
self.encoder_width = encoder_width
self.decoder_width = decoder_width
self.in_dim = in_dim
# initialize encoder/decoder weights and biases
self.weights, self.biases = self.init_vae_params(
in_dim, encoder_width, decoder_width, latent_dim
)
# config dataset
self.train_data = train_data
data = next(iter(train_data))
self.example_size = data.size()
self.test_data = test_data
def train(self, batch_size, max_epoch, lr, weight_decay):
optimizer = self._get_optimizer(lr, weight_decay)
hist_loss = []
train_dataloader = DataLoader(
self.train_data, batch_size, shuffle=True, drop_last=True, num_workers=0
)
# print initial loss
data = next(iter(train_dataloader))
Xground = data.view((batch_size, -1)).to(self.device)
loss = self._vae_loss(Xground)
tk = tqdm(range(max_epoch))
for epoch in tk:
for ii, data in enumerate(train_dataloader):
Xground = data.view((batch_size, -1)).to(self.device)
optimizer.zero_grad()
loss = self._vae_loss(Xground)
# backward propagate
loss.backward()
optimizer.step()
hist_loss.append(loss.item())
tk.set_postfix({"val_loss": hist_loss[-1], "epoch": epoch})
return np.array(hist_loss)
def test1(self, batch_size):
"""data reconstruction test"""
test_dataloader = DataLoader(
self.test_data, batch_size, shuffle=True, drop_last=True, num_workers=0
)
data = next(iter(test_dataloader))
Xground = data.view((batch_size, -1)).to(self.device)
z_mean, z_logstd = self._encoding(Xground)
epsi = torch.randn(z_logstd.size()).to(self.device)
z_star = z_mean + torch.exp(0.5 * z_logstd) * epsi
        # _decoding already ends with a sigmoid activation, so it is not applied a second time here
        Xstar = self._decoding(z_star)
Xstar = Xstar.view(data.size())
return data, Xstar
def test2(self, batch_size):
"""distribution transformation test(generate artificial dataset from random noises)"""
Z = torch.randn((batch_size, self.latent_dim)).to(self.device)
Xstar = self._decoding(Z).view((-1, *self.example_size))
return Xstar
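    # Illustrative usage sketch (dataset objects and hyper-parameters below are
    # placeholders, not values taken from the original project):
    # vae = VAE(train_data, test_data, in_dim=784, encoder_width=256,
    #           decoder_width=256, latent_dim=20)
    # loss_history = vae.train(batch_size=64, max_epoch=10, lr=1e-3, weight_decay=0.0)
    # originals, reconstructions = vae.test1(batch_size=16)
    # generated = vae.test2(batch_size=16)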
def _vae_loss(self, Xground):
"""compute VAE loss = kl_loss + likelihood_loss"""
# KL loss
z_mean, z_logstd = self._encoding(Xground)
kl_loss = 0.5 * torch.sum(
1 + z_logstd - z_mean ** 2 - torch.exp(z_logstd), dim=1
)
# likelihood loss
epsi = torch.randn(z_logstd.size()).to(self.device)
z_star = z_mean + torch.exp(0.5 * z_logstd) * epsi # reparameterize trick
Xstar = self._decoding(z_star)
llh_loss = Xground * torch.log(1e-12 + Xstar) + (1 - Xground) * torch.log(
1e-12 + 1 - Xstar
)
llh_loss = torch.sum(llh_loss, dim=1)
var_loss = -torch.mean(kl_loss + llh_loss)
return var_loss
def _get_optimizer(self, lr, weight_decay):
opt_params = []
# adding weights to optimization paramters list
for k, v in self.weights.items():
opt_params.append({"params": v, "lr": lr})
# adding biases to optimization parameters list
for k, v in self.biases.items():
opt_params.append({"params": v, "lr": lr})
return Adam(opt_params, lr=lr, weight_decay=weight_decay)
def _encoding(self, X):
# Kingma Supplemtary C.2
output = (
torch.matmul(X, self.weights["encoder_hidden_w"])
+ self.biases["encoder_hidden_b"]
)
output = torch.tanh(output)
mean_output = (
torch.matmul(output, self.weights["latent_mean_w"])
+ self.biases["latent_mean_b"]
)
logstd_output = (
torch.matmul(output, self.weights["latent_std_w"])
+ self.biases["latent_std_b"]
)
return mean_output, logstd_output
def _decoding(self, Z):
output = (
torch.matmul(Z, self.weights["decoder_hidden_w"])
+ self.biases["decoder_hidden_b"]
)
output = torch.tanh(output)
Xstar = (
torch.matmul(output, self.weights["decoder_out_w"])
+ self.biases["decoder_out_b"]
)
Xstar = torch.sigmoid(Xstar)
return Xstar
def init_vae_params(self, in_dim, encoder_width, decoder_width, latent_dim):
weights = {
"encoder_hidden_w": self.xavier_init(in_dim, encoder_width),
"latent_mean_w": self.xavier_init(encoder_width, latent_dim),
"latent_std_w": self.xavier_init(encoder_width, latent_dim),
"decoder_hidden_w": self.xavier_init(latent_dim, decoder_width),
"decoder_out_w": self.xavier_init(decoder_width, in_dim),
}
biases = {
"encoder_hidden_b": self.xavier_init(1, encoder_width),
"latent_mean_b": self.xavier_init(1, latent_dim),
"latent_std_b": self.xavier_init(1, latent_dim),
"decoder_hidden_b": self.xavier_init(1, decoder_width),
"decoder_out_b": self.xavier_init(1, in_dim),
}
return weights, biases
def xavier_init(self, in_d, out_d):
xavier_stddev = np.sqrt(2.0 / (in_d + out_d))
W = torch.normal(
size=(in_d, out_d),
mean=0.0,
std=xavier_stddev,
requires_grad=True,
device=self.device,
)
return W
| 31.8 | 94 | 0.588776 | 6,075 | 0.979681 | 0 | 0 | 0 | 0 | 0 | 0 | 820 | 0.132237 |
32837c01862960b0796752083e66eefb2afb0c24 | 1,244 | py | Python | qfig.py | mth1haha/BlockchainQueueingNetwork | 611dc84b857efbec22edfe5f3a1bb8f7052a39aa | [
"Apache-2.0"
]
| 1 | 2021-11-30T08:22:43.000Z | 2021-11-30T08:22:43.000Z | qfig.py | mth1haha/BlockchainQueueingNetwork | 611dc84b857efbec22edfe5f3a1bb8f7052a39aa | [
"Apache-2.0"
]
| null | null | null | qfig.py | mth1haha/BlockchainQueueingNetwork | 611dc84b857efbec22edfe5f3a1bb8f7052a39aa | [
"Apache-2.0"
]
| 1 | 2020-11-25T08:48:25.000Z | 2020-11-25T08:48:25.000Z | import simpy as sp
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats, integrate
def client(env, lamda, q, tic):
meant = 1/lamda
while True:
t = np.random.exponential(meant)
yield env.timeout(t)
q.put('job')
tic.append(env.now)
def server(env, alpha, mu1, mu2, q, toc):
mean1 = 1/mu1
mean2 = 1/mu2
while True:
yield q.get()
p = np.random.uniform()
if p < alpha:
t = np.random.exponential(mean1)
else:
t = np.random.exponential(mean2)
yield env.timeout(t)
toc.append(env.now)
lamda = 75
alpha = 0.333
mu1 = 370
mu2 = 370*(0.666)
num_bins = 50
runtime = 1000 # how long to run the simulation
tic = [] # time points at which each job enters the system
toc = [] # time points at which each job leaves the system
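# Optional analytical check (illustrative sketch, not used by the rest of the script):
# the service time is hyperexponential, so its mean is E[S] = alpha/mu1 + (1 - alpha)/mu2
# and the offered load is rho = lamda * E[S]; rho < 1 indicates a stable queue.
# mean_service = alpha / mu1 + (1 - alpha) / mu2
# rho = lamda * mean_service
# print(f"E[S] = {mean_service:.5f}, rho = {rho:.3f}")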
env = sp.Environment()
q = sp.Store(env)
env.process(client(env, lamda, q, tic))
env.process(server(env, alpha, mu1, mu2, q, toc))
env.run(until=runtime)
l = len(tic)
a = toc
b = toc
#b = toc[0:l:40]
histdata = [b[i] - b[i-1] for i in range(1, len(b))]
sns.distplot(histdata, kde=False, fit=stats.expon)
plt.xlabel("inter departure time (s)")
plt.xlim(0,0.15)
#plt.ylim(0,100)
plt.savefig('dist1.png')
plt.show()
#plt.hist(histdata, num_bins)
#plt.show()
| 20.393443 | 52 | 0.619775 | 0 | 0 | 516 | 0.396923 | 0 | 0 | 0 | 0 | 201 | 0.154615 |
328382e2d62ec49094cab44e02a8b760c1f9a700 | 4,756 | py | Python | all_words.py | secureterminal/100-Days-of-Code | 04383ae541938d8a551b5aac9a0dad3348a6ef23 | [
"MIT"
]
| 1 | 2022-01-28T13:55:39.000Z | 2022-01-28T13:55:39.000Z | Day 7/all_words.py | secureterminal/100-Days-of-Code | 04383ae541938d8a551b5aac9a0dad3348a6ef23 | [
"MIT"
]
| 1 | 2022-02-02T00:13:18.000Z | 2022-02-03T11:32:53.000Z | Day 7/all_words.py | secureterminal/100-Days-of-Code | 04383ae541938d8a551b5aac9a0dad3348a6ef23 | [
"MIT"
]
| 2 | 2022-02-07T20:49:36.000Z | 2022-02-19T21:22:15.000Z | word_list = ['pseudolamellibranchiate',
'microcolorimetrically',
'pancreaticoduodenostomy',
'theologicoastronomical',
'pancreatoduodenectomy',
'tetraiodophenolphthalein',
'choledocholithotripsy',
'hematospectrophotometer',
'deintellectualization',
'pharyngoepiglottidean',
'psychophysiologically',
'pathologicopsychological',
'pseudomonocotyledonous',
'philosophicohistorical',
'Pseudolamellibranchia',
'chlamydobacteriaceous',
'cholecystoduodenostomy',
'anemometrographically',
'duodenopancreatectomy',
'dacryocystoblennorrhea',
'thymolsulphonephthalein',
'aminoacetophenetidine',
'ureterocystanastomosis',
'undistinguishableness',
'disestablishmentarian',
'cryptocrystallization',
'scientificogeographical',
'chemicopharmaceutical',
'overindustrialization',
'counterinterpretation',
'superincomprehensible',
'dacryocystorhinostomy',
'choledochoduodenostomy',
'cholecystogastrostomy',
'photochronographically',
'philosophicoreligious',
'scleroticochoroiditis',
'pyopneumocholecystitis',
'crystalloluminescence',
'phoneticohieroglyphic',
'historicogeographical',
'counterreconnaissance',
'pathologicoanatomical',
'omnirepresentativeness',
'establishmentarianism',
'glossolabiopharyngeal',
'pseudohermaphroditism',
'anthropoclimatologist',
'cholecystojejunostomy',
'epididymodeferentectomy',
'pericardiomediastinitis',
'cholecystolithotripsy',
'tessarescaedecahedron',
'electrotelethermometer',
'pharmacoendocrinology',
'poliencephalomyelitis',
'duodenocholedochotomy',
'cholecystonephrostomy',
'formaldehydesulphoxylate',
'dacryocystosyringotomy',
'counterpronunciamento',
'cholecystenterorrhaphy',
'deanthropomorphization',
'microseismometrograph',
'pseudoparthenogenesis',
'Pseudolamellibranchiata',
'ureteropyelonephritis',
'electroencephalography',
'anticonstitutionalist',
'electroencephalograph',
'hypsidolichocephalism',
'mandibulosuspensorial',
'acetylphenylhydrazine',
'hexanitrodiphenylamine',
'historicocabbalistical',
'hexachlorocyclohexane',
'anatomicophysiological',
'pseudoanthropological',
'microcryptocrystalline',
'lymphangioendothelioma',
'nonrepresentationalism',
'blepharoconjunctivitis',
'hydropneumopericardium',
'stereoroentgenography',
'otorhinolaryngologist',
'scientificohistorical',
'phenolsulphonephthalein',
'mechanicointellectual',
'counterexcommunication',
'duodenocholecystostomy',
'noninterchangeability',
'thermophosphorescence',
'naphthylaminesulphonic',
'polioencephalomyelitis',
'stereophotomicrograph',
'philosophicotheological',
'theologicometaphysical',
'benzalphenylhydrazone',
'scleroticochorioiditis',
'anthropomorphologically',
'thyroparathyroidectomize',
'disproportionableness',
'heterotransplantation',
'membranocartilaginous',
'scientificophilosophical',
'thyroparathyroidectomy',
'enterocholecystostomy',
'Prorhipidoglossomorpha',
'constitutionalization',
'poluphloisboiotatotic',
'anatomicopathological',
'zoologicoarchaeologist',
'protransubstantiation',
'labioglossopharyngeal',
'pneumohydropericardium',
'choledochoenterostomy',
'zygomaticoauricularis',
'anthropomorphological',
'stereophotomicrography',
'aquopentamminecobaltic',
'hexamethylenetetramine',
'macracanthrorhynchiasis',
'palaeodendrologically',
'intertransformability',
'hyperconscientiousness',
'laparocolpohysterotomy',
'indistinguishableness',
'formaldehydesulphoxylic',
'blepharosphincterectomy',
'transubstantiationalist',
'transubstantiationite',
'prostatovesiculectomy',
'pathologicohistological',
'platydolichocephalous',
'pneumoventriculography',
'photochromolithograph',
'gastroenteroanastomosis',
'chromophotolithograph',
'pentamethylenediamine',
'historicophilosophica',
'intellectualistically',
'gastroenterocolostomy',
'pancreaticogastrostomy',
'appendorontgenography',
'photospectroheliograph']
stages = ['''
+---+
| |
O |
/|\ |
/ \ |
|
=========
''', '''
+---+
| |
O |
/|\ |
/ |
|
=========
''', '''
+---+
| |
O |
/|\ |
|
|
=========
''', '''
+---+
| |
O |
/| |
|
|
=========''', '''
+---+
| |
O |
| |
|
|
=========
''', '''
+---+
| |
O |
|
|
|
=========
''', '''
+---+
| |
|
|
|
|
=========
''']
logo = '''
_
| |
| |__ __ _ _ __ __ _ _ __ ___ __ _ _ __
| '_ \ / _` | '_ \ / _` | '_ ` _ \ / _` | '_ \
| | | | (_| | | | | (_| | | | | | | (_| | | | |
|_| |_|\__,_|_| |_|\__, |_| |_| |_|\__,_|_| |_|
__/ |
|___/
''' | 21.716895 | 47 | 0.676409 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,270 | 0.897813 |
32839d586b1955e1c6b167959e736b233c1def5e | 363 | py | Python | vandal/objects/__init__.py | vandal-dev/vandal | 1981c86f4de6632776a4132ecbc206fac5188f32 | [
"Apache-2.0"
]
| 1 | 2022-02-22T18:39:57.000Z | 2022-02-22T18:39:57.000Z | vandal/objects/__init__.py | vandal-dev/vandal | 1981c86f4de6632776a4132ecbc206fac5188f32 | [
"Apache-2.0"
]
| null | null | null | vandal/objects/__init__.py | vandal-dev/vandal | 1981c86f4de6632776a4132ecbc206fac5188f32 | [
"Apache-2.0"
]
| null | null | null | # import all relevant contents from the associated module.
from vandal.objects.montecarlo import (
MonteCarlo,
MCapp,
)
from vandal.objects.eoq import (
EOQ,
EOQapp,
)
from vandal.objects.dijkstra import Dijkstra
# all relevant contents.
__all__ = [
'MonteCarlo',
'EOQ',
'Dijkstra',
'MCapp',
'EOQapp',
]
| 16.5 | 59 | 0.628099 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 126 | 0.347107 |
3283d11b9d4cf8bd45f4150291dcecd926809bd7 | 124 | py | Python | authentication/admin.py | jatingupta14/cruzz | 9a00f1555cdd5c76c9ef250d7037d72d725de367 | [
"MIT"
]
| 7 | 2018-11-09T14:40:54.000Z | 2019-12-20T08:10:17.000Z | authentication/admin.py | jatingupta14/cruzz | 9a00f1555cdd5c76c9ef250d7037d72d725de367 | [
"MIT"
]
| 25 | 2018-11-30T17:38:36.000Z | 2018-12-27T17:21:09.000Z | authentication/admin.py | jatingupta14/cruzz | 9a00f1555cdd5c76c9ef250d7037d72d725de367 | [
"MIT"
]
| 6 | 2018-12-03T14:44:29.000Z | 2018-12-26T11:49:43.000Z | # Django
from django.contrib import admin
# local Django
from authentication.models import User
admin.site.register(User)
| 15.5 | 38 | 0.806452 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.177419 |
328414bd2a696253fdce02e567455456707be002 | 480 | py | Python | src/dlkp/datasets/__init__.py | midas-research/dlkp | 5f47a780a6b05a71f799287d8ad612542a897047 | [
"MIT"
]
| 2 | 2022-03-12T15:08:55.000Z | 2022-03-14T09:11:43.000Z | src/dlkp/datasets/__init__.py | midas-research/dlkp | 5f47a780a6b05a71f799287d8ad612542a897047 | [
"MIT"
]
| 14 | 2022-02-19T07:42:09.000Z | 2022-03-20T21:43:42.000Z | src/dlkp/datasets/__init__.py | midas-research/dlkp | 5f47a780a6b05a71f799287d8ad612542a897047 | [
"MIT"
]
| null | null | null | class KPDatasets:
def __init__(self) -> None:
pass
def get_train_dataset(self):
if "train" not in self.datasets:
return None
return self.datasets["train"]
def get_eval_dataset(self):
if "validation" not in self.datasets:
return None
return self.datasets["validation"]
def get_test_dataset(self):
if "test" not in self.datasets:
return None
return self.datasets["test"]
| 25.263158 | 45 | 0.597917 | 479 | 0.997917 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.104167 |
3287cec655cdef3ec14897e557822dfcd28c5019 | 84 | py | Python | nv/__init__.py | 3stack-software/nv | 7b00fb857aea238ed060a9eb017e351aac19258e | [
"Apache-2.0"
]
| null | null | null | nv/__init__.py | 3stack-software/nv | 7b00fb857aea238ed060a9eb017e351aac19258e | [
"Apache-2.0"
]
| 1 | 2017-06-19T00:52:37.000Z | 2017-06-19T00:52:37.000Z | nv/__init__.py | 3stack-software/nv | 7b00fb857aea238ed060a9eb017e351aac19258e | [
"Apache-2.0"
]
| null | null | null | from .__version__ import __version__
from .core import create, remove, launch_shell
| 28 | 46 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
32885105782d33bbebe4c4cc904fbc2149735713 | 784 | py | Python | app/live/tests.py | B-ROY/TESTGIT | 40221cf254c90d37d21afb981635740aebf11949 | [
"Apache-2.0"
]
| 2 | 2017-12-02T13:58:30.000Z | 2018-08-02T17:07:59.000Z | app/live/tests.py | B-ROY/TESTGIT | 40221cf254c90d37d21afb981635740aebf11949 | [
"Apache-2.0"
]
| null | null | null | app/live/tests.py | B-ROY/TESTGIT | 40221cf254c90d37d21afb981635740aebf11949 | [
"Apache-2.0"
]
| null | null | null | import os
import unittest
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.flatpages.models import FlatPage
from django.test import TestCase
from django.test.utils import override_settings
class HeyDoAppTest(unittest.TestCase):
def createUser(self):
is_new, user = User.create_user(
openid="1234567890",
source=1,
nick="username",
gender=1,
ip=self.request.remote_ip,
province="",
city="",
country="",
headimgurl="",
)
#success,message = QCloudIM.account_import(user)
return is_new, user
if __name__ == '__main__':
unittest.main()
| 27.034483 | 60 | 0.632653 | 444 | 0.566327 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.112245 |
328b211073d9f2b0d84385aebf512b9639d8569d | 1,133 | py | Python | application/utils/data_transfer_objects.py | charles-crawford/sentiment | 38cfd6af1cc81ad1858621a182cd76dc3e5f04db | [
"MIT"
]
| null | null | null | application/utils/data_transfer_objects.py | charles-crawford/sentiment | 38cfd6af1cc81ad1858621a182cd76dc3e5f04db | [
"MIT"
]
| null | null | null | application/utils/data_transfer_objects.py | charles-crawford/sentiment | 38cfd6af1cc81ad1858621a182cd76dc3e5f04db | [
"MIT"
]
| null | null | null | from flask_restx.fields import String, Boolean, Raw, List, Float, Nested
class DataTransferObjects:
def __init__(self, ns):
self.ns = ns
self.general_responses = {200: 'OK',
404: "Resource not found",
400: "Bad Request",
500: "Internal Server Error"}
self.plain_text = self.ns.model('plain_text', {
'plain_text': String(example='some sample text')
})
self.text_list = self.ns.model('text_list', {
'text_list': List(String(), example=['This is the first sentence.', 'This is the second sentence.'])
})
self.label = self.ns.model('label', {
'value': String(example='POSITIVE'),
'confidence': Float(example=.9)
})
self.prediction = self.ns.model('prediction', {
'text': String(example='some sample text'),
'labels': List(Nested(self.label))
})
self.predictions = self.ns.model('predictions', {
'predictions': List(Nested(self.prediction))
})
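# Illustrative sketch of how these models are typically wired into a flask-restx
# resource (route and resource names are assumptions, not part of this module):
# from flask_restx import Resource
# dto = DataTransferObjects(ns)
# @ns.route("/predict")
# class Predict(Resource):
#     @ns.expect(dto.text_list, validate=True)
#     @ns.marshal_with(dto.predictions)
#     def post(self):
#         ...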
| 33.323529 | 112 | 0.529568 | 1,057 | 0.932921 | 0 | 0 | 0 | 0 | 0 | 0 | 289 | 0.255075 |
328faff3ddad6381d560dd2330552d383362af7f | 91 | py | Python | utils.py | Spratiher9/newsnuggets | 1147e55a9a0c8a483384711840462b1526cf7681 | [
"MIT"
]
| 1 | 2021-11-17T19:18:42.000Z | 2021-11-17T19:18:42.000Z | utils.py | Spratiher9/newsnuggets | 1147e55a9a0c8a483384711840462b1526cf7681 | [
"MIT"
]
| null | null | null | utils.py | Spratiher9/newsnuggets | 1147e55a9a0c8a483384711840462b1526cf7681 | [
"MIT"
]
| null | null | null | from gnews import GNews
def get_client():
news_client = GNews()
return news_client | 18.2 | 25 | 0.725275 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
329003760fc6877a5fb340f8c2de344d9c2c4d3e | 13,284 | py | Python | grover.py | raulillo82/TFG-Fisica-2021 | 8acfd748c7f49ea294606a9c185227927ec2e256 | [
"MIT"
]
| null | null | null | grover.py | raulillo82/TFG-Fisica-2021 | 8acfd748c7f49ea294606a9c185227927ec2e256 | [
"MIT"
]
| null | null | null | grover.py | raulillo82/TFG-Fisica-2021 | 8acfd748c7f49ea294606a9c185227927ec2e256 | [
"MIT"
]
| null | null | null | #!/usr/bin/python3
'''
* Copyright (C) 2021 Raúl Osuna Sánchez-Infante
*
* This software may be modified and distributed under the terms
* of the MIT license. See the LICENSE.txt file for details.
'''
##################
#Needed libraries#
##################
import matplotlib as mpl
mpl.use('TkAgg')
import matplotlib.pyplot as plt
import qiskit as q
import sys
from qiskit.visualization import plot_histogram
from qiskit.providers.ibmq import least_busy
from random import getrandbits
'''
Grover's algorithm. Intro
'''
#######################
#Functions definitions#
#######################
'''
Usage function
Calling the program with "-h" or "--help" will display the help without returning an error (help was intended).
Calling the program with no options or wrong ones will display the same help but return an error.
Please bear in mind that some combinations of options are simply ignored; see the text of this function itself.
'''
def usage():
print("Usage: " + str((sys.argv)[0]) + " i j k l")
print("i: Number of qubits (2 or 3, will yield error if different)")
print("j: Number of solutions (only taken into account if i=3, otherwise ignored). Can only be 1 or 2, will yield error otherwise")
print("k: Number of iterations (only taken into account for i=3 and j=1, othwerise ignored). Can only be 1 or 2, will yield error otherwise")
print("l: Perform computations in real quantum hardware, can only be 0 (no) or 1 (yes), will yield error otherwise")
if len(sys.argv) == 2 and (str((sys.argv)[1]) == "-h" or str((sys.argv)[1]) == "--help"):
exit(0)
else:
exit(1)
'''
Check whether parameter is an integer
'''
def is_intstring(s):
try:
int(s)
return True
except ValueError:
return False
'''
Initialization:
Simply apply an H gate to every qubit
'''
def initialize():
if len(sys.argv) == 1:
print ("No arguments given")
usage()
elif len(sys.argv) > 5 or str((sys.argv)[1]) == "-h" or str((sys.argv)[1]) == "--help" or (not (is_intstring(sys.argv[1]))) or (int((sys.argv)[1]) != 2 and (int((sys.argv)[1]) != 3)):
#elif (int((sys.argv)[1]) != 2 and (int((sys.argv)[1]) != 3)):
usage()
else:
#print ("Rest of cases")
for arg in sys.argv[2:]:
if not is_intstring(arg):
sys.exit("All arguments must be integers. Exit.")
qc = q.QuantumCircuit((sys.argv)[1])
#Apply a H-gate to all qubits in qc
for i in range(qc.num_qubits):
qc.h(i)
qc.barrier()
return qc
'''
Implement a multi-controlled Z-gate, easy to reuse
'''
def mctz(qc):
qc.h(2)
qc.mct(list(range(2)), 2)
qc.h(2)
'''
Oracle metaimplementation
This function will simply call one of the possible oracle functions
'''
def oracle (qc):
    #Generate some random bits and implement the oracle according to the result
bits=getrandbits(qc.num_qubits)
#2 qubits
if int((sys.argv)[1]) == 2:
print("Random bits to search for are (decimal representation): " + str(bits))
oracle_2_qubits(qc,bits)
#3 qubits
elif int((sys.argv)[1]) == 3:
#Single solution
if int((sys.argv)[2]) == 1:
'''
Explanation:
less than sqrt(N) iterations will be needed (so will need to "floor" (truncate) the result)
            As 2 < sqrt(8) < 3 --> n=2 for maximal prob (p=121/128≈94.5%, not quite 100%). With n=1, p=0.78125=78.125%
In the classical case, p=1/4=25% (single query followed by a random guess: 1/8 + 7/8 · 1/7 = 1/4 = 25%)
            Classical results with two runs, p = 1/8 + 7/8·1/7 + 7/8·6/7·1/6 = 1/8 + 1/8 + 1/8 = 3/8 = 0.375 = 37.5%
'''
print("Random bits to search for are (decimal representation): " + str(bits))
#Check whether 1 or 2 iterations were requested
if (int((sys.argv)[3]) == 1) or (int((sys.argv)[3]) == 2):
iterations = int((sys.argv)[3])
for i in range(iterations):
oracle_3_qubits_single_solution(qc,bits)
                    diffusion(qc)
#For any other case, wrong arguments were used, exit
else:
usage()
#2 possible solutions
elif int((sys.argv)[2]) == 2:
'''
Explanation:
less than sqrt(N/M) times (M=2 different results to look for) will be needed (so will need to "floor" (truncate) the result)
            As sqrt(8/2) = 2 --> n=1 for a theoretical 100% prob. In the classical case, 13/28 = 46.4%
'''
#A list instead of a single element will be used, initialize it with the previous value as first element
bits=[bits]
#Generate the second element, also randomly
bits.append(getrandbits(qc.num_qubits))
#Elements have to be different, regenerate as many times as needed till different
while bits[0] == bits[1]:
bits[1]=getrandbits(3)
#When done, sort the list of random bits. Order does not matter for our upcoming permutations
bits.sort()
print("Random bits to search for are (decimal representation): " + str(bits[0]) + " and " + str(bits[1]))
oracle_3_qubits_2_solutions(qc,bits)
#Algorithm only implemented for 1 or 2 possible solution(s), exit if something different requested
else:
usage()
    #Algorithm only implemented for 2 or 3 qubits, exit if something different requested
else:
usage()
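'''
Side note on the probabilities quoted in the comments above: they follow from the standard
Grover amplitude formula p(k) = sin^2((2k+1)*theta) with sin(theta) = sqrt(M/N), where N is
the search-space size and M the number of solutions. The helper below is only an
illustrative sketch of that formula; it is not used by the rest of the program.
'''
def grover_success_probability(n_qubits, n_solutions, iterations):
    from math import asin, sin, sqrt
    theta = asin(sqrt(n_solutions / 2 ** n_qubits))
    return sin((2 * iterations + 1) * theta) ** 2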
'''
Oracle implementation for 2 qubits.
Simply a controlled-Z gate (cz in qiskit).
For bits of the searched state that are 0, an x-gate is needed before and after the cz-gate
'''
def oracle_2_qubits(qc,bits):
if bits == 0: #00
qc.x(0)
qc.x(1)
qc.cz(0, 1)
qc.x(0)
qc.x(1)
elif bits == 1: #01
qc.x(1)
qc.cz(0,1)
qc.x(1)
elif bits == 2: #10
qc.x(0)
qc.cz(0,1)
qc.x(0)
elif bits == 3: #11
qc.cz(0,1)
qc.barrier()
'''
Oracle implementation for 3 qubits and single solution.
Reference for oracles: https://www.nature.com/articles/s41467-017-01904-7 (table 1)
'''
def oracle_3_qubits_single_solution(qc,bits):
if bits == 0:
for i in range(3):
qc.x(i)
mctz(qc)
for i in range(3):
qc.x(i)
elif bits == 1:
for i in range(1, 3):
qc.x(i)
mctz(qc)
for i in range(1, 3):
qc.x(i)
elif bits == 2:
for i in range(0, 3, 2):
qc.x(i)
mctz(qc)
for i in range(0, 3, 2):
qc.x(i)
elif bits == 3:
qc.x(2)
mctz(qc)
qc.x(2)
elif bits == 4:
for i in range(2):
qc.x(i)
mctz(qc)
for i in range(2):
qc.x(i)
elif bits == 5:
qc.x(1)
mctz(qc)
qc.x(1)
elif bits == 6:
qc.x(0)
mctz(qc)
qc.x(0)
elif bits == 7:
mctz(qc)
qc.barrier()
'''
Oracle implementation for 3 qubits and two possible solutions.
Reference for oracles: https://www.nature.com/articles/s41467-017-01904-7 (table 2)
'''
def oracle_3_qubits_2_solutions(qc,bits):
if (bits[0] == 0 and bits[1] == 1):
for i in range(1,3):
qc.z(i)
qc.cz(1, 2)
elif (bits[0] == 0 and bits[1] == 2):
for i in range(0, 3, 2):
qc.z(i)
qc.cz(0, 2)
elif (bits[0] == 0 and bits[1] == 3):
for i in range(3):
qc.z(i)
qc.cz(1, 2)
qc.cz(0, 2)
elif (bits[0] == 0 and bits[1] == 4):
for i in range(2):
qc.z(i)
qc.cz(0, 1)
elif (bits[0] == 0 and bits[1] == 5):
for i in range(3):
qc.z(i)
qc.cz(1, 2)
qc.cz(0, 1)
elif (bits[0] == 0 and bits[1] == 6):
for i in range(3):
qc.z(i)
qc.cz(0, 2)
qc.cz(0, 1)
elif (bits[0] == 0 and bits[1] == 7):
for i in range(3):
qc.z(i)
qc.cz(1, 2)
qc.cz(0, 2)
qc.cz(0, 1)
elif (bits[0] == 1 and bits[1] == 2):
for i in range(2):
qc.z(i)
qc.cz(1, 2)
qc.cz(0, 2)
elif (bits[0] == 1 and bits[1] == 3):
qc.z(0)
qc.cz(0, 2)
elif (bits[0] == 1 and bits[1] == 4):
for i in range(0, 3, 2):
qc.z(i)
qc.cz(1, 2)
qc.cz(0, 1)
elif (bits[0] == 1 and bits[1] == 5):
qc.z(0)
qc.cz(0, 1)
elif (bits[0] == 1 and bits[1] == 6):
qc.z(0)
qc.cz(1, 2)
qc.cz(0, 2)
qc.cz(0, 1)
elif (bits[0] == 1 and bits[1] == 7):
qc.z(0)
qc.cz(0, 2)
qc.cz(0, 1)
elif (bits[0] == 2 and bits[1] == 3):
qc.z(1)
qc.cz(1, 2)
elif (bits[0] == 2 and bits[1] == 4):
for i in range(1,3):
qc.z(i)
qc.cz(0, 2)
qc.cz(0, 1)
elif (bits[0] == 2 and bits[1] == 5):
qc.z(1)
qc.cz(1, 2)
qc.cz(0, 2)
qc.cz(0, 1)
elif (bits[0] == 2 and bits[1] == 6):
qc.z(1)
qc.cz(0, 1)
elif (bits[0] == 2 and bits[1] == 7):
qc.z(1)
qc.cz(1, 2)
qc.cz(0, 1)
elif (bits[0] == 3 and bits[1] == 4):
qc.z(2)
qc.cz(1, 2)
qc.cz(0, 2)
qc.cz(0, 1)
elif (bits[0] == 3 and bits[1] == 5):
qc.cz(0, 2)
qc.cz(0, 1)
elif (bits[0] == 3 and bits[1] == 6):
qc.cz(1, 2)
qc.cz(0, 1)
elif (bits[0] == 3 and bits[1] == 7):
qc.cz(0, 1)
elif (bits[0] == 4 and bits[1] == 5):
qc.z(2)
qc.cz(1, 2)
elif (bits[0] == 4 and bits[1] == 6):
qc.z(2)
qc.cz(0, 2)
elif (bits[0] == 4 and bits[1] == 7):
qc.z(2)
qc.cz(1, 2)
qc.cz(0, 2)
elif (bits[0] == 5 and bits[1] == 6):
qc.cz(1, 2)
qc.cz(0, 2)
elif (bits[0] == 5 and bits[1] == 7):
qc.cz(0, 2)
elif (bits[0] == 6 and bits[1] == 7):
qc.cz(1, 2)
qc.barrier()
'''
Diffusion operator: Flip sign and amplify
For 2 qubits, simply apply H and Z to each qubit, then cz, and then apply H again to each qubit:
'''
def diffusion(qc):
if qc.num_qubits == 2:
qc.h(0)
qc.h(1)
qc.z(0)
qc.z(1)
qc.cz(0,1)
qc.h(0)
qc.h(1)
elif qc.num_qubits == 3:
#Apply diffusion operator
for i in range(3):
qc.h(i)
qc.x(i)
# multi-controlled-toffoli
mctz(qc)
qc.barrier()
for i in range(3):
qc.x(i)
qc.h(i)
#qc.barrier()
'''
Add measurements and plot the quantum circuit:
'''
def measure(qc):
qc.measure_all()
qc.draw('mpl')
plt.draw()
plt.title("Quantum Circuit")
'''
Generate results from quantum simulator (no plotting)
'''
def results_qsim(qc):
backend = q.Aer.get_backend('qasm_simulator')
job = q.execute(qc, backend, shots = 1024)
return job
'''
Generate results from real quantum hardware (no plotting)
'''
def results_qhw(qc):
'''
    #Only needed if credentials are not stored (e.g., deleted and regeneration is needed)
token='XXXXXXXX' #Use token from ibm quantum portal if needed to enable again, should be stored under ~/.qiskit directory
q.IBMQ.save_account(token)
'''
provider = q.IBMQ.load_account()
provider = q.IBMQ.get_provider()
device = q.providers.ibmq.least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 3 and
not x.configuration().simulator and x.status().operational==True))
print("Running on current least busy device: ", device)
transpiled_grover_circuit = q.transpile(qc, device, optimization_level=3)
qobj = q.assemble(transpiled_grover_circuit)
job = device.run(qobj)
q.tools.monitor.job_monitor(job, interval=2)
return job
'''
Plot results
'''
def draw_job (job,title):
result = job.result()
counts = result.get_counts()
plot_histogram(counts)
plt.draw()
plt.title(title)
##############################
#End of functions definitions#
##############################
################################
#Program actually starts here!!#
################################
#Initialization
grover_circuit = initialize()
#Generate the oracle randomly according to the command line arguments
oracle(grover_circuit)
#Diffusion
if (not(int(sys.argv[1]) == 3 and int(sys.argv[2]) == 1)):
diffusion(grover_circuit)
#Add measurements
measure(grover_circuit)
#Generate results in simulator
job_sim = results_qsim(grover_circuit)
#Plot these results
draw_job(job_sim, "Quantum simulator output")
#Generate results in quantum hw if requested
if int(sys.argv[4]) == 1:
plt.show(block=False)
plt.draw()
#Next line needed for keeping computations in background while still seeing the previous plots
plt.pause(0.001)
#Generate results in real quantum hardware
job_qhw = results_qhw(grover_circuit)
#Plot these results as well
draw_job(job_qhw, "Quantum hardware output")
#Keep plots active when done till they're closed, used for explanations during presentations
plt.show()
| 29.851685 | 187 | 0.546522 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,201 | 0.391376 |
3291b0fa03bb75af83a902f66fc3f91285f8e9a3 | 9,147 | py | Python | TM1py/Services/GitService.py | adscheevel/tm1py | 8a53c7a63e3c0e2c6198c2cd0c2f57d10a7cfe43 | [
"MIT"
]
| 113 | 2019-03-12T19:42:39.000Z | 2022-03-31T22:40:05.000Z | TM1py/Services/GitService.py | adscheevel/tm1py | 8a53c7a63e3c0e2c6198c2cd0c2f57d10a7cfe43 | [
"MIT"
]
| 459 | 2019-01-25T09:32:18.000Z | 2022-03-24T21:57:16.000Z | TM1py/Services/GitService.py | adscheevel/tm1py | 8a53c7a63e3c0e2c6198c2cd0c2f57d10a7cfe43 | [
"MIT"
]
| 107 | 2019-01-31T15:08:34.000Z | 2022-03-16T14:58:38.000Z | # -*- coding: utf-8 -*-
import json
from typing import List
from TM1py.Objects.Git import Git
from TM1py.Objects.GitCommit import GitCommit
from TM1py.Objects.GitPlan import GitPushPlan, GitPullPlan, GitPlan
from TM1py.Services.ObjectService import ObjectService
from TM1py.Services.RestService import RestService, Response
from TM1py.Utils.Utils import format_url
class GitService(ObjectService):
""" Service to interact with GIT
"""
COMMON_PARAMETERS = {'username': 'Username', 'password': 'Password', 'message': 'Message', 'author': 'Author',
'email': 'Email', 'branch': 'Branch', 'new_branch': 'NewBranch', 'force': 'Force',
'public_key': 'PublicKey', 'private_key': 'PrivateKey', 'passphrase': 'Passphrase',
'config': 'Config'}
def __init__(self, rest: RestService):
super().__init__(rest)
def git_init(self, git_url: str, deployment: str, username: str = None, password: str = None,
public_key: str = None, private_key: str = None, passphrase: str = None, force: bool = None,
config: dict = None, **kwargs) -> Git:
""" Initialize GIT service, returns Git object
:param git_url: file or http(s) path to GIT repository
:param deployment: name of selected deployment group
:param username: GIT username
:param password: GIT password
:param public_key: SSH public key, available from PAA V2.0.9.4
:param private_key: SSH private key, available from PAA V2.0.9.4
:param passphrase: Passphrase for decrypting private key, if set
:param force: reset git context on True
:param config: Dictionary containing git configuration parameters
"""
url = "/api/v1/GitInit"
body = {'URL': git_url, 'Deployment': deployment}
for key, value in locals().items():
if value is not None and key in self.COMMON_PARAMETERS.keys():
body[self.COMMON_PARAMETERS.get(key)] = value
body_json = json.dumps(body)
response = self._rest.POST(url=url, data=body_json, **kwargs)
return Git.from_dict(response.json())
def git_uninit(self, force: bool = False, **kwargs):
""" Unitialize GIT service
:param force: clean up git context when True
"""
url = "/api/v1/GitUninit"
body = json.dumps(force)
return self._rest.POST(url=url, data=body, **kwargs)
def git_status(self, username: str = None, password: str = None, public_key: str = None, private_key: str = None,
passphrase: str = None, **kwargs) -> Git:
""" Get GIT status, returns Git object
:param username: GIT username
:param password: GIT password
:param public_key: SSH public key, available from PAA V2.0.9.4
:param private_key: SSH private key, available from PAA V2.0.9.4
:param passphrase: Passphrase for decrypting private key, if set
"""
url = "/api/v1/GitStatus"
body = {}
for key, value in locals().items():
if value is not None and key in self.COMMON_PARAMETERS.keys():
body[self.COMMON_PARAMETERS.get(key)] = value
response = self._rest.POST(url=url, data=json.dumps(body), **kwargs)
return Git.from_dict(response.json())
def git_push(self, message: str, author: str, email: str, branch: str = None, new_branch: str = None,
force: bool = False, username: str = None, password: str = None, public_key: str = None,
private_key: str = None, passphrase: str = None, execute: bool = None, **kwargs) -> Response:
""" Creates a gitpush plan, returns response
:param message: Commit message
:param author: Name of commit author
:param email: Email of commit author
:param branch: The branch which last commit will be used as parent commit for new branch.
Must be empty if GIT repo is empty
:param new_branch: If specified, creates a new branch and pushes the commit onto it. If not specified,
pushes to the branch specified in "Branch"
:param force: A flag passed in for evaluating preconditions
:param username: GIT username
:param password: GIT password
:param public_key: SSH public key, available from PAA V2.0.9.4
:param private_key: SSH private key, available from PAA V2.0.9.4
:param passphrase: Passphrase for decrypting private key, if set
:param execute: Executes the plan right away if True
"""
url = "/api/v1/GitPush"
body = {}
for key, value in locals().items():
if value is not None and key in self.COMMON_PARAMETERS.keys():
body[self.COMMON_PARAMETERS.get(key)] = value
response = self._rest.POST(url=url, data=json.dumps(body), **kwargs)
if execute:
plan_id = json.loads(response.content).get('ID')
self.git_execute_plan(plan_id=plan_id)
return response
def git_pull(self, branch: str, force: bool = None, execute: bool = None, username: str = None,
password: str = None, public_key: str = None, private_key: str = None, passphrase: str = None,
**kwargs) -> Response:
""" Creates a gitpull plan, returns response
:param branch: The name of source branch
:param force: A flag passed in for evaluating preconditions
:param execute: Executes the plan right away if True
:param username: GIT username
:param password: GIT password
:param public_key: SSH public key, available from PAA V2.0.9.4
:param private_key: SSH private key, available from PAA V2.0.9.4
:param passphrase: Passphrase for decrypting private key, if set
"""
url = "/api/v1/GitPull"
body = {}
for key, value in locals().items():
if value is not None and key in self.COMMON_PARAMETERS.keys():
body[self.COMMON_PARAMETERS.get(key)] = value
body_json = json.dumps(body)
response = self._rest.POST(url=url, data=body_json, **kwargs)
if execute:
plan_id = json.loads(response.content).get('ID')
self.git_execute_plan(plan_id=plan_id)
return response
def git_execute_plan(self, plan_id: str, **kwargs) -> Response:
""" Executes a plan based on the planid
:param plan_id: GitPlan id
"""
url = format_url("/api/v1/GitPlans('{}')/tm1.Execute", plan_id)
return self._rest.POST(url=url, **kwargs)
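    # Illustrative usage sketch (URL, deployment name and credentials are placeholders;
    # `rest` is assumed to be an already configured RestService instance):
    # git_service = GitService(rest)
    # git_service.git_init(git_url="https://example.com/repo.git", deployment="dev",
    #                      username="user", password="token")
    # git_service.git_push(message="commit message", author="name", email="[email protected]",
    #                      new_branch="feature", execute=True)
    # git_service.git_pull(branch="main", execute=True)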
def git_get_plans(self, **kwargs) -> List[GitPlan]:
""" Gets a list of currently available GIT plans
"""
url = "/api/v1/GitPlans"
plans = []
response = self._rest.GET(url=url, **kwargs)
# Every individual plan is wrapped in a "value" parent, iterate through those to get the actual plans
for plan in response.json().get('value'):
plan_id = plan.get('ID')
# Check if plan has an ID, sometimes there's a null in the mix that we don't want
if plan_id is None:
continue
plan_branch = plan.get('Branch')
plan_force = plan.get('Force')
# A git plan can either be a PushPlan or a PullPlan, these have slightly different variables,
# so we need to handle those differently
if plan.get('@odata.type') == '#ibm.tm1.api.v1.GitPushPlan':
plan_new_branch = plan.get('NewBranch')
plan_source_files = plan.get('SourceFiles')
new_commit = GitCommit(
commit_id=plan.get('NewCommit').get('ID'),
summary=plan.get('NewCommit').get('Summary'),
author=plan.get('NewCommit').get('Author'))
parent_commit = GitCommit(
commit_id=plan.get('ParentCommit').get('ID'),
summary=plan.get('ParentCommit').get('Summary'),
author=plan.get('ParentCommit').get('Author'))
current_plan = GitPushPlan(
plan_id=plan_id, branch=plan_branch, force=plan_force,
new_branch=plan_new_branch, new_commit=new_commit,
parent_commit=parent_commit, source_files=plan_source_files)
elif plan.get('@odata.type') == '#ibm.tm1.api.v1.GitPullPlan':
plan_commit = GitCommit(
commit_id=plan.get('Commit').get('ID'),
summary=plan.get('Commit').get('Summary'),
author=plan.get('Commit').get('Author'))
plan_operations = plan.get('Operations')
current_plan = GitPullPlan(plan_id=plan_id, branch=plan_branch, force=plan_force, commit=plan_commit,
operations=plan_operations)
else:
raise RuntimeError(f"Invalid plan detected: {plan.get('@odata.type')}")
plans.append(current_plan)
return plans
| 44.619512 | 117 | 0.608396 | 8,778 | 0.959659 | 0 | 0 | 0 | 0 | 0 | 0 | 3,750 | 0.40997 |
3294741b0f8e1bf0eeabf4019d19a68a63e99c23 | 1,419 | py | Python | tests/bind_tests/diagram_tests/strategies.py | lycantropos/voronoi | 977e0b3e5eff2dd294e2e6ce1a8030c763e86233 | [
"MIT"
]
| null | null | null | tests/bind_tests/diagram_tests/strategies.py | lycantropos/voronoi | 977e0b3e5eff2dd294e2e6ce1a8030c763e86233 | [
"MIT"
]
| null | null | null | tests/bind_tests/diagram_tests/strategies.py | lycantropos/voronoi | 977e0b3e5eff2dd294e2e6ce1a8030c763e86233 | [
"MIT"
]
| null | null | null | from hypothesis import strategies
from hypothesis_geometry import planar
from tests.bind_tests.hints import (BoundCell,
BoundDiagram,
BoundEdge,
BoundVertex)
from tests.bind_tests.utils import (bound_source_categories,
to_bound_multipoint,
to_bound_multisegment)
from tests.strategies import (doubles,
integers_32,
sizes)
from tests.utils import to_maybe
booleans = strategies.booleans()
coordinates = doubles
empty_diagrams = strategies.builds(BoundDiagram)
source_categories = strategies.sampled_from(bound_source_categories)
cells = strategies.builds(BoundCell, sizes,
source_categories)
vertices = strategies.builds(BoundVertex, coordinates, coordinates)
edges = strategies.builds(BoundEdge, to_maybe(vertices), cells,
booleans, booleans)
cells_lists = strategies.lists(cells)
edges_lists = strategies.lists(edges)
vertices_lists = strategies.lists(vertices)
diagrams = strategies.builds(BoundDiagram, cells_lists, edges_lists,
vertices_lists)
multipoints = planar.multipoints(integers_32).map(to_bound_multipoint)
multisegments = planar.multisegments(integers_32).map(to_bound_multisegment)
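# Illustrative sketch of how these strategies are consumed in a test module (the test
# itself is an assumption, not part of this file):
# from hypothesis import given
# @given(diagram=diagrams)
# def test_diagram_properties(diagram):
#     ...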
| 44.34375 | 76 | 0.653982 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
32955f3ecdc5ec46e6e7127a3ed57f1411af2c54 | 2,381 | py | Python | apps/blog/serializers.py | yc19890920/dble_fastapi_blog | dd9b8984d849df893d4fea270e8b75ac12d01241 | [
"Apache-2.0"
]
| null | null | null | apps/blog/serializers.py | yc19890920/dble_fastapi_blog | dd9b8984d849df893d4fea270e8b75ac12d01241 | [
"Apache-2.0"
]
| 2 | 2021-03-31T19:56:46.000Z | 2021-04-30T21:19:15.000Z | apps/blog/serializers.py | yc19890920/dble_fastapi_blog | dd9b8984d849df893d4fea270e8b75ac12d01241 | [
"Apache-2.0"
]
| null | null | null | """
@Author: YangCheng
@contact: [email protected]
@Software: Y.C
@Time: 2020/7/21 15:22
"""
from typing import List, Optional
from pydantic import BaseModel, Field
from tortoise import Tortoise
from tortoise.contrib.pydantic import pydantic_model_creator, pydantic_queryset_creator
from lib.tortoise.pydantic import json_encoders
from .models import Tag, Category, Article
Tortoise.init_models(["apps.blog.models"], "models")
class PydanticResponse(BaseModel):
index: int
limit: int
total: int
# -*- tag -*-
# Tag create/update
TagCreateRequest = pydantic_model_creator(
Tag, name="TagCreateRequest", exclude_readonly=True
)
TagCreateResponse = pydantic_model_creator(
    Tag, name="TagCreateResponse", exclude=["articles"]
)
TagCreateResponse.Config.json_encoders = json_encoders
# Tag List
TagListSerializer = pydantic_queryset_creator(
Tag, name="TagListSerializer", exclude=["articles"]
)
class TagListResponse(PydanticResponse):
results: List[TagListSerializer]
class TagResponse(BaseModel):
id: int
name: str
# -*- Category -*-
# Category create/update
CategoryCreateRequest = pydantic_model_creator(
Category, name="CategoryCreateRequest", exclude_readonly=True
)
CategoryCreateResponse = pydantic_model_creator(
Category, name="CategoryCreateResponse", exclude=("articles",)
)
CategoryCreateResponse.Config.json_encoders = json_encoders
# Category List
CategoryListSerializer = pydantic_queryset_creator(
Category, name="CategoryListSerializer", exclude=("articles",)
)
class CategoryListResponse(PydanticResponse):
results: List[CategoryListSerializer]
# -*- Article -*-
# Article create/update
class ArticleCreateRequest(BaseModel):
title: str = Field(..., description="Title")
content: str = Field(..., description="Content")
    abstract: Optional[str] = None
    status: str = Field(default="publish", description="Status")
category_id: int = Field(..., description="category_id")
tags: List[int] = Field(..., description="tag_id list")
ArticleCreateResponse = pydantic_model_creator(
Article, name="ArticleCreateResponse"
)
ArticleCreateResponse.Config.json_encoders = json_encoders
ArticleListSerializer = pydantic_queryset_creator(
Article, name="ArticleListSerializer"
)
# Article List
class ArticleListResponse(PydanticResponse):
results: List[ArticleCreateResponse]
| 25.063158 | 87 | 0.761025 | 735 | 0.308694 | 0 | 0 | 0 | 0 | 0 | 0 | 551 | 0.231415 |
329a1a34027b83c6621340af222a98c0d43067e0 | 1,102 | py | Python | Python/image_analysis_centerlines/analysis_example.py | fromenlab/guides | ac9831265f8219d5b5a8ee3a441fc77c7ae4fe3b | [
"MIT"
]
| null | null | null | Python/image_analysis_centerlines/analysis_example.py | fromenlab/guides | ac9831265f8219d5b5a8ee3a441fc77c7ae4fe3b | [
"MIT"
]
| null | null | null | Python/image_analysis_centerlines/analysis_example.py | fromenlab/guides | ac9831265f8219d5b5a8ee3a441fc77c7ae4fe3b | [
"MIT"
]
| null | null | null | from skimage import img_as_bool, io, color, morphology
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Testing process
# Import images
one = img_as_bool(color.rgb2gray(io.imread('1.jpg')))
cross = img_as_bool(color.rgb2gray(io.imread('cross.jpg')))
grid = img_as_bool(color.rgb2gray(io.imread('grid.jpg')))
# Get skeleton
one_skel = morphology.skeletonize(one)
cross_skel = morphology.skeletonize(cross)
grid_skel = morphology.skeletonize(grid)
# Get medial axis
one_med, one_med_distance = morphology.medial_axis(one, return_distance=True)
cross_med, cross_med_distance = morphology.medial_axis(cross, return_distance=True)
grid_med, grid_med_distance = morphology.medial_axis(grid, return_distance=True)
# Get skeleton distance
one_skel_distance = one_med_distance*one_skel
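# Note (illustrative): the medial-axis distance at a skeleton pixel equals the distance to
# the nearest background pixel, so twice that value approximates the local channel width.
# local_widths = 2 * one_skel_distance[one_skel_distance.nonzero()]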
# Data processing for "1.jpg"
one_skel_nonzero = one_skel_distance.nonzero()
trans = np.transpose(one_skel_nonzero)
df_coords = pd.DataFrame(data = trans, columns = ["y", "x"])
df_dist = pd.DataFrame(data = one_skel_distance[one_skel_nonzero])
combined = pd.concat([df_coords, df_dist], axis=1) | 34.4375 | 83 | 0.791289 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 149 | 0.135209 |
329a5ba2f15a3280c3c7c2b2a6a0114abcec0cf9 | 485 | py | Python | resources/settings.py | Miriel-py/Room-Wizard | 83d86fe8e8fed8bb073b38465cd0e97b1a6113b8 | [
"MIT"
]
| null | null | null | resources/settings.py | Miriel-py/Room-Wizard | 83d86fe8e8fed8bb073b38465cd0e97b1a6113b8 | [
"MIT"
]
| null | null | null | resources/settings.py | Miriel-py/Room-Wizard | 83d86fe8e8fed8bb073b38465cd0e97b1a6113b8 | [
"MIT"
]
| null | null | null | # global_data.py
import os
from dotenv import load_dotenv
# Read the bot token from the .env file
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
DEBUG_MODE = os.getenv('DEBUG_MODE')
BOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DB_FILE = os.path.join(BOT_DIR, 'database/room_wizard_db.db')
LOG_FILE = os.path.join(BOT_DIR, 'logs/discord.log')
DEV_GUILDS = [730115558766411857]
# Embed color
EMBED_COLOR = 0x6C48A7
DEFAULT_FOOTER = 'Just pinning things.' | 24.25 | 69 | 0.764948 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 163 | 0.336082 |
329e532aeccbe51ed3829d6a07920bf7c69171ef | 602 | py | Python | Python OOP/Exams/23 August 2021/1, 2/project/astronaut/astronaut_repository.py | a-shiro/SoftUni-Courses | 7d0ca6401017a28b5ff7e7fa3e5df8bba8ddbe77 | [
"MIT"
]
| null | null | null | Python OOP/Exams/23 August 2021/1, 2/project/astronaut/astronaut_repository.py | a-shiro/SoftUni-Courses | 7d0ca6401017a28b5ff7e7fa3e5df8bba8ddbe77 | [
"MIT"
]
| null | null | null | Python OOP/Exams/23 August 2021/1, 2/project/astronaut/astronaut_repository.py | a-shiro/SoftUni-Courses | 7d0ca6401017a28b5ff7e7fa3e5df8bba8ddbe77 | [
"MIT"
]
| null | null | null | class AstronautRepository:
def __init__(self):
self.astronauts = []
def add(self, astronaut):
self.astronauts.append(astronaut)
def remove(self, astronaut):
self.astronauts.remove(astronaut)
def find_by_name(self, name: str):
for astronaut in self.astronauts:
if astronaut.name == name:
return astronaut
def find_suited_astronauts(self):
return sorted([astronaut for astronaut in self.astronauts if astronaut.oxygen > 30],
key=lambda x: x.oxygen,
reverse=True)[0:5]
| 30.1 | 92 | 0.609635 | 601 | 0.998339 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
329eec6934c9b0ff2824d0ffd01a1902dae80850 | 1,767 | py | Python | detection_algorithms/temporal_anomaly_detection/model_def.py | hanahs-deepfake-detection/detection-algorithms | 6d7ec53eaf333adb10a1aba448f80fceaf7722be | [
"MIT"
]
| null | null | null | detection_algorithms/temporal_anomaly_detection/model_def.py | hanahs-deepfake-detection/detection-algorithms | 6d7ec53eaf333adb10a1aba448f80fceaf7722be | [
"MIT"
]
| null | null | null | detection_algorithms/temporal_anomaly_detection/model_def.py | hanahs-deepfake-detection/detection-algorithms | 6d7ec53eaf333adb10a1aba448f80fceaf7722be | [
"MIT"
]
| null | null | null | """
Model Definition
"""
from tensorflow import keras
from tensorflow.keras.applications import ResNet101V2
from tensorflow.keras.layers import (
BatchNormalization, Conv2D, Dense, Dropout, Flatten, LSTM, MaxPool2D,
TimeDistributed, Lambda
)
import tensorflow as tf
from .spatial_transformer.bilinear_sampler import BilinearSampler
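# Illustrative usage sketch (batch size, frame count and training settings are
# assumptions, not values prescribed by this module):
# model = gen_model(batch_size=4, video_frames=16)
# model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
# model.fit(training_dataset, epochs=10)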
def gen_model(batch_size, video_frames):
inputs = keras.Input((video_frames, 384, 512, 3), batch_size=batch_size)
x = TimeDistributed(Conv2D(32, kernel_size=(3, 3), activation='relu'))(inputs)
x = TimeDistributed(MaxPool2D())(x)
x = TimeDistributed(Conv2D(32, kernel_size=(3, 3), activation='relu'))(x)
x = TimeDistributed(MaxPool2D())(x)
x = TimeDistributed(Flatten())(x)
x = TimeDistributed(Dense(64, activation='tanh', kernel_initializer='zeros'))(x)
x = TimeDistributed(Dropout(0.5))(x)
x = TimeDistributed(Dense(6, activation='tanh', kernel_initializer='zeros',
bias_initializer=lambda shape, dtype=None: tf.constant(
[1, 0, 0, 0, 1, 0], tf.float32
)))(x)
x = Lambda(lambda ls: tf.concat([ls[0], tf.reshape(ls[1],
(batch_size, video_frames, -1))], -1))([x, inputs])
x = TimeDistributed(BilinearSampler(input_shape=(batch_size, 384, 512, 3),
output_shape=(batch_size, 224, 224, 3)))(x)
resnet = ResNet101V2(include_top=False, weights=None)
x = TimeDistributed(resnet)(x)
x = TimeDistributed(Flatten())(x)
x = LSTM(32, return_sequences=True)(x)
x = LSTM(32)(x)
x = Dense(10, activation='relu')(x)
x = BatchNormalization()(x)
x = Dense(1, activation='sigmoid')(x)
model = keras.Model(inputs=inputs, outputs=x)
return model
| 42.071429 | 84 | 0.654782 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.043577 |
329f38947acdd5b4c36b6e62995a1a5be5206f16 | 1,515 | py | Python | scripts/lwtnn-build-dummy-inputs.py | aghoshpub/lwtnn | 979069b372f8c3d001d08fb0c756ff98954db644 | [
"MIT"
]
| 98 | 2016-11-27T04:05:56.000Z | 2022-02-28T17:14:19.000Z | scripts/lwtnn-build-dummy-inputs.py | aghoshpub/lwtnn | 979069b372f8c3d001d08fb0c756ff98954db644 | [
"MIT"
]
| 90 | 2016-11-24T15:13:31.000Z | 2021-11-29T14:09:34.000Z | scripts/lwtnn-build-dummy-inputs.py | aghoshpub/lwtnn | 979069b372f8c3d001d08fb0c756ff98954db644 | [
"MIT"
]
| 46 | 2016-12-15T17:21:43.000Z | 2022-01-27T22:45:42.000Z | #!/usr/bin/env python3
"""Generate fake serialized NNs to test the lightweight classes"""
import argparse
import json
import h5py
import numpy as np
def _run():
args = _get_args()
_build_keras_arch("arch.json")
_build_keras_inputs_file("variable_spec.json")
_build_keras_weights("weights.h5", verbose=args.verbose)
def _get_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
return parser.parse_args()
def _build_keras_arch(name):
arch = {
'layers': [
{'activation': 'relu', 'name': 'Dense'}
]
}
with open(name, 'w') as out_file:
out_file.write(json.dumps(arch, indent=2))
def _build_keras_inputs_file(name):
def build_input(num):
return {"name": "in{}".format(num), "offset": 0.0, "scale": 1.0}
top = {
"inputs": [build_input(x) for x in range(1,5)],
"class_labels": ["out{}".format(x) for x in range(1,5)]
}
with open(name, 'w') as out_file:
out_file.write(json.dumps(top, indent=2))
def _build_keras_weights(name, verbose):
half_swap = np.zeros((4,4))
half_swap[0,3] = 1.0
half_swap[1,2] = 1.0
if verbose:
print(half_swap)
bias = np.zeros(4)
with h5py.File(name, 'w') as h5_file:
layer0 = h5_file.create_group("layer_0")
layer0.create_dataset("param_0", data=half_swap)
layer0.create_dataset("param_1", data=bias)
if __name__ == "__main__":
_run()
| 27.545455 | 72 | 0.634323 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 299 | 0.19736 |
329f8f1e2538fb2f56b719613eee2ed54216347d | 4,884 | py | Python | osspeak/platforms/windows.py | OSSpeak/OSSpeak | 327c38a37684165f87bf8d76ab2ca135b43b8ab7 | [
"MIT"
]
| 1 | 2020-03-17T10:24:41.000Z | 2020-03-17T10:24:41.000Z | osspeak/platforms/windows.py | OSSpeak/OSSpeak | 327c38a37684165f87bf8d76ab2ca135b43b8ab7 | [
"MIT"
]
| 12 | 2016-09-28T05:16:00.000Z | 2020-11-27T22:32:40.000Z | osspeak/platforms/windows.py | OSSpeak/OSSpeak | 327c38a37684165f87bf8d76ab2ca135b43b8ab7 | [
"MIT"
]
| null | null | null | '''
Collection of Windows-specific I/O functions
'''
import msvcrt
import time
import ctypes
from platforms import winconstants, winclipboard
EnumWindows = ctypes.windll.user32.EnumWindows
EnumWindowsProc = ctypes.WINFUNCTYPE(ctypes.c_bool, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int))
GetWindowText = ctypes.windll.user32.GetWindowTextW
GetWindowTextLength = ctypes.windll.user32.GetWindowTextLengthW
IsWindowVisible = ctypes.windll.user32.IsWindowVisible
def flush_io_buffer():
while msvcrt.kbhit():
print(msvcrt.getch().decode('utf8'), end='')
def close_active_window():
hwnd = ctypes.windll.user32.GetForegroundWindow()
ctypes.windll.user32.PostMessageA(hwnd, winconstants.WM_CLOSE, 0, 0)
def get_active_window_name():
hwnd = ctypes.windll.user32.GetForegroundWindow()
return get_window_title(hwnd)
def maximize_active_window():
hwnd = ctypes.windll.user32.GetForegroundWindow()
ctypes.windll.user32.ShowWindow(hwnd, 3)
def minimize_active_window():
hwnd = ctypes.windll.user32.GetForegroundWindow()
ctypes.windll.user32.ShowWindow(hwnd, 6)
def get_window_title(hwnd):
length = GetWindowTextLength(hwnd)
buff = ctypes.create_unicode_buffer(length + 1)
GetWindowText(hwnd, buff, length + 1)
return buff.value
def get_matching_windows(title_list):
matches = {}
def window_enum_callback(hwnd, lParam):
if IsWindowVisible(hwnd):
window_name = get_window_title(hwnd).lower()
for name in title_list:
if name not in window_name:
return True
matches[window_name] = hwnd
return True
EnumWindows(EnumWindowsProc(window_enum_callback), 0)
return matches
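# Illustrative use: every entry of title_list must occur in the lower-cased window
# title for a match, e.g. get_matching_windows(["notepad"]) returns a dict mapping
# each matching title to its window handle.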
def activate_window(title, position=1):
if position > 0:
position -= 1
matches = get_matching_windows(title)
sorted_keys = list(sorted(matches.keys(), key=len))
key = sorted_keys[position]
hwnd = matches[key]
# magic incantations to activate window consistently
IsIconic = ctypes.windll.user32.IsIconic
ShowWindow = ctypes.windll.user32.ShowWindow
GetForegroundWindow = ctypes.windll.user32.GetForegroundWindow
GetWindowThreadProcessId = ctypes.windll.user32.GetWindowThreadProcessId
BringWindowToTop = ctypes.windll.user32.BringWindowToTop
AttachThreadInput = ctypes.windll.user32.AttachThreadInput
SetForegroundWindow = ctypes.windll.user32.SetForegroundWindow
SystemParametersInfo = ctypes.windll.user32.SystemParametersInfoA
if IsIconic(hwnd):
ShowWindow(hwnd, winconstants.SW_RESTORE)
if GetForegroundWindow() == hwnd:
return True
ForegroundThreadID = GetWindowThreadProcessId(GetForegroundWindow(), None)
ThisThreadID = GetWindowThreadProcessId(hwnd, None)
if AttachThreadInput(ThisThreadID, ForegroundThreadID, True):
BringWindowToTop(hwnd)
SetForegroundWindow(hwnd)
AttachThreadInput(ThisThreadID, ForegroundThreadID, False)
if GetForegroundWindow() == hwnd:
return True
timeout = ctypes.c_int()
zero = ctypes.c_int(0)
SystemParametersInfo(winconstants.SPI_GETFOREGROUNDLOCKTIMEOUT, 0, ctypes.byref(timeout), 0)
    SystemParametersInfo(winconstants.SPI_SETFOREGROUNDLOCKTIMEOUT, 0, ctypes.byref(zero), winconstants.SPIF_SENDCHANGE)
BringWindowToTop(hwnd)
SetForegroundWindow(hwnd)
SystemParametersInfo(winconstants.SPI_SETFOREGROUNDLOCKTIMEOUT, 0, ctypes.byref(timeout), winconstants.SPIF_SENDCHANGE)
if GetForegroundWindow() == hwnd:
return True
return False
def get_mouse_location():
pt = winconstants.POINT()
ctypes.windll.user32.GetCursorPos(ctypes.byref(pt))
return pt.x, pt.y
def mouse_click(button, direction, number):
event_nums = get_mouse_event_nums(button, direction)
for i in range(number):
for num in event_nums:
ctypes.windll.user32.mouse_event(num, 0, 0, 0, 0)
def mouse_move(x=None, y=None, relative=False):
startx, starty = get_mouse_location()
if not relative:
if x is None: x = startx
if y is None: y = starty
ctypes.windll.user32.SetCursorPos(x, y)
return
if x is None: x = 0
if y is None: y = 0
ctypes.windll.user32.SetCursorPos(startx + x, starty + y)
def get_clipboard_contents():
return winclipboard.init_windows_clipboard()[1]()
def set_clipboard_contents(text):
return winclipboard.init_windows_clipboard()[0](str(text))
def get_mouse_event_nums(button, direction):
if button == 'left' and direction == 'down': return [2]
if button == 'left' and direction == 'up': return [4]
if button == 'left' and direction == 'both': return [2, 4]
if button == 'right' and direction == 'down': return [8]
if button == 'right' and direction == 'up': return [16]
if button == 'right' and direction == 'both': return [8, 16] | 37.282443 | 123 | 0.719287 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 183 | 0.037469 |
32a0d30f56c4a1916c5ad0aef5a7b50495e1860b | 715 | py | Python | sudokusolver/common/messenger.py | Blondberg/SudokuSolver | 4a6f1f927d41f7a39a953b9784b28d570edf1f09 | [
"MIT"
]
| null | null | null | sudokusolver/common/messenger.py | Blondberg/SudokuSolver | 4a6f1f927d41f7a39a953b9784b28d570edf1f09 | [
"MIT"
]
| null | null | null | sudokusolver/common/messenger.py | Blondberg/SudokuSolver | 4a6f1f927d41f7a39a953b9784b28d570edf1f09 | [
"MIT"
]
| null | null | null | # messenger.py - contains functions to create different kinds of messages like info or error
# color the text, usage: print(BCOLORS['WARNING'] + "Warning: No active frommets remain. Continue?" + BCOLORS['ENDC'])
BCOLORS = {
'HEADER': '\033[95m',
'OKBLUE': '\033[94m',
'OKGREEN': '\033[92m',
'WARNING': '\033[93m',
'FAIL': '\033[91m',
'ENDC': '\033[0m',
'BOLD': '\033[1m',
'UNDERLINE': '\033[4m'
}
# Information message
def info(message):
print(BCOLORS['OKBLUE'] + message + BCOLORS['ENDC'])
# Action message
def action(message):
print(BCOLORS['OKGREEN'] + message + BCOLORS['ENDC'])
# Error message
def error(message):
print(BCOLORS['FAIL'] + message + BCOLORS['ENDC'])
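# Illustrative usage (colours follow the BCOLORS entries used above):
#   info("Loading puzzle")    # printed in blue
#   action("Cell solved")     # printed in green
#   error("Invalid board")    # printed in red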
| 23.833333 | 111 | 0.633566 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 436 | 0.60979 |
32a23291b7486cbc9a87ce5a914dd735071b20e4 | 554 | py | Python | test.py | w0w/miniPFC | 63b1bf608de03efada2a1b57c0370b6a7c2bf1ad | [
"MIT"
]
| null | null | null | test.py | w0w/miniPFC | 63b1bf608de03efada2a1b57c0370b6a7c2bf1ad | [
"MIT"
]
| null | null | null | test.py | w0w/miniPFC | 63b1bf608de03efada2a1b57c0370b6a7c2bf1ad | [
"MIT"
]
| null | null | null | import json
import RPi.GPIO as GPIO
from modules.sensor import getTempC, getHumidity
def loadConfig():
with open('./config/pin.json') as data_file:
data = json.load(data_file)
return data
currentPins = loadConfig().values()
def bootActuators():
    '''Assumes that the Pi is booting and switches all the relays off'''
GPIO.setmode(GPIO.BOARD)
for i, p in enumerate(currentPins):
GPIO.setup(p, GPIO.OUT)
GPIO.output(p, GPIO.HIGH)
print(p, GPIO.input(p))
print('Actuators turned off')
bootActuators() | 25.181818 | 63 | 0.66426 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.180505 |
32a426fd1c9efac97183a6c708ae91ac77c14062 | 1,170 | py | Python | example.py | clagraff/habu | 28d05c2fa2204b26177bbaed969648b92b89c735 | [
"MIT"
]
| null | null | null | example.py | clagraff/habu | 28d05c2fa2204b26177bbaed969648b92b89c735 | [
"MIT"
]
| null | null | null | example.py | clagraff/habu | 28d05c2fa2204b26177bbaed969648b92b89c735 | [
"MIT"
]
| null | null | null | import json
import habu
def do_req(uri, *args, **kwargs):
route_data = {
"/": {
"_links": {
"people": { "href": "/people" },
"animals": { "href": "/animals" }
}
},
"/people": {
"_links": {
"self": { "href": "/products" }
},
"_embedded": {
"people": [
{ "_links": { "self": { "href": "/people/clagraff" } }, "name": "Curtis", "age": 22 }
]
},
"total": 1
},
"/people/clagraff": {
"_links": {
"self": { "href": "/people/clagraff" }
},
"name": "Curtis",
"age": 22
}
}
return route_data[uri]
def main():
habu.set_request_func(do_req)
api = habu.enter("/")
people = api.people()
print("There are %i people" % people.total)
for person in people.embedded.people:
print("Hi! I am %s and I am %i years old" % (person.name, person.age))
curtis = habu.enter("/people/clagraff")
print(curtis)
if __name__ == "__main__":
main()
| 23.4 | 105 | 0.417949 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 344 | 0.294017 |
32a62b611ae086d7c010dc8106960f0f8f3738b2 | 1,162 | py | Python | notify_tweet.py | mkaraki/WatchTweets | 9b0a4ef66e38311453fff99d02091758b1bd0df5 | [
"MIT"
]
| null | null | null | notify_tweet.py | mkaraki/WatchTweets | 9b0a4ef66e38311453fff99d02091758b1bd0df5 | [
"MIT"
]
| 1 | 2022-01-26T18:03:15.000Z | 2022-01-26T18:03:35.000Z | notify_tweet.py | mkaraki/WatchTweets | 9b0a4ef66e38311453fff99d02091758b1bd0df5 | [
"MIT"
]
| null | null | null | import json
import os
import requests
from dotenv import load_dotenv
# You have to configure in this file to notify other services
def notifyHandler(tweet):
notifyDiscord(tweet)
return
def notifyDiscord(tweet, find_user_info=False):
msg = tweet['text']
if ('entities' in tweet and 'urls' in tweet['entities']):
for (i, url) in enumerate(tweet['entities']['urls']):
msg = msg.replace(url['url'], url['expanded_url'])
c = {
'embeds': [{
'description': msg,
'author': {
'name': tweet['author_id'],
'url': 'https://twitter.com/intent/user?user_id=' + tweet['author_id'],
},
'title': 'Tweet',
'url': 'https://twitter.com/intent/like?tweet_id=' + tweet['id'],
'footer': {
'text': 'Twitter',
'icon_url': 'http://github.com/twitter.png',
},
'timestamp': tweet['created_at'],
}]
}
requests.post(os.getenv('DISCORD_WEBHOOK_URL'), json.dumps(
c), headers={'Content-Type': 'application/json'})
return
load_dotenv(override=True)
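# The webhook target is read from the environment; a hypothetical .env entry:
#   DISCORD_WEBHOOK_URL=https://discord.com/api/webhooks/<id>/<token>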
| 26.409091 | 87 | 0.553356 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 438 | 0.376936 |
32aa7faedb604f995e124967e180cd9dc0c8087d | 2,245 | py | Python | credentials.py | Ken-mbira/Trust_Password_Protector | 7d4d25e6d10582c21cc84ce0ffdffe45d45c0d63 | [
"MIT"
]
| null | null | null | credentials.py | Ken-mbira/Trust_Password_Protector | 7d4d25e6d10582c21cc84ce0ffdffe45d45c0d63 | [
"MIT"
]
| null | null | null | credentials.py | Ken-mbira/Trust_Password_Protector | 7d4d25e6d10582c21cc84ce0ffdffe45d45c0d63 | [
"MIT"
]
| 1 | 2021-09-07T05:08:02.000Z | 2021-09-07T05:08:02.000Z | import random
import string
class Cred:
"""
    This class holds a user's credentials for one of their accounts
"""
def __init__(self,account_name,user_name,email,password):
"""
This will construct an instance of the credentials class
"""
self.account_name = account_name
self.user_name = user_name
self.email = email
self.password = password
credential_list = []
def save_credential(self):
"""
This will add a newly created credential to the credentials list
"""
Cred.credential_list.append(self)
def delete_credential(self):
"""
This method will delete a credential from the credential list
"""
Cred.credential_list.remove(self)
@classmethod
def display_credentials(cls):
"""
This will display all the credentials in the credentials list
"""
return Cred.credential_list
@classmethod
def find_account(cls,account):
"""
This will return the credentials after being given an account name
Args:
account_name: This is the account of the credentials that will be used to locate the credential
"""
for credential in cls.credential_list:
if credential.account_name == account:
return credential
@classmethod
def credential_found(cls,account):
"""
        Returns True if a credential with the given account name exists, False otherwise
Args:
account: This is the name of the account that is used to find the credential
"""
for credential in cls.credential_list:
if credential.account_name == account:
return True
return False
@classmethod
def password_generator(cls,length):
"""
this method generates a random password with letters, symbols and digits
Args:
length: This is the desired length of the password
"""
characters = string.ascii_letters + string.ascii_lowercase + string.ascii_uppercase + string.digits + string.punctuation
return "".join(random.choice(characters) for i in range(length)) | 32.071429 | 128 | 0.632962 | 2,216 | 0.987082 | 0 | 0 | 1,425 | 0.634744 | 0 | 0 | 1,023 | 0.455679 |
32ac15da27e5771cb19e9b355fd09244b1a2fee3 | 561 | py | Python | misprogs/sensor_Luz_LCD.py | dacocube/CursoGalileo | 1dac903031d9ff61174cb0c5e00e3f3795ea60de | [
"Apache-2.0"
]
| null | null | null | misprogs/sensor_Luz_LCD.py | dacocube/CursoGalileo | 1dac903031d9ff61174cb0c5e00e3f3795ea60de | [
"Apache-2.0"
]
| null | null | null | misprogs/sensor_Luz_LCD.py | dacocube/CursoGalileo | 1dac903031d9ff61174cb0c5e00e3f3795ea60de | [
"Apache-2.0"
]
| null | null | null | import signal
import sys
import time
import pyupm_grove as grove
import pyupm_i2clcd as lcd
def interruptHandler(signal, frame):
sys.exit(0)
if __name__=='__main__':
signal.signal(signal.SIGINT, interruptHandler)
myLcd = lcd.Jhd1313m1(0, 0x3E,0x62)
sensorluz=grove.GroveLight(0)
coloR=255
colorG=200
colorB=100
myLcd.setColor(coloR,colorG,colorB)
    # read the input and print it, waiting 1/2 second between readings
while True:
valorSensor=sensorluz.value()
myLcd.setCursor(0,0)
myLcd.write('%6d'% valorSensor)
time.sleep(0.5)
del sensorluz
| 20.777778 | 63 | 0.761141 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.137255 |
32b0d4c387e53daeda7939c3bdfe5d3e18cb6dbb | 210 | py | Python | setup.py | cogsy23/pyfsm | 22236994f7455a39489d1438b7c8bbcd081352be | [
"MIT"
]
| null | null | null | setup.py | cogsy23/pyfsm | 22236994f7455a39489d1438b7c8bbcd081352be | [
"MIT"
]
| null | null | null | setup.py | cogsy23/pyfsm | 22236994f7455a39489d1438b7c8bbcd081352be | [
"MIT"
]
| null | null | null | from setuptools import setup, find_packages
setup(
name='FSM',
version='0.1',
author='Ben Coughlan',
author_email='[email protected]',
packages=find_packages(),
license_file='LICENSE',
)
| 19.090909 | 43 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.238095 |
32b26100558c8d0079fd4f055056d994cd62c099 | 9,553 | py | Python | clustviz/clarans.py | barbarametzler/ClustViz | a460e1ffb5195dfe1e12bca106366901d169a690 | [
"MIT"
]
| 6 | 2019-11-14T11:22:54.000Z | 2020-03-01T09:14:21.000Z | clustviz/clarans.py | barbarametzler/ClustViz | a460e1ffb5195dfe1e12bca106366901d169a690 | [
"MIT"
]
| 2 | 2020-07-21T07:49:07.000Z | 2021-04-06T16:16:09.000Z | clustviz/clarans.py | barbarametzler/ClustViz | a460e1ffb5195dfe1e12bca106366901d169a690 | [
"MIT"
]
| 5 | 2020-07-14T15:22:00.000Z | 2022-03-19T19:45:32.000Z | import random
from typing import Tuple, Dict, Any
import scipy.special
import itertools
import graphviz
import numpy as np
import pandas as pd
from clustviz.pam import plot_pam
from pyclustering.utils import euclidean_distance_square
from pyclustering.cluster.clarans import clarans as clarans_pyclustering
class clarans(clarans_pyclustering):
def process(self, plotting: bool = False):
"""!
@brief Performs cluster analysis in line with rules of CLARANS algorithm.
@return (clarans) Returns itself (CLARANS instance).
@see get_clusters()
@see get_medoids()
"""
random.seed()
# loop for a numlocal number of times
for _ in range(0, self.__numlocal):
print("numlocal: ", _)
# set (current) random medoids
self.__current = random.sample(
range(0, len(self.__pointer_data)), self.__number_clusters
)
# update clusters in line with random allocated medoids
self.__update_clusters(self.__current)
# optimize configuration
self.__optimize_configuration()
# obtain cost of current cluster configuration and compare it with the best obtained
estimation = self.__calculate_estimation()
if estimation < self.__optimal_estimation:
print(
"Better configuration found with medoids: {0} and cost: {1}".format(
self.__current[:], estimation
)
)
self.__optimal_medoids = self.__current[:]
self.__optimal_estimation = estimation
if plotting is True:
self.__update_clusters(self.__optimal_medoids)
plot_pam(
self.__pointer_data,
dict(zip(self.__optimal_medoids, self.__clusters)),
)
else:
print(
"Configuration found does not improve current best one because its cost is {0}".format(
estimation
)
)
if plotting is True:
self.__update_clusters(self.__current[:])
plot_pam(
self.__pointer_data,
dict(zip(self.__current[:], self.__clusters)),
)
self.__update_clusters(self.__optimal_medoids)
if plotting is True:
print("FINAL RESULT:")
plot_pam(
self.__pointer_data,
dict(zip(self.__optimal_medoids, self.__clusters)),
)
return self
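    # Illustrative use (hypothetical data; argument order inherited from pyclustering's
    # clarans: data, number_clusters, numlocal, maxneighbor):
    #   clarans([[1, 1], [2, 1], [5, 4], [6, 5]], 2, 2, 3).process(plotting=False)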
def __optimize_configuration(self):
"""!
        @brief Finds quasi-optimal medoids and updates the clusters accordingly, in line with the algorithm's rules.
"""
index_neighbor = 0
counter = 0
while index_neighbor < self.__maxneighbor:
# get random current medoid that is to be replaced
current_medoid_index = self.__current[
random.randint(0, self.__number_clusters - 1)
]
current_medoid_cluster_index = self.__belong[current_medoid_index]
# get new candidate to be medoid
candidate_medoid_index = random.randint(
0, len(self.__pointer_data) - 1
)
while candidate_medoid_index in self.__current:
candidate_medoid_index = random.randint(
0, len(self.__pointer_data) - 1
)
candidate_cost = 0.0
for point_index in range(0, len(self.__pointer_data)):
if point_index not in self.__current:
# get non-medoid point and its medoid
point_cluster_index = self.__belong[point_index]
point_medoid_index = self.__current[point_cluster_index]
# get other medoid that is nearest to the point (except current and candidate)
other_medoid_index = self.__find_another_nearest_medoid(
point_index, current_medoid_index
)
other_medoid_cluster_index = self.__belong[
other_medoid_index
]
# for optimization calculate all required distances
# from the point to current medoid
distance_current = euclidean_distance_square(
self.__pointer_data[point_index],
self.__pointer_data[current_medoid_index],
)
                    # from the point to the candidate medoid
distance_candidate = euclidean_distance_square(
self.__pointer_data[point_index],
self.__pointer_data[candidate_medoid_index],
)
# from the point to nearest (own) medoid
distance_nearest = float("inf")
if (point_medoid_index != candidate_medoid_index) and (
point_medoid_index != current_medoid_cluster_index
):
distance_nearest = euclidean_distance_square(
self.__pointer_data[point_index],
self.__pointer_data[point_medoid_index],
)
# apply rules for cost calculation
if point_cluster_index == current_medoid_cluster_index:
# case 1:
if distance_candidate >= distance_nearest:
candidate_cost += (
distance_nearest - distance_current
)
# case 2:
else:
candidate_cost += (
distance_candidate - distance_current
)
elif point_cluster_index == other_medoid_cluster_index:
# case 3 ('nearest medoid' is the representative object of that cluster and object is more
# similar to 'nearest' than to 'candidate'):
if distance_candidate > distance_nearest:
pass
# case 4:
else:
candidate_cost += (
distance_candidate - distance_nearest
)
if candidate_cost < 0:
counter += 1
# set candidate that has won
self.__current[
current_medoid_cluster_index
] = candidate_medoid_index
# recalculate clusters
self.__update_clusters(self.__current)
                # reset iterations and restart the investigation from the beginning
index_neighbor = 0
else:
index_neighbor += 1
print("Medoid set changed {0} times".format(counter))
def compute_cost_clarans(data: pd.DataFrame, _cur_choice: list) -> Tuple[float, Dict[Any, list]]:
"""
A function to compute the configuration cost. (modified from that of CLARA)
:param data: The input dataframe.
:param _cur_choice: The current set of medoid choices.
:return: The total configuration cost, the medoids.
"""
total_cost = 0.0
medoids = {}
for idx in _cur_choice:
medoids[idx] = []
for i in list(data.index):
choice = -1
min_cost = np.inf
for m in medoids:
# fast_euclidean from CLARA
tmp = np.linalg.norm(data.loc[m] - data.loc[i])
if tmp < min_cost:
choice = m
min_cost = tmp
medoids[choice].append(i)
total_cost += min_cost
# print("total_cost: ", total_cost)
return total_cost, medoids
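# Illustrative call (medoid indices are hypothetical labels taken from data.index):
#   total_cost, medoids = compute_cost_clarans(data, [0, 3])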
def plot_tree_clarans(data: pd.DataFrame, k: int) -> None:
"""
plot G_{k,n} as in the paper of CLARANS; only to use with small input data.
:param data: input DataFrame.
:param k: number of points in each combination (possible set of medoids).
"""
n = len(data)
num_points = int(scipy.special.binom(n, k))
num_neigh = k * (n - k)
if (num_points > 50) or (num_neigh > 10):
print(
"Either graph nodes are more than 50 or neighbors are more than 10, the graph would be too big"
)
return
# all possibile combinations of k elements from input data
name_nodes = list(itertools.combinations(list(data.index), k))
dot = graphviz.Digraph(comment="Clustering")
# draw nodes, also adding the configuration cost
for i in range(num_points):
tot_cost, meds = compute_cost_clarans(data, list(name_nodes[i]))
tc = round(tot_cost, 3)
dot.node(str(name_nodes[i]), str(name_nodes[i]) + ": " + str(tc))
# only connect nodes if they have k-1 common elements
for i in range(num_points):
for j in range(num_points):
if i != j:
if (
len(set(list(name_nodes[i])) & set(list(name_nodes[j])))
== k - 1
):
dot.edge(str(name_nodes[i]), str(name_nodes[j]))
graph = graphviz.Source(dot) # .view()
    display(graph)  # display() is provided by IPython (e.g. when running inside a notebook)
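# Illustrative call on a tiny hypothetical dataset (n=4, k=2 gives 6 nodes with
# 4 neighbours each, well within the guard above):
#   df = pd.DataFrame([[1, 1], [2, 1], [5, 4], [6, 5]], columns=["x", "y"])
#   plot_tree_clarans(df, k=2)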
| 36.185606 | 114 | 0.539098 | 6,934 | 0.725845 | 0 | 0 | 0 | 0 | 0 | 0 | 2,233 | 0.233749 |
32b489e63deb6a7323ecb9996f33d06edac172bd | 1,507 | py | Python | bin/demo_findit_backup_url.py | cariaso/metapub | bfa361dd6e5de8ee0859e596d490fb478f7dcfba | [
"Apache-2.0"
]
| 28 | 2019-09-09T08:12:31.000Z | 2021-12-17T00:09:14.000Z | bin/demo_findit_backup_url.py | cariaso/metapub | bfa361dd6e5de8ee0859e596d490fb478f7dcfba | [
"Apache-2.0"
]
| 33 | 2019-11-07T05:36:04.000Z | 2022-01-29T01:14:57.000Z | bin/demo_findit_backup_url.py | cariaso/metapub | bfa361dd6e5de8ee0859e596d490fb478f7dcfba | [
"Apache-2.0"
]
| 10 | 2019-09-09T10:04:05.000Z | 2021-06-08T16:00:14.000Z | from __future__ import absolute_import, print_function, unicode_literals
import os
import requests
from metapub.findit import FindIt
from metapub.exceptions import *
from requests.packages import urllib3
urllib3.disable_warnings()
OUTPUT_DIR = 'findit'
CURL_TIMEOUT = 4000
def try_request(url):
# verify=False means it ignores bad SSL certs
OK_STATUS_CODES = [200, 301, 302, 307]
response = requests.get(url, stream=True, timeout=CURL_TIMEOUT, verify=False)
if response.status_code in OK_STATUS_CODES:
if response.headers.get('content-type').find('pdf') > -1:
return True
return False
def try_backup_url(pmid):
source = FindIt(pmid=pmid)
if not source.pma:
return
if source.url:
print(pmid, source.pma.journal, source.url, try_request(source.url))
else:
print(pmid, source.pma.journal, source.reason)
try:
if source.backup_url is not None:
print(pmid, source.pma.journal, source.backup_url, try_request(source.backup_url))
else:
print(pmid, source.pma.journal, "no backup url")
except Exception as err:
print(pmid, '%r' % err)
if __name__=='__main__':
import sys
try:
start_pmid = int(sys.argv[1])
except (IndexError, TypeError) as err:
print("Supply a pubmed ID as the starting point for this script.")
sys.exit()
for pmid in range(start_pmid, start_pmid+1000):
try_backup_url(pmid)
| 28.433962 | 98 | 0.666224 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 160 | 0.106171 |
32b5c206b4bd2dca61a6557018af529be9b8ba2f | 3,939 | py | Python | kgcnn/layers/conv/dmpnn_conv.py | the16thpythonist/gcnn_keras | 27d794095b684333d93149c825d84b85df8c30ff | [
"MIT"
]
| 47 | 2021-03-10T10:15:42.000Z | 2022-03-14T00:53:40.000Z | kgcnn/layers/conv/dmpnn_conv.py | the16thpythonist/gcnn_keras | 27d794095b684333d93149c825d84b85df8c30ff | [
"MIT"
]
| 36 | 2021-05-06T15:06:51.000Z | 2022-03-02T13:06:16.000Z | kgcnn/layers/conv/dmpnn_conv.py | the16thpythonist/gcnn_keras | 27d794095b684333d93149c825d84b85df8c30ff | [
"MIT"
]
| 11 | 2021-04-05T02:14:27.000Z | 2022-03-02T03:25:52.000Z | import tensorflow as tf
from kgcnn.layers.base import GraphBaseLayer
from kgcnn.layers.gather import GatherNodesOutgoing, GatherNodesIngoing
from kgcnn.layers.pooling import PoolingLocalEdges
from kgcnn.layers.modules import LazySubtract
@tf.keras.utils.register_keras_serializable(package='kgcnn', name='DMPNNGatherEdgesPairs')
class DMPNNGatherEdgesPairs(GraphBaseLayer):
"""Gather edge pairs that also works for invalid indices given a certain pair, i.e. if a edge does not have its
reverse counterpart in the edge indices list.
This class is used in `DMPNN <https://pubs.acs.org/doi/full/10.1021/acs.jcim.9b00237>`_ .
"""
def __init__(self, **kwargs):
"""Initialize layer."""
super(DMPNNGatherEdgesPairs, self).__init__(**kwargs)
self.gather_layer = GatherNodesIngoing()
def build(self, input_shape):
"""Build layer."""
super(DMPNNGatherEdgesPairs, self).build(input_shape)
def call(self, inputs, **kwargs):
"""Forward pass.
Args:
inputs (list): [edges, pair_index]
- edges (tf.RaggedTensor): Node embeddings of shape (batch, [M], F)
- pair_index (tf.RaggedTensor): Edge indices referring to edges of shape (batch, [M], 1)
Returns:
list: Gathered edge embeddings that match the reverse edges of shape (batch, [M], F) for selection_index.
"""
self.assert_ragged_input_rank(inputs)
edges, pair_index = inputs
index_corrected = tf.RaggedTensor.from_row_splits(
tf.where(pair_index.values >= 0, pair_index.values, tf.zeros_like(pair_index.values)),
pair_index.row_splits, validate=self.ragged_validate)
edges_paired = self.gather_layer([edges, index_corrected], **kwargs)
edges_corrected = tf.RaggedTensor.from_row_splits(
tf.where(pair_index.values >= 0, edges_paired.values, tf.zeros_like(edges_paired.values)),
edges_paired.row_splits, validate=self.ragged_validate)
return edges_corrected
@tf.keras.utils.register_keras_serializable(package='kgcnn', name='DMPNNPPoolingEdgesDirected')
class DMPNNPPoolingEdgesDirected(GraphBaseLayer):
"""Pooling of edges for around a target node as defined by
`DMPNN <https://pubs.acs.org/doi/full/10.1021/acs.jcim.9b00237>`_ . This slightly different than the normal node
aggregation from message passing like networks. Requires edge pairs for this implementation.
"""
def __init__(self, **kwargs):
"""Initialize layer."""
super(DMPNNPPoolingEdgesDirected, self).__init__(**kwargs)
self.pool_edge_1 = PoolingLocalEdges(pooling_method="sum")
self.gather_edges = GatherNodesOutgoing()
self.gather_pairs = DMPNNGatherEdgesPairs()
self.subtract_layer = LazySubtract()
def build(self, input_shape):
"""Build layer."""
super(DMPNNPPoolingEdgesDirected, self).build(input_shape)
def call(self, inputs, **kwargs):
"""Forward pass.
Args:
inputs: [nodes, edges, edge_index, edge_reverse_pair]
- nodes (tf.RaggedTensor): Node embeddings of shape (batch, [N], F)
- edges (tf.RaggedTensor): Edge or message embeddings of shape (batch, [M], F)
- edge_index (tf.RaggedTensor): Edge indices referring to nodes of shape (batch, [M], 2)
- edge_reverse_pair (tf.RaggedTensor): Pair mappings for reverse edges (batch, [M], 1)
Returns:
tf.RaggedTensor: Edge embeddings of shape (batch, [M], F)
"""
n, ed, edi, edp = inputs
pool_edge_receive = self.pool_edge_1([n, ed, edi], **kwargs) # Sum pooling of all edges
ed_new = self.gather_edges([pool_edge_receive, edi], **kwargs)
ed_not = self.gather_pairs([ed, edp], **kwargs)
out = self.subtract_layer([ed_new, ed_not], **kwargs)
return out
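# Sketch of how the layer is wired (input names assumed, matching the docstring above):
#   out_edges = DMPNNPPoolingEdgesDirected()([nodes, edges, edge_index, edge_reverse_pair])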
| 43.766667 | 117 | 0.67276 | 3,507 | 0.890327 | 0 | 0 | 3,694 | 0.937801 | 0 | 0 | 1,724 | 0.437675 |
32b80da9076a6963ab2a24a72478920a41611e59 | 181 | py | Python | src/keys_management/secret_key/types.py | nielsen-oss/keys-management | ddeeceb19dae68516272fe13dfc6521dcbe295f2 | [
"Apache-2.0"
]
| 6 | 2021-06-25T17:21:18.000Z | 2021-07-13T17:31:28.000Z | src/keys_management/secret_key/types.py | nielsen-oss/keys-management | ddeeceb19dae68516272fe13dfc6521dcbe295f2 | [
"Apache-2.0"
]
| null | null | null | src/keys_management/secret_key/types.py | nielsen-oss/keys-management | ddeeceb19dae68516272fe13dfc6521dcbe295f2 | [
"Apache-2.0"
]
| null | null | null | from typing import Callable, Tuple, Union
StrOrBytes = Union[str, bytes]
StrOrBytesPair = Tuple[StrOrBytes, StrOrBytes]
KeysStore = Callable[[], Union[StrOrBytes, StrOrBytesPair]]
| 30.166667 | 59 | 0.78453 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
32b877d4916dd5d40bd6976997b7ef7d01823785 | 349 | py | Python | api/admin.py | jchmura/suchary-django | af2e8a62d222fd6eb18f29af95c23ab098ccc2a6 | [
"MIT"
]
| null | null | null | api/admin.py | jchmura/suchary-django | af2e8a62d222fd6eb18f29af95c23ab098ccc2a6 | [
"MIT"
]
| 2 | 2021-03-19T21:54:17.000Z | 2021-06-10T19:20:12.000Z | api/admin.py | jchmura/suchary-django | af2e8a62d222fd6eb18f29af95c23ab098ccc2a6 | [
"MIT"
]
| null | null | null | from django.contrib import admin
from api.models import Device
class DeviceAdmin(admin.ModelAdmin):
list_display = ['android_id', 'alias', 'model', 'os_version', 'version', 'created', 'last_seen', 'active']
list_filter = ['active']
search_fields = ['registration_id', 'android_id', 'alias']
admin.site.register(Device, DeviceAdmin)
| 26.846154 | 110 | 0.713467 | 239 | 0.684814 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.340974 |
32b93fe289994ee8aa84e901e1536e526ce09b82 | 169 | py | Python | project/help/urls.py | samuraii/otus_python_backend | 1bc7c8953a03008c94dd4b0ca89a7c830772f79a | [
"MIT"
]
| null | null | null | project/help/urls.py | samuraii/otus_python_backend | 1bc7c8953a03008c94dd4b0ca89a7c830772f79a | [
"MIT"
]
| null | null | null | project/help/urls.py | samuraii/otus_python_backend | 1bc7c8953a03008c94dd4b0ca89a7c830772f79a | [
"MIT"
]
| null | null | null | # from django.contrib import admin
# from django.urls import path
from django.conf.urls import url
from help import views
urlpatterns = [
url(r'^$', views.index)
]
| 18.777778 | 34 | 0.727811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 69 | 0.408284 |
32b9a1053b526032d5d6c19f20fe7c9cbc1b1859 | 5,299 | py | Python | social_network/utils.py | diana-gv/django-social-network | 48bafca81f28874ceead59e263ce5b7e3853dbfb | [
"BSD-3-Clause"
]
| 3 | 2015-01-13T05:45:04.000Z | 2020-01-10T19:05:35.000Z | social_network/utils.py | diana-gv/django-social-network | 48bafca81f28874ceead59e263ce5b7e3853dbfb | [
"BSD-3-Clause"
]
| null | null | null | social_network/utils.py | diana-gv/django-social-network | 48bafca81f28874ceead59e263ce5b7e3853dbfb | [
"BSD-3-Clause"
]
| 6 | 2015-01-13T04:40:53.000Z | 2021-08-13T01:07:40.000Z | # coding=utf-8
import random
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext as _
from notifications.models import EventType
from social_graph import EdgeType
try:
from hashlib import sha1 as sha_constructor, md5 as md5_constructor
except ImportError:
pass
#---------------------NOTIFICATIONS---------------------------------
def group_comment_event_type():
comment_event_type = cache.get('SOCIAL_NETWORK_COMMENT_EVENT_TYPE')
if comment_event_type is not None:
return comment_event_type
try:
from . import SOCIAL_GROUP_COMMENT_EVENT_TYPE_NAME
comment_event_type = EventType.objects.get(name=SOCIAL_GROUP_COMMENT_EVENT_TYPE_NAME)
cache.set('SOCIAL_NETWORK_COMMENT_EVENT_TYPE', comment_event_type)
return comment_event_type
except ObjectDoesNotExist as e:
pass # TODO Log this
def group_shared_link_event_type():
shared_link = cache.get('SOCIAL_NETWORK_SHARED_LINK_EVENT_TYPE')
if shared_link is not None:
return shared_link
try:
from . import SOCIAL_GROUP_SHARED_LINK_EVENT_TYPE_NAME
shared_link = EventType.objects.get(name=SOCIAL_GROUP_SHARED_LINK_EVENT_TYPE_NAME)
cache.set('SOCIAL_NETWORK_SHARED_LINK_EVENT_TYPE', shared_link)
return shared_link
except ObjectDoesNotExist as e:
pass # TODO Log this
def group_photo_event_type():
photo_event_type = cache.get('SOCIAL_NETWORK_PHOTO_EVENT_TYPE')
if photo_event_type is not None:
return photo_event_type
try:
from . import SOCIAL_GROUP_PHOTO_EVENT_TYPE_NAME
photo_event_type = EventType.objects.get(name=SOCIAL_GROUP_PHOTO_EVENT_TYPE_NAME)
cache.set('SOCIAL_NETWORK_PHOTO_EVENT_TYPE', photo_event_type)
return photo_event_type
except ObjectDoesNotExist as e:
pass # TODO Log this
#---------------------EDGES-----------------------------------------
def friendship_edge():
_friendship = cache.get('FRIENDSHIP_EDGE_TYPE')
if _friendship is not None:
return _friendship
try:
_friendship = EdgeType.objects.get(name="Friendship")
cache.set('FRIENDSHIP_EDGE_TYPE', _friendship)
return _friendship
except ObjectDoesNotExist as e:
pass # TODO Log this
def integrated_by_edge():
_integrated_by = cache.get('INTEGRATED_BY_EDGE_TYPE')
if _integrated_by is not None:
return _integrated_by
try:
_integrated_by = EdgeType.objects.get(name="Integrated by")
cache.set('INTEGRATED_BY_EDGE_TYPE', _integrated_by)
return _integrated_by
except ObjectDoesNotExist as e:
pass # TODO Log this
def member_of_edge():
_member_of = cache.get('MEMBER_OF_EDGE_TYPE')
if _member_of is not None:
return _member_of
try:
_member_of = EdgeType.objects.get(name="Member")
cache.set('MEMBER_OF_EDGE_TYPE', _member_of)
return _member_of
except ObjectDoesNotExist as e:
pass # TODO Log this
def follower_of_edge():
_follower_of = cache.get('FOLLOWER_OF_EDGE_TYPE')
if _follower_of is not None:
return _follower_of
try:
_follower_of = EdgeType.objects.get(name="Follower")
cache.set('FOLLOWER_OF_EDGE_TYPE', _follower_of)
return _follower_of
except ObjectDoesNotExist:
pass
def followed_by_edge():
_followed_by = cache.get('FOLLOWED_BY_EDGE_TYPE')
if _followed_by is not None:
return _followed_by
try:
_followed_by = EdgeType.objects.get(name="Followed by")
cache.set('FOLLOWED_BY_EDGE_TYPE', _followed_by)
return _followed_by
except ObjectDoesNotExist:
pass
#---------------------GENERAL-----------------------------------------
def generate_sha1(string, salt=None):
"""
Generates a sha1 hash for supplied string. Doesn't need to be very secure
because it's not used for password checking. We got Django for that.
:param string:
The string that needs to be encrypted.
:param salt:
Optionally define your own salt. If none is supplied, will use a random
string of 5 characters.
:return: Tuple containing the salt and hash.
"""
if not isinstance(string, (str, unicode)):
string = str(string)
if isinstance(string, unicode):
string = string.encode("utf-8")
if not salt:
salt = sha_constructor(str(random.random())).hexdigest()[:5]
hash = sha_constructor(salt+string).hexdigest()
return (salt, hash)
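# Illustrative call:
#   salt, digest = generate_sha1('alice')  # digest is a 40-character hex SHA-1 string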
# A tuple mapping exponents of standard large numbers to their converters
intword_converters = (
(3, lambda number: _('%(value)dK')),
(6, lambda number: _('%(value)dM')),
(9, lambda number: _('%(value)dG')),
)
def intmin(value):
"""
"""
try:
value = int(value)
except (TypeError, ValueError):
return value
if value < 1000:
return value
for exponent, converter in intword_converters:
large_number = 10 ** exponent
if value < large_number * 1000:
new_value = value / large_number
tpl = "+%s" if value > large_number else "%s"
return tpl % converter(new_value) % {'value': new_value}
return value | 31.35503 | 93 | 0.670881 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,344 | 0.253633 |
32ba91d9753d50c77b106fbc0d73eade94889fbb | 219 | py | Python | datavis/urls.py | poulomihore/iot-hackathon | 4f90c12c164f3ee09341fc1381b1f7898a5d3055 | [
"MIT"
]
| null | null | null | datavis/urls.py | poulomihore/iot-hackathon | 4f90c12c164f3ee09341fc1381b1f7898a5d3055 | [
"MIT"
]
| null | null | null | datavis/urls.py | poulomihore/iot-hackathon | 4f90c12c164f3ee09341fc1381b1f7898a5d3055 | [
"MIT"
]
| null | null | null | from django.urls import path
from . import views
urlpatterns = [
path('', views.get_percentage, name='get_percentage'),
path('get_percentage_value', views.get_percentage_value, name='get_percentage_value'),
]
| 24.333333 | 90 | 0.748858 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 62 | 0.283105 |
32bb0cd05fa6989d453a40177c162d1a6d206545 | 10,866 | py | Python | datafiles/migrations/0001_initial.py | ChalkLab/SciFlow | 5bf021007d6184402ebfe6cefc2111d99160cb69 | [
"MIT"
]
| 1 | 2021-04-26T20:03:11.000Z | 2021-04-26T20:03:11.000Z | datafiles/migrations/0001_initial.py | ChalkLab/SciFlow | 5bf021007d6184402ebfe6cefc2111d99160cb69 | [
"MIT"
]
| 17 | 2021-04-23T16:51:59.000Z | 2021-12-13T21:17:41.000Z | datafiles/migrations/0001_initial.py | ChalkLab/SciFlow | 5bf021007d6184402ebfe6cefc2111d99160cb69 | [
"MIT"
]
| null | null | null | # Generated by Django 3.2.8 on 2021-10-12 15:54
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AspectActlog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('activitycode', models.CharField(max_length=16)),
('comment', models.CharField(max_length=256)),
('updated', models.DateTimeField()),
],
options={
'db_table': 'aspect_actlog',
'managed': False,
},
),
migrations.CreateModel(
name='AspectErrors',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('errorcode', models.CharField(max_length=16)),
('comment', models.CharField(max_length=256)),
('updated', models.DateTimeField()),
],
options={
'db_table': 'aspect_errors',
'managed': False,
},
),
migrations.CreateModel(
name='AspectFiles',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.TextField()),
('type', models.CharField(max_length=32)),
('version', models.IntegerField()),
('updated', models.DateTimeField()),
],
options={
'db_table': 'aspect_files',
'managed': False,
},
),
migrations.CreateModel(
name='AspectLookup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uniqueid', models.CharField(max_length=128)),
('title', models.CharField(max_length=256)),
('type', models.CharField(max_length=16)),
('graphname', models.CharField(max_length=256)),
('currentversion', models.IntegerField()),
('auth_user_id', models.PositiveIntegerField()),
('updated', models.DateTimeField()),
],
options={
'db_table': 'aspect_lookup',
'managed': False,
},
),
migrations.CreateModel(
name='Datasets',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=64)),
('sourcecode', models.CharField(max_length=16, null=True)),
('source', models.CharField(default='', max_length=64)),
('sourceurl', models.CharField(default='', max_length=256)),
('datasetname', models.CharField(max_length=16, null=True)),
('uniqueidformat', models.CharField(max_length=128, null=True)),
('protected', models.CharField(choices=[('yes', 'Yes'), ('no', 'No')], default='no', max_length=16)),
('count', models.IntegerField(default=0)),
],
options={
'db_table': 'datasets',
'managed': False,
},
),
migrations.CreateModel(
name='FacetActlog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('activitycode', models.CharField(max_length=16)),
('comment', models.CharField(max_length=256)),
('updated', models.DateTimeField()),
],
options={
'db_table': 'facet_actlog',
'managed': False,
},
),
migrations.CreateModel(
name='FacetErrors',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('errorcode', models.CharField(max_length=16)),
('comment', models.CharField(max_length=256)),
('updated', models.DateTimeField()),
],
options={
'db_table': 'facet_errors',
'managed': False,
},
),
migrations.CreateModel(
name='FacetFiles',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.TextField()),
('type', models.CharField(max_length=32)),
('version', models.IntegerField()),
('updated', models.DateTimeField()),
],
options={
'db_table': 'facet_files',
'managed': False,
},
),
migrations.CreateModel(
name='FacetLookup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uniqueid', models.CharField(max_length=128)),
('title', models.CharField(max_length=256)),
('type', models.CharField(max_length=16)),
('graphname', models.CharField(max_length=256)),
('currentversion', models.IntegerField()),
('auth_user_id', models.PositiveIntegerField()),
('updated', models.DateTimeField()),
],
options={
'db_table': 'facet_lookup',
'managed': False,
},
),
migrations.CreateModel(
name='JsonActlog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('session', models.CharField(default=None, max_length=24)),
('activitylog', models.CharField(default='', max_length=2048)),
('comment', models.CharField(default=None, max_length=256)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'json_actlog',
'managed': False,
},
),
migrations.CreateModel(
name='JsonAspects',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'json_aspects',
'managed': False,
},
),
migrations.CreateModel(
name='JsonErrors',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('session', models.CharField(default=None, max_length=24)),
('errorcode', models.CharField(default='', max_length=128)),
('comment', models.CharField(default=None, max_length=256)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'json_errors',
'managed': False,
},
),
migrations.CreateModel(
name='JsonFacets',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'json_facets',
'managed': False,
},
),
migrations.CreateModel(
name='JsonFiles',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.TextField(default='')),
('type', models.CharField(default='', max_length=32)),
('version', models.IntegerField(default='')),
('jhash', models.CharField(blank=True, max_length=52, null=True)),
('comments', models.CharField(blank=True, max_length=32, null=True)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'json_files',
'managed': False,
},
),
migrations.CreateModel(
name='JsonLookup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uniqueid', models.CharField(default='', max_length=128, unique=True)),
('title', models.CharField(default='', max_length=256)),
('graphname', models.CharField(default='', max_length=256)),
('currentversion', models.IntegerField(default=0)),
('auth_user_id', models.IntegerField(default='')),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'json_lookup',
'managed': False,
},
),
migrations.CreateModel(
name='References',
fields=[
('id', models.SmallAutoField(primary_key=True, serialize=False)),
('journal', models.CharField(blank=True, max_length=256, null=True)),
('authors', models.CharField(blank=True, max_length=2048, null=True)),
('aulist', models.CharField(blank=True, max_length=1024, null=True)),
('year', models.PositiveSmallIntegerField(blank=True, null=True)),
('volume', models.CharField(blank=True, max_length=12, null=True)),
('issue', models.CharField(blank=True, max_length=16, null=True)),
('startpage', models.CharField(blank=True, max_length=16, null=True)),
('endpage', models.CharField(blank=True, max_length=16, null=True)),
('title', models.CharField(blank=True, max_length=512, null=True)),
('url', models.CharField(blank=True, max_length=256, null=True)),
('doi', models.CharField(max_length=256)),
('count', models.SmallIntegerField(blank=True, null=True)),
('updated', models.DateTimeField()),
],
options={
'db_table': 'references',
'managed': False,
},
),
]
| 43.119048 | 117 | 0.510307 | 10,773 | 0.991441 | 0 | 0 | 0 | 0 | 0 | 0 | 1,657 | 0.152494 |
32bd83533b8a10d702670e0618e12d21f2714992 | 712 | py | Python | f8a_jobs/handlers/flow.py | sawood14012/fabric8-analytics-jobs | a7d850dfef5785144676b9a3b4e29942161e5347 | [
"Apache-2.0"
]
| 5 | 2017-05-04T11:22:31.000Z | 2018-08-24T16:12:30.000Z | f8a_jobs/handlers/flow.py | sawood14012/fabric8-analytics-jobs | a7d850dfef5785144676b9a3b4e29942161e5347 | [
"Apache-2.0"
]
| 325 | 2017-05-03T08:44:03.000Z | 2021-12-13T21:03:49.000Z | f8a_jobs/handlers/flow.py | sawood14012/fabric8-analytics-jobs | a7d850dfef5785144676b9a3b4e29942161e5347 | [
"Apache-2.0"
]
| 28 | 2017-05-02T05:09:32.000Z | 2021-03-11T09:42:34.000Z | """Schedule multiple flows of a type."""
from .base import BaseHandler
class FlowScheduling(BaseHandler):
"""Schedule multiple flows of a type."""
def execute(self, flow_name, flow_arguments):
"""Schedule multiple flows of a type, do filter expansion if needed.
:param flow_name: flow name that should be scheduled
:param flow_arguments: a list of flow arguments per flow
"""
for node_args in flow_arguments:
if self.is_filter_query(node_args):
for args in self.expand_filter_query(node_args):
self.run_selinon_flow(flow_name, args)
else:
self.run_selinon_flow(flow_name, node_args)
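    # Illustrative payload (names are hypothetical): schedule the same flow for two inputs
    #   FlowScheduling(...).execute('example_flow', [{'name': 'pkg-a'}, {'name': 'pkg-b'}])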
| 33.904762 | 76 | 0.651685 | 637 | 0.894663 | 0 | 0 | 0 | 0 | 0 | 0 | 287 | 0.40309 |
32bdf6c9f66952e90bfd46bcfa58f2ec034c3c0d | 1,032 | py | Python | mako/stats/notifier.py | zer0tonin/mako | 12420056e13e1acd333e686537d5ebc909450620 | [
"MIT"
]
| null | null | null | mako/stats/notifier.py | zer0tonin/mako | 12420056e13e1acd333e686537d5ebc909450620 | [
"MIT"
]
| 1 | 2021-06-02T04:22:46.000Z | 2021-06-02T04:22:46.000Z | mako/stats/notifier.py | zer0tonin/mako | 12420056e13e1acd333e686537d5ebc909450620 | [
"MIT"
]
| null | null | null | import logging
logger = logging.getLogger(__name__)
class Notifier:
def __init__(self, redis):
self.redis = redis
async def notify_guilds(self):
guilds_set = "guilds"
logger.debug("Scanning {}".format(guilds_set))
result = []
async for guild_id in self.redis.isscan(guilds_set):
result.extend(await self.notify_guild(guild_id))
return result
async def notify_guild(self, guild_id):
notify_list = "guilds:{}:notify".format(guild_id)
level_zset = "guilds:{}:levels".format(guild_id)
result = []
logger.debug("Popping {} queue".format(notify_list))
user_id = await self.redis.lpop(notify_list)
while user_id is not None:
logger.debug("Accessing {} zset for user: {}".format(level_zset, user_id))
level = await self.redis.zscore(level_zset, user_id)
result.append((guild_id, user_id, level))
user_id = await self.redis.lpop(notify_list)
return result
| 30.352941 | 86 | 0.631783 | 975 | 0.944767 | 0 | 0 | 0 | 0 | 890 | 0.862403 | 107 | 0.103682 |
32be27b57feb5ea94289c2693437fff5fe254149 | 286 | py | Python | app/models/users.py | muzzammilh/valid-voice | 7e5f8211471cfeb1f404de6b0b715196e8276b41 | [
"MIT"
]
| null | null | null | app/models/users.py | muzzammilh/valid-voice | 7e5f8211471cfeb1f404de6b0b715196e8276b41 | [
"MIT"
]
| null | null | null | app/models/users.py | muzzammilh/valid-voice | 7e5f8211471cfeb1f404de6b0b715196e8276b41 | [
"MIT"
]
| null | null | null | from app.helpers.sqlalchemy import db
class Role(db.Model):
__tablename__ = 'tt'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
default = db.Column(db.Boolean, default=False, index=True)
permissions = db.Column(db.Integer) | 35.75 | 62 | 0.706294 | 247 | 0.863636 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.013986 |
32c012e2243ac30d8702a0e4c7e1a09c458c9ec8 | 12,819 | py | Python | pysnmp/HUAWEI-CDP-COMPLIANCE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
]
| 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/HUAWEI-CDP-COMPLIANCE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
]
| 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/HUAWEI-CDP-COMPLIANCE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
]
| 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module HUAWEI-CDP-COMPLIANCE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HUAWEI-CDP-COMPLIANCE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:31:50 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint")
hwDatacomm, = mibBuilder.importSymbols("HUAWEI-MIB", "hwDatacomm")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
EnabledStatus, = mibBuilder.importSymbols("P-BRIDGE-MIB", "EnabledStatus")
ZeroBasedCounter32, TimeFilter = mibBuilder.importSymbols("RMON2-MIB", "ZeroBasedCounter32", "TimeFilter")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
Unsigned32, TimeTicks, Counter32, IpAddress, iso, NotificationType, ObjectIdentity, ModuleIdentity, Counter64, Bits, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "TimeTicks", "Counter32", "IpAddress", "iso", "NotificationType", "ObjectIdentity", "ModuleIdentity", "Counter64", "Bits", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Integer32")
TextualConvention, TruthValue, TimeStamp, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "TruthValue", "TimeStamp", "DisplayString")
hwCdpComplianceMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198))
if mibBuilder.loadTexts: hwCdpComplianceMIB.setLastUpdated('200905050000Z')
if mibBuilder.loadTexts: hwCdpComplianceMIB.setOrganization('Huawei Technologies co.,Ltd.')
hwCdpComplianceObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1))
hwCdpComplianceNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 2))
hwCdpComplianceConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 3))
hwCdpComplianceConfiguration = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1, 1))
hwCdpComplianceStatistics = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1, 2))
hwCdpComplianceRemoteSystemsData = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1, 3))
hwCdpComplianceEnable = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1, 1, 1), EnabledStatus().clone()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwCdpComplianceEnable.setStatus('current')
hwCdpComplianceNotificationInterval = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10)).clone(5)).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwCdpComplianceNotificationInterval.setStatus('current')
hwCdpCompliancePortConfigTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1, 1, 3), )
if mibBuilder.loadTexts: hwCdpCompliancePortConfigTable.setStatus('current')
hwCdpCompliancePortConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1, 1, 3, 1), ).setIndexNames((0, "HUAWEI-CDP-COMPLIANCE-MIB", "hwCdpCompliancePortConfigIfIndex"))
if mibBuilder.loadTexts: hwCdpCompliancePortConfigEntry.setStatus('current')
hwCdpCompliancePortConfigIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1, 1, 3, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hwCdpCompliancePortConfigIfIndex.setStatus('current')
hwCdpCompliancePortConfigAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1, 1, 3, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("rxOnly", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwCdpCompliancePortConfigAdminStatus.setStatus('current')
hwCdpCompliancePortConfigHoldTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1, 1, 3, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10, 254)).clone(180)).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwCdpCompliancePortConfigHoldTime.setStatus('current')
hwCdpCompliancePortConfigNotificationEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1, 1, 3, 1, 4), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwCdpCompliancePortConfigNotificationEnable.setStatus('current')
hwCdpCompliancePortStatsReset = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1, 1, 3, 1, 5), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwCdpCompliancePortStatsReset.setStatus('current')
hwCdpComplianceStatsRemTablesLastChangeTime = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1, 2, 1), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwCdpComplianceStatsRemTablesLastChangeTime.setStatus('current')
hwCdpComplianceStatsRemTablesAgeouts = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1, 2, 2), ZeroBasedCounter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwCdpComplianceStatsRemTablesAgeouts.setStatus('current')
hwCdpComplianceStatsRxPortTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1, 2, 3), )
if mibBuilder.loadTexts: hwCdpComplianceStatsRxPortTable.setStatus('current')
hwCdpComplianceStatsRxPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1, 2, 3, 1), ).setIndexNames((0, "HUAWEI-CDP-COMPLIANCE-MIB", "hwCdpComplianceStatsRxPortIfIndex"))
if mibBuilder.loadTexts: hwCdpComplianceStatsRxPortEntry.setStatus('current')
hwCdpComplianceStatsRxPortIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1, 2, 3, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hwCdpComplianceStatsRxPortIfIndex.setStatus('current')
hwCdpComplianceStatsRxPortFramesTotal = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1, 2, 3, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwCdpComplianceStatsRxPortFramesTotal.setStatus('current')
hwCdpComplianceStatsRxPortAgeoutsTotal = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1, 2, 3, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwCdpComplianceStatsRxPortAgeoutsTotal.setStatus('current')
hwCdpComplianceRemoteTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1, 3, 1), )
if mibBuilder.loadTexts: hwCdpComplianceRemoteTable.setStatus('current')
hwCdpComplianceRemoteEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1, 3, 1, 1), ).setIndexNames((0, "HUAWEI-CDP-COMPLIANCE-MIB", "hwCdpComplianceRemLocalPortIfIndex"))
if mibBuilder.loadTexts: hwCdpComplianceRemoteEntry.setStatus('current')
hwCdpComplianceRemLocalPortIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1, 3, 1, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hwCdpComplianceRemLocalPortIfIndex.setStatus('current')
hwCdpComplianceRemTimeMark = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1, 3, 1, 1, 2), TimeFilter()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwCdpComplianceRemTimeMark.setStatus('current')
hwCdpComplianceRemoteInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 1, 3, 1, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 1600))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwCdpComplianceRemoteInfo.setStatus('current')
hwCdpComplianceNotificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 2, 1))
hwCdpComplianceRemTablesChange = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 2, 1, 1)).setObjects(("HUAWEI-CDP-COMPLIANCE-MIB", "hwCdpComplianceStatsRemTablesLastChangeTime"), ("HUAWEI-CDP-COMPLIANCE-MIB", "hwCdpComplianceStatsRemTablesAgeouts"))
if mibBuilder.loadTexts: hwCdpComplianceRemTablesChange.setStatus('current')
hwCdpComplianceCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 3, 1))
hwCdpComplianceGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 3, 2))
hwCdpComplianceCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 3, 1, 1)).setObjects(("HUAWEI-CDP-COMPLIANCE-MIB", "hwCdpComplianceConfigGroup"), ("HUAWEI-CDP-COMPLIANCE-MIB", "hwCdpComplianceStatsGroup"), ("HUAWEI-CDP-COMPLIANCE-MIB", "hwCdpComplianceRemSysGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwCdpComplianceCompliance = hwCdpComplianceCompliance.setStatus('current')
hwCdpComplianceConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 3, 2, 1)).setObjects(("HUAWEI-CDP-COMPLIANCE-MIB", "hwCdpComplianceEnable"), ("HUAWEI-CDP-COMPLIANCE-MIB", "hwCdpComplianceNotificationInterval"), ("HUAWEI-CDP-COMPLIANCE-MIB", "hwCdpCompliancePortConfigAdminStatus"), ("HUAWEI-CDP-COMPLIANCE-MIB", "hwCdpCompliancePortConfigHoldTime"), ("HUAWEI-CDP-COMPLIANCE-MIB", "hwCdpCompliancePortConfigNotificationEnable"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwCdpComplianceConfigGroup = hwCdpComplianceConfigGroup.setStatus('current')
hwCdpComplianceStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 3, 2, 2)).setObjects(("HUAWEI-CDP-COMPLIANCE-MIB", "hwCdpComplianceStatsRxPortFramesTotal"), ("HUAWEI-CDP-COMPLIANCE-MIB", "hwCdpCompliancePortStatsReset"), ("HUAWEI-CDP-COMPLIANCE-MIB", "hwCdpComplianceStatsRemTablesLastChangeTime"), ("HUAWEI-CDP-COMPLIANCE-MIB", "hwCdpComplianceStatsRemTablesAgeouts"), ("HUAWEI-CDP-COMPLIANCE-MIB", "hwCdpComplianceStatsRxPortAgeoutsTotal"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwCdpComplianceStatsGroup = hwCdpComplianceStatsGroup.setStatus('current')
hwCdpComplianceRemSysGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 3, 2, 3)).setObjects(("HUAWEI-CDP-COMPLIANCE-MIB", "hwCdpComplianceRemoteInfo"), ("HUAWEI-CDP-COMPLIANCE-MIB", "hwCdpComplianceRemTimeMark"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwCdpComplianceRemSysGroup = hwCdpComplianceRemSysGroup.setStatus('current')
hwCdpComplianceTrapGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 198, 3, 2, 4)).setObjects(("HUAWEI-CDP-COMPLIANCE-MIB", "hwCdpComplianceRemTablesChange"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwCdpComplianceTrapGroup = hwCdpComplianceTrapGroup.setStatus('current')
mibBuilder.exportSymbols("HUAWEI-CDP-COMPLIANCE-MIB", hwCdpComplianceRemoteTable=hwCdpComplianceRemoteTable, hwCdpCompliancePortConfigAdminStatus=hwCdpCompliancePortConfigAdminStatus, hwCdpComplianceRemoteInfo=hwCdpComplianceRemoteInfo, hwCdpComplianceGroups=hwCdpComplianceGroups, hwCdpComplianceRemoteEntry=hwCdpComplianceRemoteEntry, hwCdpCompliancePortConfigIfIndex=hwCdpCompliancePortConfigIfIndex, hwCdpComplianceEnable=hwCdpComplianceEnable, hwCdpComplianceNotifications=hwCdpComplianceNotifications, hwCdpComplianceCompliance=hwCdpComplianceCompliance, hwCdpCompliancePortConfigTable=hwCdpCompliancePortConfigTable, hwCdpComplianceNotificationPrefix=hwCdpComplianceNotificationPrefix, hwCdpComplianceStatsGroup=hwCdpComplianceStatsGroup, hwCdpComplianceStatsRemTablesAgeouts=hwCdpComplianceStatsRemTablesAgeouts, hwCdpComplianceStatsRemTablesLastChangeTime=hwCdpComplianceStatsRemTablesLastChangeTime, hwCdpComplianceStatsRxPortIfIndex=hwCdpComplianceStatsRxPortIfIndex, hwCdpComplianceRemTimeMark=hwCdpComplianceRemTimeMark, hwCdpComplianceRemoteSystemsData=hwCdpComplianceRemoteSystemsData, hwCdpComplianceStatsRxPortAgeoutsTotal=hwCdpComplianceStatsRxPortAgeoutsTotal, hwCdpCompliancePortStatsReset=hwCdpCompliancePortStatsReset, hwCdpComplianceRemTablesChange=hwCdpComplianceRemTablesChange, hwCdpComplianceConfiguration=hwCdpComplianceConfiguration, hwCdpComplianceTrapGroup=hwCdpComplianceTrapGroup, hwCdpComplianceMIB=hwCdpComplianceMIB, hwCdpComplianceRemLocalPortIfIndex=hwCdpComplianceRemLocalPortIfIndex, hwCdpComplianceObjects=hwCdpComplianceObjects, hwCdpComplianceNotificationInterval=hwCdpComplianceNotificationInterval, hwCdpComplianceStatsRxPortEntry=hwCdpComplianceStatsRxPortEntry, hwCdpCompliancePortConfigEntry=hwCdpCompliancePortConfigEntry, PYSNMP_MODULE_ID=hwCdpComplianceMIB, hwCdpComplianceCompliances=hwCdpComplianceCompliances, hwCdpComplianceRemSysGroup=hwCdpComplianceRemSysGroup, hwCdpCompliancePortConfigHoldTime=hwCdpCompliancePortConfigHoldTime, hwCdpComplianceStatsRxPortTable=hwCdpComplianceStatsRxPortTable, hwCdpComplianceConformance=hwCdpComplianceConformance, hwCdpComplianceConfigGroup=hwCdpComplianceConfigGroup, hwCdpComplianceStatistics=hwCdpComplianceStatistics, hwCdpCompliancePortConfigNotificationEnable=hwCdpCompliancePortConfigNotificationEnable, hwCdpComplianceStatsRxPortFramesTotal=hwCdpComplianceStatsRxPortFramesTotal)
| 140.868132 | 2,381 | 0.791715 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,843 | 0.22178 |
32c0b9c3ba62988df85d3108c0c4b36be8f563b9 | 1,223 | py | Python | pybb/contrib/mentions/processors.py | thoas/pybbm | 0e7ab7ef60f15951660015f2b9be0ff7192f1095 | [
"BSD-2-Clause"
]
| 1 | 2015-05-18T09:19:30.000Z | 2015-05-18T09:19:30.000Z | pybb/contrib/mentions/processors.py | ulule/pybbm | 0e7ab7ef60f15951660015f2b9be0ff7192f1095 | [
"BSD-2-Clause"
]
| 5 | 2017-06-13T16:25:34.000Z | 2018-07-17T20:30:56.000Z | pybb/contrib/mentions/processors.py | ulule/pybbm | 0e7ab7ef60f15951660015f2b9be0ff7192f1095 | [
"BSD-2-Clause"
]
| 1 | 2018-10-29T13:12:59.000Z | 2018-10-29T13:12:59.000Z | import re
from pybb.processors import BaseProcessor
from pybb.compat import get_user_model
from . import settings
class MentionProcessor(BaseProcessor):
username_re = r'@([\w\-]+)'
format = '@%(username)s'
tag = '[mention=%(user_id)s]%(username)s[/mention]'
model = get_user_model()
def get_user_url(self, user):
return settings.PYBB_MENTIONS_USER_URL(user)
def get_users(self, username_list):
return self.model.objects.filter(username__in=username_list).values_list('username', 'id')
def _format(self, user, body):
username, user_id = user
format = self.format % {
'username': username
}
body = body.replace(format, self.tag % {
'user_id': user_id,
'username': username
})
return body
def render(self):
body = self.body
username_list = [m.group(1) for m in re.finditer(self.username_re,
body,
re.MULTILINE)]
users = self.get_users(username_list)
for user in users:
body = self._format(user, body)
return body
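# Illustrative behaviour (example added for clarity, not part of the original
# module): for a registered user "john" whose id is 3, rendering the body
# "thanks @john!" produces "thanks [mention=3]john[/mention]!", i.e. every
# "@username" that matches an existing account is rewritten with the `tag`
# template defined above.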
| 25.479167 | 98 | 0.562551 | 1,104 | 0.902698 | 0 | 0 | 0 | 0 | 0 | 0 | 116 | 0.094849 |
32c304191982cf35da8aed8e53fd875c3bef3ba2 | 1,505 | py | Python | PageObjectModel/Test/addAndEditionData.py | lblaszkowski/Arena | 61f924bc7c3994ec7714fe68f60b02b35ccd286b | [
"Apache-2.0"
]
| null | null | null | PageObjectModel/Test/addAndEditionData.py | lblaszkowski/Arena | 61f924bc7c3994ec7714fe68f60b02b35ccd286b | [
"Apache-2.0"
]
| null | null | null | PageObjectModel/Test/addAndEditionData.py | lblaszkowski/Arena | 61f924bc7c3994ec7714fe68f60b02b35ccd286b | [
"Apache-2.0"
]
| null | null | null | import unittest
from selenium import webdriver
from PageObjectModel.Pages.addAndEditionDataPage import AddAndEditionData_Page
from time import sleep
url = 'https://buggy-testingcup.pgs-soft.com/'
class AddAndEditionDataPage(unittest.TestCase):
def setUp(self, browser="mozilla", task="task_3"):
if browser == "chrome" or browser == "ch":
self.driver = webdriver.Chrome(executable_path=r'../Drivers/ChromeDrive_74/chromedriver.exe')
self.driver.maximize_window()
self.driver.get(url + task)
elif browser == "mozilla" or browser == "ff":
self.driver = webdriver.Firefox(executable_path=r'../Drivers/FirefoxDrive_24/geckodriver.exe')
self.driver.maximize_window()
self.driver.get(url + task)
else:
print("Brak przeglądarki")
raise Exception("Brak przeglądarki")
return self.driver
def tearDown(self):
self.driver.close()
self.driver.quit()
def test_AddAndEditionData(self):
AddandEditionData = AddAndEditionData_Page(self.driver)
AddandEditionData.menuButtonClick()
AddandEditionData.dropdownMenuClick()
AddandEditionData.editFile()
AddandEditionData.fieldName("Jan")
AddandEditionData.fieldSurname("Nowak")
AddandEditionData.fieldNotes("Testowy napis")
AddandEditionData.fieldPhone("10981234098")
AddandEditionData.fieldImage()
AddandEditionData.saveButton()
| 32.717391 | 106 | 0.67907 | 1,302 | 0.863968 | 0 | 0 | 0 | 0 | 0 | 0 | 252 | 0.16722 |
32c40b429ba7f1090b72fd13e36b8055346940c3 | 827 | py | Python | q2_api_client/clients/mobile_ws/calendar_client.py | jcook00/q2-api-client | 4431af164eb4baf52e26e8842e017cad1609a279 | [
"BSD-2-Clause"
]
| null | null | null | q2_api_client/clients/mobile_ws/calendar_client.py | jcook00/q2-api-client | 4431af164eb4baf52e26e8842e017cad1609a279 | [
"BSD-2-Clause"
]
| null | null | null | q2_api_client/clients/mobile_ws/calendar_client.py | jcook00/q2-api-client | 4431af164eb4baf52e26e8842e017cad1609a279 | [
"BSD-2-Clause"
]
| null | null | null | from q2_api_client.clients.base_q2_client import BaseQ2Client
from q2_api_client.endpoints.mobile_ws_endpoints import CalendarEndpoint
class CalendarClient(BaseQ2Client):
def get_calendar(self):
"""GET /mobilews/calendar
:return: Response object
:rtype: requests.Response
"""
endpoint = CalendarEndpoint.CALENDAR.value
return self._get(url=self._build_url(endpoint))
def get_calendar_by_type(self, transaction_type):
"""GET /mobilews/calendar/{transactionType}
:param str transaction_type: path parameter
:return: Response object
:rtype: requests.Response
"""
endpoint = CalendarEndpoint.CALENDAR_TRANSACTION_TYPE.value.format(transactionType=transaction_type)
return self._get(url=self._build_url(endpoint))
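# Example usage (an illustrative sketch; the constructor arguments and the
# "ACH" transaction type below are assumptions, not part of this module):
#   client = CalendarClient(...)                # configured like any BaseQ2Client
#   response = client.get_calendar_by_type("ACH")
#   data = response.json()                      # a requests.Response per the docstrings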
| 33.08 | 108 | 0.718259 | 689 | 0.833132 | 0 | 0 | 0 | 0 | 0 | 0 | 280 | 0.338573 |
32c4baf38f537ef55e48bae1faabe6aee1fe7ca3 | 11,477 | py | Python | cg_token.py | gmnicke2/GISolve-API-Util | 74d10d2ae60c1f000ef151a394ef9276b284867a | [
"MIT"
]
| null | null | null | cg_token.py | gmnicke2/GISolve-API-Util | 74d10d2ae60c1f000ef151a394ef9276b284867a | [
"MIT"
]
| null | null | null | cg_token.py | gmnicke2/GISolve-API-Util | 74d10d2ae60c1f000ef151a394ef9276b284867a | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
"""
Set of utilities to issue/verify/revoke a CG token with REST calls
Requires valid username and password either in bash environment or
given at the command line.
Issue Token:
    Token can be easily created (and stored to env) with the following:
# create token using CG_USERNAME, CG_PASSWORD, and CG_API env variables
./cg_token.py
# create token specifying all the parameters on command line
./cg_token.py --username <login> --password <password> --endpoint <url>
# create token using CG_USERNAME, CG_API, but prompt for password
./cg_token.py --password -
# add token to environmental variables
export CG_TOKEN=`./cg_token.py`
# add token to environmental variable, specify extra parameters
export CG_TOKEN=`./cg_token.py --username <login> --endpoint <newurl>`
Verify or Revoke Token:
Verifying or Revoking requires the positional 'verify' or 'revoke'
command line argument.
User can still override env variables with command-line arguments.
Uses CG_API, and CG_TOKEN env variables for both.
Verify uses CG_CLIENT_ID and CG_CLIENT_IP for consumer ID & user client IP,
Revoke uses CG_USERNAME and CG_PASSWORD for security purposes :
# Verify token, overriding CG_CLIENT_ID and CG_CLIENT_IP with command
# line (Upon success, it will print the remaining lifetime of the token
# in seconds)
./cg_token.py verify --clientid <ID> --clientip <IP>
# Revoke token, overriding CG_TOKEN with command line
./cg_token.py revoke --token <token>
Print debug info to stderr:
Append the flag "--debug" or "-d" :
./cg_token.py --debug
"""
import sys, os, getpass
import json
import logging
import requests
import argparse
from requests import exceptions as rex
# This is used to disable InsecureRequestWarning.
requests.packages.urllib3.disable_warnings()
logger = logging.getLogger(__name__)
class CGException(Exception) :
def __init__(self, result) :
self.message = result['message']
self.error_code = result['error_code']
def __str__(self) :
return ("Error %d: %s" %(self.error_code, self.message))
def logger_initialize(debug) :
"""Initializes the format and level for the logger"""
_format = ("%(levelname)s - %(asctime)s\n%(message)s\n")
if debug :
logging.basicConfig(format=_format,
level=logging.DEBUG)
else :
logging.basicConfig(format=_format,
level=logging.WARNING)
def log_response(method, url, response, request) :
"""Logs request and response when in debug mode"""
if request.get('password', '') :
request['password'] = '*******'
logger.debug("URL: " + url)
logger.debug("Request: " + method)
logger.debug("Request Data (in JSON format)"
": " + json.dumps(request,indent=4,separators=(',',': ')))
logger.debug("Response (in JSON format)"
": " + json.dumps(response,indent=4,separators=(',',': ')))
def parse_args() :
"""Defines command line positional and optional arguments and checks
for valid action input if present. Additionally prompts with getpass
if user specifies "--password -" to override CG_PASSWORD
Args: none
Returns: A (tuple) containing the following:
args (namespace) : used to overwrite env variables when necessary
action (string) : for main to use as a switch for calls to perform
"""
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug",
action="store_true",
help='Allow debug info to be written to stderr')
parser.add_argument("-e", "--endpoint",
default=os.getenv('CG_API',''),
help="Set API url")
parser.add_argument("-p", "--password",
default=os.getenv('CG_PASSWORD',''),
help="Set password. '-' for secure prompting")
parser.add_argument("-u", "--username",
default=os.getenv('CG_USERNAME',''),
help="Set Username")
parser.add_argument("-t", "--token",
default=os.getenv('CG_TOKEN',''),
help="Set Token for Verify/Revoke")
parser.add_argument("-l", "--lifetime",
                        type=int,
default=43200,
help="Set Lifetime for Token Issue in seconds"
". minimum=3600 (1hr), maximum=12*3600 (12hr)")
parser.add_argument("-b", "--binding",
type=int,
default=1,
help="1: Bind with IP Address, 0: Don't Bind")
parser.add_argument("-c", "--clientid",
default=os.getenv('CG_CLIENT_ID',''),
help="Set Client ID for Verify")
parser.add_argument("-i", "--clientip",
default=os.getenv('CG_CLIENT_IP',''),
help="Set Client IP for Verify")
parser.add_argument("action", nargs='?', type=str, default='issue',
help='issue/verify/revoke')
args = parser.parse_args()
logger_initialize(args.debug)
if args.password and args.password == '-' :
args.password = getpass.getpass("Enter desired CG Password: ")
if not args.endpoint :
logger.error('CG_API (API url for REST calls) '
'not specified\n')
sys.exit(1)
if args.action.lower() not in ['issue','verify','revoke'] :
logger.error('Invalid Action')
sys.exit(1)
return (args,args.action.lower())
def cg_rest(method, endpoint, headers={}, **kwargs) :
"""Calls the CG REST endpoint passing keyword arguments given.
'cg_rest' provides a basic wrapper around the HTTP request to
the rest endpoint, and attempts to provide informative error
messages when errors occur. Exceptions are passed to the calling
function for final resolution.
cg_rest('POST', <url>, headers=<HTTP headers dict>, username=<username>,
password=<password>, ...)
or with a previously constructed data/params dict
cg_rest('POST', <url>, headers=headers, **data/params)
or with no header necessary
cg_rest('POST', <url>, **data/params)
Args:
method (str): the HTTP method that will be called
endpoint (str, URL): the REST endpoint
headers (dict, optional): HTTP headers
kwargs (optional): common keywords include username, password, etc.
Returns:
(dict): decodes the response and returns it as a dictionary
Raises:
        Raises CGException when the gateway server returns an error status.
        Other exceptions may be raised based on errors with the HTTP request
and response. See documentation of Python's request module for
a complete list.
"""
try :
if method.upper() == 'POST' or method.upper() == 'PUT' :
r = requests.request(method.upper(), endpoint, timeout=50,
verify=False, headers=headers, data=kwargs)
else : # Must be 'GET' or 'DELETE'
r = requests.request(method.upper(), endpoint, timeout=50,
verify=False, headers=headers, params=kwargs)
r.raise_for_status()
except (rex.ConnectionError, rex.HTTPError, rex.MissingSchema) as e :
logger.debug("Problem with API endpoint '%s', "
"is it entered correctly?" %endpoint)
raise
except (rex.Timeout) as e :
logger.debug('Request timed out, the service may be '
'temporarily unavailable')
raise
response = r.json()
log_response(method, endpoint, response, kwargs)
# If status is not provided, default to error.
if response.get('status','') and response.get('status','') == 'error' :
logger.debug("Call fails with '%s'" %response['result']['message'])
raise CGException(response['result'])
return response
def issue_token(endpoint, username, password, lifetime, binding) :
"""Calls the Gateway issueToken function and returns token.
Args:
endpoint (string, URL): the REST endpoint
username (string): the user's login
password (string): the user's password
lifetime (int): the lifetime of a token in seconds
(3600 <= lifetime <= 12*3600)
binding (int): 1 if user wants token to be bound to user IP
0 else
Returns:
(string): Open Service API token
Raises:
Passes any exceptions raised in cg_rest.
"""
data = {
'username' : username,
'password' : password,
'lifetime' : lifetime,
'binding' : binding
}
url = endpoint.rstrip('/') + '/token'
logger.debug('Issuing token from %s' %url)
response = cg_rest('POST', url, **data)
return response['result']['token']
def verify_token(endpoint, username, token, client_id, client_ip) :
"""Calls the Gateway verifyToken function, returns remaining token lifetime.
Args:
endpoint(string, URL): the REST endpoint
username (string):
token (string): Token to verify
client_id (string): Consumer ID
client_ip (string): User Client's IP Address
Returns:
(int): Remaining lifetime of token (in seconds)
Raises:
Passes any exceptions raised in cg_rest.
"""
data = {
'token' : token,
'consumer' : client_id,
'remote_addr' : client_ip,
'username' : username
}
url = endpoint.rstrip('/') + '/token'
logger.debug("Verifying token '%s' from '%s'" %(token,url))
data_length = str(len(json.dumps(data)))
headers = {'Content-Length' : data_length}
response = cg_rest('PUT', url, headers=headers, **data)
return response['result']['lifetime']
def revoke_token(endpoint, username, password, token) :
"""Calls the Gateway revokeToken function
Args:
endpoint (string, URL): the REST endpoint
username (string): the user's login
password (string): the user's password
token (string): The token to be revoked
Returns: void
Raises:
Passes any exceptions raised in cg_rest.
"""
params = {
'token' : token,
'username' : username,
'password' : password,
}
url = endpoint.rstrip('/') + "/token"
logger.debug("Revoking token '%s' from '%s'" %(token,url))
response = cg_rest('DELETE', url, **params)
def main() :
(args, action) = parse_args()
try :
if action == "issue" :
if ((args.binding not in [0,1]) or
not (3600<=args.lifetime<=43200)) :
logger.error("Lifetime must be between 3600 and 43200,"
"\nBinding must be 0 or 1")
sys.exit(1)
            print(issue_token(args.endpoint, args.username, args.password,
                args.lifetime, args.binding))
else :
if not args.token :
logger.error('No valid CG_TOKEN given')
sys.exit(1)
if action == "verify" :
                print(verify_token(args.endpoint, args.username,
                    args.token, args.clientid, args.clientip))
else :
revoke_token(args.endpoint, args.username,
args.password, args.token)
except CGException as e :
logger.error(e)
sys.exit(1)
if __name__ == '__main__' :
main()
| 33.55848 | 81 | 0.611658 | 241 | 0.020999 | 0 | 0 | 0 | 0 | 0 | 0 | 6,487 | 0.565217 |
32c57ec480ef32335403cba14fba78c713f0eb97 | 741 | py | Python | azext_script/compilers/az/handlers/HDInsight.py | yorek/adl | d9da1b7d46c71415e38a6efe5b1c8d45b02b3704 | [
"MIT"
]
| null | null | null | azext_script/compilers/az/handlers/HDInsight.py | yorek/adl | d9da1b7d46c71415e38a6efe5b1c8d45b02b3704 | [
"MIT"
]
| 1 | 2018-10-15T05:51:38.000Z | 2018-10-15T05:51:38.000Z | azext_script/compilers/az/handlers/HDInsight.py | yorek/adl | d9da1b7d46c71415e38a6efe5b1c8d45b02b3704 | [
"MIT"
]
| 1 | 2018-10-18T18:41:02.000Z | 2018-10-18T18:41:02.000Z | from .Generic import GenericHandler
class HDInsightHandler(GenericHandler):
azure_object = "hdinsight"
def execute(self):
fqn = self.get_full_resource_name()
self.add_context_parameter("resource-group", "group")
if fqn == "hdinsight" and self.action == "create":
self.add_context_parameter("location", "location")
if 'storage account' in self.context:
storage_account = self.context["storage account"]
storage_account += ".blob.core.windows.net"
self.add_parameter("storage-account", storage_account)
cmd = super(HDInsightHandler, self).execute()
self.save_to_context()
return cmd
| 30.875 | 70 | 0.618084 | 702 | 0.947368 | 0 | 0 | 0 | 0 | 0 | 0 | 148 | 0.19973 |
32c59fc06a151e5b5740b23fbb1aff371ee1d8f2 | 30,841 | py | Python | a2-py-beta/erd_converter.py | francisgerman70/CSC370 | 0682ea5abdfdbc87b76efd18f98e27a6c49d2b45 | [
"MIT"
]
| null | null | null | a2-py-beta/erd_converter.py | francisgerman70/CSC370 | 0682ea5abdfdbc87b76efd18f98e27a6c49d2b45 | [
"MIT"
]
| null | null | null | a2-py-beta/erd_converter.py | francisgerman70/CSC370 | 0682ea5abdfdbc87b76efd18f98e27a6c49d2b45 | [
"MIT"
]
 | null | null | null | from erd import *
from table import *
# This function converts an ERD object into a Database object
# The Database object should correspond to a fully correct implementation
# of the ERD, including both data structure and constraints, such that the
# CREATE TABLE statements generated by the Database object will populate an
# empty MySQL database to exactly implement the conceptual design communicated
# by the ERD.
#
# @TODO: Implement me!
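# A rough illustration of the intended mapping (an added, hypothetical example;
# the entity and attribute names are made up, but Table and Database are used
# exactly as they are in the code below): an ERD with entity sets
# Movies(title, year) and Stars(name) joined by a many-to-many relationship
# StarsIn would come back roughly as
#   Database([
#       Table('Movies', {'title', 'year'}, {'title', 'year'}, set()),
#       Table('Stars', {'name'}, {'name'}, set()),
#       Table('StarsIn', {'title', 'year', 'name'}, {'title', 'year', 'name'},
#             {(('title', 'year'), 'Movies', ('title', 'year')),
#              (('name',), 'Stars', ('name',))}),
#   ])
# where each foreign key is a (referencing columns, referenced table,
# referenced columns) tuple.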
def convert_to_table( erd ):
if len(erd.entity_sets[0].connections) != 0 and len(erd.entity_sets[1].connections) != 0:
divide0 = erd.entity_sets[0].connections[0]
set1, set2 = zip(divide0)
divide1 = erd.entity_sets[1].connections[0]
set11, set22 = zip(divide1)
entity_length = len(erd.entity_sets)
if entity_length == 1:
end = one_entity(erd)
return end
elif len(erd.entity_sets) == 3 and ((len(erd.entity_sets[0].parents) != 0) or (len(erd.entity_sets[1].parents) != 0) or (len(erd.entity_sets[2].parents) != 0)):
end = threeEntitySetWithParents(erd)
return end
elif erd.entity_sets[0].supporting_relations != [] and len(erd.entity_sets) == 2:
end = twoEntitySetWithSupportingRelation(erd)
return end
elif len(erd.entity_sets) == 3 and len(erd.relationships) == 1:
end = threeEntitySetsOneManyRelationship(erd)
return end
elif erd.entity_sets[1].supporting_relations != [] and len(erd.entity_sets) == 2:
end = twoEntitySetWithSupportingRelation(erd)
return end
elif entity_length == 2 and ((len(erd.entity_sets[0].parents) != 0) or (len(erd.entity_sets[1].parents) != 0)):
end = TwoEntityNoRelationship(erd)
return end
elif entity_length == 2 and len(erd.relationships[0].primary_key) != 0 and set22[0].value == 0:
end = oneManyWithRelationshipAttr(erd, set2)
return end
elif entity_length == 2 and len(erd.relationships[0].primary_key) != 0 and set2[0].value == 0:
end = manyOneWithRelationshipAttr(erd,set22)
return end
elif entity_length == 4:
end = fourEntitySetsWeak(erd)
return end
else:
#rel_length = len(erd.relationships)
#erd_length = entity_length + rel_length
split = erd.entity_sets[0].connections[0]
list1, list2 = zip(split)
split2 = erd.entity_sets[1].connections[0]
list11, list22 = zip(split2)
#print(list22[0].value)
#print(list2[0].value)
if list22[0].value == 0:
finish = one_many(erd , entity_length)
return finish
elif list2[0].value ==0:
finish2 = many_one(erd, entity_length)
return finish2
else:
# if entity_length == 1:
# one_entity(erd)
# return
#elif erd_length == 1 and list2[0].value == 1:
# two_entity(erd)
# return
foreign_key = []
my_db = []
#db = Database([])
entity_length = len(erd.entity_sets)
rel_length = len(erd.relationships)
erd_length = entity_length + rel_length
split = erd.entity_sets[0].connections[0]
list1, list2 = zip(split)
amount_primary1 = len(erd.entity_sets[0].primary_key)
amount_primary2 = len(erd.entity_sets[1].primary_key)
if (list1[0]) == erd.relationships[0].name and list2[0].value == 1:
for x in range(amount_primary1):
erd.relationships[0].attributes.append(erd.entity_sets[0].primary_key[x])
erd.relationships[0].primary_key.append(erd.entity_sets[0].primary_key[x])
for x in range(amount_primary2):
erd.relationships[0].attributes.append(erd.entity_sets[1].primary_key[x])
erd.relationships[0].primary_key.append(erd.entity_sets[1].primary_key[x])
t = ((*erd.entity_sets[0].primary_key,),erd.entity_sets[0].name,(*erd.entity_sets[0].primary_key,)),((*erd.entity_sets[1].primary_key,),erd.entity_sets[1].name,(*erd.entity_sets[1].primary_key,))
foreign_key = t
for x in range(entity_length):
if erd.entity_sets[x].name != [] and erd.entity_sets[x].attributes != [] and erd.entity_sets[x].primary_key != []:
my_db.append(Table(erd.entity_sets[x].name,set(erd.entity_sets[x].attributes),set(erd.entity_sets[x].primary_key),set()))
for x in range(rel_length):
if erd.relationships[x].name != []:
my_db.append(Table(erd.relationships[x].name,set(erd.relationships[x].attributes),set(erd.relationships[x].primary_key),set(foreign_key)))
db = Database(my_db)
cat = erd.entity_sets[0].primary_key[0]
#print(erd.entity_sets[0].name)
#print(erd_length)
#print(db.tables)
#print(amount_primary1)
#print(cat)
#print(sample_db.tables)
#for y in db:
# print(y.tables)
#print(erd.entity_sets[1].attributes[0])
#return sample_db
return db
def one_entity(erd):
my_db = []
db = Database([])
entity_length = len(erd.entity_sets)
for x in range(entity_length):
if erd.entity_sets[x].name != '' and erd.entity_sets[x].attributes != '' and erd.entity_sets[x].primary_key != '':
#my_db.append(Table(erd.entity_sets[x].name,erd.entity_sets[x].attributes,erd.entity_sets[x].primary_key,''))
my_db.append(Table(erd.entity_sets[x].name,set(erd.entity_sets[x].attributes),set(erd.entity_sets[x].primary_key),set()))
db = Database(my_db)
#print(db.tables)
return db
def one_many(erd, entity_length):
foreign_key = []
my_db = []
amount_primary1 = len(erd.entity_sets[1].primary_key)
for x in range(amount_primary1):
erd.entity_sets[0].attributes.append(erd.entity_sets[1].primary_key[x])
#erd.entity_sets[0].primary_key.append(erd.entity_sets[1].primary_key[x])
t = ((*erd.entity_sets[1].primary_key,),erd.entity_sets[1].name,(*erd.entity_sets[1].primary_key,)),
foreign_key = t
#print(t)
#print(entity_length)
my_db.append(Table(erd.entity_sets[0].name,set(erd.entity_sets[0].attributes),set(erd.entity_sets[0].primary_key), set(foreign_key)))
my_db.append(Table(erd.entity_sets[1].name,set(erd.entity_sets[1].attributes),set(erd.entity_sets[1].primary_key), set()))
db = Database(my_db)
#print(db.tables)
return db
def many_one(erd, entity_length):
foreign_key = []
my_db = []
amount_primary1 = len(erd.entity_sets[0].primary_key)
for x in range(amount_primary1):
erd.entity_sets[1].attributes.append(erd.entity_sets[0].primary_key[x])
#erd.entity_sets[1].primary_key.append(erd.entity_sets[0].primary_key[x])
t = ((*erd.entity_sets[0].primary_key,),erd.entity_sets[0].name,(*erd.entity_sets[0].primary_key,))
foreign_key = t
#print(t)
#print(entity_length)
#my_db.append(Table(erd.entity_sets[0].name,set(erd.entity_sets[0].attributes),set(erd.entity_sets[0].primary_key), set([((erd.entity_sets[1].primary_key[1],), erd.entity_sets[1].name, (erd.entity_sets[1].primary_key[1],))])))
#my_db.append(Table(erd.entity_sets[1].name,set(erd.entity_sets[1].attributes),set(erd.entity_sets[1].primary_key), set()))
my_db.append(Table(erd.entity_sets[0].name,set(erd.entity_sets[0].attributes),set(erd.entity_sets[0].primary_key), set()))
my_db.append(Table(erd.entity_sets[1].name,set(erd.entity_sets[1].attributes),set(erd.entity_sets[1].primary_key), set(foreign_key)))
db = Database(my_db)
#print(db.tables)
return db
def TwoEntityNoRelationship(erd):
foreign_key = []
my_db = []
entity_length = len(erd.entity_sets)
for x in range(1):
if erd.entity_sets[0].parents != []:
for x in range(len(erd.entity_sets[1].primary_key)):
erd.entity_sets[0].attributes.append(erd.entity_sets[1].primary_key[x])
erd.entity_sets[0].primary_key.append(erd.entity_sets[1].primary_key[x])
elif erd.entity_sets[1].parents != []:
for x in range(len(erd.entity_sets[0].primary_key)):
erd.entity_sets[1].attributes.append(erd.entity_sets[0].primary_key[x])
erd.entity_sets[1].primary_key.append(erd.entity_sets[0].primary_key[x])
if erd.entity_sets[0].parents != []:
t = ((*erd.entity_sets[1].primary_key,),erd.entity_sets[1].name,(*erd.entity_sets[1].primary_key,)),
elif erd.entity_sets[1].parents != []:
t = ((*erd.entity_sets[0].primary_key,),erd.entity_sets[0].name,(*erd.entity_sets[0].primary_key,)),
foreign_key = t
for x in range(entity_length):
if erd.entity_sets[x].name != [] and erd.entity_sets[x].parents != []:
my_db.append(Table(erd.entity_sets[x].name,set(erd.entity_sets[x].attributes),set(erd.entity_sets[x].primary_key),set(foreign_key)))
elif erd.entity_sets[x].name != []:
my_db.append(Table(erd.entity_sets[x].name,set(erd.entity_sets[x].attributes),set(erd.entity_sets[x].primary_key),set()))
db = Database(my_db)
#print(db.tables)
return db
def oneManyWithRelationshipAttr(erd, set2):
foreign_key = []
my_db = []
entity_length = len(erd.entity_sets)
amount_primary1 = len(erd.entity_sets[1].primary_key)
for x in range(amount_primary1):
erd.entity_sets[0].attributes.append(erd.entity_sets[1].primary_key[x])
#erd.entity_sets[0].primary_key.append(erd.entity_sets[1].primary_key[x])
for x in range(len(erd.relationships[0].primary_key)):
erd.entity_sets[0].attributes.append(erd.relationships[0].primary_key[x])
erd.entity_sets[0].primary_key.append(erd.relationships[0].primary_key[x])
t = ((*erd.entity_sets[1].primary_key,),erd.entity_sets[1].name,(*erd.entity_sets[1].primary_key,)),
foreign_key = t
#for x in range(entity_length):
# if erd.entity_sets[x].name != '' and erd.entity_sets[x].attributes != '' and erd.entity_sets[x].primary_key != '' and set2 == 1:
# my_db.append(Table(erd.entity_sets[x].name,set(erd.entity_sets[x].attributes),set(erd.entity_sets[x].primary_key),set(foreign_key)))
# elif erd.entity_sets[x].name != '' and erd.entity_sets[x].attributes != '' and erd.entity_sets[x].primary_key != '':
# my_db.append(Table(erd.entity_sets[x].name,set(erd.entity_sets[x].attributes),set(erd.entity_sets[x].primary_key),set()))
my_db.append(Table(erd.entity_sets[0].name,set(erd.entity_sets[0].attributes),set(erd.entity_sets[0].primary_key), set(foreign_key)))
my_db.append(Table(erd.entity_sets[1].name,set(erd.entity_sets[1].attributes),set(erd.entity_sets[1].primary_key), set()))
db = Database(my_db)
#print(db.tables)
return db
def manyOneWithRelationshipAttr(erd,set22):
foreign_key = []
my_db = []
entity_length = len(erd.entity_sets)
amount_primary1 = len(erd.entity_sets[0].primary_key)
for x in range(amount_primary1):
erd.entity_sets[1].attributes.append(erd.entity_sets[0].primary_key[x])
erd.entity_sets[1].primary_key.append(erd.entity_sets[0].primary_key[x])
for x in range(len(erd.relationships[0].primary_key)):
erd.entity_sets[1].attributes.append(erd.relationships[0].primary_key[x])
erd.entity_sets[1].primary_key.append(erd.relationships[0].primary_key[x])
t = ((*erd.entity_sets[0].primary_key,),erd.entity_sets[0].name,(*erd.entity_sets[0].primary_key,)),
foreign_key = t
#for x in range(entity_length):
# if erd.entity_sets[x].name != '' and erd.entity_sets[x].attributes != '' and erd.entity_sets[x].primary_key != '' and set22 == 1:
# my_db.append(Table(erd.entity_sets[x].name,set(erd.entity_sets[x].attributes),set(erd.entity_sets[x].primary_key),set(foreign_key)))
# elif erd.entity_sets[x].name != '' and erd.entity_sets[x].attributes != '' and erd.entity_sets[x].primary_key != '':
# my_db.append(Table(erd.entity_sets[x].name,set(erd.entity_sets[x].attributes),set(erd.entity_sets[x].primary_key),set()))
my_db.append(Table(erd.entity_sets[0].name,set(erd.entity_sets[0].attributes),set(erd.entity_sets[0].primary_key), set()))
my_db.append(Table(erd.entity_sets[1].name,set(erd.entity_sets[1].attributes),set(erd.entity_sets[1].primary_key), set(foreign_key)))
db = Database(my_db)
#print(db.tables)
return db
def twoEntitySetWithSupportingRelation(erd):
foreign_key = []
my_db = []
entity_length = len(erd.entity_sets)
for x in range(1):
if erd.entity_sets[0].supporting_relations != []:
amount_primary1 = len(erd.entity_sets[1].primary_key)
for x in range(amount_primary1):
erd.entity_sets[0].attributes.append(erd.entity_sets[1].primary_key[x])
erd.entity_sets[0].primary_key.append(erd.entity_sets[1].primary_key[x])
t = ((*erd.entity_sets[1].primary_key,),erd.entity_sets[1].name,(*erd.entity_sets[1].primary_key,)),
foreign_key = t
my_db.append(Table(erd.entity_sets[0].name,set(erd.entity_sets[0].attributes),set(erd.entity_sets[0].primary_key), set(foreign_key)))
my_db.append(Table(erd.entity_sets[1].name,set(erd.entity_sets[1].attributes),set(erd.entity_sets[1].primary_key), set()))
db = Database(my_db)
#print(db.tables)
return db
elif erd.entity_sets[1].supporting_relations != []:
amount_primary1 = len(erd.entity_sets[0].primary_key)
for x in range(amount_primary1):
erd.entity_sets[1].attributes.append(erd.entity_sets[0].primary_key[x])
erd.entity_sets[1].primary_key.append(erd.entity_sets[0].primary_key[x])
t = ((erd.entity_sets[0].primary_key[0],),erd.entity_sets[0].name,(erd.entity_sets[0].primary_key[0],)),
foreign_key = t
my_db.append(Table(erd.entity_sets[0].name,set(erd.entity_sets[0].attributes),set(erd.entity_sets[0].primary_key), set()))
my_db.append(Table(erd.entity_sets[1].name,set(erd.entity_sets[1].attributes),set(erd.entity_sets[1].primary_key), set(foreign_key)))
db = Database(my_db)
#print(db.tables)
return db
def threeEntitySetsOneManyRelationship( erd ):
foreign_key = []
my_db = []
t = set()
entity_length = len(erd.entity_sets)
divide00 = erd.entity_sets[0].connections[0]
set1, set2 = zip(divide00)
divide11 = erd.entity_sets[1].connections[0]
set11, set22 = zip(divide11)
divide22 = erd.entity_sets[2].connections[0]
set111, set222 = zip(divide22)
for x in range(1):
if set2[0].value == 0:
for x in range(len(erd.entity_sets[0].primary_key)):
erd.relationships[0].attributes.append(erd.entity_sets[0].primary_key[x])
for x in range(len(erd.entity_sets[1].primary_key)):
erd.relationships[0].attributes.append(erd.entity_sets[1].primary_key[x])
erd.relationships[0].primary_key.append(erd.entity_sets[1].primary_key[x])
for x in range(len(erd.entity_sets[2].primary_key)):
erd.relationships[0].attributes.append(erd.entity_sets[2].primary_key[x])
erd.relationships[0].primary_key.append(erd.entity_sets[2].primary_key[x])
t = ((*erd.entity_sets[1].primary_key,),erd.entity_sets[1].name,(*erd.entity_sets[1].primary_key,)),((*erd.entity_sets[2].primary_key,),erd.entity_sets[2].name,(*erd.entity_sets[2].primary_key,)),((*erd.entity_sets[0].primary_key,),erd.entity_sets[0].name,(*erd.entity_sets[0].primary_key,))
foreign_key = t
for x in range(entity_length):
if erd.entity_sets[x].name != [] and erd.entity_sets[x].attributes != [] and erd.entity_sets[x].primary_key != []:
my_db.append(Table(erd.entity_sets[x].name,set(erd.entity_sets[x].attributes),set(erd.entity_sets[x].primary_key),set()))
for x in range(1):
if erd.relationships[x].name != [] and erd.relationships[x].attributes != []:
my_db.append(Table(erd.relationships[x].name,set(erd.relationships[x].attributes),set(erd.relationships[x].primary_key),set(foreign_key)))
db = Database(my_db)
#print(db.tables)
return db
elif set22[0].value == 0:
for x in range(len(erd.entity_sets[0].primary_key)):
erd.relationships[0].attributes.append(erd.entity_sets[0].primary_key[x])
erd.relationships[0].primary_key.append(erd.entity_sets[0].primary_key[x])
for x in range(len(erd.entity_sets[1].primary_key)):
erd.relationships[0].attributes.append(erd.entity_sets[1].primary_key[x])
for x in range(len(erd.entity_sets[2].primary_key)):
erd.relationships[0].attributes.append(erd.entity_sets[2].primary_key[x])
erd.relationships[0].primary_key.append(erd.entity_sets[2].primary_key[x])
t = ((*erd.entity_sets[1].primary_key,),erd.entity_sets[1].name,(*erd.entity_sets[1].primary_key,)),((*erd.entity_sets[2].primary_key,),erd.entity_sets[2].name,(*erd.entity_sets[2].primary_key,)),((*erd.entity_sets[0].primary_key,),erd.entity_sets[0].name,(*erd.entity_sets[0].primary_key,))
foreign_key = t
for x in range(entity_length):
if erd.entity_sets[x].name != [] and erd.entity_sets[x].attributes != [] and erd.entity_sets[x].primary_key != []:
my_db.append(Table(erd.entity_sets[x].name,set(erd.entity_sets[x].attributes),set(erd.entity_sets[x].primary_key),set()))
for x in range(1):
if erd.relationships[x].name != [] and erd.relationships[x].attributes != []:
my_db.append(Table(erd.relationships[x].name,set(erd.relationships[x].attributes),set(erd.relationships[x].primary_key),set(foreign_key)))
db = Database(my_db)
#print(db.tables)
return db
elif set222[0].value == 0:
for x in range(len(erd.entity_sets[0].primary_key)):
erd.relationships[0].attributes.append(erd.entity_sets[0].primary_key[x])
erd.relationships[0].primary_key.append(erd.entity_sets[0].primary_key[x])
for x in range(len(erd.entity_sets[1].primary_key)):
erd.relationships[0].attributes.append(erd.entity_sets[1].primary_key[x])
erd.relationships[0].primary_key.append(erd.entity_sets[1].primary_key[x])
for x in range(len(erd.entity_sets[2].primary_key)):
erd.relationships[0].attributes.append(erd.entity_sets[2].primary_key[x])
t = ((*erd.entity_sets[1].primary_key,),erd.entity_sets[1].name,(*erd.entity_sets[1].primary_key,)),((*erd.entity_sets[2].primary_key,),erd.entity_sets[2].name,(*erd.entity_sets[2].primary_key,)),((*erd.entity_sets[0].primary_key,),erd.entity_sets[0].name,(*erd.entity_sets[0].primary_key,))
foreign_key = t
for x in range(entity_length):
if erd.entity_sets[x].name != [] and erd.entity_sets[x].attributes != [] and erd.entity_sets[x].primary_key != []:
my_db.append(Table(erd.entity_sets[x].name,set(erd.entity_sets[x].attributes),set(erd.entity_sets[x].primary_key),set()))
for x in range(1):
if erd.relationships[x].name != [] and erd.relationships[x].attributes != []:
my_db.append(Table(erd.relationships[x].name,set(erd.relationships[x].attributes),set(erd.relationships[x].primary_key),set(foreign_key)))
db = Database(my_db)
#print(db.tables)
return db
else:
for x in range(len(erd.entity_sets[0].primary_key)):
erd.relationships[0].attributes.append(erd.entity_sets[0].primary_key[x])
erd.relationships[0].primary_key.append(erd.entity_sets[0].primary_key[x])
for x in range(len(erd.entity_sets[1].primary_key)):
erd.relationships[0].attributes.append(erd.entity_sets[1].primary_key[x])
erd.relationships[0].primary_key.append(erd.entity_sets[1].primary_key[x])
for x in range(len(erd.entity_sets[2].primary_key)):
erd.relationships[0].attributes.append(erd.entity_sets[2].primary_key[x])
erd.relationships[0].primary_key.append(erd.entity_sets[2].primary_key[x])
t = ((*erd.entity_sets[1].primary_key,),erd.entity_sets[1].name,(*erd.entity_sets[1].primary_key,)),((*erd.entity_sets[2].primary_key,),erd.entity_sets[2].name,(*erd.entity_sets[2].primary_key,)),((*erd.entity_sets[0].primary_key,),erd.entity_sets[0].name,(*erd.entity_sets[0].primary_key,))
foreign_key = t
for x in range(entity_length):
if erd.entity_sets[x].name != [] and erd.entity_sets[x].attributes != [] and erd.entity_sets[x].primary_key != []:
my_db.append(Table(erd.entity_sets[x].name,set(erd.entity_sets[x].attributes),set(erd.entity_sets[x].primary_key),set()))
for x in range(1):
if erd.relationships[x].name != [] and erd.relationships[x].attributes != []:
my_db.append(Table(erd.relationships[x].name,set(erd.relationships[x].attributes),set(erd.relationships[x].primary_key),set(foreign_key)))
db = Database(my_db)
#print(db.tables)
return db
def threeEntitySetWithParents(erd):
foreign_key = []
foreign_key2 = []
my_db = []
entity_length = len(erd.entity_sets)
hi = 0
for x in range(entity_length):
if erd.entity_sets[x].parents != []:
hi +=1
#print(hi)
if erd.entity_sets[0].parents != [] and erd.entity_sets[1].parents != []:
t2 = ((*erd.entity_sets[2].primary_key,),erd.entity_sets[2].name,(*erd.entity_sets[2].primary_key,)),((*erd.entity_sets[0].primary_key,),erd.entity_sets[0].name,(*erd.entity_sets[0].primary_key,)),
for x in range(len(erd.entity_sets[2].primary_key)):
erd.entity_sets[0].attributes.append(erd.entity_sets[2].primary_key[x])
erd.entity_sets[0].primary_key.append(erd.entity_sets[2].primary_key[x])
t = ((*erd.entity_sets[2].primary_key,),erd.entity_sets[2].name,(*erd.entity_sets[2].primary_key,)),
for x in range(len(erd.entity_sets[0].primary_key)):
erd.entity_sets[1].attributes.append(erd.entity_sets[0].primary_key[x])
erd.entity_sets[1].primary_key.append(erd.entity_sets[0].primary_key[x])
foreign_key = t
foreign_key2 = t2
my_db.append(Table(erd.entity_sets[0].name,set(erd.entity_sets[0].attributes),set(erd.entity_sets[0].primary_key),set(foreign_key)))
my_db.append(Table(erd.entity_sets[1].name,set(erd.entity_sets[1].attributes),set(erd.entity_sets[1].primary_key),set(foreign_key2)))
my_db.append(Table(erd.entity_sets[2].name,set(erd.entity_sets[2].attributes),set(erd.entity_sets[2].primary_key),set()))
db = Database(my_db)
#print(db.tables)
return db
elif erd.entity_sets[1].parents != [] and erd.entity_sets[2].parents != []:
t2 = ((*erd.entity_sets[1].primary_key,),erd.entity_sets[1].name,(*erd.entity_sets[1].primary_key,)),((*erd.entity_sets[0].primary_key,),erd.entity_sets[0].name,(*erd.entity_sets[0].primary_key,)),
for x in range(len(erd.entity_sets[0].primary_key)):
erd.entity_sets[1].attributes.append(erd.entity_sets[0].primary_key[x])
erd.entity_sets[1].primary_key.append(erd.entity_sets[0].primary_key[x])
t = ((*erd.entity_sets[0].primary_key,),erd.entity_sets[0].name,(*erd.entity_sets[0].primary_key,)),
for x in range(len(erd.entity_sets[1].primary_key)):
erd.entity_sets[2].attributes.append(erd.entity_sets[1].primary_key[x])
erd.entity_sets[2].primary_key.append(erd.entity_sets[1].primary_key[x])
foreign_key = t
foreign_key2 = t2
my_db.append(Table(erd.entity_sets[0].name,set(erd.entity_sets[0].attributes),set(erd.entity_sets[0].primary_key),set()))
my_db.append(Table(erd.entity_sets[1].name,set(erd.entity_sets[1].attributes),set(erd.entity_sets[1].primary_key),set(foreign_key)))
my_db.append(Table(erd.entity_sets[2].name,set(erd.entity_sets[2].attributes),set(erd.entity_sets[2].primary_key),set(foreign_key2)))
db = Database(my_db)
#print(db.tables)
return db
elif erd.entity_sets[0].parents != [] and erd.entity_sets[2].parents != []:
t2 = ((*erd.entity_sets[1].primary_key,),erd.entity_sets[1].name,(*erd.entity_sets[1].primary_key,)),((*erd.entity_sets[0].primary_key,),erd.entity_sets[0].name,(*erd.entity_sets[0].primary_key,)),
for x in range(len(erd.entity_sets[0].primary_key)):
erd.entity_sets[1].attributes.append(erd.entity_sets[0].primary_key[x])
erd.entity_sets[1].primary_key.append(erd.entity_sets[0].primary_key[x])
t = ((*erd.entity_sets[0].primary_key,),erd.entity_sets[0].name,(*erd.entity_sets[0].primary_key,)),
for x in range(len(erd.entity_sets[1].primary_key)):
erd.entity_sets[2].attributes.append(erd.entity_sets[1].primary_key[x])
erd.entity_sets[2].primary_key.append(erd.entity_sets[1].primary_key[x])
foreign_key = t
foreign_key2 = t2
my_db.append(Table(erd.entity_sets[0].name,set(erd.entity_sets[0].attributes),set(erd.entity_sets[0].primary_key),set()))
my_db.append(Table(erd.entity_sets[1].name,set(erd.entity_sets[1].attributes),set(erd.entity_sets[1].primary_key),set(foreign_key)))
my_db.append(Table(erd.entity_sets[2].name,set(erd.entity_sets[2].attributes),set(erd.entity_sets[2].primary_key),set(foreign_key2)))
db = Database(my_db)
#print(db.tables)
return db
return entity_length
def fourEntitySetsWeak(erd):
foreign_key = []
foreign_key2 = []
my_db = []
entity_length = len(erd.entity_sets)
#print(entity_length)
#entity_length = len(erd.entity_sets)
divi001 = erd.entity_sets[0].connections[0]
set123, set223 = zip(divi001)
div = erd.entity_sets[3].connections[0]
#hi, hello = zip(div)
#print(div)
count = 0
for y in range(entity_length-1):
# print(x)
if len(erd.entity_sets[y].supporting_relations) == 0 and count == 0:
count+=1
#print(count)
#amount_primary1 = len(erd.entity_sets[1].primary_key)
#for x in range(amount_primary1):
# erd.entity_sets[0].attributes.append(erd.entity_sets[1].primary_key[x])
# erd.entity_sets[0].primary_key.append(erd.entity_sets[1].primary_key[x])
#t = ((*erd.entity_sets[1].primary_key,),erd.entity_sets[1].name,(*erd.entity_sets[1].primary_key,)),
#foreign_key = t
#my_db.append(Table(erd.entity_sets[0].name,set(erd.entity_sets[0].attributes),set(erd.entity_sets[0].primary_key), set(foreign_key)))
my_db.append(Table(erd.entity_sets[y].name,set(erd.entity_sets[y].attributes),set(erd.entity_sets[y].primary_key), set()))
# db = Database(my_db)
#print(db.tables)
#return db
elif len(erd.entity_sets[y].supporting_relations) == 1 and erd.entity_sets[y].supporting_relations[0] == erd.relationships[0].name:
count+=1
#print(count)
amount_primary1 = len(erd.entity_sets[0].primary_key)
for x in range(amount_primary1):
erd.entity_sets[y].attributes.append(erd.entity_sets[0].primary_key[x])
erd.entity_sets[y].primary_key.append(erd.entity_sets[0].primary_key[x])
t = ((erd.entity_sets[0].primary_key[0],),erd.entity_sets[0].name,(erd.entity_sets[0].primary_key[0],)),
foreign_key = t
#print(y)
#my_db.append(Table(erd.entity_sets[0].name,set(erd.entity_sets[0].attributes),set(erd.entity_sets[0].primary_key), set()))
my_db.append(Table(erd.entity_sets[y].name,set(erd.entity_sets[y].attributes),set(erd.entity_sets[y].primary_key), set(foreign_key)))
#db = Database(my_db)
#print(db.tables)
#return db
elif erd.entity_sets[y].supporting_relations[0] == erd.relationships[1].name:
count+=1
#print(count)
amount_primary1 = len(erd.entity_sets[1].primary_key)
for x in range(amount_primary1):
erd.entity_sets[y].attributes.append(erd.entity_sets[1].primary_key[x])
erd.entity_sets[y].primary_key.append(erd.entity_sets[1].primary_key[x])
t = ((*erd.entity_sets[1].primary_key,),erd.entity_sets[1].name,(*erd.entity_sets[1].primary_key,)),
foreign_key = t
#my_db.append(Table(erd.entity_sets[0].name,set(erd.entity_sets[0].attributes),set(erd.entity_sets[0].primary_key), set()))
my_db.append(Table(erd.entity_sets[y].name,set(erd.entity_sets[y].attributes),set(erd.entity_sets[y].primary_key), set(foreign_key)))
#db = Database(my_db)
#print(db.tables)
#return db
else:
#len(erd.entity_sets[y].supporting_relations) == 0 :
count+=1
#print(count)
amount_primary1 = len(erd.entity_sets[1].primary_key)
for x in range(amount_primary1):
erd.entity_sets[3].attributes.append(erd.entity_sets[1].primary_key[x])
#erd.entity_sets[y].primary_key.append(erd.entity_sets[1].primary_key[x])
t = ((*erd.entity_sets[1].primary_key,),erd.entity_sets[1].name,(*erd.entity_sets[1].primary_key,)),
foreign_key = t
#my_db.append(Table(erd.entity_sets[0].name,set(erd.entity_sets[0].attributes),set(erd.entity_sets[0].primary_key), set()))
my_db.append(Table(erd.entity_sets[3].name,set(erd.entity_sets[3].attributes),set(erd.entity_sets[3].primary_key), set(foreign_key)))
#print(erd.entity_sets[1].supporting_relations[0])
#print(erd.relationships[0].name)
db = Database(my_db)
#print(db.tables)
return db
| 55.171735 | 303 | 0.642489 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,371 | 0.141727 |
32c6b6ee54440932d94dc43f2f2f342cc123a082 | 1,848 | py | Python | ObitSystem/ObitSD/scripts/scriptResidCal.py | sarrvesh/Obit | e4ce6029e9beb2a8c0316ee81ea710b66b2b7986 | [
"Linux-OpenIB"
]
| 5 | 2019-08-26T06:53:08.000Z | 2020-10-20T01:08:59.000Z | ObitSystem/ObitSD/scripts/scriptResidCal.py | sarrvesh/Obit | e4ce6029e9beb2a8c0316ee81ea710b66b2b7986 | [
"Linux-OpenIB"
]
| null | null | null | ObitSystem/ObitSD/scripts/scriptResidCal.py | sarrvesh/Obit | e4ce6029e9beb2a8c0316ee81ea710b66b2b7986 | [
"Linux-OpenIB"
]
| 8 | 2017-08-29T15:12:32.000Z | 2022-03-31T12:16:08.000Z | # Program to self calibrate OTF data
import Obit, OTF, Image, OSystem, OErr, OTFGetSoln, InfoList, Table
# Init Obit
err=OErr.OErr()
ObitSys=OSystem.OSystem ("Python", 1, 103, 1, ["None"], 1, ["./"], 1, 0, err)
OErr.printErrMsg(err, "Error with Obit startup")
# Files
disk = 1
# Dirty
inFullFile = "OTFDirtyFull.fits" # input Full OTF data
inSubFile = "OTFDirtySub.fits" # input Full OTF data
#Clean
#inFullFile = "OTFCleanFull.fits" # input Full OTF data
#inSubFile = "OTFCleanSub.fits" # input Full OTF data
# Set data
fullData = OTF.newPOTF("Input data", inFullFile, disk, 1, err)
subData = OTF.newPOTF("Input data", inSubFile, disk, 1, err)
OErr.printErrMsg(err, "Error creating input data object")
# Calibration parameters
calType = "Filter"
solint = 5.0 / 86400.0
minRMS = 0.0
minEl = 0.0
calJy = [1.0,1.0]
dim = OTF.dim
dim[0] = 1
inInfo = OTF.POTFGetList(subData)
InfoList.PInfoListAlwaysPutFloat(inInfo, "SOLINT", dim, [solint])
InfoList.PInfoListAlwaysPutFloat(inInfo, "MINRMS", dim, [minRMS])
InfoList.PInfoListAlwaysPutFloat(inInfo, "MINEL", dim, [minEl])
dim[0] = len(calJy)
InfoList.PInfoListAlwaysPutFloat(inInfo, "CALJY", dim, calJy)
dim[0] = len(calType)
InfoList.PInfoListAlwaysPutString(inInfo, "calType", dim, [calType])
dim[0] = 1
solnTable = OTFGetSoln.POTFGetSolnFilter (subData, fullData, err)
soln = Table.PTableGetVer(solnTable)
# Update Cal table
# Soln2Cal parameters (most defaulted)
OTF.Soln2CalInput["InData"] = fullData
OTF.Soln2CalInput["soln"] = soln
# Use highest extant Cal table as input
oldCal = Obit.OTFGetHighVer(fullData.me, "OTFCal")
if oldCal == 0: # Must not be one
oldCal = -1
OTF.Soln2CalInput["oldCal"] = oldCal
OTF.Soln2CalInput["newCal"] = 0
OTF.Soln2Cal(err,OTF.Soln2CalInput)
# Shutdown
OErr.printErr(err)
print 'Done, calibrated',inFullFile
| 31.862069 | 77 | 0.715368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 631 | 0.34145 |
32c6c31592e8107e78ef2bb52771dcffacd50781 | 393 | py | Python | html_mining/twitter.py | sourceperl/sandbox | bbe1be52c3e51906a8ec94411c4df6a95dcbb39c | [
"MIT"
]
| null | null | null | html_mining/twitter.py | sourceperl/sandbox | bbe1be52c3e51906a8ec94411c4df6a95dcbb39c | [
"MIT"
]
| null | null | null | html_mining/twitter.py | sourceperl/sandbox | bbe1be52c3e51906a8ec94411c4df6a95dcbb39c | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
r = requests.get("https://twitter.com/ThePSF", headers={"User-Agent": ""})
if r.status_code == 200:
s = BeautifulSoup(r.content, "html.parser")
# extract tweets
l_tw = []
for p in s.find_all("p", attrs={"class": "tweet-text"}):
l_tw.append(p.text.strip())
print(l_tw)
| 23.117647 | 74 | 0.62341 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 138 | 0.351145 |
32c80a80f478110db9183291633d248502cd65ad | 590 | py | Python | warehouse_labeling_machines/libs/utils.py | sdg97/warehouse_labeling_machines | 3650b9fb2d3fef85ee01925acf0a9266dafe746a | [
"Apache-2.0"
]
| null | null | null | warehouse_labeling_machines/libs/utils.py | sdg97/warehouse_labeling_machines | 3650b9fb2d3fef85ee01925acf0a9266dafe746a | [
"Apache-2.0"
]
| null | null | null | warehouse_labeling_machines/libs/utils.py | sdg97/warehouse_labeling_machines | 3650b9fb2d3fef85ee01925acf0a9266dafe746a | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import decimal
import multiprocessing
import random
def roundDecimal(v):
'''
    Rounding a Decimal turns out to be more complicated than expected
'''
return v.quantize(decimal.Decimal('0.01'), rounding=decimal.ROUND_HALF_UP)
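# For illustration (added example): roundDecimal(decimal.Decimal('2.675'))
# returns Decimal('2.68'), because ROUND_HALF_UP acts on the exact decimal
# value, while the float-based round(2.675, 2) typically yields 2.67 since the
# nearest binary float sits slightly below 2.675.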
def maybeStart(startCb, debug):
'''
    Runs this callback only every now and then:
    on each worker restart the callback fires at random, roughly once across the pool of workers
'''
if debug:
return
workers = multiprocessing.cpu_count() * 2 + 1
if random.randrange(workers) == 0:
startCb()
| 21.851852 | 78 | 0.666102 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 259 | 0.43824 |
32c8f25c548f019704dfb22f0db7ab07f62d2dd9 | 504 | py | Python | projeto/main/migrations/0017_alter_user_room.py | neilom18/g5-chess | 8998199b3432f0b83aa27e5c2126173ecc87f311 | [
"MIT"
]
| null | null | null | projeto/main/migrations/0017_alter_user_room.py | neilom18/g5-chess | 8998199b3432f0b83aa27e5c2126173ecc87f311 | [
"MIT"
]
| 1 | 2021-10-03T22:26:45.000Z | 2021-10-03T22:26:45.000Z | projeto/main/migrations/0017_alter_user_room.py | neilom18/g5-chess | 8998199b3432f0b83aa27e5c2126173ecc87f311 | [
"MIT"
]
| null | null | null | # Generated by Django 3.2.4 on 2021-09-24 15:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0016_alter_user_usercode'),
]
operations = [
migrations.AlterField(
model_name='user',
name='room',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='room', to='main.room'),
),
]
| 25.2 | 141 | 0.640873 | 378 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.214286 |
08613adf55222eb81cf9aea8d6ff94d2cf2ab660 | 105 | py | Python | groups/views.py | AliAkberAakash/learn-in-groups | 850601ddd5520c850ebec12003c8337670762948 | [
"MIT"
]
| null | null | null | groups/views.py | AliAkberAakash/learn-in-groups | 850601ddd5520c850ebec12003c8337670762948 | [
"MIT"
]
| null | null | null | groups/views.py | AliAkberAakash/learn-in-groups | 850601ddd5520c850ebec12003c8337670762948 | [
"MIT"
]
| null | null | null | from django.shortcuts import render
def group_list(request):
return render(request, 'group_list.html')
| 21 | 42 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.161905 |
086749fe086bfe8b53982e2dc76e87c1e91b6cc7 | 1,596 | py | Python | code/p3.py | OscarFlores-IFi/CDINP19 | 7fb0cb6ff36b9a10bcfa0772b172c5e49996df48 | [
"MIT"
]
| null | null | null | code/p3.py | OscarFlores-IFi/CDINP19 | 7fb0cb6ff36b9a10bcfa0772b172c5e49996df48 | [
"MIT"
]
| null | null | null | code/p3.py | OscarFlores-IFi/CDINP19 | 7fb0cb6ff36b9a10bcfa0772b172c5e49996df48 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 11 09:18:37 2019
@author: if715029
"""
import pandas as pd
import numpy as np
import sklearn.metrics as skm
import scipy.spatial.distance as sc
#%% Read the data
data = pd.read_excel('../data/Test de películas(1-16).xlsx', encoding='latin_1')
#%% Select the data (my own approach)
pel = pd.DataFrame()
for i in range((len(data.T)-5)//3):
pel = pel.append(data.iloc[:,6+i*3])
pel = pel.T
print(pel)
#%% Select the data (Riemann's approach)
csel = np.arange(6,243,3)
cnames = list(data.columns.values[csel])
datan = data[cnames]
#%% Averages
movie_prom = datan.mean(axis=0)
user_prom = datan.mean(axis=1)
#%% Convert ratings to binary (>= 3)
datan = datan.copy()
datan[datan<3] = 0
datan[datan>=3] = 1
#%% Compute similarity-index distances
#D1 = sc.pdist(datan,'hamming') # hamming == matching
D1 = sc.pdist(datan,'jaccard')
D1 = sc.squareform(D1)
#D2 = sc.pdist(data_b,'jaccard') # hamming == matching
#D2 = sc.squareform(D2)
Isim1 = 1-D1
#%% Select a user and find the most similar users
user = 1
Isim_user = Isim1[user]
Isim_user_sort = np.sort(Isim_user)
indx_user = np.argsort(Isim_user)
#%% Movie recommendation, approach 1.
USER = datan.loc[user]
USER_sim = datan.loc[indx_user[-2]]
indx_recomend1 = (USER_sim==1)&(USER==0)
recomend1 = list(USER.index[indx_recomend1])
#%% Movie recommendation, approach 2.
USER = datan.loc[user]
USER_sim = np.mean(datan.loc[indx_user[-6:-1]],axis = 0)
USER_sim[USER_sim<=.5]=0
USER_sim[USER_sim>.5]=1
indx_recomend2 = (USER_sim==1)&(USER==0)
recomend2 = list(USER.index[indx_recomend2])
| 21.863014 | 80 | 0.697368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 575 | 0.359375 |
0867a27f2b0a9d65b0fbacf348d77dfbc3427264 | 1,187 | py | Python | itao/utils/qt_logger.py | MaxChangInnodisk/itao | b0745eb48bf67718ef00db566c4cc19896d903a7 | [
"MIT"
]
| null | null | null | itao/utils/qt_logger.py | MaxChangInnodisk/itao | b0745eb48bf67718ef00db566c4cc19896d903a7 | [
"MIT"
]
| null | null | null | itao/utils/qt_logger.py | MaxChangInnodisk/itao | b0745eb48bf67718ef00db566c4cc19896d903a7 | [
"MIT"
]
| null | null | null | import logging
class CustomLogger:
def __init__(self):
pass
""" Create logger which name is 'dev' """
def create_logger(self, name='dev', log_file='itao.log', write_mode='w'):
logger = logging.getLogger(name)
# setup LEVEL
logger.setLevel(logging.DEBUG)
# setup formatter
formatter = logging.Formatter(
"%(asctime)s %(levelname)-.4s %(message)s",
"%m-%d %H:%M:%S")
# setup handler
stream_handler = logging.StreamHandler()
file_handler = logging.FileHandler(log_file, write_mode, 'utf-8')
# add formatter into handler
stream_handler.setFormatter(formatter)
file_handler.setFormatter(formatter)
# add handler into logger
logger.addHandler(stream_handler)
logger.addHandler(file_handler)
logger.info('Create Logger: {}'.format(name))
return logger
""" get logger """
def get_logger(self, name='dev', log_file='itao.log', write_mode='w'):
logger = logging.getLogger(name)
return logger if logger.hasHandlers() else self.create_logger(name, log_file, write_mode) | 34.911765 | 97 | 0.615838 | 1,171 | 0.986521 | 0 | 0 | 0 | 0 | 0 | 0 | 277 | 0.233361 |
0867ca55ebc85e21b29f1ca39ad6e18f1d0c662a | 99 | py | Python | pypupil/serializer.py | choisuyeon/pypupil | 3fdb1f29c6b28613b6b39094c01e61560214daff | [
"MIT"
]
| 9 | 2018-08-07T11:00:54.000Z | 2021-02-13T04:36:05.000Z | pypupil/serializer.py | choisuyeon/pypupil | 3fdb1f29c6b28613b6b39094c01e61560214daff | [
"MIT"
]
| null | null | null | pypupil/serializer.py | choisuyeon/pypupil | 3fdb1f29c6b28613b6b39094c01e61560214daff | [
"MIT"
]
| 1 | 2020-12-03T00:44:29.000Z | 2020-12-03T00:44:29.000Z | class Serializer:
""" send data serially
"""
def __init__(self):
return
| 14.142857 | 27 | 0.525253 | 97 | 0.979798 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.313131 |
08687783aacc944c351fc37618c9c87ef69b3d6b | 2,296 | py | Python | scripts/ndvi_diff.py | hkfrei/pythonRemoteSensing | c8681d859313ee5ad01e5b9753f8c43462268624 | [
"MIT"
]
| 1 | 2019-12-18T21:54:22.000Z | 2019-12-18T21:54:22.000Z | scripts/ndvi_diff.py | hkfrei/pythonRemoteSensing | c8681d859313ee5ad01e5b9753f8c43462268624 | [
"MIT"
]
| null | null | null | scripts/ndvi_diff.py | hkfrei/pythonRemoteSensing | c8681d859313ee5ad01e5b9753f8c43462268624 | [
"MIT"
]
| 1 | 2020-07-01T16:44:21.000Z | 2020-07-01T16:44:21.000Z | import numpy
import rasterio
import gdal
print('all modules imported')
# path to the folder with the ndvi rasters
base_path = "/Users/hk/Downloads/gaga/"
# shapefile with forest mask
forest_mask = base_path + "waldmaske_wgs84.shp"
# initialize the necessary rasters for the ndvi calculation.
ndvi_2017 = rasterio.open(base_path + "ndvi_17.tiff", driver="GTiff")
ndvi_2018 = rasterio.open(base_path + "ndvi_18.tiff", driver="GTiff")
# print out metadata about the ndvi's
print(ndvi_2018.count) # number of raster bands
print(ndvi_2017.count) # number of raster bands
print(ndvi_2018.height) # row count
print(ndvi_2018.dtypes) # data type of the raster e.g. ('float64',)
print(ndvi_2018.crs) # projection of the raster e.g. EPSG:32632
print("calculate ndvi difference")
# this will give us an array of values, not an actual raster image.
ndvi_diff_array = numpy.subtract(ndvi_2018.read(1), ndvi_2017.read(1))
print("reclassify")
# reclassify
ndvi_diff_reclass_array = numpy.where(
ndvi_diff_array <= -0.05, 1, 9999.0
)
# create a new (empty) raster for the "original" diff
ndvi_diff_image = rasterio.open(base_path + "ndvi_diff.tif", "w", driver="Gtiff", width=ndvi_2018.width,
height=ndvi_2018.height, count=1, crs=ndvi_2018.crs, transform=ndvi_2018.transform,
dtype='float64')
# create a new (empty) raster for the reclassified diff
ndvi_diff_reclass_image = rasterio.open(base_path + "ndvi_reclass_diff.tif", "w", driver="Gtiff", width=ndvi_2018.width,
height=ndvi_2018.height, count=1, crs=ndvi_2018.crs,
transform=ndvi_2018.transform, dtype='float64')
# write the ndvi's to raster
ndvi_diff_image.write(ndvi_diff_array.astype("float64"), 1)
ndvi_diff_reclass_image.write(ndvi_diff_reclass_array.astype("float64"), 1)
ndvi_diff_image.close()
ndvi_diff_reclass_image.close()
# extract forest areas
# Make sure to add correct Nodata and Alpha values. They have to match the reclassified values.
warp_options = gdal.WarpOptions(cutlineDSName=forest_mask, cropToCutline=True, dstNodata=9999, dstAlpha=9999)
gdal.Warp(base_path + "change_masked.tif", base_path + "ndvi_reclass_diff.tif", options=warp_options)
print("finished")
| 41.745455 | 120 | 0.726916 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 946 | 0.412021 |
08691612fc229c4b74017cbf49ecddb0965a12ea | 462 | py | Python | helga_umb/signals/util.py | ktdreyer/helga-umb | f0c6858745d90205e74eec0eb5ebaafa655b2336 | [
"MIT"
]
| null | null | null | helga_umb/signals/util.py | ktdreyer/helga-umb | f0c6858745d90205e74eec0eb5ebaafa655b2336 | [
"MIT"
]
| 2 | 2018-04-27T15:37:10.000Z | 2018-08-22T21:00:40.000Z | helga_umb/signals/util.py | ktdreyer/helga-umb | f0c6858745d90205e74eec0eb5ebaafa655b2336 | [
"MIT"
]
| null | null | null | def product_from_branch(branch):
"""
Return a product name from this branch name.
:param branch: eg. "ceph-3.0-rhel-7"
:returns: eg. "ceph"
"""
if branch.startswith('private-'):
# Let's just return the thing after "private-" and hope there's a
# product string match somewhere in there.
return branch[8:]
# probably not gonna work for "stream" branches :(
parts = branch.split('-', 1)
return parts[0]
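# A couple of illustrative checks (added example, not in the original module),
# exercising the docstring example and the "private-" prefix handling:
assert product_from_branch("ceph-3.0-rhel-7") == "ceph"
assert product_from_branch("private-ceph-3.0-rhel-7") == "ceph-3.0-rhel-7"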
| 30.8 | 73 | 0.621212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 297 | 0.642857 |
08698150dd4c0d31ae984574dc2eb2d108201474 | 752 | py | Python | work/2021/ne201076/src/cpu_notify.py | tora01/SkillLab | 61ebfaf45c503b9e6f4a3d05a7edd4de2fcad93e | [
"CC0-1.0"
]
| 2 | 2020-09-09T02:40:23.000Z | 2021-09-12T18:08:15.000Z | work/2021/ne201076/src/cpu_notify.py | tora01/SkillLab | 61ebfaf45c503b9e6f4a3d05a7edd4de2fcad93e | [
"CC0-1.0"
]
| 1 | 2021-09-14T09:36:38.000Z | 2021-09-14T09:36:38.000Z | work/2021/ne201076/src/cpu_notify.py | tora01/SkillLab | 61ebfaf45c503b9e6f4a3d05a7edd4de2fcad93e | [
"CC0-1.0"
]
| 19 | 2021-09-07T06:11:29.000Z | 2021-09-07T07:45:08.000Z | import requests
import time
url = 'https://notify-api.line.me/api/notify'  # URL of the LINE Notify API
token = '2RNdAKwlaj69HK0KlEdMX1y575gDWNKrPpggFcLnh82'  # your own access token
# notification text: "opening another application would put excessive load on the machine"
ms = "新たなソフトを開くと負担が過剰にかかってしまいます。"
def line(message, url, token):
    post_data = {'message': message}
    headers = {'Authorization': 'Bearer ' + token}
    # send the notification
    res = requests.post(url,
                        data=post_data,
                        headers=headers)
    print(res.text)  # check whether the message was sent
def get_cpu_temp(path='/sys/class/thermal/thermal_zone0/temp'):
    # Assumed helper: the original called an undefined getCpuTempFromFile(data_file).
    # Reads the CPU temperature in degrees Celsius from the Linux sysfs file.
    with open(path) as f:
        return int(f.read().strip()) / 1000.0
while True:
    cpu_temp = get_cpu_temp()  # read the CPU temperature
    print(cpu_temp)
    if cpu_temp >= 80:  # notify via LINE once the CPU reaches 80 degrees C
        line(ms, url, token)  # send the prepared notification
        break
    time.sleep(1)
| 31.333333 | 73 | 0.670213 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 480 | 0.503145 |
0869ba6e18dfa77decb88cf8144acde0c451215e | 49 | py | Python | src/titiler/application/titiler/application/__init__.py | kalxas/titiler | 5e4e497f1033eb64b65315068c094abe8259cd8c | [
"MIT"
]
| null | null | null | src/titiler/application/titiler/application/__init__.py | kalxas/titiler | 5e4e497f1033eb64b65315068c094abe8259cd8c | [
"MIT"
]
| null | null | null | src/titiler/application/titiler/application/__init__.py | kalxas/titiler | 5e4e497f1033eb64b65315068c094abe8259cd8c | [
"MIT"
]
| null | null | null | """titiler.application"""
__version__ = "0.6.0"
| 12.25 | 25 | 0.653061 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.653061 |
0869cc3c4f8fe0eb7c864da5eb1b5caf6b676944 | 550 | py | Python | testScripts/getAllFiles.py | ryanemerson/JGroups-HiTab | 8fd8c6c45219e4c04618630be7e2449ebb0578dc | [
"Apache-2.0"
]
| null | null | null | testScripts/getAllFiles.py | ryanemerson/JGroups-HiTab | 8fd8c6c45219e4c04618630be7e2449ebb0578dc | [
"Apache-2.0"
]
| null | null | null | testScripts/getAllFiles.py | ryanemerson/JGroups-HiTab | 8fd8c6c45219e4c04618630be7e2449ebb0578dc | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
import os
from collections import defaultdict
hosts = {'mill001', 'mill004', 'mill005'}
user = 'a7109534'
file_location = '/work/a7109534/'
#file_location = '/home/ryan/workspace/JGroups'
#file_location = '/home/pg/p11/a7109534/'
file_wildcard = '*'
extension = ".csv"
get_file = file_location + file_wildcard + extension
destination = '.'
number_of_rounds = 18
os.system("rm *" + extension)
for hostname in hosts:
cmd = "scp " + user + "@" + hostname + ":" + get_file + " " + destination
    print(cmd)
os.system(cmd)
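# (Added note.) With the settings above, each loop iteration builds and runs a
# command of the form
#   scp a7109534@mill001:/work/a7109534/*.csv .
# hosts is a set, so the order in which the three mill hosts are fetched may vary.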
| 26.190476 | 77 | 0.681818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 196 | 0.356364 |
0869fc3b1af3273cc468fc0da2d162910f894bff | 3,610 | py | Python | studio/model.py | NunoEdgarGFlowHub/studio | 42b221892a81535842ff25cbbcc434d6422a19e5 | [
"Apache-2.0"
]
| null | null | null | studio/model.py | NunoEdgarGFlowHub/studio | 42b221892a81535842ff25cbbcc434d6422a19e5 | [
"Apache-2.0"
]
| null | null | null | studio/model.py | NunoEdgarGFlowHub/studio | 42b221892a81535842ff25cbbcc434d6422a19e5 | [
"Apache-2.0"
]
| null | null | null | """Data providers."""
import os
try:
# try-except statement needed because
# pip module is not available in google app engine
import pip
except ImportError:
pip = None
import yaml
import six
from .artifact_store import get_artifact_store
from .http_provider import HTTPProvider
from .firebase_provider import FirebaseProvider
from .s3_provider import S3Provider
from .gs_provider import GSProvider
from . import logs
def get_config(config_file=None):
config_paths = []
if config_file:
if not os.path.exists(config_file):
raise ValueError('User config file {} not found'
.format(config_file))
config_paths.append(os.path.expanduser(config_file))
config_paths.append(os.path.expanduser('~/.studioml/config.yaml'))
config_paths.append(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"default_config.yaml"))
for path in config_paths:
if not os.path.exists(path):
continue
with(open(path)) as f:
config = yaml.load(f.read())
def replace_with_env(config):
for key, value in six.iteritems(config):
if isinstance(value, six.string_types):
config[key] = os.path.expandvars(value)
elif isinstance(value, dict):
replace_with_env(value)
replace_with_env(config)
return config
    raise ValueError('None of the config paths {} exists!'
.format(config_paths))
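# (Added annotation.) get_config() tries, in order: an explicitly passed config file,
# ~/.studioml/config.yaml, and finally the package's default_config.yaml; the first
# path that exists is loaded, with $VAR references in string values expanded from the
# environment by replace_with_env().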
def get_db_provider(config=None, blocking_auth=True):
if not config:
config = get_config()
verbose = parse_verbosity(config.get('verbose'))
logger = logs.getLogger("get_db_provider")
logger.setLevel(verbose)
logger.debug('Choosing db provider with config:')
logger.debug(config)
if 'storage' in config.keys():
artifact_store = get_artifact_store(
config['storage'],
blocking_auth=blocking_auth,
verbose=verbose)
else:
artifact_store = None
assert 'database' in config.keys()
db_config = config['database']
if db_config['type'].lower() == 'firebase':
return FirebaseProvider(
db_config,
blocking_auth,
verbose=verbose,
store=artifact_store)
elif db_config['type'].lower() == 'http':
return HTTPProvider(db_config,
verbose=verbose,
blocking_auth=blocking_auth)
elif db_config['type'].lower() == 's3':
return S3Provider(db_config,
verbose=verbose,
store=artifact_store,
blocking_auth=blocking_auth)
elif db_config['type'].lower() == 'gs':
return GSProvider(db_config,
verbose=verbose,
store=artifact_store,
blocking_auth=blocking_auth)
else:
raise ValueError('Unknown type of the database ' + db_config['type'])
def parse_verbosity(verbosity=None):
if verbosity is None:
return parse_verbosity('info')
if verbosity == 'True':
return parse_verbosity('info')
logger_levels = {
'debug': 10,
'info': 20,
'warn': 30,
'error': 40,
'crit': 50
}
if isinstance(verbosity, six.string_types) and \
verbosity in logger_levels.keys():
return logger_levels[verbosity]
else:
return int(verbosity)
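# Quick illustration of parse_verbosity (added example, not part of the module): it
# maps studio's level names onto the numeric levels used by the stdlib logging module.
assert parse_verbosity('debug') == 10
assert parse_verbosity(None) == 20   # no value defaults to 'info'
assert parse_verbosity('40') == 40   # anything else falls through to int()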
| 28.88 | 77 | 0.591967 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 455 | 0.126039 |
086a788b83deae56a16772a629310d3b84a228a0 | 570 | py | Python | Server/server/model_inference/predictor.py | thaiminhpv/Doctor-Cyclop-Hackathon-2021 | afb943f7d00ceccb408c895077517ddd06d87fd7 | [
"MIT"
]
| 6 | 2021-04-30T05:28:04.000Z | 2022-03-21T14:50:43.000Z | Server/server/model_inference/predictor.py | thaiminhpv/Doctor-Cyclop-Hackathon-2021 | afb943f7d00ceccb408c895077517ddd06d87fd7 | [
"MIT"
]
| null | null | null | Server/server/model_inference/predictor.py | thaiminhpv/Doctor-Cyclop-Hackathon-2021 | afb943f7d00ceccb408c895077517ddd06d87fd7 | [
"MIT"
]
| 1 | 2022-01-10T14:58:02.000Z | 2022-01-10T14:58:02.000Z | import numpy as np
import pandas as pd
from server.model_inference.config import labels
from server.model_inference.core_model import get_model_prediction
from server.util.prediction_to_json import pandas_to_json
def get_predictions(images):
ids = list(images.keys())
out = np.hstack((np.asarray(ids)[np.newaxis,].T, (np.zeros((len(ids), len(labels))))))
df_sub = pd.DataFrame(out, columns=['StudyInstanceUID', *labels])
predicted_df = get_model_prediction(df_sub, images)
predicted_json = pandas_to_json(predicted_df)
return predicted_json
| 31.666667 | 90 | 0.764912 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.031579 |
086b6939a15a14e2ba2c7a9bf78818444b385782 | 7,310 | py | Python | extendPlugins/minecraft.py | f88af65a/XyzB0ts | 21a557288877b24f337f16002d8bb72b155f2551 | [
"MIT"
]
| 4 | 2021-10-17T11:54:07.000Z | 2022-03-18T13:10:11.000Z | extendPlugins/minecraft.py | f88af65a/XyzB0ts | 21a557288877b24f337f16002d8bb72b155f2551 | [
"MIT"
]
| null | null | null | extendPlugins/minecraft.py | f88af65a/XyzB0ts | 21a557288877b24f337f16002d8bb72b155f2551 | [
"MIT"
]
| 1 | 2021-10-16T09:51:25.000Z | 2021-10-16T09:51:25.000Z | import asyncio
import json
import socket
import time
from botsdk.util.BotPlugin import BotPlugin
from botsdk.util.Error import printTraceBack
def getMcRequestData(ip, port):
data = (b"\x00\xff\xff\xff\xff\x0f"
+ bytes([len(ip.encode("utf8"))])
+ ip.encode("utf8")
+ int.to_bytes(port, 2, byteorder="big")
+ b"\x01\x01\x00")
return bytes([len(data) - 2]) + data
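# (Added annotation.) getMcRequestData builds a Java-edition "Server List Ping":
# a handshake packet (id 0x00, protocol version -1 encoded as the VarInt
# ff ff ff ff 0f, the server address, the port as a big-endian short, and
# next-state 0x01 for "status"), followed by the status-request packet
# b"\x01\x00"; the leading byte is the handshake packet's VarInt length.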
def getVarInt(b):
b = list(b)
b.reverse()
ans = 0
for i in b:
ans <<= 7
ans |= (i & 127)
return ans
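# (Added annotation.) getVarInt decodes a Minecraft VarInt: each byte carries 7
# payload bits, least-significant group first, with the high bit as a continuation
# flag (masked off here); e.g. getVarInt(b"\x7f") == 127 and getVarInt(b"\xac\x02") == 300.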
class plugin(BotPlugin):
"/[mcbe/mcpe] ip [端口]"
def onLoad(self):
self.name = "minecraft"
self.addTarget("GroupMessage", "mc", self.getMc)
self.addTarget("GroupMessage", "mcbe", self.getBe)
self.addTarget("GROUP:1", "mc", self.getMc)
self.addTarget("GROUP:1", "mcbe", self.getBe)
self.addBotType("Mirai")
self.addBotType("Kaiheila")
self.canDetach = True
async def getMc(self, request):
"/mc ip [端口]不写默认25565"
data = request.getFirstTextSplit()
serverIp = None
serverPort = 25565
if len(data) < 2:
await request.sendMessage("缺少参数\n/mc ip [端口]不写默认25565")
return
if len(data) >= 2:
serverIp = data[1]
if len(data) >= 3:
if not (data[2].isnumeric()
and int(data[2]) >= 0
and int(data[2]) <= 65535):
request.sendMessage("端口有误")
return
serverPort = int(data[2])
        # initialize the socket
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.settimeout(0)
loop = asyncio.get_event_loop()
            # connect
try:
await loop.sock_connect(sock, (serverIp, serverPort))
except Exception:
await request.sendMessage("连接失败")
return
requestData = getMcRequestData(serverIp, serverPort)
            # send the request
try:
await loop.sock_sendall(sock, requestData)
except Exception:
await request.sendMessage("请求发送失败")
return
            # receive
responseData = bytes()
breakFlag = True
dataSize = 10000000
stime = time.time()
while time.time() - stime <= 2 and breakFlag:
for i in range(0, len(responseData)):
if int(responseData[i]) & 128 == 0:
dataSize = getVarInt(responseData[0:i + 1]) + i + 1
break
if len(responseData) == dataSize:
breakFlag = False
break
rdata = await loop.sock_recv(sock, 10240)
if len(rdata) == 0:
await request.sendMessage("接受请求时连接断开")
return -1
responseData += rdata
await asyncio.sleep(0)
for i in range(0, len(responseData)):
if int(responseData[i]) & 128 == 0:
responseData = responseData[i + 2:]
break
for i in range(0, len(responseData)):
if int(responseData[i]) & 128 == 0:
responseData = responseData[i + 1:]
break
responseData = json.loads(responseData)
description = ""
if "text" in responseData["description"]:
description = responseData["description"]["text"]
if "extra" in responseData["description"]:
for i in responseData["description"]["extra"]:
if "text" in i:
description += i["text"]
try:
printData = "信息:{0}\n版本:{1}\n人数:{2}/{3}".format(
description, responseData["version"]["name"],
responseData["players"]["online"],
responseData["players"]["max"])
if "playerlist" in data:
printData += "\n在线玩家:\n"
for i in range(0, len(responseData["players"]["sample"])):
printData += (responseData
["players"]["sample"][i]["name"])
if i != len(responseData["players"]["sample"]) - 1:
printData += "\n"
await request.sendMessage(printData)
except Exception:
await request.sendMessage("解析过程中出错")
printTraceBack()
async def getBe(self, request):
"/mcbe ip [端口]不写默认19132"
data = request.getFirstTextSplit()
serverIp = None
serverPort = 19132
if len(data) < 2:
await request.sendMessage("缺少参数\n/mcbe ip [端口]不写默认19132")
return
if len(data) >= 2:
serverIp = data[1]
if len(data) == 3:
if not (data[2].isnumeric()
and int(data[2]) >= 0
and int(data[2]) <= 65535):
request.sendMessage("端口有误")
return
serverPort = int(data[2])
        # initialize the socket
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
sock.settimeout(0)
loop = asyncio.get_event_loop()
            # connect
try:
await loop.sock_connect(sock, (serverIp, serverPort))
except Exception:
await request.sendMessage("连接失败")
return
requestData = (b"\x01"
+ b"\x00" * 8
+ b"\x00\xff\xff\x00\xfe\xfe\xfe"
+ b"\xfe\xfd\xfd\xfd\xfd\x12\x34\x56\x78"
+ b"\x00" * 8)
            # send the request
try:
await loop.sock_sendall(sock, requestData)
except Exception:
await request.sendMessage("请求发送失败")
return
            # receive
responseData = bytes()
breakFlag = True
stime = time.time()
while time.time() - stime <= 2 and breakFlag:
try:
responseData = await loop.sock_recv(sock, 10240)
except Exception:
responseData = b""
if len(responseData) == 0:
sock.close()
await request.sendMessage("接收过程中连接断开")
return
breakFlag = False
await asyncio.sleep(0)
responseData = responseData[35:].decode()
responseData = responseData.split(";")
printData = ""
try:
printData += f"服务器名:{responseData[1]}\n"
printData += f"人数:{responseData[4]}/{responseData[5]}\n"
printData += f"游戏模式:{responseData[8]}\n"
printData += (
f"版本:{responseData[0]} {responseData[2]} {responseData[3]}"
)
await request.sendMessage(printData)
except Exception:
await request.sendMessage("解析过程中出错")
printTraceBack()
def handle(*args, **kwargs):
return plugin(*args, **kwargs)
| 36.733668 | 79 | 0.477291 | 6,951 | 0.91726 | 0 | 0 | 0 | 0 | 6,508 | 0.858802 | 1,178 | 0.15545 |