seq_id (string, 7-11 chars) | text (string, 156-1.7M chars) | repo_name (string, 7-125 chars) | sub_path (string, 4-132 chars) | file_name (string, 4-77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156-1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0-24.2k, may be null) | dataset (string, 1 class) | pt (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
21478478680
|
import logging, datetime, sys
from modules import *
args = parser.parse_args()
start_time = datetime.datetime.now()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# create a file handler for INFO
handler = logging.FileHandler(CONFIG['log_path'] + 'info_the_release_note.log')
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
# create a file handler for DEBUG
debug_handler = logging.FileHandler(CONFIG['log_path'] + 'debug_the_release_note.log')
debug_handler.setLevel(logging.DEBUG)
debug_handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
if args.debug:
print("Now running in debug mode.")
logger.setLevel(logging.DEBUG)
logger.addHandler(debug_handler)
dzr = Deezer()
weekday = datetime.datetime.today().weekday()
# Retrieve users, either from args or from a contact list
if args.user:
args.do_not_send = not args.email
users = [{'deezer_user_id': int(user), 'email': args.email} for user in args.user]
else:
try:
users = getContacts(args.contact_list_id) if args.contact_list_id else getContacts(CONFIG['contact_list_id'])
except Exception as e:
logger.info("An error occured while trying to retrieve the contact list.")
logger.debug(e)
sys.exit(2)
logger.info(str(len(users)) + ' users found.')
logger.debug(users)
for user in users:
print("Checking new releases for user id " + str(user['deezer_user_id']) + "...")
logger.info("Checking new releases for user id " + str(user['deezer_user_id']) + "...")
if args.released_since:
released_since = args.released_since
else:
try:
# For weekly users, send new releases on friday only
if weekday != 4 and user['frequency'] == 'weekly':
logger.debug("Skipping this user as he's a weekly user and will only receive new releases on Friday.")
continue
else:
released_since = {
'daily': 1,
'weekly': 7
}.get(user['frequency'], 1)
except KeyError as e:
logger.debug("Frequency setting not found. Fallback to default value.")
released_since = 1
except Exception as e:
logger.debug("An error occured while trying to retrieve the frequency setting:")
logger.debug(e)
continue
try:
new_releases = dzr.getNewReleases(user['deezer_user_id'], released_since)
except IOError as e:
logger.debug("Stopwords and banned artists could not be retrieved.")
logger.debug(e)
sys.exit(2)
except Exception as e:
logger.debug(e)
sys.exit(2)
nb_releases = len(new_releases)
logger.info("User id " + str(user['deezer_user_id']) + " has " + str(nb_releases) + " albums released in the past " + str(released_since) + " days.")
logger.debug(new_releases)
if nb_releases < 1:
continue
# Store new releases into database
try:
db = Database()
db.storeNewReleases(new_releases, user['deezer_user_id'])
del(db)
except Exception as e:
logger.info("An error occured while trying to store the new releases in the database.")
logger.debug(e)
# Send new releases by email
subject = "♩ Have you listened to " + new_releases[0]['artist']['name'] + "'s new album ?"
contenthtml = get_template(new_releases, user['deezer_user_id'])
if not args.do_not_send:
try:
send = sendMail(CONFIG['from_mail'], CONFIG['from_name'], user['email'], subject, contenthtml)
logger.info("Sending email - Status: " + str(send.status_code))
logger.debug(send.headers)
except Exception as e:
logger.info("An error occured while trying to send the mail.")
logger.debug(e)
sys.exit(2)
print('Done')
logger.info("Done in %s seconds " % (datetime.datetime.now() - start_time).total_seconds())
|
greird/the-release-note
|
the-release-note.py
|
the-release-note.py
|
py
| 3,711 |
python
|
en
|
code
| 2 |
github-code
|
6
|
21256935702
|
# 500 Exercises Solved with Python
# Thiago Barros
# Exercises solved based on the book "500 Algoritmos Resolvidos" (ANITA LOPES AND GUTO GARCIA)
# Algorithm number 122
# Chapter 3
"""
Read three numbers and check whether the three numbers can be
the sides of a triangle
"""
lde1 = float(input("Entre com o Primeiro Lado"))
lde2 = float(input("Entre com o Segundo Lado"))
lde3 = float(input("Entre com o Terceiro Lado"))
if (lde1 < lde2 + lde3) and (lde2 < lde3 + lde1) and (lde3 < lde1 + lde2):
print("Sim , os valores são lados de um triângulo")
else:
print("Não, os valores não são lados de um triângulo")
|
Tbarros1996/500algoritmos
|
Capitulo_3/algoritmo_122.py
|
algoritmo_122.py
|
py
| 670 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
39404813463
|
from django.urls import path
from .views import (
ColaboradorList,
ColaboradorUpdate,
ColaboradorDelete,
ColaboradorCreate,
ColaboradorReport,
HtmlPdf,
)
urlpatterns = [
path('listar', ColaboradorList.as_view(), name='list_colaborador'),
path('criar', ColaboradorCreate.as_view(), name='create_colaborador'),
path('editar/<int:pk>', ColaboradorUpdate.as_view(), name='update_colaborador'),
path('excluir/<int:pk>', ColaboradorDelete.as_view(), name='delete_colaborador'),
path('relatorio', ColaboradorReport, name='report_colaborador'),
path('relatorio_html', HtmlPdf.as_view(), name='report_colaborador_html'),
]
|
fabiogpassos/GRH
|
apps/colaboradores/urls.py
|
urls.py
|
py
| 664 |
python
|
es
|
code
| 0 |
github-code
|
6
|
10033128545
|
# Created by LEWEEN.MASSIN, on 23/03/2023, in Python 3.7
from csv import reader as read
file = '49-prenoms-2013-22.csv'
name = 'Aaron'
gender = 'F'
year = 2019
def import_table(file):
l=[]
with open(file, 'r') as csv_open:
csv_read = read(csv_open, delimiter=';')
for row in csv_read:
l.append(row)
return l
def easier_table(file):
l = import_table(file)
l.pop(0)
for row in l:
for i in range(2):
row.pop(0)
return l
def nbr_enfants(file, name):
l = easier_table(file)
nbr = 0
for row in l:
if row[1] == name:
nbr+=int(row[2])
return nbr
def nbr_gender(file, gender, year):
l = easier_table(file)
occur = 0
for row in l:
if row[0] == gender and int(row[3]) == year:
occur += 1
return occur
print(f"{name}: {nbr_enfants(file, name)}")
print(f"{gender} en {year}: {nbr_gender(file, gender, year)}")
|
Remingusu/NSI_premiere
|
7 - Traitement de données en tables/Projet Traitement de données en tables/main.py
|
main.py
|
py
| 957 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43193667766
|
#!/usr/bin/env python
import rospy
import smach
from PrintColours import *
from std_msgs.msg import String,UInt8
from mavros_msgs.msg import ExtendedState
# import custom message:
from muav_state_machine.msg import UAVState
# global variables to catch the data from the agent state machine node
airframe_type = ""
mission_state = ""
wp_reached = 0
extended_state = ExtendedState()
uav_state = 0
landed_state = 0
flight_status_dji = 0
#callback functions
def airframe_type_cb(msg):
global airframe_type
airframe_type = msg.data
def mission_state_cb(msg):
global mission_state
mission_state = msg.data
def wp_reached_cb(msg):
global wp_reached
wp_reached = msg.data
def estate_cb(msg):
global extended_state
extended_state = msg
def flight_status_dji_cb(msg):
global flight_status_dji
flight_status_dji = msg.data
class UavState(smach.State):
def __init__(self,uav_id):#modify, common_data
smach.State.__init__(
self, outcomes=['mission_finished', 'shutdown'])
self.uav_id = uav_id
def execute(self, ud):
UAVState_pub = rospy.Publisher("/muav_sm/uav_{}/uavstate".format(self.uav_id), UAVState, queue_size=10)
rospy.loginfo('[UavState] - UAV{} state'.format(self.uav_id))
rate = rospy.Rate(20) # 20hz
UAVState_msg = UAVState()
#subscribers initialization
autopilot_sub = rospy.Subscriber("/uav_{}_sm/com/airframe_type".format(self.uav_id), String, airframe_type_cb)
mission_state_sub = rospy.Subscriber("/uav_{}_sm/com/mission_state".format(self.uav_id), String, mission_state_cb)
wp_reached_sub = rospy.Subscriber("/uav_{}_sm/com/wp_reached".format(self.uav_id), UInt8, wp_reached_cb)
if airframe_type=="px4/vtol":
extended_state_sub = rospy.Subscriber("/uav_{}_sm/com/extended_state".format(self.uav_id), ExtendedState, estate_cb)
# subscribers for dji data
if airframe_type=="dji/M210":
flight_status_dji_sub = rospy.Subscriber("/uav_{}_sm/com/flight_status_dji".format(self.uav_id), UInt8, flight_status_dji_cb)
# UAVState_msg initialization
UAVState_msg.airframe_type = "px4/vtol"
UAVState_msg.mission_state = "idle"
UAVState_msg.wp_reached = 0
UAVState_msg.uav_state = 0
UAVState_msg.landed_state = 0
# transition to X state
while not rospy.is_shutdown():
# TBD: error detection if not namespaces with the name of the uav_id
#rospy.loginfo('[UavState] - UAV{} state: airframetype: {}, mission_state: {}, wp_reached: {}, extended_state: {}'.format(self.uav_id,autopilot,mission_state,wp_reached,extended_state))
#fill the UAVState_msg custom message
UAVState_msg.airframe_type = airframe_type #parameter
UAVState_msg.mission_state = mission_state#published by the agent state machine
if airframe_type=="px4/vtol" and mission_state=="mission_running":
UAVState_msg.wp_reached = wp_reached #published by the agent state machine
UAVState_msg.uav_state = extended_state.vtol_state #published by the agent state machine
UAVState_msg.landed_state = extended_state.landed_state
else:
UAVState_msg.wp_reached = 0 #TBD: create a function to do it in the DJI
UAVState_msg.uav_state = 0
UAVState_msg.landed_state = 0 # modified with dji data
if airframe_type=="dji/M210" and mission_state=="mission_running":
UAVState_msg.landed_state = flight_status_dji #published by the agent state machine
#publish the UAVState_msg custom message
UAVState_pub.publish(UAVState_msg)
#finish this state:
# if UAVState_msg.mission_state == "idle":
# return 'mission_finished'
rate.sleep()
return 'shutdown'
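# --- Illustrative wiring sketch (container outcomes are hypothetical, not part of the original node) ---
# A UavState instance is a regular smach state, so it would typically be added to a container like:
#
#   sm = smach.StateMachine(outcomes=['finished', 'aborted'])
#   with sm:
#       smach.StateMachine.add('UAV_STATE', UavState(uav_id=1),
#                              transitions={'mission_finished': 'finished',
#                                           'shutdown': 'aborted'})
#   sm.execute()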
|
miggilcas/muav_state_machine
|
scripts/GStates/uav_state.py
|
uav_state.py
|
py
| 4,040 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43431937253
|
#!/usr/bin/env python3
""" Evaluator """
import sys
import tensorflow as tf
from utils import decode_img, image_patches, write_tensor_as_image
from model import image_diff, UPSCALER_FACTOR
def main():
""" Main function """
try:
image_path = sys.argv[1]
except IndexError:
print("Usage: {} <image path>".format(sys.argv[0]))
sys.exit(1)
try:
model_path = sys.argv[2]
except IndexError:
model_path = './saved_model'
PATCH_SIZE = 240 // UPSCALER_FACTOR
N_CHANNELS = 3
model = tf.keras.models.load_model(model_path)
image = decode_img(image_path, N_CHANNELS)
patches = image_patches(image, PATCH_SIZE, PATCH_SIZE, N_CHANNELS)
model_out = model(patches)
for idx, (patch_in, patch_out) in enumerate(zip(patches, model_out)):
write_tensor_as_image("{}a.png".format(idx), patch_in)
write_tensor_as_image("{}b.png".format(idx), patch_out)
#image = tf.expand_dims(image, axis=0)
#model_out = model(image)
#model_out = tf.squeeze(model_out, axis=0)
#write_tensor_as_image("out.png", model_out)
if __name__ == "__main__":
main()
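# --- Illustrative usage (paths are placeholders) ---
#   python eval.py input.png ./saved_model
# The second argument is optional and defaults to './saved_model'; the script then writes
# one pair of images per patch (Na.png = input patch, Nb.png = model output): 0a.png/0b.png, 1a.png/1b.png, ...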
|
Masterchef365/ENHANCE
|
eval.py
|
eval.py
|
py
| 1,123 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17529991766
|
import os, glob, asyncio
class CommandDispatcher:
"""Register commands and run them"""
def __init__(self):
self.commands = {}
self.commands_admin = []
self.unknown_command = None
def get_admin_commands(self, bot, conv_id):
"""Get list of admin-only commands (set by plugins or in config.json)"""
commands_admin = bot.get_config_suboption(conv_id, 'commands_admin') or []
return list(set(commands_admin + self.commands_admin))
@asyncio.coroutine
def run(self, bot, event, *args, **kwds):
"""Run command"""
try:
func = self.commands[args[0]]
except KeyError:
if self.unknown_command:
func = self.unknown_command
else:
raise
args = list(args[1:])
try:
yield from func(bot, event, *args, **kwds)
except Exception as e:
print(e)
def register(self, *args, admin=False):
"""Decorator for registering command"""
def wrapper(func):
# Automatically wrap command function in coroutine
func = asyncio.coroutine(func)
self.commands[func.__name__] = func
if admin:
self.commands_admin.append(func.__name__)
return func
# If there is one (and only one) positional argument and this argument is callable,
# assume it is the decorator (without any optional keyword arguments)
if len(args) == 1 and callable(args[0]):
return wrapper(args[0])
else:
return wrapper
def register_unknown(self, func):
"""Decorator for registering unknown command"""
# Automatically wrap command function in coroutine
func = asyncio.coroutine(func)
self.unknown_command = func
return func
# Create CommandDispatcher singleton
command = CommandDispatcher()
# Build list of commands
_plugins = glob.glob(os.path.join(os.path.dirname(__file__), "*.py"))
__all__ = [os.path.splitext(os.path.basename(f))[0] for f in _plugins
if os.path.isfile(f) and not os.path.basename(f).startswith("_")]
# Load all commands
from hangupsbot.commands import *
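# --- Illustrative sketch of a plugin module (hypothetical, not part of the original package) ---
# A command module in this package would typically register its commands via the
# singleton above; the decorator wraps the function in a coroutine and stores it by name:
#
#   from hangupsbot.commands import command
#
#   @command.register(admin=True)
#   def restart(bot, event, *args, **kwds):
#       print("restart requested by", event)   # placeholder body
#
#   @command.register_unknown
#   def unknown(bot, event, *args, **kwds):
#       print("unknown command:", args)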
|
xmikos/hangupsbot
|
hangupsbot/commands/__init__.py
|
__init__.py
|
py
| 2,229 |
python
|
en
|
code
| 105 |
github-code
|
6
|
7165790234
|
import argparse
import logging
import sys
from itertools import chain
from logging import getLogger
from typing import Iterable, Optional, Union
from competitive_verifier import oj
from competitive_verifier.arg import add_verify_files_json_argument
from competitive_verifier.error import VerifierError
from competitive_verifier.log import configure_logging
from competitive_verifier.models import (
ProblemVerification,
VerificationFile,
VerificationInput,
)
from competitive_verifier.resource import ulimit_stack
logger = getLogger(__name__)
UrlOrVerificationFile = Union[str, VerificationFile]
def parse_urls(
input: Union[UrlOrVerificationFile, Iterable[UrlOrVerificationFile]]
) -> Iterable[str]:
def parse_single(url_or_file: UrlOrVerificationFile) -> Iterable[str]:
if isinstance(url_or_file, str):
return (url_or_file,)
else:
return enumerate_urls(url_or_file)
if isinstance(input, (str, VerificationFile)):
return parse_single(input)
return chain.from_iterable(parse_single(uf) for uf in input)
def enumerate_urls(file: VerificationFile) -> Iterable[str]:
for v in file.verification:
if isinstance(v, ProblemVerification):
yield v.problem
def run_impl(
input: Union[UrlOrVerificationFile, Iterable[UrlOrVerificationFile]],
check: bool = False,
group_log: bool = False,
) -> bool:
result = True
try:
ulimit_stack()
except Exception:
logger.warning("failed to increase the stack size[ulimit]")
for url in parse_urls(input):
if not oj.download(url, group_log=group_log):
result = False
if check and not result:
raise VerifierError("Failed to download")
return result
def run(args: argparse.Namespace) -> bool:
logger.debug("arguments=%s", vars(args))
logger.info("verify_files_json=%s", str(args.verify_files_json))
logger.info("urls=%s", args.urls)
files: list[VerificationFile] = []
if args.verify_files_json:
verification = VerificationInput.parse_file_relative(args.verify_files_json)
files = list(verification.files.values())
return run_impl(files + args.urls, group_log=True)
def argument(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
add_verify_files_json_argument(parser, required=False)
parser.add_argument(
"urls",
nargs="*",
help="A list of problem URL",
)
return parser
def main(args: Optional[list[str]] = None) -> None:
try:
configure_logging(logging.INFO)
parsed = argument(argparse.ArgumentParser()).parse_args(args)
if not run(parsed):
sys.exit(1)
except Exception as e:
sys.stderr.write(str(e))
sys.exit(2)
if __name__ == "__main__":
main()
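# --- Illustrative usage (the URL is an example; run_impl is defined above) ---
#   from competitive_verifier.download.main import run_impl
#   run_impl(["https://judge.yosupo.jp/problem/aplusb"], check=True)
# downloads the test cases for every problem URL via oj.download() and raises
# VerifierError if any download fails and check=True.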
|
competitive-verifier/competitive-verifier
|
src/competitive_verifier/download/main.py
|
main.py
|
py
| 2,823 |
python
|
en
|
code
| 8 |
github-code
|
6
|
34199641942
|
#!/usr/bin/env python
import sys
import rospy
from art_collision_env.int_collision_env import IntCollisionEnv
import os
def main():
rospy.init_node('collision_env_node', anonymous=True)
try:
setup = os.environ["ARTABLE_SETUP"]
except KeyError:
rospy.logfatal("ARTABLE_SETUP has to be set.")
return
ce = IntCollisionEnv(setup, "marker")
ce.load_from_db()
ce.start()
rospy.spin()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print("Shutting down")
|
robofit/arcor
|
art_collision_env/src/node.py
|
node.py
|
py
| 551 |
python
|
en
|
code
| 9 |
github-code
|
6
|
10918517772
|
""" Plugin entry point for helga """
import math
from craigslist_scraper.scraper import scrape_url
from helga.plugins import match
TEMPLATE = 'Listing title: {}, price: {}'
@match(r'[A-Za-z]+\.craigslist\.org/.../\S+')
def craigslist_meta(client, channel, nick, message, match):
""" Return meta information about a listing """
data = scrape_url('http://' + match[0])
result = TEMPLATE.format(data.title, data.price)
for key, value in data.attrs.items():
result += ', {}: {}'.format(key, value)
return result
|
narfman0/helga-craigslist-metadata
|
helga_craigslist_meta/plugin.py
|
plugin.py
|
py
| 538 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3682557651
|
class MapFlags(object):
__slots__ = ('_value')
name = 'tmwa::map::MapFlags'
enabled = True
def __init__(self, value):
self._value = value['flags']
def to_string(self):
i = int(self._value)
s = []
for n, v in MapFlags.junk:
v = 1 << v
if i & v:
i -= v
s.append(n)
if i or not s:
s.append('%#08x' % i)
return 'MapFlags(%s)' % (' | '.join(s))
junk = [
#('ALIAS', 21),
#('NOMEMO', 0),
('NOTELEPORT', 1),
('NORETURN', 22),
('MONSTER_NOTELEPORT', 23),
('NOSAVE', 2),
#('NOBRANCH', 3),
('NOPENALTY', 4),
('PVP', 6),
('PVP_NOPARTY', 7),
#('PVP_NOGUILD', 8),
#('PVP_NIGHTMAREDROP', 24),
('PVP_NOCALCRANK', 25),
#('GVG', 9),
#('GVG_NOPARTY', 10),
#('NOZENYPENALTY', 5),
#('NOTRADE', 11),
#('NOSKILL', 12),
('NOWARP', 13),
('NOWARPTO', 26),
('NOPVP', 14),
('MASK', 15),
('SNOW', 16),
('FOG', 17),
('SAKURA', 18),
('LEAVES', 19),
('RAIN', 20),
('NO_PLAYER_DROPS', 27),
('TOWN', 28),
('OUTSIDE', 29),
('RESAVE', 30),
]
tests = [
('reinterpret_cast<const tmwa::map::MapFlags&>(static_cast<const unsigned int&>(0x80000000))', 'MapFlags(0x80000000)'),
('reinterpret_cast<const tmwa::map::MapFlags&>(static_cast<const unsigned int&>(0xf0000000))', 'MapFlags(TOWN | OUTSIDE | RESAVE | 0x80000000)'),
] + [
('tmwa::map::MapFlags(); value.set(tmwa::map::MapFlag::%s, true)' % n, 'MapFlags(%s)' % n)
for (n, _) in junk
] + [
('reinterpret_cast<const tmwa::map::MapFlags&>(static_cast<const unsigned int&>(1 << %d))' % i, 'MapFlags(%s)' % n)
for (n, i) in junk
]
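# --- Worked example of the bit decoding above (illustrative) ---
# For flags == 0x10002, bits 1 (NOTELEPORT) and 16 (SNOW) are set, so to_string()
# walks `junk`, subtracts each matched bit and returns 'MapFlags(NOTELEPORT | SNOW)'.
# Any leftover bits are appended as a hex literal, e.g. 0x80000002 decodes to
# 'MapFlags(NOTELEPORT | 0x80000000)'.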
|
themanaworld/tmwa
|
src/map/mapflag.py
|
mapflag.py
|
py
| 2,048 |
python
|
en
|
code
| 48 |
github-code
|
6
|
11353299783
|
"""
Problem Statement
Given a binary tree, populate an array to represent its level-by-level traversal.
You should populate the values of all nodes of each level from left to right in separate sub-arrays.
"""
from collections import deque
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
def traverse(root):
result = []
deq = deque()
if root:
deq.append(root)
while deq:
length = len(deq)
node_list = []
for _ in range(length):
current_node = deq.popleft()
node_list.append(current_node.val)
if current_node.left:
deq.append(current_node.left)
if current_node.right:
deq.append(current_node.right)
if node_list:
result.append(node_list)
return result
def main():
root = TreeNode(12)
root.left = TreeNode(7)
root.right = TreeNode(1)
root.left.left = TreeNode(9)
root.right.left = TreeNode(10)
root.right.right = TreeNode(5)
print("Level order traversal: " + str(traverse(root)))
main()
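# Expected output for the tree built in main():
# Level order traversal: [[12], [7, 1], [9, 10, 5]]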
|
jihoonyou/problem-solving
|
Educative/bfs/example1.py
|
example1.py
|
py
| 1,137 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6533110297
|
#!/usr/bin/python3
#-*- coding: utf-8 -*-
from moduls.data import *
from PyQt4 import QtGui, QtCore, uic
# StudentData = StudentData()
class MainWindow(QtGui.QMainWindow):
"""docstring for MainWindow"""
def __init__(self):
super(MainWindow, self).__init__()
self.ui = uic.loadUi("ui\MainWindow.ui", self)
self.FormData = FormData()
self.FormLayout.addWidget( self.FormData )
self.Faculty = FacultyData()
self.FacLayout.addWidget( self.Faculty )
self.Speciality = SpecialityData()
self.SpecLayout.addWidget( self.Speciality)
self.Students = StudentData( self.Faculty.data[0], self.Speciality.data[0], self.FormData.data[0] )
self.StudLayout.addWidget( self.Students )
self.Contract = ContractData( self.Students.data )
self.ContrLayout.addWidget( self.Contract )
self.connect( self.tabWidget, QtCore.SIGNAL('currentChanged(int)'), self.Contract.update )
self.connect( self.tabWidget, QtCore.SIGNAL('currentChanged(int)'), self.Students.update )
|
TchippunkT/Kursuch
|
moduls/windows/MainWindow.py
|
MainWindow.py
|
py
| 1,129 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24993496301
|
from osv import fields
from osv import osv
class dm_address_segmentation(osv.osv): # {{{
_inherit = "dm.address.segmentation"
_description = "Order Segmentation"
def set_address_criteria(self, cr, uid, ids, context={}):
sql_query = super(dm_address_segmentation,self).set_address_criteria(cr, uid, ids, context)
if isinstance(ids, (int, long)):
ids = [ids]
criteria=[]
browse_id = self.browse(cr, uid, ids)[0]
if browse_id.order_text_criteria_ids:
for i in browse_id.order_text_criteria_ids:
if i.field_id.ttype == 'many2one':
relation_obj = self.pool.get(i.field_id.relation)
rec_name = relation_obj._rec_name
criteria.append("so.%s in (select id from %s where %s %s '%s' )"%(
i.field_id.name, relation_obj._table,
rec_name, i.operator,
"%"+i.value+"%"))
else :
criteria.append("so.%s %s '%s'"%(i.field_id.name, i.operator, "%"+i.value+"%"))
if browse_id.order_numeric_criteria_ids:
for i in browse_id.order_numeric_criteria_ids:
criteria.append("so.%s %s %f"%(i.field_id.name, i.operator, i.value))
if browse_id.order_boolean_criteria_ids:
for i in browse_id.order_boolean_criteria_ids:
criteria.append("so.%s %s %s"%(i.field_id.name, i.operator, i.value))
if browse_id.order_date_criteria_ids:
for i in browse_id.order_date_criteria_ids:
criteria.append("so.%s %s '%s'"%(i.field_id.name, i.operator, i.value))
if criteria:
so_sql_query = ("""select distinct so.partner_invoice_id \nfrom sale_order so\nwhere %s\n""" % (' and '.join(criteria))).replace('isnot','is not')
sql_query += '''and pa.id in (%s)'''%so_sql_query
return sql_query
_columns = {
'order_text_criteria_ids' : fields.one2many('dm.extract.sale.text_criteria', 'segmentation_id', 'Customers Order Textual Criteria'),
'order_numeric_criteria_ids' : fields.one2many('dm.extract.sale.numeric_criteria', 'segmentation_id', 'Customers Order Numeric Criteria'),
'order_boolean_criteria_ids' : fields.one2many('dm.extract.sale.boolean_criteria', 'segmentation_id', 'Customers Order Boolean Criteria'),
'order_date_criteria_ids' : fields.one2many('dm.extract.sale.date_criteria', 'segmentation_id', 'Customers Order Date Criteria'),
}
dm_address_segmentation() # }}}
TEXT_OPERATORS = [ # {{{
('like','like'),
('ilike','ilike'),
] # }}}
NUMERIC_OPERATORS = [ # {{{
('=','equals'),
('<','smaller than'),
('>','bigger than'),
] # }}}
BOOL_OPERATORS = [ # {{{
('is','is'),
('isnot','is not'),
] # }}}
DATE_OPERATORS = [ # {{{
('=','equals'),
('<','before'),
('>','after'),
] # }}}
class dm_extract_sale_text_criteria(osv.osv): # {{{
_name = "dm.extract.sale.text_criteria"
_description = "Customer Order Segmentation Textual Criteria"
_rec_name = "segmentation_id"
_columns = {
'segmentation_id' : fields.many2one('dm.address.segmentation', 'Segmentation'),
'field_id' : fields.many2one('ir.model.fields','Customers Field',
domain=[('model_id.model','=','sale.order'),
('ttype','in',['char','many2one'])],
context={'model':'sale.order'}, required = True),
'operator' : fields.selection(TEXT_OPERATORS, 'Operator', size=32, required = True),
'value' : fields.char('Value', size=128, required = True),
}
dm_extract_sale_text_criteria() # }}}
class dm_extract_sale_numeric_criteria(osv.osv): # {{{
_name = "dm.extract.sale.numeric_criteria"
_description = "Customer Order Segmentation Numeric Criteria"
_rec_name = "segmentation_id"
_columns = {
'segmentation_id' : fields.many2one('dm.address.segmentation', 'Segmentation'),
'field_id' : fields.many2one('ir.model.fields','Customers Field',
domain=[('model_id.model','=','sale.order'),
('ttype','in',['integer','float'])],
context={'model':'sale.order'}, required = True),
'operator' : fields.selection(NUMERIC_OPERATORS, 'Operator', size=32, required = True),
'value' : fields.float('Value', digits=(16,2), required = True),
}
dm_extract_sale_numeric_criteria() # }}}
class dm_extract_sale_boolean_criteria(osv.osv): # {{{
_name = "dm.extract.sale.boolean_criteria"
_description = "Customer Order Segmentation Boolean Criteria"
_rec_name = "segmentation_id"
_columns = {
'segmentation_id' : fields.many2one('dm.address.segmentation', 'Segmentation'),
'field_id' : fields.many2one('ir.model.fields','Customers Field',
domain=[('model_id.model','=','sale.order'),
('ttype','like','boolean')],
context={'model':'sale.order'}, required = True),
'operator' : fields.selection(BOOL_OPERATORS, 'Operator', size=32, required = True),
'value' : fields.selection([('true','True'),('false','False')],'Value', required = True),
}
dm_extract_sale_boolean_criteria() # }}}
class dm_extract_sale_date_criteria(osv.osv): # {{{
_name = "dm.extract.sale.date_criteria"
_description = "Customer Order Segmentation Date Criteria"
_rec_name = "segmentation_id"
_columns = {
'segmentation_id' : fields.many2one('dm.address.segmentation', 'Segmentation'),
'field_id' : fields.many2one('ir.model.fields','Customers Field',
domain=[('model_id.model','=','sale.order'),
('ttype','in',['date','datetime'])],
context={'model':'sale.order'}, required = True),
'operator' : fields.selection(DATE_OPERATORS, 'Operator', size=32, required = True),
'value' : fields.date('Date', required = True),
}
dm_extract_sale_date_criteria() # }}}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
factorlibre/openerp-extra-6.1
|
dm_extract_sale/dm_extract_sale.py
|
dm_extract_sale.py
|
py
| 6,108 |
python
|
en
|
code
| 9 |
github-code
|
6
|
20172575137
|
import os
import sys
from typing import Optional
from dotenv import load_dotenv
from spinner import Spinner
import actions
import response_parser
import speech
import gpt
message_history = []
GENERAL_DIRECTIONS_PREFIX = """
CONSTRAINTS:
- Cannot run Python code that requires user input.
ACTIONS:
- "TELL_USER": tell the user something. The schema for the action is:
TELL_USER: <TEXT>
- "READ_FILE": read the current state of a file. The schema for the action is:
READ_FILE: <PATH>
- "WRITE_FILE": write a block of text to a file. The schema for the action is:
WRITE_FILE: <PATH>
```
<TEXT>
```
- "RUN_PYTHON": run a Python file. The schema for the action is:
RUN_PYTHON: <PATH>
- "SEARCH_ONLINE": search online and get back a list of URLs relevant to the query. The schema for the action is:
SEARCH_ONLINE: <QUERY>
- EXTRACT_INFO: extract specific information from a webpage. The schema for the action is:
EXTRACT_INFO: <URL>, <a brief instruction to GPT for information to extract>
- "SHUTDOWN": shut down the program. The schema for the action is:
SHUTDOWN
RESOURCES:
1. File contents after reading file.
2. Online search results returning URLs.
3. Output of running a Python file.
PERFORMANCE EVALUATION:
1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.
2. Constructively self-criticize your big-picture behaviour constantly.
3. Reflect on past decisions and strategies to refine your approach.
4. Every action has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.
Write only one action. The action must be one of the actions specified above and must be written according to the schema specified above.
After the action, write a JSON object (parseable by Python's json.loads()) which must contain the following keys:
- "reason": a short sentence explaining the action above
- "plan": a short high-level plan in plain English
"""
FLAG_VERBOSE = "--verbose"
FLAG_SPEECH = "--speech"
FLAG_CONTINUOUS = "--continuous"
def main():
general_directions = GENERAL_DIRECTIONS_PREFIX
if FLAG_SPEECH in sys.argv[1:]:
general_directions += '- "speak": a short summary of thoughts to say to the user'
general_directions += "\n\n"
general_directions += "If you want to run an action that is not in the above list of actions, send the SHUTDOWN action instead and explain in 'reason' which action you wanted to run.\n"
general_directions += "So, write one action and one metadata JSON object, nothing else."
load_dotenv()
os.makedirs("workspace", exist_ok=True)
os.chdir("workspace")
new_plan: Optional[str] = None
user_directions = input("What would you like me to do:\n")
while True:
print("========================")
with Spinner("Thinking..."):
assistant_response = gpt.chat(user_directions, general_directions, new_plan, message_history)
if FLAG_VERBOSE in sys.argv[1:]:
print(f"ASSISTANT RESPONSE: {assistant_response}")
action, metadata = response_parser.parse(assistant_response)
print(f"ACTION: {action.short_string()}")
if FLAG_SPEECH in sys.argv[1:] and metadata.speak is not None:
speech.say_async(metadata.speak)
if isinstance(action, actions.ShutdownAction):
print("Shutting down...")
break
else:
print(f"REASON: {metadata.reason}")
print(f"PLAN: {metadata.plan}")
if FLAG_CONTINUOUS not in sys.argv[1:]:
run_action = input("Run the action? [Y/n]")
if run_action.lower() != "y" and run_action != "":
break
action_output = action.run()
message_content = f"Action {action.key()} returned:\n{action_output}"
message_history.append({"role": "system", "content": message_content})
change_plan = input("Change the proposed plan? [N/y]")
if change_plan.lower() == "y":
new_plan = input("What would you like me to change the plan to? ")
else:
new_plan = None
if __name__ == "__main__":
main()
|
rokstrnisa/RoboGPT
|
robogpt/main.py
|
main.py
|
py
| 4,135 |
python
|
en
|
code
| 264 |
github-code
|
6
|
25874708021
|
class MyClass:
nome: str
cognome: str
def __init__(self, nome, cognome):
self.nome = nome
self.cognome = cognome
mc = MyClass(nome = "Roberto", cognome = "Gianotto")
print(mc)
print(mc.nome)
print(mc.cognome)
|
pinguinato/corso-python
|
esercizi/type_annotations/myclass.py
|
myclass.py
|
py
| 239 |
python
|
la
|
code
| 0 |
github-code
|
6
|
11000393367
|
import typing as T
from datetime import datetime, timedelta
from pydantic import BaseModel
from mirai import (
Mirai, Member, Friend,
MessageChain, At
)
from .alias import MESSAGE_T
# https://mirai-py.originpages.com/tutorial/annotations.html
Sender = T.Union[Member, Friend]
Type = str
def reply(app: Mirai, sender: "Sender", event_type: "Type"):
"""app_reply = reply(app, sender, event_type)
app_reply(message)
"""
async def wrapper(message: MESSAGE_T, *, at_sender: bool = False):
if at_sender:
if isinstance(message, list):
message.insert(0, At(sender.id))
elif isinstance(message, MessageChain):
message.__root__.insert(0, At(sender.id))
else:
raise TypeError(f"not supported type for reply: {message.__class__.__name__}")
if event_type == "GroupMessage":
await app.sendGroupMessage(sender.group, message)
elif event_type == "FriendMessage":
await app.sendFriendMessage(sender, message)
else:
raise ValueError("Not supported event type")
return wrapper
def at_me(app: Mirai, message: MessageChain):
at: T.Optional[At] = message.getFirstComponent(At)
if at:
return at.target == app.qq
else:
return False
class CoolDown(BaseModel):
"""example:
cd = CoolDown(app='app1', td=20)
cd.update(123)
cd.check(123)
"""
app: str
td: float # timedelta
value: T.Dict[int, datetime] = {}
def update(self, mid: int) -> None:
self.value.update({mid: datetime.now()})
def check(self, mid: int) -> bool:
ret = datetime.now() >= self.value.get(mid, datetime.utcfromtimestamp(0)) + timedelta(seconds=self.td)
return ret
def shuzi2number(shuzi: T.Optional[str]) -> int:
s = {'一': 1, '两': 2, '二': 2, '三': 3,
'四': 4, '五': 5, '六': 6, '七': 7,
'八': 8, '九': 9, '十': 10}
if not shuzi:
return 1
elif shuzi.isdecimal():
return int(shuzi)
elif shuzi in s.keys():
return s[shuzi]
else:
return 1
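# Examples of shuzi2number() behaviour (derived from the branches above):
#   shuzi2number('三') == 3, shuzi2number('12') == 12,
#   shuzi2number(None) == 1, shuzi2number('abc') == 1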
|
Lycreal/MiraiBot
|
plugins/_utils/__init__.py
|
__init__.py
|
py
| 2,146 |
python
|
en
|
code
| 70 |
github-code
|
6
|
33608143285
|
# 1 Convert a string into a list
# "Robin Singh" => ["Robin", "Singh"]
# "I love arrays they are my favorite" => ["I", "love", "arrays", "they", "are", "my", "favorite"]
rob = "Robin Singh"
fav = "I love arrays they are my favorite"
def robin(rob):
rob = rob.split()
return rob
robin(rob)
robin(fav)
# 2 Given the list ['Ivan', 'Ivanou'] and 2 strings: Minsk, Belarus
# Print the text: "Привет, Ivan Ivanou! Добро пожаловать в Minsk Belarus"
a = ['Ivan', 'Ivanou']
b = 'Minsk'
c = 'Belarus'
def welcome(a, b, c):
a = " ".join(a)
print("Привет, {0}! Добро пожаловать в {1} {2}".format(a, b, c))
welcome(a, b, c)
# 3 Given the list ["I", "love", "arrays", "they", "are", "my", "favorite"], turn it into
# the string => "I love arrays they are my favorite"
def faw(fav):
fav = "".join(fav)
return fav
faw(fav)
# 4 Create a list of 10 elements, insert a new value at the 3rd position,
# delete the element at index 6 from the list
ex = ['cake', '20', 'ball', 'pill', 'love', 'like', '88', 'eight', ' apple', '8']
def ex_4(ex):
ex.pop(6)
ex[2] = 'doll'
return ex
ex_4(ex)
|
visek8/-QAP12OnlVikanas
|
home_work/hw_4/types.py
|
types.py
|
py
| 1,345 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
74280782907
|
import pandas as pd
import numpy as np
from xgboost import XGBClassifier
from metrics import macro_f1
import settings
import pickle
import gc
import time
class BCXGBTrainer:
def __init__(self, config, logger):
self.config = config
self.model_params = config['model_params']
self.training_params = config['training_params']
self.logger = logger
def train_and_validate(self, df):
self.logger.info('Run training !')
self.logger.info(f'config : {self.config}')
xgb_oof = np.zeros((df.shape[0],))
xgb_oof_score = []
xgb_importances = pd.DataFrame()
model_save_dir = settings.MODEL / self.model_params['model_save_dir']
model_save_dir.mkdir(parents=True, exist_ok=True)
tabular_features = self.config['tabular_features']
target = self.training_params['target']
X = df[tabular_features]
y = df[target]
model = XGBClassifier(**self.training_params['best_params'])
for fold in range(self.config['n_folds']):
self.logger.info(f'Fold {fold} training ...')
start_time = time.time()
train_idx, valid_idx = df.loc[df['fold'] !=
fold].index, df.loc[df['fold'] == fold].index
X_train, X_valid = X.iloc[train_idx], X.iloc[valid_idx]
y_train, y_valid = y.iloc[train_idx], y.iloc[valid_idx]
model.fit(X_train, y_train,
eval_set=[(X_valid, y_valid)],
**self.training_params['fit_params'])
fi_tmp = pd.DataFrame()
fi_tmp['feature'] = X_train.columns
fi_tmp['importance'] = model.feature_importances_
fi_tmp['fold'] = fold
fi_tmp['seed'] = self.config['seed']
xgb_importances = xgb_importances.append(fi_tmp)
xgb_oof[valid_idx] = model.predict(X_valid)
score = macro_f1(y.iloc[valid_idx], xgb_oof[valid_idx])
xgb_oof_score.append(score)
model_save_path = model_save_dir / f'model_f{fold}_best.pkl'
with open(model_save_path, 'wb') as f:
pickle.dump(model, f, pickle.HIGHEST_PROTOCOL)
elapsed = time.time() - start_time
self.logger.info(
f'[Fold {fold}] valid_macro_f1 : {score:.6f} | time : {elapsed:.0f}s')
self.logger.info(
f"[Fold {fold}] best model saved : {model_save_path}")
self.logger.info('-'*100)
self.logger.info(
f'Average best valid_macro_F1 Score: {np.mean(xgb_oof_score):.6f}')
del model
gc.collect()
def inference(self, df_test):
xgb_preds = np.zeros((df_test.shape[0], ))
tabular_features = self.config['tabular_features']
X_test = df_test[tabular_features]
for fold in range(self.config['n_folds']):
start_time = time.time()
model_save_path = settings.MODEL / \
self.model_params['model_save_dir'] / f'model_f{fold}_best.pkl'
model = pickle.load(open(model_save_path, 'rb'))
xgb_preds += model.predict_proba(X_test)[:, 1] / \
self.config['n_folds']
elapsed = time.time() - start_time
self.logger.info(
f'[model_f{fold}_best] inference time : {elapsed:.0f}s')
del model
gc.collect()
xgb_preds = np.expand_dims(xgb_preds, axis=1)
preds_save_path = settings.MODEL / \
self.model_params['model_save_dir'] / f'preds.npy'
np.save(preds_save_path, xgb_preds)
self.logger.info(
f'Prediction result saved : {preds_save_path}')
def save_oof(self, df):
xgb_oof = np.zeros((df.shape[0], ))
xgb_oof_score = []
tabular_features = self.config['tabular_features']
target = self.training_params['target']
X = df[tabular_features]
y = df[target]
for fold in range(self.config['n_folds']):
start_time = time.time()
model_save_path = settings.MODEL / \
self.model_params['model_save_dir'] / f'model_f{fold}_best.pkl'
model = pickle.load(open(model_save_path, 'rb'))
valid_idx = df.loc[df['fold'] == fold].index
X_valid = X.iloc[valid_idx]
xgb_oof[valid_idx] = model.predict_proba(X_valid)[:, 1]
score = macro_f1(y.iloc[valid_idx], np.where(
xgb_oof[valid_idx] > 0.5, 1, 0))
xgb_oof_score.append(score)
elapsed = time.time() - start_time
self.logger.info(
f'[model_f{fold}_best] valid_macro_f1 : {score:.6f} | time : {elapsed:.0f}s')
del model
gc.collect()
xgb_oof = np.expand_dims(xgb_oof, axis=1)
oof_save_path = settings.MODEL / \
self.model_params['model_save_dir'] / f'oof.npy'
np.save(oof_save_path, xgb_oof)
self.logger.info(
f'Validation result saved : {oof_save_path}')
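# --- Minimal config sketch (illustrative; keys are taken from the accesses above, values are placeholders) ---
EXAMPLE_CONFIG = {
    'seed': 42,
    'n_folds': 5,
    'tabular_features': ['feat_1', 'feat_2'],       # hypothetical column names
    'model_params': {'model_save_dir': 'bc_xgb'},   # models land under settings.MODEL / 'bc_xgb'
    'training_params': {
        'target': 'label',                          # hypothetical target column
        'best_params': {'n_estimators': 1000},      # forwarded to XGBClassifier(**best_params)
        'fit_params': {'verbose': False},           # forwarded to model.fit(..., **fit_params)
    },
}
# BCXGBTrainer(EXAMPLE_CONFIG, logger).train_and_validate(df) also expects a 'fold' column in df.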
|
lim-hyo-jeong/DACON-Breast-Cancer
|
xgb_trainer.py
|
xgb_trainer.py
|
py
| 5,105 |
python
|
en
|
code
| 4 |
github-code
|
6
|
36030628166
|
"""Countdown/Stopwatch functionalities."""
import subprocess
import threading
import time
import traceback
from abc import (
ABC,
abstractmethod,
)
from pathlib import Path
from typing import (
List,
Optional,
Union,
)
from overrides import overrides
import albert as v0
import gi # isort:skip
gi.require_version("Notify", "0.7") # isort:skip
from gi.repository import (
GdkPixbuf,
Notify,
) # isort:skip
__title__ = "Countdown/Stopwatch functionalities"
__version__ = "0.4.0"
__triggers__ = "clock "
__authors__ = "Nikos Koukis"
__homepage__ = (
"https://github.com/bergercookie/awesome-albert-plugins/blob/master/plugins/clock"
)
countdown_path = str(Path(__file__).parent / "countdown.png")
stopwatch_path = str(Path(__file__).parent / "stopwatch.png")
sound_path = Path(__file__).parent.absolute() / "bing.wav"
cache_path = Path(v0.cacheLocation()) / "clock"
config_path = Path(v0.configLocation()) / "clock"
data_path = Path(v0.dataLocation()) / "clock"
dev_mode = True
# plugin main functions -----------------------------------------------------------------------
def play_sound(num):
for x in range(num):
t = threading.Timer(0.5 * x, lambda: subprocess.Popen(["cvlc", sound_path,]),)
t.start()
def notify(
app_name: str, msg: str, image=None,
):
Notify.init(app_name)
n = Notify.Notification.new(app_name, msg, image)
n.show()
def format_time(t: float):
"""Return the string representation of t. t must be in *seconds*"""
if t >= 60:
return f"{round(t / 60.0, 2)} mins"
else:
return f"{round(t, 2)} secs"
def play_icon(started) -> str:
return "▶️" if started else "⏸️"
class Watch(ABC):
def __init__(self, name):
self._name = name if name is not None else ""
self._to_remove = False
def name(self,) -> Optional[str]:
return self._name
@abstractmethod
def start(self):
pass
def started(self) -> bool:
return self._started
@abstractmethod
def pause(self):
pass
@abstractmethod
def notify(self):
pass
def to_remove(self,) -> bool:
return self._to_remove
class Stopwatch(Watch):
def __init__(self, name=None):
super(Stopwatch, self).__init__(name=name)
self.total_time = 0
self.latest_start = 0
self._started = False
self.latest_stop_time = 0
@overrides
def start(self):
self.latest_start = time.time()
self._started = True
self.notify(msg=f"Stopwatch [{self.name()}] starting")
@overrides
def pause(self):
stop_time = time.time()
self.total_time += stop_time - self.latest_start
self._started = False
self.notify(
msg=f"Stopwatch [{self.name()}] paused, total: {format_time(self.total_time)}"
)
self.latest_stop_time = stop_time
@overrides
def notify(self, msg):
notify(
app_name="Stopwatch", msg=msg, image=stopwatch_path,
)
@classmethod
def icon(cls):
return stopwatch_path
def destroy(self):
pass
def __str__(self):
# current interval
if self.started():
latest = time.time()
else:
latest = self.latest_stop_time
current_interval = latest - self.latest_start
total = self.total_time + current_interval
s = get_as_subtext_field(play_icon(self._started))
s += get_as_subtext_field(self.name())
s += get_as_subtext_field(format_time(total), "Total",)
s += get_as_subtext_field(format_time(current_interval), "Current Interval",)[:-2]
return s
class Countdown(Watch):
def __init__(
self, name: str, count_from: float,
):
super(Countdown, self).__init__(name=name)
self.latest_start = 0
self.remaining_time = count_from
self._started = False
self.timer = None
@overrides
def start(self):
self._started = True
self.latest_start = time.time()
self.timer = threading.Timer(self.remaining_time, self.time_elapsed,)
self.timer.start()
self.notify(
msg=f"Countdown [{self.name()}] starting, remaining: {format_time(self.remaining_time)}"
)
@overrides
def pause(self):
self._started = False
self.remaining_time -= time.time() - self.latest_start
if self.timer:
self.timer.cancel()
self.notify(
msg=f"Countdown [{self.name()}] paused, remaining: {format_time(self.remaining_time)}"
)
def time_elapsed(self):
self.notify(msg=f"Countdown [{self.name()}] finished")
play_sound(1)
self._to_remove = True
@classmethod
def icon(cls):
return countdown_path
def destroy(self):
self.timer.cancel()
self.notify(msg=f"Cancelling [{self.name()}]")
@overrides
def notify(self, msg):
notify(
app_name="Countdown", msg=msg, image=countdown_path,
)
def __str__(self):
s = get_as_subtext_field(play_icon(self._started))
s += get_as_subtext_field(self.name())
# compute remaining time
remaining_time = self.remaining_time
if self.started():
remaining_time -= time.time() - self.latest_start
s += f"Remaining: {format_time(remaining_time)}"
return s
countdowns: List[Countdown] = []
stopwatches: List[Stopwatch] = []
def all_watches() -> List[Union[Countdown, Stopwatch]]:
return [
*countdowns,
*stopwatches,
]
def create_stopwatch(name, *query_parts):
stopwatches.append(Stopwatch(name=name))
stopwatches[-1].start()
def create_countdown(name, *query_parts):
t = float(query_parts[0].strip()) * 60
countdowns.append(Countdown(name=name, count_from=t,))
countdowns[-1].start()
def delete_item(item: Union[Stopwatch, Countdown]):
item.destroy()
# TODO: could be neater..
if isinstance(item, Stopwatch):
stopwatches.remove(item)
else:
countdowns.remove(item)
def initialize():
"""Called when the extension is loaded (ticked in the settings) - blocking."""
# create plugin locations
for p in (
cache_path,
config_path,
data_path,
):
p.mkdir(
parents=False, exist_ok=True,
)
def finalize():
pass
def handleQuery(query,) -> list:
"""Hook that is called by albert with *every new keypress*.""" # noqa
results = []
if query.isTriggered:
try:
query.disableSort()
results_setup = setup(query)
if results_setup:
return results_setup
query_parts = query.string.strip().split()
name = None
if query_parts:
name = query_parts.pop(0)
subtext = f'Name: {name if name else "Not given"}'
results.extend(
[
v0.Item(
id=__title__,
icon=countdown_path,
text="Create countdown",
subtext=f'{subtext}{" - <u>Please provide a duration</u>" if not query_parts else ""}',
completion=__triggers__,
actions=[
v0.FuncAction(
"Create countdown",
lambda name=name, query_parts=query_parts: create_countdown(
name, *query_parts,
),
)
],
),
v0.Item(
id=__title__,
icon=stopwatch_path,
text="Create stopwatch",
subtext=subtext,
completion=__triggers__,
actions=[
v0.FuncAction(
"Create stopwatch",
lambda name=name, query_parts=query_parts: create_stopwatch(
name, *query_parts,
),
)
],
),
]
)
# cleanup watches that are done
for li in [
countdowns,
stopwatches,
]:
for watch in li:
if watch.to_remove():
li.remove(watch)
results.extend([get_as_item(item) for item in all_watches()])
except Exception: # user to report error
if dev_mode: # let exceptions fly!
print(traceback.format_exc())
raise
results.insert(
0,
v0.Item(
id=__title__,
icon=countdown_path,
text="Something went wrong! Press [ENTER] to copy error and report it",
actions=[
v0.ClipAction(
f"Copy error - report it to {__homepage__[8:]}",
f"{traceback.format_exc()}",
)
],
),
)
return results
# supplementary functions ---------------------------------------------------------------------
def get_as_item(item: Union[Countdown, Stopwatch]):
"""Return an item - ready to be appended to the items list and be rendered by Albert."""
actions = [v0.FuncAction("Remove", lambda: delete_item(item),)]
if item.started():
actions.append(v0.FuncAction("Pause", lambda: item.pause(),))
else:
actions.append(v0.FuncAction("Resume", lambda: item.start(),))
return v0.Item(
id=__title__,
icon=countdown_path if isinstance(item, Countdown) else stopwatch_path,
text=str(item),
subtext="",
completion=__triggers__,
actions=actions,
)
def get_as_subtext_field(field, field_title=None) -> str:
"""Get a certain variable as part of the subtext, along with a title for that variable."""
s = ""
if field:
s = f"{field} | "
else:
return ""
if field_title:
s = f"{field_title}: " + s
return s
def save_data(data: str, data_name: str):
"""Save a piece of data in the configuration directory."""
with open(config_path / data_name, "w",) as f:
f.write(data)
def load_data(data_name,) -> str:
"""Load a piece of data from the configuration directory."""
with open(config_path / data_name, "r",) as f:
data = f.readline().strip().split()[0]
return data
def setup(query):
"""Setup is successful if an empty list is returned.
Use this function if you need the user to provide you data
"""
results = []
return results
|
ppablocruzcobas/Dotfiles
|
albert/clock/__init__.py
|
__init__.py
|
py
| 11,096 |
python
|
en
|
code
| 2 |
github-code
|
6
|
28313903181
|
from PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton, QMenuBar, QAction, QTextEdit, QHBoxLayout, QWidget, QFontDialog, QColorDialog, QFileDialog, QDialog, QVBoxLayout, QMessageBox
from PyQt5 import QtGui, QtCore
from PyQt5.QtGui import QIcon
from PyQt5.QtPrintSupport import QPrinter, QPrintDialog, QPrintPreviewDialog
from PyQt5.QtCore import QFileInfo
import sys
class Window(QMainWindow): # Window class
def __init__(self):
super().__init__()
self.title = ('Einfacher Text Editor mit PDF Funktion') # Window Title
self.top = 400 #
self.left = 600 # window geometry
self.width = 400 #
self.height = 300 #
self.iconName = 'win.png' # Icon
self.setWindowIcon(QIcon(self.iconName))
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.createEditor() # set up the text editor
self.CreateMenu() # set up the menu bar
self.show()
#--------------------------------- M e n u  B a r ------------------------------#
def CreateMenu(self):
mainMenu = self.menuBar()
fileMenu = mainMenu.addMenu("Datei")
editMenu = mainMenu.addMenu("Bearbeiten")
infoMenu = mainMenu.addMenu("Info")
helpAction = QAction(QtGui.QIcon(""), 'Help', self)
helpAction.setShortcut("")
helpAction.triggered.connect(self.helpAction)
infoMenu.addAction(helpAction) # Help
openAction = QAction(QIcon("open.png"), 'Öffnen', self)
openAction.setShortcut("")
openAction.triggered.connect(self.openAction)
fileMenu.addAction(openAction) # Open
saveAction = QAction(QIcon("save.png"), 'Speichern unter', self)
saveAction.setShortcut("")
saveAction.triggered.connect(self.saveAction)
fileMenu.addAction(saveAction) # Save
printAction = QAction(QIcon("print.png"), 'Drucken', self)
printAction.setShortcut("")
printAction.triggered.connect(self.printDialog)
fileMenu.addAction(printAction) # Print
printpreviewAction = QAction(QIcon("preprint.png"), 'Druckvorschau', self)
printpreviewAction.triggered.connect(self.printPreviewDialog)
fileMenu.addAction(printpreviewAction) # Print preview
pdfAction = QAction(QIcon("pdf.png"), 'PDF Exportieren', self)
pdfAction.triggered.connect(self.pdfExport)
fileMenu.addAction(pdfAction) # PDF export
exitAction = QAction(QIcon("exit.png"), 'Beenden', self)
exitAction.setShortcut("")
exitAction.triggered.connect(self.exitWindow)
fileMenu.addAction(exitAction) # Quit
editAction = QAction(QIcon("edit.png"), 'Schrift', self)
editAction.setShortcut("")
editAction.triggered.connect(self.fontDialog)
editMenu.addAction(editAction) # Font
colorAction = QAction(QIcon("color.png"), 'Schrift Farbe', self) # Font color
colorAction.triggered.connect(self.colorDialog)
editMenu.addAction(colorAction)
#------------------------ Exit button function -----------------------------------#
def exitWindow(self):
self.close()
#-------------------------Text Editor---------------------------------------------#
def createEditor(self):
self.textEdit = QTextEdit(self)
self.setCentralWidget(self.textEdit)
#------------------------Font dialog----------------------------------------------#
def fontDialog(self):
font, ok = QFontDialog.getFont()
if ok:
self.textEdit.setFont(font)
#----------------------- Font color dialog ---------------------------------------#
def colorDialog(self):
color = QColorDialog.getColor()
self.textEdit.setTextColor(color)
#----------------------------Print the file---------------------------------------#
def printDialog(self):
printer = QPrinter(QPrinter.HighResolution)
dialog = QPrintDialog(printer, self)
if dialog.exec_() == QPrintDialog.Accepted:
self.textEdit.print_(printer)
#--------------------------Print preview------------------------------------------#
def printPreviewDialog(self):
printer = QPrinter(QPrinter.HighResolution)
previewDialog = QPrintPreviewDialog(printer, self)
previewDialog.paintRequested.connect(self.printPreview)
previewDialog.exec_()
def printPreview(self, printer):
self.textEdit.print_(printer)
#-------------------------PDF Exporter-----------------------------------------#
def pdfExport(self):
fn, _= QFileDialog.getSaveFileName(self, "Export PDF", None, "PDF files (.pdf);;All Files()")
if fn != '':
if QFileInfo(fn).suffix() == "" :fn += '.pdf'
printer = QPrinter(QPrinter.HighResolution)
printer.setOutputFormat(QPrinter.PdfFormat)
printer.setOutputFileName(fn)
self.textEdit.document ().print_(printer)
#-------------------------------Load file-----------------------------------------#
def openAction(self):
fname = QFileDialog.getOpenFileName(self, 'Open file', '/home')
if fname[0]:
f = open(fname[0], 'r')
with f:
data = f.read()
self.textEdit.setText(data)
#------------------------------Save file------------------------------------------#
def saveAction(self):
filename, _ = QFileDialog.getSaveFileName(self, 'Datei Speichern', ".txt", "Alle Datein (*);; Text Datei (*.txt)")
if filename:
with open(filename, "w") as file:
file.write(self.textEdit.toPlainText())
file.close()
#-----------------------------Message Box-------------------------------------#
def helpAction(self):
QMessageBox.about(self, "Entwickelt mit QT5", "Alpha 1.0")
#------------------------------End------------------------------------------------#
App = QApplication(sys.argv)
Window = Window()
sys.exit(App.exec_())
|
schnuppi1984/Easy-Text-Editor
|
start.py
|
start.py
|
py
| 6,702 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25159533855
|
import psycopg2
import random
con=psycopg2.connect('dbname=ecommerce_db user=postgres port=5432 host=localhost password=Murad2004')
cur=con.cursor()
def show(cursor, query):
cursor.execute(query)
length = 30
print(*[desc[0].ljust(length) for desc in cursor.description], sep='')
print('-'*140)
result = cursor.fetchall()
for row in result:
for col in row:
print(str(col).ljust(length)[:37], end='')
print()
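# Example usage of show() (illustrative):
#   show(cur, "SELECT * FROM product LIMIT 5")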
# query="""
# CREATE TABLE seller(
# id SERIAL PRIMARY KEY,
# name VARCHAR(50)
# );
# CREATE TABLE product(
# id SERIAL PRIMARY KEY,
# title VARCHAR(50) NOT NULL,
# price NUMERIC NOT NULL,
# seller_id INT,
# CONSTRAINT fk_seller
# FOREIGN KEY(seller_id)
# REFERENCES seller(id)
# ON DELETE CASCADE
# );
# CREATE TABLE tag(
# id SERIAL PRIMARY KEY,
# title VARCHAR(50) NOT NULL
# );
# CREATE TABLE customer(
# id SERIAL PRIMARY KEY,
# name VARCHAR(50)
# );
# CREATE TABLE wishlist(
# id SERIAL PRIMARY KEY,
# customer_id INT,
# CONSTRAINT fk_customer
# FOREIGN KEY(customer_id)
# REFERENCES customer(id)
# ON DELETE CASCADE
# );
# CREATE TABLE wishlist_products(
# id SERIAL PRIMARY KEY,
# product_id INT,
# customer_id INT,
# CONSTRAINT fk_customer
# FOREIGN KEY(customer_id)
# REFERENCES customer(id)
# ON DELETE CASCADE,
# CONSTRAINT fk_product
# FOREIGN KEY(product_id)
# REFERENCES product(id)
# ON DELETE CASCADE
# );
# CREATE TABLE review(
# id SERIAL PRIMARY KEY,
# rate NUMERIC,
# customer_id INT,
# product_id INT,
# CONSTRAINT fk_customer
# FOREIGN KEY(customer_id)
# REFERENCES customer(id)
# ON DELETE SET NULL,
# CONSTRAINT fk_product
# FOREIGN KEY(product_id)
# REFERENCES product(id)
# ON DELETE CASCADE
# );
# CREATE TABLE product_tags(
# id SERIAL PRIMARY KEY,
# product_id INT,
# tag_id INT,
# CONSTRAINT fk_product_tag
# FOREIGN KEY(product_id)
# REFERENCES product(id)
# ON DELETE CASCADE,
# CONSTRAINT fk_tag_product
# FOREIGN KEY(tag_id)
# REFERENCES tag(id)
# ON DELETE CASCADE
# );
# """
customer_data=[{
"name": "Halette Milberry"
}, {
"name": "Barby Wastell"
}, {
"name": "Lexie Dragon"
}, {
"name": "Rosamond Kynston"
}, {
"name": "Christen Keyson"
}, {
"name": "Madeline Knottley"
}, {
"name": "Ruby Loachhead"
}, {
"name": "Aeriel Knowlden"
}, {
"name": "Hedy Phillipp"
}, {
"name": "Harmonia Freckelton"
}, {
"name": "Rossy Mustchin"
}, {
"name": "Dulcie Higgonet"
}, {
"name": "Kala Caldroni"
}, {
"name": "Nessie Lavery"
}, {
"name": "Shanta Polotti"
}, {
"name": "Berty Dampier"
}, {
"name": "Frans Fosdike"
}, {
"name": "Lotty Corkhill"
}, {
"name": "Randie Lawther"
}, {
"name": "Husain Reye"
}, {
"name": "Fayre McPhillimey"
}, {
"name": "Susette Raitie"
}, {
"name": "Sela Elsmore"
}, {
"name": "Taddeo Enterlein"
}, {
"name": "Valma Hutchence"
}, {
"name": "Micki Gorelli"
}, {
"name": "Arabelle Najera"
}, {
"name": "Annemarie Crenage"
}, {
"name": "Nara Whight"
}, {
"name": "Borg Downage"
}, {
"name": "Sheri Moreman"
}, {
"name": "Hew Dignum"
}, {
"name": "Jacquenette Caygill"
}, {
"name": "Margot Cradduck"
}, {
"name": "Adele Snassell"
}, {
"name": "Caryl Pevsner"
}, {
"name": "Gannon Northrop"
}, {
"name": "Artemas Goodlip"
}, {
"name": "Lawrence Crockatt"
}, {
"name": "Sheelagh Cosely"
}, {
"name": "Doralyn Tripett"
}, {
"name": "Grove Learman"
}, {
"name": "Rosanna Pretious"
}, {
"name": "Earle Sapshed"
}, {
"name": "Guido Onyon"
}, {
"name": "Rolfe Panner"
}, {
"name": "Hilly Dashwood"
}, {
"name": "Orland Shutt"
}, {
"name": "Kipp Blacksell"
}, {
"name": "Umberto Chaman"
}]
# query="""
# INSERT INTO customer(name) VALUES(%s);
# """
# for i in customer_data:
# cur.execute(query,(i['name'],))
# query="SELECT * FROM customer"
seller_data=[
{
"name": "Si Friary"
}, {
"name": "Scotty Ludlem"
}, {
"name": "Randa Ifill"
}, {
"name": "Vanessa Fay"
}, {
"name": "Tamarra Tossell"
}, {
"name": "Kennett Dumper"
}, {
"name": "Jessika Stienham"
}, {
"name": "Perry Branscombe"
}, {
"name": "Salaidh Schultz"
}, {
"name": "Nicolis Stonman"
}, {
"name": "Michale Brecknock"
}, {
"name": "Marian Withinshaw"
}, {
"name": "Lynea Benit"
}, {
"name": "Cale Giacometti"
}, {
"name": "Ave Jahnisch"
}, {
"name": "Aurelea Adshed"
}, {
"name": "Pavlov Borham"
}, {
"name": "Lamont McCanny"
}, {
"name": "Rustie Troyes"
}, {
"name": "Ivory Vina"
}]
# query="""
# INSERT INTO seller(name) VALUES(%s);
# """
# for i in seller_data:
# cur.execute(query,(i["name"],))
# query="SELECT * FROM seller"
# cur.execute(query)
tag_data=[
{
"title": "Cheese"
},
{
"title": "Chocolate"
},
{
"title": "Vanillia"
},
{
"title": "Vegetable"
},
{
"title": "Vegan"
},
{
"title": "Healthy"
},
{
"title": "Fit"
},
{
"title": "Meal"
},
{
"title": "Fast Food"
}
]
# query="""
# INSERT INTO tag(title) VALUES(%s);
# """
# for i in tag_data:
# cur.execute(query,(i['title'],))
# query='SELECT * FROM tag'
seller_ids=[]
for i in range(len(seller_data)):
seller_ids.append(i+1)
product_data=[
{
"title": "M&M Food Market",
"price": "17.0616609356653"
},
{
"title": "Soprole",
"price": "11.6234613464323"
},
{
"title": "Kinder",
"price": "2.62073436454904"
},
{
"title": "Andy Capp's fries",
"price": "14.6864611770429"
},
{
"title": "Bewley's",
"price": "7.01804420073426"
},
{
"title": "Vitta Foods",
"price": "4.5093621385793"
},
{
"title": "Taco Bell",
"price": "19.1318949810843"
},
{
"title": "Sun-Pat",
"price": "9.6603184191791"
},
{
"title": "Baskin robbins",
"price": "16.105171543595"
},
{
"title": "Wendy's",
"price": "5.43620887838128"
},
{
"title": "Cobblestone",
"price": "7.22419333514953"
},
{
"title": "Wonder Bread",
"price": "14.6278888390529"
},
{
"title": "Lavazza",
"price": "10.305469252777"
},
{
"title": "Kinder",
"price": "19.4697343713929"
},
{
"title": "Soprole",
"price": "16.3448767300439"
},
{
"title": "Nabisco",
"price": "2.48867588838966"
},
{
"title": "Tic Tac",
"price": "2.60812248457601"
},
{
"title": "Magnum",
"price": "19.4421954995218"
},
{
"title": "Papadopoulos",
"price": "19.4472127819654"
},
{
"title": "Wonder Bread",
"price": "12.7520409541913"
},
{
"title": "Papadopoulos",
"price": "1.811215852765"
},
{
"title": "Olymel",
"price": "7.34511601847835"
},
{
"title": "Domino",
"price": "7.64364533249459"
},
{
"title": "Pizza Hut",
"price": "12.6648227300797"
},
{
"title": "Red Lobster",
"price": "10.0007594130005"
},
{
"title": "Andy Capp's fries",
"price": "18.5981898673802"
},
{
"title": "Secret Recipe",
"price": "18.6991437984161"
},
{
"title": "Sun-Pat",
"price": "3.15631274094633"
},
{
"title": "Magnum",
"price": "10.3542353042188"
},
{
"title": "Heinz",
"price": "17.7369680049536"
},
{
"title": "Olymel",
"price": "19.9154627821015"
},
{
"title": "Taco Bell",
"price": "10.9514749045258"
},
{
"title": "Dunkin' Donuts",
"price": "11.479457990024"
},
{
"title": "Applebee's",
"price": "15.7718961763996"
},
{
"title": "Knorr",
"price": "10.4961827092321"
},
{
"title": "KFC",
"price": "12.4794360452702"
},
{
"title": "Domino",
"price": "17.0641279993877"
},
{
"title": "Knorr",
"price": "2.66790023197788"
},
{
"title": "Kits",
"price": "18.8862874209351"
},
{
"title": "Dunkin' Donuts",
"price": "7.84475450163929"
},
{
"title": "Applebee's",
"price": "13.4456292886499"
},
{
"title": "Nutella",
"price": "4.63776473637566"
},
{
"title": "Bewley's",
"price": "13.0057596485157"
},
{
"title": "Kits",
"price": "1.38640394266062"
},
{
"title": "Nesquik",
"price": "6.1496629436266"
},
{
"title": "KFC",
"price": "15.6723103028128"
},
{
"title": "Andy Capp's fries",
"price": "17.8805946269448"
},
{
"title": "Tic Tac",
"price": "7.01679017348997"
},
{
"title": "Andy Capp's fries",
"price": "7.87038087466284"
},
{
"title": "Bel Group",
"price": "10.6127773935966"
}
]
# query="""
# INSERT INTO product(title,price,seller_id) VALUES(%s,%s,%s);
# """
# for i in product_data:
# cur.execute(query,(i['title'],i['price'],random.choice(seller_ids)))
# query="SELECT * FROM product"
customers_ids=[]
for i in range(len(customer_data)):
customers_ids.append(i+1)
# query="""
# INSERT INTO wishlist(customer_id) VALUES(%s);
# """
# for i in customer_data:
# cur.execute(query,(random.choice(customers_ids),))
# query="SELECT * FROM wishlist"
# rate NUMERIC,
# # customer_id INT,
# # product_id INT,
# query="""
# INSERT INTO review(rate,customer_id,product_id) VALUES(%s,%s,%s);
# """
# for i in customer_data:
# cur.execute(query,(random.randint(1,5),random.choice(customers_ids),random.randint(1,len(product_data))))
# query='SELECT * FROM review'
# product_id INT,
# # customer_id INT,
# query="""
# INSERT INTO wishlist_products(product_id,customer_id) VALUES(%s,%s);
# """
# for i in customer_data:
# cur.execute(query,(random.randint(1,len(product_data)),random.choice(customers_ids)))
# query='SELECT * FROM wishlist_products'
# query="""
# INSERT INTO product_tags(product_id,tag_id) VALUES(%s,%s);
# """
# for i in product_data:
# cur.execute(query,(random.randint(1,len(product_data)),random.randint(1,len(tag_data))))
# query='SELECT * FROM product_tags'
# query="""
# SELECT *
# FROM product_tags pt
# LEFT JOIN tag t ON pt.tag_id = t.id
# WHERE pt.product_id = 5;
# """
# # query='SELECT * FROM product'
# query="""
# SELECT *
# FROM product
# LEFT JOIN seller ON product.seller_id = seller.id
# WHERE seller.id = 5;
# """
# query="""
# SELECT *
# FROM wishlist_products
# LEFT JOIN product ON wishlist_products.product_id = product.id
# WHERE wishlist_products.customer_id = 2;
# """
# query="""
# SELECT p.id, p.title
# FROM product p
# LEFT JOIN review r ON p.id = r.product_id
# GROUP BY p.id, p.title
# ORDER BY rate DESC
# LIMIT 10;
# """
# # query='''
# # SELECT * FROM review LEFT JOIN product ON product_id=product.id WHERE product_id=2 ;
# # '''
# # WHERE product_id IN (SELECT AVG(rate) FROM review GROUP BY product_id ORDER BY AVG(rate) DESC)
# # query="SELECT * FROM product"
# Queries start here
# Pick a tag and list that tag's products
query="""SELECT * FROM product
LEFT JOIN product_tags on product_tags.product_id=product.id WHERE product_tags.tag_id=5"""
# Pick a product and list that product's tags
# query="""SELECT * FROM product_tags
# LEFT JOIN product on product.id=product_tags.product_id WHERE product.id=5
# """
# Pick a seller and list that seller's products
# query="""
# SELECT * FROM product
# LEFT JOIN seller on seller.id=product.seller_id WHERE seller.id=5
# """
# Pick a customer and list the products in that customer's wishlist
# query="""
# SELECT * FROM wishlist_products
# LEFT JOIN customer on wishlist_products.customer_id=customer.id WHERE customer.id=45
# """
# Show the 10 products with the highest average review rating, together with that average
# query="""SELECT AVG(rate),product.id FROM product
# LEFT JOIN review on product.id=review.product_id GROUP BY product.id ORDER BY AVG(rate) LIMIT 10"""
# Order the tags by their number of products, showing the product counts as well
# query="""
# SELECT COUNT(product_tags.product_id),product_tags.tag_id FROM product_tags LEFT JOIN tag on product_tags.tag_id=tag.id GROUP BY product_tags.tag_id ORDER BY COUNT(product_tags.product_id) DESC
# """
# Show the 10 customers whose wishlisted products have the highest total price, together with that total
# query="""
# SELECT customer.id,SUM(wishlist_products.product_id) FROM customer LEFT JOIN wishlist_products on customer.id=wishlist_products.customer_id GROUP BY customer.id HAVING SUM(wishlist_products.product_id) IS NOT NULL ORDER BY SUM(wishlist_products.product_id) DESC LIMIT 10
# """
# Show the data of the first 10 sellers by id, together with the average rating of the reviews on each seller's products
query="""
SELECT seller.id, seller.name, AVG(rate) FROM seller LEFT JOIN product on seller.id=product.seller_id LEFT JOIN review on product.id=review.product_id GROUP BY seller.id, seller.name ORDER BY seller.id LIMIT 10
"""
show(cur)
con.commit()
|
repo_name: MuradAsadzade/Postresql-join-tasks | sub_path: ecommerce.py | file_name: ecommerce.py | file_ext: py | file_size_in_byte: 13,617 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id (next record): 21322953683
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Camel: A dos game ported to a cross platform solution.
Was originally: Camel Source Code for the BrailleNote, written in Rapid Euphoria
Original author: Louis Bryant
Modified by Nathaniel Schmidt <[email protected]>
Date modified: 09/09/2020; 23/01/2021; 03/02/2022
You have permission to modify and redistribute this code and software with or without changes.
Pulse Data International, Rapid Deployment Software, Programmers of other included files, and I take no responsibility for damages you cause by your modifying this software.
This code and software is provided 'as is' without any implied or express warranty.
"""
from random import randint
from time import sleep
# First, let's declare some global variables - bad practice but easier when translating from such a basic language such as euphoria:
you = 0 # Where you are.
hyenas = 0 # The hyenas location.
drinks = 0 # How many drinks you have left.
gocommands = 0 # How many commands you have before you need another drink.
days = 0 # How many good days your camel has left.
n = 0 # Temporary random number usages.
mainInput = None # Stores the user presses here.
gameLost = False  # Whether you have lost, mainly for the printLoss function and main game loop.
def queryInstructions (prompt):
"""Ask the user whether they want instructions, then recursively query the user for instructions until the user declines.
@param prompt: The output prompt containing the query for the user to answer.
@type prompt: str"""
global mainInput
instructions = """Welcome to the game of Camel.
The object of the game is to travel 200 miles across the Great Desert.
A pack of nasty, ravenous hyenas will be chasing you.
You will be asked for commands every so often.
C O M M A N D S:
1 -- drink from your canteen,
2 -- move ahead moderate speed,
3 -- move ahead fast speed,
4 -- stop for a rest,
5 -- status check,
6 -- hope for help,
7 -- exit,
And 8 -- request help to list available commands.
You will get a quart of water which will last you six drinks.
You must completely renew your water supply at an oasis.
You get a half quart if found by help.
If help does not find you after command '6', you lose."""
mainInput = input(prompt)
mainInput = mainInput.upper ()
while mainInput != "Y" and mainInput != "N":
print ("Please enter either 'y' or 'n'")
mainInput = input("Would you like instructions? Type Y for yes or N for no. ")
mainInput = mainInput.upper ()
if mainInput == 'Y':
print (instructions)
queryInstructions ("Would you like to hear the instructions again? Type Y for yes or N for no.")
else:
print("Good luck and good cameling! ")
# Now, let's initialize the variables:
def init():
"""Initialise global variable identifiers with required initial value assignments to allow the game to start."""
global you
global hyenas
global drinks
global gocommands
global days
you = 0 # You haven't gone anywhere.
hyenas = 25 # The hyenas are 25 miles ahead of you.
drinks = 6 # You have six drinks left in your canteen.
gocommands = 4 # You have 4 commands without drinking.
days = 7 # Your camel has 7 good days left.
def printLoss ():
"""Print a random loss message from a randomised selection."""
global n
n = randint(1, 4) # We have four loser statements.
    print("Your body and soul lie at rest in the sand. ")
    if n == 1: # This is the first loser statement.
        print("The National's Camel Union is not attending your funeral!!!!!! ")
    elif n == 2: # This is the second loser statement.
        print("Your body was eaten by vultures and hyenas!!!!!! ")
    elif n == 3: # This is the third loser statement.
        print("People with little intelligence should stay out of the desert. ")
    elif n == 4: # This is the last loser statement.
        print("Turkeys should fly, not ride camels. ") # No more loser statements.
def queryReplay ():
"""Ask whether to play the game again or exit."""
global gameLost
global mainInput
if gameLost == True:
printLoss ()
    mainInput = input ("Want another camel and a new game? (Press Y for yes or N for no) ")
mainInput = mainInput.upper ()
while mainInput != 'Y' and mainInput != 'N':
print ("Please enter either 'Y' or 'N'")
        mainInput = input ("Want another game? (Press Y for yes or N for no) ")
mainInput = mainInput.upper ()
if mainInput == 'Y':
gameLost = False
main ()
else:
print ("Chicken!")
exit ()
def gameStatus ():
"""Figure out what to do based on the current state of global vars."""
global you
global hyenas
global drinks
global gocommands
global days
global gameLost
# Check where you are before letting you proceed.
# Did you win? Or did the hyenas capture you?
# Or, maybe, you are still alive.
if you > 199: # You made it!
print("YOU WIN! A party is given in your honor! ")
print("The hyenas have been tamed and are planning to attend. ")
queryReplay ()
if you > hyenas: # You are ahead of the hyenas.
# Let them move.
hyenas += randint(1, 20) # Move at a random speed.
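    # Added note: the capture check below only applies once you are past mile 30,
    # which gives the player a short head start before the hyenas become a real threat.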
if hyenas >= you and you >30:
print("THE hyenas HAVE CAPTURED YOU!")
print ("CAMEL AND PEOPLE SOUP IS THEIR FAVORITE DISH. ")
gameLost = True
queryReplay ()
if gocommands < 3: # You had better get a drink.
print("W A R N I N G -- GET A DRINK ")
if gocommands < 0: # Too many commands without drinking.
print("YOU RAN OUT OF WATER... SORRY CHUM!!!!!! ")
gameLost = True
queryReplay ()
# What about your camel?
if days < 1: # You ran your camel to death!
print("YOU DIRTY LOUSY RAP SCALLION!!! ")
print("YOU RAN YOUR INNOCENT CAMEL TO DEATH! ")
gameLost = True
queryReplay ()
# Well? Let's continue!
if you == 0: # You are just starting.
        print("You are in the middle of the desert at an oasis. ")
if you > 25:
hyenas += randint(1, 10)
print("The hyenas are {0} miles behind you.".format(you-hyenas))
print("You have travelled {0} miles altogether, and have {1} more miles to go.".format (you, 200-you))
# Now let's start the game.
def main ():
"""Main procedure for the game."""
global you
global hyenas
global drinks
global gocommands
global days
global n
global gameLost
global mainInput
print("Welcome to The Game Of Camel. ")
queryInstructions ("Would you like to hear game instructions? Type Y for yes or N for no.")
init() # Call the function to initialize the variables.
gameStatus ()
while gameLost != True:
while True:
try:
mainInput = int(input("Your command?"))
break
except ValueError:
print ("Make sure you only enter a number.")
continue
if mainInput == 1: # Have a drink
# Drink from your canteen.
if drinks == 0:
print("YOU RAN OUT OF WATER. SORRY CHUM!!!!!! ")
gameLost = True
queryReplay ()
else: # Get a drink?
drinks -= 1
                print("BETTER WATCH FOR AN OASIS. ")
gocommands = 4 # Reset how many commands you can go before drinking.
gameStatus ()
elif mainInput == 2:
# Walk normally.
you += randint(1, 5) # Move randomly from 1 to 5 miles.
days -= 1 # Subtract one day from the camel.
print("Your camel likes this pace! ")
gocommands -= 1 # Subtract commands you have before drinking.
gameStatus ()
elif mainInput == 3:
# So try to run!
gocommands -= 1 # You wasted one more command.
gameStatus ()
n = randint(1, 4) # What happens here?
# Let's see.
if n == 1: # The computer chose the first action.
# The first action is a sand-storm.
print("YOU HAVE BEEN CAUGHT IN A SAND-STORM... ")
print("GOOD LUCK! ")
you += randint(1, 5) # Slow down.
gameStatus ()
elif n == 2: # The Note-taker chose to perform the second action. This action is to let your camel find an oases.
                print("You have stopped at an oasis. Your camel is filling your canteen and eating figs. ")
drinks = 6 # Put six more drinks in the canteen.
gocommands = 4 # Reset the commands.
gameStatus ()
n = 4 # Force the Note-taker to do the last action.
elif n == 3: # Oops! The Note-taker chose the third action. This action gets you caught by a hidden crazy kidnapper.
print("YOU HAVE BEEN CAPTURED BY some HIDDEN CRAZY KIDNAPPERS. ")
print("Luckily the local council has agreed to their ransom-demands...")
print("You have a new set of commands. ")
print("#9 attempt an escape, or #0 wait for payment.")
subInput = int(input("Your sub-command? "))
                if subInput == 9: # The number nine was pressed.
# Attempt an escape.
n = randint(1, 2) # One of two things can happen.
if n == 1: # You made it.
print("CONGRATULATIONS! YOU SUCCESSFULLY ESCAPED! ")
else: # Well, you didn't make it.
print("You were mortally wounded by a gunshot wound while trying to escape. ")
gameLost = True
queryReplay ()
                elif subInput == 0: # The number zero was pressed.
                    print("Your ransom has been paid and you are free to go. The local council is collecting. ")
                    print("Just Wait ")
                    sleep(7) # Stop for seven seconds.
you += randint(1, 3) # Move from one to three miles.
# The kidnapper slowed you down.
elif n == 4: # Your camel is burning across the desert sands.
you += randint(6, 20) # Randomly move from one to twenty miles.
print("Your camel is burning across the desert sands. ")
days -= 3 # Subtract three days from your camel.
gameStatus ()
# You should never get here unless you press number 4.
elif mainInput == 4: # let the camel rest.
print("Your camel thanks you. ")
days = 7 # You now have seven good days left.
gocommands -= 1 # Lose one more command.
gameStatus ()
elif mainInput == 5: # Status Check Please?
print("Your camel has {0} good days left. You have {1} drinks left in the canteen. You can go {2} commands without drinking.".format(days, drinks, gocommands))
elif mainInput == 6: # HELP!
n = randint(1, 2) # Chose whether to give out help or not.
if n == 1: # Give Help.
print("Help has found you in a state of unconsciousness. ")
# Let the camel rest for a while.
                days = 7 # Your camel is rejuvenated.
drinks = 3 # You get the half-quart of water.
# You drink some water and get more commands.
gocommands = 8 # You now have eight commands without drinking.
gameStatus ()
else: # Help hasn't found you.
print ("You waited, and waited... and waited... but no help arrived.")
gameLost = True
queryReplay ()
elif mainInput == 7: # Exit
exitQuery = input ("Are you sure you want to exit? Press Y or N.")
exitQuery = exitQuery.upper ()
while exitQuery != "Y" and exitQuery != "N":
print ("Please enter either Y or N.")
exitQuery = input ("Are you sure you want to exit? Press Y or N.")
exitQuery = exitQuery.upper ()
if exitQuery == "Y":
exit ()
else:
print ("Okay.")
elif mainInput == 8: # request program help
print("The commands you can choose from are: ")
print("1 -- drink from your canteen ")
print("2 -- move ahead moderate speed ")
            print("3 -- move ahead fast speed ")
print("4 -- stop for a rest ")
print("5 -- status check ")
print("6 -- hope for help ")
print ("7 - exit")
print ("8 - get program help and list commands.")
else: # Invalid option.
print("Invalid Option. ")
print("The commands you can choose from are:")
print("1 -- drink from your canteen ")
print("2 -- move ahead moderate speed ")
print("3 -- move ahead fast speed ")
print("4 -- stop for a rest ")
print("5 -- status check ")
print("6 -- hope for help ")
print ("7 -- exit")
print ("8 -- get program help and list commands.")
if __name__ == "__main__":
main ()
# End of program.
|
repo_name: njsch/camel | sub_path: camel.py | file_name: camel.py | file_ext: py | file_size_in_byte: 13,805 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id (next record): 1824122729
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, redirect
from book.models import BookInfo
# Create your views here.
################################# Request #######################################################################################################################################
def create_book(request):
book = BookInfo.objects.create(
name='abc',
pub_date='2022-5-3',
readcount=10
)
return HttpResponse('create')
def shop(request, city_id, shop_id):
print(city_id,shop_id)
query_params = request.GET
print(query_params)
# order = query_params['order']
# order = query_params.get('oder')
# <QueryDict: {'order': ['readcount'], 'page': ['1']}>
    # QueryDict behaves like a dict
    # and can also hold multiple values for one key
# # <QueryDict: {'order': ['readcount', 'commentcount'], 'page': ['1']}>
order = query_params.getlist('order')
print(order)
    return HttpResponse('python_django learning')
def register(request):
data = request.POST
print(data)
# < QueryDict: {'username': ['xixi'], 'password': ['123']} >
return HttpResponse('Register')
def json(request):
body = request.body
# print(body)
# b'{\n\t"name":"xixi",\n\t"age": 28\n}'
body_str = body.decode()
# print(body_str)
"""
{
"name":"xixi",
"age": 28
}
<class 'str'>
"""
# print(type(body_str))
    # A JSON-formatted string can be converted into a Python dict
import json
body_dict = json.loads(body_str)
print(body_dict)
# {'name': 'xixi', 'age': 28}
    ############## request headers ############
# print(request.META)
print(request.META['SERVER_PROTOCOL'])
return HttpResponse('json')
def method(request):
print(request.method)
return HttpResponse('method')
def mobilePhone(request, phone_number):
print(phone_number)
return HttpResponse('mobilePhone')
################################### Response #################################################
def response(request):
    # HttpResponse(content=response body, content_type=MIME type of the body, status=status code)
# response = HttpResponse('res', status=200)
#
# response['name'] = 'xixi'
#
# return response
# JSON -> dict
# dict -> JSON
info = {
'name': 'xixi',
'age': 28
}
info_list = [
{
'name': 'xixi',
'age': 28
},
{
'name': 'erxi',
'age': 28
}
]
# response = JsonResponse(info)
response = JsonResponse(info_list, safe=False)
# response = JsonResponse(data=info_list, safe=False)
# [{"name": "xixi", "age": 28}, {"name": "erxi", "age": 28}]
return response
# return redirect('http://www.baidu.com')
# import json
# data=json.dumps(info_list)
#
# response = HttpResponse(data)
# return response
# 1xx
# 2xx
    # 200 success
    # 3xx
    # 4xx: something wrong with the request
    # 404: page not found (routing problem)
    # 403: forbidden (permissions problem)
# 5xx
# HTTP status code must be an integer from 100 to 599
#####################
"""
Query string
http://ip:port/path/path/?key=value&key1=value1
The URL is split into two parts at the '?':
the part before the '?' is the request path;
the part after the '?' is the query string, which behaves like a dict: key=value pairs joined with '&'
"""
########################### cookies and sessions ##############################################################################
"""
The first request carries a query string:
http://127.0.0.1:8000/set_cookie/?username=zhangsan&password=123
After receiving the request, the server reads username and sets cookie data that includes it.
After receiving the server's response, the browser should store the cookie.
On the second and every later request to http://127.0.0.1:8000 the cookie is sent along, so the server can read it to identify the user.
"""
def set_cookie(request):
    # Set cookies: the server puts them on the response
    # 1. Read the query-string data
    username = request.GET.get('username')
    pwd = request.GET.get('pwd')
    # 2. The server sets the cookies
    response = HttpResponse('set_cookie')
    # key, value; max_age is the expiry time in seconds
    response.set_cookie('name', username, max_age=3600)  # valid for one hour
    response.set_cookie('pwd', pwd)  # temporary (session) cookie
    # Delete a cookie
    response.delete_cookie('pwd')
return response
def get_cookie(request):
    # Read cookies from the request
    print(request.COOKIES)
    # request.COOKIES is a dict
name = request.COOKIES.get('name')
return HttpResponse(name)
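# Added illustration (not part of the original file): these cookie views would typically
# be wired up in the app's urls.py, roughly like the hypothetical sketch below.
#
# from django.urls import path
# from book import views
#
# urlpatterns = [
#     path('set_cookie/', views.set_cookie),
#     path('get_cookie/', views.get_cookie),
# ]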
################## session #####################
# Sessions are stored on the server side -- the data is relatively safe
# Sessions depend on cookies
"""
On the first request, http://127.0.0.1:8000/set_session/?username=zhangsan, we set the session data on the server side.
At the same time the server generates a sessionid cookie.
After the browser receives it, it should store that cookie data.
The second and every later request carries this sessionid; the server validates it and, if it checks out, reads the related data and runs the business logic.
"""
def set_session(request):
    # 1. Simulate fetching the user info
    username = request.GET.get('username')
    # 2. Set the session data
    user_id = 1
    request.session['user_id'] = user_id
    request.session['username'] = username
    # Deleting session data:
    # request.session.clear() clears all of the session's values
    # request.session.clear()
    # request.session.flush() clears all of the session's keys and values
    # request.session.flush()
    # del request.session['key'] removes the value stored under one session key
# del request.session['48e4r7tydk1z8zs6rbvxk0ox1ti14zh2']
# request.session.set_expiry(10)
return HttpResponse('set_session')
def get_session(request):
    # Indexing the session by key raises an exception when the key is missing or does not match, so it is not recommended
# user_id = request.session['user_id']
# username = request.session['username']
user_id = request.session.get('user_id')
username = request.session.get('username')
content = '{},{}'.format(user_id,username)
return HttpResponse(content)
############################### class-based views ###################################
def login(request):
    print(request.method)
    if request.method == 'GET':
        return HttpResponse('GET request')
    else:
        return HttpResponse('POST request')
"""
Defining a class-based view:
class ClassViewName(View):
    def get(self, request):
        return HttpResponse('xxx')
    def http_method_lowercase(self, request):
        return HttpResponse('xxx')
1. Inherit from View
2. The methods of a class view use the lower-cased HTTP method names to distinguish the request types
"""
from django.views import View
class LoginView(View):
    def get(self, request):
        return HttpResponse('GET handling logic')
    def post(self, request):
        return HttpResponse('POST handling logic')
"""
"My orders" / "profile" pages:
a logged-in user may access them;
a user who is not logged in should not, and should be redirected to the login page.
Define a class-based view for the orders / profile page.
How do we decide whether the user is logged in? We use logging in to the admin site as the example.
"""
from django.contrib.auth.mixins import LoginRequiredMixin
# class OrderView(View):  # inherit only from View
# class OrderView(View, LoginRequiredMixin):
# multiple inheritance from LoginRequiredMixin and View -- the order of the bases matters
class OrderView(LoginRequiredMixin, View):  # inherit from both LoginRequiredMixin and View
    def get(self, request):
        # simulated login flag
        # isLogin = True
        # if not isLogin:
        #     return HttpResponse('Not logged in, redirect to the login page')
        return HttpResponse('GET my-orders page; this page requires login')
    def post(self, request):
        isLogin = True
        # if not isLogin:
        #     return HttpResponse('Not logged in, redirect to the login page')
        return HttpResponse('POST my-orders page; this page requires login')
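# Added note: LoginRequiredMixin must appear before View in the base-class list so that
# its dispatch() runs first; unauthenticated requests are then redirected to
# settings.LOGIN_URL (or the view's login_url attribute) instead of reaching get()/post().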
|
repo_name: guoxi-xixi/django_base | sub_path: bookmanager03/book/views.py | file_name: views.py | file_ext: py | file_size_in_byte: 8,205 | program_lang: python | lang: zh | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id (next record): 31788132323
from typing import TYPE_CHECKING, Iterable, List, Optional, Union, overload
from ..builtins import types
from ..common.error import ConstraintError
from ..node import (
ArrayTypeNode,
FuncTypeNode,
PointerTypeNode,
SimpleTypeNode,
TypeNode,
)
if TYPE_CHECKING:
from .block import Expression
class ChunkVariable:
def __init__(
self,
name: str,
vtype: Optional[TypeNode],
chunk: Optional["Chunk"],
initial: Optional["Expression"] = None,
):
self.name = name
self.vtype = vtype
self.chunk = chunk
self.initial = initial
def _typenamestr(
self, tp: TypeNode, original: Optional[str], name: Optional[str]
) -> str:
original = original or ""
name = name or ""
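        # Added note: recursively builds a C-style declarator for this type node --
        # pointers prefix the name with '*', arrays append '[size]', and function
        # types expand to 'ret (name)(args)'.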
if isinstance(tp, SimpleTypeNode):
return f"{types.TRANSLATIONS[tp.core]} {name}"
elif isinstance(tp, FuncTypeNode):
args = ", ".join(self._typenamestr(arg, None, None) for arg in tp.args)
if name == original:
assert original
base = f"{original}({args})"
return self._typenamestr(tp.ret, base, base)
else:
ret = self._typenamestr(tp.ret, None, None)
return f"{ret} ({name})({args})"
elif isinstance(tp, PointerTypeNode):
return self._typenamestr(tp.base, original, f"*{name}")
elif isinstance(tp, ArrayTypeNode):
return self._typenamestr(tp.base, original, f"{name}[{tp.size}]")
else:
raise RuntimeError("invalid variable type")
def typename(self) -> str:
if self.vtype is None:
return f"void {self.name}"
return self._typenamestr(self.vtype, self.name, self.name)
def typestr(self) -> str:
if self.vtype is None:
return "void"
return self._typenamestr(self.vtype, None, None).strip()
def _basic_types(self, tp: TypeNode) -> Iterable[str]:
if isinstance(tp, SimpleTypeNode):
yield tp.core
elif isinstance(tp, PointerTypeNode):
yield from self._basic_types(tp.base)
elif isinstance(tp, ArrayTypeNode):
yield from self._basic_types(tp.base)
elif isinstance(tp, FuncTypeNode):
yield from self._basic_types(tp.ret)
for arg in tp.args:
yield from self._basic_types(arg)
else:
raise RuntimeError("invalid variable type")
def basic_types(self) -> Iterable[str]:
if self.vtype is None:
return iter(())
return self._basic_types(self.vtype)
def __repr__(self) -> str:
return f"<{self.__class__.__name__} {self.typename()}>"
class ChunkConstraint:
def __init__(self, islocal=False, isglobal=False, static=False):
self.islocal = islocal
self.isglobal = isglobal
self.static = static
self._verify()
def copy(self) -> "ChunkConstraint":
return ChunkConstraint(
islocal=self.islocal, isglobal=self.isglobal, static=self.static
)
def merge(self, other: "ChunkConstraint"):
self.islocal = self.islocal or other.islocal
self.isglobal = self.isglobal or other.isglobal
self.static = self.static or other.static
self._verify()
def _verify(self):
if self.islocal and self.isglobal:
raise ConstraintError("cannot allow local and global constraints")
def __repr__(self) -> str:
return f"<{self.__class__.__name__} local={self.islocal} global={self.isglobal} static={self.static}>"
class Chunk:
def __init__(
self,
variables: List[ChunkVariable],
constraint: Optional[ChunkConstraint] = None,
):
self.variables = variables
self._table = {
var.name: i
for i, var in enumerate(variables)
if not var.name.startswith("_")
}
self.constraint = ChunkConstraint() if constraint is None else constraint
@property
def varnames(self) -> List[str]:
return [var.name for var in self.variables]
def add_variable(self, variable: ChunkVariable):
if variable.name in self._table:
raise KeyError("variable already exists in chunk")
self.variables.append(variable)
self._table[variable.name] = len(self.variables) - 1
def rename_variable(self, variable: ChunkVariable, name: str):
if variable not in self.variables:
raise KeyError("variable not in chunk")
idx = self._table[variable.name]
self._table.pop(variable.name)
variable.name = name
self._table[variable.name] = idx
def remove_variable(self, variable: ChunkVariable):
if variable.name not in self._table:
raise KeyError("variable not in chunk table")
idx = self._table[variable.name]
target = self.variables[idx]
if target is not variable:
raise KeyError("variable does not match")
self.variables.remove(target)
self._table.pop(target.name)
def lookup(self, name: str) -> Optional[ChunkVariable]:
i = self._table.get(name)
if i is None:
return None
else:
return self.variables[i]
def __contains__(self, var: Union[str, ChunkVariable]) -> bool:
if isinstance(var, str):
return var in self._table
else:
return var in self.variables
def __repr__(self) -> str:
names = ", ".join(var.name for var in self.variables)
return f"<{self.__class__.__name__} {names}>"
@overload
def merge_chunks(first: Optional[Chunk], second: Chunk) -> Chunk:
...
@overload
def merge_chunks(first: Chunk, second: Optional[Chunk]) -> Chunk:
...
def merge_chunks(first: Optional[Chunk], second: Optional[Chunk]) -> Chunk:
if first is None:
assert second is not None
return second
if second is None:
assert first is not None
return first
constraint = first.constraint.copy()
constraint.merge(second.constraint)
return Chunk([*first.variables, *second.variables], constraint)
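# Added note: merge_chunks(None, c) and merge_chunks(c, None) both return the non-None
# chunk unchanged; merging two real chunks concatenates their variable lists and merges
# their constraints (which raises ConstraintError for local + global).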
|
repo_name: jedevc/fyp | sub_path: vulnspec/graph/chunk.py | file_name: chunk.py | file_ext: py | file_size_in_byte: 6,242 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id (next record): 21275819456
"""Defines all necessary networks for training / evaluation
"""
from typing import Optional, Tuple
import mindspore.nn as nn
from mindspore import Tensor
from .backbones import Backbone
from .decoders import Decoder
from .heads import Head
from .loss import Loss
from .necks import Neck
class Net(nn.Cell):
    """Create the network for forward and backward propagation.
Args:
backbone: Model backbone
head: Model head
neck: Model neck. Default: None
Inputs:
| x: Tensor
Outputs:
| result: Tensor
"""
def __init__(
self, backbone: Backbone, head: Head, neck: Optional[Neck] = None
) -> None:
super().__init__()
self.backbone = backbone
self.head = head
self.neck = neck
self.has_neck = self.neck is not None
def construct(self, x: Tensor) -> Tensor:
x = self.backbone(x)
if self.has_neck:
x = self.neck(x)
x = self.head(x)
return x
class EvalNet(nn.Cell):
    """Create a network for forward propagation and decoding only.
    Args:
        net: Network used for forward and backward propagation
        decoder: Decoder
        output_raw: Additionally return the net's raw output. Default: True
Inputs:
| inputs: List of tensors
    Outputs:
| result: Decoded result
| raw_result (optional): Raw result if output_raw is true
"""
def __init__(self, net: Net, decoder: Decoder, output_raw: bool = True) -> None:
super().__init__()
self.net = net
self.decoder = decoder
self.output_raw = output_raw
self.net.set_train(False)
self.decoder.set_train(False)
def construct(self, *inputs: Tensor) -> Tuple[Tensor, ...]:
x = self.net(inputs[0])
result = self.decoder(x, *inputs[1:])
if self.output_raw:
return result, x
return result
class NetWithLoss(nn.Cell):
"""Create network with loss.
Args:
        net: Network used for forward and backward propagation
loss: Loss cell
has_extra_inputs: Has Extra inputs in the loss calculation. Default: False
Inputs:
| data: Tensor feed into network
| label: Tensor of label
| extra_inputs: List of extra tensors used in loss calculation
Outputs:
| loss: Loss value
"""
def __init__(self, net: Net, loss: Loss, has_extra_inputs: bool = False) -> None:
super().__init__()
self.net = net
self.loss = loss
self.has_extra_inputs = has_extra_inputs
def construct(self, data: Tensor, label: Tensor, *extra_inputs: Tensor) -> Tensor:
out = self.net(data)
if self.has_extra_inputs:
return self.loss(out, label, *extra_inputs)
return self.loss(out, label)
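# Hypothetical wiring sketch (added, not part of the original module); the backbone,
# neck, head, loss and decoder instances are assumed to be built elsewhere in mindpose:
#
# net = Net(backbone, head, neck)
# train_net = NetWithLoss(net, loss)
# eval_net = EvalNet(net, decoder, output_raw=True)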
|
repo_name: mindspore-lab/mindpose | sub_path: mindpose/models/networks.py | file_name: networks.py | file_ext: py | file_size_in_byte: 2,807 | program_lang: python | lang: en | doc_type: code | stars: 15 | dataset: github-code | pt: 6
seq_id (next record): 10678150202
import asyncio
from typing import List, Any, Set, Dict
import orjson
import websockets
from websockets import WebSocketServerProtocol
from blockchain import Blockchain
from block import Block
from transaction import Transaction
from utils import send, handle
class WsNode:
def __init__(self, domain: str):
self.domain: str = domain
self.nodes: Set[str] = set()
self.connects: Dict[str, WebSocketServerProtocol] = dict()
self.blockchain: Blockchain = Blockchain()
self.mem_pool: Set[Transaction] = set()
async def serve(self, node: str):
ws = self.connects[node]
while True:
try:
await self.handle(ws, orjson.loads(await ws.recv()))
except websockets.ConnectionClosed:
self.nodes.remove(node)
self.connects.pop(node)
break
async def handle(self, ws, message):
switcher = {
'blockchain_len': self.handle_blockchain_len,
'blockchain': self.handle_blockchain,
'hashes': self.handle_hashes,
}
await handle(switcher, ws, message)
async def broadcast(self, _type: str, data: Any = None, nodes: List[str] = None) -> None:
sockets = self.connects.values() if nodes is None else [self.connects[node] for node in nodes]
await asyncio.gather(*[send(ws, _type, data) for ws in sockets])
async def connect_nodes(self, nodes: List[str]):
olds = [self.domain] + self.node_list
news = []
for node in filter(lambda x: x not in olds, nodes):
news.append(node)
websocket = await websockets.connect(f'ws://{node}')
asyncio.get_event_loop().create_task(self.serve(node))
self.nodes.add(node)
self.connects[node] = websocket
inputs = [(node, olds + news) for node in news] + [(node, news) for node in olds]
if len(news) > 1 or (len(news) > 0 and self.domain not in news):
await asyncio.gather(*[self.share_nodes(*args) for args in inputs])
await self.pull_longest_chain(news)
async def share_nodes(self, node: str, nodes: List[str]):
print('share', nodes, 'to', node)
if node != self.domain:
ws = self.connects[node]
await send(ws, 'connect_nodes', {'nodes': nodes})
async def share_block(self, block: Block):
await self.broadcast('add_block', {'block': block.dict()})
async def pull_longest_chain(self, nodes: List[str] = None):
await self.broadcast('get_blockchain_len', nodes=nodes)
async def add_transaction(self, transaction: Transaction):
if transaction in self.mem_pool:
return
self.mem_pool.add(transaction)
await self.broadcast('add_transaction', {'transaction': transaction.dict()})
@property
def blockchain_len(self) -> int:
return len(self.blockchain)
@property
def node_list(self) -> List[str]:
return list(self.nodes)
@property
def mem_pool_list(self) -> List[Transaction]:
return list(self.mem_pool)
async def handle_blockchain_len(self, length: int) -> str:
if length > self.blockchain_len:
return 'get_blockchain_hashes'
async def handle_hashes(self, hashes: List[str]):
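        # Added note: find the first index at which the peer's hashes diverge from our
        # chain, then request the peer's blocks starting from that index.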
start = 0
for i, (a, b) in enumerate(zip(hashes, self.blockchain.hashes)):
if a != b:
start = i
break
return 'get_blockchain', {'start': start}
async def handle_blockchain(self, chain):
if chain[-1]['id'] > self.blockchain_len:
self.blockchain.blocks[chain[0]['id']:] = [Block.parse_obj(block_data['block']) for block_data in chain]
|
repo_name: XmasApple/simple_blockchain | sub_path: ws_node.py | file_name: ws_node.py | file_ext: py | file_size_in_byte: 3,756 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id (next record): 16563057746
import numpy as np
import time, cv2, copy, os, random, sys
# Check if Running On Pi
import io
import os
def is_raspberrypi():
try:
with io.open('/sys/firmware/devicetree/base/model', 'r') as m:
if 'raspberry pi' in m.read().lower(): return True
except Exception: pass
return False
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
class image_processor:
def __init__(self, pixelValues, displayDim, image_folder): #displayName = 'generic display'
# Final Image Dimensions and Colors
self.dispWidth = displayDim[0]
self.dispHeight = displayDim[1]
self.pixelColors = pixelValues
self.image_folder = image_folder
#print('processor extablished for ' + displayName + ' dimension: ' + str(self.displayWidth) + 'x' + str(self.displayHeight) + ' pixel values: ' + pixelValues)
def newImage(self, image_title):
self.imgTitle = str(sys.path[0])+ '\DispPics' + str(image_title)
print("imported Image Title = " + self.imgTitle + " ----- of type " + str(type(self.imgTitle)))
def getImageTitle(self):
return self.imgTitle
def __displayRGB(self):
r = self.__imageResizeRGB()
plt.imshow(r)
plt.show()
# split self off
def __imageResizeRGB(self):
img = cv2.imread(self.imgTitle)
resized = cv2.resize(img, (self.dispWidth, self.dispHeight), interpolation = cv2.INTER_AREA)
return resized
def __displayBW(self):
        r = self.__imageResizeBW()
plt.imshow(r, cmap = "gray")
plt.show()
# split self off
def __imageResizeBW(self):
img = cv2.imread(self.imgTitle)
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
resized = cv2.resize(imgGray, (self.dispWidth, self.dispHeight), interpolation = cv2.INTER_AREA)
return resized
def __reduceColors(self, img, K):
n = img[0][0].size
Z = img.reshape((-1,n))
# convert to np.float32
Z = np.float32(Z)
# define criteria, number of clusters(K) and apply kmeans()
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
ret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)
# Now convert back into uint8, and make original image
center = np.uint8(center)
res = center[label.flatten()]
res2 = res.reshape((img.shape))
return res2
def __removeColors(self, img):
recorded = np.unique(img)
imgCopy = copy.deepcopy(img)
for y in range(0,len(img)):
for x in range(0,len(img[0])):
for n in range(0,len(recorded)):
if imgCopy[y][x] == recorded[n]:
imgCopy[y][x] = n
return imgCopy
def defaultConverter(self, imgTit = False, k = 4):
if imgTit is False:
self.getRandomImage()
else:
self.newImage(imgTit)
bw = self.__imageResizeBW()
lowRes = self.__reduceColors(bw, k)
remapped = self.__removeColors(lowRes)
return remapped
    # FIXME: getRandomImage is not working consistently
def getRandomImage(self):
        # Compensate if running on a real Raspberry Pi
        n = 0
        random.seed()
        print("searching for images in: " + str(sys.path[0]) + self.image_folder)
        for root, dirs, files in os.walk(str(sys.path[0]) + self.image_folder):
            print("scanning directory: " + root)
for name in files:
n += 1
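                # Added note: this is reservoir sampling -- the n-th file replaces the
                # current pick with probability 1/n, so each file ends up equally likely.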
if random.uniform(0, n) < 1:
print("got rfile")
rfile = os.path.join(root, name)
else:
print("rfile not selected")
print(rfile)
self.imgTitle = rfile
if __name__ == '__main__':
dispDim = (16, 16)
directory = "/DispPics"
ip = image_processor(('#CD853F','#8B5A2B','#008080','#D8BFD8'), dispDim, directory)
print(ip.defaultConverter(k = 3))
i = 1
while True:
time.sleep(1)
i += 1
|
repo_name: Rolling-Blocks/RB-CODE-Prototype-1 | sub_path: image_processor.py | file_name: image_processor.py | file_ext: py | file_size_in_byte: 4,098 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6
seq_id (next record): 72486084988
n = int(input("Enter the value of n: "))
for i in range(11, n+1):
# Divisible by both 3 & 7
if i % 21 == 0:
print("TipsyTopsy")
elif i % 7 == 0:
print("Topsy")
elif i % 3 == 0:
print("Tipsy")
else:
print(i)
|
repo_name: arnab7070/BeyondCoding | sub_path: Python Programs/AOT IT Workshop/Final Lab Exam Revison/question14.py | file_name: question14.py | file_ext: py | file_size_in_byte: 260 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6
seq_id (next record): 31236652781
from youtube3.youtube import *
import json
from oauth2client.tools import argparser
import re
def process_videos(workDir='.', inputFile='liked.json', recommendedFile='recommended.json',
excludedFile='excluded.json', postponedFile='postponed.json',maxCount=5):
recommended, excluded, postponed, liked = {}, {}, {}, {}
workDir, inputFile, recommendedFile, excludedFile, postponedFile = workDir or '.', inputFile or 'liked.json', \
recommendedFile or 'recommended.json', excludedFile or 'excluded.json', postponedFile or 'postponed.json'
liked = load_definition(liked, inputFile, workDir)
recommended = load_definition(recommended, recommendedFile, workDir)
excluded = load_definition(excluded, excludedFile, workDir)
    postponed = load_definition(postponed, postponedFile, workDir)
start = int(args.start) if args.start else 0
end = min(int(args.end), len(liked)) if args.end else len(liked)
youtube = Youtube(get_authenticated_service(args))
likedList = list(liked.items())[start:end]
for videoId, title in likedList:
print("Now processing %s, %s" % (videoId, title))
for relatedvideos in youtube.iterate_related_videos(videoId, maxCount):
for item in relatedvideos['items']:
rvideoId, rtitle = item['id']['videoId'], item['snippet']['title']
if rvideoId not in liked and rvideoId not in excluded and rvideoId not in postponed:
if rvideoId not in recommended:
recommended[rvideoId] = {"title": rtitle, "count": 1}
else:
recommended[rvideoId]["count"] += 1
recommendedSorted = sorted(recommended.items(), key=lambda x: x[1]["count"], reverse=True)
return recommendedSorted
def load_definition(records, inputFile, workDir):
inputFileC = workDir + '/' + inputFile
if os.path.isfile(inputFileC):
with open(inputFileC, 'r', encoding="utf-8") as f:
records = dict(json.load(f))
else:
print("Cannot find file {}".format(inputFileC))
return records
def tokenize_lists(recommended, liked, workDir, ignore_words_file):
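    # Added note: tokenize the liked and recommended titles (lower-cased, digits and
    # ignored words stripped) so that recommendations whose token list matches an
    # already-liked title can be split out as duplicates.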
def get_tokenized(str,ignored_words):
str = str.lower()
str = re.sub(r"\(.*\)", "" , str)
str = re.sub(r"[0-9]+", "", str)
strtok = re.split(r'[\[\s\-\(\)\"\\\/\|\!\&\,\.\+]',str)
strl = [s for s in strtok if s not in ignored_words and len(s) > 0]
return strl
ignored_words = []
if os.path.isfile(workDir + '/' + ignore_words_file):
with open(workDir + '/' + ignore_words_file, 'r', encoding="utf-8") as f:
ignored_words = f.read().splitlines()
ignored_words = [ i.lower() for i in ignored_words]
tok_liked = {k:get_tokenized(v,ignored_words) for k,v in liked.items()}
tok_liked_list = [get_tokenized(v, ignored_words) for k, v in liked.items()]
#print(tok_liked_list)
tok_recommended = {k: {"title": get_tokenized(v["title"],ignored_words), "count": v["count"]} for k, v in recommended.items()}
tok_duplicates = {k: {"title": v["title"], "count": v["count"]} for k, v in
tok_recommended.items() if v["title"] in tok_liked_list}
tok_no_duplicates = {k: {"title": v["title"], "count": v["count"]} for k, v in
tok_recommended.items() if v["title"] not in tok_liked_list}
return tok_duplicates, tok_no_duplicates
def save_recommended(workDir='.', recommendedFile='recommended.json', recommendedSorted={} ):
workDir, recommendedFile, recommendedSorted = workDir or '.', \
recommendedFile or 'recommended.json', recommendedSorted or {}
save_to_json(recommendedFile, recommendedSorted, workDir)
def save_to_json(outputFile, outputData, workDir):
with open(workDir + '/' + outputFile, 'w', encoding="utf-8") as f:
json.dump(outputData, f, ensure_ascii=False)
print("Saved file: {}".format(workDir + '/' + outputFile))
def retrieve_recommended(args):
recommendedSorted = process_videos(workDir=args.workDir, inputFile=args.inputFile,
recommendedFile=args.recommendedFile,
excludedFile=args.excludedFile, postponedFile=args.postponedFile,
maxCount=args.maxCount)
save_recommended(workDir=args.workDir, recommendedFile=args.recommendedFile, recommendedSorted=recommendedSorted)
return recommendedSorted
def eliminate_duplicates(args):
liked, recommended = {}, {}
liked = load_definition(liked, args.inputFile, args.workDir)
recommended = load_definition(recommended, args.recommendedFile or 'recommended.json', args.workDir)
duplicates, no_duplicates = tokenize_lists(recommended=recommended, liked=liked, workDir=args.workDir,
ignore_words_file='ignore_words.txt')
save_to_json(outputData=list([[k, v] for k, v in duplicates.items()]), outputFile='duplicates.json',
workDir=args.workDir)
save_to_json(outputData=list([[k, v] for k, v in no_duplicates.items()]), outputFile='recommended_no_dup.json',
workDir=args.workDir)
if __name__ == "__main__":
argparser.add_argument('--workDir')
argparser.add_argument('--maxCount')
argparser.add_argument('--inputFile')
argparser.add_argument('--start')
argparser.add_argument('--end')
argparser.add_argument('--recommendedFile')
argparser.add_argument('--excludedFile')
argparser.add_argument('--postponedFile')
args = argparser.parse_args()
if (args.workDir is None):
print("Usage : python recommend_videos.py --workdDir <workDir> --maxCount <maxCount> --inputFile <file>")
sys.exit(0)
if not os.path.isdir(args.workDir):
print("{} does not exist -- exiting".format(args.workDir))
sys.exit(0)
retrieve_recommended(args)
eliminate_duplicates(args)
|
repo_name: diegoami/DA-youtube-scripts | sub_path: youtube-scripts/recommend_videos.py | file_name: recommend_videos.py | file_ext: py | file_size_in_byte: 6,021 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id (next record): 14175992166
import math
import random
import copy
import numpy
import numpy as np
file = open("australian.dat", "r")
l = []
for line in file:
l.append(line.split())
wynik = []
for i in l:
wynik.append(list(map(lambda e: float(e), i)))
mojalista = wynik
def MetrykaEuklidesowa(listaA, listaB):
tmp = 0
for i in range(len(listaA)-1):
tmp += (listaA[i] - listaB[i])**2
return math.sqrt(tmp)
def zadanie1(lista):
slownik = {}
for i in lista[1:]:
if i[14] not in slownik.keys():
slownik[i[14]] = [MetrykaEuklidesowa(lista[0], i)]
else:
slownik[i[14]].append(MetrykaEuklidesowa(lista[0], i))
return slownik
# print(MetrykaEuklidesowa(mojalista[0], mojalista[3]))
# print(zadanie1(mojalista)[1.0])
m = [[1,2,3], [3,4,5], [2,4,5]]
def wskaznik(macierz, wynik=0):
indeksy = list(range(len(macierz)))
if len(macierz) == 2 and len(macierz[0]) == 2:
wartosc = macierz[0][0] * macierz[1][1] - macierz[1][0] * macierz[0][1]
return wartosc
for fc in indeksy:
macierz_kopia = macierz.copy()
macierz_kopia = macierz_kopia[1:]
wysokosc = len(macierz_kopia)
for i in range(wysokosc):
macierz_kopia[i] = macierz_kopia[i][0:fc] + macierz_kopia[i][fc + 1:]
znak = (-1) ** (fc % 2)
pod_wskaznik = wskaznik(macierz_kopia)
wynik += znak * macierz[0][fc] * pod_wskaznik
return wynik
# print(wskaznik(m))
def MetrykaEuklidesowaInaczej(listaA, listaB):
tmp = sum((elem1-elem2)**2 for elem1, elem2 in zip(listaA, listaB))
return math.sqrt(tmp)
def odlegosciOdx(lista, x):
wynik = []
for i in lista:
para = (i[-1], (MetrykaEuklidesowa(x, i)))
wynik.append(para)
return wynik
def segregacjaOdleglosci(lista):
slownik = {}
for i in lista:
if i[0] not in slownik.keys():
slownik[i[0]] = [i[1]]
else:
slownik[i[0]].append(i[1])
return slownik
def sumowanieOdleglosci(lista, k):
slownik = {}
for i in lista.keys():
tmp_list = lista[i]
tmp_list.sort()
slownik[i] = sum(tmp_list[0:k])
return slownik
def getList(dict):
list = []
for key in dict.keys():
list.append(key)
return list
def decyzja(lista):
min = lista[0.0]
dec = 0
for i in getList(lista)[1:]:
if lista[i] == min:
return None
if lista[i] < min:
min = lista[i]
dec = i
return dec
def MetrykaEuklidesowa2(listaA, listaB, czyOstatni=True):
tmp = 0
if czyOstatni:
listaA=listaA[:-1]
listaB=listaB[:-1]
v1 = np.array(listaA)
v2 = np.array(listaB)
c = v1 - v2
tmp =np.dot(c,c)
return math.sqrt(tmp)
def decyzja2(lista, x, k):
odleglosc = odlegosciOdx(lista, x)
slownik = segregacjaOdleglosci(odleglosc)
sumaodleglosci = sumowanieOdleglosci(slownik, k)
buff_lista = [(k, v) for k, v in sumaodleglosci.items()]
min = buff_lista[0][1]
dec = 0
for para in buff_lista[1:]:
if para[1] == min:
return None
if para[1] < min:
min = para[1]
dec = para[0]
return dec
argx = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
print(decyzja2(mojalista, argx, 5))
# print(MetrykaEuklidesowa(mojalista[0], mojalista[3]))
# print(MetrykaEuklidesowaInaczej(mojalista[0], mojalista[3]))
# print(segregacjaOdleglosci(odlegosciOdx(mojalista, argx)))
# print(sumowanieOdleglosci(segregacjaOdleglosci(odlegosciOdx(mojalista, argx)), 5))
# print(decyzja(sumowanieOdleglosci(segregacjaOdleglosci(odlegosciOdx(mojalista, argx)), 5)))
# print("------------------------------------------")
# print(MetrykaEuklidesowa(mojalista[0], mojalista[3]))
# print(MetrykaEuklidesowa2(mojalista[0], mojalista[3]))
# slow = {0.0: 17.9, 1.0: 1.2, 3.0: 1.2}
# print(decyzja(slow))
def calki_monte_carlo(f, a, b, n):
result = 0
for i in range(n):
result += f(random.uniform(a, b))
return (result / n) * (b - a)
#print(calki_monte_carlo(lambda x: x**2, 0, 1, 5000))
def calki_kwadraty(f, a, b, n):
step = (b - a) / n
result = 0
for i in range(n):
result += f(a + i * step) * step
return result
#print(calki_kwadraty(lambda x: x**2, 0, 1, 5000))
def segregacjaKolorowan(lista):
slownik = {}
for i in lista:
if i[-1] not in slownik.keys():
slownik[i[-1]] = [i[0:-1]]
else:
slownik[i[-1]].append(i[0:-1])
return slownik
def losoweKolorowanie(lista, iloscTypowZachowan):
    # randomly colour (label) each row
for i in lista:
i[-1] = float(random.randint(0, iloscTypowZachowan - 1))
return lista
def kMeans2(lista):
buff_lista = copy.deepcopy(lista)
slownikWynik = {}
slownik = segregacjaKolorowan(buff_lista)
punktyCiezkosci = {}
for klasa in slownik:
minimalna = float(math.inf)
for element in slownik[klasa]:
sumaOdleglosci = 0
for i in range(len(slownik[klasa])):
sumaOdleglosci += MetrykaEuklidesowa2(element, slownik[klasa][i])
sredniaOdleglosci = sumaOdleglosci / len(slownik[klasa])
if sredniaOdleglosci < minimalna:
punktyCiezkosci[klasa] = (element, sredniaOdleglosci)
minimalna = sredniaOdleglosci
for klasa in slownik:
for element in slownik[klasa]:
minimalna = float(math.inf)
punkt = ()
for klasaCiezkosci in punktyCiezkosci:
odlegloscDoPunktuCiezkosci = MetrykaEuklidesowa2(element, punktyCiezkosci[klasaCiezkosci][0])
if odlegloscDoPunktuCiezkosci < minimalna:
punkt = punktyCiezkosci[klasaCiezkosci]
minimalna = odlegloscDoPunktuCiezkosci
for klasaCiezkosci in punktyCiezkosci:
if punkt == punktyCiezkosci[klasaCiezkosci]:
if klasaCiezkosci not in slownikWynik.keys():
slownikWynik[klasaCiezkosci] = [element]
else:
slownikWynik[klasaCiezkosci].append(element)
listaWynik = []
for klasa in slownikWynik:
for element in slownikWynik[klasa]:
element.append(klasa)
listaWynik.append(element)
if listaWynik == buff_lista:
return listaWynik
else:
return kMeans2(listaWynik)
# print(mojalista)
# kopia_mojalista = copy.deepcopy(mojalista)
# listaPoKmeans = kMeans2(losoweKolorowanie(kopia_mojalista, 2))
# mojalista.sort()
# print(mojalista)
# listaPoKmeans.sort()
# print(listaPoKmeans)
def sredniaArytmetyczna(listaA, czyOstatni=True):
if czyOstatni:
listaA=listaA[:-1]
v1 = np.array(listaA)
ilosc = len(v1)
srednia = sum(v1)/ilosc
return srednia
def sredniaArytmetycznaWektorowo(listaA, wektorJedynek):
v1 = np.array(listaA)
tmp = np.dot(v1, wektorJedynek)
srednia = tmp/len(v1)
return srednia
c = [1, 1, 1, 1]
print(sredniaArytmetycznaWektorowo([1,2,5,6], c))
#print(sredniaArytmetyczna([1,2,3,4,5],False))
def wariancja(listaA, czyOstatni=True):
srednia = sredniaArytmetyczna(listaA, czyOstatni)
if czyOstatni:
listaA=listaA[:-1]
v1 = np.array(listaA)
sum = 0
for i in v1:
sum += (i - srednia)**2
war = sum/len(v1)
return war
def wariancjaWektorowo(listaA, c):
sr = sredniaArytmetycznaWektorowo(listaA, c)
v1 = np.array(listaA)
vectorOnes = np.ones(len(listaA))
v2 = v1 - sr * vectorOnes
c = np.dot(v2, v2)
return c / len(listaA)
# print(wariancjaWektorowo([1,2,5,6],c))
def odchylenieStandardowe(listaA, czyOstatni=True):
war = wariancja(listaA, czyOstatni)
return math.sqrt(war)
# print(odchylenieStandardowe([7, 4, -2], False))
def sredniaWektorow(lista, czyOstatni=True):
lista_wynik = []
if czyOstatni:
for elem in lista:
elem = elem[:-1]
lista_wynik.append(elem)
else:
lista_wynik = copy.deepcopy(lista)
return [sum(x) / len(x) for x in zip(*lista_wynik)]
#print(sredniaWektorow([[1, 2, 3], [1, 2, 3], [6, 9, 4], [4, 6, 1]], True))
def wariancjaWektorow(lista, czyOstatni=True):
srednia = sredniaWektorow(lista, czyOstatni)
lista_buff = []
if czyOstatni:
for elem in lista:
elem = elem[:-1]
lista_buff.append(elem)
else:
lista_buff = copy.deepcopy(lista)
return [
sum([(x - srednia[i]) ** 2 for i, x in enumerate(elem)]) / len(elem)
for elem in lista_buff
]
# print(wariancjaWektorow([[1, 2, 3], [2, 4, 3], [6, 9, 4], [5, 1, 4]], True))
def odchylenieStandardoweWektorow(lista, czyOstatni=True):
return [math.sqrt(x) for x in wariancjaWektorow(lista, czyOstatni)]
#print(odchylenieStandardoweWektorow([[1, 2, 3], [2, 4, 3], [6, 9, 4]], False))
#(2,1)
#(5,2)
#(7,3)
#(8,3)
# Expected result: beta0 = 2/7, beta1 = 5/14
def regersjaLiniowa(list):
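    # Added note: this solves the normal equations beta = (X^T X)^(-1) X^T y for a
    # simple linear regression, with a column of ones prepended to X for the intercept.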
x = np.array([i[0] for i in list])
x_transposed = np.array([
np.ones(len(x)),
x
])
x = np.transpose(x_transposed)
y = np.transpose(np.array([i[1] for i in list]))
x_t = np.linalg.inv(np.dot(x_transposed, x))
r = np.dot(x_t, x_transposed)
r = np.dot(r, y)
return r
list = [[2, 1], [5, 2], [7, 3], [8, 3]]
A = np.array([[1, 0, 2], [2, 1, 0], [0, 3, 1]])
A1 = np.array([[1, 0, 1, 0, 1], [1, 1, 0, 1, 0], [0, 1, 1, 0, 0], [0, 1, 0, 1, 1], [1, 0, 0, 1, 1]])
def funkcajaRzA(list, Q):
return np.dot(np.transpose(Q), list)
def funkcjaProj(vector_v, vecor_u):
L = numpy.dot(vector_v, vecor_u)
M = numpy.dot(vecor_u, vecor_u)
projekcja = (L/M) * vecor_u
return projekcja
def funkcajaQzA(list):
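    # Added note: classical Gram-Schmidt -- each column of the input has its projections
    # onto the previously orthogonalised columns subtracted, and the normalised results
    # become the columns of Q.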
dlugosc_u1 = math.sqrt(numpy.dot(list[0:,0],list[0:,0]))
e1 = (1/dlugosc_u1) * list[0:,0]
Q = np.array([e1])
U = np.array([list[0:,0]])
U = np.transpose(U)
    for i in range(0, np.shape(list)[1]-1):
proj_buff = 0
for y in range(i+1):
p = funkcjaProj(list[0:, i+1], U[0:, y])
proj_buff += p
u = list[0:, i+1] - proj_buff
U = np.transpose(U)
U = numpy.append(U, [u], axis=0)
U = np.transpose(U)
dlugosc_u = math.sqrt(numpy.dot(u, u))
if dlugosc_u == 0:
e = u
else:
e = (1/dlugosc_u) * u
Q = numpy.append(Q, [e], axis=0)
return np.transpose(Q)
print("------------------------------------")
Q = funkcajaQzA(A)
Q = np.matrix.round(Q, 3)
print("Matrix Q from A:")
print(Q)
print("---------------------------")
R = funkcajaRzA(A, funkcajaQzA(A))
R = np.matrix.round(R, 3)
print("Matrix R from A:")
print(R)
def A_nastepna(A):
Q = funkcajaQzA(A)
return np.dot(np.dot(np.transpose(Q), A), Q)
def czyMacierzGornoTrojkatna(list):
rozmiar = np.shape(list)[1]
if (np.diag(list)-np.transpose(np.dot(list, np.ones((rozmiar, 1))))).all() > 0.00001:
return True
else:
return False
def wartosciWlasne(list):
buff_A = copy.deepcopy(list)
while czyMacierzGornoTrojkatna(buff_A):
buff_A = A_nastepna(buff_A)
return np.diag(buff_A)
A2 = np.array([[1, 2, 3],
[4, 1., 5],
[7, 5., 1]])
test = np.array([[5, 2, 4],
[2, 4, 0],
[4, 0, 4]])
print("Eigenvalues of A2:")
print(wartosciWlasne(A2))
def gaussJordan(list):
rozmiar = np.shape(list)[0]
wektor = []
for i in range(rozmiar):
if list[i][i] == 0.0:
            return "Zero detected!"
for j in range(rozmiar):
if i != j:
ratio = list[j][i] / list[i][i]
for k in range(rozmiar + 1):
list[j][k] = list[j][k] - ratio * list[i][k]
for x in range(rozmiar):
wektor.append(list[x][rozmiar] / list[x][x])
return wektor
def odejmoanieWarotsciWlasnej(list, wartoscWlasna):
buff_list = copy.deepcopy(list)
rozmiar = np.shape(list)[1]
for i in range(rozmiar):
for j in range(rozmiar):
if i == j:
buff_list[i][j] = list[i][j] - wartoscWlasna
return buff_list
def dodanieKolumnyZer(list, wartosciWlasne):
wynik = {}
rozmiar = np.shape(list)[1]
zera = np.zeros((rozmiar, 1))
x = 0
for i in wartosciWlasne:
wynik[x] = np.hstack((odejmoanieWarotsciWlasnej(list, i), zera))
x+=1
return wynik
def wektoryWlasne(list, watosci_wlasne):
macierze = dodanieKolumnyZer(list, watosci_wlasne)
wektory = []
for i in macierze:
macierze[i] = np.delete(macierze[i], len(macierze) - 1, 0)
wektory.append((np.round(gaussJordan(macierze[i]) + [-1.], 3) * -1).tolist())
return wektory
print("Eigenvectors of A2:")
print(wektoryWlasne(A2, wartosciWlasne(A2)))
A3 = np.array([[1,1,1,0,1,0,0,0],
[1,1,1,0,-1,0,0,0],
[1,1,-1,0,0,1,0,0],
[1,1,-1,0,0,-1,0,0],
[1,-1,0,1,0,0,1,0],
[1,-1,0,1,0,0,-1,0],
[1,-1,0,-1,0,0,0,1],
[1,-1,0,-1,0,0,0,-1]])
def czyOrtogonalnaMacierz(macierz):
macierz_buff = np.dot(np.transpose(macierz), macierz)
x = np.count_nonzero(macierz_buff - np.diag(np.diagonal(macierz_buff)))
if x == 0:
return True
else:
return False
def ortonormalizacja(macierz):
macierz = np.transpose(macierz)
macierz_buff = []
for i in macierz:
dlugosc_wektora = math.sqrt(np.dot(i,i))
print(dlugosc_wektora)
macierz_buff.append(i/dlugosc_wektora)
macierz_wynik = np.dot(np.transpose(macierz_buff), macierz_buff)
    return macierz_buff, macierz_wynik  # macierz_buff: the orthonormalised matrix; macierz_wynik: B* (B^-1)
# print(czyOrtogonalnaMacierz(A3))
wektorA =np.array([8,6,2,3,4,6,6,5])
def Btr_przez_wektor_A(macierz ,wektorA):
return np.dot(macierz, wektorA)
# macierz_ortonormalna, jednostkowa = ortonormalizacja(A3)
# print(np.round(jednostkowa,3))
# print("-------------------------------")
# print(np.round(macierz_ortonormalna,2))
# print("-------------------------------")
# print(np.round(Btr_przez_wektor_A(macierz_ortonormalna,wektorA), 3))
A4 = np.array([[1, 2,0],
[2, 0, 2]])
def SVD(macierz):
row, col = np.shape(macierz)
if col >=row:
AtA = np.dot(np.transpose(macierz),macierz)
wartosci_wlanse = np.sort(np.round(np.linalg.eig(AtA)[0], col))[::-1]
wekrory_v_bezdlugosci = wektoryWlasne(AtA, wartosci_wlanse)
wektory_v = []
for i in range(0, col):
dlugosc = np.round(math.sqrt(np.dot(wekrory_v_bezdlugosci[i],wekrory_v_bezdlugosci[i])),3)
wektor = [x * 1/dlugosc for x in wekrory_v_bezdlugosci[i]]
wektory_v.append(wektor)
wektory_u = []
for j in range(0, row):
if wartosci_wlanse[j] == 0:
                return "Cannot be computed with this method"
else:
wektor_u = np.dot(macierz,wektory_v[j]) * 1/math.sqrt(wartosci_wlanse[j])
wektory_u.append(wektor_u)
E = np.zeros((row, col))
for y in range(0, row):
E[y][y] = math.sqrt(wartosci_wlanse[y])
else:
AAt = np.dot(macierz, np.transpose(macierz))
wartosci_wlanse = np.sort(np.round(np.linalg.eig(AAt)[0], row))[::-1]
wekrory_u_bezdlugosci = wektoryWlasne(AAt, wartosci_wlanse)
wektory_u = []
for i in range(0, row):
dlugosc = np.round(math.sqrt(np.dot(wekrory_u_bezdlugosci[i],wekrory_u_bezdlugosci[i])),3)
wektor = [x * 1/dlugosc for x in wekrory_u_bezdlugosci[i]]
wektory_u.append(wektor)
wektory_v = []
for j in range(0, col):
if wartosci_wlanse[j] == 0:
                return "Cannot be computed with this method"
else:
wektor_v = np.dot(np.transpose(macierz), wektory_u[j]) * 1 / math.sqrt(wartosci_wlanse[j])
wektory_v.append(wektor_v)
E = np.zeros((row, col))
for y in range(0, col):
E[y][y] = math.sqrt(wartosci_wlanse[y])
print(E)
Vt = np.zeros((col, col))
for x in range(0, col):
for k in range(0, col):
Vt[x][k] = wektory_v[x][k]
U = np.zeros((row, row))
for x in range(0, row):
for k in range(0, row):
U[x][k] = wektory_u[x][k]
U = np.transpose(U)
return U, E, Vt
U, E, Vt = SVD(A4)
print("Macierz U")
print(U)
print("Macierz Epsilon")
print(E)
print("Macierz Vt")
print(Vt)
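# --- Illustrative check (not part of the original script) ---
# If the decomposition above is correct, multiplying the factors back together
# should reproduce A4 up to rounding error (np and A4 are the objects defined above).
reconstruction = np.dot(U, np.dot(E, Vt))
print("U * E * Vt:")
print(np.round(reconstruction, 3))
print("Max reconstruction error:", np.max(np.abs(reconstruction - A4)))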
|
Tomasz-Wegrzynowski/MetodyInzWiedzy
|
main.py
|
main.py
|
py
| 16,554 |
python
|
pl
|
code
| 0 |
github-code
|
6
|
8099610278
|
from datetime import datetime
import os
# from dataclasses import dataclass
from sensor.constant.trainingPipeline_consts import *
class TrainingPipelineConfig:
    def __init__(self, timestamp=None):
        # a datetime.now() default argument would be evaluated once at import time; resolve it at call time instead
        timestamp = (timestamp or datetime.now()).strftime("%m_%d_%Y_%H_%M_%S")
self.pipeline_name: str = PIPELINE_NAME
self.artifact_dir: str = os.path.join(ARTIFACT_DIR, timestamp)
self.timestamp: str = timestamp
class DataIngestionConfig:
def __init__(self, training_pipeline_config:TrainingPipelineConfig):
self.data_ingestion_dir: str = os.path.join(
training_pipeline_config.artifact_dir, DATA_INGESTION_DIR_NAME
)
self.feature_store_file_path: str = os.path.join(
self.data_ingestion_dir, DATA_INGESTION_FEATURE_STORE_DIR, FILE_NAME
)
self.training_file_path: str = os.path.join(
self.data_ingestion_dir, DATA_INGESTION_INGESTED_DIR, TRAIN_FILE_NAME
)
self.testing_file_path: str = os.path.join(
self.data_ingestion_dir, DATA_INGESTION_INGESTED_DIR, TEST_FILE_NAME
)
self.train_test_split_ratio: float = DATA_INGESTION_TRAIN_TEST_SPLIT_RATION
self.collection_name: str = DATA_INGESTION_COLLECTION_NAME
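# --- Illustrative usage sketch (not part of the original module) ---
# Shows how the two config objects are meant to be chained. The constant values
# (PIPELINE_NAME, ARTIFACT_DIR and the DATA_INGESTION_* names) are assumed to be
# defined in sensor.constant.trainingPipeline_consts as plain strings/floats.
if __name__ == "__main__":
    training_pipeline_config = TrainingPipelineConfig()
    data_ingestion_config = DataIngestionConfig(training_pipeline_config)
    print(data_ingestion_config.feature_store_file_path)
    print(data_ingestion_config.train_test_split_ratio)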
|
sverma1999/sensor-fault-detection
|
sensor/entity/config_entity.py
|
config_entity.py
|
py
| 1,269 |
python
|
en
|
code
| 1 |
github-code
|
6
|
70945141308
|
# Find an address by neighborhood (dong) name
try:
dong = input('동이름 입력 :')
#print(dong)
with open('zipcode.txt', mode='r', encoding='euc-kr') as f:
        line = f.readline() # readline reads a single line; readlines reads all lines
#print(line)
while line:
            lines = line.split('\t') # the delimiter is a tab
#print(lines)
if lines[3].startswith(dong):
#print(lines)
print(lines[0] + ' ' + lines[1] + ' ' \
+ lines[2] + ' ' + lines[3] + ' ' + lines[4])
line = f.readline()
except Exception as e:
print('err : ', e)
|
kangmihee/EX_python
|
pypro1/pack2/fio3.py
|
fio3.py
|
py
| 698 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27260039230
|
"""We are the captain of our ships, and we stay 'till the end. We see our stories through.
"""
"""290. Word Pattern
"""
class Solution:
def wordPattern(self, pattern: str, str: str) -> bool:
word_map, pattern_map = {}, {}
words = str.split(" ")
n = len(words)
m = len(pattern)
if m != n:
return False
for i in range(n):
if pattern_map.get(pattern[i], -1) != word_map.get(words[i], -1):
return False
pattern_map[pattern[i]] = word_map[words[i]] = i
return True
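# --- Illustrative usage (not part of the original solution file) ---
# Expected behaviour on the classic examples for this problem:
if __name__ == "__main__":
    s = Solution()
    print(s.wordPattern("abba", "dog cat cat dog"))   # True
    print(s.wordPattern("abba", "dog cat cat fish"))  # False
    print(s.wordPattern("aaaa", "dog cat cat dog"))   # False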
|
asperaa/back_to_grind
|
bactracking/290. Word Pattern.py
|
290. Word Pattern.py
|
py
| 582 |
python
|
en
|
code
| 1 |
github-code
|
6
|
73886305466
|
"""
Author: Huang liuchao
Contact: [email protected]
Datetime: 2020/9/16 15:55
Software: PyCharm
File description:
"""
import hlc_common_utils as hcu
import onenet_warning_utils as owu
import os
import openpyxl
from openpyxl import load_workbook
import pandas as pd
from pathlib import Path
import win32com.client as win32
def test_files(dir_path):
    files_name = os.listdir(dir_path)  # get all file names in the directory
print (files_name)
# file_path = os.path.join(dir_path, files_name)
print('在设备-厂家-网元表的基础上,求出单一网元的唯一告警.........')
    # print(file_path)  # bug in the original: file_path is never defined here, so this line would raise NameError
def get_onenet_unique_warning(dir_path,to_dir_path):
    files_name=os.listdir(dir_path)  # get all file names in the directory
for file_name in files_name:
file_path=os.path.join(dir_path,file_name)
print(file_path)
get_onenet_unique_warning0(file_path,file_name,to_dir_path)
print('全部单一告警统计完毕!')
def get_onenet_unique_warning0 (file_path,file_name,to_dir_path):
# files_name=os.listdir(dir_path)#得到所有文件的文件名
# file_path=os.path.join(dir_path,files_name)
print('在设备-厂家-网元表的基础上,求出单一网元的唯一告警.........')
# print(file_path)
# for file in files_name:
    # read the input file
df = pd.read_excel(file_path, sheet_name=None)
sheets_name = list(df)
    # get the header row, used later when the header is re-added
# df_head = pd.read_excel(file_path, sheet_name=1)
df_head_list = [['告警来源', '对象名称', '(告警)地市名称', '区县', '机房名称', '网元名称', '设备类型', '设备厂家', '网管告警级别', '告警标题', '厂家原始告警级别', '告警发生时间', '告警清除时间', '工单号', '派单所使用的规则', '派单状态', '未派单原因', '派单失败原因', '工单状态', '告警备注', '告警指纹fp0', '告警指纹fp1', '告警指纹fp2', '告警指纹fp3', '告警清除状态', '清除告警指纹fp0', '清除告警指纹fp1', '清除告警指纹fp2', '清除告警指纹fp3']]
# print(df_head_list)
#告警来源 对象名称 (告警)地市名称 区县 网元名称 设备厂家 网管告警级别 告警标题 厂家原始告警级别 告警发生时间 告警清除时间 工单号 派单所使用的规则 派单状态 未派单原因 派单失败原因 工单状态 告警备注 告警指纹fp0 告警指纹fp1 告警指纹fp2 告警指纹fp3 告警清除状态 清除告警指纹fp0 清除告警指纹fp1 清除告警指纹fp2 清除告警指纹fp3
    remeber_nrows=[]  # record the rows that are kept
nrows_values = []
for i in range (1,len(sheets_name)):
onenet_list = []
others_list = []
        # build onenet_list and others_list
for j in range(1,len(sheets_name)):
if(i==j):
df_onenet = pd.read_excel(file_path, sheet_name=i)
# print('获取onenet_list')
# print(sheets_name[5])
onenet_list=df_onenet['告警标题'].values.tolist()
# print(onenet_list)
else:
df_others_net = pd.read_excel(file_path,sheet_name=j)
# print('获取others_net_list')
others_list += df_others_net['告警标题'].values.tolist()
# if len(others_list)==0:
#
# break
            # compare onenet_list with others_list to find the rows with unique alarms
nrows=[]
for j in range (len(onenet_list)):
for k in range (len(others_list)):
if onenet_list[j] not in others_list:
nrows.append(j)
remeber_nrows.append(j)
nrows=list(set(nrows))#得到唯一告警的行数
remeber_nrows=list(set(remeber_nrows))
            # write the selected rows into the first sheet of the output file
if len(nrows)==0:
print('%s网元不存在单一告警'%sheets_name[i])
else:
print('将%s网元存在单一告警并将单一告警追加写入第一个sheet中'%sheets_name[i])
df_read_net = pd.read_excel(file_path, sheet_name=i)
for n in range (len(nrows)):
nrows_values.append(df_read_net.iloc[nrows[n]])
nrows_values= df_head_list+nrows_values
df_write_net = pd.DataFrame(nrows_values)
to_file_name = '单一告警'+file_name
to_file_path = os.path.join(to_dir_path,to_file_name)
df_write_net.to_excel(to_file_path,index=None,header=None)
print('remeber_nrows=', remeber_nrows)
# print(nrows_values)
print('单一告警写入完毕')
# print('onenet_list的长度',len(onenet_list))
# print('others_list的长度',len(others_list))
if __name__ == '__main__':
    # for manual testing of the main entry point
    abspath = os.path.abspath('../../../data')  # set the relative (base) path
dir_path = abspath + r'\onenet_warning_data1\get_equipment_factory_netcell'
to_dir_path = abspath + r'\onenet_warning_data1\get_onenet_unique_warning'
get_onenet_unique_warning(dir_path,to_dir_path)
|
hlc0216/alarm_think
|
venv/Include/onenet_warning1/get_onenet_unique_warning.py
|
get_onenet_unique_warning.py
|
py
| 5,097 |
python
|
zh
|
code
| 1 |
github-code
|
6
|
35860574373
|
# Anything encountered around the map
objects = {
# things to potentially find at different map locations
"empty" : {
"desc" : "nothing here"
},
"chest" : {
"desc" : "a treasure chest full of valuables"
},
"enemy" : {
"desc" : "some armed and hostile warriors"
},
"ally" : {
"desc" : "some friendly locals who provide supplies"
},
"oasis" : {
"desc" : "an oasis in the middle of the desert"
}
}
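# --- Illustrative usage sketch (not part of the original module) ---
# The dict above is keyed by the object id stored at a map location; a hypothetical
# caller in the game loop could look up the description like this:
if __name__ == "__main__":
    found_here = "chest"  # e.g. whatever the map generator placed at the player's tile
    print("You find " + objects[found_here]["desc"] + ".")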
|
bpoulin7/ben_p_rpg_map
|
objects.py
|
objects.py
|
py
| 479 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24543736299
|
import sys
data = sys.stdin.read().strip()
total = 0
for row in data.split('\n'):
    smallest = None
    largest = None
    for value in row.split():
        value = int(value)
        if smallest is None or value < smallest:
            smallest = value
        if largest is None or value > largest:
            largest = value
    total += largest - smallest
print('Sum:', total)
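# --- Illustrative check (not part of the original script) ---
# The same max-minus-min logic on a small hard-coded sheet: the row differences
# are 8, 4 and 6, so the expected total is 18. (The script above reads from stdin.)
sample_total = 0
for sample_row in "5 1 9 5\n7 5 3\n2 4 6 8".split('\n'):
    numbers = sorted(int(v) for v in sample_row.split())
    sample_total += numbers[-1] - numbers[0]
print('Sample sum:', sample_total)  # 18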
|
jonaskrogell/adventofcode2017
|
2.py
|
2.py
|
py
| 335 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23268756998
|
def check_true_matrix(column_, matrix_):
for line in matrix_:
if len(line) != column_:
raise ValueError('Не правильно введены данные(Data entered incorrectly)')
if __name__ == '__main__':
line_A, column_A = map(int, (input("Количество строк и столбцов матрицы A(Matrix Size A): ").split()))
matrix_A = [ list(map(int, input(f'Введите {i+1} строку матрицы А: ').split())) for i in range(line_A) ]
check_true_matrix(column_A, matrix_A)
line_B, column_B = map(int, (input("Количество строк и столбцов матрицы B(Matrix Size B): ").split()))
matrix_B = [ list(map(int, input(f'Введите {i+1} строку матрицы B: ').split())) for i in range(line_B) ]
check_true_matrix(column_B, matrix_B)
    # matrices can only be multiplied when A has as many columns as B has rows
    if column_A != line_B:
raise ValueError('Не правильно введены данные(Data entered incorrectly)')
new_matrix_multiplication = [ [0]*column_B for _ in range(line_A) ]
for line_first_matrix in range( line_A ):
for column_second_matrix in range( column_B ):
a1 = matrix_A[line_first_matrix]
a2 = [ matrix_B[i][column_second_matrix] for i in range( line_B ) ]
new_matrix_multiplication[line_first_matrix][column_second_matrix] = sum( a1[i]*a2[i] for i in range(len(a1)) )
for k in new_matrix_multiplication:
print(*k)
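    # --- Illustrative cross-check (not part of the original script) ---
    # The hand-rolled product can be verified against numpy's matmul. This assumes
    # numpy is installed, which the original script does not require.
    try:
        import numpy as np
        print('numpy agrees:', np.array_equal(np.array(matrix_A) @ np.array(matrix_B),
                                              np.array(new_matrix_multiplication)))
    except ImportError:
        pass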
|
Salazhiev/CalculatorMatrix
|
multiplication_matrix.py
|
multiplication_matrix.py
|
py
| 1,479 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36321955212
|
import os
import re
import sys
import glob
import shutil
import pdftotext
def extract_Text_pdf(pdfdir):
print("Starting Text Extraction for pdf files......")
number_of_files = str(len([item for item in os.listdir(pdfdir) if os.path.isfile(os.path.join(pdfdir, item))]))
print("Processing ("+ number_of_files + ") .pdf files.....")
os.chdir(pdfdir)
file_list2 = []
for filename in glob.glob("*.pdf"):
        #Get the filename without the extension for naming later
base=os.path.basename(filename)
filenameNoExt = os.path.splitext(base)[0]
#Create a list of the text files
file_list2.append("pdf_"+filenameNoExt+".txt")
with open(filename, "rb") as f:
pdf = pdftotext.PDF(f)
filecontents = re.sub(' +', ' ', " ".join(pdf).replace("\n"," ").strip())
#Remove Non ASCII characters
filecontents2 = re.sub(r'[^\x00-\x7f]',r'', filecontents)
# content_list = list(filter(None, content_list))
with open ("pdf_"+filenameNoExt+".txt","a")as fp1:
fp1.write(filecontents2)
fp1.close()
print("Text extraction completed for ("+ number_of_files + ") .pdf files ********************")
pdf_files = 'to_process/'
extract_Text_pdf(pdf_files)
|
mstatt/Udemy_HighSpeedDataAnalysis
|
3_PDF_Text_Extraction/pdf_text_extraction.py
|
pdf_text_extraction.py
|
py
| 1,272 |
python
|
en
|
code
| 2 |
github-code
|
6
|
25002203831
|
from osv import fields, osv
class copy_verification_lines(osv.osv_memory):
"""
Copy Verification Lines
"""
_name = "copy.verification.lines"
_description = "Copy Verification Lines"
_columns = {
'audit_src': fields.many2one('mgmtsystem.audit','Choose audit'),
}
def copy(self, cr, uid, ids, context=None):
# Code to copy verification lines from the chosen audit to the current one
if context is None:
context = {}
audit_proxy = self.pool.get(context.get('active_model'))
verification_line_proxy = self.pool.get('mgmtsystem.verification.line')
src_id = self.read(cr, uid, ids, [], context=context)[0]['audit_src']
for line in audit_proxy.browse(cr, uid, src_id, context=context).line_ids:
verification_line_proxy.create(cr,uid, {
'seq' : line.seq,
'name' : line.name,
'audit_id' : context['active_id'],
'procedure_id' : line.procedure_id.id,
'is_conformed' : False,
}, context=context)
return {'type':'ir.actions.act_window_close'}
copy_verification_lines()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
factorlibre/openerp-extra-6.1
|
mgmtsystem_audit/wizard/copy_verification_lines.py
|
copy_verification_lines.py
|
py
| 1,235 |
python
|
en
|
code
| 9 |
github-code
|
6
|
28839405440
|
'''
Eduardi Cruz DIV B
Exercise 03
It is the Gran Hermano finale and the production team asks us for a program to count
the viewers' votes and find out which contestant will win the game.
The finalist contestants are: Nacho, Julieta and Marcos.
The viewer must enter:
● Voter's name
● Voter's age (must be over 13)
● Voter's gender (male, female, other)
● The name of the contestant who gets the positive vote.
It is not known how many votes will come in during the show.
The program must report to the user:
A. The average age of the female voters
B. The number of male voters between 25 and 40 years old who voted for
Nacho or Julieta.
C. The name of the youngest voter who voted for Nacho.
D. The name of each contestant and the percentage of votes received.
E. The name of the contestant who won the reality show (the one with the most votes)
'''
def mostrar():
respuesta = "si"
acu_edad_femenino = 0
con_edad_femenino = 0
con_nacho_julieta = 0
con_votos_total = 0
con_votos_nacho = 0
con_votos_julieta = 0
con_votos_marcos = 0
porcentaje_nacho = 0
porcentaje_julieta = 0
porcentaje_marcos = 0
msj_porcentaje_nacho = ""
msj_porcentaje_julieta = ""
msj_porcentaje_marcos = ""
ban_votante_mas_joven = True
votante_mas_joven_nombre = ""
votante_mas_joven_edad = 0
msj_votante_mas_joven = ""
participante_ganador = ""
while respuesta == "si":
nombre_votante = input("Ingrese su nombre: ")
edad_votante = int(input("Ingrese su edad: "))
while edad_votante < 14:
edad_votante = int(input("ERROR: Reingrese su edad: "))
genero_votante = input("Ingrese genero: masculino, femenino, nobinario ")
while genero_votante != "masculino" and genero_votante != "femenino" and genero_votante != "nobinario":
genero_votante = input("ERROR: Reingrese genero: masculino, femenino, nobinario ")
nombre_participante = input("Ingrese nombre del participante: Nacho, Julieta y Marcos ")
while nombre_participante != "Nacho" and nombre_participante != "Julieta" and nombre_participante != "Marcos":
nombre_participante = input("ERROR: Reingrese nombre del participante: Nacho, Julieta y Marcos ")
respuesta = input("Quiere seguir votando? ")
        # A. The average age of the female voters
if genero_votante == "femenino":
acu_edad_femenino = acu_edad_femenino + edad_votante
con_edad_femenino += 1
        # B. Number of male voters between 25 and 40 who voted for Nacho or Julieta.
if genero_votante == "masculino":
if (edad_votante > 24 and edad_votante < 41) and (nombre_participante == "Nacho" or nombre_participante == "Julieta"):
con_nacho_julieta += 1
        # D. Name of each contestant and the percentage of votes received.
con_votos_total = con_votos_total + 1
# match nombre_participante:
# case "Nacho":
# con_votos_nacho = con_votos_nacho + 1
# # C. Nombre del votante más joven que votó a Nacho.
# if ban_votante_mas_joven == True or edad_votante < votante_mas_joven_edad:
# votante_mas_joven_nombre = nombre_votante
# votante_mas_joven_edad = edad_votante
# ban_votante_mas_joven = False
# case "Julieta":
# con_votos_julieta = con_votos_julieta + 1
# case "Marcos":
# con_votos_marcos = con_votos_marcos + 1
if nombre_participante == "Nacho":
con_votos_nacho = con_votos_nacho + 1
            # C. Name of the youngest voter who voted for Nacho.
if ban_votante_mas_joven == True or edad_votante < votante_mas_joven_edad:
votante_mas_joven_nombre = nombre_votante
votante_mas_joven_edad = edad_votante
ban_votante_mas_joven = False
elif nombre_participante == "Julieta":
con_votos_julieta = con_votos_julieta + 1
else:
con_votos_marcos = con_votos_marcos + 1
    # END OF THE WHILE LOOP
    # A. FEMALE AVERAGE AGE
if con_edad_femenino > 0:
promedio_femenino = acu_edad_femenino / con_edad_femenino
else:
promedio_femenino = "NO HUBO"
    # D. Name of each contestant and the percentage of votes received.
    # TOTAL ______ 100%
    # 10 VOTES ______ X = 10 * 100 / TOTAL = %
if con_votos_nacho > 0:
porcentaje_nacho = con_votos_nacho * 100 / con_votos_total
msj_porcentaje_nacho = f"Porcentaje de votos de Nacho: {porcentaje_nacho}%"
        # C. Name of the youngest voter who voted for Nacho.
msj_votante_mas_joven = f"El votante mas joven que voto a nacho: {votante_mas_joven_nombre} {votante_mas_joven_edad}"
else:
msj_porcentaje_nacho = "Porcentaje de votos de Nacho: 0"
msj_votante_mas_joven = "El votante mas joven que voto a nacho: NO HUBO"
if con_votos_julieta > 0:
porcentaje_julieta = con_votos_julieta * 100 / con_votos_total
        msj_porcentaje_julieta = f"Porcentaje de votos de Julieta: {porcentaje_julieta}%"
else:
msj_porcentaje_julieta = "Porcentaje de votos de Julieta: 0"
if con_votos_marcos > 0:
porcentaje_marcos = con_votos_marcos * 100 / con_votos_total
msj_porcentaje_marcos = f"Porcentaje de votos de Marcos: {porcentaje_marcos}%"
else:
msj_porcentaje_marcos = "Porcentaje de votos de Marcos: 0"
    # E. The name of the contestant who won the reality show (the one with the most votes)
    if con_votos_marcos > con_votos_nacho and con_votos_marcos > con_votos_julieta:
participante_ganador = "El ganador es Marcos"
elif con_votos_nacho > con_votos_julieta:
participante_ganador = "El ganador es Nacho"
else:
participante_ganador = "El ganador es Julieta"
print(f"El promedio de mujeres que votaron: {promedio_femenino}")
print(f"Personas entre 25-40 masculinos que votaron a Nacho y Julieta: {con_nacho_julieta}")
print(msj_porcentaje_nacho)
print(msj_porcentaje_julieta)
print(msj_porcentaje_marcos)
print(msj_votante_mas_joven)
print(participante_ganador)
mostrar()
|
EduardoCruzfm/UTN
|
programacion_1/ejercicios_phyton/ejercicio_03.py
|
ejercicio_03.py
|
py
| 6,445 |
python
|
es
|
code
| 0 |
github-code
|
6
|
32584103329
|
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
from dash.dependencies import Input, Output
import dash_bootstrap_components as dbc
from app import app
from apps import theme_explorer as te, text
import util
"""
=====================================================================
Helper functions and components
"""
df = px.data.gapminder()
code = util.get_code_file("dash_bootstrap_templates_app.py")
copy_code_div = util.get_copy_code_div(code, id="copy_template_code")
# make control panel
use_templates = dbc.RadioItems(
options=[
{"label": "Use figure templates from dash-bootstrap-templates", "value": 1},
{"label": "Use Plotly default figure template", "value": 2},
],
value=1,
id="use_figure_template",
)
control_panel_text = dcc.Markdown(
text.dash_bootstrap_templates_text, className="border mb-5 p-4"
)
# needed because the theme dropdown also updates "css" on Theme Explorer page but not here
dummy_output = html.Div(id="css", className='d-none')
control_panel = [control_panel_text, te.boostrap_card, use_templates, dummy_output]
carousel = dbc.Carousel(
ride="carousel",
items=[
{
"key": "1",
"src": "https://user-images.githubusercontent.com/72614349/129459807-30c22ffe-7a8c-44b9-9555-6cfd50ec355b.png",
},
{
"key": "2",
"src": "https://user-images.githubusercontent.com/72614349/129459808-40032148-82e1-47ce-a49a-05e598c69400.png",
},
],
)
carousel_text = dcc.Markdown(text.dash_bootstrap_templates_app_text)
"""
===============================================================================
Layout
"""
layout = dbc.Container(
[
util.header,
dbc.Row(
[
dbc.Col(control_panel, lg=4, sm=12),
dbc.Col(
html.Div(
id="db_templates_sample_app", className="mx-1 mb-4 shadow p-4",
),
lg=8,
sm=12,
),
],
),
dbc.Row(
[
dbc.Col([carousel_text, carousel], lg=4, sm=12),
dbc.Col(html.Div(copy_code_div,), lg=8, sm=12,),
],
),
],
fluid=True,
id="bootstrap_templates",
)
"""
=====================================================================
Display Sample App based on theme selected
"""
@app.callback(
Output("db_templates_sample_app", "children"),
Input("themes", "value"),
Input("use_figure_template", "value"),
)
def update_graphs(theme, use_template):
template = util.url_dbc_themes[theme].lower() if use_template == 1 else {}
heading_txt = (
"App with dash-bootstrap-templates"
if use_template == 1
else "App with Plotly default figure template"
)
heading = html.H3(heading_txt, className="bg-primary text-white p-2")
dff = df[df.year.between(1952, 1982)]
dff = dff[dff.continent.isin(df.continent.unique()[1:])]
line_fig = px.line(
dff,
x="year",
y="gdpPercap",
color="continent",
line_group="country",
template=template,
)
dff = dff[dff.year == 1982]
scatter_fig = px.scatter(
dff,
x="lifeExp",
y="gdpPercap",
size="pop",
color="pop",
size_max=60,
template=template,
).update_traces(marker_opacity=0.8)
avg_lifeExp = (dff["lifeExp"] * dff["pop"]).sum() / dff["pop"].sum()
map_fig = px.choropleth(
dff,
locations="iso_alpha",
color="lifeExp",
title="%.0f World Average Life Expectancy was %.1f years" % (1982, avg_lifeExp),
template=template,
)
hist_fig = px.histogram(
dff, x="lifeExp", nbins=10, title="Life Expectancy", template=template
)
graph_height = 300
graphs = html.Div(
[
dbc.Row(
[
dbc.Col(
dcc.Graph(figure=line_fig, style={"height": graph_height}), lg=6
),
dbc.Col(
dcc.Graph(figure=scatter_fig, style={"height": graph_height}),
lg=6,
),
],
className="mt-4",
),
dbc.Row(
[
dbc.Col(
dcc.Graph(figure=hist_fig, style={"height": graph_height}), lg=6
),
dbc.Col(
dcc.Graph(figure=map_fig, style={"height": graph_height}), lg=6
),
],
className="mt-4",
),
]
)
# These buttons are added to the app just to show the Boostrap theme colors
buttons = html.Div(
[
dbc.Button("Primary", color="primary", className="mr-1"),
dbc.Button("Secondary", color="secondary", className="mr-1"),
dbc.Button("Success", color="success", className="mr-1"),
dbc.Button("Warning", color="warning", className="mr-1"),
dbc.Button("Danger", color="danger", className="mr-1"),
dbc.Button("Info", color="info", className="mr-1"),
dbc.Button("Light", color="light", className="mr-1"),
dbc.Button("Dark", color="dark", className="mr-1"),
dbc.Button("Link", color="link"),
],
)
return [heading, buttons, graphs]
@app.callback(
Output("bootstrap_templates", "className"), Input("light_dark", "value"),
)
def update_css(value):
return "dbc_light" if value == "Light Themes" else "dbc_dark"
|
thigbee/dashBootstrapThemeExplorer
|
apps/bootstrap_templates.py
|
bootstrap_templates.py
|
py
| 5,729 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41244848850
|
'''Write a program that reads the name, age and gender of 4 people.
At the end, the program must show:
* The average age of the group.
* The name of the oldest man.
* How many women are under 21 years old '''
soma = 0
total = 0
maioridadehomem = 0
nomevelho = ''
totmulher20 = 0
for pessoa in range(1,5):
nome = str(input('Digite o nome: '))
idade = int(input('Digite a idade: '))
sexo = str(input('Masculo ou Femino? '))
soma = soma + idade
total = total + 1
media = soma / total
    if pessoa == 1 and sexo in 'Mm':
maioridadehomem = idade
nomevelho = nome
if sexo in 'Mm' and idade > maioridadehomem:
maioridadehomem = idade
nomevelho = nome
    if sexo in 'Ff' and idade < 21:
totmulher20 = totmulher20 + 1
print('A média de idade do grupo é de {} anos'.format(media))
print('O homem mais velho tem {} anos e se chama {}'.format(maioridadehomem, nomevelho))
print('O total de mulheres com menos de 21 anos é de {} Mulheres'.format(totmulher20))
|
andrematos90/Python
|
CursoEmVideo/Módulo 2/Desafio 056.py
|
Desafio 056.py
|
py
| 1,040 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
8012099265
|
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("file", type=str, help="data file")
parser.add_argument("-R", "--rd", type=float, default=1e3, help="resistor on drain")
parser.add_argument("-D", "--diagnose", action="store_true", help="plot aux polynomial")
parser.add_argument("-I", "--init", type=float, default=10, help="iterative seed")
parser.add_argument("-G", "--gain", type=float, default=25, help="gain desired (dB)")
parser.add_argument("-P", "--plot", action="store_true", help="plot fitted polynomial")
args = parser.parse_args()
try:
data = np.genfromtxt(args.file)
except OSError:
print("File {0:s} does not exist".format(args.file))
exit()
vgs_data, id_data = data[:, 0], data[:, 1]
if args.diagnose:
vx = np.linspace(0, 10, 1000)
vy1 = [((vgs_data - v) @ id_data -
np.square(vgs_data - v) @ id_data * np.sum(np.power(vgs_data - v, 3)) /
np.sum(np.power(vgs_data - v, 4)))
for v in vx]
#vy2 = [((vgs_data @ id_data -
# np.square(vgs_data - v) @ id_data * np.sum(np.power(vgs_data - v, 3)) /
# np.sum(np.power(vgs_data - v, 4))) / np.sum(id_data))
# for v in vx]
#vy3 = [(vgs_data @ id_data - np.sum(id_data) * v +
# np.square(vgs_data - v) @ id_data * np.sum(np.power(vgs_data - v, 3)) /
# np.sum(np.power(vgs_data - v, 4)))]
plt.plot(vx, vy1, color="r")
#plt.plot(vx, vy2, color="g")
#plt.plot(vx, vy3, color="b")
plt.xlabel(r"$v_t$")
plt.ylabel(r"$p(v_t$)")
plt.grid()
plt.show()
exit()
# Biseccion
va, vt = 0, 0
vb = args.init
while not np.isclose(va, vb, rtol=10 * np.finfo(float).eps, atol=0.0):
vt = (va + vb) / 2
d = ((vgs_data - vt) @ id_data -
(np.square(vgs_data - vt) @ id_data / np.sum(np.power(vgs_data - vt, 4))) *
np.sum(np.power(vgs_data - vt, 3)))
if d > 0:
vb = vt
elif d < 0:
va = vt
else:
va = vb
k = id_data @ np.square(vgs_data - vt) / np.sum(np.power(vgs_data - vt, 4))
k2 = id_data @ (vgs_data - vt) / np.sum(np.power(vgs_data - vt, 3))
if not np.isclose(k, k2, rtol=10 * np.finfo(float).eps, atol=0.0):
print("fit invalid, try a different seed estimated from aux polynomial")
exit()
gain_target = 10 ** (args.gain / 20)
vgsq = gain_target / (2 * k * args.rd) + vt
idq = k * (vgsq - vt) ** 2
print("k = {:3.3f} mA/V^2".format(k * 1000))
print("Vt = {:2.3f} V\n".format(vt))
print("Gain = {} dB".format(args.gain))
print("Rd = {} ohm".format(args.rd))
print("Vgsq = {:3.3} V".format(vgsq))
print("Idq = {:2.4} mA".format(1000 * idq))
if args.plot:
vgs = np.linspace(vgs_data[0], vgs_data[len(vgs_data) - 1], 1000)
i_d = k * np.square(vgs - vt)
plt.title(r"$K = {0:3.3f} mA/V^2, V_t = {1:2.3f}V$".format(k * 1000, vt))
plt.xlabel(r"$v_{gs}$")
plt.ylabel(r"$i_d$")
plt.grid()
plt.plot(vgs_data, id_data, marker="o", linestyle=" ")
plt.plot(vgs, i_d, linestyle="-")
plt.show()
|
mvallina/trts
|
nmosfit.py
|
nmosfit.py
|
py
| 3,076 |
python
|
en
|
code
| 1 |
github-code
|
6
|
35003317168
|
# Homework for task 2.4 of the Python 3 course
# Task 2.4 (requirements)
# Part A.
# Write a function that removes all exclamation marks from a given string.
# For example,
# foo("Hi! Hello!") -> "Hi Hello"
# foo("") -> ""
# foo("Oh, no!!!") -> "Oh, no"
# def remove_exclamation_marks(s):
#     pass
# Part B.
# Remove an exclamation mark from the end of the string.
# remove("Hi!") == "Hi"
# remove("Hi!!!") == "Hi!!"
# remove("!Hi") == "!Hi"
# def remove_last_em(s):
#     pass
# Extra
# Part C.
# Remove words from the sentence if they contain exactly one exclamation mark.
# Words are separated by a single space.
# For example,
# remove("Hi!") === ""
# remove("Hi! Hi!") === ""
# remove("Hi! Hi! Hi!") === ""
# remove("Hi Hi! Hi!") === "Hi"
# remove("Hi! !Hi Hi!") === ""
# remove("Hi! Hi!! Hi!") === "Hi!!"
# remove("Hi! !Hi! Hi!") === "!Hi!"
# def remove_word_with_one_em(s):
#     pass
print('\n') # Blank line to separate console output
# Write the test text (into a variable)
text = '! Это - п!роверочный текст, для! !проверки !работы! ф-й дл!я удаления восклицательных!! знаков!!!'
print('Полный текст для сравнения с результатами:', '\n\n', text) # Для наглядности выведем его в консоль
# Solution for part A
print('\n', 'Решение А. (убраны все "!")') # Пустая строка для разделения вывода в консоли
def remove_exclamation_marks(text: str) -> str:
    '''Removes all "!" from the string'''
    return text.replace('!', '')
# Check the function (part A)
print(remove_exclamation_marks(text))
# Solution for part B
print('\n', 'Решение B. (убран последний "!")') # Пустая строка для разделения вывода в консоли
def remove_last_em(text: str) -> str:
    '''Removes a "!" at the end of the string, if there is one.'''
    if text and text[-1] == "!":
        return text[:-1]
    else:
        return text
# Check the function (part B)
print(remove_last_em(text))
# Solution 2, part C.
# After Solution 1 I realized that split() actually produces a list,
# so I figured doing it directly through the list would be shorter. I was wrong :).
# I decided the code had to be written differently (so as not to repeat the structure of Solution 1)
print('\n', 'Решение C2. (убраны все слова с 1 "!")') # Пустая строка для разделения вывода в консоли
def remove_word_with_one_em(text: str) -> str:
    '''Removes a word from the string if it contains exactly one "!"'''
    word_list = text.split(' ') # get the text as a list of words
    word_count = len(word_list) # the maximum number of words
    count = 0 # word counter
    while count < word_count: # walk over every word in the text
        symbol_count = 0 # counter for "!"
        word = word_list[count] # take the next word to check
        for letter in range(len(word)): # walk over every character of the word
            if word[letter] == '!': symbol_count +=1 # count the "!"
        if symbol_count == 1:
            del word_list[count] # remove the word from the list per the condition
            word_count -= 1 # the list now holds one word less
            count -= 1 # so that the index stays the same after deleting an element
        count += 1
    return ' '.join(word_list)
# Check Solution 2, part C
print(remove_word_with_one_em(text))
# End of the check. End of Solution 2.
# Solution 3, part C.
# An optimized Solution 1 (using enumerate) [fewer lines and operations]
# and working on a single list right away, instead of spelling everything out via split each time
print('\n', 'Решение C3. (убраны все слова с 1 "!")') # Пустая строка для разделения вывода в консоли
def _remove_word_with_one_em(text: str) -> str:
    '''Removes a word from the string if it contains exactly one "!"'''
    new_text = '' # the string that will be returned
    word_list = text.split(' ') # get the text as a list of words
    for word in word_list: # walk over every word in the text (list)
        symbol_count = 0 # counter for "!"
        for letter in range(len(word)): # walk over every character of the word
            if word[letter] == '!': symbol_count +=1 # count the "!"
        if symbol_count != 1: new_text += word + ' ' # rebuild the text
    return new_text.strip(' ') # stripping the trailing space
# Check Solution 3, part C
print(_remove_word_with_one_em(text))
# End of Solution 3.
print('\n') # Blank line to separate console output
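# --- Illustrative checks (not part of the original homework) ---
# The expected values come straight from the examples in the task description above.
assert remove_exclamation_marks("Hi! Hello!") == "Hi Hello"
assert remove_exclamation_marks("Oh, no!!!") == "Oh, no"
assert remove_last_em("Hi!!!") == "Hi!!"
assert remove_last_em("!Hi") == "!Hi"
assert remove_word_with_one_em("Hi Hi! Hi!") == "Hi"
assert _remove_word_with_one_em("Hi! Hi!! Hi!") == "Hi!!"
print("All example checks passed.")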
|
PavelVes/project_01
|
HomeWorks_for_course_Python_3/HW_lvl_1/hw_task_2.4.py
|
hw_task_2.4.py
|
py
| 6,215 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
4406068121
|
import random
import operator
class Node():
def __init__(self, val):
self.val = val
self.next = None
def make_linklist(datas):
head, tail = None, None
for d in datas:
node = Node(d)
if not head:
head = node
tail = node
else:
tail.next = node
tail = node
return head
def dump_to_list(head):
l = []
p = head
while p:
l.append(p.val)
p = p.next
return l
def insert_sort(head):
if not head or not head.next:
return head
cur = head.next
head.next = None
while cur:
next = cur.next
cur.next = None
prev = None
p = head
while p and p.val <= cur.val:
prev = p
p = p.next
if not prev:
cur.next = head
head = cur
else:
prev.next = cur
cur.next = p
cur = next
return head
def get_smallest_node(head):
small_pre = None
small = head
pre = head
cur = head.next
while cur:
if cur.val < small.val:
small_pre = pre
small = cur
pre = cur
cur = cur.next
return small_pre, small
def selection_sort(head):
if not head or not head.next:
return head
tail = None
cur = head
while cur:
small_prev, small = get_smallest_node(cur)
if small_prev:
small_prev.next = small.next
if cur == small:
cur = cur.next
small.next = None
if not tail:
head = small
tail = small
else:
tail.next = small
tail = small
return head
def get_biggest_node(head):
big_pre = None
big = head
pre = head
cur = head.next
while cur:
if cur.val > big.val:
big_pre = pre
big = cur
pre = cur
cur = cur.next
return big_pre, big
def selection_sort2(head):
if not head or not head.next:
return head
new_head = None
while head:
big_prev, big = get_biggest_node(head)
if big_prev:
big_prev.next = big.next
if head == big:
head = head.next
big.next = new_head
new_head = big
return new_head
def test(count, maxval):
datas = []
for _ in range(count):
r = random.randint(0, maxval)
datas.append(r)
head = make_linklist(datas)
head = insert_sort(head)
l = dump_to_list(head)
if not operator.eq(sorted(datas), l):
raise Exception('Error')
head = make_linklist(datas)
head = selection_sort(head)
l = dump_to_list(head)
if not operator.eq(sorted(datas), l):
raise Exception('Error')
head = make_linklist(datas)
head = selection_sort2(head)
l = dump_to_list(head)
if not operator.eq(sorted(datas), l):
raise Exception('Error')
if __name__ == '__main__':
test(0, 100)
test(1, 100)
test(2, 100)
test(10, 100)
test(10, 100)
test(100, 100)
test(1000, 100)
test(1000, 10000)
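    # --- Illustrative usage (not part of the original test suite) ---
    # A tiny hand-checkable example of the three sorts on the same input list.
    demo = [3, 1, 2]
    print(dump_to_list(insert_sort(make_linklist(demo))))      # [1, 2, 3]
    print(dump_to_list(selection_sort(make_linklist(demo))))   # [1, 2, 3]
    print(dump_to_list(selection_sort2(make_linklist(demo))))  # [1, 2, 3]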
|
guzhoudiaoke/data_structure_and_algorithms
|
coding_interview_guide/2_link_list/16_selection_sort/selection_sort.py
|
selection_sort.py
|
py
| 3,152 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74190873788
|
__author__ = "ALEX-CHUN-YU ([email protected])"
from sklearn.datasets import load_wine
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing
from sklearn.model_selection import validation_curve
from sklearn.model_selection import GridSearchCV
from sklearn_evaluation.plot import grid_search
import numpy as np
import matplotlib.pyplot as plt
from sklearn.externals import joblib
import json
# Random Forest Classifier Alogorithm
class RFC():
# RFC Initialize
def __init__(self, name):
self.model_name = 'model/' + name + '_rfc'
self.image_name = 'image/' + name + '_rfc'
# RFC Parameter
self.n_estimators = 10
self.criterion = 'gini'
self.max_depth = None
self.min_samples_split = 2
self.min_samples_leaf = 1
self.min_weight_fraction_leaf = 0.0
self.max_features = 'auto'
self.max_leaf_nodes = None
self.min_impurity_decrease = 0.0
self.min_impurity_split = None
self.bootstrap = True
self.oob_score = False
self.n_jobs = -1
self.random_state = None
self.verbose = 0
self.warm_start = False
self.class_weight = None
#(Validation Parameter) GridSearchCV, validation_curve
self.cv = 10
self.criterion_range = ['gini', 'entropy'] # 2 * 3
self.max_features_range = ['sqrt', 'log2', 'auto']
self.n_estimators_range = [10, 50, 100, 700, 1000]# 5
# Accuracy(GridSearchCV application)
self.score = 0
        self.scoring = 'accuracy'  # f1, recall, precision: the target must be binary in sklearn (though sklearn also provides f1_micro, f1_macro, ...)
# Normalization
self.normalization = False
    # Find the best parameters (normalization seems to make no difference for RFC? kept for now)
def tuning_parameters(self, X, y):
        # First tuning pass (find the best n_estimators and the best max_features)
        # n_estimators: how many trees the forest should contain (default = 10)
        # criterion: how information gain is measured when splitting a branch, 'gini' or 'entropy' (default = 'gini')
        # max_features: the number of features considered when looking for the best split
clf = RandomForestClassifier(n_estimators = self.n_estimators, criterion = self.criterion, max_depth = self.max_depth,
min_samples_split = self.min_samples_split, min_samples_leaf = self.min_samples_leaf,
min_weight_fraction_leaf = self.min_weight_fraction_leaf, max_features = self.max_features,
max_leaf_nodes = self.max_leaf_nodes, min_impurity_decrease = self.min_impurity_decrease,
min_impurity_split = self.min_impurity_split, bootstrap = self.bootstrap, oob_score = self.oob_score,
n_jobs = self.n_jobs, random_state = self.random_state, verbose = self.verbose,
warm_start = self.warm_start, class_weight = self.class_weight)
parameter_candidates = {# Set the parameter candidates
'n_estimators': self.n_estimators_range,
'criterion': self.criterion_range,
'max_features': self.max_features_range}
clf_gscv = GridSearchCV(estimator = clf, param_grid = parameter_candidates, cv = self.cv, scoring = self.scoring, n_jobs = self.n_jobs)# Create a classifier with the parameter candidates
clf_gscv.fit(X, y)# No Normalization
normalization_clf_gscv = clf_gscv
normalization_clf_gscv.fit(preprocessing.scale(X), y)# Normalization
if normalization_clf_gscv.best_score_ > clf_gscv.best_score_:
self.normalization = True
X = preprocessing.scale(X)
self.n_estimators = normalization_clf_gscv.best_estimator_.n_estimators
self.criterion = normalization_clf_gscv.best_estimator_.criterion
self.max_features = normalization_clf_gscv.best_estimator_.max_features
self.score = normalization_clf_gscv.best_score_
clf = normalization_clf_gscv
else:
self.n_estimators = clf_gscv.best_estimator_.n_estimators
self.criterion = clf_gscv.best_estimator_.criterion
self.max_features = clf_gscv.best_estimator_.max_features
self.score = clf_gscv.best_score_
clf = clf_gscv
# # Print out the results
# print('Best score for training data:', clf_gscv.best_score_)
# print('Best n_estimators:',clf_gscv.best_estimator_.n_estimators)
# print('Best max_features:',clf_gscv.best_estimator_.max_features)
# print(normalization_clf_gscv.best_score_)
# print(clf.cv_results_['params'])
criterion = [x['criterion'] for x in clf.cv_results_['params']]
# print(criterion)
max_features = [x['max_features'] for x in clf.cv_results_['params']]
# print(max_features)
plt.title("Validation Curve with RFC")
plt.xlabel("Value Of n_estimators For RFC")
plt.ylabel(self.scoring)
# 6 * 5
mean_scores = np.array(clf.cv_results_['mean_test_score']).reshape(len(self.criterion_range) * len(self.max_features_range), len(self.n_estimators_range))
std_scores = np.array(clf.cv_results_['std_test_score']).reshape(len(self.criterion_range) * len(self.max_features_range), len(self.n_estimators_range))
# print(mean_scores)
# print(std_scores)
ind = 0
for i in range(0, len(criterion), len(self.n_estimators_range)):
plt.plot(self.n_estimators_range, mean_scores[ind], "-o", label = 'criterion: ' + criterion[i] + ', max_features: ' + max_features[i])
plt.fill_between(self.n_estimators_range, mean_scores[ind] - std_scores[ind],
mean_scores[ind] + std_scores[ind], alpha = 0.2)
ind += 1
plt.legend(loc = "best") # best location
plt.savefig(self.image_name + '.png')# save image
plt.close()
print("RFC Save Image Finished")
print("RFC Tuning Parameters Finished")
# Produce Model
def train(self, X, y):
# Train
clf = RandomForestClassifier(n_estimators = self.n_estimators, criterion = self.criterion, max_depth = self.max_depth,
min_samples_split = self.min_samples_split, min_samples_leaf = self.min_samples_leaf,
min_weight_fraction_leaf = self.min_weight_fraction_leaf, max_features = self.max_features,
max_leaf_nodes = self.max_leaf_nodes, min_impurity_decrease = self.min_impurity_decrease,
min_impurity_split = self.min_impurity_split, bootstrap = self.bootstrap, oob_score = self.oob_score,
n_jobs = self.n_jobs, random_state = self.random_state, verbose = self.verbose,
warm_start = self.warm_start, class_weight = self.class_weight)
if self.normalization == True:
X = preprocessing.scale(X)
clf.fit(X, y)
        # persist the model with joblib
joblib.dump(clf, self.model_name + '.pkl')
print("RFC Save Model Finished")
        # save the parameters and the accuracy
parameters = {}
parameters['parameters'] = []
parameters['parameters'].append({
'n_estimators': self.n_estimators,
'criterion': self.criterion,
'max_features': self.max_features,
})
parameters['scoring'] = []
parameters['scoring'].append({
'valid_score': self.score
})
parameters['preprocessing'] = []
parameters['preprocessing'].append({
'normalization': self.normalization
})
with open(self.model_name + '_parameters', 'w', encoding = "utf-8") as rfcf:
json.dump(parameters, rfcf)
print("RFC Save Parameters Finished")
if __name__ == '__main__':
X, y = load_wine().data, load_wine().target
name = 'wine'
rfc = RFC(name)
rfc.tuning_parameters(X, y)
rfc.train(X, y)
    # load the saved parameters and print them
with open(rfc.model_name + '_parameters') as json_file:
data = json.load(json_file)
for p in data['parameters']:
print('n_estimators: ' + str(p['n_estimators']))
print('criterion: ' + p['criterion'])
print('max_features: ' + p['max_features'])
    # the key must be changed for other scoring metrics
for s in data['scoring']:
print('valid_score: ' + str(s['valid_score']))
for p in data['preprocessing']:
print('normalization: ' + str(p['normalization']))
normalization = p['normalization']
    # load the model and run prediction
if normalization == True:
X = preprocessing.scale(X)
rfc = joblib.load(rfc.model_name + '.pkl')
print(rfc.score(X, y))
|
Alex-CHUN-YU/Recommender-System
|
scenario_algorithm_analysis/rfc.py
|
rfc.py
|
py
| 9,077 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28713863068
|
import torch
import pandas as pd
import os
from shutil import copy
from utils import fix_randomness, save_to_df, _logger, report_results, get_nonexistant_path, copy_Files
from dataloader.dataloader import data_generator
from trainer.training_evaluation import cross_domain_test
from datetime import datetime
from itertools import product
from args import args
import wandb
start_time = datetime.now()
device = torch.device(args.device)
da_method = args.da_method
save_dir = args.save_dir
data_type = args.selected_dataset
data_path = f"./data/{data_type}"
base_model_type = args.base_model
experiment_description = args.experiment_description
if not os.path.exists(save_dir):
os.mkdir(save_dir)
exec(f'from trainer.{da_method} import cross_domain_train')
exec(f'from config_files.{data_type}_Configs import Config as Configs')
exec(f'from models.models import {base_model_type} as base_model')
configs = Configs()
# os.environ["WANDB_MODE"] = "dryrun"
os.environ["WANDB_SILENT"] = 'true'
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
# torch.backends.cudnn.enabled = False  # another workaround for the LSTM launch failure issue
def main_train_cd():
    # find out the domain IDs
data_files = os.listdir(data_path)
data_files = [i for i in data_files if "train" in i]
sources = [i[6] for i in data_files]
src_tgt_product = [sources, sources]
simple_column_names = ['Run ID',
'source_loss', 'source_acc',
'target_loss', 'target_acc',]
column_names_mean = ['Scenario',
'Source_only_loss_mean', 'Source_only_acc_mean',
f'{da_method}_loss_mean', f'{da_method}_acc_mean',
f'Source_only_loss_std', 'Source_only_acc_std',
f'{da_method}_loss_std', f'{da_method}_acc_std']
simple_df= pd.DataFrame(columns=simple_column_names)
mean_df = pd.DataFrame(columns=column_names_mean)
# Logging
# cwd = os.getcwd()
# exp_log_dir = os.path.join(r"D:\Autoregressive Domain Adaptation for Time series data\Last",save_dir, experiment_description, f"{da_method}_{data_type}_{args.run_description}")
exp_log_dir = os.path.join(os.getcwd(),save_dir, experiment_description, f"{da_method}_{data_type}_{args.run_description}")
exp_log_dir = get_nonexistant_path(exp_log_dir)
# os.makedirs(exp_log_dir, exist_ok=True)
# copy(f"/home/mohamed/SLARADA/config_files/{data_type}_configs.py", f"{exp_log_dir}/{data_type}_configs.py")
# copy(f"/home/mohamed/SLARADA/trainer/{da_method}.py", f"{exp_log_dir}/{da_method}_script.py")
# copy("/home/mohamed/SLARADA/args.py", f"{exp_log_dir}/args.py")
copy_Files(exp_log_dir, data_type, da_method)
    # loop through domains
counter = 0
src_counter = 0
for src_id, tgt_id in product(*src_tgt_product):
# for src_id in ['a', 'b', 'c']:
# for tgt_id in ['a', 'b','c']:
if src_id != tgt_id:
# prepare save directory
# specify number of consecutive runs
for run_id in range(args.num_runs):
fix_randomness(run_id)
# Logging
log_dir = os.path.join(exp_log_dir, src_id + "_to_" + tgt_id + "_run_"+ str(run_id))
os.makedirs(log_dir, exist_ok=True)
log_file_name = os.path.join(log_dir, f"logs_{datetime.now().strftime('%d_%m_%Y_%H_%M_%S')}.log")
logger = _logger(log_file_name)
logger.debug("=" * 45)
logger.debug(f'Dataset: {data_type}')
logger.debug(f'Method: {da_method}')
logger.debug("=" * 45)
logger.debug(f'Source: {src_id} ---> Target: {tgt_id}')
logger.debug(f'Run ID: {run_id}')
logger.debug("=" * 45)
# Load datasets
src_train_dl, src_valid_dl, src_test_dl = data_generator(data_path, src_id, configs)
tgt_train_dl, tgt_valid_dl, tgt_test_dl = data_generator(data_path, tgt_id, configs)
if args.tensorboard:
wandb.init(project="SLARDA", group = f'{da_method}_{data_type}', name=f'{src_id}_to_{tgt_id}_run_{run_id}', config=configs,
sync_tensorboard=False, reinit=True, dir=r"./visualize/", )
source_model, target_model = cross_domain_train(src_train_dl, src_valid_dl, src_test_dl,
tgt_train_dl, tgt_valid_dl, base_model,
src_id, tgt_id,
device, logger, configs)
scores = cross_domain_test(source_model, target_model, src_id, tgt_id,
src_train_dl, tgt_train_dl, src_test_dl, tgt_test_dl,
device, log_dir, logger)
run_name = f"domain_{src_id}_run_{run_id}"
outs = (run_name,) + scores
simple_df.loc[counter] = outs
counter += 1
input_data = [f"{src_id}-->{tgt_id}"]
input_data.extend(simple_df.iloc[-args.num_runs:, 1:].mean().array)
input_data.extend(simple_df.iloc[-args.num_runs:, 1:].std().array)
mean_df.loc[src_counter] = input_data
src_counter += 1
# Printing and saving final results
print(simple_df.to_string())
print(mean_df.to_string())
printed_results = mean_df[['Scenario', 'Source_only_acc_mean', 'Source_only_acc_std', f'{da_method}_acc_mean', f'{da_method}_acc_std']]
mean = mean_df[['Source_only_acc_mean', 'Source_only_acc_std', f'{da_method}_acc_mean', f'{da_method}_acc_std']].mean()
printed_results.loc[len(printed_results)] = mean
printed_results.at[len(printed_results)-1, 'Scenario'] = 'Average'
logger.debug(f"Total training time is {datetime.now() - start_time}")
logger.debug('=' * 45)
logger.debug(f'Results using: {da_method}')
logger.debug('=' * 45)
logger.debug(mean_df.to_string())
logger.debug(printed_results.to_string())
print_res_name = os.path.basename(exp_log_dir)
simple_df.to_excel(f'{exp_log_dir}/full_res_results_{print_res_name}.xlsx')
printed_results.to_excel(f'{exp_log_dir}/printed_results_{print_res_name}.xlsx')
if args.tensorboard:
wandb.log({"Full_results": wandb.Table(dataframe=simple_df)})
wandb.log({"Printed_results": wandb.Table(dataframe=printed_results)})
if __name__ == "__main__":
wandb.config = configs
main_train_cd()
|
mohamedr002/SLARDA
|
Autorgressive_Adaptation/train_CD.py
|
train_CD.py
|
py
| 6,844 |
python
|
en
|
code
| 23 |
github-code
|
6
|
39763998514
|
import streamlit as st
import os
from PIL import Image
from ultralytics import YOLO
import re
# Load the model
model = YOLO("model.pt")
# Set the path for results
output_dir = 'temp_out_res'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Function to predict images
def predict_image(image_path):
results = model.predict(source=image_path)
input_filename = os.path.basename(results[0].path)
annotated_img = results[0].plot()
pil_image = Image.fromarray(annotated_img[..., ::-1])
pil_image.save(os.path.join(output_dir, input_filename))
total_polis = 0
total_monos = 0
polis_index = 0
monos_index = 1
verbose_output = results[0].verbose()
polis_match = re.search(r'(\d+) poli', verbose_output)
monos_match = re.search(r'(\d+) mono', verbose_output)
if polis_match:
total_polis += int(polis_match.group(1))
if monos_match:
total_monos += int(monos_match.group(1))
if total_polis + total_monos == 0:
polis_percentage = 0
else:
polis_percentage = (total_polis / (total_polis + total_monos)) * 100
return os.path.join(output_dir, input_filename), total_polis, total_monos, polis_percentage
# Main Streamlit function
def main():
st.title("EndoScan: YOLO Subclinical Endometritis Detector")
uploaded_file = st.file_uploader("Choose an image for prediction", type=['jpg', 'jpeg', 'png'])
if uploaded_file is not None:
image_path = os.path.join(output_dir, uploaded_file.name)
with open(image_path, 'wb') as f:
f.write(uploaded_file.getbuffer())
st.image(image_path, caption='Uploaded image.', use_column_width=True)
if st.button("Predict"):
pred_img_path, polis_count, monos_count, polis_perc = predict_image(image_path)
st.image(pred_img_path, caption='Predicted image.', use_column_width=True)
st.write(f"Total count of polymorphonuclear cells: {polis_count}")
st.write(f"Total count of mononuclear cells: {monos_count}")
st.write(f"Percentage of polymorphonuclear cells: {polis_perc:.2f}%")
if __name__ == '__main__':
main()
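# --- Usage note (not part of the original app) ---
# Streamlit apps are started from the command line rather than with plain `python`,
# e.g. (assuming this file is saved as YOLO_cell.py and streamlit is installed):
#   streamlit run YOLO_cell.py
# The weights file "model.pt" must sit next to the script for YOLO() to load it.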
|
DawidTobolski/YOLO_cell
|
YOLO_cell.py
|
YOLO_cell.py
|
py
| 2,252 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8588345616
|
from collections import namedtuple
from datetime import datetime
from time import sleep
from timeit import default_timer as timer
import re
import requests
def _request_matches(r, regexp) -> bool:
"""Check if request has data and that data matches give regular expresssion
Args:
r: HTTP call result from a status provider, must implement raise_for_status() and .text
regexp: Compiler regular expression to search for in the HTTP request text field.
Returns:
True if a match is found, false if not and None if request contains no .text property
"""
try:
r.raise_for_status()
text = r.text
return regexp.search(text) is not None
except:
return None
def check_status(url: str, regexp, status_provider, src='localhost') -> dict:
"""Check status code of a given Url
Args:
url: URL-string of a resource to check with HTTP GET request.
regexp: Regular expression to check respose against if any
src: Identifier of a requestor used for reporting and returned as result.src
status_provider: Callable used to get a status of a resource.
Returns:
Object representing a status of the given resource
"""
ts = datetime.now()
start_time = timer()
r = status_provider(url)
end_time = timer()
return {
'timestamp': str(ts),
'src': src,
'target': url,
'time': (end_time - start_time),
'code': r.status_code,
'has_match': _request_matches(r, regexp) if regexp else None
}
class RestStatusPoller:
"""A source of REST-resourse status checks.
    This Source issues REST GET requests to a given resource URL
    and yields a dict describing the resource status.
The source is designed to be used as iterable:
for data in source:
process(data)
Keyword Arguments:
url: URL of the resource to check status
        interval (int or None): time in sec to wait before the next check.
            If None is given, the check is performed only once.
        regexp (str or None): regular expression to search for in the response body, if any.
            If None is given - no search is performed and 'has_match' field of the status
            response is set to None
        provider (callable or None): a resource status provider override.
            If None is given - requests.get is used. Default is None.
"""
def __init__(self, url, interval, regexp, provider=None):
self.url = url
self.interval = interval
self.pattern = re.compile(regexp) if regexp else None
self.__value_provide = provider or (lambda x: requests.get(
x, headers={'content-type': 'application/json'}))
self.__iter_count = 0
def __iter__(self):
return self
def __next__(self):
if self.__iter_count > 0:
if self.interval is not None:
sleep(self.interval)
else:
raise StopIteration()
self.__iter_count += 1
return check_status(self.url, self.pattern, self.__value_provide)
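# --- Illustrative usage sketch (not part of the original module) ---
# The poller is an iterable; with interval=None it yields exactly one status dict.
# A stub provider is injected here so the example does not hit the network;
# FakeResponse only mimics the attributes that check_status actually uses.
if __name__ == "__main__":
    class FakeResponse:
        status_code = 200
        text = "service OK"

        def raise_for_status(self):
            pass

    poller = RestStatusPoller("http://example.com/health", interval=None,
                              regexp="OK", provider=lambda url: FakeResponse())
    for status in poller:
        print(status)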
|
abbyssoul/site_check
|
site_checker/rest_source.py
|
rest_source.py
|
py
| 3,130 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74285386747
|
import sys
sys.path.append('../python')
sys.path.append('../apps')
import needle as ndl
from d2l import torch as d2l
import torch
import torch.nn as nn
import numpy as np
class MultiHeadAttention(nn.Module):
"""多头注意力"""
def __init__(self, key_size, query_size, value_size, num_hiddens,
num_heads, dropout, bias=False, **kwargs):
super(MultiHeadAttention, self).__init__(**kwargs)
self.num_heads = num_heads
self.attention = d2l.DotProductAttention(dropout)
self.W_q = nn.Linear(query_size, num_hiddens, bias=bias)
self.W_k = nn.Linear(key_size, num_hiddens, bias=bias)
self.W_v = nn.Linear(value_size, num_hiddens, bias=bias)
self.W_o = nn.Linear(num_hiddens, num_hiddens, bias=bias)
torch.nn.init.kaiming_uniform_(self.W_q.weight)
torch.nn.init.kaiming_uniform_(self.W_k.weight)
torch.nn.init.kaiming_uniform_(self.W_v.weight)
torch.nn.init.kaiming_uniform_(self.W_o.weight)
### test
self.X1 = None
self.X2 = None
self.X3 = None
self.output = None
self.vl = None
def forward(self, queries, keys, values, valid_lens):
        # Shapes of queries, keys, values:
        # (batch_size, number of queries or of key-value pairs, num_hiddens)
        # Shape of valid_lens:
        # (batch_size,) or (batch_size, number of queries)
        # After the transformation, the shapes of queries, keys, values become:
        # (batch_size*num_heads, number of queries or of key-value pairs,
        # num_hiddens/num_heads)
queries = self.transpose_qkv(self.W_q(queries), self.num_heads)
keys = self.transpose_qkv(self.W_k(keys), self.num_heads)
values = self.transpose_qkv(self.W_v(values), self.num_heads)
if valid_lens is not None:
            # Along axis 0, copy the first item (scalar or vector) num_heads times,
            # then copy the second item in the same way, and so on.
valid_lens = torch.repeat_interleave(
valid_lens, repeats=self.num_heads, dim=0)
self.vl = valid_lens
        # Shape of output: (batch_size*num_heads, number of queries,
        # num_hiddens/num_heads)
output = self.attention(queries, keys, values, valid_lens)
self.output = output
        # Shape of output_concat: (batch_size, number of queries, num_hiddens)
output_concat = self.transpose_output(output, self.num_heads)
return self.W_o(output_concat)
def transpose_qkv(self, X, num_heads):
"""为了多注意力头的并行计算而变换形状"""
# 输入X的形状:(batch_size,查询或者“键-值”对的个数,num_hiddens)
# 输出X的形状:(batch_size,查询或者“键-值”对的个数,num_heads,
# num_hiddens/num_heads)
X = X.reshape(X.shape[0], X.shape[1], num_heads, -1)
self.X1 = X.detach().numpy()
        # Shape of the output X: (batch_size, num_heads, number of queries or of key-value pairs,
        # num_hiddens/num_heads)
X = X.permute(0, 2, 1, 3)
self.X2 = X.detach().numpy()
        # Final output shape: (batch_size*num_heads, number of queries or of key-value pairs,
        # num_hiddens/num_heads)
X3 = X.reshape(-1, X.shape[2], X.shape[3])
self.X3 = X3.detach().numpy()
return X3
def transpose_output(self, X, num_heads):
"""逆转transpose_qkv函数的操作"""
X = X.reshape(-1, num_heads, X.shape[1], X.shape[2])
X = X.permute(0, 2, 1, 3)
return X.reshape(X.shape[0], X.shape[1], -1)
num_hiddens, num_heads = 100, 5
batch_size, num_queries = 2, 4
num_kvpairs = 6
valid_lens = torch.tensor([3, 2])
# valid_lens = None
X = torch.randn((batch_size, num_queries, num_hiddens),dtype=torch.float32)
Y = torch.randn((batch_size, num_kvpairs, num_hiddens),dtype=torch.float32)
# d2l.check_shape(attention(X, Y, Y, valid_lens),
# (batch_size, num_queries, num_hiddens))
dropout = 0
attention_ = ndl.nn.MultiHeadAttention(num_hiddens, num_hiddens, num_hiddens,
num_hiddens, num_heads, dropout, device=ndl.cpu(), dtype="float32")
valid_lens_ = valid_lens.detach().numpy() if valid_lens is not None else None
X_ = ndl.Tensor(X.detach().numpy(), device=ndl.cpu(), dtype="float32")
Y_ = ndl.Tensor(Y.detach().numpy(), device=ndl.cpu(), dtype="float32")
attention = MultiHeadAttention(num_hiddens, num_hiddens, num_hiddens,
num_hiddens, num_heads, dropout)
attention.W_q.weight = torch.nn.Parameter(torch.tensor(attention_.W_q.weight.numpy().T, dtype=torch.float32))
attention.W_k.weight = torch.nn.Parameter(torch.tensor(attention_.W_k.weight.numpy().T, dtype=torch.float32))
attention.W_v.weight = torch.nn.Parameter(torch.tensor(attention_.W_v.weight.numpy().T, dtype=torch.float32))
attention.W_o.weight = torch.nn.Parameter(torch.tensor(attention_.W_o.weight.numpy().T, dtype=torch.float32))
print("W_q.weight:", np.linalg.norm(attention.W_q.weight.T.detach().numpy()-attention_.W_q.weight.numpy()))
print("W_k.weight:", np.linalg.norm(attention.W_k.weight.T.detach().numpy()-attention_.W_k.weight.numpy()))
print("W_v.weight:", np.linalg.norm(attention.W_v.weight.T.detach().numpy()-attention_.W_v.weight.numpy()))
print("W_o.weight:", np.linalg.norm(attention.W_o.weight.T.detach().numpy()-attention_.W_o.weight.numpy()))
print("X:", np.linalg.norm(X.detach().numpy()-X_.numpy()))
queries = attention.transpose_qkv(attention.W_q(X), attention.num_heads)
queries_ = attention_.transpose_qkv(attention_.W_q(X_))
zq = attention.W_q(X).detach().numpy()
zq_ = attention_.W_q(X_).numpy()
print("W_q.weight:", np.linalg.norm(attention.W_q.weight.T.detach().numpy() - attention_.W_q.weight.numpy()))
print("W_q(X):", np.linalg.norm(zq - zq_))
X1 = X.reshape((X.shape[0], X.shape[1], attention.num_heads, -1))
X1_ = X_.reshape((X_.shape[0], X_.shape[1], attention_.num_heads, -1))
print("X1-X1_:", np.linalg.norm(X1.detach().numpy() - X1_.numpy()))
# print("X1.shape", attention.X1.shape)
# print("X1_.shape", attention_.X1.shape)
# print("X2.shape", attention.X2.shape)
# print("X2_.shape", attention_.X2.shape)
# print("X3.shape", attention.X3.shape)
# print("X3_.shape", attention_.X3.shape)
# print("X1:", np.linalg.norm(attention.X1-attention_.X1))
# print("X2:", np.linalg.norm(attention.X2-attention_.X2))
# print("X3:", np.linalg.norm(attention.X3-attention_.X3))
keys = attention.transpose_qkv(attention.W_k(Y), attention.num_heads)
keys_ = attention_.transpose_qkv(attention_.W_k(Y_))
# print("X1:", np.linalg.norm(attention.X1-attention_.X1))
# print("X2:", np.linalg.norm(attention.X2-attention_.X2))
# print("X3:", np.linalg.norm(attention.X3-attention_.X3))
values = attention.transpose_qkv(attention.W_v(Y), attention.num_heads)
values_ = attention_.transpose_qkv(attention_.W_v(Y_))
# print("X1:", np.linalg.norm(attention.X1-attention_.X1))
# print("X2:", np.linalg.norm(attention.X2-attention_.X2))
# print("X3:", np.linalg.norm(attention.X3-attention_.X3))
print(np.linalg.norm(X.detach().numpy()-X_.numpy()))
print(np.linalg.norm(Y.detach().numpy()-Y_.numpy()))
print(np.linalg.norm(queries.detach().numpy()-queries_.numpy()))
print(np.linalg.norm(keys.detach().numpy()-keys_.numpy()))
print(np.linalg.norm(values.detach().numpy()-values_.numpy()))
attention.eval()
y = attention(X, Y, Y, valid_lens)
print("attn_output.shape:", y.shape)
y_ = attention_(X_, Y_, Y_, valid_lens_)
print("attn_output_.shape:", y_.shape)
if (valid_lens is not None):
print("valid_lens:", np.linalg.norm(attention.vl.detach().numpy()-attention_.vl))
print("output:", np.linalg.norm(attention.output.detach().numpy()-attention_.output.numpy()))
print("attn_output:", np.linalg.norm(y.detach().numpy()-y_.numpy()))
| Erostrate9/needle | tests/MultiHeadAttention.py | MultiHeadAttention.py | py | 7,843 | python | en | code | 2 | github-code | 6 | 26287041907 |
import sys
import matplotlib.pyplot as plt
import numpy as np
import os
# this program reads input from a script which has assessed how networks react to a particular combination of gradient and division status
# the script has produced for each network a matrix with 0 (migrate) and 1 (divide), which this program will plot and find the consensus for.
if len(sys.argv) < 4:  # need the output file, the individual-plot flag and at least one input file
print ("This is the program 'plot_netanalysis_jan.py'")
print ("Usage: ./plot_netanalysis_jan.py <output_file> <plot individuals?> <input filenames>")
sys.exit(1)
else:
outputfile=sys.argv[1]
indiplot=int(sys.argv[2])
arraystorage=[]
filestorage=[]
init=0
count=0
sizes=None
consensus=None
for filename in sys.argv[3:]:
#print ("{}".format(filename))
divmig = np.loadtxt(filename, dtype='i', delimiter='\t')
#print sizes
if not init:
sizes = np.shape(divmig[1:,1:])
consensus=np.zeros((sizes[0]*sizes[1],),dtype=int)
init=1
outfile=os.path.splitext(filename)[0]
#for if you still need to plot the individuals:
if (indiplot):
fig=plt.figure() #!
fig.set_size_inches(1, 1.*sizes[0]/sizes[1], forward = False) #!
ax = plt.Axes(fig, [0., 0., 1., 1.]) #!
ax.set_axis_off() #!
fig.add_axes(ax) #!
ax.imshow(divmig[1:,1:], cmap='RdYlBu', origin='lower')
divshare=divmig[1:,1:].sum()
migshare=(sizes[0])*(sizes[1])-divshare
migs="%04d" % (migshare,)
#print divs
plt.savefig("div_"+str(migs)+"_"+outfile+".pdf", dpi=sizes[1]) #bbox_inches='tight'
plt.close()
binarystring=divmig[1:,1:].flatten()
consensus=np.add(binarystring, consensus)
#print ("{}".format(consensus))
arraystorage.append(binarystring)
filestorage.append(outfile)
count+=1
#find the consensus sequence
bool_consensus= consensus > count/2
print ("{}".format(bool_consensus))
consensus_sequence=bool_consensus.astype(int)
print ("consensus is {}".format(consensus_sequence))
wfilename="consensussequence_"+outputfile+".dat"
writefile=open(wfilename,"w")
for el in consensus_sequence:
writefile.write(str(el)+" ")
writefile.close()
#display consensus image
imcons=np.reshape(consensus_sequence,sizes)
fig=plt.figure() #!
fig.set_size_inches(1, 1.*sizes[0]/sizes[1], forward = False) #!
ax = plt.Axes(fig, [0., 0., 1., 1.]) #!
ax.set_axis_off() #!
fig.add_axes(ax) #!
ax.imshow(imcons, cmap='RdYlBu', origin='lower')
#outfile=os.path.splitext(outputfile)[0]
plt.savefig("consensus"+"_"+outputfile+".pdf", dpi=sizes[1]) #bbox_inches='tight'
plt.close()
#find for each individual the distance to the consensus sequence
#writefile=open(outputfile, "w")
#fig=plt.figure() #
#hamms=[]
minhamm=999999999
for fi,seq in zip(filestorage, arraystorage):
hamm=np.count_nonzero(seq!=consensus_sequence)
if hamm<minhamm:
minhamm=hamm
minfile=fi
print ("file with individual closest to consensus: {}".format(minfile))
# hamms.append[hamm]
#writefile.write(fi+"\t"+str(hamm)+"\n")
#maxbina=max(hamms)
#hista, bin_edgesa = np.histogram(hamms, bins = range(maxbina))
#plt.plot(bin_edgesa[:-1],hista)
#writefile.close()
| RenskeVroomans/regulation_evolution | scripts/plot_netanalysis_jan.py | plot_netanalysis_jan.py | py | 3,203 | python | en | code | 0 | github-code | 6 | 25225150959 |
import time
import random
class NPC():
def __init__(self, trigger_item, speech = "", name = ""):
self.name = name
self.trigger_item = trigger_item
self.speech = speech
self.health = 20
def deliver_speech(self):
print("\nThe patient runs towards you intent on attacking you")
time.sleep(2)
print("but when they get closer, they recognise an item you have.")
time.sleep(2)
print("\nIt used to belong to them. Remembering themselves, they tell you:")
print(self.speech)
def attack(self):
print("\nThe patient runs towards you intent on attacking you")
#Intended damage distribution (the roll below currently uses a uniform randint(10, 50) instead):
#10% of the time: 40 - 60 damage
#20% of the time: 30 - 40
#30% of the time: 20 - 30
#40% of the time: 10 - 20
if self.health <= 5:
print("but they're too injured")
return 0
else:
damage_dealt = random.randint(10,50)
return damage_dealt
def decide_action(self, player_inv):
if self.trigger_item in player_inv:
self.deliver_speech()
return 0
else:
damage = self.attack()
return damage
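# A minimal usage sketch (hypothetical item/inventory names, not part of the original file):
# npc = NPC(trigger_item="silver locket", speech="...", name="Patient 7")
# damage = npc.decide_action(["torch", "silver locket"])  # prints the speech and returns 0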
| marivielle/KFC | NPC.py | NPC.py | py | 1,239 | python | en | code | 0 | github-code | 6 | 5518662883 |
#!/usr/bin/python3
import sys, getopt
#Replace version number in html files
def replace_version (current_version, new_version):
#Files where version number will be replaced
files = ['index.html', 'article.html', './write/index.html']
#Goes through the array replacing the version in each file
for file_name in files:
with open (file_name) as f:
newFileContent = f.read ().replace (current_version, new_version)
with open (file_name, 'w') as f:
f.write (newFileContent)
#Parses and validates command line arguments
def parse_arguments (argv):
from_version = ''
to_version = ''
try:
opts, args = getopt.getopt (argv[1:], '', ['help', 'from=', 'to='])
except getopt.GetoptError:
print ('Usage: new_version.py --from <current_version> --to <new_version>')
sys.exit ()
for opt, arg in opts:
if opt == '--help':
print ('Usage: new_version.py --from <current_version> --to <new_version>')
sys.exit ()
elif opt in ('--from'):
from_version = arg
elif opt in ('--to'):
to_version = arg
else:
print ('Usage: new_version.py --from <current_version> --to <new_version>')
sys.exit ()
if (from_version == '' or to_version == ''):
print ('Usage: new_version.py --from <current_version> --to <new_version>')
sys.exit ()
else:
#Returns parsed arguments
return (from_version, to_version)
# Parses command line arguments --from and --to, then executes the replacement
if __name__ == '__main__':
from_version, to_version = parse_arguments (sys.argv)
replace_version (from_version, to_version)
| willgcr/mblog | new_version.py | new_version.py | py | 1,521 | python | en | code | 2 | github-code | 6 | 30728277710 |
import fileinput
from typing import Counter
ll = [l.strip() for l in fileinput.input()]
numbers = []
for line_nr in range(len(ll)):
l = ll[line_nr]
numbers = [int(x) for x in l.split(',')]
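# The Counter below tracks how many fish share each internal timer value (0-8),
# so each simulated day is a constant-time bucket rotation rather than a growing per-fish list.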
def count_fishes(days):
dd = Counter(numbers)
for _ in range(days):
new_fishes = dd[0]
for i in range(0, 8):
dd[i] = dd[i+1]
dd[6] += new_fishes
dd[8] = new_fishes
return sum([dd[i] for i in dd])
print(count_fishes(80), count_fishes(256))
| mdaw323/alg | adventofcode2021/6.py | 6.py | py | 498 | python | en | code | 0 | github-code | 6 | 3848748609 |
from setuptools import setup, Extension
condor_module = Extension('condor',
sources=['c/condor.c', 'c/glutils.c'],
libraries=['GLEW', 'glfw'])
setup (name='Condor',
version='0.1',
description='',
ext_modules=[condor_module])
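# Typical build invocation for a setuptools C extension like this one (assumes GLEW and GLFW are installed):
# python setup.py build_ext --inplace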
| enricozb/Condor | condor/setup.py | setup.py | py | 301 | python | en | code | 0 | github-code | 6 | 39685754485 |
with open('input.txt', 'r') as f:
priorities = 0
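# Each rucksack line is split into two halves; the item type present in both halves is scored
# via its index in the a-z, A-Z lookup string below (1-26 and 27-52 respectively).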
for line in f:
line = line.strip()
l = len(line)//2
s1, s2 = line[l:], line[:l]
for c in s1:
if c in s2:
priorities += 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'.index(c) + 1
break
print(priorities)
| SalmonA/adventofcode | 2022/03/03_1.py | 03_1.py | py | 320 | python | en | code | 0 | github-code | 6 | 19411076487 |
def create_offering(newOffering):
classTimesArray = []
if newOffering.classTimes:
for classTime in newOffering.classTimes:
classTime = {
u'location': classTime.location,
u'startTime': classTime.startTime,
u'endTime': classTime.endTime,
u'sunday': classTime.sunday,
u'monday': classTime.monday,
u'tuesday': classTime.tuesday,
u'wednesday': classTime.wednesday,
u'thursday': classTime.thursday,
u'friday': classTime.friday,
u'saturday': classTime.saturday
}
classTimesArray.append(classTime)
extrasDict = {
u'Attributes': newOffering.attributes,
u'Levels':newOffering.levels,
u'Total Seats': newOffering.totalSeats,
u'Taken Seats': newOffering.takenSeats,
u'Total Waitlist Seats': newOffering.totalWaitlistSeats,
u'Taken Waitlist Seats': newOffering.takenWaitlistSeats
}
return {
u'sectionNumber': newOffering.sectionNumber,
u'status': newOffering.status,
u'id': newOffering.id,
u'instructors': newOffering.instructors,
u'classTimes': classTimesArray,
u'extras': extrasDict
}
class Offering:
status = None
levels = None
id = None
departmentName = None
departmentAcronym = None
departmentNumberString = None
departmentNumber = None
sectionNumber = None
name = None
credit = None
classTimes = None
startDate = None
endDate = None
comment = None
attributes = None
booksLink = None
bulletinLink = None
description = None
instructors = None
totalSeats = None
takenSeats = None
totalWaitlistSeats = None
takenWaitlistSeats = None
class ClassTime:
location = None
startTime = None
endTime = None
sunday = False
monday = False
tuesday = False
wednesday = False
thursday = False
friday = False
saturday = False
import requests
from datetime import datetime
import pytz
from pytz import timezone
eastern = timezone('EST')
import re
from bs4 import BeautifulSoup
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import google.cloud.exceptions
import urllib
print ("-------- EMERSON COURSE SCRAPE ----------")
cred = credentials.Certificate('./credentials.json')
firebase_admin.initialize_app(cred)
#
db = firestore.client()
# Make request and load offerings
data = {'begin_ap':'a','begin_hh':'0','begin_mi':'0','end_ap':'a','end_hh':'0','end_mi':'0',
'sel_attr':['dummy','%'],'sel_camp':['dummy','%'],'sel_crse':'','sel_day':'dummy','sel_from_cred':'',
'sel_insm':'dummy','sel_instr':['dummy','%'],'sel_levl':['dummy','%'],'sel_ptrm':['dummy','%'],
'sel_schd':['dummy','%'],'sel_sess':'dummy','sel_subj':['dummy','BC','MB','CM','CD','CC','DA','DD','EC',
'EXT','FL','LF','HI','HS','IN','JR','LI','MK','MT','MU','PA','PH','PL','PF','PDE','CE','PS','PB','RL',
'SOC','SA','SC','SW','SO','LS','TH','VM','WDC','WR'],'sel_title':'','sel_to_cred':'','term_in':'201910'}
url = "https://ssb.emerson.edu/PURPLE/bwckschd.p_get_crse_unsec"
# get departments and instructors first
print("Fetching homepage...")
dataHomepage = dict(data)
dataHomepage['sel_subj'] = 'dummy'
r = requests.post(url, data=dataHomepage)
soup = BeautifulSoup(r.content, "html.parser")
unlistedDepts = {
"Bsns of Creative Enterprises": "BC",
"Civic Media": "CM",
"External Program Course": "EXT VAL",
"Prof Development Experience":"PDE",
"School of Communication":"SOC",
"Washington Program":"DC"
}
print("Page fetched. Uploading departments...")
departments = soup.find('td', class_='dedefault').find_all('option')
departmentsArray = []
for department in departments:
info = department.text.split("(")
if len(info)>1:
deptDict = {
u'departmentAcronym':re.sub('[^A-Z]','', info[1].strip()),
u'departmentName':info[0].strip()
}
else:
deptDict = {
u'departmentAcronym':unicode(unlistedDepts[info[0].strip()]),
u'departmentName':info[0].strip()
}
departmentsArray.append(deptDict)
doc_ref = db.collection(u'schools/emerson/lists').document('departments')
doc_ref.set({u'list':departmentsArray})
print("Departments uploaded. Uploading instructors...")
instructors = soup.find('select', attrs={"name": "sel_instr"}).find_all('option')
instructorsArray = []
for p in range(1,len(instructors)):
instructor = re.sub(' +', ' ',instructors[p].text.strip())
if not instructor in instructorsArray:
instructorsArray.append(instructor)
doc_ref = db.collection(u'schools/emerson/lists').document('instructors')
doc_ref.set({u'list':instructorsArray})
print("Instructors uploaded. Uploading courses. Fetching all courses on one page...")
# Long, full networking request
r = requests.post(url, data=data)
print("Page fetched. Parsing and uploading...")
soup = BeautifulSoup(r.content,"html.parser")
# Speedier file test
# file = urllib.urlopen("file:///Users/timtraversy/Google Drive//Development/Course Gnome/code/GWU-Scrape-Python/test.html")
# soup = BeautifulSoup(file,"html.parser")
offering_table = soup.find('table', class_='datadisplaytable')
offerings = offering_table.find_all('tr', recursive=False)
courseArray = []
# Loop over offerings two at a time to get both data pieces
count = 0
for i in range(0,len(offerings),2):
# Set up offering object
newOffering = Offering()
data = offerings[i].text.split(' - ')
# Hack to account for class names that have a " - "
offset = 0
if len(data) > 4:
concatName = data[0].strip()
for m in range(1, len(data)-3):
concatName += " - "
concatName += data[m].strip()
offset += 1
newOffering.name = concatName
else:
newOffering.name = data[0].strip()
if newOffering.name == 'Cancelled':
continue
newOffering.id = data[1+offset].strip()
newOffering.departmentAcronym = data[2+offset].strip().split(' ')[0]
if newOffering.departmentAcronym == "EXT":
newOffering.departmentAcronym = unicode("EXT VAL")
newOffering.departmentName = unicode("External Program Course")
else:
for dept in departmentsArray:
if dept[u'departmentAcronym'] == newOffering.departmentAcronym:
newOffering.departmentName = dept[u'departmentName']
newOffering.departmentNumber = data[2+offset].strip().split(' ')[1]
newOffering.sectionNumber = data[3+offset].strip()
# Get seat details + status
url = "https://ssb.emerson.edu" + offerings[i].find('a')['href']
r = requests.post(url)
detailSoup = BeautifulSoup(r.content,"html.parser")
seats = detailSoup.find_all('td', class_="dddefault")
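# seats[1] and seats[2] below are read as the total and taken seat counts from the detail table;
# the waitlist cells (indices 4 and 5) are currently left commented out.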
# Seats
newOffering.totalSeats = seats[1].text
newOffering.takenSeats = seats[2].text
# newOffering.totalWaitlistSeats = seats[4].text
# newOffering.takenWaitlistSeats = seats[5].text
# Status
# Seat counts are scraped as strings, so compare them as integers;
# the waitlist counts above are commented out, so treat a missing (None) value as no waitlist
if int(newOffering.totalSeats) > int(newOffering.takenSeats):
newOffering.status = u'OPEN'
elif newOffering.totalWaitlistSeats in (None, '0'):
newOffering.status = u"CLOSED"
else:
newOffering.status = u"WAITLIST"
# get levels and attributes
data = offerings[i+1].find_all('span')
for span in data:
if span.text.strip() == 'Levels:':
newOffering.levels = span.next_sibling.strip()
elif span.text.strip() == 'Attributes:':
newOffering.attributes = span.next_sibling.strip()
# Credits
catalog_entry = offerings[i+1].find('a')
credits = catalog_entry.previous_sibling.previous_sibling.previous_sibling.strip()
credits = re.sub('Credits','', credits).strip()
credits = re.sub('\.0+','', credits).strip()
credits = re.sub('OR','or', credits)
credits = re.sub('TO','to', credits)
credits = re.sub(' +',' ', credits)
newOffering.credit = unicode(credits)
# Description from catalog entry
url = "https://ssb.emerson.edu" + catalog_entry['href']
r = requests.post(url)
catalogSoup = BeautifulSoup(r.content,"html.parser")
newOffering.description = catalogSoup.find('td', class_="ntdefault").text.split('\n')[1].strip()
#Class Times
instructors = []
classTimes=[]
class_time_table = offerings[i+1].find('table',class_='datadisplaytable')
if class_time_table:
class_time_table = class_time_table.find_all('tr')
for j in range(1,len(class_time_table)):
newClassTime = ClassTime()
details = class_time_table[j].find_all('td',class_='dddefault')
for k in range (1,len(details)):
text = details[k].text.strip()
valid = True
if k == 1:
if text != 'TBA':
times = text.split('-')
newClassTime.startTime = eastern.localize(datetime.strptime(times[0].strip(), '%I:%M %p'))
newClassTime.endTime = eastern.localize(datetime.strptime(times[1].strip(), '%I:%M %p'))
else:
valid = False
break
if k == 2:
if 'U' in text:
newClassTime.sunday = True
if 'M' in text:
newClassTime.monday = True
if 'T' in text:
newClassTime.tuesday = True
if 'W' in text:
newClassTime.wednesday = True
if 'R' in text:
newClassTime.thursday = True
if 'F' in text:
newClassTime.friday = True
if 'S' in text:
newClassTime.saturday = True
if k == 3:
# location
newClassTime.location = text
if k == 6:
insts = re.sub('\([A-z]\)','', text).split(',')
for inst in insts:
if inst == "TBA":
instructors = None
break
newInst = inst.strip()
if not newInst in instructors:
instructors.append(newInst)
if valid:
classTimes.append(newClassTime)
if classTimes:
newOffering.classTimes = classTimes
if instructors:
newOffering.instructors = instructors
courseArray.append(newOffering)
print('Parsed: {id}, Count:{len}'.format(id=unicode(newOffering.id), len=len(courseArray)))
count = 0
for indx, course in enumerate(courseArray):
offeringsArray = [create_offering(course)]
index = indx + 1
while index < len(courseArray):
courseTwo = courseArray[index]
if (course.name == courseTwo.name and course.departmentNumber == courseTwo.departmentNumber and course.departmentAcronym == courseTwo.departmentAcronym):
offeringsArray.append(create_offering(courseTwo))
del courseArray[index]
else:
index += 1
dictionary = {
u'departmentName': course.departmentName,
u'departmentAcronym': course.departmentAcronym,
u'departmentNumber': course.departmentNumber,
u'name': course.name,
u'credit': course.credit,
u'description': course.description,
u'offerings': offeringsArray,
}
identifier = unicode(course.departmentAcronym + str(course.departmentNumber))
db.collection(u'schools/emerson/fall2018_courses').document(identifier).set(dictionary)
count += 1
print('Uploaded ({count}/{total}): {id}'.format(count=count, total=len(courseArray), id=course.id))
# Updating version number
doc_ref = db.collection(u'schools').document(u'emerson')
try:
doc = doc_ref.get()
version = doc.to_dict()['version']
print(u'Updating from version {}'.format(doc.to_dict()['version']))
doc_ref.set({u'version':version + 1})
except google.cloud.exceptions.NotFound:
print(u'No metadata, something is wrong.')
exit(1)
print ("----- EMERSON COURSE SCRAPE COMPLETE ------")
| timtraversy/GWU-Scrape-Python | emerson-scrape.py | emerson-scrape.py | py | 12,340 | python | en | code | 0 | github-code | 6 | 25022064101 |
# http://docs.python.org/library/htmlparser.html
from html.parser import HTMLParser
class MyHTMLParser(HTMLParser):
def handle_starttag(self, tag, attrs):
print("start tag: %s" % tag)
def handle_endtag(self, tag):
print("end tag: %s" % tag)
def main():
page="<a color=black>poo</a>"
page ="""
<html>
<head>
<title>test</title>
</head>
<body>
</body>
</html>
"""
myparser = MyHTMLParser()
myparser.feed(page)
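# Expected output: a "start tag"/"end tag" line for each of html, head, title and body,
# since only handle_starttag and handle_endtag are overridden above.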
if __name__ == '__main__':
main()
| ahbaid/learn | python/scae/class-08/html1.py | html1.py | py | 495 | python | en | code | 1 | github-code | 6 | 13658425408 |
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
def summarize_qc_resamples(input_df, verbose=False, **resample_kwargs):
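# Resamples the input DataFrame (per the pandas resample kwargs), runs the custom `df.qc.describe()`
# accessor on each chunk, and stacks the results into a (measure, variable, time) DataArray;
# the for/else below only collects the row/column labels once the loop has finished.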
time_list = list()
data_list = list()
for time, df in input_df.resample(**resample_kwargs):
if verbose == True:
print("Currently working on: {}".format(time))
time_list.append(time)
df_stats = df.qc.describe()
data_list.append(df_stats.values)
else:
measures = df_stats.index.to_list()
variables = df.columns.to_list()
attrs = resample_kwargs
return xr.DataArray(np.dstack(data_list),
coords = [measures, variables, time_list],
dims = ['measure','variable','time'],
name = "qc_summary",
attrs = attrs)
| wangsen992/pyqc | src/pyqc/tools.py | tools.py | py | 855 | python | en | code | 0 | github-code | 6 | 19504742337 |
from igraph import Graph
from igraph import plot
grafo = Graph(edges = [(0,1),(2,3),(0,2),(0,3)], directed = True)
grafo.vs['label'] =['Fernando', 'Pedro', 'Jose', 'Antonio']
grafo.vs['nota'] = [100, 40, 60, 20]
grafo.es['tipoAmizade'] = ['Amigo', 'Inimigo', 'Amigo', 'Amigo']  # one label per edge; the graph has 4 edges (the 4th label is an assumed placeholder)
grafo.es['devendo'] = [1,3,2,5]
grafo.vs['color'] = ['red', 'yellow','orange', 'green']
plot(grafo, bbox =(300,300),
vertex_size = grafo.vs['nota'],
edge_width = grafo.es['devendo'],
vertex_color = grafo.vs['color'],
edge_curved = 0.4,
vertex_shape = 'square')
| guibarreta1993Average/data_science_udemy | 05_Grafos/aula34_impressao.py | aula34_impressao.py | py | 557 | python | en | code | 0 | github-code | 6 | 31148205537 |
import argparse
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
import pandas as pd
import numpy as np
import json
import os
def parse_args():
parser = argparse.ArgumentParser(prog='')
parser.add_argument('json', type=str, help='Figure1 JSON.')
parser.add_argument('-o', '--output_dir', default='.', help='')
args = parser.parse_args()
return(args)
def b(paths, outfile):
dar_enrich = pd.read_csv(paths['figure6']['b']['dar_enrichment'], sep='\t')
fp_enrich = pd.read_csv(paths['figure6']['b']['footprint_enrichment'], sep='\t')
f, axes = plt.subplots(1,2, num='b', figsize=(12, 6))
fp_logp = fp_enrich['pval_enrichment'].map(lambda x: -1*np.log10(x))
fp_logp = fp_logp.rename('footprint enrichments')
dar_logp = dar_enrich['pval_enrichment'].map(lambda x: -1*np.log10(x))
dar_logp.sort_values(ascending=False, inplace=True)
dar_logp = dar_logp.rename('top DAR enrichments')
dar_logp = dar_logp[:10]
sns.set_style("whitegrid")
sns.kdeplot(dar_logp, shade=True, color="#E74C3C", ax=axes[0])
sns.kdeplot(fp_logp, shade=True, color="#3498DB", ax=axes[0])
axes[0].set_xlabel('-log10 pval', fontsize=15)
def label_point(x, y, val, ax):
a = pd.concat({'x': x, 'y': y, 'val': val}, axis=1)
for i, point in a.iterrows():
ax.text(point['x']+.02, point['y'], str(point['val']), fontsize=10)
def rand_jitter(arr, c):
stdev = c*(max(arr)-min(arr))
return arr + stdev
fp_enrich['pval_enrichment'] = -1*np.log10(fp_enrich['pval_enrichment'])
fp_enrich.sort_values('pval_enrichment', ascending=False, inplace=True)
fp_enrich.reset_index(drop=True, inplace=True)
sns.scatterplot(x=fp_enrich.index.tolist(), y='pval_enrichment', data=fp_enrich, ax=axes[1])
# label_point(pd.Series(fp_enrich.index.tolist()[:10]), fp_enrich['pval_enrichment'][:10], fp_enrich['name'][:10], axes[1])
axes[1].set_xticks([])  # call the method; assigning a string would just shadow it
f.savefig(outfile, dpi=300)
def c(paths, outfile):
fp_enrich = pd.read_csv(paths['figure6']['c'], sep='\t')
hic_hit = fp_enrich[fp_enrich['name']=='ZNF416-Zf']
hic_df = pd.melt(hic_hit, id_vars=None, value_vars=['target_freq', 'bg_freq'], var_name='enrichment group', value_name='% total footprints')
hic_df.sort_values('enrichment group', inplace=True)
sns.set_style("whitegrid")
f, axes = plt.subplots(1,1, num='c', figsize=(12, 12))
palette = ['#ABB2B9','#A569BD']
sns.barplot(x='enrichment group', y='% total footprints', data=hic_df, palette=palette, ax=axes)
axes.set_xlabel('', fontsize=15)
axes.set_xticks([])
axes.set_xticklabels([])
axes.set_ylabel('')
f.savefig(outfile, dpi=300)
def main():
args = parse_args()
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
with open(args.json) as fp:
paths = json.load(fp)
bof = os.path.join(args.output_dir, 'Figure6b.png')
cof = os.path.join(args.output_dir, 'Figure6c.png')
b(paths, bof)
c(paths, cof)
if __name__ == '__main__':
main()
| perezja/Leukos | presentation/figure6/figure6.py | figure6.py | py | 3,116 | python | en | code | 0 | github-code | 6 | 5657507234 |
import os
from functools import reduce
class Photo:
id = None
layout = None # v or h
tags = []
def __init__(self, id, layout, tags):
self.id = id
self.layout = layout
# self.tagalf = "".join(sorted(tags))
self.tagalf = tuple(sorted(tags))
self.tags = tags
def __str__(self):
return str(self.id) + " - " + " ".join(self.tags)
class Slide:
# 2 vertical or 1 horizontal
photo_ids = []
tags = []
def __init__(self, photos):
self.photo_ids = [str(photo.id) for photo in photos]
self.tags = set(reduce(list.__add__, map(lambda x: list(x.tags), photos)))
self.tags_sorted = tuple(sorted(list(self.tags)))
def __str__(self):
return " ".join([str(x) for x in self.photo_ids]) + " - " + " ".join([str(x) for x in self.tags])
class SlideShow:
slides = []
def __init__(self, slides=None):
self.slides = [] if slides is None else slides
def calculate_score(self):
if len(self.slides) == 0:
return 0
score = 0
for i, slide in enumerate(self.slides):
score += self.interest_factor(i)
return score
def interest_factor(self, i):
if i + 1 >= len(self.slides):
return 0
return interest_factor(self.slides[i], self.slides[i + 1])
def interest_factor(slide_1, slide_2):
""" interest of slides
Minimum between
the number of common tags between Si and Si+1
the number of tags in Si but not in Si+1
the number of tags in Si+1 but not in Si
"""
common = set(slide_1.tags) & set(slide_2.tags)
n_common = len(common)
n_left = len(slide_1.tags) - len(set(slide_1.tags) & set(common))
n_right = len(slide_2.tags) - len(set(common) & set(slide_2.tags))
return min(n_common, n_left, n_right)
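# Worked example: tags {a, b, c} vs {b, c, d} -> 2 common, 1 only in the first, 1 only in the second -> factor 1.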
def n_common_tags(slide_1, slide_2):
# return len(set(slide_1.tags) & set(slide_2.tags))
return len(set(slide_1.tags).intersection(slide_2.tags))
def read_input(filepath):
with open(filepath, 'r') as f:
n = int(f.readline())
i = 0
result = []
while i < n:
line = f.readline()[:-1].split(" ")
result.append(Photo(i, line[0], line[2:]))
i += 1
return result
def write_output(slideshow, output_file):
with open(output_file, "w") as f:
f.write(str(len(slideshow.slides)) + "\n")
for slide in slideshow.slides:
f.write(' '.join(slide.photo_ids) + "\n")
with open(output_file, 'rb+') as f:
f.seek(-2, os.SEEK_END)
f.truncate()
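# Greedy heuristic: sort photos by their sorted tag tuple so similar tag sets end up adjacent,
# pair consecutive vertical photos into a single slide, then sort the slides by tags as well.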
def get_slideshow(photos):
slideshow = SlideShow()
vert = None
slides = []
for photo in sorted(photos, key=lambda x: x.tagalf):
if photo.layout == "H":
slides.append(Slide([photo]))
elif photo.layout == "V" and vert is None:
vert = photo
elif photo.layout == "V" and vert is not None:
slides.append(Slide([photo, vert]))
vert = None
slides.sort(key=lambda x: x.tags_sorted)
return SlideShow(slides)
def main():
files = ['a_example.txt', 'b_lovely_landscapes.txt', 'c_memorable_moments.txt', 'd_pet_pictures.txt',
'e_shiny_selfies.txt']
sum_score = 0
for file in files:
print(file)
photos = read_input(file)
slideshow = get_slideshow(photos)
score = slideshow.calculate_score()
sum_score += score
print("SCORE: {}\n".format(score))
write_output(slideshow, "output/" + file)
print("END, {}".format(sum_score))
return None
if __name__ == "__main__":
main()
| phyx4/hashcode_2019 | main.py | main.py | py | 3,664 | python | en | code | 0 | github-code | 6 | 24931817284 |
from json import dumps, loads
from State import State
class Api:
"""
A class that provides methods for encoding and decoding
States to and from JSON strings.
Methods:
- Encode(states: list[State]) -> str:
Encodes a list of State objects to a JSON string.
- Decode(jsonString: str) -> State:
Decodes a JSON string to a State object.
"""
@staticmethod
def Encode(states: list[State]) -> str:
"""
Encodes a list of State objects to a JSON string.
Args:
- states (list[State]):
A list of State objects to encode.
Returns:
- str:
A JSON string representing the list of State objects.
"""
return dumps([state.__dict__ for state in states])
@staticmethod
def Decode(jsonString: str) -> State:
"""
Decodes a JSON string to a State object.
Args:
- jsonString (str):
A JSON string to decode.
Returns:
- State:
A State object representing the decoded JSON string.
"""
obj = loads(jsonString)
return State(
obj['Board'],
obj['Direction'],
(obj['EmptyPoint']['X'], obj['EmptyPoint']['Y'])
)
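# Usage sketch (hypothetical values, matching the keys Decode expects):
# json_str = Api.Encode([some_state])
# state = Api.Decode('{"Board": [[1, 2], [3, 0]], "Direction": "up", "EmptyPoint": {"X": 1, "Y": 1}}')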
| Saeed-Ayman/8-puzzle | API.py | API.py | py | 1,287 | python | en | code | 1 | github-code | 6 | 712141287 |
#! /usr/bin/env python3
# coding: utf-8
import os
import logging as lg
import pandas as pd
import numpy as np
lg.basicConfig(level=lg.DEBUG)
class SetOfParliamentMembers:
def __init__(self, name):
self.name = name
def __repr__(self):
return "setOfParliamentMember: {} members".format(len(self.dataframe))
def data_from_csv(self, csv_file):
self.dataframe = pd.read_csv(csv_file, sep=";", engine = 'python')
def data_from_dataframe(self, dataframe):
self.dataframe = dataframe
def display_chart(self):
# coming soon, be patient!
pass
def split_by_political_party(self):
result = {}
data = self.dataframe
all_parties = data["parti_ratt_financier"].dropna().unique()
for party in all_parties:
data_subset = data[data.parti_ratt_financier == party]
subset = SetOfParliamentMembers('MPs from party "{}"'.format(party))
subset.data_from_dataframe(data_subset)
result[party] = subset
return result
def launch_analysis(data_file, by_party=False, info=False):
sopm = SetOfParliamentMembers("All MPs")
sopm.data_from_csv(os.path.join("data", data_file))
sopm.display_chart()
if by_party:
for party, s in sopm.split_by_political_party().items():
s.display_chart()
if info:
print(sopm)
if __name__ == "__main__":
launch_analysis("current_mps.csv")
| honorezemagho/python-oc | analysis/csv.py | csv.py | py | 1,496 | python | en | code | 0 | github-code | 6 | 7276876468 |
from django.db import models
from django.contrib.auth.models import User
class Animal(models.Model):
"""Класс описывает объект Животное"""
owner = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="Владелец")
species = models.CharField(max_length=30, verbose_name="Вид животного")
name = models.CharField(max_length=30, verbose_name="Кличка")
birth = models.DateField(verbose_name="Дата рождения")
breed = models.CharField(max_length=30, verbose_name="Порода")
gender = models.CharField(
max_length=10, choices=[("Ж", "Женский"), ("М", "Мужской")], verbose_name="Пол"
)
class Meta:
verbose_name = "Животное"
verbose_name_plural = "Животные"
def __str__(self):
return self.name
class Vaccination(models.Model):
"""Класс описывающий объект Вакцинация"""
animal = models.ForeignKey(
Animal, on_delete=models.CASCADE, verbose_name="Животное"
)
date = models.DateField(verbose_name="Дата прививки")
vaccine = models.CharField(max_length=50, verbose_name="Вакцина")
class Meta:
verbose_name = "Вакцинация"
verbose_name_plural = "Вакцинация"
def __str__(self):
return f"{self.date}"
class Treatment(models.Model):
"""Класс описывающий объект Обратока от паразитов"""
animal = models.ForeignKey(
Animal, on_delete=models.CASCADE, verbose_name="Животное"
)
parasite_type = models.CharField(
max_length=10,
choices=[("Гельминты", "Гельминты"), ("Клещи", "Клещи")],
verbose_name="Вид паразитов",
)
date = models.DateField(verbose_name="Дата обработки")
medication = models.CharField(max_length=50, verbose_name="Препарат")
dosage = models.CharField(max_length=10, verbose_name="Дозировка")
class Meta:
verbose_name = "Обработка от паразитов"
verbose_name_plural = "Обработка от паразитов"
def __str__(self):
return f"{self.date}"
| Gamilkar/animal_medical_record | main/models.py | models.py | py | 2,320 | python | ru | code | 0 | github-code | 6 | 12510085973 |
from tqdm import tqdm
import math
import time
import numpy as np
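# The feature extractors below each produce a small fixed-size block per utterance
# (e.g. bingliu_mpqa: positive-token count, net polarity, an "any positive" flag, last-token polarity);
# lexicons() concatenates all blocks into a 56-dimensional vector per utterance.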
def bingliu_mpqa(utterance_tokenized, file):
feat_ = []
dict1_bing = {}
for line in file:
x = line.split("\t")
dict1_bing[x[0] + "_" + x[1][:-1]] = 1
i=0
for tokens in utterance_tokenized:
res = np.array([0,0,0,0])
for token in tokens:
pos = (token + "_positive")
neg = (token + "_negative")
if (pos in dict1_bing):
res[0]+=1
res[1]+=1
elif (neg in dict1_bing):
res[1]-=1
if res[0]>0:
res[2]=1
if tokens!=[]:
pos = tokens[-1] + "_positive"
neg = tokens[-1] + "_negative"
if pos in dict1_bing:
res[3]=1
elif neg in dict1_bing:
res[3]=-1
feat_.append(res)
return np.array(feat_)
def SENT140(X):
#sentiment140
dict1_S140 = {}
with open("lexicons/3. Sentiment140-Lexicon-v0.1/unigrams-pmilexicon.txt", 'r') as fd:
for line in fd:
x = line.split(" ")
dict1_S140[x[0]] = float(x[1])
feat_ = []
for tokens in X:
sent140 = [0,0,0,0]
cnt = 0
for token in tokens:
if("#" not in token):
cnt += 1
if(token in dict1_S140):
sent140[0] += (dict1_S140[token] > 0)
sent140[1] += dict1_S140[token]
sent140[2] = max(sent140[2],dict1_S140[token])
if(len(tokens) >= 1 and tokens[-1] in dict1_S140):
sent140[3] = (dict1_S140[tokens[-1]] > 0)
feat_.append(sent140)
return np.array(feat_)
# print()
def NRC_EMOTION(X):
#NRC emotion
dict1_NRC = {}
cnt_r = 0
len1 = 0;
with open("lexicons/6. NRC-10-expanded.csv", 'r') as fd:
for line in fd:
if(cnt_r == 0):
cnt_r += 1
continue;
x = line.split(" ")
dict1_NRC[x[0]] = [float(i) for i in x[1:]]
len1 = len(x[1:])
feat_ = []
for e,tokens in tqdm(enumerate(X)):
emo_score = [[0,0,0,0] for i in range(len1)]
cnt = 0
for token in tokens:
if("#" in token):
continue
cnt += 1
if(token in dict1_NRC):
for i,val in enumerate(dict1_NRC[token]):
emo_score[i][0] += (val > 0)
emo_score[i][1] += val
emo_score[i][2] = max(emo_score[i][2],val)
if(len(tokens) >= 1 and tokens[-1] in dict1_NRC):
for i,val in enumerate(dict1_NRC[token]):
emo_score[i][3] = (val > 0)
res = []
for i in emo_score:
res.extend(i)
feat_.append(res)
return np.array(feat_)
# print()
def NRC_HASHTAG_SENT(X):
#NRC hashtag
dict1_NRC = {}
with open("lexicons/7. NRC-Hashtag-Sentiment-Lexicon-v0.1/unigrams-pmilexicon.txt", 'r') as fd:
for line in fd:
x = line.split(" ")
dict1_NRC[x[0]] = float(x[1])
feat_ = []
for tokens in X:
cnt = 0
f = [0,0,0,0]
for token in tokens:
if("#" not in token):
continue
cnt += 1
if(token in dict1_NRC):
f[0] += (dict1_NRC[token] > 0)
f[1] += dict1_NRC[token]
f[2] = max(f[2],dict1_NRC[token])
if(len(tokens) >= 1 and tokens[-1] in dict1_NRC):
f[3] = (dict1_NRC[tokens[-1]] > 0)
feat_.append(f)
return np.array(feat_)
def lexicons(utterance_tokenized):
filebingliu = open("lexicons/1. BingLiu.csv", "r")
filempqa = open("lexicons/2. mpqa.txt", "r")
start = time.time()
bingliu = bingliu_mpqa(utterance_tokenized, filebingliu)
mpqa = bingliu_mpqa(utterance_tokenized, filempqa)
sent140 = SENT140(utterance_tokenized)
nrcemotion = NRC_EMOTION(utterance_tokenized)
nrchashtag = NRC_HASHTAG_SENT(utterance_tokenized)
end = time.time()
print("time to calculate lexicons: ", end-start)
# y = len(bingliu[0]) + len([mpqa[0]]) + len(sent140[0]) + len(nrcemotion[0]) + len(nrchashtag[0])
feature = np.zeros([len(utterance_tokenized), 56])
for i in range(len(utterance_tokenized)):
feature[i] = np.concatenate((bingliu[i], mpqa[i], sent140[i], nrcemotion[i], nrchashtag[i]))
return feature
if __name__ == '__main__':
# `utterance_tokenized` is not defined in this module; a hypothetical minimal input for a smoke test:
utterance_tokenized = [['good', 'movie'], ['terrible', 'plot']]
lexicons(utterance_tokenized)
| hamzah70/Multi_Modal_Emotion_Analysis | lexiconFeatureVector.py | lexiconFeatureVector.py | py | 4,491 | python | en | code | 0 | github-code | 6 | 38353405555 |
import requests
from bs4 import BeautifulSoup #screen-scraping library
#request = requests.get("http://www.google.com")
request = requests.get("https://www.johnlewis.com/house-by-john-lewis-curve-dining-chair-white/p231441579")
content = request.content #getting content of the page
soup = BeautifulSoup(content, "html.parser")
element = soup.find("span",{"itemprop":"price","class":"now-price"}) #dictionary
#print(element.text.strip())
string_price = element.text.strip() #"#£19.00"
price_without_symbol = string_price[1:]
price = (float(price_without_symbol))
if price < 50:
print("You should buy the chair!")
print("The current price is {}.".format(string_price))
else:
print("Don't buy the chair!!")
# <span itemprop="price" class="now-price"> £19.00 </span>
#print(request.content)
| BrayoKane/python-mongo | price-of-a-chair/src/app.py | app.py | py | 811 | python | en | code | 0 | github-code | 6 | 74022415547 |
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from apps.celery_task.models import PeriodicTask
from apps.celery_task.serializers.periodic_task_serializer import PeriodicTaskSerializer, CreatePeriodicTaskSerializer
from packages.drf.pagination import CustomPageNumberPagination
from packages.drf.renderers import CustomRenderer
from packages.drf.viewsets import ModelViewSet
from django_filters import FilterSet
class PeriodicTaskFilter(FilterSet):
class Meta:
model = PeriodicTask
fields = {"name": ["exact"], "creator": ["contains"]}
class PeriodicTaskViewSet(ModelViewSet):
permission_classes = [AllowAny]
queryset = PeriodicTask.objects.all()
serializer_class = PeriodicTaskSerializer
pagination_class = CustomPageNumberPagination
renderer_classes = (CustomRenderer,)
filter_class = PeriodicTaskFilter
ordering_fields = ["id"]
ordering = ["-id"]
def create(self, request, *args, **kwargs):
serializer = CreatePeriodicTaskSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
name = serializer.validated_data["name"]
creator = "test"
serializer.validated_data["name"] = name
serializer.validated_data["creator"] = creator
instance = serializer.save()
instance.set_enabled(True)
return Response(serializer.data, status=status.HTTP_201_CREATED)
@action(methods=["post"], detail=False)
def create_task(self, request, *args, **kwargs):
"""创建任务
{
"name": "test",
"cron": {"minute":"*/5","hour":"*","day_of_week":"*","day_of_month":"*","month_of_year":"*"},
}
"""
params = request.data
cron_data = params.get("cron")
name = params.get("name")
creator = params.get("creator", "test")
periodic_task = PeriodicTask.objects.create_task(name, cron_data, creator)
periodic_task.set_enabled(True)
return Response({"result": "创建成功"})
| yaowuya/django-major-core | apps/celery_task/views/periodic_task_view.py | periodic_task_view.py | py | 2,133 | python | en | code | 0 | github-code | 6 | 18959826347 |
import random
import string
from pprint import pprint as pp
import requests
from allauth.account.models import EmailAddress
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from points.views import new_user_point
from .serializers import *
User = get_user_model()
@api_view(['POST'])
@permission_classes([AllowAny])
def kakao_login_and_get_userinfo(request):
code = request.data.get('code')
headers = {
'Content-type': 'application/x-www-form-urlencoded',
}
body = {
'grant_type': 'authorization_code',
'client_id': 'dcf8cc38ec4e7ec39baf6207a53ed140',
'redirect_uri': 'https://kickin.kr/loading/',
'code': code,
}
response = requests.post(headers=headers, url='https://kauth.kakao.com/oauth/token', data=body)
access_token = response.json().get('access_token')
headers = {
'Authorization': f'Bearer {access_token}',
'Content-type': 'application/x-www-form-urlencoded;charset=utf-8',
}
info_request = requests.get(url='https://kapi.kakao.com/v2/user/me', headers=headers)
info_res = info_request.json()
nickname = info_res.get('properties').get('nickname')
email = info_res.get('kakao_account').get('email')
# Check whether an account has already been registered with this email.
# If not, create a new user; .first() returns the existing instance (or None) instead of a QuerySet.
user = User.objects.filter(email=email).first()
if not user:
user = User.objects.create_user(email=email, password='Kakao_' + nickname + '977')
user.login_type = 1
user.save()
# For Kakao login, the separate email verification step is skipped
EmailAddress.objects.create(user=user, email=email, verified=True, primary=True)
# The user's info is updated above: login_type = 1 (Kakao login)
# Create the user's UserInfo record
user_info, user_created = UserInfo.objects.get_or_create(user=user)
new_user_point(user.id) # Create the user's point record.
# Social-profile data can change at any time, so it is deliberately not stored here.
kakao_profile = info_res.get('kakao_account').get('profile').get('profile_image_url')
kakao_nickname = info_res.get('properties').get('nickname')
# Build the login response payload
response_data = {
'kakao_profile': kakao_profile,
'kakao_nickname': kakao_nickname,
'kakao_email': email, # The email is included so the client can complete login / the password follows the pattern (Kakao_ + nickname + 977)
}
return Response(data=response_data, status=status.HTTP_200_OK)
@api_view(['POST'])
@permission_classes([AllowAny])
def kakao_test(request):
code = request.data.get('code')
headers = {
'Content-type': 'application/x-www-form-urlencoded',
}
body = {
'grant_type': 'authorization_code',
'client_id': 'dcf8cc38ec4e7ec39baf6207a53ed140',
'redirect_uri': 'http://localhost:8080/loading/',
'code': code,
}
response = requests.post(headers=headers, url='https://kauth.kakao.com/oauth/token', data=body)
pp(response.json())
access_token = response.json().get('access_token')
headers = {
'Authorization': f'Bearer {access_token}',
'Content-type': 'application/x-www-form-urlencoded;charset=utf-8',
}
info_request = requests.get(url='https://kapi.kakao.com/v2/user/me', headers=headers)
info_res = info_request.json()
pp(info_res)
return Response(data=info_res, status=status.HTTP_200_OK)
| isaacShin-dev/kickin | accounts/social_views.py | social_views.py | py | 3,846 | python | ko | code | 0 | github-code | 6 | 20093575148 |
# General
import os
import warnings  # needed by the single-community fallback in netgraph_community_layout below
# Tools/utils
import itertools
import multiprocessing
from tqdm.notebook import tqdm
from tqdm import tqdm as tqdm_cli
from functools import reduce # for aggregate functions
from itertools import chain # for aggregate functions
# Data management
import math
import numpy as np
import pandas as pd
import networkx as nx
import igraph as ig
import leidenalg as la
from community import community_louvain
# Visualization
import matplotlib.pyplot as plt
import seaborn as sns
import pygraphviz as pgv
import colorcet as cc
from matplotlib.colors import ListedColormap
from wordcloud import WordCloud, STOPWORDS
from termcolor import colored # colored text output
from sklearn.preprocessing import MinMaxScaler
stopwords = STOPWORDS.union({
'regulation', 'activity', 'positive', 'negative', 'catabolic', 'process', 'protein', 'complex',
'binding', 'response', 'gene', 'genes', 'encoding', 'defining', 'GeneID', 'regulated',
})
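# get_tf_targ_ctx flattens a GRN adjacency table (one row of (target, score) tuples per TF,
# e.g. as produced by GRNBoost2/pySCENIC) into a long-format TF / target / importance DataFrame.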
def get_tf_targ_ctx(df):
tf_target_dict = {'TF': [], 'target': [], 'importance': []}
tf_target_info = (
df.droplevel(axis=0, level=1).droplevel(axis=1, level=0)['TargetGenes']
.map(set) # transform each list into set
.groupby('TF').agg(lambda x: reduce(lambda a, b: a.union(b), x)) # combine all targets per TF
)
for tf, target_info in tf_target_info.items():  # Series.iteritems() was removed in recent pandas
tf_target_dict['TF'] += [tf for target_name, score in target_info]
tf_target_dict['target'] += [target_name for target_name, score in target_info]
tf_target_dict['importance'] += [score for target_name, score in target_info]
return pd.DataFrame(tf_target_dict)
def netgraph_community_layout(G, node_to_community, community_scale=1., node_scale=2., seed=42):
"""
Compute the node positions for a modular graph.
"""
# assert that there multiple communities in the graph; otherwise abort
communities = set(node_to_community.values())
if len(communities) < 2:
warnings.warn("Graph contains a single community. Unable to compute a community layout. Computing spring layout instead.")
return nx.spring_layout(G, weight='importance', seed=seed)
community_size = _get_community_sizes(node_to_community)
community_centroids = _get_community_positions(G, node_to_community, community_scale, seed=seed)
relative_node_positions = _get_node_positions(G, node_to_community, node_scale, seed=seed)
# combine positions
node_positions = dict()
for node, community in node_to_community.items():
xy = community_centroids[node]
delta = relative_node_positions[node] * community_size[community]
node_positions[node] = xy + delta
return node_positions
def _get_community_sizes(node_to_community):
"""
Compute the area of the canvas reserved for each community.
"""
def _invert_dict(mydict):
"""Invert a dictionary such that values map to keys."""
inverse = dict()
for key, value in mydict.items():
inverse.setdefault(value, set()).add(key)
return inverse
scale = (1, 1)
total_nodes = len(node_to_community)
max_radius = np.linalg.norm(scale) / 2
scalar = max_radius / total_nodes
community_to_nodes = _invert_dict(node_to_community)
community_size = {community : len(nodes) * scalar for community, nodes in community_to_nodes.items()}
return community_size
def _get_community_positions(G, node_to_community, community_scale, seed, simple=True):
"""
Compute a centroid position for each community.
"""
# create a weighted graph, in which each node corresponds to a community,
# and each edge weight to the number of edges between communities
between_community_edges = _find_between_community_edges(G, node_to_community)
communities = set(node_to_community.values())
hypergraph = nx.DiGraph()
hypergraph.add_nodes_from(communities)
if not simple:
for (ci, cj), edges in between_community_edges.items():
hypergraph.add_edge(ci, cj, weight=len(edges))
# find layout for communities
pos_communities = nx.spring_layout(hypergraph, scale=community_scale, seed=seed)
# set node positions to position of community
pos = dict()
for node, community in node_to_community.items():
pos[node] = pos_communities[community]
return pos
def _find_between_community_edges(G, node_to_community, fixed_community=None):
"""Convert the graph into a weighted network of communities."""
edges = dict()
for (ni, nj) in G.edges():
ci = node_to_community[ni]
cj = node_to_community[nj]
if fixed_community is not None:
if fixed_community != ci and fixed_community != cj:
continue
if ci != cj:
try:
edges[(ci, cj)] += [(ni, nj)]
except KeyError:
edges[(ci, cj)] = [(ni, nj)]
return edges
def _get_node_positions(G, node_to_community, node_scale, seed):
"""
Positions nodes within communities.
"""
communities = dict()
for node, community in node_to_community.items():
try:
communities[community] += [node]
except KeyError:
communities[community] = [node]
pos = dict()
for ci, nodes in communities.items():
subgraph = G.subgraph(nodes)
pos_subgraph = nx.spring_layout(subgraph, weight='importance', scale=node_scale, seed=seed)
pos.update(pos_subgraph)
return pos
def squeeze_graph(G, partition, approximate_size=4000):
"""
Squeeze graph by picking only top nodes (according to number of connections) in each partition. This
step is needed to speed up the networkx visualization and show only the general POV on the graph.
"""
#### STEP 1 - filtering nodes
# Getting the number of partitions
num_partitions = len(set(partition.values()))
# Getting partition parameters
partition_sizes = {i: len([1 for node, k in partition.items() if k == i]) for i in range(num_partitions)}
min_partition_size = min(partition_sizes.values())
# Normalizing partition size: divide each partition size by the minimal partition size
normalized_partition_size = {i: (size // min_partition_size) for i, size in partition_sizes.items()}
# Getting scale factor - to get approximately size of the graph close to approximate_size
scale_factor = math.ceil(approximate_size / sum(normalized_partition_size.values()))
squeezed_partition = {i: (size * scale_factor) for i, size in normalized_partition_size.items()}
top_nodes = []
for i, num_nodes in squeezed_partition.items():
# Getting partition graph
partition_i = G.subgraph([node for node, k in partition.items() if k == i])
# Finding inter-community edges
intercommunity_edges = _find_between_community_edges(G, partition, i)
# Calculating node importance according to number of inter-community edges
node_importance = {}
for (part_1, part_2), edges in intercommunity_edges.items():
for node_1, node_2 in edges:
curr_node = node_1 if part_1 == i else node_2
if curr_node in node_importance:
node_importance[curr_node] += 1
else:
node_importance[curr_node] = 1
# Getting top nodes in the partition according to maximum number of inter-community edge (node_importance)
top_nodes += list(dict(sorted(node_importance.items(), key=lambda x: x[1], reverse=True)[:squeezed_partition[i]]).keys())
filtered_partition = {node: i for node, i in partition.items() if node in top_nodes}
filtered_G = G.subgraph(top_nodes)
#### STEP 2 - filtering edges
# Setting up the size of the squeezed graph (number of edges)
keep_num_edges = 20000
edges_to_keep = \
list(
dict(
sorted(
{
(st, end): data['importance'] for st, end, data in filtered_G.edges(data=True)
}.items(), key=lambda x: x[1], reverse=True)[:keep_num_edges]
).keys()
)
squeezed_G = filtered_G.edge_subgraph(edges_to_keep)
squeezed_partition = {node: i for node, i in filtered_partition.items() if node in squeezed_G.nodes()}
return squeezed_G, squeezed_partition
def get_elipsis_mask():
h, w = 600, 800
center = (int(w/2), int(h/2))
radius_x = w // 2
radius_y = h // 2
Y, X = np.ogrid[:h, :w]
mask = ((X - center[0])**2/radius_x**2 + (Y - center[1])**2/radius_y**2 >= 1)*255
return mask
def plot_cloud(G, partition, squeezed_pos, ax, anno_db, filter_genes=True,
limit_anno_until=50, display_func=False, if_betweenness=True,
k=3000):
"""
Plot word cloud that indicates the function(s) of each gene cluster.
"""
# Loading the gene functional annotation
gene_func = load_gene_func_db(anno_db, reload=False, as_series=True)
# Reversing partition dict -> {group_1: [gene_1, gene_2, ...], group_2: [gene_3, gene_4, ...], ...}
partition_genes_ = {}
for gene, i in partition.items():
if i not in partition_genes_.keys():
partition_genes_[i] = [gene]
else:
partition_genes_[i] += [gene]
# If display gene function in the word clouds
if display_func:
# Whether to filter the genes on which we compute the word cloud (most important genes)
if filter_genes:
compute_centrality = nx.betweenness_centrality if if_betweenness else nx.closeness_centrality
distance_metric = {'weight': 'distance'} if if_betweenness else {'distance': 'distance'}
partition_genes = {}
t = tqdm(partition_genes_.items())
for i, genes in t:
t.set_description(f'Processing cluster {i}, size={G.subgraph(genes).order()}')
top_len = min(limit_anno_until, len(genes))
top_gene_scores = dict(
sorted(
compute_centrality(
G.subgraph(genes), k=min(G.subgraph(genes).order(), k), **distance_metric
).items(),
key=lambda x: x[1], reverse=True
)[:top_len]
)
# Renormalizing centrality scores between 1 and 100, and rounding them to use later when
# displaying wordclouds (higher score - higher "frequency" or word size)
norm_top_gene_scores = dict(
zip(
top_gene_scores.keys(), list(map(lambda x: int(x), scale(list(top_gene_scores.values()), 1, 100)))
)
)
partition_genes[i] = norm_top_gene_scores
print('Filtered genes for generating the function word cloud..')
else:
partition_genes = {i: {gene_: 1 for gene_ in gene_list} for i, gene_list in partition_genes_.items()}  # dict, not set, comprehension
# Computing functional annotation for each cluster as a concatenated list of annotations
# Each annotation is weighted by its duplication gene_score times (e.g. a gene has score = 2 ->
# the functional annotation is duplicated and have bigger font in WordCloud)
partition_funcs = {
i: ' '.join(
chain.from_iterable([
gene_func[gene_func.index == gene].to_list()*gene_score
for gene, gene_score in gene_score_list.items()
])) for i, gene_score_list in partition_genes.items()
}
# Generating word counts from aggregated gene annotation texts -> obtaining main (most frequent) function tokens
word_counts = {i: WordCloud(max_words=30, min_font_size=15, stopwords=stopwords).process_text(text) for i, text in partition_funcs.items()}
word_counts = {
i: (freqs if freqs else {'no found function': 1}) for i, freqs in word_counts.items()
} # dealing with no word case
wordclouds = {
i: WordCloud(
max_words=30, min_font_size=15, stopwords=stopwords, background_color='white', mask=get_elipsis_mask()
).generate_from_frequencies(freqs) for i, freqs in word_counts.items()
}
# Display main genes in decreasing order of importance (top `top_len` genes)
else:
compute_centrality = nx.betweenness_centrality if if_betweenness else nx.closeness_centrality
distance_metric = {'weight': 'distance'} if if_betweenness else {'distance': 'distance'}
partition_genes = {}
t = tqdm(partition_genes_.items())
for i, genes in t:
t.set_description(f'Processing cluster {i}, size={G.subgraph(genes).order()}')
top_len = min(limit_anno_until, len(genes))
top_gene_scores = dict(
sorted(
compute_centrality(
G.subgraph(genes), k=min(G.subgraph(genes).order(), k), **distance_metric
).items(),
key=lambda x: x[1], reverse=True
)[:top_len]
)
# Renormalizing centrality scores between 1 and 100, and rounding them to use later when
# displaying wordclouds (higher score - higher "frequency" or word size)
norm_top_gene_scores = dict(
zip(
top_gene_scores.keys(), list(map(lambda x: int(x), scale(list(top_gene_scores.values()), 1, 100)))
)
)
partition_genes[i] = norm_top_gene_scores
print('Obtained top genes for generating the gene word cloud..')
wordclouds = {
i: WordCloud(
max_words=30, min_font_size=15, background_color='white', mask=get_elipsis_mask()
).generate_from_frequencies(gene_score_dict) for i, gene_score_dict in partition_genes.items()
}
# Plotting
partition_coords = {}
for gene, coords in squeezed_pos.items():
if partition[gene] not in partition_coords:
partition_coords[partition[gene]] = [coords]
else:
partition_coords[partition[gene]] += [coords]
for i, coords in partition_coords.items():
x, y = zip(*coords)
min_x, max_x = min(x), max(x)
min_y, max_y = min(y), max(y)
ax.imshow(wordclouds[i], interpolation='bilinear', extent=[min_x, max_x, min_y, max_y])
return ax
def process_communities(data, pat=None, algo='leiden', filter_quantile=0.95, if_betweenness=True,
limit_anno_until=50, k=5000, save_top_intercommunity_links_until=20,
other_functions_until=20, save_top_new_found_cluster_links=20, seed=42):
"""
Process graph by finding its communities, annotate its communities, and save everything into .tsv format.
"""
from joblib import Parallel, delayed
def highlight_TFs(word, font_size, position, orientation, font_path, random_state):
TF_color = (255, 0, 0) # red
if word in lambert_TF_names or word in dorothea_TF_names:
return TF_color
else:
r, g, b, alpha = plt.get_cmap('viridis')(font_size / 120)
return (int(r * 255), int(g * 255), int(b * 255))
print('\nPerforming community analysis..\n\n')
# Setting pathways to files
_PROJ_PATH = '/gpfs/projects/bsc08/bsc08890'
_FMETA = os.path.join(_PROJ_PATH, 'data/GSE145926_RAW/metadata.tsv')
_DATA_HOME = os.path.join(_PROJ_PATH, 'res/covid_19')
# Loading sample meta data, reordering patients
full_meta = pd.read_csv(_FMETA, sep='\t', index_col=0)
# Prepare everything to save the figs and dataframe
if data == 'all_data':
data = 'raw_data'
elif 'raw_data_' not in data:
data = f'raw_data_{data}'
else:
pass
if pat is None or pat == 'all_data':
# Cell-type aggregated data
data_folder = 'all_data' if data == 'raw_data' else data.replace('raw_data_', '')
figs_as = os.path.join(_DATA_HOME, 'cell_types', data_folder, 'figs', 'grnboost2', f'raw_data')
data_to = os.path.join(_DATA_HOME, 'cell_types', data_folder, 'data', 'grnboost2', f'{algo}_communities')
data_as = os.path.join(data_to, f'raw_data_communities_info.pickle')
elif pat in ['C', 'M', 'S']:
# Patient-type aggregated data
data_folder = 'all_data' if data == 'raw_data' else data.replace('raw_data_', '')
figs_as = os.path.join(_DATA_HOME, 'cell_types', data_folder, 'figs', 'grnboost2',
f'raw_data_{pat}_type')
data_to = os.path.join(_DATA_HOME, 'cell_types', data_folder, 'data', 'grnboost2', f'{algo}_communities')
data_as = os.path.join(data_to, f'raw_data_{pat}_type_communities_info.pickle')
else:
# Loading patient-specific data
figs_as = os.path.join(_DATA_HOME, pat, 'figs', 'grnboost2', f'{data}')
data_to = os.path.join(_DATA_HOME, pat, 'data', 'grnboost2', f'{algo}_communities')
data_as = os.path.join(data_to, f'{data}_communities_info.pickle')
os.makedirs(data_to, exist_ok=True)
os.makedirs(os.path.dirname(figs_as), exist_ok=True)
# Loading lists of TFs from Lambert 2018 and DoRothEA, in the latter case we will keep only confident regulons
lambert_TF_names = pd.read_csv(os.path.join(_PROJ_PATH, 'data/TF_lists/lambert2018.txt'), header=None)[0].to_list()
dorothea_TF_names = list(
pd.read_csv(os.path.join(_PROJ_PATH, 'data/TF_lists/dorothea_regulons.tsv'), sep='\t') \
.loc[lambda x: x['confidence'].isin(['A', 'B', 'C'])]['tf'].unique()
)
# Loading the graph
G = get_nx_graph(data=data, data_type='all', pat=pat, get_filtered=filter_quantile)
print(f"Loaded the graph: {colored('pat', 'green')}='{colored(pat, 'red')}', "
f"{colored('data', 'green')}='{colored(data, 'red')}', "
f"{colored('data_type', 'green')}='{colored('all', 'red')}'\n")
###### FINDING COMMUNITIES IN THE GRAPH #######
print('Finding communities in the graph..')
if algo == 'louvain':
partition = community_louvain.best_partition(G.to_undirected(), weight='importance', random_state=seed)
elif algo == 'leiden':
G_igraph = ig.Graph.from_networkx(G.to_undirected())
la_partition = la.find_partition(G_igraph, la.ModularityVertexPartition, weights='importance', seed=seed)
partition = {G_igraph.vs[node]['_nx_name']: i for i, cluster_nodes in enumerate(la_partition) for node in cluster_nodes}
else:
raise NotImplementedError
num_partitions = len(set(partition.values()))
print(f'Number of partitions using {algo} algorithm: {colored(num_partitions, "cyan")}\n')
###### FINDING HIGH-CENTRALITY GENES IN THE WHOLE GRAPH
print('Finding high-centrality genes in the whole graph..')
num_workers = max(multiprocessing.cpu_count() // 2, 1)
whole_G_central_genes = dict(
sorted(betweenness_centrality_parallel(G, processes=num_workers).items(), key=lambda x: x[1], reverse=True)[:limit_anno_until]
)
print(f'Computed the {"betweenness" if if_betweenness else "closeness"} centrality for all genes in the graph\n')
###### FINDING HIGH-CENTRALITY GENES AND CORRESPONDING FUNCTIONS IN EACH COMMUNITY USING GO ANNOTATION ######
print('Finding high-centrality genes/functions in each cluster..')
# Loading the gene functional annotation
anno_db_tags = ['GO', 'KEGG', 'immunological', 'hallmark']
gene_func_dbs = {tag: load_gene_func_db(tag, as_series=True) for tag in anno_db_tags}
# Reversing partition dict -> {group_1: [gene_1, gene_2, ...], group_2: [gene_3, gene_4, ...], ...}
partition_genes_ = {}
for gene, i in partition.items():
if i not in partition_genes_.keys():
partition_genes_[i] = [gene]
else:
partition_genes_[i] += [gene]
# Whether to filter the genes on which we compute the word cloud (most important genes)
compute_centrality = nx.betweenness_centrality if if_betweenness else nx.closeness_centrality
distance_metric = {'weight': 'distance'} if if_betweenness else {'distance': 'distance'}
all_partition_genes = {}
norm_partition_genes = {}
t = tqdm_cli(partition_genes_.items(), ascii=True)
for i, genes in t:
t.set_description(f'Processing cluster {i}, size={G.subgraph(genes).order()}')
gene_scores = dict(
sorted(
compute_centrality(
G.subgraph(genes), k=min(G.subgraph(genes).order(), k), normalized=True, **distance_metric
).items(),
key=lambda x: x[1], reverse=True
)
)
all_partition_genes[i] = gene_scores
central_gene_scores = {gene: gene_scores[gene] for k, gene in enumerate(gene_scores.keys()) if k < limit_anno_until}
# Renormalizing centrality scores between 1 and 100, and rounding them to use later when
# displaying wordclouds (higher score - higher "frequency" or word size)
norm_partition_genes[i] = dict(
zip(
central_gene_scores.keys(),
list(map(lambda x: int(x), scale(list(central_gene_scores.values()), 1, 100)))
)
)
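        # Note: `scale` above is assumed to be a helper defined elsewhere in this module that
        # min-max rescales a list of values into the [1, 100] range; a minimal sketch of such a
        # helper (illustrative only, the name is hypothetical):
        #
        #     def minmax_scale(values, lo=1, hi=100):
        #         vmin, vmax = min(values), max(values)
        #         if vmax == vmin:
        #             return [hi] * len(values)
        #         return [lo + (v - vmin) * (hi - lo) / (vmax - vmin) for v in values]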
print('Computed centrality scores for each gene in each community\n')
print('Finding functional annotations for each cluster..')
# Computing functional annotation for each cluster as a concatenated list of annotations
    # Each annotation is repeated gene_score times (e.g. a gene with score = 2 contributes its
    # annotations twice, so they get a bigger font in the WordCloud)
    # We do this for every functional annotation source: GO, KEGG, Hallmark, etc.
partition_funcs = {
tag:
{
i: ' '.join(
chain.from_iterable([
gene_func[gene_func.index == gene].to_list()*gene_score
for gene, gene_score in gene_score_list.items()
])) for i, gene_score_list in norm_partition_genes.items()
} for tag, gene_func in gene_func_dbs.items()
}
print('Computed functional annotations for each cluster\n')
###### PLOTTING GENE AND FUNC COMMUNITY CLOUDS ######
print('Plotting clusters..')
# Getting positions of squeezed graph - we do not plot every gene on the figure
squeezed_G, squeezed_partition = squeeze_graph(G, partition)
print('Computed a squeezed graph representation..')
squeezed_pos = netgraph_community_layout(squeezed_G, squeezed_partition, seed=seed) # nx.nx_agraph.pygraphviz_layout(G.to_undirected(), prog="sfdp") # nx.nx.spring_layout(G, seed=seed, k=0.2, iterations=20)
partition_coords = {}
for gene, coords in squeezed_pos.items():
if partition[gene] not in partition_coords:
partition_coords[partition[gene]] = [coords]
else:
partition_coords[partition[gene]] += [coords]
print('Computed node positions of the squeezed graph representation..')
cmap = ListedColormap(sns.color_palette(cc.glasbey_bw, n_colors=num_partitions).as_hex())
for plot_type in ['genes'] + list(map(lambda x: f"func_{x}", anno_db_tags)):
if plot_type.startswith('func'):
# Getting current functional annotation
curr_partition_funcs = partition_funcs[plot_type[plot_type.find('_') + 1:]]
f, ax = plt.subplots(figsize=(20, 35))
if plot_type == 'genes':
wordclouds = {
i: WordCloud(
max_words=30, min_font_size=15, background_color='white', mask=get_elipsis_mask()
).generate_from_frequencies(gene_score_dict).recolor(color_func=highlight_TFs)
for i, gene_score_dict in norm_partition_genes.items()
}
else:
word_counts = {
i: WordCloud(max_words=30, min_font_size=15, stopwords=stopwords).process_text(text) for i, text in curr_partition_funcs.items()
}
word_counts = {
i: (freqs if freqs else {'no found function': 1}) for i, freqs in word_counts.items()
} # dealing with no word case
wordclouds = {
i: WordCloud(
max_words=30, min_font_size=15, stopwords=stopwords, background_color='white', mask=get_elipsis_mask()
).generate_from_frequencies(freqs) for i, freqs in word_counts.items()
}
# Plotting clouds
for i, coords in partition_coords.items():
x, y = zip(*coords)
min_x, max_x = min(x), max(x)
min_y, max_y = min(y), max(y)
ax.imshow(wordclouds[i], interpolation='bilinear', extent=[min_x, max_x, min_y, max_y])
print(f'Finished plotting {plot_type} word cloud..')
nx.draw(squeezed_G, squeezed_pos, ax=ax, arrowstyle="->", arrowsize=20,
connectionstyle=f'arc3, rad = 0.25', edge_color='gray', width=0.4,
node_color='k', node_size=50, alpha=0.02)
nx.draw_networkx_nodes(squeezed_G, squeezed_pos, ax=ax, node_size=100,
nodelist=list(squeezed_partition.keys()),
node_color=list(squeezed_partition.values()),
cmap=cmap, alpha=0.005)
print(f'Finished plotting {plot_type} nodes..')
ax.set_title(f'Found communities ({pat}, "all", {data}), '
f'annotation - {plot_type}',
fontsize=30)
plt.axis('off')
plt.savefig(f'{figs_as}_{plot_type}.png', bbox_inches='tight', dpi=400)
print('Finished plotting..\n')
###### SAVING DATAFRAME CONTAINING INFORMATION ABOUT EACH COMMUNITY ######
def compute_community_info(i):
"""
        Compute the information row for community i (executed in parallel by joblib below).
"""
# Getting information for each community
genes = list(all_partition_genes[i].keys())
community_subgraph = G.subgraph(genes)
communities_i = pd.Series(dtype='object')
# Setting tqdm logs
# t.set_description(f'Saving info about {i} cluster, size={community_subgraph.order()}')
# Getting information about cluster genes
central_genes_and_scores = {
gene: all_partition_genes[i][gene] for k, gene in enumerate(genes) if k < limit_anno_until
}
non_lambert_TFs = [
f'{gene} (rank={k})' for k, gene in enumerate(central_genes_and_scores.keys(), start=1) if gene not in lambert_TF_names
]
non_dorothea_TFs = [
f'{gene} (rank={k})' for k, gene in enumerate(central_genes_and_scores.keys(), start=1) if gene not in dorothea_TF_names
]
# Filling dataframe with the information
communities_i['num_nodes'] = community_subgraph.number_of_nodes()
communities_i['num_edges'] = community_subgraph.number_of_edges()
communities_i['all_sorted_genes'] = '; '.join(
f'{gene} (score={score})' for gene, score in all_partition_genes[i].items()
)
communities_i['sorted_central_genes_scores'] = '; '.join(
f'{gene} (score={score:.2f})' for gene, score in central_genes_and_scores.items()
)
communities_i['non_lambert_2018_TF_central_genes'] = '; '.join(non_lambert_TFs)
communities_i['non_dorothea_TF_central_genes'] = '; '.join(non_dorothea_TFs)
communities_i['whole_G_central_genes_scores'] = '; '.join(
f'{gene} (score={score:.2f})' for gene, score in whole_G_central_genes.items()
)
# Filling information about newly found gene-gene links (based on absence in KEGG and Hallmark)
top_cluster_links = set()
iter_i = 0
for st, end, edge_info in sorted(community_subgraph.edges(data=True),
key=lambda t: t[2]['importance'],
reverse=True):
# If the current (reverse directed) link was not encountered previously..
if (end, st) not in [(uniq_st, uniq_end) for uniq_st, uniq_end, _ in top_cluster_links]:
top_cluster_links.add((st, end, edge_info['importance']))
iter_i += 1
if iter_i == save_top_new_found_cluster_links:
break
for anno_tag in ['KEGG', 'hallmark']:
curr_db = load_gene_func_db(anno_tag)
tmp_list = []
# if `st` gene and `end` gene have non-overlapping annotations..
for st, end, imp in top_cluster_links:
st_anno_IDs = set(curr_db[curr_db.index == st]['ID'])
end_anno_IDs = set(curr_db[curr_db.index == end]['ID'])
if len(st_anno_IDs.intersection(end_anno_IDs)) == 0 and \
(len(st_anno_IDs) != 0 or len(end_anno_IDs) != 0):
tmp_list.append(f"{st} ({' & '.join(st_anno_IDs)}) <-> {end} ({' & '.join(end_anno_IDs)})")
communities_i[f'new_gene_gene_links_{anno_tag}'] = '; '.join(tmp_list)
# Filling information about cluster functions
for tag, gene_func in gene_func_dbs.items():
curr_partition_funcs = partition_funcs[tag]
# Filling main functions - non duplicates at the top
main_functions = list(dict.fromkeys([ # dropping duplicates, but preserving order
func for gene in central_genes_and_scores.keys()
for func in gene_func[gene_func.index == gene].to_list()
]))
gene_with_main_functions = [
','.join(
gene_func[gene_func == func].loc[lambda x: x.index.isin(genes)].index.to_list()
) for func in main_functions
]
main_functions = [
f'>>> {func} <<<: {gene}' for gene, func in zip(gene_with_main_functions, main_functions)
]
communities_i[f'main_functions_{tag}'] = '; '.join(main_functions) # saving..
# Saving functions corresponding to each gene
central_functions_per_gene = [
f">>> {gene} <<<: {' & '.join(gene_func[gene_func.index == gene].to_list())}" for gene in central_genes_and_scores.keys()
]
communities_i[f'sorted_central_functions_{tag}'] = '; '.join(central_functions_per_gene) # saving..
# Saving most frequent function words
freq_words = WordCloud(
max_words=30, min_font_size=15, stopwords=stopwords
).process_text(curr_partition_funcs[i])
freq_words = dict(
sorted(freq_words.items(), key=lambda x: x[1], reverse=True)
) if freq_words else {'no found function': 1} # dealing with no word case
communities_i[f'most_frequent_function_words_{tag}'] = '; '.join(freq_words.keys()) # saving
# Saving other functions present in this cluster
other_functions = list(dict.fromkeys([ # dropping duplicates, but preserving order
func for gene in genes if gene not in central_genes_and_scores.keys()
for func in gene_func[gene_func.index == gene].to_list() if func not in main_functions
]))[:other_functions_until]
genes_with_other_functions = [
','.join(
gene_func[gene_func == func].loc[lambda x: x.index.isin(genes)].index.to_list()
) for func in other_functions
]
other_functions = [
f'>>> {func} <<<: {gene}' for gene, func in zip(genes_with_other_functions, other_functions)
]
communities_i[f'other_functions_{tag}'] = '; '.join(other_functions) # saving
# Filling information about top inter-community links
# t_sub = tqdm(range(num_partitions), ascii=True, leave=False)
for k in range(num_partitions): # t_sub:
# t_sub.set_description(f'Extracting top inter-community links with {k}')
if i != k:
genes_in_k = list(all_partition_genes[k].keys())
                # Getting the subgraph that contains central genes in community_i and all genes in community_k
G_central_i_k = G.subgraph(list(central_genes_and_scores.keys()) + genes_in_k)
# Getting the subgraph that contains all genes from community_i and community_k
G_i_k = G.subgraph(genes + genes_in_k)
# Creating two helper sets that allow us to keep only unique links
links_central_i_k = set()
links_i_k = set()
iter_i = 0
                # Getting the top links from the first subgraph (central genes of community_i vs community_k)
for st, end, edge_info in sorted(G_central_i_k.edges(data=True),
key=lambda t: t[2]['importance'],
reverse=True):
# If the current (reverse directed) link was not encountered previously..
if (end, st) not in [(uniq_st, uniq_end) for uniq_st, uniq_end, _ in links_central_i_k] and \
                        ((st in genes and end not in genes) or (end in genes and st not in genes)):  # keep only inter-community links
links_central_i_k.add((st, end, edge_info['importance']))
iter_i += 1
if iter_i == save_top_intercommunity_links_until:
break
iter_i = 0
                # Getting the top links from the second subgraph (all genes of community_i vs community_k)
for st, end, edge_info in sorted(G_i_k.edges(data=True),
key=lambda t: t[2]['importance'],
reverse=True):
# If the current (reverse directed) link was not encountered previously..
if (end, st) not in [(uniq_st, uniq_end) for uniq_st, uniq_end, _ in links_i_k] and \
                        ((st in genes and end not in genes) or (end in genes and st not in genes)):  # keep only inter-community links
links_i_k.add((st, end, edge_info['importance']))
iter_i += 1
if iter_i == save_top_intercommunity_links_until:
break
# Adding top links to the dataframe
communities_i[f'top_links_scores_central_genes<->community_{k}'] = \
'; '.join(f'{st} <-> {end} (score={score:.2f})' for st, end, score in links_central_i_k)
communities_i[f'top_links_scores_with_community_{k}'] = \
'; '.join([f'{st} <-> {end} (score={score:.2f})' for st, end, score in links_i_k])
return communities_i
print('Saving info dataframe..')
t = tqdm_cli(range(num_partitions), ascii=True)
# Getting dataframe
result = Parallel(n_jobs=num_workers)(delayed(compute_community_info)(i) for i in t)
communities_df = pd.concat(result, axis=1).T.reindex(
columns=[
'num_nodes', 'num_edges',
'main_functions_GO', 'main_functions_KEGG', 'main_functions_immunological', 'main_functions_hallmark',
'non_lambert_2018_TF_central_genes', 'non_dorothea_TF_central_genes',
'new_gene_gene_links_KEGG', 'new_gene_gene_links_hallmark',
'whole_G_central_genes_scores',
'other_functions_GO', 'other_functions_KEGG', 'other_functions_immunological', 'other_functions_hallmark',
'sorted_central_genes_scores',
'sorted_central_functions_GO', 'sorted_central_functions_KEGG', 'sorted_central_functions_immunological', 'sorted_central_functions_hallmark',
'most_frequent_function_words_GO', 'most_frequent_function_words_KEGG', 'most_frequent_function_words_immunological', 'most_frequent_function_words_hallmark',
'all_sorted_genes'] +
[f'top_links_scores_central_genes<->community_{i}' for i in range(num_partitions)] +
[f'top_links_scores_with_community_{i}' for i in range(num_partitions)
]
)
# Saving dataframe
communities_df.to_pickle(data_as)
print(f"Saved the data to {data_as}!\n")
def run_enrichr(data, is_communities=False, is_positive_markers=True, group_types = 'all', on_targets=False, choose_fixed_tf=None,
data_type='all', top_n=50, algo='leiden', enrichr_library='MSigDB_Hallmark_2020'):
"""
Run enrichment analysis with Enrichr.
"""
import json
import requests
import sys
import io
out_folder = 'community_ana' if is_communities else 'cohort_ana'
if is_communities == True:
print('Running EnrichR on communities..')
algo = 'leiden'
_DATA_HOME = '/gpfs/projects/bsc08/bsc08890/res/covid_19'
if data_type == 'all':
community_data = pd.read_pickle(os.path.join(
_DATA_HOME, 'cell_types', data, 'data', 'grnboost2', f'{algo}_communities',
f'raw_data_communities_info.pickle'
))
else:
community_data = pd.read_pickle(os.path.join(
_DATA_HOME, 'cell_types', data, 'data', 'grnboost2', f'{algo}_communities',
f'raw_data_{data_type}_type_communities_info.pickle'
))
df = pd.concat([
pd.DataFrame({
'cluster': f'cluster_{i}',
'gene': [el[: el.find(' ')] for el in vals.split('; ')][:top_n]
            }) for i, vals in community_data['all_sorted_genes'].items()
], axis=0).reset_index(drop=True)
else:
if on_targets:
print('Running EnrichR on targets between 3 group types..')
types = ['C', 'M', 'S']
df = pd.concat([
pd.read_csv(
f'/gpfs/home/bsc08/bsc08890/tmp/cohort_ana/tmp_enrichr_{data}_{t}_{choose_fixed_tf}_target_list.tsv',
header=None, names=['gene']
).assign(cluster=t) for t in types
], axis=0)
else:
if group_types == 'all':
print('Running EnrichR on TFs between 3 group types..')
df = pd.read_csv(f'/gpfs/home/bsc08/bsc08890/tmp/tf_markers_df_{data}.tsv', sep='\t')
else:
print('Running EnrichR on 2 group types..')
if group_types == 'M_S':
group_types = 'S_M'
if group_types == 'C_M':
group_types = 'M_C'
if group_types == 'C_S':
group_types = 'S_C'
df_1 = pd.read_csv(f'/gpfs/home/bsc08/bsc08890/tmp/tf_markers_df_{group_types}_{data}.tsv', sep='\t')
df_1['gene'] = df_1.index
df_2 = df_1.copy()
df_2['avg_log2FC'] = - df_2['avg_log2FC']
df_1['cluster'], df_2['cluster'] = group_types.split('_')
df = pd.concat([df_1, df_2], axis=0)
if is_positive_markers:
df = df[(df['p_val_adj'] < 0.05) & (df['avg_log2FC'] > 1)]
else:
df = df[(df['p_val_adj'] < 0.05) & (df['avg_log2FC'] < -1)]
cluster_dfs = {}
for cl in df['cluster'].unique():
print(f'Processing {cl}..')
ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/addList'
genes_str = '\n'.join(df[df['cluster'] == cl]['gene'])
description = f"{data}_{data_type}_{cl}"
if is_communities == True:
filename = f'tmp/{out_folder}/tmp_enrichr_{data}_{data_type}_{cl}.tsv'
elif on_targets:
filename = f'tmp/{out_folder}/tmp_enrichr_{data}_{data_type}_{choose_fixed_tf}_target_{cl}.tsv'
elif group_types == 'all':
filename = f'tmp/{out_folder}/tmp_enrichr_{data}_{data_type}_{cl}.tsv'
else:
filename = f'tmp/{out_folder}/tmp_enrichr_{data}_2_groups_{cl}.tsv'
payload = {
'list': (None, genes_str),
'description': (None, description)
}
response = requests.post(ENRICHR_URL, files=payload)
if not response.ok:
raise Exception('Error analyzing gene list')
job_id = json.loads(response.text)
################################################################################
# Get enrichment results
#
ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/export'
query_string = '?userListId=%s&filename=%s&backgroundType=%s'
user_list_id = str(job_id['userListId'])
gene_set_library = str(enrichr_library)
url = ENRICHR_URL + query_string % (user_list_id, filename, gene_set_library)
response = requests.get(url, stream=True)
print(' Enrichr API : Downloading file of enrichment results: Job Id:', job_id)
with open(filename, 'wb') as f:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
print(f' Saved to {filename}')
cluster_dfs[cl] = pd.read_csv(filename, sep='\t')
return cluster_dfs
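# Example call (illustrative only; 'T_cells' is a hypothetical data folder name and the
# hard-coded cluster paths inside run_enrichr are assumptions of this environment):
#   hallmark_hits = run_enrichr('T_cells', is_communities=True, data_type='all',
#                               top_n=50, algo='leiden', enrichr_library='MSigDB_Hallmark_2020')
#   print(hallmark_hits['cluster_0'].head())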
def betweenness_centrality_parallel(G, processes=None):
"""Parallel betweenness centrality function"""
from multiprocessing import Pool
def chunks(l, n):
"""Divide a list of nodes `l` in `n` chunks"""
l_c = iter(l)
while 1:
x = tuple(itertools.islice(l_c, n))
if not x:
return
yield x
p = Pool(processes=processes)
node_divisor = len(p._pool) * 4
node_chunks = list(chunks(G.nodes(), int(G.order() / node_divisor)))
num_chunks = len(node_chunks)
bt_sc = p.starmap(
nx.betweenness_centrality_subset,
zip(
[G] * num_chunks,
node_chunks,
[list(G)] * num_chunks,
[True] * num_chunks,
['distance'] * num_chunks
),
)
# Reduce the partial solutions
bt_c = bt_sc[0]
for bt in bt_sc[1:]:
for n in bt:
bt_c[n] += bt[n]
return bt_c
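# Usage sketch (illustrative, not part of the original module): the parallel helper above passes
# 'distance' as the weight argument of nx.betweenness_centrality_subset, so the input graph is
# expected to carry a 'distance' attribute on every edge.
def _example_parallel_betweenness():
    import networkx as nx
    G_demo = nx.erdos_renyi_graph(200, 0.05, seed=0)
    nx.set_edge_attributes(G_demo, 1.0, name='distance')
    scores = betweenness_centrality_parallel(G_demo, processes=2)
    print(sorted(scores.items(), key=lambda kv: kv[1], reverse=True)[:5])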
|
masyahook/Single-cell-gene-regulatory-networks
|
scGRN/func.py
|
func.py
|
py
| 43,101 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28031461245
|
#!/usr/bin/python3
from time import sleep
from datetime import date, datetime
from pynput.keyboard import Key, Controller
from logging.handlers import RotatingFileHandler
import sys, signal, argparse, logging, platform, subprocess
# ----------------------------------Configuration--------------------------------
VOLUME = "0.3"
BREAK_NUM = 1
WORK_DURATION = 900
BREAK_DURATION = 120
MAC = False
LINUX = False
WINDOWS = False
LINUX_PATH = ""
MAC_PATH = "/Users/mutnawaz/Desktop/Muteeb/Code/timer/"
WINDOWS_PATH = "C:\\Users\\Muteeb\\Desktop\\RV Major Project\\Personal\\timer\\"
# ---------------------------------end of Configuration---------------------------
log = None
def __init_logger():
global log
if log is not None:
log.debug("logger already initialized.")
return None
try:
"log format <data/time:level:filename:line:function:message>"
log_formatter = logging.Formatter("%(levelname)5.5s %(filename)5s#%(lineno)3s %(message)s")
"Refer the log file path"
PATH = get_path()
log_file = PATH + "timer.log"
"Max size of the log file is 2MB, it rotate if size exceeds"
handler = RotatingFileHandler(
log_file,
mode="a",
maxBytes=(2 * 1024 * 1024),
backupCount=4,
encoding=None,
delay=0,
)
"appy the log format and level"
handler.setFormatter(log_formatter)
handler.setLevel(logging.DEBUG)
log = logging.getLogger("timer.log")
log.setLevel(logging.DEBUG)
"apply the settings to the log"
log.addHandler(handler)
log.debug("Start logging the times")
return handler
except Exception as e:
log.error("Failed to create logger: %s", str(e))
def exit_handler(sig, frame):
print("\nGood bye. Have a nice day!\n")
greet()
sys.exit(0)
def greet():
try:
print(subprocess.check_output("motivate", shell=True, stderr=subprocess.DEVNULL).decode())
except:
print("\n******************************************************")
print("* *")
print("* *")
print("* You can do it! Sending lots of energy to you :) *")
print("* *")
print("* *")
print("******************************************************")
def get_time():
now = datetime.now()
time = now.strftime("%H:%M:%S")
return time
def play_sound(sound_file):
if MAC:
subprocess.check_output("afplay --volume " + VOLUME + " {}".format(sound_file), shell=True)
elif LINUX:
subprocess.check_output("aplay -q {}&".format(sound_file), shell=True)
else:
winsound.PlaySound(sound_file, winsound.SND_ASYNC)
def get_path():
if MAC:
return MAC_PATH
elif LINUX:
return LINUX_PATH
else:
return WINDOWS_PATH
def display_sleep():
if MAC:
# subprocess.check_output("pmset displaysleepnow", shell=True) # Put system to sleep.
subprocess.check_output("open -a ScreenSaverEngine", shell=True)
def wakeup():
if MAC:
# subprocess.check_output("pmset relative wake 1", shell=True) # Wakeup the system.
# log.debug("Waking up.")
keyboard = Controller()
key = Key.esc
keyboard.press(key)
keyboard.release(key)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--slient", action="store_true", help="Run in silent mode.")
args = vars(parser.parse_args())
if platform.system() == "linux" or platform.system() == "linux2":
LINUX = True
elif platform.system() == "darwin" or platform.system() == "Darwin":
MAC = True
elif platform.system() == "win32" or platform.system() == "Windows":
WINDOWS = True
if not args["slient"]:
try:
import winsound
except Exception as e:
print("Sound is not supported in windows. Reason: {0}".format(e))
args["slient"] = True
__init_logger()
PATH = get_path()
signal.signal(signal.SIGINT, exit_handler)
greet()
if args["slient"]:
print("Running in slient mode...")
log.info("Today's date: {0}".format(date.today()))
if not args["slient"]:
play_sound(PATH + "start_timer.wav")
while True:
log.info("Work number {0}, start time {1}".format(BREAK_NUM, get_time()))
sleep(WORK_DURATION)
log.info("Work number {0}, end time {1}".format(BREAK_NUM, get_time()))
if not args["slient"]:
play_sound(PATH + "take_break.wav")
display_sleep()
log.info("Break number {0}, start time {1}".format(BREAK_NUM, get_time()))
sleep(BREAK_DURATION)
log.info("Break number {0}, end time {1}".format(BREAK_NUM, get_time()))
if not args["slient"]:
play_sound(PATH + "two_mins_up.wav")
wakeup()
BREAK_NUM += 1
|
muteebakram/Timer
|
main.py
|
main.py
|
py
| 5,198 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22791755556
|
import sys
sys.path.insert(0, '../../class')
import os
import time
import nnet
import cubelattice as cl
import multiprocessing
from functools import partial
from scipy.io import loadmat
import numpy as np
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Verification Settings')
parser.add_argument('--property', type=str, default='1')
parser.add_argument('--n1', type=int, default=2)
parser.add_argument('--n2', type=int, default=3)
parser.add_argument('--compute_unsafety', action='store_true')
args = parser.parse_args()
i = args.n1
j = args.n2
def verification(afv):
safe = True
return safe
print("neural_network_"+str(i)+str(j))
nn_path = "nets/neural_network_information_"+str(i)+str(j)+".mat"
filemat = loadmat(nn_path)
if not os.path.isdir('logs'):
os.mkdir('logs')
W = filemat['W'][0]
b = filemat['b'][0]
lb = [-0.1,-0.1,-0.1]
ub = [0.1,0.1,0.1]
nnet0 = nnet.nnetwork(W, b)
nnet0.verification = verification
initial_input = cl.cubelattice(lb, ub).to_lattice()
cpus = multiprocessing.cpu_count()
pool = multiprocessing.Pool(cpus)
nnet0.start_time = time.time()
nnet0.filename = "logs/output_info"+str(i)+str(j)+'.txt'
outputSets = []
nputSets0 = nnet0.singleLayerOutput(initial_input, 0)
pool.map(partial(nnet0.layerOutput, m=1), nputSets0)
pool.close()
elapsed_time = time.time() - nnet0.start_time
print('time elapsed: %f seconds \n' % elapsed_time)
print('result: safe\n')
filex = open(nnet0.filename, 'w')
filex.write('time elapsed: %f seconds \n' % elapsed_time)
filex.write('result: safe\n')
filex.close()
|
Shaddadi/veritex
|
examples/Microbenchmarks/main.py
|
main.py
|
py
| 1,739 |
python
|
en
|
code
| 10 |
github-code
|
6
|
24044811304
|
#compare parameter between abc-smc
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
from scipy import stats
from matplotlib.colors import LogNorm, Normalize
from scipy.signal import argrelextrema
filename=["ACDC_X2","ACDC_Y2","ACDC_Z2"]#,"ACDC_all"]
#filename=['ACDC_X2']
filename=['ACDC_X2','ACDC_X21ind']
n=['final']
#n=['1','2','3','4','5','6','7','8','9','10','11','12','final']#'13','14','15','final']
#n=['1','2','3','4','5','6','7','8','9','10','11','12','13','final']#,'12','13','14','final']#,'15']#,'final']
path='C:/Users/Administrator/Desktop/Modeling/AC-DC/'
path='/users/ibarbier/AC-DC/'
sys.path.insert(0, path + filename[0])
import model_equation as meq
parlist=meq.parlist
namelist=[]
for i,par in enumerate(parlist):
namelist.append(parlist[i]['name'])
par0 = {
'K_ARAX':-3.5,#0.01,
'n_ARAX':2,
'K_XY':-2.5,
'n_XY':2,
'K_XZ':-1.55,#-1.25
'n_XZ':2,
'beta_X':1,
'alpha_X':0,
'delta_X':1,
'K_ARAY':-3.5,
'n_ARAY':2,
'K_YZ':-3.5,
'n_YZ':2,
'beta_Y':1,
'alpha_Y':0,
'delta_Y':1,
'K_ZX':-2.5,
'n_ZX':2,
'beta_Z':1,
'alpha_Z':0,
'delta_Z':1,
'beta/alpha_X':2,
'beta/alpha_Y':2,
'beta/alpha_Z':2
}
def pars_to_dict(pars,parlist):
### This function is not necessary, but it makes the code a bit easier to read,
### it transforms an array of pars e.g. p[0],p[1],p[2] into a
### named dictionary e.g. p['k0'],p['B'],p['n'],p['x0']
### so it is easier to follow the parameters in the code
dict_pars = {}
for ipar,par in enumerate(parlist):
dict_pars[par['name']] = pars[ipar]
return dict_pars
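# e.g. (illustrative): pars_to_dict([0.1, 2.0], [{'name': 'k0'}, {'name': 'n'}])
#      returns {'k0': 0.1, 'n': 2.0}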
def load(number= n,filename=filename,parlist=parlist):
namelist=[]
for i,par in enumerate(parlist):
namelist.append(parlist[i]['name'])
number=str(number)
filepath = path+filename+'/smc/pars_' + number + '.out'
dist_path = path+filename+'/smc/distances_' + number + '.out'
raw_output= np.loadtxt(filepath)
dist_output= np.loadtxt(dist_path)
df = pd.DataFrame(raw_output, columns = namelist)
df['dist']=dist_output
df=df.sort_values('dist',ascending=False)
distlist= sorted(df['dist'])
p=[]
for dist in distlist:
p_0=df[df['dist']==dist]
p0=[]
for n in namelist:
p0.append(p_0[n].tolist()[0])
p0=pars_to_dict(p0,parlist)
p.append(p0)
return p, df
def get_stats(filename,namelist):
stats_df = pd.DataFrame( columns = ['par','file','mean','sd','mode'])
parl = np.append(namelist,'dist')
# for fi,fnm in enumerate(filename):
fnm=filename[0]
p,df= load(n[0],fnm,parlist)
mean=np.mean(df).tolist()
sd=np.std(df).tolist()
mode=stats.mode(df)[0][0]
new_row={'par':parl,'file':[fnm]*len(parl),'mean':mean,'sd':sd,'mode':mode}
df2=pd.DataFrame(new_row)
stats_df =stats_df.append(df2)
return stats_df
def bar_plot(filename,namelist, t="mean"):
    stats_df = get_stats(filename, namelist)
    parl = np.append(namelist, 'dist')  # x-axis labels: parameters plus the distance column
# set width of bars
barWidth = 0.20
# Set position of bar on X axis
r1 = np.arange(len(parl))
#mean
if t=="mean":
for i,nm in enumerate(filename):
            v = stats_df[stats_df['file'] == nm]
plt.bar((r1+barWidth*i),v['mean'],yerr=v['sd'], capsize=2,width=barWidth, label=nm)
plt.xlabel('par', fontweight='bold')
plt.xticks([r + barWidth for r in range(len(parl))], parl)
plt.legend()
plt.show()
#mode
if t == "mode":
for i,nm in enumerate(filename):
            v = stats_df[stats_df['file'] == nm]
plt.bar((r1+barWidth*i),v['mode'],width=barWidth, label=nm)
plt.xlabel('par', fontweight='bold')
plt.xticks([r + barWidth for r in range(len(parl))], parl)
plt.legend()
plt.show()
def plot_compare(n,filename,namelist):
parl = np.append(namelist,'dist')
index=1
size=round(np.sqrt(len(parl)))
for i,name in enumerate(parl):
plt.subplot(size,size,index)
plt.tight_layout()
for fi,fnm in enumerate(filename):
            p, df = load(n, fnm, parlist)
sns.kdeplot(df[name],bw_adjust=.8,label=fnm)
#plt.ylim(0,1)
if i < (len(parl)-2):
plt.xlim((parlist[i]['lower_limit'],parlist[i]['upper_limit']))
index=index+1
if index==5:
plt.legend(bbox_to_anchor=(1.05, 1))
#sns.kdeplot(df['K_XZ'])
plt.savefig(str(filename)+str(n)+"_compareplot.pdf", bbox_inches='tight')
plt.show()
#plot_compare(n[0],filename,namelist)
def plot_alltime(filename,namelist):
parl = np.append(namelist,'dist')
index=1
for i,name in enumerate(parl):
plt.subplot(4,4,index)
plt.tight_layout()
for ni,nmbr in enumerate(n):
p,df= load(nmbr,filename[0],parlist)
sns.kdeplot(df[name],bw_adjust=.8,label=nmbr)
#plt.ylim(0,1)
if i < (len(parl)-2):
plt.xlim((parlist[i]['lower_limit'],parlist[i]['upper_limit']))
index=index+1
#if index==5:
plt.legend(bbox_to_anchor=(1.05, 1))
plt.show()
#plot_alltime(['ACDC_X2'],namelist)
def plotdistpar(filename,namelist):
index=1
for ni,nb in enumerate(n):
p,df= load(nb,filename[0],parlist)
for i,name in enumerate(namelist):
plt.subplot(len(n),len(namelist),index)
# plt.tight_layout()
plt.scatter(df['dist'],df[name],s=1)
mean=np.mean(df[name]).tolist()
mode=stats.mode(df[name])[0][0]
plt.plot([0,40],[mean,mean],'r',label="mean")
plt.plot([0,40],[mode,mode],'g',label="meode")
plt.ylim((parlist[i]['lower_limit'],parlist[i]['upper_limit']))
plt.ylabel(name)
index=index+1
plt.legend(bbox_to_anchor=(1.05, 1))
plt.show()
'''
ARA=np.logspace(-4.5,-2.,10,base=10)
p,df= load(n[0],filename[0],parlist)
stdf=get_stats(filename,namelist)
pmean=pars_to_dict(stdf['mean'])
pmode=pars_to_dict(stdf['mode'])
for i,p in enumerate([p[0],pmean,pmode,p[999]]):
X,Y,Z=meq.model(ARA,p)
df_X=pd.DataFrame(X,columns=ARA)
df_Y=pd.DataFrame(Y,columns=ARA)
df_Z=pd.DataFrame(Z,columns=ARA)
plt.subplot(4,3,(1+3*i))
sns.heatmap(df_X, cmap="Reds")
plt.subplot(4,3,(2+3*i))
sns.heatmap(df_Y, cmap ='Blues')
plt.subplot(4,3,(3+3*i))
sns.heatmap(df_Z, cmap ='Greens')
plt.show()
X,Y,Z=meq.model(ARA,pmode)
plt.plot(X[:,0],label="DCoff")
plt.plot(X[:,3],label="AC1")
plt.plot(X[:,6],label="AC2")
plt.plot(X[:,9],label="DCon")
plt.plot([200,200],[0,1000],'--')
plt.legend(bbox_to_anchor=(1.05, 1))
plt.tight_layout()
plt.show()
'''
#####1indvs2ind
def plotdesnity1vs2():
p2,df2= load('final','ACDC_X2',parlist)
parlist1=parlist.copy()
del parlist1[7:9]
p1,df1= load('final','ACDC_X21ind',parlist1)
namelist=[]
for i,par in enumerate(parlist1):
namelist.append(par['name'])
parl = np.append(namelist,'dist')
index=1
for i,name in enumerate(parl):
plt.subplot(4,4,index)
plt.tight_layout()
sns.kdeplot(df1[name],bw_adjust=.8,label='X_1ind')
sns.kdeplot(df2[name],bw_adjust=.8,label='X_2ind')
#plt.ylim(0,1)
if i < (len(parl)-2):
plt.xlim((parlist1[i]['lower_limit'],parlist1[i]['upper_limit']))
index=index+1
if index==5:
plt.legend(bbox_to_anchor=(1.05, 1))
#sns.kdeplot(df['K_XZ'])
plt.savefig("1vs2ind"+str(n[0])+"_compareplot.pdf", bbox_inches='tight')
#plt.show()
plotdesnity1vs2()
def ind1vs2indmeanandmode():
p2,df2= load('final','ACDC_X',parlist)
df2=df2.drop(columns=['K_ARAY', 'n_ARAY'])
mean_df2=np.mean(df2)
sd_df2=np.std(df2)
mode_df2=stats.mode(df2)[0][0]
parlist1=parlist.copy()
del parlist1[7:9]
p1,df1= load('12','ACDC_1ind',parlist1)
mean_df1=np.mean(df1)
sd_df1=np.std(df1)
mode_df1=stats.mode(df1)[0][0]
namelist=[]
for i,par in enumerate(parlist1):
namelist.append(par['name'])
parl = np.append(namelist,'dist')
# set width of bars
barWidth = 0.30
# Set position of bar on X axis
r1 = np.arange(len(parl))
plt.bar((r1+barWidth*0),mean_df1,yerr=sd_df1, capsize=2,width=barWidth, label="1ind")
plt.bar((r1+barWidth*1),mean_df2,yerr=sd_df2, capsize=2,width=barWidth, label="2ind")
plt.xlabel('par', fontweight='bold')
plt.xticks([r + barWidth for r in range(len(parl))], parl)
plt.legend()
plt.show()
plt.bar((r1+barWidth*0),mode_df1,width=barWidth, label="1ind")
plt.bar((r1+barWidth*1),mode_df2,width=barWidth, label="2ind")
plt.xlabel('par', fontweight='bold')
plt.xticks([r + barWidth for r in range(len(parl))], parl)
plt.legend()
plt.show()
def calculateSS(ARA,parUsed):
    # sort steady states according to their stability
    # create stability arrays of shape: arabinose x steady state x (X, Y, Z)
unstable=np.zeros((len(ARA),3,3))
stable=np.zeros((len(ARA),3,3))
oscillation=np.zeros((len(ARA),3,3))
unstable[:]=np.nan
stable[:]=np.nan
oscillation[:]=np.nan
for ai,a in enumerate(ARA):
ss=meq.findss(a,parUsed)
if len(ss) > 3:
print("error: more than 3 steadystates")
else:
            d = b = c = 0  # separate counters (instead of si) keep oscillations at the same slot
for si,s in enumerate(ss):
e=meq.stability(a,parUsed,[s])[0][0]
if all(e<0):
stable[ai][d]=s
d+=1
if any(e>0):
pos=e[e>0]
if len(pos)==2:
if pos[0]-pos[1] == 0:
oscillation[ai][b]=s
b+=1
else:
unstable[ai][c]=s
c+=1
else:
unstable[ai][c]=s
c+=1
return unstable,stable,oscillation
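# Usage sketch (illustrative): each returned array has shape (len(ARA), 3, 3), i.e. up to three
# steady states per arabinose level, each row holding the (X, Y, Z) coordinates (np.nan if absent):
#   un, st, osc = calculateSS(np.logspace(-4.5, -2., 20, base=10), par0)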
#chose parameter
def bifurcation(parUsed=None):
p,df= load('final','ACDC_X2',parlist)
#parUsed=par0
if parUsed == None:
parUsed=p[0]
ARA=np.logspace(-4.5,-2.,20,base=10)
ss=meq.findss(ARA[0],parUsed)[0]
#print(ss)
init=[ss[0],ss[1],ss[2]]
X,Y,Z=meq.model(ARA,parUsed,totaltime=100,init=init)
df_X=pd.DataFrame(X[500:],columns=ARA)
sns.heatmap(df_X, cmap="Reds", norm=LogNorm())
plt.show()
xss,yss,zss = calculateSScurve(ARA,parUsed)
maxX=[]
minX=[]
maxY=[]
minY=[]
maxZ=[]
minZ=[]
# X,Y,Z=meq.model(ARA,parUsed,totaltime=400)
delta=10e-5
for i in np.arange(0,len(ARA)):
min_x=[np.nan,np.nan,np.nan]
max_x=[np.nan,np.nan,np.nan]
ss=meq.findss(ARA[i],parUsed)
for si,s in enumerate(ss):
init=[s[0]+delta,s[1]+delta,s[2]+delta]
X,Y,Z=meq.model(ARA,parUsed,totaltime=100,init=init)
# print(max(X[200:,i]))
max_x[si]=max(X[200:,i])
min_x[si]=min(X[200:,i])
maxX.append(max_x)
minX.append(min_x)
# minX.append(min(X[200:,i]))
maxY.append(max(Y[200:,i]))
minY.append(min(Y[200:,i]))
maxZ.append(max(Z[200:,i]))
minZ.append(min(Z[200:,i]))
plt.subplot(3,1,1)
plt.plot(ARA,xss,'--o')
plt.plot(ARA,maxX,'-b')
plt.plot(ARA,minX,'-g')
#plt.fill_between(ARA,maxX,minX,alpha=0.2,facecolor='red')
plt.yscale("log")
plt.xscale("log")
plt.subplot(3,1,2)
plt.plot(ARA,yss,'--b')
# plt.plot(ARA,maxY,'-b')
# plt.plot(ARA,minY,'-b')
# plt.fill_between(ARA,maxY,minY,alpha=0.2,facecolor='blue')
plt.yscale("log")
plt.xscale("log")
plt.subplot(3,1,3)
plt.plot(ARA,zss,'--g')
# plt.plot(ARA,maxZ,'-g')
# plt.plot(ARA,minZ,'-g')
# plt.fill_between(ARA,maxZ,minZ,alpha=0.2,facecolor='green')
plt.yscale("log")
plt.xscale("log")
plt.show()
def getlimitcycle(ARA,ssl,par,tt=500):
M=np.ones((len(ARA),3,3))*np.nan
m=np.ones((len(ARA),3,3))*np.nan
delta=10e-5
transient=500
for ai,a in enumerate(ARA):
ss=ssl[ai]
for si,s in enumerate(ss):
if any(np.isnan(s)) == False:
init=[s[0]+delta,s[1]+delta,s[2]+delta]
X,Y,Z=meq.model([a],par,totaltime=tt,init=init)
M[ai,si,0]=max(X[transient:])
M[ai,si,1]=max(Y[transient:])
M[ai,si,2]=max(Z[transient:])
m[ai,si,0]=min(X[transient:])
m[ai,si,1]=min(Y[transient:])
m[ai,si,2]=min(Z[transient:])
max_list=argrelextrema(X[transient:], np.greater)
maxValues=X[transient:][max_list]
min_list=argrelextrema(X[transient:], np.less)
minValues=X[transient:][min_list]
maximaStability = abs(maxValues[-2]-minValues[-2])-(maxValues[-3]-minValues[-3])
if maximaStability > 0.01:
print("limit cycle not achieved for ARA["+str(ai)+"]:" + str(a) + " at st.s:"+ str(s))
return M,m
def bifurcation_plot(n,filename):
p,df= load(n,filename,parlist)
ARA=np.logspace(-4.5,-2.,200,base=10)
un,st,osc=calculateSS(ARA,p[1])
M,m=getlimitcycle(ARA,osc,p[1],tt=500)
for i,col in enumerate(['r','b','g']):
plt.subplot(3,1,i+1)
plt.plot(ARA,un[:,:,i],'--'+col)
plt.plot(ARA,st[:,:,i],'-'+col)
plt.plot(ARA,osc[:,:,i],'--'+col)
plt.fill_between(ARA,M[:,0,i],m[:,0,i],alpha=0.2,facecolor=col)
plt.fill_between(ARA,M[:,1,i],m[:,1,i],alpha=0.2,facecolor=col)
plt.fill_between(ARA,M[:,2,i],m[:,2,i],alpha=0.2,facecolor=col)
plt.yscale("log")
plt.xscale("log")
plt.show()
#bifurcation(p[1])
|
icvara/AC-DC
|
compareplot.py
|
compareplot.py
|
py
| 14,082 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2416692184
|
from pygame import *
from random import randrange
from math import *
from Pong.GameStats import GameStats
from Pong.Player.Goal import Goal
from Pong.Player.PlayerRacket import PlayerRacket
class Ball:
MAX_SPEED_Y = 12
SPEED_X = 6
COLOR = (int(255), int(255), int(255))
RADIUS: int = 10
WIN_SCORE = 10
def __init__(self, players):
self.velocity = (Ball.SPEED_X, randrange(-Ball.MAX_SPEED_Y, Ball.MAX_SPEED_Y))
self.pos = (int(GameStats.width/2), int(GameStats.height/2))
self.players = players
def update_move(self):
# if there is collision
self.pos = (self.velocity[0] + self.pos[0], self.pos[1] + self.velocity[1])
self.collision_update()
if self.pos[0] < -5 or self.pos[0] > 640:
self.pos = (320, 320)
self.velocity = (Ball.SPEED_X, randrange(-Ball.MAX_SPEED_Y, Ball.MAX_SPEED_Y))
elif self.pos[1] < 0 or self.pos[1] > GameStats.height:
self.velocity = (self.velocity[0], -self.velocity[1])
def draw(self, surface):
self.update_move()
draw.circle(surface, Ball.COLOR, self.pos, Ball.RADIUS)
def collision_update(self):
col_pos = (0, 0)
col_body = None
collision = False
for p in [self.players[0].racket, self.players[1].racket, self.players[0].goal, self.players[1].goal]:
for point in ((self.pos[0] + Ball.RADIUS*cos(theta*0.01), self.pos[1] + Ball.RADIUS*sin(theta*0.01))
for theta in range(0, int(pi*2*100))):
if p[0] < point[0] < p[0] + p[2] and \
p[1] < point[1] < p[1] + p[3]:
col_pos = point
col_body = p
collision = True
break
if collision:
break
if collision:
if type(col_body) is PlayerRacket:
self.velocity = (-self.velocity[0], int((col_pos[1] - col_body[1] -
col_body[3]/2)/col_body[3]*Ball.MAX_SPEED_Y*2))
elif type(col_body) is Goal:
if self.players[0].goal == col_body:
if self.players[0].score() == self.WIN_SCORE:
self.players[0].reset()
self.players[1].reset()
if self.players[1].goal == col_body:
if self.players[1].score() == self.WIN_SCORE:
self.players[0].reset()
self.players[1].reset()
self.pos = (GameStats.width//2, GameStats.height//2)
self.velocity = ((Ball.SPEED_X * ((-1) ** randrange(2))), randrange(-Ball.MAX_SPEED_Y, Ball.MAX_SPEED_Y))
def __getitem__(self, key):
return self.pos[key]
|
dogancanalgul/Pong
|
ball.py
|
ball.py
|
py
| 2,820 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11047304211
|
'''----------------------------------------------------------------------------
engine.py
----------------------------------------------------------------------------'''
from engine.ssc.image_ini import *
import numpy as np
#import sunpy.instr.aia
def standard_multitype_ini(observations):
    '''Standard initialization for different kinds of observation. The
    initialization covers rotation, limb-darkening correction, Bz
    estimation and removal of the off-limb region.
    Parameter
    ---------
    observations - Sunpy map objects; the list can contain multiple images.
    Return
    ------
    initialized_observations - Sunpy map objects with modified data'''
# Create a new list for the initialized observations
initialized_observations = []
for obs in observations:
if obs.detector == 'HMI':
# Replace np.nan-s with zero for rotating
obs._data = np.nan_to_num(obs.data)
# Rotate the observations
obs = obs.rotate()
            # Limb darkening correction, only for HMI white-light images
if obs.measurement == 'continuum':
obs = dark_limb.limb_darkening_correct(obs, limb_cut=0.99)
# Longitudinal magnetic field to Bz estimation
if obs.measurement == 'magnetogram':
obs = blbz.LOS2Bz(obs)
# Cut the limb and replace outlimb region with np.nan
obs = cut.solar_limb(obs)
#if obs.detector == 'AIA':
# Processes a level 1 AIAMap into a level 1.5 AIAMap
#obs = sunpy.instr.aia.aiaprep(obs)
# Append the new maps
initialized_observations.append(obs)
# Delete raw observations
del observations
return initialized_observations
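# Usage sketch (illustrative; the FITS file names are hypothetical):
#   import sunpy.map
#   maps = sunpy.map.Map(['hmi_continuum.fits', 'hmi_magnetogram.fits'])
#   maps = standard_multitype_ini(maps)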
|
gyengen/SheffieldSolarCatalog
|
engine/initialisation.py
|
initialisation.py
|
py
| 1,753 |
python
|
en
|
code
| 1 |
github-code
|
6
|
70747391228
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 28 12:54:38 2016
@author: Kylin
"""
import math
import quyu
import erfenbijin
import pylab as pl
a = 200
Rx = 10
Ry = 20
V0 = 100
theta = math.pi/5
dt = 0.1
Vx = V0*math.cos(theta)
Vy = V0*math.sin(theta)
R_x = []
V_x = []
i = 0
while 1 :
Rx=Rx+Vx*dt
Ry=Ry+Vy*dt
if Ry*(Ry-Vy*dt)<0:
k=(Ry-0)/(0-(Ry-Vy*dt))
x0=((1+k)*Rx-k*Vx*dt)/(1+k)
R_x+=[x0]
V_x+=[Vx]
if quyu.inZhengfangxing(Rx,Ry,a)==1\
or quyu.inZhengfangxing(Rx,Ry,a)==0:
continue
if quyu.inZhengfangxing(Rx,Ry,a)==-1:
x1=Rx-Vx*dt
y1=Ry-Vy*dt
x2=Rx
y2=Ry
t=erfenbijin.Zhengfangxing_erFenbijin(x1,y1,x2,y2,0,a)
Rx=t[0]
Ry=t[1]
#continue
if quyu.inZhengfangxing(Rx,Ry,a)==0:
if (Rx== a or Rx==-a) and Ry>-a and Ry<a:
Vx=-Vx
Vy= Vy
if (Ry== a or Ry==-a) and Rx>-a and Rx<a:
Vx= Vx
Vy=-Vy
if (Rx== a and Ry== a)or(Rx==-a and Ry==a)or(Rx==a and Ry==-a)or(Rx==-a and Ry==-a):
Vx=-Vx
Vy=-Vy
i+=1
print(i)
if i>10000:
break
pl.plot(R_x, V_x,"o",label="Vx-Rx")
pl.title(u"正方形".encode("gb2312"))
pl.xlabel('Rx')
pl.ylabel('Vx')
pl.legend()
pl.show()
|
52kylin/compuational_physics_N2014301020034
|
exercise_09_new/code/zfxvx.py
|
zfxvx.py
|
py
| 1,459 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27251269716
|
"""
File: Code/Chapter05/C01_ConfigManage/E02_Config.py
Created: 2023/2/26 3:47 PM
Author: @空字符
WeChat official account: @月来客栈
Zhihu: @月来客栈 https://www.zhihu.com/people/the_lastest
"""
import os
class ModelConfig(object):
def __init__(self,
train_file_path=os.path.join('data', 'train.txt'),
val_file_path=os.path.join('data', 'val.txt'),
test_file_path=os.path.join('data', 'test.txt'),
split_sep='_!_',
is_sample_shuffle=True,
batch_size=16,
learning_rate=3.5e-5,
max_sen_len=None,
num_labels=3,
epochs=5):
self.train_file_path = train_file_path
self.val_file_path = val_file_path
self.test_file_path = test_file_path
self.split_sep = split_sep
self.is_sample_shuffle = is_sample_shuffle
self.batch_size = batch_size
self.learning_rate = learning_rate
self.max_sen_len = max_sen_len
self.num_labels = num_labels
self.epochs = epochs
#
def train(config):
dataset = get_dataset(config)
model = get_mode(config)
if __name__ == '__main__':
config = ModelConfig(epochs=10)
print(f"epochs = {config.epochs}")
# train(config)
|
moon-hotel/DeepLearningWithMe
|
Code/Chapter05/C01_ConfigManage/E02_Config.py
|
E02_Config.py
|
py
| 1,326 |
python
|
en
|
code
| 116 |
github-code
|
6
|
70402167867
|
import const
import sys, os
import string
import random
QUESTION_TOOL='What are the tools used in the attack?'
QUESTION_GROUP='Who is the attack group?'
INPUT_FILE='input/sample_attack_report_raw.txt'
TRAIN_RATE=0.8
VUL_RATE=0.1
LABEL_TRAIN='train'
LABEL_VAL='dev'
LABEL_TEST='test'
SENTENSE_DELIMETER=". "
WORD_DELIMETER=" "
LAVEL_GROUP='B-AT'
LAVEL_I_GROUP='I-AT'
LAVEL_TOOL='B-TL'
LAVEL_I_TOOL='I-TL'
LAVEL_SEC='B-SC'
LAVEL_I_SEC='I-SC'
LAVEL_COM='B-CM'
LAVEL_I_COM='I-CM'
LAVEL_OTHER='O'
DATASET_DELIMETER="\t"
TRAIN_FILE='train.txt'
VAL_FILE='dev.txt'
TEST_FILE='test.txt'
MAX_WORD_NUM=200
MAX_WORD=1000
NUM_SENTENSE_PER_ROW=100
LONG_SENTENSE='long.txt'
O_RATE=1
EXCLUSIVE_LIST=['at']
LEN_RANDOM=10
alldataset={}
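# Output format sketch (illustrative; the tokens below are made-up examples): each token is
# written as "<word>\t<label>", a sentence is terminated by a ".\tO" line followed by a blank
# line, with B-AT/I-AT marking attack groups and B-TL/I-TL marking tools:
#   APT29    B-AT
#   used     O
#   Cobalt   B-TL
#   Strike   I-TL
#   .        O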
def get_tools():
tools=[]
with open(const.OUT_TOOL_FILE, 'r') as file:
for row in file:
tool = row.replace(const.NEWLINE, "")
#tool = tool.lower()
tools.append(tool)
return tools
def get_groups():
groups=[]
with open(const.OUT_GROUP_FILE, 'r') as file:
for row in file:
group = row.replace(const.NEWLINE, "")
#group=group.lower()
groups.append(group)
return groups
def get_sectors():
sectors=[]
with open(const.OUT_SECTOR_FILE, 'r') as file:
for row in file:
sector = row.replace(const.NEWLINE, "")
#sector=sector.lower()
sectors.append(sector)
return sectors
def get_companies():
companies=[]
with open(const.OUT_COMPANY_FILE, 'r') as file:
for row in file:
company = row.replace(const.NEWLINE, "")
#company=company.lower()
companies.append(company)
return companies
def random_str(word):
dat = string.digits + string.ascii_lowercase + string.ascii_uppercase
return ''.join([random.choice(dat) for i in range(len(word))]).lower()
def get_random_TOOL(start,end):
index=random.randint(start,end)
tool=tools[index]
name=tool.split(" ")[0]
return name
def get_random_TA(start,end):
index=random.randint(start,end)
ta_name=groups[index]
name = ta_name.split(" ")[0]
return name
def create_dataset(mode,num_dataset, start_a, end_a, start_t, end_t):
cnt=0
data=[]
data_O=[]
data_tag = []
if mode == LABEL_TRAIN:
        data = lines[:num_train]  # the first num_train reports
elif mode==LABEL_VAL:
data=lines[num_train:num_train+num_val]
else:
data = lines[num_train+num_val:]
for row in data:
print("cnt: "+str(cnt))
if cnt>num_dataset:
print("Exceed "+str(num_data))
return
sentenses = row.split(SENTENSE_DELIMETER)
#print(str(len(sentenses)))
for sentense in sentenses:
words= sentense.split(WORD_DELIMETER)
if len(words) >=MAX_WORD_NUM:
# with open(LONG_SENTENSE, "a", encoding='utf8') as out_sentense:
# out_sentense.write(sentense + const.NEWLINE)
continue
len_word=0
for word in words:
len_word=len_word+len(word)
if len_word >= MAX_WORD:
continue
prev=''
prev_org=''
dataset=[]
index=0
for word in words:
lavel = LAVEL_OTHER
word=word.strip()
tmp_word = word
# groups
if tmp_word in groups:
lavel=LAVEL_GROUP
elif prev+WORD_DELIMETER+tmp_word in groups:
lavel = LAVEL_I_GROUP
prev_org = get_random_TA(start_a, end_a)
dataset[index-1]=prev_org + DATASET_DELIMETER + LAVEL_GROUP + const.NEWLINE
# tools
elif tmp_word in tools and tmp_word.lower() not in EXCLUSIVE_LIST:
lavel=LAVEL_TOOL
elif prev + WORD_DELIMETER + tmp_word in tools:
lavel = LAVEL_I_TOOL
prev_org = get_random_TOOL(start_t,end_t)
dataset[index - 1] = prev_org + DATASET_DELIMETER + LAVEL_TOOL + const.NEWLINE
# # sectors
# elif tmp_word in sectors:
# lavel = LAVEL_SEC
#
# elif prev + WORD_DELIMETER + tmp_word in sectors:
# lavel = LAVEL_I_SEC
# dataset[index - 1] = prev_org + DATASET_DELIMETER + LAVEL_SEC + const.NEWLINE
#
# # companies
# elif tmp_word in companies:
# lavel = LAVEL_COM
#
# elif prev + WORD_DELIMETER + tmp_word in companies:
# lavel = LAVEL_I_COM
# dataset[index - 1] = prev_org + DATASET_DELIMETER + LAVEL_COM + const.NEWLINE
if lavel ==LAVEL_GROUP or lavel==LAVEL_I_GROUP:
word=get_random_TA(start_a, end_a)
word=word
elif lavel ==LAVEL_TOOL or lavel==LAVEL_I_TOOL:
word=get_random_TOOL(start_t,end_t)
word = word
dataset.append(word + DATASET_DELIMETER + lavel + const.NEWLINE)
prev=tmp_word
prev_org=word
index=index+1
num_data=0
for item in dataset:
label=item.split(DATASET_DELIMETER)[1].strip()
if label!=LAVEL_OTHER:
num_data=num_data+1
if num_data == 0:
data_O.append(dataset)
else:
data_tag.append(dataset)
cnt = cnt + 1
O_num = len(data_O)
max_O_num = int(O_num* O_RATE)
alldataset[mode]=data_tag+data_O[:max_O_num]
return(mode)
with open(INPUT_FILE, 'r') as file:
lines = file.readlines()
context=len(lines)
print("total context:" +str(context))
if len(sys.argv)>1:
context = int(sys.argv[1])
num_train=round(context*TRAIN_RATE)
num_val=round(context*VUL_RATE)
num_test=context-num_train-num_val
print("num_train:" +str(num_train))
print("num_val:" +str(num_val))
print("num_test:" +str(num_test))
tools=get_tools()
groups=get_groups()
# sectors=get_sectors()
# companies=get_companies()
train_ta_end=round(len(groups)*TRAIN_RATE)
dev_ta_end=train_ta_end+round(len(groups)*VUL_RATE)
test_ta_end=len(groups)-1
train_tl_end=round(len(tools)*TRAIN_RATE)
dev_tl_end=train_tl_end+round(len(tools)*VUL_RATE)
test_tl_end=len(tools)-1
if os.path.exists(TRAIN_FILE):
os.remove(TRAIN_FILE)
if os.path.exists(VAL_FILE):
os.remove(VAL_FILE)
if os.path.exists(TEST_FILE):
os.remove(TEST_FILE)
if os.path.exists(LONG_SENTENSE):
os.remove(LONG_SENTENSE)
create_dataset(LABEL_TRAIN, num_train,0,train_ta_end,0,train_tl_end)
create_dataset(LABEL_VAL, num_val,train_ta_end+1,dev_ta_end,train_tl_end+1,dev_tl_end)
create_dataset(LABEL_TEST, num_test,dev_ta_end+1,test_ta_end,dev_tl_end+1,test_tl_end)
with open(LABEL_TRAIN + '.txt', "a", encoding='utf8') as out:
for dataset in alldataset[LABEL_TRAIN]:
out.writelines(dataset)
out.write('.' + DATASET_DELIMETER + LAVEL_OTHER + const.NEWLINE + const.NEWLINE)
with open(LABEL_VAL + '.txt', "a", encoding='utf8') as out:
for dataset in alldataset[LABEL_VAL]:
out.writelines(dataset)
out.write('.' + DATASET_DELIMETER + LAVEL_OTHER + const.NEWLINE + const.NEWLINE)
with open(LABEL_TEST + '.txt', "a", encoding='utf8') as out:
for dataset in alldataset[LABEL_TEST]:
out.writelines(dataset)
out.write('.' + DATASET_DELIMETER + LAVEL_OTHER + const.NEWLINE + const.NEWLINE)
|
gamzattirev/Ahogrammer
|
create_dataset.py
|
create_dataset.py
|
py
| 7,721 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44379710290
|
from random import randint
y=int(randint(1,10))
for i in range(3):
x = int(input("猜数字:\n"))
if x >y:
print("大了")
elif x<y:
print("小了")
else:
print("猜对了")
break
print("Game over!")
|
wuge-1996/Python-Exercise
|
Exercise 39.py
|
Exercise 39.py
|
py
| 247 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17256948742
|
# The else block right after a for / while loop runs only when the loop
# does NOT end with a break statement.
"""for i in range(1,5):
    print (i)
else :
    print("No break / this runs because there is no break")"""
# Program to check if an array consists
# of even numbers
"""def evenumbers (lista) :
    for l in lista :
        if l % 2 == 0 :
            print ("There are even numbers in the list")
            break
    else :
        print ("There are no even numbers in the list")
print ("List 1 :" )
evenumbers([5,3,6])
print ("List 2 :" )
evenumbers([1,3,5])"""
count = 4
while count < 1 :
count += 1
print (count)
else:
print ("no break")
|
codekacode/Exercisespython
|
Elsefor.py
|
Elsefor.py
|
py
| 682 |
python
|
es
|
code
| 0 |
github-code
|
6
|
17509722663
|
'''
Problem Statement
Your company has a big conference coming up and needs to book conference rooms in a convention center. To help the company save budget, we want to book as few conference rooms as possible given a list of meeting schedules that contains only the starting and ending time of each meeting. Write a program that helps figure out the minimum number of conference rooms needed.
Example:
[(2,7)] -> Output: 1
[(0,30),(5,10),(15,20),(21,22),(21,28)] -> Explanation: Room1: (0,30); Room2: (5,10),(15,20),(21,22); Room3: (21,28) -> Output: 3
(0, 30), (0, 10), (5,15), (11, 20), (17, 25), (21,30)
examples
(0,30),
(5,10), (15,20), (21 22)
(21 28)
(0, 30), (0, 10), (5,15), (11, 20), (17, 25), (21,30)
assumptions
approaches
1)
(0,30),
(5,22),
(21 28)
2)
(0, 30), (0, 10), (5,15), (11, 20), (17, 25), (21,30)
0 1 2 3 4 5
0 30
count: 1
create a res array
for any new interval, look in res for a place where int has no intersection. this space defines a room!
tradeoffs
this appears to be the only way
'''
from typing import List, Tuple
def roomcount(times: List[Tuple[int, int]]) -> int:
'''
s1------e1
s2-------e2
'''
def intersects(start1, end1, start2, end2):
return min(end1, end2) > max(start1, start2)
def no_intersects(lis):
for int_ in lis:
            if intersects(*int_, start, end):  # strict overlap only; meetings that merely touch do not clash
return False
return True
rooms = []
for start, end in times:
for lis in rooms:
if no_intersects(lis):
lis.append((start, end))
break
else:
rooms.append([(start, end)])
return len(rooms)
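# A common alternative (shown only as a sketch, not the approach used above): sort the meetings
# by start time and sweep with a min-heap of end times; the peak heap size is the room count.
import heapq

def roomcount_heap(times: List[Tuple[int, int]]) -> int:
    ends = []  # min-heap of end times of meetings currently occupying a room
    for start, end in sorted(times):
        if ends and ends[0] <= start:
            # the earliest-ending meeting is over, so its room can be reused
            heapq.heapreplace(ends, end)
        else:
            # every tracked meeting still overlaps this one, so open a new room
            heapq.heappush(ends, end)
    return len(ends)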
ints = [(2,7)] # -> Output: 1
print(roomcount(ints))
ints = [(0,30),(5,10),(15,20), (21, 22), (21, 28) ] #3
print(roomcount(ints))
ints = [(0,30),(5,10),(15,20),(21, 22), (22, 28) ] #2
print(roomcount(ints))
|
soji-omiwade/cs
|
dsa/before_rubrik/minimum_rooms.py
|
minimum_rooms.py
|
py
| 1,937 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19040286888
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from rl_nav import constants
from rl_nav.environments import wrapper
try:
import cv2
import matplotlib
from matplotlib import cm
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
except ModuleNotFoundError:
raise AssertionError(
"To use visualisation wrapper, further package requirements "
"need to be satisfied. Please consult README."
)
class VisualisationEnv(wrapper.Wrapper):
COLORMAP = cm.get_cmap("plasma")
NORMALISE = False
def __init__(self, env):
super().__init__(env=env)
def render(
self,
save_path: Optional[str] = None,
dpi: Optional[int] = 60,
format: str = "state",
) -> None:
"""Method to render environment.
Args:
save_path: optional path to which to save image.
dpi: optional pixel.
format: state of environment to render.
"""
if format == constants.STATE:
            assert self._env.active, (
                "To render map with state, environment must be active. "
                "Call reset_environment() to reset environment and make it active. "
                "Else render stationary environment skeleton using format='stationary'."
            )
if save_path:
fig = plt.figure()
plt.imshow(
self._env._env_skeleton(
rewards=format,
agent=format,
),
origin="lower",
)
fig.savefig(save_path, dpi=dpi)
else:
plt.imshow(
self._env._env_skeleton(
rewards=format,
agent=format,
),
origin="lower",
)
def visualise_episode_history(
self, save_path: str, history: Union[str, List[np.ndarray]] = "train"
) -> None:
"""Produce video of episode history.
Args:
save_path: name of file to be saved.
history: "train", "test" to plot train or test history,
else provide an independent history.
"""
if isinstance(history, str):
if history == constants.TRAIN:
history = self._env.train_episode_history
elif history == constants.TEST:
history = self._env.test_episode_history
elif history == constants.TRAIN_PARTIAL:
history = self._env.train_episode_partial_history
elif history == constants.TEST_PARTIAL:
history = self._env.test_episode_partial_history
SCALING = 20
FPS = 30
map_shape = history[0].shape
frameSize = (SCALING * map_shape[1], SCALING * map_shape[0])
out = cv2.VideoWriter(
filename=save_path,
fourcc=cv2.VideoWriter_fourcc("m", "p", "4", "v"),
fps=FPS,
frameSize=frameSize,
)
for frame in history:
bgr_frame = frame[..., ::-1].copy()
flipped_frame = np.flip(bgr_frame, 0)
scaled_up_frame = np.kron(flipped_frame, np.ones((SCALING, SCALING, 1)))
out.write((scaled_up_frame * 255).astype(np.uint8))
out.release()
def _plot_normalised_heatmap_over_env(
self, heatmap: Dict[Tuple[int, int], float], save_name: str
):
split_save_name = save_name.split(".pdf")[0]
save_name = f"{split_save_name}_normalised.pdf"
environment_map = self._env._env_skeleton(
rewards=None,
agent=None,
)
all_values = list(heatmap.values())
current_max_value = np.max(all_values)
current_min_value = np.min(all_values)
for position, value in heatmap.items():
# remove alpha from rgba in colormap return
# normalise value for color mapping
environment_map[position[::-1]] = self.COLORMAP(
(value - current_min_value) / (current_max_value - current_min_value)
)[:-1]
fig = plt.figure()
plt.imshow(environment_map, origin="lower", cmap=self.COLORMAP)
plt.colorbar()
fig.savefig(save_name, dpi=60)
plt.close()
def _plot_unnormalised_heatmap_over_env(
self, heatmap: Dict[Tuple[int, int], float], save_name: str
):
environment_map = self._env._env_skeleton(
rewards=None,
agent=None,
)
for position, value in heatmap.items():
# remove alpha from rgba in colormap return
environment_map[position[::-1]] = self.COLORMAP(value)[:-1]
fig = plt.figure()
plt.imshow(environment_map, origin="lower", cmap=self.COLORMAP)
plt.colorbar()
fig.savefig(save_name, dpi=60)
plt.close()
def plot_heatmap_over_env(
self,
heatmap: Dict[Tuple[int, int], float],
save_name: str,
) -> None:
"""plot quantities over top of environmen (e.g. value function)
Args:
heatmap: data to plot; dictionary of states (keys) and quantities (values).
fig: figure on which to plot.
ax: axis on which to plot.
save_name: path to which to save plot.
"""
self._plot_unnormalised_heatmap_over_env(heatmap=heatmap, save_name=save_name)
self._plot_normalised_heatmap_over_env(heatmap=heatmap, save_name=save_name)
def plot_numbered_values_over_env(
self, values: Dict[Tuple[int], np.ndarray], save_name: str
):
fig = plt.figure()
environment_map = self._env._env_skeleton(
rewards=None,
agent=None,
)
plt.imshow(environment_map, origin="lower", cmap=self.COLORMAP)
all_states = list(values.keys())
for state, action_values in values.items():
for i, action_value in enumerate(action_values):
if all_states[i] != state:
xytext = np.array(state) + 0.2 * (
np.array(all_states[i]) - np.array(state)
)
plt.annotate(
f"{i}: {round(action_value, 2)}",
xy=state,
xytext=xytext,
arrowprops={
"headlength": 2,
"headwidth": 2,
"width": 0.5,
"linewidth": 0.1,
},
color="y",
size=5,
)
else:
plt.annotate(
i,
xy=state,
color="g",
size=5,
)
fig.savefig(save_name, dpi=60)
plt.close()
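# Usage sketch (illustrative; `base_env` stands for whichever rl_nav environment gets wrapped):
#   env = VisualisationEnv(base_env)
#   env.render(save_path='state.png', format='state')
#   env.visualise_episode_history('train_episode.mp4', history='train')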
|
philshams/Euclidean_Gridworld_RL
|
rl_nav/environments/visualisation_env.py
|
visualisation_env.py
|
py
| 6,969 |
python
|
en
|
code
| 1 |
github-code
|
6
|
12423871357
|
__author__ = "Vanessa Sochat, Alec Scott"
__copyright__ = "Copyright 2021-2023, Vanessa Sochat and Alec Scott"
__license__ = "Apache-2.0"
import json
import os
import re
import shlex
import subprocess
import pakages.builders.spack.cache as spack_cache
import pakages.client
import pakages.oras
import pakages.utils
from pakages.logger import logger
class SpackClient(pakages.client.PakagesClient):
"""
    Spack-backed client: the main controller for interacting with pakages.
"""
def parse_package_request(self, packages):
"""
Parse the packages and repo (if any) from it.
This is shared between install and build
"""
        # By default, assume we are not adding a repository
repo = None
if not isinstance(packages, list):
packages = shlex.split(packages)
# Case 1: we have an install directed at the present working directory
if packages and packages[0] == ".":
repo = os.getcwd()
packages.pop(0)
# If we have a path (akin to the first)
if packages and os.path.exists(packages[0]):
repo = packages.pop(0)
# OR if we have a github URI TODO, can clone here
if packages and re.search("(http|https)://github.com", packages[0]):
repo = packages.pop(0)
# If we don't have packages and we have a repo, derive from PWD
if repo and not packages:
for path in pakages.utils.recursive_find(repo, "package.py"):
packages.append(os.path.basename(os.path.dirname(path)))
# Finally, add the repository
if repo:
self.add_repository(repo)
return packages
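        # e.g. (illustrative):
        #   parse_package_request(".")      -> packages discovered in the current working repo
        #   parse_package_request("zlib")   -> ["zlib"], no repository added
        #   parse_package_request("https://github.com/org/repo zlib") -> ["zlib"], repo registered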
def list_installed(self):
"""
List installed packages
"""
command = ["spack", "find"]
for line in pakages.utils.stream_command(command):
print(line.strip("\n"))
command = ["spack", "find", "--json"]
result = pakages.utils.run_command(command)
return json.loads(result["message"])
def build(self, packages, cache_dir=None, key=None, **kwargs):
"""
Build a package into a cache
"""
packages = self.parse_packages(packages)
# Prepare a cache directory
cache = spack_cache.BuildCache(
spec_name=packages,
cache_dir=cache_dir or self.settings.cache_dir,
username=self.settings.username,
email=self.settings.email,
settings=self.settings,
)
# Install all packages
self._install(packages)
cache.create(packages, key=key)
# Push function is on cache, if desired
return cache
def parse_packages(self, packages):
"""
Helper function to ensure we return consistent names.
"""
packages = self.parse_package_request(packages)
if isinstance(packages, list):
packages = packages[0]
if " " in packages:
logger.exit("We currently only support one package for build.")
logger.info(f"Preparing package {packages}")
return packages
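    # Note (added for clarity, not in the original module): parse_packages reduces a
    # request to a single spec string, e.g. ["zlib"] -> "zlib", and exits with an
    # error if more than one space-separated package is requested for a build.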
def add_repository(self, path):
"""
Add a repository.
Given a path that exists, add the repository to the
underlying spack. If you need to add a GitHub uri, create a
pakages.repo.PakRepo first.
"""
try:
command = ["spack", "repo", "add", path]
for line in pakages.utils.stream_command(command):
logger.info(line.strip("\n"))
except subprocess.CalledProcessError as e:
if "Repository is already registered" in e.output:
pass
else:
raise e
def download_cache(self, target, download_dir=None):
"""
Download a target to a cache download directory
"""
download_dir = download_dir or pakages.utils.get_tmpdir()
reg = pakages.oras.get_oras_client()
# This will error if not successful, result is a list of files
reg.pull(target=target, outdir=download_dir)
return download_dir
def install(self, packages, **kwargs):
"""
Install one or more packages.
"""
packages = self.parse_packages(packages)
use_cache = kwargs.get("use_cache", False)
if use_cache:
cache_dir = self.download_cache(use_cache)
cache = spack_cache.BuildCache(
packages, cache_dir=cache_dir, settings=self.settings
)
# Cache is named after target, this is a filesystem mirror
cache.add_as_mirror(re.sub("(-|:|/)", "-", use_cache))
# Prepare install command with or without cache
command = ["spack", "install"]
if use_cache:
command.append("--use-cache")
if isinstance(packages, list):
command.append(" ".join(packages))
else:
command.append(packages)
# Install packages using system spack - we aren't responsible for this working
for line in pakages.utils.stream_command(command):
logger.info(line.strip("\n"))
def _install(self, packages):
"""
Install one or more packages.
        This eventually needs to take into account using the GitHub packages build cache
"""
# Install packages using system spack - we aren't responsible for this working
for line in pakages.utils.stream_command(["spack", "install", packages]):
logger.info(line.strip("\n"))
def uninstall(self, packages):
"""
Uninstall a spack package
"""
for line in pakages.utils.stream_command(["spack", "uninstall", packages]):
logger.info(line.strip("\n"))
| syspack/pakages | pakages/builders/spack/client.py | client.py | py | 5,794 | python | en | code | 2 | github-code | 6 |
13914723162 |
|
import sys
import oneflow as flow
import oneflow.typing as tp
import argparse
import numpy as np
import os
import shutil
import json
from typing import Tuple
from textcnn import TextCNN
sys.path.append("../..")
from text_classification.utils import pad_sequences, load_imdb_data
parser = argparse.ArgumentParser()
parser.add_argument('--ksize_list', type=str, default='2,3,4,5')
parser.add_argument('--n_filters', type=int, default=100)
parser.add_argument('--emb_dim', type=int, default=100)
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--sequence_length', type=int, default=150)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--model_load_dir', type=str, default='')
parser.add_argument('--model_save_every_n_iter', type=int, default=1000)
parser.add_argument('--n_steps', type=int, default=10000)
parser.add_argument('--n_epochs', type=int, default=15)
parser.add_argument('--model_save_dir', type=str, default='./best_model')
args = parser.parse_args()
assert ',' in args.ksize_list
args.ksize_list = [int(n) for n in args.ksize_list.split(',')]
args.emb_num = 50000
args.n_classes = 2
model = TextCNN(
args.emb_num, args.emb_dim,
ksize_list=args.ksize_list,
n_filters_list=[args.n_filters] * len(args.ksize_list),
n_classes=args.n_classes, dropout=args.dropout)
def get_train_config():
config = flow.function_config()
config.default_data_type(flow.float)
return config
def get_eval_config():
config = flow.function_config()
config.default_data_type(flow.float)
return config
@flow.global_function('train', get_train_config())
def train_job(text: tp.Numpy.Placeholder((args.batch_size, args.sequence_length), dtype=flow.int32),
label: tp.Numpy.Placeholder((args.batch_size,), dtype=flow.int32)
) -> tp.Numpy:
with flow.scope.placement("gpu", "0:0"):
logits = model.get_logits(text, is_train=True)
loss = flow.nn.sparse_softmax_cross_entropy_with_logits(label, logits, name="softmax_loss")
lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [args.lr])
flow.optimizer.Adam(lr_scheduler).minimize(loss)
return loss
@flow.global_function('predict', get_eval_config())
def eval_job(text: tp.Numpy.Placeholder((args.batch_size, args.sequence_length), dtype=flow.int32),
label: tp.Numpy.Placeholder((args.batch_size,), dtype=flow.int32)
) -> Tuple[tp.Numpy, tp.Numpy]:
with flow.scope.placement("gpu", "0:0"):
logits = model.get_logits(text, is_train=False)
loss = flow.nn.sparse_softmax_cross_entropy_with_logits(label, logits, name="softmax_loss")
return label, logits
def suffle_batch(data, label, batch_size):
permu = np.random.permutation(len(data))
data, label = data[permu], label[permu]
batch_n = len(data) // batch_size
x_batch = np.array([data[i * batch_size:i * batch_size + batch_size] for i in range(batch_n)], dtype=np.int32)
y_batch = np.array([label[i * batch_size:i * batch_size + batch_size] for i in range(batch_n)], dtype=np.int32)
return x_batch, y_batch
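# Shape sketch (illustrative, not part of the original script): with batch_size=32
# and, say, 25,000 padded reviews of length sequence_length=150, suffle_batch returns
#   x_batch.shape == (781, 32, 150)   # 25000 // 32 full batches of token ids
#   y_batch.shape == (781, 32)        # matching labels
# and silently drops the remainder that does not fill a whole batch.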
def acc(labels, logits, g):
predictions = np.argmax(logits, 1)
right_count = np.sum(predictions == labels)
g["total"] += labels.shape[0]
g["correct"] += right_count
def train(checkpoint):
path = '../imdb'
(train_data, train_labels), (test_data, test_labels) = load_imdb_data(path)
with open(os.path.join(path, 'word_index.json')) as f:
word_index = json.load(f)
word_index = {k: (v + 2) for k, v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<UNK>"] = 1
train_data = pad_sequences(train_data, value=word_index["<PAD>"], padding='post', maxlen=args.sequence_length)
test_data = pad_sequences(test_data, value=word_index["<PAD>"], padding='post', maxlen=args.sequence_length)
best_accuracy = 0.0
best_epoch = 0
for epoch in range(1, args.n_epochs + 1):
print("[Epoch:{}]".format(epoch))
data, label = suffle_batch(train_data, train_labels, args.batch_size)
for i, (texts, labels) in enumerate(zip(data, label)):
loss = train_job(texts, labels).mean()
if i % 20 == 0:
print(loss)
data, label = suffle_batch(test_data, test_labels, args.batch_size)
g = {"correct": 0, "total": 0}
for i, (texts, labels) in enumerate(zip(data, label)):
labels, logits = eval_job(texts, labels)
acc(labels, logits, g)
accuracy = g["correct"] * 100 / g["total"]
print("[Epoch:{0:d} ] accuracy: {1:.1f}%".format(epoch, accuracy))
if accuracy > best_accuracy:
best_accuracy = accuracy
best_epoch = epoch
if not os.path.exists(args.model_save_dir):
os.mkdir(args.model_save_dir)
else:
shutil.rmtree(args.model_save_dir)
assert not os.path.exists(args.model_save_dir)
os.mkdir(args.model_save_dir)
print("Epoch:{} save best model.".format(best_epoch))
checkpoint.save(args.model_save_dir)
print("Epoch:{} get best accuracy:{}".format(best_epoch, best_accuracy))
if __name__ == '__main__':
checkpoint = flow.train.CheckPoint()
checkpoint.init()
train(checkpoint)
| Oneflow-Inc/oneflow_nlp_model | text_classification/textcnn/train_textcnn.py | train_textcnn.py | py | 5,411 | python | en | code | 0 | github-code | 6 |
8246901300 |
"""
Module containing the rheologies, fault setup, and ODE cycles code
for the 2D subduction case.
"""
# general imports
import json
import configparser
import numpy as np
import pandas as pd
from scipy.integrate import solve_ivp
from numba import njit, objmode, float64, int64, boolean
from scipy.interpolate import interp1d
from warnings import warn
from abc import ABC
# seqeas imports
from .kernels2d import Glinedisp, Klinedisp
class Rheology(ABC):
"""
Abstract base class for rheologies.
"""
class NonlinearViscous(Rheology):
r"""
Implement a nonlinear viscous fault rheology, where the velocity :math:`v` is
:math:`v = \tau^n / \alpha_n` given the shear stress :math:`\tau`, a strength
constant :math:`\alpha_n`, and a constant exponent :math:`n`.
"""
def __init__(self, n, alpha_n, n_mid=None, alpha_n_mid=None, mid_transition=None,
n_deep=None, alpha_n_deep=None, deep_transition=None,
deep_transition_width=None, n_boundary=None, alpha_n_boundary=None):
r"""
Setup the rheology parameters for a given fault.
Parameters
----------
alpha_n : float
Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m]
n : float
Power-law exponent :math:`n` [-]
"""
# input check
assert not np.logical_xor(deep_transition is None, deep_transition_width is None)
# set number of variables
self.n_vars = 2
""" Number of variables to track by rheology [-] """
# initialization
self._n = float(n)
self._n_mid = float(n_mid) if n_mid is not None else self.n
self._n_deep = float(n_deep) if n_deep is not None else self.n_mid
self.n_boundary = float(n_boundary) if n_boundary is not None else self.n_deep
""" Power-law exponent :math:`n` [-] """
self.alpha_n = float(alpha_n)
self.alpha_n_mid = (float(alpha_n_mid) if alpha_n_mid is not None
else self.alpha_n)
self.alpha_n_deep = (float(alpha_n_deep) if alpha_n_deep is not None
else self.alpha_n_mid)
self.alpha_n_boundary = (float(alpha_n_boundary) if alpha_n_boundary is not None
else self.alpha_n_deep)
r""" Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m] """
self.mid_transition = None if mid_transition is None else float(mid_transition)
""" Depth [m] for the middle transition point """
self.deep_transition = None if deep_transition is None else float(deep_transition)
""" (Upper) Depth [m] for the deep transition point """
self.deep_transition_width = (None if deep_transition_width is None
else float(deep_transition_width))
""" (Downdip) Width [m] of the deep transition point """
@property
def alpha_n(self):
r""" Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m] """
return self._alpha_n
@alpha_n.setter
def alpha_n(self, alpha_n):
self._alpha_n = float(alpha_n)
self._A = self.calc_A(self._alpha_n, self._n)
@property
def alpha_n_mid(self):
r""" Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m] """
return self._alpha_n_mid
@alpha_n_mid.setter
def alpha_n_mid(self, alpha_n_mid):
self._alpha_n_mid = float(alpha_n_mid)
self._A_mid = self.calc_A(self._alpha_n_mid, self._n_mid)
@property
def alpha_n_deep(self):
r""" Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m] """
return self._alpha_n_deep
@alpha_n_deep.setter
def alpha_n_deep(self, alpha_n_deep):
self._alpha_n_deep = float(alpha_n_deep)
self._A_deep = self.calc_A(self._alpha_n_deep, self._n_deep)
@property
def n(self):
""" Power-law exponent :math:`n` [-] """
return self._n
@n.setter
def n(self, n):
self._n = float(n)
self._A = self.calc_A(self._alpha_n, self._n)
@property
def n_mid(self):
""" Power-law exponent :math:`n` [-] """
return self._n_mid
@n_mid.setter
def n_mid(self, n_mid):
self._n_mid = float(n_mid)
self._A_mid = self.calc_A(self._alpha_n_mid, self._n_mid)
@property
def n_deep(self):
""" Power-law exponent :math:`n` [-] """
return self._n_deep
@n_deep.setter
def n_deep(self, n_deep):
self._n_deep = float(n_deep)
self._A_deep = self.calc_A(self._alpha_n_deep, self._n_deep)
@property
def A(self):
r""" Rescaled strength term :math:`A = \alpha_n^{1/n}` [Pa * (s/m)^(1/n)] """
return self._A
@property
def A_mid(self):
r""" Rescaled strength term :math:`A = \alpha_n^{1/n}` [Pa * (s/m)^(1/n)] """
return self._A_mid
@property
def A_deep(self):
r""" Rescaled strength term :math:`A = \alpha_n^{1/n}` [Pa * (s/m)^(1/n)] """
return self._A_deep
@staticmethod
def calc_A(alpha_n, n):
""" Calculate A from alpha_n and n """
return alpha_n ** (1 / n)
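    # Numerical illustration (not part of the original class): for alpha_n = 1e18
    # [Pa^3 * s/m] and n = 3, calc_A gives A = 1e18 ** (1/3) = 1e6 [Pa * (s/m)^(1/3)],
    # so that tau = A * v ** (1/n) recovers the shear stress for a slip rate v.
    # Values are illustrative only.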
def get_param_vectors(self, patch_depths, v_eff):
r"""
Calculate the depth-dependent arrays of :math:`\alpha_n`, :math:`n`, and :math:`A`,
assuming :math:`\alpha_n` and :math:`\alpha_{n,eff}` vary log-linearly with depth,
and :math:`n` adapts between the transition points.
"""
assert np.all(np.diff(patch_depths) >= 0)
# start knots list
knots = [patch_depths[0]]
vals_alpha_n = [self.alpha_n]
vals_n = [self.n]
# add optional mid transition
if self.mid_transition is not None:
knots.append(patch_depths[np.argmin(np.abs(patch_depths - self.mid_transition))])
vals_alpha_n.append(self.alpha_n_mid)
vals_n.append(self.n_mid)
# add optional deep transition
if self.deep_transition is not None:
knots.append(patch_depths[np.argmin(np.abs(patch_depths - self.deep_transition))])
vals_alpha_n.append(self.alpha_n_deep)
vals_n.append(self.n_deep)
knots.append(patch_depths[np.argmin(np.abs(patch_depths
- self.deep_transition
- self.deep_transition_width))])
vals_alpha_n.append(self.alpha_n_boundary)
vals_n.append(self.n_boundary)
# add final value
knots.append(patch_depths[-1])
vals_alpha_n.append(self.alpha_n_boundary)
vals_alpha_n = np.array(vals_alpha_n)
vals_n.append(self.n_boundary)
vals_n = np.array(vals_n)
vals_alpha_eff = SubductionSimulation.get_alpha_eff(vals_alpha_n, vals_n, v_eff)
# interpolate alpha_n and alpha_eff
alpha_n_vec = 10**interp1d(knots, np.log10(vals_alpha_n))(patch_depths)
alpha_eff_vec = 10**interp1d(knots, np.log10(vals_alpha_eff))(patch_depths)
# get n and A
n_vec = SubductionSimulation.get_n(alpha_n_vec, alpha_eff_vec, v_eff)
A_vec = alpha_n_vec ** (1 / n_vec)
return alpha_n_vec, n_vec, A_vec
class RateStateSteadyLogarithmic(Rheology):
r"""
Implement a steady-state rate-and-state rheology using the ageing law (effectively
becoming a rate-dependent rheology) with velocity in logarithmic space defined by
:math:`f_{ss} = f_0 + (a - b) * \zeta = \tau / \sigma_E`
where :math:`f_{ss}` is the steady-state friction, :math:`f_0` is a reference
friction, :math:`a` and :math:`b` are the rate-and-state frictional parameters,
:math:`\zeta = \log (v / v_0)` is the logarithmic velocity, :math:`\tau` is the shear
stress, and :math:`\sigma_E` is the effective fault normal stress.
"""
def __init__(self, v_0, alpha_h, alpha_h_mid=None, mid_transition=None,
alpha_h_deep=None, deep_transition=None, deep_transition_width=None,
alpha_h_boundary=None):
r"""
Setup the rheology parameters for a given fault.
Parameters
----------
v_0 : float
Reference velocity [m/s] used for the transformation into logarithmic space.
alpha_h : float
Rate-and-state parameter :math:`(a - b) * \sigma_E`,
where :math:`a` and :math:`b` [-] are the rate-and-state frictional properties,
and :math:`\sigma_E` [Pa] is effective fault normal stress.
"""
self.alpha_h = float(alpha_h)
r""" Rate-and-state parameter :math:`(a - b) * \sigma_E` [Pa] """
# input check
assert not np.logical_xor(deep_transition is None, deep_transition_width is None)
assert float(v_0) > 0, "RateStateSteadyLogarithmic needs to have positive v_0."
# set number of variables
self.n_vars = 2
""" Number of variables to track by rheology [-] """
# initialization
self.v_0 = float(v_0)
""" Reference velocity :math:`v_0` [m/s] """
self.alpha_h = float(alpha_h)
r""" Rate-and-state parameter :math:`(a - b) * \sigma_E` [Pa] """
self.alpha_h_mid = (float(alpha_h_mid) if alpha_h_mid is not None
else self.alpha_h)
r""" Middle rate-and-state parameter :math:`(a - b) * \sigma_E` [Pa] """
self.alpha_h_deep = (float(alpha_h_deep) if alpha_h_deep is not None
else self.alpha_h_mid)
r""" Deep rate-and-state parameter :math:`(a - b) * \sigma_E` [Pa] """
self.alpha_h_boundary = (float(alpha_h_boundary) if alpha_h_boundary is not None
else self.alpha_h_deep)
r""" Boundary-layer rate-and-state parameter :math:`(a - b) * \sigma_E` [Pa] """
self.mid_transition = None if mid_transition is None else float(mid_transition)
""" Depth [m] for the middle transition point """
self.deep_transition = None if deep_transition is None else float(deep_transition)
""" (Upper) Depth [m] for the deep transition point """
self.deep_transition_width = (None if deep_transition_width is None
else float(deep_transition_width))
""" (Downdip) Width [m] of the deep transition point """
def get_param_vectors(self, patch_depths):
r"""
Calculate the depth-dependent array of :math:`\alpha_h`, assuming it
varies log-linearly with depth.
"""
assert np.all(np.diff(patch_depths) >= 0)
# start knots list
knots = [patch_depths[0]]
vals_alpha_h = [self.alpha_h]
# add optional mid transition
if self.mid_transition is not None:
knots.append(patch_depths[np.argmin(np.abs(patch_depths - self.mid_transition))])
vals_alpha_h.append(self.alpha_h_mid)
# add optional deep transition
if self.deep_transition is not None:
knots.append(patch_depths[np.argmin(np.abs(patch_depths - self.deep_transition))])
vals_alpha_h.append(self.alpha_h_deep)
knots.append(patch_depths[np.argmin(np.abs(patch_depths
- self.deep_transition
- self.deep_transition_width))])
vals_alpha_h.append(self.alpha_h_boundary)
# add final value
knots.append(patch_depths[-1])
vals_alpha_h.append(self.alpha_h_boundary)
vals_alpha_h = np.array(vals_alpha_h)
        # interpolate alpha_h log-linearly in depth
alpha_h_vec = 10**interp1d(knots, np.log10(vals_alpha_h))(patch_depths)
return alpha_h_vec
@njit(float64[:](float64[:], float64[:], float64[:], float64[:]), cache=True)
def dvdt_plvis(dtaudt, v, A, n):
r"""
Calculate the velocity derivative for a power-law viscous rheology.
From :math:`v = \tau^n / \alpha_n` we get:
:math:`\frac{dv}{dt} = \frac{n}{\alpha_n} \tau^{n-1} \frac{d \tau}{dt}`
where
:math:`\tau^{n-1} = \left( \alpha_n v \right)^{\frac{n-1}{n}}`
simplifying to
:math:`\frac{dv}{dt} = \frac{n}{A} v^{1-\frac{1}{n}} \frac{d \tau}{dt}`
Parameters
----------
dtaudt : numpy.ndarray
1D array of the shear stress derivative
v : numpy.ndarray
1D array of the current velocity
A : numpy.ndarray
Rescaled nonlinear viscous rheology strength constant
n : numpy.ndarray
Power-law exponent
Returns
-------
dvdt : numpy.ndarray
1D array of the velocity derivative.
"""
signs = np.sign(v)
return (n / A) * (signs * v)**(1 - 1 / n) * dtaudt
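# Sanity-check sketch (not part of the original module): for n = 1 the rheology is
# linear viscous, A == alpha_n and v**(1 - 1/n) == 1, so dv/dt = (dtau/dt) / alpha_n.
# With numpy already imported at module level:
#
#   dvdt_plvis(np.array([1.0]), np.array([1e-9]), np.array([1e12]), np.array([1.0]))
#   # -> array([1.e-12]), i.e. dtaudt / alpha_n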
@njit(float64[:](float64[:], float64[:]), cache=True)
def dzetadt_rdlog(dtaudt, alpha_h_vec):
r"""
Return the velocity derivative in logarithmic space given the current traction
rate in linear space.
Taking the derivative of the steady-state friction gives an explicit
formulation for the slip acceleration :math:`\frac{d \zeta}{dt}`:
:math:`\frac{df_{ss}}{dt} = (a-b) \frac{d \zeta}{dt}`
Recognizing that :math:`\tau = f_{ss} \sigma_E` and assuming
constant effective normal stress leads to
:math:`\frac{d \tau}{dt} = \sigma_E \frac{df_{ss}}{dt}`, which
can be rearranged to give the final expression
:math:`\frac{d \zeta}{dt} = \frac{1}{(a-b) \sigma_E} \frac{d \tau}{dt}`
Parameters
----------
dtaudt : numpy.ndarray
Traction derivative :math:`\frac{d \tau}{dt}` [Pa/s] in linear space
alpha_h_vec : float
Rate-and-state parameter :math:`(a - b) * \sigma_E`
Returns
-------
dzetadt : numpy.ndarray
Velocity derivative :math:`\frac{d \zeta}{dt}` [1/s] in logarithmic space.
"""
return dtaudt / alpha_h_vec
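# Illustration (not part of the original module): because zeta = log(v / v_0), a
# constant stressing rate simply translates into a constant log-velocity rate,
# e.g. dzetadt_rdlog(np.array([1e-3]), np.array([1e6])) -> array([1.e-09]) [1/s],
# meaning the linear velocity grows by a factor of e every ~1e9 s at that rate.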
@njit(float64[:](float64[:], float64[:], float64[:], float64[:], float64[:]), cache=True)
def get_new_vel_plvis(v_minus, delta_tau, alpha_n, n, A):
r"""
Calculate the instantaneous velocity change due to an instantaneous stress change
to the fault patches. It is derived from:
:math:`\tau_{+} = \tau_{-} + \Delta \tau`
and plugging in the relationship :math:`v = \tau^n / \alpha_n`, we get
:math:`\sqrt[n]{\alpha_n v_{+}} = \sqrt[n]{\alpha_n v_{-}} + \Delta \tau`
and finally
:math:`v_{+} = \frac{\left( A \sqrt[n]{v_{-}} + \Delta \tau \right)^n}{\alpha_n}`
Parameters
----------
v_minus : numpy.ndarray
Initial velocity :math:`v_{-}` [m/s]
delta_tau : numpy.ndarray
Traction stress change :math:`\Delta \tau` [Pa]
alpha_n : numpy.ndarray
Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m]
n : numpy.ndarray
Power-law exponent :math:`n` [-]
A : numpy.ndarray
Rescaled strength term :math:`A = \alpha_n^{1/n}` [Pa * (s/m)^(1/n)]
Returns
-------
v_plus : numpy.ndarray
Velocity :math:`v_{+}` [m/s] after stress change
"""
signs = np.sign(v_minus)
temp = A * (signs * v_minus)**(1 / n) + (signs * delta_tau)
return np.abs(temp) ** (n - 1) * temp / alpha_n * signs
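# Consistency sketch (not part of the original module): with delta_tau = 0 the
# update returns v_minus unchanged, and for n = 1 it reduces to
# v_plus = v_minus + delta_tau / alpha_n. For example:
#
#   one = np.ones(1)
#   get_new_vel_plvis(1e-9 * one, 0.0 * one, 1e12 * one, one, 1e12 * one)
#   # -> array([1.e-09])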
@njit(float64[:](float64[:], float64[:], float64[:]), cache=True)
def get_new_vel_rdlog(zeta_minus, delta_tau, alpha_h_vec):
r"""
Calculate the instantaneous velocity change (in logarithmic space) due to an
instantaneous stress change to the fault patches. We can kickstart the
    derivation from the expression in ``RateStateSteadyLinear.get_new_vel``:
:math:`\log (v_{+}/v_0) = \log (v_{-}/v_0) + \Delta\tau / \alpha_h`
and realize that we only have to plug in our definition for :math:`\zeta`
to give us the final result
:math:`\zeta_{+} = \zeta_{-} + \Delta\tau / \alpha_h`
Parameters
----------
zeta_minus : numpy.ndarray
Initial velocity :math:`\zeta_{-}` [-] in logarithmic space
delta_tau : numpy.ndarray, optional
Traction stress change :math:`\Delta \tau` [Pa]
alpha_h_vec : numpy.ndarray
Rate-and-state parameter :math:`(a - b) * \sigma_E`
Returns
-------
zeta_plus : numpy.ndarray
Velocity :math:`\zeta_{+}` [-] in logarithmic space after stress change
See Also
--------
alpha_h
"""
return zeta_minus + delta_tau / alpha_h_vec
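# Illustration (not part of the original module): in log-space the coseismic update
# is purely additive, so a stress step equal to alpha_h multiplies the linear
# velocity by exactly e:
#
#   get_new_vel_rdlog(np.array([0.0]), np.array([1e6]), np.array([1e6]))
#   # -> array([1.]), i.e. v jumps from v_0 to e * v_0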
@njit(float64[:](float64, float64[:], float64, float64[:, ::1], float64[:, ::1],
float64[:], float64[:], float64), cache=True)
def flat_ode_plvis(t, state, v_plate, K_int, K_ext, A_upper, n_upper, mu_over_2vs):
r"""
Flattened ODE derivative function for a subduction fault with
powerlaw-viscous rheology in the upper plate interface, and an imposed
constant plate velocity at the lower interface (which can be ignored).
Parameters
----------
t : float
Current time (needs to be in function call for solve_ivp).
state : numpy.ndarray
1D array with the current state of the creeping fault patches,
containing (in order) the upper cumulative slip and upper velocity.
v_plate : float
Plate velocity.
K_int : numpy.ndarray
2D array with the stress kernel mapping creeping patches to themselves.
K_ext : numpy.ndarray
2D array with the stress kernel mapping the effect of the locked
patches onto the creeping patches.
A_upper : numpy.ndarray
Upper plate interface rescaled nonlinear viscous rheology strength constant
n_upper : numpy.ndarray
Upper plate interface power-law exponent
mu_over_2vs : float
Radiation damping factor
Returns
-------
dstatedt : numpy.ndarray
1D array with the state derivative.
"""
# get number of variables within state
# (depends on rheology, so is hardcoded here)
n_vars_upper = 2
n_creeping_upper = state.size // n_vars_upper
assert K_int.shape == (n_creeping_upper, n_creeping_upper)
assert K_ext.shape[0] == n_creeping_upper
# extract total velocities
v = state[n_creeping_upper:]
# get shear strain rate
signs = np.sign(v)
temp = mu_over_2vs * (n_upper / A_upper) * (signs * v)**(1 - 1 / n_upper)
dtaudt = (K_int @ (v - v_plate) - np.sum(K_ext * v_plate, axis=1)
) / (1 + temp)
# get ODE
dstatedt = np.concatenate((v, dvdt_plvis(dtaudt, v, A_upper, n_upper)))
# return
return dstatedt
@njit(float64[:](float64, float64[:], float64, float64[:, ::1], float64[:, ::1],
float64, float64[:], float64), cache=True)
def flat_ode_rdlog(t, state, v_plate, K_int, K_ext, v_0, alpha_h_vec, mu_over_2vs):
r"""
    Flattened ODE derivative function for a subduction fault with
    rate-dependent (log-space) rheology in the upper plate interface, and an
    imposed constant plate velocity at the lower interface (which can be ignored).
Parameters
----------
t : float
Current time (needs to be in function call for solve_ivp).
state : numpy.ndarray
1D array with the current state of the creeping fault patches,
containing (in order) the upper cumulative slip and upper velocity.
v_plate : float
Plate velocity.
K_int : numpy.ndarray
2D array with the stress kernel mapping creeping patches to themselves.
K_ext : numpy.ndarray
2D array with the stress kernel mapping the effect of the locked
patches onto the creeping patches.
v_0 : float
Reference velocity [m/s]
alpha_h_vec : numpy.ndarray
Rate-and-state parameter :math:`(a - b) * \sigma_E`
mu_over_2vs : float
Radiation damping factor
Returns
-------
dstatedt : numpy.ndarray
1D array with the state derivative.
"""
# get number of variables within state
# (depends on rheology, so is hardcoded here)
n_vars_upper = 2
n_creeping_upper = state.size // n_vars_upper
assert K_int.shape == (n_creeping_upper, n_creeping_upper)
assert K_ext.shape[0] == n_creeping_upper
# extract total velocities
zeta = state[n_creeping_upper:]
v = v_0 * np.exp(zeta)
# get shear strain rate
temp = mu_over_2vs * v / alpha_h_vec
dtaudt = (K_int @ (v - v_plate) - np.sum(K_ext * v_plate, axis=1)
) / (1 + temp)
# get ODE
dstatedt = np.concatenate((v, dzetadt_rdlog(dtaudt, alpha_h_vec)))
# return
return dstatedt
@njit(float64[:](float64, float64[:], int64, float64[:], float64[:, ::1], float64[:, ::1],
float64, float64, float64, float64), cache=True)
def flat_ode_plvis_plvis(t, state, n_creeping_upper, v_plate_vec, K_int, K_ext,
A_upper, n_upper, A_lower, n_lower):
"""
Flattened ODE derivative function for a subduction fault with
powerlaw-viscous rheology in both the upper and lower plate interface.
Parameters
----------
t : float
Current time (needs to be in function call for solve_ivp).
state : numpy.ndarray
1D array with the current state of the creeping fault patches,
containing (in order) the upper cumulative slip, upper velocity,
lower cumulative slip, lower velocity.
n_creeping_upper : int
Number of creeping patches in the upper plate interface.
The number of creeping patches in the lower plate interface can then
be derived from the size of ``state``.
    v_plate_vec : numpy.ndarray
        Plate velocity for all creeping patches [m/s].
K_int : numpy.ndarray
2D array with the stress kernel mapping creeping patches to themselves.
K_ext : numpy.ndarray
2D array with the stress kernel mapping the effect of the locked
patches onto the creeping patches.
A_upper : float
Upper plate interface rescaled nonlinear viscous rheology strength constant
n_upper : float
Upper plate interface power-law exponent
A_lower : float
Lower plate interface rescaled nonlinear viscous rheology strength constant
n_lower : float
Lower plate interface power-law exponent
Returns
-------
dstatedt : numpy.ndarray
1D array with the state derivative.
"""
# get number of variables within state
# (depends on rheology, so is hardcoded here)
n_vars_upper, n_vars_lower = 2, 2
n_state_upper = n_vars_upper * n_creeping_upper
n_state_lower = state.size - n_state_upper
n_creeping_lower = n_state_lower // n_vars_lower
n_creeping = n_creeping_lower + n_creeping_upper
assert K_int.shape[0] == K_int.shape[1] == n_creeping
assert K_ext.shape[0] == n_creeping
# split up state
state_upper = state[:n_state_upper]
state_lower = state[n_state_upper:]
# extract total velocities
v_upper = state_upper[n_creeping_upper:]
v_lower = state_lower[n_creeping_lower:]
# get shear strain rate
v = np.concatenate((v_upper, v_lower))
dtaudt = (K_int @ (v - v_plate_vec) - np.sum(K_ext * v_plate_vec[0], axis=1))
dtaudt_upper = dtaudt[:n_creeping_upper]
dtaudt_lower = dtaudt[n_creeping_upper:]
# get individual rheologies' ODE
dstatedt_upper = \
np.concatenate((v_upper, dvdt_plvis(dtaudt_upper, v_upper,
np.ones_like(v_upper) * A_upper,
np.ones_like(v_upper) * n_upper)))
dstatedt_lower = \
np.concatenate((v_lower, dvdt_plvis(dtaudt_lower, v_lower,
np.ones_like(v_lower) * A_lower,
                                            np.ones_like(v_lower) * n_lower)))
# concatenate and return
return np.concatenate((dstatedt_upper, dstatedt_lower))
@njit(float64[:](float64, float64[:], int64, float64[:], float64[:, ::1], float64[:, ::1],
float64, float64, float64, float64), cache=True)
def flat_ode_rdlog_plvis(t, state, n_creeping_upper, v_plate_vec, K_int, K_ext,
v_0, alpha_h_upper, A_lower, n_lower):
r"""
Flattened ODE derivative function for a subduction fault with
rate-dependent (log-space) rheology in the upper and nonlinear viscous
rheology in the lower plate interface.
Parameters
----------
t : float
Current time (needs to be in function call for solve_ivp).
state : numpy.ndarray
1D array with the current state of the creeping fault patches,
containing (in order) the upper cumulative slip, upper velocity,
lower cumulative slip, lower velocity.
n_creeping_upper : int
Number of creeping patches in the upper plate interface.
The number of creeping patches in the lower plate interface can then
be derived from the size of ``state``.
    v_plate_vec : numpy.ndarray
        Plate velocity for all creeping patches [m/s].
K_int : numpy.ndarray
2D array with the stress kernel mapping creeping patches to themselves.
K_ext : numpy.ndarray
2D array with the stress kernel mapping the effect of the locked
patches onto the creeping patches.
v_0 : float
Reference velocity [m/s]
alpha_h_upper : float
Upper interface rate-and-state parameter :math:`(a - b) * \sigma_E` [Pa]
A_lower : float
Lower plate interface rescaled nonlinear viscous rheology strength constant
n_lower : float
Lower plate interface power-law exponent
Returns
-------
dstatedt : numpy.ndarray
1D array with the state derivative.
"""
# get number of variables within state
# (depends on rheology, so is hardcoded here)
n_vars_upper, n_vars_lower = 2, 2
n_state_upper = n_vars_upper * n_creeping_upper
n_state_lower = state.size - n_state_upper
n_creeping_lower = n_state_lower // n_vars_lower
n_creeping = n_creeping_lower + n_creeping_upper
assert K_int.shape[0] == K_int.shape[1] == n_creeping
assert K_ext.shape[0] == n_creeping
# split up state
state_upper = state[:n_state_upper]
state_lower = state[n_state_upper:]
# extract total velocities
v_upper = v_0 * np.exp(state_upper[n_creeping_upper:])
v_lower = state_lower[n_creeping_lower:]
# get shear strain rate
v = np.concatenate((v_upper, v_lower))
dtaudt = (K_int @ (v - v_plate_vec) - np.sum(K_ext * v_plate_vec[0], axis=1))
dtaudt_upper = dtaudt[:n_creeping_upper]
dtaudt_lower = dtaudt[n_creeping_upper:]
# get individual rheologies' ODE
dstatedt_upper = \
np.concatenate((v_upper, dzetadt_rdlog(dtaudt_upper,
                                               np.ones_like(v_upper) * alpha_h_upper)))
dstatedt_lower = \
np.concatenate((v_lower, dvdt_plvis(dtaudt_lower, v_lower,
np.ones_like(v_lower) * A_lower,
                                            np.ones_like(v_lower) * n_lower)))
# concatenate and return
return np.concatenate((dstatedt_upper, dstatedt_lower))
# simple rk4
@njit(float64[:, :](float64, float64, float64[:], float64[:], int64, float64[:],
float64[:, ::1], float64[:, ::1], float64, float64, float64, float64),
cache=True)
def myrk4(ti, tf, state0, t_eval, n_creeping_upper, v_plate_vec,
K_int, K_ext, A_upper, n_upper, A_lower, n_lower):
h = t_eval[1] - t_eval[0]
num_state = state0.size
num_eval = t_eval.size
sol = np.zeros((num_eval, num_state))
sol[0, :] = state0
for i in range(1, num_eval):
cur = sol[i-1, :]
k1 = flat_ode_plvis_plvis(ti, cur, n_creeping_upper, v_plate_vec, K_int, K_ext,
A_upper, n_upper, A_lower, n_lower)
cur = sol[i-1, :] + (h / 2) * k1
k2 = flat_ode_plvis_plvis(ti, cur, n_creeping_upper, v_plate_vec, K_int, K_ext,
A_upper, n_upper, A_lower, n_lower)
cur = sol[i-1, :] + (h / 2) * k2
k3 = flat_ode_plvis_plvis(ti, cur, n_creeping_upper, v_plate_vec, K_int, K_ext,
A_upper, n_upper, A_lower, n_lower)
cur = sol[i-1, :] + h * k3
k4 = flat_ode_plvis_plvis(ti, cur, n_creeping_upper, v_plate_vec, K_int, K_ext,
A_upper, n_upper, A_lower, n_lower)
sol[i, :] = sol[i-1, :] + (h / 6) * (k1 + 2 * k2 + 2 * k3 + k4)
return sol
@njit(float64[:, :](float64[:], int64[:], int64[:], int64, int64, float64[:, ::1],
float64[:, ::1], float64[:], float64[:], float64[:, ::1], float64[:, ::1],
float64[:], float64[:], float64[:], float64), cache=True)
def flat_run_plvis(t_eval, i_break, i_eq,
n_creeping_upper, n_creeping_lower, K_int, K_ext,
v_plate_vec, v_init, slip_taper, delta_tau_bounded,
alpha_n_vec, n_vec, A_vec, mu_over_2vs):
r"""
Run the simulation.
Parameters
----------
t_eval : numpy.ndarray
Evaluation times [s]
i_break : numpy.ndarray
Integer indices of cycle breaks [-]
i_eq : numpy.ndarray
Integer indices of earthquakes within sequence [-]
n_creeping_upper : int
Number [-] of creeping patches in the upper fault interface
n_creeping_lower : int
Number [-] of creeping patches in the lower fault interface
K_int : numpy.ndarray
Internal stress kernel [Pa/m]
K_ext : numpy.ndarray
External stress kernel [Pa/m]
v_plate_vec : numpy.ndarray
Plate velocity for all creeping patches [m/s]
v_init : numpy.ndarray
Initial velocity of the fault patches, in the dimensions of the rheology
slip_taper : numpy.ndarray
Compensating coseismic tapered slip on creeping patches [m]
delta_tau_bounded : numpy.ndarray
Bounded coseismic stress change [Pa]
alpha_n_vec : numpy.ndarray
Upper plate interface nonlinear viscous rheology strength constant [Pa^n * s/m]
at each patch
    n_vec : numpy.ndarray
        Upper plate interface power-law exponent [-] at each patch
    A_vec : numpy.ndarray
        Rescaled upper plate interface nonlinear viscous rheology strength constant
        [Pa * (s/m)^(1/n)] at each patch
mu_over_2vs : float
Radiation damping factor :math:`\mu / 2 v_s`, where :math:`\mu` is the shear
modulus [Pa] and :math:`v_s` is the shear wave velocity [m/s]
Returns
-------
full_state : numpy.ndarray
Full state variable at the end of the integration.
"""
# initialize parameters
n_vars_upper, n_vars_lower = 2, 2
n_state_upper = n_vars_upper * n_creeping_upper
n_state_lower = n_vars_lower * n_creeping_lower
n_eval = t_eval.size
n_slips = delta_tau_bounded.shape[1]
# initialize arrays
s_minus_upper = np.zeros((n_vars_upper - 1) * n_creeping_upper)
s_minus_lower = np.zeros(n_creeping_lower)
v_minus_upper = v_init[:n_creeping_upper]
v_minus_lower = v_plate_vec[n_creeping_upper:]
full_state = np.empty((n_state_upper + n_state_lower, n_eval))
full_state[:] = np.NaN
state_plus = np.concatenate((s_minus_upper, v_minus_upper, s_minus_lower, v_minus_lower))
# make flat ODE function arguments
args = (v_plate_vec[0], K_int[:n_creeping_upper, :n_creeping_upper].copy(),
K_ext[:n_creeping_upper, :], A_vec, n_vec, mu_over_2vs)
# integrate
spun_up = 0
i_slip = 0
steps = np.sort(np.concatenate((i_eq, i_break)))
i = 0
atol = np.ones(n_state_upper) * 1e-6
atol[n_creeping_upper:] = 1e-15
while i < steps.size - 1:
# print(f"{i+1}/{steps.size - 1}")
# get indices
ji, jf = steps[i], steps[i+1]
ti, tf = t_eval[ji], t_eval[jf]
# call integrator
with objmode(sol="float64[:, :]", success="boolean"):
sol = solve_ivp(flat_ode_plvis,
t_span=[ti, tf],
y0=state_plus[:n_state_upper],
t_eval=t_eval[ji:jf + 1],
method="LSODA", rtol=1e-6, atol=atol, args=args)
success = sol.success
if success:
sol = sol.y
else:
sol = np.empty((1, 1))
if not success:
raise RuntimeError("Integrator failed.")
# save state to output array
full_state[:n_state_upper, ji:jf + 1] = sol
# fill in the imposed lower state
full_state[n_state_upper:n_state_upper + n_creeping_lower, ji:jf + 1] = \
np.ascontiguousarray(v_plate_vec[n_creeping_upper:]).reshape((-1, 1)) \
* np.ascontiguousarray(t_eval[ji:jf + 1]).reshape((1, -1))
full_state[n_state_upper + n_creeping_lower:, ji:jf + 1] = \
np.ascontiguousarray(v_plate_vec[n_creeping_upper:]).reshape((-1, 1))
# can already stop here if this is the last interval
if i == steps.size - 2:
break
# at the end of a full cycle, check the early stopping criteria
if (not spun_up) and (i > n_slips) and (jf in i_break):
old_full_state = full_state[:, steps[i-2*n_slips-1]:steps[i-n_slips]]
new_full_state = full_state[:, steps[i-n_slips]:steps[i+1]]
old_state_upper = old_full_state[:n_state_upper, :]
new_state_upper = new_full_state[:n_state_upper, :]
old_v_upper = old_state_upper[-n_creeping_upper:, -1]
new_v_upper = new_state_upper[-n_creeping_upper:, -1]
lhs_upper = np.abs(old_v_upper - new_v_upper)
rhs_upper = (1e-3) * np.abs(v_plate_vec[0]) + (1e-3) * np.abs(new_v_upper)
stop_now = np.all(lhs_upper <= rhs_upper)
if stop_now:
spun_up = jf
# advance i to the last cycle (don't forget the general advance later)
i = steps.size - n_slips - 3
elif spun_up and (jf in i_break):
break
# apply step change only if there is one
if (jf in i_eq):
state_upper, state_lower = sol[:n_state_upper, -1], sol[n_state_upper:, -1]
s_minus_upper = state_upper[:-n_creeping_upper]
v_minus_upper = state_upper[-n_creeping_upper:]
s_minus_lower = state_lower[:-n_creeping_lower]
v_minus_lower = state_lower[-n_creeping_lower:]
s_plus_upper = s_minus_upper.ravel().copy()
s_plus_upper[:n_creeping_upper] += slip_taper[:, i_slip]
s_plus_lower = s_minus_lower.ravel()
v_plus_upper = get_new_vel_plvis(v_minus_upper,
delta_tau_bounded[:n_creeping_upper, i_slip],
alpha_n_vec, n_vec, A_vec)
v_plus_lower = v_minus_lower.ravel()
state_plus = np.concatenate((s_plus_upper, v_plus_upper,
s_plus_lower, v_plus_lower))
i_slip = (i_slip + 1) % n_slips
else:
state_plus = sol[:, -1]
# advance
i += 1
# warn if we never spun up
if not spun_up:
print(f"Simulation did not spin up after {len(i_break) - 1} cycles!")
# done
return full_state
@njit(float64[:, :](float64[:], int64[:], int64[:], int64, int64, float64[:, ::1],
float64[:, ::1], float64[:], float64[:], float64[:, ::1], float64[:, ::1],
float64, float64[:], float64), cache=True)
def flat_run_rdlog(t_eval, i_break, i_eq,
n_creeping_upper, n_creeping_lower, K_int, K_ext,
v_plate_vec, v_init, slip_taper, delta_tau_bounded,
v_0, alpha_h_vec, mu_over_2vs):
r"""
Run the simulation.
Parameters
----------
t_eval : numpy.ndarray
Evaluation times [s]
i_break : numpy.ndarray
Integer indices of cycle breaks [-]
i_eq : numpy.ndarray
Integer indices of earthquakes within sequence [-]
n_creeping_upper : int
Number [-] of creeping patches in the upper fault interface
n_creeping_lower : int
Number [-] of creeping patches in the lower fault interface
K_int : numpy.ndarray
Internal stress kernel [Pa/m]
K_ext : numpy.ndarray
External stress kernel [Pa/m]
v_plate_vec : numpy.ndarray
Plate velocity for all creeping patches [m/s]
v_init : numpy.ndarray
Initial velocity of the fault patches, in the dimensions of the rheology
slip_taper : numpy.ndarray
Compensating coseismic tapered slip on creeping patches [m]
delta_tau_bounded : numpy.ndarray
Bounded coseismic stress change [Pa]
v_0 : float
Reference velocity [m/s]
alpha_h_vec : numpy.ndarray
Upper interface rate-and-state parameter :math:`(a - b) * \sigma_E` [Pa]
mu_over_2vs : float
Radiation damping factor :math:`\mu / 2 v_s`, where :math:`\mu` is the shear
modulus [Pa] and :math:`v_s` is the shear wave velocity [m/s]
Returns
-------
full_state : numpy.ndarray
Full state variable at the end of the integration.
"""
# initialize parameters
n_vars_upper, n_vars_lower = 2, 2
n_state_upper = n_vars_upper * n_creeping_upper
n_state_lower = n_vars_lower * n_creeping_lower
n_eval = t_eval.size
n_slips = delta_tau_bounded.shape[1]
# initialize arrays
s_minus_upper = np.zeros((n_vars_upper - 1) * n_creeping_upper)
s_minus_lower = np.zeros(n_creeping_lower)
assert np.all(v_init[:n_creeping_upper] > 0)
zeta_minus_upper = np.log(v_init[:n_creeping_upper] / v_0)
v_minus_lower = v_plate_vec[n_creeping_upper:]
full_state = np.empty((n_state_upper + n_state_lower, n_eval))
full_state[:] = np.NaN
state_plus = np.concatenate((s_minus_upper, zeta_minus_upper,
s_minus_lower, v_minus_lower))
# make flat ODE function arguments
args = (v_plate_vec[0], K_int[:n_creeping_upper, :n_creeping_upper].copy(),
K_ext[:n_creeping_upper, :], v_0, alpha_h_vec, mu_over_2vs)
# integrate
spun_up = 0
i_slip = 0
steps = np.sort(np.concatenate((i_eq, i_break)))
i = 0
while i < steps.size - 1:
# print(f"{i+1}/{steps.size - 1}")
# get indices
ji, jf = steps[i], steps[i+1]
ti, tf = t_eval[ji], t_eval[jf]
# call integrator
with objmode(sol="float64[:, :]", success="boolean"):
sol = solve_ivp(flat_ode_rdlog,
t_span=[ti, tf],
y0=state_plus[:n_state_upper],
t_eval=t_eval[ji:jf + 1],
method="LSODA", args=args)
success = sol.success
if success:
sol = sol.y
else:
sol = np.empty((1, 1))
if not success:
raise RuntimeError("Integrator failed.")
# save state to output array
full_state[:n_state_upper, ji:jf + 1] = sol
# fill in the imposed lower state
full_state[n_state_upper:n_state_upper + n_creeping_lower, ji:jf + 1] = \
np.ascontiguousarray(v_plate_vec[n_creeping_upper:]).reshape((-1, 1)) \
* np.ascontiguousarray(t_eval[ji:jf + 1]).reshape((1, -1))
full_state[n_state_upper + n_creeping_lower:, ji:jf + 1] = \
np.ascontiguousarray(v_plate_vec[n_creeping_upper:]).reshape((-1, 1))
# can already stop here if this is the last interval
if i == steps.size - 2:
break
# at the end of a full cycle, check the early stopping criteria
if (not spun_up) and (i > n_slips) and (jf in i_break):
old_full_state = full_state[:, steps[i-2*n_slips-1]:steps[i-n_slips]]
new_full_state = full_state[:, steps[i-n_slips]:steps[i+1]]
old_state_upper = old_full_state[:n_state_upper, :]
new_state_upper = new_full_state[:n_state_upper, :]
old_v_upper = v_0 * np.exp(old_state_upper[-n_creeping_upper:, -1])
new_v_upper = v_0 * np.exp(new_state_upper[-n_creeping_upper:, -1])
lhs_upper = np.abs(old_v_upper - new_v_upper)
rhs_upper = (1e-3) * np.abs(v_plate_vec[0]) + (1e-3) * np.abs(new_v_upper)
stop_now = np.all(lhs_upper <= rhs_upper)
if stop_now:
spun_up = jf
# advance i to the last cycle (don't forget the general advance later)
i = steps.size - n_slips - 3
elif spun_up and (jf in i_break):
break
# apply step change only if there is one
if (jf in i_eq):
state_upper, state_lower = sol[:n_state_upper, -1], sol[n_state_upper:, -1]
s_minus_upper = state_upper[:-n_creeping_upper]
zeta_minus_upper = state_upper[-n_creeping_upper:]
s_minus_lower = state_lower[:-n_creeping_lower]
v_minus_lower = state_lower[-n_creeping_lower:]
s_plus_upper = s_minus_upper.ravel().copy()
s_plus_upper[:n_creeping_upper] += slip_taper[:, i_slip]
s_plus_lower = s_minus_lower.ravel()
zeta_plus_upper = get_new_vel_rdlog(zeta_minus_upper,
delta_tau_bounded[:n_creeping_upper, i_slip],
alpha_h_vec)
v_plus_lower = v_minus_lower.ravel()
state_plus = np.concatenate((s_plus_upper, zeta_plus_upper,
s_plus_lower, v_plus_lower))
i_slip = (i_slip + 1) % n_slips
else:
state_plus = sol[:, -1]
# advance
i += 1
# warn if we never spun up
if not spun_up:
print(f"Simulation did not spin up after {len(i_break) - 1} cycles!")
full_state[n_creeping_upper:n_state_upper, :] = \
v_0 * np.exp(full_state[n_creeping_upper:n_state_upper, :])
# done
return full_state
@njit(float64[:, :](float64[:], int64[:], int64[:], int64, int64, float64[:, ::1],
float64[:, ::1], float64[:], float64[:], float64[:, ::1], float64[:, ::1],
float64, float64, float64, float64, boolean), cache=True)
def flat_run_plvis_plvis(t_eval, i_break, i_eq,
n_creeping_upper, n_creeping_lower, K_int, K_ext,
v_plate_vec, v_init, slip_taper, delta_tau_bounded,
alpha_n_upper, n_upper, alpha_n_lower, n_lower,
simple_rk4):
"""
Run the simulation.
Parameters
----------
t_eval : numpy.ndarray
Evaluation times [s]
i_break : numpy.ndarray
Integer indices of cycle breaks [-]
i_eq : numpy.ndarray
Integer indices of earthquakes within sequence [-]
n_creeping_upper : int
Number [-] of creeping patches in the upper fault interface
n_creeping_lower : int
Number [-] of creeping patches in the lower fault interface
K_int : numpy.ndarray
Internal stress kernel [Pa/m]
K_ext : numpy.ndarray
External stress kernel [Pa/m]
v_plate_vec : numpy.ndarray
Plate velocity for all creeping patches [m/s]
v_init : numpy.ndarray
Initial velocity of the fault patches, in the dimensions of the rheology
slip_taper : numpy.ndarray
Compensating coseismic tapered slip on creeping patches [m]
delta_tau_bounded : numpy.ndarray
Bounded coseismic stress change [Pa]
alpha_n_upper : float
Upper plate interface nonlinear viscous rheology strength constant [Pa^n * s/m]
n_upper : float
Upper plate interface power-law exponent [-]
alpha_n_lower : float
Lower plate interface nonlinear viscous rheology strength constant [Pa^n * s/m]
n_lower : float
Lower plate interface power-law exponent [-]
simple_rk4 : bool
Decide whether to use the simple RK4 integrator or not
Returns
-------
full_state : numpy.ndarray
Full state variable at the end of the integration.
"""
# initialize parameters
n_vars_upper, n_vars_lower = 2, 2
n_state_upper = n_vars_upper * n_creeping_upper
n_state_lower = n_vars_lower * n_creeping_lower
A_upper = alpha_n_upper ** (1 / n_upper)
A_lower = alpha_n_lower ** (1 / n_lower)
n_eval = t_eval.size
n_slips = delta_tau_bounded.shape[1]
# initialize arrays
s_minus_upper = np.zeros((n_vars_upper - 1) * n_creeping_upper)
s_minus_lower = np.zeros(n_creeping_lower)
v_minus_upper = v_init[:n_creeping_upper]
# if isinstance(self.fault.upper_rheo, rheologies.RateStateSteadyLogarithmic):
# v_minus_upper = self.fault.upper_rheo.v2zeta(v_minus_upper)
v_minus_lower = v_init[n_creeping_upper:]
full_state = np.empty((n_state_upper + n_state_lower, n_eval))
full_state[:] = np.NaN
state_plus = np.concatenate((s_minus_upper, v_minus_upper, s_minus_lower, v_minus_lower))
# make flat ODE function arguments
args = (n_creeping_upper, v_plate_vec, K_int, K_ext,
A_upper, n_upper, A_lower, n_lower)
# integrate
spun_up = 0
i_slip = 0
steps = np.sort(np.concatenate((i_eq, i_break)))
i = 0
while i < steps.size - 1:
# get indices
ji, jf = steps[i], steps[i+1]
ti, tf = t_eval[ji], t_eval[jf]
# call integrator
if simple_rk4:
sol = myrk4(ti, tf, state_plus, t_eval[ji:jf + 1], *args).T
else:
with objmode(sol="float64[:, :]", success="boolean"):
sol = solve_ivp(flat_ode_plvis_plvis,
t_span=[ti, tf],
y0=state_plus,
t_eval=t_eval[ji:jf + 1],
method="RK45", rtol=1e-9, atol=1e-12, args=args)
success = sol.success
sol = sol.y
if not success:
raise RuntimeError("Integrator failed.")
# save state to output array
full_state[:, ji:jf + 1] = sol
# can already stop here if this is the last interval
if i == steps.size - 2:
break
# at the end of a full cycle, check the early stopping criteria
if (not spun_up) and (i > n_slips) and (jf in i_break):
old_full_state = full_state[:, steps[i-2*n_slips-1]:steps[i-n_slips]]
new_full_state = full_state[:, steps[i-n_slips]:steps[i+1]]
old_state_upper = old_full_state[:n_state_upper, :]
old_state_lower = old_full_state[n_state_upper:, :]
new_state_upper = new_full_state[:n_state_upper, :]
new_state_lower = new_full_state[n_state_upper:, :]
old_v_upper = old_state_upper[-n_creeping_upper:, -1]
old_v_lower = old_state_lower[-n_creeping_lower:, -1]
new_v_upper = new_state_upper[-n_creeping_upper:, -1]
new_v_lower = new_state_lower[-n_creeping_lower:, -1]
# if isinstance(self.fault.upper_rheo, rheologies.RateStateSteadyLogarithmic):
# old_v_upper = self.fault.upper_rheo.zeta2v(old_v_upper)
# new_v_upper = self.fault.upper_rheo.zeta2v(new_v_upper)
lhs_upper = np.abs(old_v_upper - new_v_upper)
lhs_lower = np.abs(old_v_lower - new_v_lower)
rhs_upper = (1e-4) * np.abs(v_plate_vec[0]) + (1e-4) * np.abs(new_v_upper)
rhs_lower = (1e-4) * np.abs(v_plate_vec[-1]) + (1e-4) * np.abs(new_v_lower)
stop_now = np.all(lhs_upper <= rhs_upper) & np.all(lhs_lower <= rhs_lower)
if stop_now:
spun_up = jf
# advance i to the last cycle (don't forget the general advance later)
i = steps.size - n_slips - 3
elif spun_up and (jf in i_break):
break
# apply step change only if there is one
if (jf in i_eq):
state_upper, state_lower = sol[:n_state_upper, -1], sol[n_state_upper:, -1]
s_minus_upper = state_upper[:-n_creeping_upper]
v_minus_upper = state_upper[-n_creeping_upper:]
s_minus_lower = state_lower[:-n_creeping_lower]
v_minus_lower = state_lower[-n_creeping_lower:]
s_plus_upper = s_minus_upper.ravel().copy()
s_plus_upper[:n_creeping_upper] += slip_taper[:, i_slip]
s_plus_lower = s_minus_lower.ravel()
v_plus_upper = get_new_vel_plvis(v_minus_upper,
delta_tau_bounded[:n_creeping_upper, i_slip],
np.ones(n_creeping_upper) * alpha_n_upper,
np.ones(n_creeping_upper) * n_upper,
np.ones(n_creeping_upper) * A_upper)
v_plus_lower = get_new_vel_plvis(v_minus_lower,
delta_tau_bounded[n_creeping_upper:, i_slip],
                                             np.ones(n_creeping_lower) * alpha_n_lower,
                                             np.ones(n_creeping_lower) * n_lower,
                                             np.ones(n_creeping_lower) * A_lower)
state_plus = np.concatenate((s_plus_upper, v_plus_upper,
s_plus_lower, v_plus_lower))
i_slip = (i_slip + 1) % n_slips
else:
state_plus = sol[:, -1]
# advance
i += 1
# warn if we never spun up
if not spun_up:
print(f"Simulation did not spin up after {len(i_break) - 1} cycles!")
# if isinstance(self.fault.upper_rheo, rheologies.RateStateSteadyLogarithmic):
# vel_upper = self.fault.upper_rheo.zeta2v(vel_upper)
# done
return full_state
@njit(float64[:, :](float64[:], int64[:], int64[:], int64, int64, float64[:, ::1],
float64[:, ::1], float64[:], float64[:], float64[:, ::1], float64[:, ::1],
float64, float64, float64, float64, boolean), cache=True)
def flat_run_rdlog_plvis(t_eval, i_break, i_eq,
n_creeping_upper, n_creeping_lower, K_int, K_ext,
v_plate_vec, v_init, slip_taper, delta_tau_bounded,
v_0, alpha_h_upper, alpha_n_lower, n_lower,
simple_rk4):
r"""
Run the simulation.
Parameters
----------
t_eval : numpy.ndarray
Evaluation times [s]
i_break : numpy.ndarray
Integer indices of cycle breaks [-]
i_eq : numpy.ndarray
Integer indices of earthquakes within sequence [-]
n_creeping_upper : int
Number [-] of creeping patches in the upper fault interface
n_creeping_lower : int
Number [-] of creeping patches in the lower fault interface
K_int : numpy.ndarray
Internal stress kernel [Pa/m]
K_ext : numpy.ndarray
External stress kernel [Pa/m]
v_plate_vec : numpy.ndarray
Plate velocity for all creeping patches [m/s]
v_init : numpy.ndarray
Initial velocity of the fault patches, in the dimensions of the rheology
slip_taper : numpy.ndarray
Compensating coseismic tapered slip on creeping patches [m]
delta_tau_bounded : numpy.ndarray
Bounded coseismic stress change [Pa]
v_0 : float
Reference velocity [m/s]
alpha_h_upper : float
Upper interface rate-and-state parameter :math:`(a - b) * \sigma_E` [Pa]
alpha_n_lower : float
Lower plate interface nonlinear viscous rheology strength constant [Pa^n * s/m]
n_lower : float
Lower plate interface power-law exponent [-]
simple_rk4 : bool
Decide whether to use the simple RK4 integrator or not
Returns
-------
full_state : numpy.ndarray
Full state variable at the end of the integration.
"""
# initialize parameters
n_vars_upper, n_vars_lower = 2, 2
n_state_upper = n_vars_upper * n_creeping_upper
n_state_lower = n_vars_lower * n_creeping_lower
A_lower = alpha_n_lower ** (1 / n_lower)
n_eval = t_eval.size
n_slips = delta_tau_bounded.shape[1]
# initialize arrays
s_minus_upper = np.zeros((n_vars_upper - 1) * n_creeping_upper)
s_minus_lower = np.zeros(n_creeping_lower)
assert np.all(v_init[:n_creeping_upper] > 0)
v_minus_upper = np.log(v_init[:n_creeping_upper] / v_0)
# if isinstance(self.fault.upper_rheo, rheologies.RateStateSteadyLogarithmic):
# v_minus_upper = self.fault.upper_rheo.v2zeta(v_minus_upper)
v_minus_lower = v_init[n_creeping_upper:]
full_state = np.empty((n_state_upper + n_state_lower, n_eval))
full_state[:] = np.NaN
state_plus = np.concatenate((s_minus_upper, v_minus_upper, s_minus_lower, v_minus_lower))
# make flat ODE function arguments
args = (n_creeping_upper, v_plate_vec, K_int, K_ext,
v_0, alpha_h_upper, A_lower, n_lower)
# integrate
spun_up = 0
i_slip = 0
steps = np.sort(np.concatenate((i_eq, i_break)))
i = 0
while i < steps.size - 1:
# get indices
ji, jf = steps[i], steps[i+1]
ti, tf = t_eval[ji], t_eval[jf]
# call integrator
if simple_rk4:
sol = myrk4(ti, tf, state_plus, t_eval[ji:jf + 1], *args).T
else:
with objmode(sol="float64[:, :]", success="boolean"):
sol = solve_ivp(flat_ode_rdlog_plvis,
t_span=[ti, tf],
y0=state_plus,
t_eval=t_eval[ji:jf + 1],
method="RK45", rtol=1e-9, atol=1e-12, args=args)
success = sol.success
sol = sol.y
if not success:
raise RuntimeError("Integrator failed.")
# save state to output array
full_state[:, ji:jf + 1] = sol
# can already stop here if this is the last interval
if i == steps.size - 2:
break
# at the end of a full cycle, check the early stopping criteria
if (not spun_up) and (i > n_slips) and (jf in i_break):
old_full_state = full_state[:, steps[i-2*n_slips-1]:steps[i-n_slips]]
new_full_state = full_state[:, steps[i-n_slips]:steps[i+1]]
old_state_upper = old_full_state[:n_state_upper, :]
old_state_lower = old_full_state[n_state_upper:, :]
new_state_upper = new_full_state[:n_state_upper, :]
new_state_lower = new_full_state[n_state_upper:, :]
old_v_upper = v_0 * np.exp(old_state_upper[-n_creeping_upper:, -1])
old_v_lower = old_state_lower[-n_creeping_lower:, -1]
new_v_upper = v_0 * np.exp(new_state_upper[-n_creeping_upper:, -1])
new_v_lower = new_state_lower[-n_creeping_lower:, -1]
# if isinstance(self.fault.upper_rheo, rheologies.RateStateSteadyLogarithmic):
# old_v_upper = self.fault.upper_rheo.zeta2v(old_v_upper)
# new_v_upper = self.fault.upper_rheo.zeta2v(new_v_upper)
lhs_upper = np.abs(old_v_upper - new_v_upper)
lhs_lower = np.abs(old_v_lower - new_v_lower)
rhs_upper = (1e-4) * np.abs(v_plate_vec[0]) + (1e-4) * np.abs(new_v_upper)
rhs_lower = (1e-4) * np.abs(v_plate_vec[-1]) + (1e-4) * np.abs(new_v_lower)
stop_now = np.all(lhs_upper <= rhs_upper) & np.all(lhs_lower <= rhs_lower)
if stop_now:
spun_up = jf
# advance i to the last cycle (don't forget the general advance later)
i = steps.size - n_slips - 3
elif spun_up and (jf in i_break):
break
# apply step change only if there is one
if (jf in i_eq):
state_upper, state_lower = sol[:n_state_upper, -1], sol[n_state_upper:, -1]
s_minus_upper = state_upper[:-n_creeping_upper]
zeta_minus_upper = state_upper[-n_creeping_upper:]
s_minus_lower = state_lower[:-n_creeping_lower]
v_minus_lower = state_lower[-n_creeping_lower:]
s_plus_upper = s_minus_upper.ravel().copy()
s_plus_upper[:n_creeping_upper] += slip_taper[:, i_slip]
s_plus_lower = s_minus_lower.ravel()
zeta_plus_upper = get_new_vel_rdlog(zeta_minus_upper,
delta_tau_bounded[:n_creeping_upper, i_slip],
np.ones(n_creeping_upper) * alpha_h_upper)
v_plus_lower = get_new_vel_plvis(v_minus_lower,
delta_tau_bounded[n_creeping_upper:, i_slip],
                                             np.ones(n_creeping_lower) * alpha_n_lower,
                                             np.ones(n_creeping_lower) * n_lower,
                                             np.ones(n_creeping_lower) * A_lower)
state_plus = np.concatenate((s_plus_upper, zeta_plus_upper,
s_plus_lower, v_plus_lower))
i_slip = (i_slip + 1) % n_slips
else:
state_plus = sol[:, -1]
# advance
i += 1
# warn if we never spun up
if not spun_up:
print(f"Simulation did not spin up after {len(i_break) - 1} cycles!")
full_state[n_creeping_upper:n_state_upper, :] = \
v_0 * np.exp(full_state[n_creeping_upper:n_state_upper, :])
# if isinstance(self.fault.upper_rheo, rheologies.RateStateSteadyLogarithmic):
# vel_upper = self.fault.upper_rheo.zeta2v(vel_upper)
# done
return full_state
@njit(float64[:, :](float64[:, ::1], int64, int64, float64[:, ::1], float64[:, ::1]),
cache=True)
# optional(float64[:, ::1]), optional(float64[:, ::1])))
def get_surface_displacements_plvis_plvis(full_state, n_creeping_upper, n_creeping_lower,
G_surf, deep_creep_slip): # , locked_slip):
"""
Calculate the surface displacements given the output of ``run``.
Parameters
----------
full_state : numpy.ndarray
Full state variable at the end of the integration.
n_creeping_upper : int
Number [-] of creeping patches in the upper fault interface
n_creeping_lower : int
Number [-] of creeping patches in the lower fault interface
G_surf : numpy.ndarray
        Surface displacement Green's matrix [-] (its dimensions must match whether
        `locked_slip` and/or `deep_creep_slip` are passed to the function)
deep_creep_slip : numpy.ndarray
Timeseries of slip [m] on the deep creep patches
locked_slip : numpy.ndarray, optional
Timeseries of slip [m] on the locked patches
Returns
-------
surf_disp : numpy.ndarray
Surface displacement timeseries.
"""
# extract timeseries from solution
slip_upper = full_state[:n_creeping_upper, :]
slip_lower = full_state[2 * n_creeping_upper:2 * n_creeping_upper + n_creeping_lower, :]
# add the locked and deep patches to the combined upper & lower slip history matrix
slips_all = np.concatenate((slip_upper, slip_lower), axis=0)
    # if locked_slip is not None:
# slips_all = np.concatenate((locked_slip[:, :slip_upper.shape[1]], slips_all),
# axis=0)
# if deep_creep_slip is not None:
slips_all = np.concatenate((slips_all, deep_creep_slip), axis=0)
# calculate all surface displacements for last full cycle
surf_disps = G_surf @ slips_all
return surf_disps
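# Minimal usage sketch (mirrors the call made in SubductionSimulation.run() further below;
# `sim` and `obs_state` are placeholder names for a simulation object and the observation
# subset of its integration output):
#   surf_disps = get_surface_displacements_plvis_plvis(
#       obs_state, sim.fault.n_creeping_upper, sim.fault.n_creeping_lower,
#       np.ascontiguousarray(sim.G_surf[:, sim.fault.n_locked:]), sim.deep_creep_slip)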
class Fault2D():
"""
Base class for the subduction fault mesh.
"""
def __init__(self, theta, D_lock, H, nu, E, v_s, halflen,
upper_rheo, n_upper, lower_rheo, n_lower_left,
n_lower_right, halflen_factor_lower,
D_max=None, x1_pretrench=None):
"""
Define the fault mesh of the subduction zone fault system, based on the
Elastic Subducting Plate Model (ESPM) of [kanda2010]_.
Parameters
----------
theta : float
Dip angle [rad] of the plate interface (positive).
D_lock : float
Locking depth [m] of the upper plate interface (positive).
H : float
Subducting plate thickness [m].
nu : float
Poisson's ratio [-] of the fault zone.
E : float
Young's modulus [Pa] of the fault zone.
v_s : float
Shear wave velocity [m/s] in the fault zone.
halflen : float
Fault patch half-length [m], used for all locked patches.
If ``D_max`` and ``x1_pretrench`` are not set, this length is also used for all
creeping patches; otherwise, it is their minimum half-length.
upper_rheo : Rheology
Upper plate interface's rheology.
n_upper : int
Number [-] of patches on upper plate interface.
lower_rheo : Rheology
Lower plate interface's rheology. Pass ``None`` if it should not be simulated,
but enforced to have the plate velocity.
n_lower_left : int
Number [-] of patches on lower plate interface (left of the bend).
n_lower_right : int
Number [-] of patches on lower plate interface (right of the bend).
halflen_factor_lower : float
Factor used to get a different minimum half-length of the patches on the lower
plate interface.
D_max : float, optional
Maximum depth [m] of the upper plate interface (positive).
If set, this makes the mesh use linearly-increasing patch sizes away from the
locked zone. (``x1_pretrench`` must be set as well.)
x1_pretrench : float, optional
Horizontal distance [m] of the lower plate interface before the trench (positive).
If set, this makes the mesh use linearly-increasing patch sizes away from the
locked zone. (``D_max`` must be set as well.)
References
----------
.. [kanda2010] Kanda, R. V. S., & Simons, M. (2010).
*An elastic plate model for interseismic deformation in subduction zones.*
Journal of Geophysical Research: Solid Earth, 115(B3).
doi:`10.1029/2009JB006611 <https://doi.org/10.1029/2009JB006611>`_.
"""
# initialize
self.theta = float(theta)
""" Subducting plate dip angle [rad] """
assert 0 < self.theta < np.pi / 2
self.D_lock = float(D_lock)
""" Theoretical locking depth [m] of the upper plate interface """
assert self.D_lock > 0
self.H = float(H)
""" Subducting plate thickness [m] """
assert self.H >= 0
self.nu = float(nu)
""" Poisson's ratio [-] of the fault zone """
self.E = float(E)
""" Young's modulus [Pa] of the fault zone """
self.halflen = float(halflen)
""" Fault patch half-length [m] on upper interface """
assert self.halflen > 0
self.upper_rheo = upper_rheo
""" Upper plate interface's rheology """
assert isinstance(self.upper_rheo, Rheology)
self.n_upper = int(n_upper)
""" Number [-] of patches on upper plate interface """
assert self.n_upper >= 1
self.lower_rheo = lower_rheo
""" Lower plate interface's rheology """
assert isinstance(self.lower_rheo, Rheology) or \
(self.lower_rheo is None)
self.n_lower_left = int(n_lower_left)
""" Number [-] of patches on lower plate interface (left of bend) """
assert self.n_lower_left >= 1
self.n_lower_right = int(n_lower_right)
""" Number [-] of patches on lower plate interface (right of bend) """
assert self.n_lower_right >= 1
self.halflen_factor_lower = float(halflen_factor_lower)
""" Prefactor [-] to change the lower interface half-length """
assert self.halflen_factor_lower >= 1
self.lower_halflen = self.halflen * self.halflen_factor_lower
""" Fault patch half-length [m] on lower interface """
if self.lower_rheo is not None:
assert self.H >= 2 * self.lower_halflen, "Plate too thin for given patch sizes."
self.v_s = float(v_s)
""" Shear wave velocity [m/s] in the fault zone """
self.mu_over_2vs = self.E / (2 * (1 + self.nu) * 2 * self.v_s)
""" Radiation damping term [Pa * s/m] """
# switch between constant or linearly-varying patch sizes
if (D_max is not None) and (x1_pretrench is not None):
D_max = float(D_max)
x1_pretrench = float(x1_pretrench)
assert D_max > 0
assert x1_pretrench > 0
variable_mesh = True
else:
D_max = None
x1_pretrench = None
variable_mesh = False
self.D_max = D_max
""" Maximum depth [m] of the upper plate interface (optional) """
self.x1_pretrench = x1_pretrench
""" Horizontal distance [m] of the lower plate interface before the trench (optional) """
self.variable_mesh = variable_mesh
""" Flag whether the creeping patches are linearly-varying in size, or not """
# create mesh, centered about the x2 axis
if self.variable_mesh:
# project the locking depth onto dip angle
L_lock = self.D_lock / np.sin(self.theta)
# get number of locked and creeping patches on upper interface
n_lock = int(L_lock // (2 * self.halflen))
n_creep_up = self.n_upper - n_lock
assert n_creep_up > 0, "Current geometry yields no upper creeping patches."
# project maximum interface depth onto dip angle
L_max = self.D_max / np.sin(self.theta)
# get length of creeping segment that needs to be linearly varying
delta_L = L_max - n_lock * 2 * self.halflen
# get linear half-length increase necessary given the number of patches
# and length of creeping segment, on all three interface regions
delta_h_upper = ((delta_L - 2 * self.halflen * n_creep_up) /
(n_creep_up**2 - n_creep_up))
delta_h_lower_right = \
((L_max - 2 * self.lower_halflen * self.n_lower_right) /
(self.n_lower_right**2 - self.n_lower_right))
delta_h_lower_left = \
((self.x1_pretrench - 2 * self.lower_halflen * self.n_lower_left) /
(self.n_lower_left**2 - self.n_lower_left))
# check that we're not running into numerical problems from starkly
# increasing patch sizes
if any([d > 0.2 for d in [delta_h_upper / self.halflen,
delta_h_lower_right / self.lower_halflen,
delta_h_lower_left / self.lower_halflen]]):
raise ValueError("Half-length increase greater than 20%.")
# build vector of half-lengths
halflen_vec = np.concatenate([
np.ones(n_lock) * self.halflen,
self.halflen + np.arange(n_creep_up) * delta_h_upper,
(self.lower_halflen + np.arange(self.n_lower_left) * delta_h_lower_left)[::-1],
self.lower_halflen + np.arange(self.n_lower_right) * delta_h_lower_right])
else:
# build half-length vector from constant size
halflen_vec = np.ones(self.n_upper + self.n_lower_left + self.n_lower_right
) * self.halflen
halflen_vec[self.n_upper:] *= self.halflen_factor_lower
self.halflen_vec = halflen_vec
""" Half-lengths [m] for each patch in the fault """
s = self.H * np.tan(self.theta / 2)
R = np.array([[np.cos(-self.theta), -np.sin(-self.theta)],
[np.sin(-self.theta), np.cos(-self.theta)]])
# upper plate interface
upper_right_x1 = np.concatenate([[0], np.cumsum(2*self.halflen_vec[:self.n_upper])])
upper_right_x2 = np.zeros_like(upper_right_x1)
upper_right = R @ np.stack([upper_right_x1, upper_right_x2], axis=0)
# lower left plate interface
temp = self.halflen_vec[self.n_upper + self.n_lower_left - 1:self.n_upper - 1:-1]
lower_left_x1 = -s - np.concatenate([[0], np.cumsum(2*temp)])[::-1]
lower_left_x2 = -self.H * np.ones(self.n_lower_left + 1)
lower_left = np.stack([lower_left_x1, lower_left_x2], axis=0)
# lower right
lower_right_x1 = np.concatenate([
[0], np.cumsum(2*self.halflen_vec[self.n_upper + self.n_lower_left:])])
lower_right_x2 = np.zeros_like(lower_right_x1)
lower_right = (R @ np.stack([lower_right_x1, lower_right_x2], axis=0)
- np.array([[s], [self.H]]))
# concatenate mesh parts
self.end_upper = upper_right
""" 2-element coordinates of upper fault patch endpoints [m] """
self.end_lower = np.concatenate([lower_left, lower_right[:, 1:]], axis=1)
""" 2-element coordinates of lower fault patch endpoints [m] """
self.end = np.concatenate([self.end_upper, self.end_lower], axis=1)
""" 2-element coordinates of fault patch endpoints [m] """
self.mid = np.concatenate([upper_right[:, :-1] + upper_right[:, 1:],
lower_left[:, :-1] + lower_left[:, 1:],
lower_right[:, :-1] + lower_right[:, 1:]],
axis=1) / 2
""" 2-element coordinates of fault patch midpoints [m] """
self.mid_x1 = self.mid[0, :]
""" :math:`x_1` coordinates of fault patch midpoints [m] """
self.mid_x2 = self.mid[1, :]
""" :math:`x_2` coordinates of fault patch midpoints [m] """
# access subparts
self.ix_upper = np.arange(self.mid_x1.size) < upper_right_x1.size
""" Mask of upper fault interface patches """
self.ix_lower = ~self.ix_upper
""" Mask of lower fault interface patches (if existing) """
# locked is the part that slips coseismically on the upper plate interface
self.x1_lock = self.D_lock / np.tan(self.theta)
""" Theoretical surface location [m] of end of locked interface """
ix_locked = self.mid_x1 <= self.x1_lock - self.halflen
ix_locked[self.n_upper:] = False
self.ix_locked = ix_locked
""" Mask of fault patches that are locked interseismically """
self.n_locked = (self.ix_locked).sum()
""" Number [-] of locked patches """
# assert self.n_locked == n_lock
self.n_creeping = (~self.ix_locked).sum()
""" Number [-] of creeping patches """
self.n_creeping_upper = (~self.ix_locked[:self.n_upper]).sum()
""" Number [-] of creeping patches in the upper fault interface """
# assert self.n_creeping_upper == n_creep_up
self.n_creeping_lower = self.n_creeping - self.n_creeping_upper
""" Number [-] of creeping patches in the lower fault interface """
assert self.n_creeping_lower == n_lower_left + n_lower_right
self.mid_x1_locked = self.mid_x1[self.ix_locked]
""" :math:`x_1` coordinates of locked fault patch midpoints [m] """
self.mid_x2_locked = self.mid_x2[self.ix_locked]
""" :math:`x_2` coordinates of locked fault patch midpoints [m] """
self.mid_x1_creeping = self.mid_x1[~self.ix_locked]
""" :math:`x_1` coordinates of creeping fault patch midpoints [m] """
self.mid_x2_creeping = self.mid_x2[~self.ix_locked]
""" :math:`x_2` coordinates of creeping fault patch midpoints [m] """
# for later calculations, need theta and unit vectors in vector form
theta_vec = np.ones_like(self.mid_x1) * self.theta
theta_vec[self.n_upper:self.n_upper + self.n_lower_left] = np.pi
theta_vec[self.n_upper + self.n_lower_left:] += np.pi
self.theta_vec = theta_vec
""" Plate dip angle [rad] for all fault patches """
self.e_f = np.stack([np.sin(self.theta_vec), np.cos(self.theta_vec)], axis=0)
""" Unit vectors [-] normal to fault patches"""
self.e_s = np.stack([-np.cos(self.theta_vec), np.sin(self.theta_vec)], axis=0)
""" Unit vectors [-] in fault patch slip direction """
# get external (from the locked to the creeping patches) stress kernel
K = Klinedisp(self.mid_x1_creeping, self.mid_x2_creeping,
self.mid_x1_locked, self.mid_x2_locked,
self.halflen_vec[self.ix_locked],
self.theta_vec[self.ix_locked], self.nu, self.E
)[:, :self.n_locked]
Kx1x1 = K[:self.n_creeping, :]
Kx2x2 = K[self.n_creeping:2*self.n_creeping, :]
Kx1x2 = K[2*self.n_creeping:3*self.n_creeping, :]
K = np.stack([Kx1x1.ravel(), Kx1x2.ravel(), Kx1x2.ravel(), Kx2x2.ravel()]
).reshape(2, 2, self.n_creeping, self.n_locked).transpose(2, 3, 0, 1)
self.K_ext = np.einsum("ki,ijkl,li->ij", self.e_s[:, ~self.ix_locked],
K, self.e_f[:, ~self.ix_locked], optimize=True)
""" External stress kernel [Pa/m] """
# get internal (within creeping patches) stress kernel
K = Klinedisp(self.mid_x1_creeping, self.mid_x2_creeping,
self.mid_x1_creeping, self.mid_x2_creeping,
self.halflen_vec[~self.ix_locked],
self.theta_vec[~self.ix_locked], self.nu, self.E
)[:, :self.n_creeping]
Kx1x1 = K[:self.n_creeping, :]
Kx2x2 = K[self.n_creeping:2*self.n_creeping, :]
Kx1x2 = K[2*self.n_creeping:3*self.n_creeping, :]
K = np.stack([Kx1x1.ravel(), Kx1x2.ravel(), Kx1x2.ravel(), Kx2x2.ravel()]
).reshape(2, 2, self.n_creeping, self.n_creeping).transpose(2, 3, 0, 1)
self.K_int = np.einsum("ki,ijkl,li->ij", self.e_s[:, ~self.ix_locked],
K, self.e_f[:, ~self.ix_locked], optimize=True)
""" Internal stress kernel [Pa/m] """
self.n_state_upper = self.upper_rheo.n_vars * self.n_creeping_upper
""" Size [-] of upper plate interface state variable """
self.n_state_lower = (self.lower_rheo.n_vars * self.n_creeping_lower
if self.lower_rheo is not None
else 2 * self.n_creeping_lower)
""" Size [-] of lower plate interface state variable """
if (self.n_creeping_upper == 0) or (self.n_creeping_lower == 0):
raise ValueError("Defined geometry results in zero creeping patches in "
"either the upper or lower plate interface.")
# # if upper rheology is Burgers, tell it our specific shear modulus
# if isinstance(self.upper_rheo, rheologies.LinearBurgers):
# self.upper_rheo.set_G(self.K_int[:self.n_creeping_upper, :self.n_creeping_upper])
# discretized locking depth
self.D_lock_disc = -self.end_upper[1, self.n_locked]
""" Discretized locking depth [m] of the upper plate interface """
self.x1_lock_disc = self.D_lock_disc / np.tan(self.theta)
""" Discretized surface location [m] of end of locked interface """
class SubductionSimulation():
"""
Subduction simulation container class.
"""
def __init__(self, v_plate, n_cycles_max, n_samples_per_eq, delta_tau_max, v_max,
fault, Ds_0, Ds_0_logsigma, T_rec, T_rec_logsigma, D_asp_min,
D_asp_max, T_anchor, T_last, enforce_v_plate, largehalflen,
t_obs, pts_surf):
"""
Create a subduction simulation.
Parameters
----------
v_plate : float
Nominal far-field plate velocity, in the dimensions of the rheology
n_cycles_max : int
Maximum number of cycles to simulate [-]
n_samples_per_eq : int
Number of internal evaluation timesteps between earthquakes [-]
delta_tau_max : float
Maximum shear stress change [Pa] from coseismic slip on locked patches
v_max : float
Maximum slip velocity [m/s] on creeping patches
fault : Fault2D
Fault object
Ds_0 : numpy.ndarray
Nominal coseismic left-lateral shearing [m] of the locked fault patch(es)
Ds_0_logsigma : numpy.ndarray
Standard deviation of the fault slip in logarithmic space
T_rec : numpy.ndarray
Nominal recurrence time [a] for each earthquake
T_rec_logsigma : numpy.ndarray
Standard deviation of the recurrence time in logarithmic space
D_asp_min : numpy.ndarray
Minimum depth [m] for the asperities of each earthquake
D_asp_max : numpy.ndarray
Maximum depth [m] for the asperities of each earthquake
T_anchor : str
Anchor date where observations end
T_last : list
Dates of the last occurrence of each earthquake (list of strings)
enforce_v_plate : bool
Flag whether to allow v_plate to vary or not
largehalflen : float
Fault patch half-length of the deep creep patches [m]
t_obs : numpy.ndarray, pandas.DatetimeIndex
Observation timesteps, either as decimal years relative to the cycle start,
or as Timestamps
pts_surf : numpy.ndarray
Horizontal landward observation coordinates [m] relative to the trench
"""
# save general sequence & fault parameters
self.v_plate = float(v_plate)
""" Nominal far-field plate velocity, in the dimensions of the rheology """
self.n_cycles_max = int(n_cycles_max)
""" Maximum number of cycles to simulate [-] """
self.n_samples_per_eq = int(n_samples_per_eq)
""" Number of internal evaluation timesteps between earthquakes [-] """
self.delta_tau_max = float(delta_tau_max)
""" Maximum shear stress change [Pa] from coseismic slip on locked patches """
self.v_max = float(v_max)
""" Maximum slip velocity [m/s] on creeping patches """
# define fault
assert isinstance(fault, Fault2D)
if not (isinstance(fault.upper_rheo, NonlinearViscous) or
isinstance(fault.upper_rheo, RateStateSteadyLogarithmic)) or \
not (isinstance(fault.lower_rheo, NonlinearViscous) or
(fault.lower_rheo is None)):
raise NotImplementedError("SubductionSimulation is only implemented for "
"NonlinearViscous or RateStateSteadyLogarithmic "
"rheologies in the upper interface, and NonlinearViscous "
"rheology in the lower interface.")
self.fault = fault
""" Fault object """
# cast earthquake slips as NumPy array
self.Ds_0 = np.atleast_1d(Ds_0)
""" Nominal coseismic left-lateral shearing [m] of the locked fault patch(es) """
self.Ds_0_logsigma = np.atleast_1d(Ds_0_logsigma)
""" Standard deviation of the fault slip in logarithmic space """
# load recurrence times
self.T_rec = np.atleast_1d(T_rec)
""" Nominal recurrence time [a] for each earthquake """
self.T_rec_logsigma = np.atleast_1d(T_rec_logsigma)
""" Standard deviation of the recurrence time in logarithmic space """
# load the minimum and maximum depths of the earthquakes
self.D_asp_min = np.atleast_1d(D_asp_min)
""" Minimum depth [m] for the asperities of each earthquake """
self.D_asp_max = np.atleast_1d(D_asp_max)
""" Maximum depth [m] for the asperities of each earthquake """
assert all([D <= self.fault.D_lock for D in self.D_asp_max]), \
f"Asperity depths {self.D_asp_max/1e3} km are deeper than the " \
f"locking depth {self.fault.D_lock/1e3}."
self.T_anchor = str(T_anchor)
""" Anchor date where observations end """
assert isinstance(T_last, list) and all([isinstance(tl, str) for tl in T_last])
self.T_last = T_last
""" Dates of the last occurence for each earthquake """
# create a boolean mask that matches each locked fault patch to the earthquakes that rupture it
self.slip_mask = np.logical_and(self.fault.mid_x2_locked.reshape(-1, 1)
< -self.D_asp_min.reshape(1, -1),
self.fault.mid_x2_locked.reshape(-1, 1)
> -self.D_asp_max.reshape(1, -1))
""" Mask that matches each earthquake to a fault patch """
self.T_fullcycle = np.lcm.reduce(self.T_rec)
""" Nominal recurrence time [a] for an entire joint earthquake cycle """
self.n_eq = self.Ds_0.size
""" Number of distinct earthquakes in sequence """
self.n_eq_per_asp = (self.T_fullcycle / self.T_rec).astype(int)
""" Number of earthquakes per asperity and full cycle """
# create realization of the slip amount and earthquake timings
rng = np.random.default_rng()
# first, create realizations of occurrence times
# note that this will result in a varying effective plate velocity
# (ignore zero-slip earthquakes)
self.T_rec_per_asp = [rng.lognormal(np.log(t), s, n) for t, s, n in
zip(self.T_rec, self.T_rec_logsigma, self.n_eq_per_asp)]
""" Recurrence time [a] realization """
self.Ds_0_per_asp = [rng.lognormal(np.log(d), s, n) if d > 0
else np.array([d] * n) for d, s, n in
zip(self.Ds_0, self.Ds_0_logsigma, self.n_eq_per_asp)]
""" Fault slip [m] realization """
# sanity check that in each asperity, the nominal plate rate is recovered
self.slip_asperities = self.slip_mask.astype(int) * self.Ds_0.reshape(1, -1)
""" Slip [m] for each earthquake in each asperity """
v_eff_in_asp = (self.slip_asperities / self.T_rec.reshape(1, -1)).sum(axis=1)
assert np.allclose(v_eff_in_asp, self.v_plate * 86400 * 365.25), \
"The nominal plate rate is not recovered in all asperities.\n" \
f"Plate velocity = {self.v_plate * 86400 * 365.25}\n" \
f"Effective velocity in each asperity:\n{v_eff_in_asp}"
# second, we need to shift the random realization for each earthquake
# individually such that they all yield the same v_plate (enforced or not)
# get the effective recurrence time as implied by the T_rec realizations
T_fullcycle_per_asp_eff = np.array([sum(t) for t in self.T_rec_per_asp])
# same for the effective cumulative slip
Ds_0_fullcycle_per_asp_eff = np.array([sum(d) for d in self.Ds_0_per_asp])
# we need to scale each individual sequence such that it implies the same
# recurrence time and cumulative slip in each asperity
# (again ignoring zero-slip earthquakes)
T_fullcycle_eff_mean = np.mean(T_fullcycle_per_asp_eff)
Ds_0_fullcycle_mean = np.ma.masked_equal(Ds_0_fullcycle_per_asp_eff, 0).mean()
T_rec_per_asp_adj = [np.array(self.T_rec_per_asp[i]) * T_fullcycle_eff_mean
/ T_fullcycle_per_asp_eff[i] for i in range(self.n_eq)]
Ds_0_per_asp_adj = [np.array(self.Ds_0_per_asp[i]) * Ds_0_fullcycle_mean
/ Ds_0_fullcycle_per_asp_eff[i] if self.Ds_0[i] > 0
else np.array(self.Ds_0_per_asp[i]) for i in range(self.n_eq)]
# now each asperity has the same effective plate velocity, which can be different
# from the nominal one - if we want to enforce the nominal plate velocity,
# we can rescale the recurrence times again
self.enforce_v_plate = bool(enforce_v_plate)
""" Flag whether to allow v_plate to vary or not """
ix_nonzero_slip = np.argmax(self.Ds_0 > 0)
v_plate_eff = (sum(Ds_0_per_asp_adj[ix_nonzero_slip])
/ sum(T_rec_per_asp_adj[ix_nonzero_slip]) / 86400 / 365.25)
if self.enforce_v_plate:
v_plate_factor = self.v_plate / v_plate_eff
for i in range(self.n_eq):
T_rec_per_asp_adj[i] /= v_plate_factor
v_plate_eff = self.v_plate
self.v_plate_eff = v_plate_eff
""" Effective far-field plate velocity [m/s] """
self.T_eff = sum(T_rec_per_asp_adj[0])
""" Effective length [a] of entire earthquake sequence """
# third, we need to create a list of earthquake dates and associated slips
temp_slips = np.vstack([self.slip_mask[:, i].reshape(1, -1)
* Ds_0_per_asp_adj[i].reshape(-1, 1)
for i in range(self.n_eq)])
year_offsets = [(pd.Period(self.T_anchor, "D") - pd.Period(self.T_last[i], "D")
).n / 365.25 for i in range(self.n_eq)]
eq_df_index = np.concatenate(
[self.T_eff -
(np.cumsum(T_rec_per_asp_adj[i]) - T_rec_per_asp_adj[i] + year_offsets[i])
for i in range(self.n_eq)])
# round the dates to the closest day and combine earthquakes
eq_df_index_rounded = np.around(eq_df_index * 365.25) / 365.25
# build a DataFrame with exact and rounded times
eq_df = pd.DataFrame(data=temp_slips)
eq_df["time"] = eq_df_index
eq_df["rounded"] = eq_df_index_rounded
# now aggregate by rounded time, keeping the minimum exact time, and summing slip
agg_dict = {"time": "min"}
agg_dict.update({c: "sum" for c in range(self.fault.n_locked)})
eq_df = eq_df.groupby("rounded").agg(agg_dict)
# convert time column to index and sort
eq_df.set_index("time", inplace=True)
eq_df.sort_index(inplace=True)
assert np.allclose(eq_df.sum(axis=0), eq_df.sum(axis=0)[0])
self.eq_df = eq_df
"""
DataFrame with the dates [decimal year from cycle start] and slips [m]
for each asperity
"""
# fourth, we need to create a list of dates to use internally when evaluating
# the earthquake cycle - this is independent of the observation dates
i_frac_cumsum = np.concatenate([[self.eq_df.index[-1] - self.T_eff],
self.eq_df.index.values])
T_frac = np.diff(i_frac_cumsum)
t_eval = np.concatenate(
[np.logspace(0, np.log10(1 + T_frac[i]), self.n_samples_per_eq, endpoint=False)
- 1 + i_frac_cumsum[i] + j*self.T_eff
for j in range(self.n_cycles_max) for i, t in enumerate(T_frac)])
num_neg = (t_eval < 0).sum()
t_eval = np.roll(t_eval, -num_neg)
# index from the front so that num_neg == 0 leaves the array unchanged
t_eval[t_eval.size - num_neg:] += self.n_cycles_max * self.T_eff
self.t_eval = np.sort(np.concatenate(
[t_eval, np.arange(self.n_cycles_max + 1) * self.T_eff]))
""" Internal evaluation timesteps [decimal years since cycle start] """
self.n_eval = self.t_eval.size
""" Number of internal evaluation timesteps [-] """
# fifth, for the integration, we need the indices of the timesteps that mark either
# an earthquake or the start of a new cycle
self.n_slips = self.eq_df.shape[0]
""" Number of slips in a sequence [-] """
self.ix_break = [i*(self.n_slips * self.n_samples_per_eq + 1)
for i in range(self.n_cycles_max + 1)]
""" Indices of breaks between cycles """
self.ix_eq = [self.ix_break[i] + j * self.n_samples_per_eq - num_neg + 1
for i in range(self.n_cycles_max) for j in range(1, 1 + self.n_slips)]
""" Indices of earthquakes """
# sixth and last, for the final loop, we need a joint timesteps array between internal
# and external (observation) timestamps, such that we can debug, check early stopping,
# and restrict the output to the requested timeseries
if isinstance(t_obs, pd.DatetimeIndex):
t_obs = self.T_eff + (t_obs - pd.Timestamp(self.T_anchor)
).total_seconds().values / 86400 / 365.25
elif isinstance(t_obs, np.ndarray):
if np.all(t_obs < 0):
# this format is relative to T_anchor and more stable when T_eff varies
t_obs = self.T_eff + t_obs
assert np.all(t_obs >= 0) and np.all(t_obs < self.T_eff), \
f"Range of 't_obs' ({t_obs.min()}-{t_obs.max():} years) outside of " \
f"the earthquake cycle period ({self.T_eff:} years)."
else:
raise ValueError("Unknown 't_obs' data type.")
self.t_obs = t_obs
""" Observation timesteps [decimal years since cycle start] """
# combine all possible timesteps
t_obs_shifted = self.t_obs + (self.n_cycles_max - 1) * self.T_eff
self.t_eval_joint = np.unique(np.concatenate((self.t_eval, t_obs_shifted)))
"""
Joint internal evaluation and external observation timesteps
[decimal years since cycle start]
"""
# get indices of each individual subset in the new timesteps array
self.ix_break_joint = \
np.flatnonzero(np.isin(self.t_eval_joint, self.t_eval[self.ix_break]))
""" Indices of breaks between cycles in joint timesteps """
self.ix_eq_joint = \
np.flatnonzero(np.isin(self.t_eval_joint, self.t_eval[self.ix_eq]))
""" Indices of earthquakes in joint timesteps """
self.ix_obs_joint = \
np.flatnonzero(np.isin(self.t_eval_joint, t_obs_shifted))
""" Indices of observation timestamps in joint timesteps """
# get vectors of upper plate rheology parameters
if isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic):
# alpha_h
self.alpha_h_vec = \
self.fault.upper_rheo.get_param_vectors(
-self.fault.mid_x2_creeping[:self.fault.n_creeping_upper])
r""" Depth-variable :math:`(a - b) * \sigma_E` [Pa] of upper plate interface """
elif isinstance(self.fault.upper_rheo, NonlinearViscous):
# A, alpha_n, and n
alpha_n_vec, n_vec, A_vec = \
self.fault.upper_rheo.get_param_vectors(
-self.fault.mid_x2_creeping[:self.fault.n_creeping_upper], self.v_plate)
self.alpha_n_vec = alpha_n_vec
r""" Depth-variable :math:`\alpha_n` [Pa^n * s/m] of upper plate interface """
self.n_vec = n_vec
r""" Depth-variable :math:`n` [-] of upper plate interface """
self.A_vec = A_vec
r""" Depth-variable :math:`A ` [Pa * (s/m)^(1/n)] of upper plate interface """
else:
raise NotImplementedError
# get unbounded delta_tau
self.delta_tau_unbounded = self.fault.K_ext @ self.eq_df.values.T
""" Unbounded coseismic stress change [Pa] """
# get pseudoinverse of K_int for tapered slip
self.K_int_inv_upper = np.linalg.pinv(
self.fault.K_int[:self.fault.n_creeping_upper, :self.fault.n_creeping_upper])
""" Inverse of K_int [m/Pa] """
self.delta_tau_max_from_v_max_lower = \
((self.fault.lower_rheo.alpha_n * self.v_max)**(1 / self.fault.lower_rheo.n) -
(self.fault.lower_rheo.alpha_n * self.v_plate)**(1 / self.fault.lower_rheo.n)
if self.fault.lower_rheo is not None else np.inf)
""" Maximum shear stress change [Pa] in lower plate from capped velocity """
if isinstance(self.fault.upper_rheo, NonlinearViscous):
delta_tau_max_from_v_max_upper = \
(self.alpha_n_vec * self.v_max)**(1 / self.n_vec) - \
(self.alpha_n_vec * self.v_plate)**(1 / self.n_vec)
elif isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic):
delta_tau_max_from_v_max_upper = self.alpha_h_vec * \
(np.log(self.v_max / self.fault.upper_rheo.v_0) -
np.log(self.v_plate / self.fault.upper_rheo.v_0))
self.delta_tau_max_from_v_max_upper = delta_tau_max_from_v_max_upper
""" Maximum shear stress change [Pa] in upper plate from capped velocity """
self.delta_tau_max_joint_upper = np.fmin(self.delta_tau_max,
self.delta_tau_max_from_v_max_upper)
""" Joint maximum shear stress change [Pa] allowed in upper plate """
self.delta_tau_max_joint_lower = \
(min(self.delta_tau_max, self.delta_tau_max_from_v_max_lower)
if self.fault.lower_rheo is not None else np.inf)
""" Joint maximum shear stress change [Pa] allowed in lower plate """
# create tapered slip by making delta_tau linearly increase until delta_tau_max
delta_tau_bounded = self.delta_tau_unbounded.copy()
delta_tau_bounded[:self.fault.n_creeping_upper, :] = \
np.fmin(self.delta_tau_max_joint_upper.reshape(-1, 1),
self.delta_tau_unbounded[:self.fault.n_creeping_upper, :])
self.delta_tau_bounded = delta_tau_bounded
""" Bounded coseismic stress change [Pa] """
# get the additional slip
self.slip_taper = (self.K_int_inv_upper @
(self.delta_tau_bounded - self.delta_tau_unbounded
)[:self.fault.n_creeping_upper, :])
# check if the lower plate should have been bounded as well
if self.fault.lower_rheo is not None:
assert not np.any(np.abs(self.delta_tau_bounded[self.fault.n_creeping_upper:, :])
> self.delta_tau_max_joint_lower), \
("Maximum stress change delta_tau_bounded "
f"{np.max(np.abs(self.delta_tau_bounded)):.2e} Pa in lower interface "
f"above delta_tau_max = {self.delta_tau_max_joint_lower:.2e} Pa")
self.slip_taper_ts = \
pd.DataFrame(index=self.eq_df.index, data=self.slip_taper.T) \
.cumsum(axis=0).reindex(index=self.t_obs, method="ffill", fill_value=0)
""" Timeseries of tapered slip [m] on the upper creeping fault patches """
# need the imagined location and orientation of the deep creep patches
self.largehalflen = float(largehalflen)
""" Fault patch half-length of the deep crreep patches [m] """
self.mid_deep_x1 = \
np.array([self.fault.mid_x1[self.fault.n_upper - 1]
+ np.cos(self.fault.theta_vec[self.fault.n_upper - 1])
* self.fault.halflen_vec[self.fault.n_upper - 1]
+ np.cos(self.fault.theta_vec[self.fault.n_upper - 1])
* self.largehalflen,
self.fault.mid_x1[self.fault.n_upper + self.fault.n_lower_left - 1]
- self.fault.halflen_vec[self.fault.n_upper + self.fault.n_lower_left - 1]
- self.largehalflen,
self.fault.mid_x1[-1]
+ np.cos(self.fault.theta_vec[-1] - np.pi)
* self.fault.halflen_vec[-1]
+ np.cos(self.fault.theta_vec[-1] - np.pi)
* self.largehalflen])
""" :math:`x_1` coordinates of deep creep fault patch midpoints [m] """
self.mid_deep_x2 = \
np.array([self.fault.mid_x2[self.fault.n_upper - 1]
- np.sin(self.fault.theta_vec[self.fault.n_upper - 1])
* self.fault.halflen_vec[self.fault.n_upper - 1]
- np.sin(self.fault.theta_vec[self.fault.n_upper - 1])
* self.largehalflen,
self.fault.mid_x2[self.fault.n_upper + self.fault.n_lower_left - 1],
self.fault.mid_x2[-1]
- np.sin(self.fault.theta_vec[-1] - np.pi)
* self.fault.halflen_vec[-1]
- np.sin(self.fault.theta_vec[-1] - np.pi)
* self.largehalflen])
""" :math:`x_2` coordinates of deep creep fault patch midpoints [m] """
self.theta_vec_deep = \
np.array([self.fault.theta_vec[self.fault.n_upper - 1],
np.pi,
self.fault.theta_vec[-1]])
""" Plate dip angle [rad] for deep creep fault patches """
# create the Green's matrices
self.pts_surf = pts_surf
""" :math:`x_1` coordinates of surface observation points [m] """
self.n_stations = self.pts_surf.size
""" Number of surface observing stations """
self.G_surf_fault = Glinedisp(
self.pts_surf, 0, self.fault.mid_x1, self.fault.mid_x2,
self.fault.halflen_vec, self.fault.theta_vec, self.fault.nu
)[:, :self.fault.mid_x1.size]
""" Green's matrix [-] relating slip on the main fault patches to surface motion """
self.G_surf_deep = Glinedisp(
self.pts_surf, 0, self.mid_deep_x1, self.mid_deep_x2,
self.largehalflen, self.theta_vec_deep, self.fault.nu)[:, :3]
""" Green's matrix [-] relating slip on the deep creep patches to surface motion """
self.G_surf = np.hstack([self.G_surf_fault, self.G_surf_deep])
""" Joint Green's matrix [-] relating slip on the entire ESPM to surface motion """
# calculate the best initial velocity state from the steady state ODE
v_plate_vec = np.ones(self.fault.n_creeping) * self.v_plate
v_plate_vec[self.fault.n_creeping_upper:] *= -1
self.v_plate_vec = v_plate_vec
""" Vector with the plate velocity for each creeping patch [m/s] """
# get the initial velocity, taking advantage of the option that there could be a
# deep transition zone
v_init = v_plate_vec.copy()
if self.fault.upper_rheo.deep_transition is not None:
ix_deep = np.argmin(np.abs(-self.fault.mid_x2_creeping[:self.fault.n_creeping_upper]
- self.fault.upper_rheo.deep_transition
- self.fault.upper_rheo.deep_transition_width))
if isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic):
v_init[:ix_deep] = np.linspace(self.v_plate * 1e-6, self.v_plate,
num=ix_deep, endpoint=False)
elif isinstance(self.fault.upper_rheo, NonlinearViscous):
v_init[:ix_deep] = np.linspace(0, self.v_plate, num=ix_deep, endpoint=False)
self.v_init = v_init
""" Initial velocity in all creeping patches [m/s] """
@property
def locked_slip(self):
""" Timeseries of slip [m] on the locked patches for observation timespan """
return self.eq_df.cumsum(axis=0) \
.reindex(index=self.t_obs, method="ffill", fill_value=0).values.T
@property
def deep_creep_slip(self):
""" Timeseries of slip [m] on the deep creep patches for observation timestamps """
return (np.tile(self.t_obs.reshape(1, -1), (3, 1))
* np.array([1, -1, -1]).reshape(3, 1)
* self.v_plate_eff * 86400 * 365.25)
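# Both properties return one row per patch and one column per observation timestep:
# locked_slip has shape (n_locked, t_obs.size) and deep_creep_slip has shape
# (3, t_obs.size), with slip in meters accumulated since the cycle start.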
@staticmethod
def read_config_file(config_file):
"""
Read a configuration file and return it as a parsed dictionary.
Parameters
----------
config_file : str
Path to INI configuration file.
Returns
-------
cfg_dict : dict
Parsed configuration file.
"""
# load configuration file
cfg = configparser.ConfigParser()
cfg.optionxform = str
with open(config_file, mode="rt") as f:
cfg.read_file(f)
cfg_seq, cfg_fault, cfg_mesh = cfg["sequence"], cfg["fault"], cfg["mesh"]
# parse rheologies
upper_rheo_dict = dict(cfg["upper_rheo"])
upper_rheo_type = upper_rheo_dict.pop("type")
upper_rheo_kw_args = {k: float(v) for k, v in upper_rheo_dict.items()}
try:
lower_rheo_dict = dict(cfg["lower_rheo"])
except KeyError:
lower_rheo_type = None
lower_rheo_kw_args = None
else:
lower_rheo_type = lower_rheo_dict.pop("type")
lower_rheo_kw_args = {k: float(v) for k, v in lower_rheo_dict.items()}
# parse everything else
cfg_dict = {
"theta": np.deg2rad(cfg_fault.getfloat("theta_deg")),
"D_lock": cfg_fault.getfloat("D_lock"),
"H": cfg_fault.getfloat("H"),
"nu": cfg_fault.getfloat("nu"),
"E": cfg_fault.getfloat("E"),
"v_s": cfg_fault.getfloat("v_s"),
"halflen": cfg_mesh.getfloat("halflen"),
"n_upper": cfg_mesh.getint("n_up"),
"n_lower_left": cfg_mesh.getint("n_low_l"),
"n_lower_right": cfg_mesh.getint("n_low_r"),
"halflen_factor_lower": cfg_mesh.getfloat("halflen_factor_lower"),
"D_max": cfg_mesh.getfloat("D_max", fallback=None),
"x1_pretrench": cfg_mesh.getfloat("x1_pretrench", fallback=None),
"v_plate": cfg_seq.getfloat("v_plate"),
"n_cycles_max": cfg_seq.getint("n_cycles_max"),
"n_samples_per_eq": cfg_seq.getint("n_samples_per_eq"),
"delta_tau_max": cfg_fault.getfloat("delta_tau_max", fallback=np.inf),
"v_max": cfg_fault.getfloat("v_max", fallback=np.inf),
"Ds_0": np.atleast_1d(json.loads(cfg_seq["Ds_0"])),
"Ds_0_logsigma": np.atleast_1d(json.loads(cfg_seq["Ds_0_logsigma"])),
"T_rec": np.atleast_1d(json.loads(cfg_seq["T_rec"])),
"T_rec_logsigma": np.atleast_1d(json.loads(cfg_seq["T_rec_logsigma"])),
"D_asp_min": np.atleast_1d(json.loads(cfg_seq["D_asp_min"])),
"D_asp_max": np.atleast_1d(json.loads(cfg_seq["D_asp_max"])),
"T_anchor": cfg_seq.get("T_anchor"),
"T_last": json.loads(cfg_seq["T_last"]),
"enforce_v_plate": cfg_seq.getboolean("enforce_v_plate"),
"largehalflen": cfg_mesh.getfloat("largehalflen"),
"upper_rheo_type": upper_rheo_type,
"lower_rheo_type": lower_rheo_type,
"upper_rheo_kw_args": upper_rheo_kw_args,
"lower_rheo_kw_args": lower_rheo_kw_args
}
return cfg_dict
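# Sketch of the INI layout expected above (section and key names as parsed; the values are
# placeholders, not a validated parameter set - in particular, Ds_0, T_rec, and v_plate
# must be mutually consistent for the checks in __init__ to pass):
#   [fault]
#   theta_deg = 15
#   D_lock = 40e3
#   H = 30e3
#   nu = 0.25
#   E = 100e9
#   v_s = 3000
#   [mesh]
#   halflen = 2e3
#   n_up = 50
#   n_low_l = 20
#   n_low_r = 30
#   halflen_factor_lower = 2
#   largehalflen = 1e6
#   [sequence]
#   v_plate = 1e-9
#   n_cycles_max = 50
#   n_samples_per_eq = 100
#   Ds_0 = [5.0]
#   Ds_0_logsigma = [0.1]
#   T_rec = [200]
#   T_rec_logsigma = [0.1]
#   D_asp_min = [0]
#   D_asp_max = [40e3]
#   T_anchor = 2020-01-01
#   T_last = ["1900-01-01"]
#   enforce_v_plate = True
#   [upper_rheo]
#   type = RateStateSteadyLogarithmic
#   ... (remaining keys are passed as keyword arguments to the rheology class)
#   [lower_rheo]
#   type = NonlinearViscous
#   ... (section is optional; omit it to pin the lower interface to the plate velocity)
# delta_tau_max and v_max may be added to [fault]; D_max and x1_pretrench to [mesh].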
@classmethod
def from_config_dict(cls, cfg, t_obs, pts_surf):
"""
Create a SubductionSimulation object from a configuration dictionary.
Parameters
----------
cfg : dict
Dictionary containing all parsed elements from the configuration file
t_obs : numpy.ndarray, pandas.DatetimeIndex
Observation timesteps, either as decimal years relative to the cycle start,
or as Timestamps
pts_surf : numpy.ndarray
Horizontal landward observation coordinates [m] relative to the trench
See Also
--------
read_config_file : To load a configuration file into a dictionary.
"""
# create rheology objects
upper_rheo = globals()[cfg["upper_rheo_type"]](**cfg["upper_rheo_kw_args"])
if cfg["lower_rheo_type"] is None:
lower_rheo = None
else:
lower_rheo = globals()[cfg["lower_rheo_type"]](**cfg["lower_rheo_kw_args"])
# create fault object
fault = Fault2D(theta=cfg["theta"],
D_lock=cfg["D_lock"],
H=cfg["H"],
nu=cfg["nu"],
E=cfg["E"],
v_s=cfg["v_s"],
halflen=cfg["halflen"],
upper_rheo=upper_rheo,
n_upper=cfg["n_upper"],
lower_rheo=lower_rheo,
n_lower_left=cfg["n_lower_left"],
n_lower_right=cfg["n_lower_right"],
halflen_factor_lower=cfg["halflen_factor_lower"],
D_max=cfg["D_max"],
x1_pretrench=cfg["x1_pretrench"])
# create simulation object
return cls(v_plate=cfg["v_plate"],
n_cycles_max=cfg["n_cycles_max"],
n_samples_per_eq=cfg["n_samples_per_eq"],
delta_tau_max=cfg["delta_tau_max"],
v_max=cfg["v_max"],
fault=fault,
Ds_0=cfg["Ds_0"],
Ds_0_logsigma=cfg["Ds_0_logsigma"],
T_rec=cfg["T_rec"],
T_rec_logsigma=cfg["T_rec_logsigma"],
D_asp_min=cfg["D_asp_min"],
D_asp_max=cfg["D_asp_max"],
T_anchor=cfg["T_anchor"],
T_last=cfg["T_last"],
enforce_v_plate=cfg["enforce_v_plate"],
largehalflen=cfg["largehalflen"],
t_obs=t_obs,
pts_surf=pts_surf)
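# Typical end-to-end sketch (file name and values are hypothetical):
#   cfg = SubductionSimulation.read_config_file("espm.ini")
#   t_obs = np.arange(-3650, 0) / 365.25       # daily samples, decimal years before T_anchor
#   pts_surf = np.linspace(50e3, 300e3, 10)    # landward distances from the trench [m]
#   sim = SubductionSimulation.from_config_dict(cfg, t_obs, pts_surf)
#   full_state, obs_state, surf_disps = sim.run()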
@staticmethod
def get_n(alpha_n, alpha_eff, v_eff):
r"""
Calculate the power-law exponent from the real and effective viscous strength constants.
Parameters
----------
alpha_n : float
Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m]
alpha_eff : float
Effective linear viscous strength constant [Pa * s/m]
v_eff : float
Effective velocity [m/s] used for ``alpha_eff`` conversions
Returns
-------
n : float
Power-law exponent :math:`n` [-]
"""
return (np.log(alpha_n) + np.log(v_eff)) / (np.log(alpha_eff) + np.log(v_eff))
@staticmethod
def get_alpha_n(alpha_eff, n, v_eff):
r"""
Calculate the real nonlinear viscous strength constant from the effective linear one.
Parameters
----------
alpha_eff : float
Effective linear viscous strength constant [Pa * s/m]
n : float
Power-law exponent :math:`n` [-]
v_eff : float
Effective velocity [m/s] used for ``alpha_eff`` conversions
Returns
-------
alpha_n : float
Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m]
"""
alpha_n = alpha_eff**n * v_eff**(n-1)
return alpha_n
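# Worked example (illustrative numbers): with alpha_eff = 1e12 Pa * s/m, n = 3, and
# v_eff = 1e-9 m/s, this gives alpha_n = (1e12)**3 * (1e-9)**2 = 1e18 [Pa^3 * s/m].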
@staticmethod
def get_alpha_eff(alpha_n, n, v_eff):
r"""
Calculate the effective linear viscous strength constant from the real nonlinear one.
Parameters
----------
alpha_n : float
Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m]
n : float
Power-law exponent :math:`n` [-]
v_eff : float
Effective velocity [m/s] used for ``alpha_eff`` conversions
Returns
-------
alpha_eff : float
Effective linear viscous strength constant [Pa * s/m]
"""
if isinstance(v_eff, np.ndarray):
temp = v_eff.copy()
temp[temp == 0] = np.nan
else:
temp = v_eff
alpha_eff = alpha_n**(1/n) * temp**((1-n)/n)
return alpha_eff
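# This is the inverse of get_alpha_n; with the same illustrative numbers:
#   SubductionSimulation.get_alpha_eff(alpha_n=1e18, n=3, v_eff=1e-9)    # -> 1e12 Pa * s/m
#   SubductionSimulation.get_n(alpha_n=1e18, alpha_eff=1e12, v_eff=1e-9)  # -> 3.0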
@staticmethod
def get_alpha_eff_from_alpha_h(alpha_h, v_eff):
r"""
Calculate the effective viscosity from the rate-dependent friction.
Parameters
----------
alpha_h : float
Rate-and-state parameter :math:`(a - b) * \sigma_E`,
where :math:`a` and :math:`b` [-] are the rate-and-state frictional properties,
and :math:`\sigma_E` [Pa] is effective fault normal stress.
v_eff : float
Effective velocity [m/s] used for ``alpha_eff`` conversions
Returns
-------
alpha_eff : float
Effective linear viscous strength constant [Pa * s/m]
"""
if isinstance(v_eff, np.ndarray):
temp = v_eff.copy()
temp[temp == 0] = np.nan
else:
temp = v_eff
alpha_eff = alpha_h / temp
return alpha_eff
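# Worked example (illustrative numbers): alpha_h = 1e6 Pa at v_eff = 1e-9 m/s gives
# alpha_eff = 1e6 / 1e-9 = 1e15 Pa * s/m.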
def run(self, simple_rk4=False):
"""
Run a full simulation.
"""
# run forward integration
if self.fault.lower_rheo is None:
if isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic):
full_state = flat_run_rdlog(
self.t_eval_joint * 86400 * 365.25, self.ix_break_joint, self.ix_eq_joint,
self.fault.n_creeping_upper, self.fault.n_creeping_lower, self.fault.K_int,
self.fault.K_ext, self.v_plate_vec, self.v_init, self.slip_taper,
self.delta_tau_bounded, self.fault.upper_rheo.v_0, self.alpha_h_vec,
self.fault.mu_over_2vs)
elif isinstance(self.fault.upper_rheo, NonlinearViscous):
full_state = flat_run_plvis(
self.t_eval_joint * 86400 * 365.25, self.ix_break_joint, self.ix_eq_joint,
self.fault.n_creeping_upper, self.fault.n_creeping_lower, self.fault.K_int,
self.fault.K_ext, self.v_plate_vec, self.v_init, self.slip_taper,
self.delta_tau_bounded, self.alpha_n_vec, self.n_vec, self.A_vec,
self.fault.mu_over_2vs)
else:
raise NotImplementedError
elif isinstance(self.fault.lower_rheo, NonlinearViscous):
if isinstance(self.fault.upper_rheo, NonlinearViscous):
full_state = flat_run_plvis_plvis(
self.t_eval_joint * 86400 * 365.25, self.ix_break_joint, self.ix_eq_joint,
self.fault.n_creeping_upper, self.fault.n_creeping_lower, self.fault.K_int,
self.fault.K_ext, self.v_plate_vec, self.v_init, self.slip_taper,
self.delta_tau_bounded, self.fault.upper_rheo.alpha_n,
self.fault.upper_rheo.n, self.fault.lower_rheo.alpha_n,
self.fault.lower_rheo.n, simple_rk4)
elif isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic):
full_state = flat_run_rdlog_plvis(
self.t_eval_joint * 86400 * 365.25, self.ix_break_joint, self.ix_eq_joint,
self.fault.n_creeping_upper, self.fault.n_creeping_lower, self.fault.K_int,
self.fault.K_ext, self.v_plate_vec, self.v_init, self.slip_taper,
self.delta_tau_bounded, self.fault.upper_rheo.v_0,
self.fault.upper_rheo.alpha_h, self.fault.lower_rheo.alpha_n,
self.fault.lower_rheo.n, simple_rk4)
else:
raise NotImplementedError
else:
raise NotImplementedError
# extract the observations that were actually requested
obs_state = full_state[:, self.ix_obs_joint].copy()
# since we're only calculating transient surface displacements, need to
# remove the tapered slip due to bounded stresses
obs_state[:self.fault.n_creeping_upper, :] -= self.slip_taper_ts.values.T
# convert to surface displacements
surf_disps = get_surface_displacements_plvis_plvis(
obs_state, self.fault.n_creeping_upper, self.fault.n_creeping_lower,
np.ascontiguousarray(self.G_surf[:, self.fault.n_locked:]),
self.deep_creep_slip)
return full_state, obs_state, surf_disps
def zero_obs_at_eq(self, surf_disps):
"""
Reset to zero the surface displacement timeseries every time an earthquake happens.
"""
obs_zeroed = surf_disps.copy()
slips_obs = np.logical_and(self.t_obs.min() <= self.eq_df.index,
self.t_obs.max() > self.eq_df.index)
n_slips_obs = slips_obs.sum()
if n_slips_obs == 0:
obs_zeroed -= obs_zeroed[:, 0].reshape(-1, 1)
else:
i_slips_obs = [np.argmax(self.t_obs >= t_eq) for t_eq
in self.eq_df.index.values[slips_obs]]
obs_zeroed[:, :i_slips_obs[0]] -= obs_zeroed[:, i_slips_obs[0] - 1].reshape(-1, 1)
obs_zeroed[:, i_slips_obs[0]:] -= obs_zeroed[:, i_slips_obs[0]].reshape(-1, 1)
for i in range(1, n_slips_obs):
obs_zeroed[:, i_slips_obs[i]:] -= obs_zeroed[:, i_slips_obs[i]].reshape(-1, 1)
return obs_zeroed
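# Sketch of the intended use, continuing the run() example further above:
#   obs_zeroed = sim.zero_obs_at_eq(surf_disps)
#   fig, ax = sim.plot_surface_displacements(obs_zeroed)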
def _reduce_full_state(self, data):
# get all NaN columns
cols_all_nan = np.all(np.isnan(data), axis=0)
# check if there was early stopping
if cols_all_nan.sum() > 0:
# get the border indices where integrations have been skipped
ix_last, ix_first = np.flatnonzero(cols_all_nan)[[0, -1]]
ix_last -= 1
ix_first += 1
# get indices before and after the NaN period
ix_valid = np.r_[0:ix_last, ix_first:self.t_eval_joint.size]
# subset data
data = data[:, ix_valid]
t_sub = self.t_eval_joint[ix_valid].copy()
t_sub[ix_last:] -= self.t_eval_joint[ix_first] - self.t_eval_joint[ix_last]
n_cyc_completed = int(np.round(self.t_eval_joint[ix_last] / self.T_eff)) + 1
else:
t_sub = self.t_eval_joint.copy()
n_cyc_completed = self.n_cycles_max + 1
# done
return data, t_sub, n_cyc_completed
def plot_surface_displacements(self, obs_zeroed, obs_noisy=None):
"""
Plot the observers' surface displacement timeseries.
Parameters
----------
obs_zeroed : numpy.ndarray
Surface displacements as output by :meth:`~zero_obs_at_eq`.
obs_noisy : numpy.ndarray, optional
Noisy surface observations.
Returns
-------
matplotlib.figure.Figure
matplotlib.axes.Axes
"""
import matplotlib.pyplot as plt
# some helper variables
isort = np.argsort(self.pts_surf)
i_off = 3 * np.std(obs_zeroed.ravel())
# get float dates of observed earthquakes
slips_obs = np.logical_and(self.t_obs.min() <= self.eq_df.index,
self.t_obs.max() > self.eq_df.index)
n_slips_obs = slips_obs.sum()
if n_slips_obs > 0:
i_slips_obs = [np.argmax(self.t_obs >= t_eq) for t_eq
in self.eq_df.index.values[slips_obs]]
t_last_slips = [self.t_obs[islip] for islip in i_slips_obs]
else:
t_last_slips = []
# start plot
fig, ax = plt.subplots(nrows=2, sharex=True, layout="constrained")
for tslip in t_last_slips:
ax[0].axvline(tslip, c="0.7", zorder=-1)
ax[1].axvline(tslip, c="0.7", zorder=-1)
for i, ix in enumerate(isort):
if obs_noisy is not None:
ax[0].plot(self.t_obs, obs_noisy[ix, :] + i*i_off,
".", c="k", rasterized=True)
ax[1].plot(self.t_obs, obs_noisy[ix + self.n_stations, :] + i*i_off,
".", c="k", rasterized=True)
ax[0].plot(self.t_obs, obs_zeroed[ix, :] + i*i_off, c=f"C{i}")
ax[1].plot(self.t_obs, obs_zeroed[ix + self.n_stations, :] + i*i_off, c=f"C{i}")
ax[1].set_xlabel("Time")
ax[0].set_ylabel("Horizontal [m]")
ax[1].set_ylabel("Vertical [m]")
fig.suptitle("Surface Displacement")
return fig, ax
def plot_fault_velocities(self, full_state):
"""
Plot the velocities on all creeping fault patches.
Parameters
----------
full_state : numpy.ndarray
State matrix as output from :meth:`~run`.
Returns
-------
matplotlib.figure.Figure
matplotlib.axes.Axes
"""
import matplotlib.pyplot as plt
from matplotlib.colors import SymLogNorm
from cmcrameri import cm
# extract velocities
vels = full_state[np.r_[self.fault.n_creeping_upper:self.fault.n_state_upper,
self.fault.n_state_upper + self.fault.n_creeping_lower:
self.fault.n_state_upper + self.fault.n_state_lower],
:] / self.v_plate
# check whether the simulation spun up, and NaN data needs to be skipped
vels, t_sub, n_cyc_completed = self._reduce_full_state(vels)
# normalize time
t_sub /= self.T_eff
# prepare plot
norm = SymLogNorm(linthresh=1, vmin=-1, vmax=100)
if self.fault.lower_rheo is None:
fig, ax = plt.subplots(figsize=(10, 5), layout="constrained")
ax = [ax]
else:
fig, ax = plt.subplots(nrows=2, sharex=True, figsize=(10, 5), layout="constrained")
# plot velocities
c = ax[0].pcolormesh(t_sub,
self.fault.end_upper[0, self.fault.n_locked:] / 1e3,
vels[:self.fault.n_creeping_upper, :-1],
norm=norm, cmap=cm.vik, shading="flat")
ax[0].set_yticks(self.fault.end_upper[0, [self.fault.n_locked, -1]] / 1e3)
# add vertical lines for cycle breaks
for n in range(1, n_cyc_completed):
ax[0].axvline(n, c="k", lw=1)
# make the y-axis increasing downwards to mimic depth even though we're plotting x1
ax[0].invert_yaxis()
# repeat for lower interface, if simulated
if self.fault.lower_rheo is not None:
c = ax[1].pcolormesh(t_sub,
self.fault.end_lower[0, :] / 1e3,
-vels[self.fault.n_creeping_upper:, :-1],
norm=norm, cmap=cm.vik, shading="flat")
ax[1].set_yticks(self.fault.end_lower[0, [0, -1]] / 1e3)
# add horizontal lines to show where the lower interface is below the locked zone
ax[1].axhline(0, c="k", lw=1)
ax[1].axhline(self.fault.x1_lock / 1e3, c="k", lw=1)
for n in range(1, n_cyc_completed):
ax[1].axvline(n, c="k", lw=1)
ax[1].invert_yaxis()
# finish figure
if self.fault.lower_rheo is None:
ax[0].set_ylabel("Upper Interface\n$x_1$ [km]")
ax[0].set_xlabel("Normalized Time $t/T$")
else:
ax[0].set_ylabel("Upper Interface\n$x_1$ [km]")
ax[1].set_ylabel("Lower Interface\n$x_1$ [km]")
ax[1].set_xlabel("Normalized Time $t/T$")
fig.colorbar(c, ax=ax, location="right", orientation="vertical", fraction=0.05,
label="$v/v_{plate}$")
fig.suptitle("Normalized Fault Patch Velocities")
return fig, ax
def plot_fault_slip(self, full_state, deficit=True, include_locked=True, include_deep=True):
"""
Plot the cumulative slip (deficit) for the fault patches.
Parameters
----------
full_state : numpy.ndarray
State matrix as output from :meth:`~run`.
deficit : bool, optional
If ``True`` (default), remove the plate velocity to plot slip deficit,
otherwise keep it included.
include_locked : bool, optional
If ``True`` (default), also plot the slip on the locked patches.
include_deep : bool, optional
If ``True`` (default), also plot the slip on the semi-infinite patches
at the end of the interfaces.
Returns
-------
matplotlib.figure.Figure
matplotlib.axes.Axes
"""
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize, SymLogNorm
from cmcrameri import cm
# extract slip
slip = full_state[np.r_[:self.fault.n_creeping_upper,
self.fault.n_state_upper:
self.fault.n_state_upper + self.fault.n_creeping_lower], :]
# check whether the simulation spun up, and NaN data needs to be skipped
slip, t_sub, n_cyc_completed = self._reduce_full_state(slip)
# normalize to slip per full cycle
cum_slip_per_cycle = self.v_plate_eff * self.T_eff * 86400 * 365.25
slip /= cum_slip_per_cycle
# add optional slip histories, if desired
if include_locked:
eq_df_joint = pd.DataFrame(
index=(self.eq_df.index.values.reshape(1, -1)
+ self.T_eff * np.arange(n_cyc_completed).reshape(-1, 1)
).ravel(),
data=np.tile(self.eq_df.values, (n_cyc_completed, 1)))
locked_slip = eq_df_joint.cumsum(axis=0) \
.reindex(index=t_sub, method="ffill", fill_value=0).values.T
locked_slip /= cum_slip_per_cycle
if include_deep:
deep_creep_slip = (np.tile(t_sub.reshape(1, -1), (3, 1))
* np.array([1, -1, -1]).reshape(3, 1)
* self.v_plate_eff * 86400 * 365.25)
deep_creep_slip /= cum_slip_per_cycle
# remove plate velocity to get slip deficit, if desired
if deficit:
cmap = cm.vik
norm = SymLogNorm(linthresh=1e-2, vmin=-1, vmax=1)
slip[:self.fault.n_creeping_upper] -= t_sub.reshape(1, -1) / self.T_eff
slip[self.fault.n_creeping_upper:] += t_sub.reshape(1, -1) / self.T_eff
slip -= slip[:, -2].reshape(-1, 1)
if include_locked:
locked_slip -= t_sub.reshape(1, -1) / self.T_eff
if include_deep:
deep_creep_slip -= (t_sub.reshape(1, -1)
* np.array([1, -1, -1]).reshape(3, 1)) / self.T_eff
else:
norm = Normalize(vmin=0, vmax=n_cyc_completed)
cmap = cm.batlow
# normalize time
t_sub /= self.T_eff
# prepare figure
nrows = (1 + int(self.fault.lower_rheo is not None)
+ int(include_locked) + int(include_deep) * 3)
hr_locked = ((self.fault.end_upper[0, self.fault.n_locked] - self.fault.end_upper[0, 0])
/ (self.fault.end_lower[0, -1] - self.fault.end_lower[0, 0]))
hr_lower = ((self.fault.end_lower[0, -1] - self.fault.end_lower[0, 0])
/ (self.fault.end_upper[0, -1] - self.fault.end_upper[0, self.fault.n_locked]))
hr = ([hr_locked] * int(include_locked) + [1]
+ [hr_locked, hr_locked] * int(include_deep)
+ [hr_lower] * int(self.fault.lower_rheo is not None)
+ [hr_locked] * int(include_deep))
fig, ax = plt.subplots(nrows=nrows, sharex=True, gridspec_kw={"height_ratios": hr},
figsize=(10, 5), layout="constrained")
iax = 0
# plot locked
if include_locked:
c = ax[iax].pcolormesh(t_sub,
self.fault.end_upper[0, :self.fault.n_locked + 1] / 1e3,
locked_slip[:, :-1],
norm=norm, cmap=cmap, shading="flat")
ax[iax].set_ylabel("Locked\n$x_1$ [km]")
temp_x1 = self.fault.end_upper[0, [0, self.fault.n_locked]] / 1e3
ax[iax].set_yticks(temp_x1, [f"{x:.0f}" for x in temp_x1])
iax += 1
# plot upper creeping
c = ax[iax].pcolormesh(t_sub,
self.fault.end_upper[0, self.fault.n_locked:] / 1e3,
slip[:self.fault.n_creeping_upper, :-1],
norm=norm, cmap=cmap, shading="flat")
ax[iax].set_ylabel("Creeping\n$x_1$ [km]")
temp_x1 = self.fault.end_upper[0, [self.fault.n_locked, -1]] / 1e3
ax[iax].set_yticks(temp_x1, [f"{x:.0f}" for x in temp_x1])
iax += 1
# plot end patch on upper interface
if include_deep:
temp_x1 = np.array([self.fault.end_upper[0, -1],
self.mid_deep_x1[0]]) / 1e3
c = ax[iax].pcolormesh(t_sub,
temp_x1,
deep_creep_slip[0, :-1].reshape(1, -1),
norm=norm, cmap=cmap, shading="flat")
ax[iax].set_ylabel("Deep Creep\n$x_1$ [km]")
ax[iax].set_yticks(temp_x1, [f"{temp_x1[0]:.0f}", "$-\\infty$"])
iax += 1
# plot left end patch on lower interface
if include_deep:
temp_x1 = np.array([self.mid_deep_x1[1],
self.fault.end_lower[0, 0]]) / 1e3
c = ax[iax].pcolormesh(t_sub,
temp_x1,
-deep_creep_slip[1, :-1].reshape(1, -1),
norm=norm, cmap=cmap, shading="flat")
ax[iax].set_ylabel("Deep Creep\n$x_1$ [km]")
ax[iax].set_yticks(temp_x1, ["$-\\infty$", f"{temp_x1[1]:.0f}"])
iax += 1
# plot lower creeping
if self.fault.lower_rheo is not None:
c = ax[iax].pcolormesh(t_sub,
self.fault.end_lower[0, :] / 1e3,
-slip[self.fault.n_creeping_upper:, :-1],
norm=norm, cmap=cmap, shading="flat")
ax[iax].axhline(0, c="k", lw=1)
ax[iax].axhline(self.fault.x1_lock / 1e3, c="k", lw=1)
ax[iax].set_ylabel("Creeping\n$x_1$ [km]")
temp_x1 = self.fault.end_lower[0, [0, -1]] / 1e3
ax[iax].set_yticks(temp_x1, [f"{x:.0f}" for x in temp_x1])
iax += 1
# plot right end patch on lower interface
if include_deep:
temp_x1 = np.array([self.fault.end_lower[0, -1],
self.mid_deep_x1[2]]) / 1e3
c = ax[iax].pcolormesh(t_sub,
temp_x1,
-deep_creep_slip[2, :-1].reshape(1, -1),
norm=norm, cmap=cmap, shading="flat")
ax[iax].set_ylabel("Deep Creep\n$x_1$ [km]")
ax[iax].set_yticks(temp_x1, [f"{temp_x1[0]:.0f}", "$-\\infty$"])
iax += 1
# finish figure
for iax in range(len(ax)):
for n in range(1, n_cyc_completed):
ax[iax].axvline(n, c="k", lw=1)
ax[iax].invert_yaxis()
ax[-1].set_xlabel("Normalized Time $t/T$")
fig.colorbar(c, ax=ax, location="right", orientation="vertical", fraction=0.05,
label="$(s - t*v_{plate})/s_{full}$" if deficit else "$s/s_{full}$")
suptitle = "Normalized Fault Patch Slip"
if deficit:
suptitle += " Deficit"
fig.suptitle(suptitle)
return fig, ax
def plot_eq_velocities(self, full_state):
"""
Plot the before and after velocities on all creeping fault patches
for each distinct earthquake.
Parameters
----------
full_state : numpy.ndarray
State matrix as output from :meth:`~run`.
Returns
-------
matplotlib.figure.Figure
matplotlib.axes.Axes
"""
import matplotlib.pyplot as plt
# get indices of each last earthquake in last cycle
temp = self.eq_df.astype(bool).drop_duplicates(keep="last")
time_eq_last = temp.index.values + (self.n_cycles_max - 1) * self.T_eff
tdiff = np.array([np.min(np.abs(self.t_eval_joint - tlast)) for tlast in time_eq_last])
if np.any(tdiff > 0):
warn("Couldn't find exact indices, using time differences of "
f"{tdiff * 365.25 * 86400} seconds.")
ix_eq_last = [np.argmin(np.abs(self.t_eval_joint - tlast)) for tlast in time_eq_last]
n_eq_found = len(ix_eq_last)
assert n_eq_found == (self.Ds_0 > 0).sum(), \
"Couldn't find indices of each last non-zero earthquake in the " \
"last cycle, check for rounding errors."
# calculate average slip for plotted earthquakes
slip_last = self.eq_df.loc[temp.index, :]
slip_avg = [slip_last.iloc[ieq, np.flatnonzero(temp.iloc[ieq, :])].mean()
for ieq in range(n_eq_found)]
# extract velocities
vels = full_state[np.r_[self.fault.n_creeping_upper:self.fault.n_state_upper,
self.fault.n_state_upper + self.fault.n_creeping_lower:
self.fault.n_state_upper + self.fault.n_state_lower],
:] / self.v_plate
# prepare plot
fig, ax = plt.subplots(nrows=n_eq_found, ncols=1 if self.fault.lower_rheo is None else 2,
sharey=True, layout="constrained")
ax = np.asarray(ax).reshape(n_eq_found, -1)
# loop over earthquakes
for irow, ieq in enumerate(ix_eq_last):
# repeat plot for before and after
for ioff, label in enumerate(["before", "after"]):
ax[irow, 0].set_yscale("symlog", linthresh=1)
ax[irow, 0].plot(self.fault.mid_x1_creeping[:self.fault.n_creeping_upper] / 1e3,
vels[:self.fault.n_creeping_upper, ieq - 1 + ioff],
c=f"C{ioff}", label=label)
if self.fault.lower_rheo is not None:
ax[irow, 1].set_yscale("symlog", linthresh=1)
ax[irow, 1].plot(
self.fault.mid_x1_creeping[self.fault.n_creeping_upper:] / 1e3,
-vels[self.fault.n_creeping_upper:, ieq - 1 + ioff],
c=f"C{ioff}", label=label)
# finish plot
for irow in range(n_eq_found):
ax[irow, 0].set_title(f"Upper Interface: $s={slip_avg[irow]:.2g}$ m")
ax[irow, 0].legend()
ax[irow, 0].set_xlabel("$x_1$ [km]")
ax[irow, 0].set_ylabel("$v/v_{plate}$")
if self.fault.lower_rheo is not None:
ax[irow, 1].set_title(f"Lower Interface: $s={slip_avg[irow]:.2g}$ m")
ax[irow, 1].axvline(0, c="k", lw=1)
ax[irow, 1].axvline(self.fault.x1_lock / 1e3, c="k", lw=1)
ax[irow, 1].tick_params(labelleft=True)
ax[irow, 1].legend()
ax[irow, 1].set_xlabel("$x_1$ [km]")
ax[irow, 1].set_ylabel("$v/v_{plate}$")
fig.suptitle("Normalized Earthquake Velocity Changes")
return fig, ax
def plot_fault(self):
"""
Plot the fault.
Returns
-------
matplotlib.figure.Figure
matplotlib.axes.Axes
"""
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(10, 3), layout="constrained")
ax.plot(self.fault.end_upper[0, :self.fault.n_locked + 1]/1e3,
self.fault.end_upper[1, :self.fault.n_locked + 1]/1e3,
marker="|", markeredgecolor="k",
label="Locked")
ax.plot(self.fault.end_upper[0, self.fault.n_locked:]/1e3,
self.fault.end_upper[1, self.fault.n_locked:]/1e3,
marker="|", markeredgecolor="k",
label="Upper Creeping")
ax.plot(self.fault.end_lower[0, :]/1e3,
self.fault.end_lower[1, :]/1e3,
marker="|", markeredgecolor="k",
label="Lower Creeping")
ax.plot(self.pts_surf / 1e3, np.zeros_like(self.pts_surf),
"^", markeredgecolor="none", markerfacecolor="k",
label="Observers")
ax.axhline(0, lw=1, c="0.5", zorder=-1)
ax.legend()
ax.set_xlabel("$x_1$ [km]")
ax.set_ylabel("$x_2$ [km]")
ax.set_title("Fault Mesh and Observer Locations")
ax.set_aspect("equal")
return fig, ax
def plot_slip_phases(self, full_state, post_inter_transition=0.01, normalize=True):
"""
Plot the cumulative slip on the fault for the three different
phases (coseismic, early postseismic, and interseismic).
Only works if there is a single earthquake in the sequence.
Parameters
----------
full_state : numpy.ndarray
State matrix as output from :meth:`~run`.
post_inter_transition : float, optional
Fraction of the recurrence time that should be considered
early postseismic and not interseismic.
Returns
-------
matplotlib.figure.Figure
matplotlib.axes.Axes
"""
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
# check that the sequence only has one earthquake
if not self.n_eq == 1:
raise NotImplementedError("Don't know how to plot slip phases if "
"multiple earthquakes are present in the sequence.")
# get coseismic slip
co = np.concatenate([self.eq_df.values.ravel(),
self.slip_taper.ravel()])
# get index of last earthquake in last cycle
time_eq_last = self.eq_df.index[0] + (self.n_cycles_max - 1) * self.T_eff
ix_eq_last = (np.flatnonzero(np.isin(self.t_eval_joint, time_eq_last))[0]
- self.ix_break_joint[-2])
# reorganize interseismic slip
slip = full_state[:self.fault.n_creeping_upper, self.ix_break_joint[-2]:]
slip_pre = slip[:, :ix_eq_last]
slip_post = slip[:, ix_eq_last:]
slip_pre += (slip_post[:, -1] - slip_pre[:, 0]).reshape(-1, 1)
slip_joint = np.hstack([slip_post, slip_pre])
slip_joint -= slip_joint[:, 0].reshape(-1, 1)
# same for time
t_last = self.t_eval_joint[self.ix_break_joint[-2]:].copy()
t_last_pre = t_last[:ix_eq_last]
t_last_post = t_last[ix_eq_last:]
t_last_pre += t_last_post[-1] - t_last_pre[0]
t_last_joint = np.concatenate([t_last_post, t_last_pre])
t_last_joint -= t_last_joint[0]
# since slip_joint is now already cumulative slip since the earthquake,
# with the tapered slip removed, we can just read out the early
# postseismic and rest interseismic cumulative slip distributions
post = interp1d(t_last_joint, slip_joint)(post_inter_transition * self.T_eff)
inter = slip_joint[:, -1] - post
post = np.concatenate([np.zeros(self.fault.n_locked), post])
inter = np.concatenate([np.zeros(self.fault.n_locked), inter])
# optionally, normalize by total expected cumulative slip over the entire cycle
if normalize:
total_slip = self.T_eff * self.v_plate * 86400 * 365.25
co /= total_slip
post /= total_slip
inter /= total_slip
# make figure
fig, ax = plt.subplots(layout="constrained")
ax.plot(self.fault.mid_x1[:self.fault.n_upper] / 1e3, co, label="Coseismic")
ax.plot(self.fault.mid_x1[:self.fault.n_upper] / 1e3, post, label="Postseismic")
ax.plot(self.fault.mid_x1[:self.fault.n_upper] / 1e3, inter, label="Interseismic")
ax.legend()
ax.set_xlabel("$x_1$ [km]")
ax.set_ylabel("Normalized cumulative slip [-]" if normalize
else "Cumulative Slip [m]")
ax.set_title("Slip Phases (Post-/Interseismic cutoff at "
f"{post_inter_transition:.1%} " "$T_{rec}$)")
return fig, ax
def plot_viscosity(self, full_state, return_viscosities=False):
"""
Plot the viscosity structure with depth for the steady state, as well as
for the immediate pre- and coseismic velocities.
For multiple earthquakes, it will use the minimum preseismic and maximum
postseismic velocities.
Parameters
----------
full_state : numpy.ndarray
State matrix as output from :meth:`~run`.
return_viscosities : bool, optional
Also return the preseismic, steady-state, and postseismic viscosities.
Returns
-------
matplotlib.figure.Figure
matplotlib.axes.Axes
"""
import matplotlib.pyplot as plt
# get indices of each last earthquake in last cycle
temp = self.eq_df.astype(bool).drop_duplicates(keep="last")
time_eq_last = temp.index.values + (self.n_cycles_max - 1) * self.T_eff
tdiff = np.array([np.min(np.abs(self.t_eval_joint - tlast)) for tlast in time_eq_last])
if np.any(tdiff > 0):
warn("Couldn't find exact indices, using time differences of "
f"{tdiff * 365.25 * 86400} seconds.")
ix_eq_last = [np.argmin(np.abs(self.t_eval_joint - tlast)) for tlast in time_eq_last]
n_eq_found = len(ix_eq_last)
assert n_eq_found == (self.Ds_0 > 0).sum(), \
"Couldn't find indices of each last non-zero earthquake in the " \
"last cycle, check for rounding errors."
# calculate average slip for plotted earthquakes
slip_last = self.eq_df.loc[temp.index, :]
slip_avg = [slip_last.iloc[ieq, np.flatnonzero(temp.iloc[ieq, :])].mean()
for ieq in range(n_eq_found)]
# extract preseismic velocities
vels_pre = np.array([full_state[self.fault.n_creeping_upper:self.fault.n_state_upper,
ix - 1] for ix in ix_eq_last]).T
vels_post = np.array([full_state[self.fault.n_creeping_upper:self.fault.n_state_upper,
ix] for ix in ix_eq_last]).T
if isinstance(self.fault.upper_rheo, NonlinearViscous):
# calculate viscosity profiles
vis_pre = SubductionSimulation.get_alpha_eff(self.alpha_n_vec.reshape(-1, 1),
self.n_vec.reshape(-1, 1),
vels_pre)
vis_ss = SubductionSimulation.get_alpha_eff(self.alpha_n_vec,
self.n_vec,
self.v_plate_eff)
vis_post = SubductionSimulation.get_alpha_eff(self.alpha_n_vec.reshape(-1, 1),
self.n_vec.reshape(-1, 1),
vels_post)
elif isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic):
vis_pre = SubductionSimulation.get_alpha_eff_from_alpha_h(
self.alpha_h_vec.reshape(-1, 1), vels_pre)
vis_ss = SubductionSimulation.get_alpha_eff_from_alpha_h(
self.alpha_h_vec.reshape(-1, 1), self.v_plate_eff)
vis_post = SubductionSimulation.get_alpha_eff_from_alpha_h(
self.alpha_h_vec.reshape(-1, 1), vels_post)
else:
raise NotImplementedError()
vis_mins = 10**np.floor(np.log10(np.ma.masked_invalid(vis_post*0.999).min(axis=0)))
vis_maxs = 10**np.ceil(np.log10(np.ma.masked_invalid(vis_pre*1.001).max(axis=0)))
# make plot
fig, ax = plt.subplots(ncols=n_eq_found, sharey=True, layout="constrained")
ax = np.atleast_1d(ax)
ax[0].set_ylabel("$x_2$ [km]")
for i in range(n_eq_found):
ax[i].fill_betweenx([0, self.fault.mid_x2_creeping[1] / 1e3],
vis_mins[i], vis_maxs[i], facecolor="0.8", label="Locked")
ax[i].fill_betweenx(self.fault.mid_x2_creeping[:self.fault.n_creeping_upper] / 1e3,
vis_pre[:, i], vis_post[:, i], alpha=0.5, label="Simulated")
ax[i].plot(vis_ss,
self.fault.mid_x2_creeping[:self.fault.n_creeping_upper] / 1e3,
label="Plate Rate")
ax[i].set_xscale("log")
ax[i].legend(loc="lower left")
ax[i].set_ylim(self.fault.mid_x2_creeping[self.fault.n_creeping_upper - 1] / 1e3,
0)
ax[i].set_xlim(vis_mins[i], vis_maxs[i])
ax[i].set_title(f"$s={slip_avg[i]:.2g}$ m")
ax[i].set_xlabel(r"$\alpha_{eff}$ [Pa * s/m]")
# finish
if return_viscosities:
return fig, ax, vis_pre, vis_ss, vis_post
else:
return fig, ax
def plot_viscosity_timeseries(self, full_state, return_viscosities=False):
"""
Plot the viscosity timeseries with depth for the entire last cycle.
Parameters
----------
full_state : numpy.ndarray
State matrix as output from :meth:`~run`.
return_viscosities : bool, optional
Also return the viscosity timeseries.
Returns
-------
matplotlib.figure.Figure
matplotlib.axes.Axes
"""
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from cmcrameri import cm
# check that the sequence only has one earthquake
if not self.n_eq == 1:
raise NotImplementedError("Don't know how to plot viscosity timeseries if "
"multiple earthquakes are present in the sequence.")
# get index of last earthquake in last cycle
time_eq_last = self.eq_df.index[0] + (self.n_cycles_max - 1) * self.T_eff
ix_eq_last = (np.flatnonzero(np.isin(self.t_eval_joint, time_eq_last))[0]
- self.ix_break_joint[-2])
# reorganize interseismic velocities
vels = full_state[self.fault.n_creeping_upper:2*self.fault.n_creeping_upper,
self.ix_break_joint[-2]:]
vels_pre = vels[:, :ix_eq_last]
vels_post = vels[:, ix_eq_last:]
vels = np.hstack([vels_post, vels_pre])
# same for time
t_last = self.t_eval_joint[self.ix_break_joint[-2]:].copy()
t_last_pre = t_last[:ix_eq_last]
t_last_post = t_last[ix_eq_last:]
t_last_pre += t_last_post[-1] - t_last_pre[0]
t_last_joint = np.concatenate([t_last_post, t_last_pre])
t_last_joint -= t_last_joint[0]
# convert velocities to effective viscosity
if isinstance(self.fault.upper_rheo, NonlinearViscous):
vis_ts = SubductionSimulation.get_alpha_eff(self.alpha_n_vec.reshape(-1, 1),
self.n_vec.reshape(-1, 1),
vels)
elif isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic):
vis_ts = SubductionSimulation.get_alpha_eff_from_alpha_h(
self.alpha_h_vec.reshape(-1, 1), vels)
else:
raise NotImplementedError()
# get index of deep transition
patch_depths = -self.fault.mid_x2_creeping[:self.fault.n_creeping_upper]
ix_deep = np.argmin(np.abs(patch_depths - self.fault.upper_rheo.deep_transition))
# subset vels to skip zero-velocity uppermost patch
vis_ts = vis_ts[1:, :]
# get percentage of final viscosity
rel_vis = vis_ts / vis_ts[:, -1][:, None]
rel_vis_masked = np.ma.MaskedArray(rel_vis, np.diff(rel_vis, axis=1,
prepend=rel_vis[:, 0][:, None]
) <= 0).filled(np.NaN)
levels = [0.2, 0.4, 0.6, 0.8]
rel_vis_iquant = np.concatenate([np.nanargmax(rel_vis_masked > lvl, axis=1, keepdims=True)
for lvl in levels], axis=1)
# normalize time
t_sub = t_last_joint / self.T_eff
# prepare plot
fig, ax = plt.subplots(figsize=(10, 5), layout="constrained")
# plot velocities
c = ax.pcolormesh(
t_sub,
np.abs(self.fault.end_upper[1, self.fault.n_locked+1:self.fault.n_locked+ix_deep+1]
/ 1e3),
vis_ts[:ix_deep-1, :-1],
norm=LogNorm(vmin=10**np.floor(np.log10(np.median(vis_ts[:ix_deep-1, 0]))),
vmax=10**np.ceil(np.log10(np.max(vis_ts[:ix_deep-1, -1])))),
cmap=cm.batlow, shading="flat")
for i in range(len(levels)):
ax.plot(t_sub[rel_vis_iquant[:ix_deep-1, i]],
patch_depths[1:ix_deep] / 1e3,
color="w")
ax.set_xscale("symlog", linthresh=1e-3)
ax.set_xlim([0, 1])
# make the y-axis increasing downwards to mimic depth even though we're plotting x1
ax.invert_yaxis()
# finish figure
ax.set_ylabel("Depth $x_2$ [km]")
ax.set_xlabel("Normalized Time $t/T$")
fig.colorbar(c, ax=ax, location="right", orientation="vertical", fraction=0.05,
label=r"$\alpha_{eff}$")
fig.suptitle("Effective Viscosity Timeseries")
# finish
if return_viscosities:
return fig, ax, t_sub, vis_ts
else:
return fig, ax
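# A hypothetical usage sketch for the plotting helpers above (not part of the module).
# It assumes a fully configured SubductionSimulation instance `sim`, and that
# `full_state` is the state matrix the docstrings describe as the output of
# :meth:`~run`; since the exact return signature of run() is not shown here, the
# lines are kept as comments rather than executable code.
#
#   full_state = sim.run()
#   fig, ax = sim.plot_fault()                           # mesh and observer geometry
#   fig, ax = sim.plot_slip_phases(full_state)           # single-earthquake sequences only
#   fig, ax = sim.plot_viscosity(full_state)             # pre-/steady-/post-seismic viscosity
#   fig, ax = sim.plot_viscosity_timeseries(full_state)  # last-cycle viscosity evolution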
|
tobiscode/seqeas-public
|
seqeas/subduction2d.py
|
subduction2d.py
|
py
| 145,621 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21721374854
|
import os
import math
import json
import librosa
from settings import (
SAMPLE_RATE,
NUM_MFCC,
N_FTT,
HOP_LENGTH,
NUM_SEGMENTS,
DURATION,
)
DATASET_PATH = "data\\archive\\Data\\genres_original" # loaded using the GTZAN Music Genre Classification dataset at https://www.kaggle.com/datasets/andradaolteanu/gtzan-dataset-music-genre-classification
JSON_PATH = "data\\data.json"
SAMPLES_PER_TRACK = SAMPLE_RATE * DURATION
def dump_mfccs_to_json(dataset_path=None):
"""
Processes test data as MFCCs and labels
"""
dataset_path = dataset_path if dataset_path is not None else DATASET_PATH
data = {
"mapping": [],
"mfcc": [],
"labels" : [],
}
samples_per_segment = int(SAMPLES_PER_TRACK/NUM_SEGMENTS)
expected_mfcc = math.ceil(samples_per_segment/HOP_LENGTH)
for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dataset_path)):
if dirpath is not dataset_path:
dirpath_components = dirpath.split("\\")
label = dirpath_components[-1]
data["mapping"].append(label)
print(f"Processing: {label}")
for f in filenames:
file_path = os.path.join(dirpath, f)
signal, sr = librosa.load(file_path, sr=SAMPLE_RATE)
for s in range(NUM_SEGMENTS):
start_sample = samples_per_segment * s
finish_sample = start_sample + samples_per_segment
                    mfcc = librosa.feature.mfcc(y=signal[start_sample:finish_sample], sr=sr, n_fft=N_FTT, n_mfcc=NUM_MFCC, hop_length=HOP_LENGTH)
mfcc = mfcc.T
if len(mfcc) == expected_mfcc:
data["mfcc"].append(mfcc.tolist())
data["labels"].append(i-1)
print(f"{file_path}, segment:{s+1}")
with open(JSON_PATH, "w") as fp:
json.dump(data, fp, indent=4)
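# A small companion sketch (not part of the original script): how the JSON written by
# dump_mfccs_to_json() could be loaded back for model training. The keys "mapping",
# "mfcc", and "labels" match the dictionary dumped above; numpy is an assumed extra
# dependency here.
def load_mfccs_from_json(json_path=JSON_PATH):
    """
    Loads the MFCC matrix, integer labels, and genre names back from disk
    """
    import numpy as np
    with open(json_path, "r") as fp:
        data = json.load(fp)
    inputs = np.array(data["mfcc"])     # shape: (num_segments_total, frames, NUM_MFCC)
    targets = np.array(data["labels"])  # integer index into data["mapping"]
    return inputs, targets, data["mapping"]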
if __name__ == "__main__":
dump_mfccs_to_json()
|
jmrossi98/genre_detect
|
src/preprocess_data.py
|
preprocess_data.py
|
py
| 2,051 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42111163390
|
from fastapi import Body, FastAPI
from pydantic import BaseModel
from typing import Annotated
from enum import Enum
app = FastAPI()
class ModelName(str, Enum):
afs = "afs"
har = "har1"
class Item(BaseModel):
name: str
description: str | None = None
price: float
tax: float | None = None
tags: set[str] = set()
fake_items_db = [{"item_name": "Foo"}, {"item_name": "Bar"}, {"item_name": "Baz"}]
@app.post("/items/create_item/")
async def create_items(item: Item):
item_dict = item.model_dump()
if item.tax:
price_with_tax = item.price + item.tax
item_dict.update({"price with tax": price_with_tax})
return item_dict
@app.get("/")
async def home():
return {"Data": "Test"}
@app.get("/items/")
async def read_item(skip: int = 0, limit: int = 10):
return fake_items_db[skip: skip + limit]
@app.put("/add_items/{item_id}")
async def add_item(item_id: int, item: Item):
return {"item_id": item_id, **item.model_dump()}
@app.put("/items/{item_id}")
async def update_item(item_id: int, item: Annotated[Item, Body(examples={"name": "foo", "description": "cool item", "price": "24", "tax": 3})]):
result = {"item_id": item_id, "item": item}
return result
@app.get("/models/{model_name}")
async def get_model(model_name: ModelName):
if model_name is ModelName.afs:
return {"model_name": model_name, "message": 1}
    if model_name.value == "har1":
return {"model_name": model_name, "message": 2}
return {"model_name": model_name, "message": -1}
@app.get("/files/{file_path:path}")
async def read_file(file_path: str):
return {"file_path": file_path}
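# A hedged smoke-test sketch (not part of the original file): exercising two of the
# routes above with FastAPI's TestClient. It needs the optional httpx dependency that
# TestClient relies on, and the payload values below are made up for illustration.
if __name__ == "__main__":
    from fastapi.testclient import TestClient

    client = TestClient(app)
    # query parameters map onto read_item(skip, limit)
    print(client.get("/items/", params={"skip": 0, "limit": 2}).json())
    # the request body is validated against the Item model
    print(client.post(
        "/items/create_item/",
        json={"name": "Foo", "price": 10.0, "tax": 1.5},
    ).json())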
|
mkilic20/task
|
testing.py
|
testing.py
|
py
| 1,663 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2987884048
|
from urllib2 import urlopen, HTTPError
from django.template.defaultfilters import slugify
from django.core.files.base import ContentFile
from django.db import transaction, IntegrityError
from item.models import Item, Link
from movie.models import Movie, Actor, Director, Genre
from decorators.retry import retry
class LoadMovie():
"""
This manager inserts a movie into the database along with its
corresponding genres, actors, and directors.
"""
exists = False
def __init__(self, title, imdb_id, runtime,
synopsis, theater_date, keywords):
"""
Inserts the movie into the database if it doesn't already
exist in the database.
"""
try:
self.movie, self.created = Movie.objects.get_or_create(
title=title,
imdb_id=imdb_id,
runtime=runtime,
synopsis=synopsis,
theater_date=theater_date,
keywords = keywords,
url=slugify(title)
)
except IntegrityError:
print('TRANSACTION FAILED ON MOVIE INSERT: Rolling back now...')
transaction.rollback()
def insert_genres(self, genres):
"""
Inserts the genres for the movie.
"""
genre_list = []
try:
for g in genres:
genre, created = Genre.objects.get_or_create(
name=g, url=slugify(g))
genre_list.append(genre)
self.movie.genre.add(*genre_list)
except IntegrityError:
print('TRANSACTION FAILED ON GENRE INSERT: Rolling back now...')
transaction.rollback()
def insert_actors(self, actors):
"""
Inserts the actors for the movie.
"""
actor_list = []
try:
for a in actors:
actor, created = Actor.objects.get_or_create(
name=a, url=slugify(a))
actor_list.append(actor)
self.movie.actors.add(*actor_list)
except IntegrityError:
print('TRANSACTION FAILED ON ACTOR INSERT: Rolling back now...')
transaction.rollback()
def insert_directors(self, directors):
"""
Inserts the directors for the movie.
"""
director_list = []
try:
for d in directors:
director, created = Director.objects.get_or_create(
name=d, url=slugify(d))
director_list.append(director)
self.movie.directors.add(*director_list)
except IntegrityError:
print('TRANSACTION FAILED ON DIRECTOR INSERT: Rolling back now...')
transaction.rollback()
@retry(HTTPError)
def insert_image(self, url):
"""
Inserts the image for the movie.
"""
try:
if 'default.jpg' in self.movie.image.url or self.created:
image = urlopen(url, timeout=15)
self.movie.image.save(
self.movie.url+u'.jpg',
ContentFile(image.read())
)
except IntegrityError:
print('TRANSACTION FAILED ON IMAGE INSERT: Rolling back now...')
transaction.rollback()
def insert_trailer(self, url):
"""
Inserts the trailer as a link.
"""
try:
Link.objects.get_or_create(
item=self.movie.item,
partner="YouTube",
url=url
)
except IntegrityError:
print('TRANSACTION FAILED ON TRAILER INSERT: Rolling back now...')
transaction.rollback()
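# A hypothetical usage sketch (not part of the original module). The titles, IDs, and
# URLs below are made up, and the calls assume a configured Django project with the
# movie app's models migrated, so the lines are left as comments.
#
#   loader = LoadMovie(
#       title="Example Movie", imdb_id="tt0000000", runtime=120,
#       synopsis="...", theater_date="2013-01-01", keywords="example",
#   )
#   loader.insert_genres(["Drama"])
#   loader.insert_actors(["Jane Doe"])
#   loader.insert_directors(["John Doe"])
#   loader.insert_image("http://example.com/poster.jpg")
#   loader.insert_trailer("http://www.youtube.com/watch?v=example")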
|
sameenjalal/mavenize-beta
|
mavenize/lib/db/loadmovie.py
|
loadmovie.py
|
py
| 3,712 |
python
|
en
|
code
| 1 |
github-code
|
6
|
11332000472
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 1 10:10:45 2021
@author: 82106
"""
import cv2
import os
import sys
if not os.path.exists('result'):
os.makedirs('result')
capture = cv2.VideoCapture(1)
if not capture.isOpened():
print('Camera open failed!')
sys.exit()
'''
frameWidth = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
frameHeight = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
frameSize = (frameWidth, frameHeight)
print('frame size : {}'.format(frameSize))
'''
capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
count = 1
while True:
ret, frame = capture.read()
if not ret:
print('Frame read error!')
sys.exit()
cv2.imshow('frame', frame)
key = cv2.waitKey(1)
if key == ord('s'):
print('Screenshot saved!')
cv2.imwrite('result/screenshot{}.png'.format(count), frame, params=[cv2.IMWRITE_PNG_COMPRESSION, 0])
count += 1
elif key == ord('q'):
break
capture.release()
cv2.destroyAllWindows()
|
dongwooky/Personal-Project
|
container/camera_screenshot.py
|
camera_screenshot.py
|
py
| 1,084 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5759183851
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
@AUTHOR:Joselyn Zhao
@CONTACT:[email protected]
@HOME_PAGE:joselynzhao.top
@SOFTWARE:PyCharm
@FILE:main.py
@TIME:2019/6/13 10:32
@DES:
'''
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
old_v = tf.logging.get_verbosity()
tf.logging.set_verbosity(tf.logging.ERROR)
from tensorflow.examples.tutorials.mnist import input_data
from Lenet import *
from PIL import Image
mnist = input_data.read_data_sets('../../../data/mnist', one_hot=True)
x_test = np.reshape(mnist.test.images, [-1, 28, 28, 1])
x_test = np.pad(x_test, ((0, 0), (2, 2), (2, 2), (0, 0)),
'constant') # print("Updated Image Shape: {}".format(X_train[0].shape))
tf.logging.set_verbosity(old_v)
iteratons = 1000
batch_size = 64
ma = 0
sigma = 0.1
lr = 0.01
def get_sample100(label):
sample100_x=[]
sample100_y=[]
count = 0
for i in range(len(mnist.test.images)):
if mnist.test.labels[i][label]==1:
count+=1
sample100_y.append(mnist.test.labels[i])
sample100_x.append(mnist.test.images[i])
if count>=100:
break
return sample100_x,sample100_y
def train_lenet(lenet):
    with tf.Session() as sess:  # does this session need to be closed? (the with-block closes it on exit)
sess.run(tf.global_variables_initializer())
tf.summary.image("input",lenet.x,3)
merged_summary = tf.summary.merge_all()
        writer = tf.summary.FileWriter("LOGDIR/4/",sess.graph)  # save the logs under a separate run directory
# writer.add_graph(sess.graph)
for ii in range(iteratons):
batch_xs,batch_ys = mnist.train.next_batch(batch_size)
batch_xs = np.reshape(batch_xs,[-1,28,28,1])
batch_xs = np.pad(batch_xs,((0, 0), (2, 2), (2, 2), (0, 0)), 'constant')
sess.run(lenet.train_step,feed_dict ={lenet.x:batch_xs,lenet.y_:batch_ys})
if ii % 50 == 1:
acc,s = sess.run([lenet.accuracy,merged_summary],feed_dict ={lenet.x:x_test,lenet.y_:mnist.test.labels})
writer.add_summary(s,ii)
print("%5d: accuracy is: %4f" % (ii, acc))
        sample100_x,sample100_y = get_sample100(4)  # label 4 chosen arbitrarily; any digit 0-9 works
sample100_x = np.reshape(sample100_x,[-1,28,28,1])
sample100_x = np.pad(sample100_x, ((0, 0), (2, 2), (2, 2), (0, 0)), 'constant')
x_min = tf.reduce_min(lenet.fc2)
x_max = tf.reduce_max(lenet.fc2)
fc2 = (lenet.fc2 - x_min) / (x_max - x_min)
fc2 = sess.run(fc2,feed_dict={lenet.x:sample100_x,lenet.y_:sample100_y})
plt.imshow(fc2)
plt.show()
print('[accuracy,loss]:', sess.run([lenet.accuracy], feed_dict={lenet.x:x_test,lenet.y_:mnist.test.labels}))
if __name__ =="__main__":
act = "sigmoid"
lenet = Lenet(ma,sigma,lr,act)
train_lenet(lenet)
|
joselynzhao/DeepLearning.Advanceing
|
DL_6/work/main.py
|
main.py
|
py
| 2,860 |
python
|
en
|
code
| 5 |
github-code
|
6
|
44602770515
|
import pytesseract
import PIL
from os import system
import re
system("tesseract -l")
class workout:
reps = 0
exercise_name = ""
def compile_text_to_workouts(text):
workouts = []
num = 0
for word in text:
new_workout = workout()
if word.isdigit():
new_workout.reps = word
num+=1
while num < len(text) and not text[num].isdigit() :
new_workout.exercise_name += " " + str(text[num])
num +=1
if not new_workout.reps == 0 or not new_workout.exercise_name == "":
workouts.append(new_workout)
return workouts
####MAIN:###############################################################
letters = (pytesseract.image_to_string(r'../GetFit/workout_routine1.png'))
print(letters)
sentence = re.findall(r'\w+', letters) ##turns letters into words and makes list
print(sentence)
compile_text_to_workouts(sentence) ###turns into actual workout routine
|
reeyagup/GetFit
|
image_to_text.py
|
image_to_text.py
|
py
| 972 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35299316629
|
# -*- coding: utf-8 -*-
import xml.etree.ElementTree as ET
from xml.etree import ElementTree as etree
from xml.dom import minidom
import untangle
def xml_generator(input_filename, input_foldername, exif_list, root_path):
root = ET.Element('annotation')
source = ET.SubElement(root, 'source')
image_date = ET.SubElement(source, 'date')
image_date.text = str(exif_list[0])
folder_name = ET.SubElement(source, 'folder')
folder_name.text = input_foldername
file_name = ET.SubElement(source, 'filename')
file_name.text = input_filename
gpsinfo = ET.SubElement(root, 'gpsinfo')
gps_altitude = ET.SubElement(gpsinfo, 'GPSAltitude')
gps_altitude.text = str(exif_list[1])
gps_latitude = ET.SubElement(gpsinfo, 'GPSLatitude')
gps_latitude.text = str(exif_list[2])
gps_latitude_ref = ET.SubElement(gpsinfo, 'GPSLatitudeRef')
gps_latitude_ref.text = str(exif_list[3])
gps_longitude = ET.SubElement(gpsinfo, 'GPSLongitude')
gps_longitude.text = str(exif_list[4])
gps_longitude_ref = ET.SubElement(gpsinfo, 'GPSLongitudeRef')
gps_longitude_ref.text = str(exif_list[5])
'''
There should be position annotation inside 'object' tag
'''
#ann_obj = ET.SubElement(root, 'object')
xml_string = etree.tostring(root)
tree = minidom.parseString(xml_string)
xml_string = tree.toxml()
save_path = '%s/ob_%s/%s.xml' % (root_path, input_foldername, input_filename[:-4])
f=open(save_path,'wb')
f.write(tree.toprettyxml(encoding='utf-8'))
f.close()
def xml_parsing(input_xml_file):
obj = untangle.parse(input_xml_file)
date_time = obj.annotation.source.date.cdata
GPSAltitude = obj.annotation.gpsinfo.GPSAltitude.cdata
GPSLatitude = obj.annotation.gpsinfo.GPSLatitude.cdata
GPSLatitudeRef = obj.annotation.gpsinfo.GPSLatitudeRef.cdata
GPSLongitude = obj.annotation.gpsinfo.GPSLongitude.cdata
GPSLongitudeRef = obj.annotation.gpsinfo.GPSLongitudeRef.cdata
xml_info_keys = ['Date', 'GPSAltitude', 'GPSLatitude', 'GPSLatitudeRef', 'GPSLongitude', 'GPSLongitudeRef']
xml_info_value = [date_time, GPSAltitude, GPSLatitude, GPSLatitudeRef, GPSLongitude, GPSLongitudeRef]
xml_info_dict = dict(zip(xml_info_keys, xml_info_value))
return xml_info_dict
#im = '/Users/xiang/ml_ann/ann_tools_eric/dataset/ob_curr/00001.xml'
#xml_parsing(im)
|
simonchanper/ml_ann
|
ann_tools_eric/xml_process.py
|
xml_process.py
|
py
| 2,403 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19699008636
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:
d={}
while headA:
d[headA] = 1
headA = headA.next
while headB:
if headB in d:
return headB
headB=headB.next
# the idea is to traverse list A and store the address/reference to each node
# in a hash set. Then check every node bi in list B: if bi appears in the hash set,
# then bi is the intersection node.
# I did not realize that the hash set can be created like this
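# An alternative sketch (not part of the original solution): the classic two-pointer
# walk needs only O(1) extra space. Each pointer traverses its own list and then
# switches to the other list's head, so both cover lenA + lenB nodes and meet at the
# intersection node, or at None if the lists do not intersect.
class SolutionTwoPointers:
    def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:
        a, b = headA, headB
        while a is not b:
            a = a.next if a else headB
            b = b.next if b else headA
        return a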
|
Superhzf/python_exercise
|
Linked List/Intersection of Two Linked Lists/solution.py
|
solution.py
|
py
| 692 |
python
|
en
|
code
| 1 |
github-code
|
6
|
19516521842
|
#for X in range(1,10):
#print(X)
#for char in "cofee":
#print(char * 10)
#for num in range (0,20,2):#if you start with odd nums it will print odd(1,20,2)
#print(num)
#times = input("how many times do i have to tell you? ")
#times = int(times)
#for time in range(times) :
# print ("clean up your room!")
#for num in range(1,21):
#if num ==4 or num ==13:
#print(f"{num} is unlucky")
#elif num % 2==0:
#print(f"{num} is even")
# else:
#print(f"{num} is odd")
#for num in range(1,21):
#if num ==4 or num ==13:
#print(f"{num} is unlucky")
#elif num % 2==0:
#print(f"{num} is even")
#else:
#state()
#print(f"{num} is odd")
#while loop
#msg = input("whats your password?")
#while msg != "bananas":
#print("wrong!")
#msg = input("whats your password?")
#print("correct!")
#num =1
#while num < 11:
#print(num)
#num += 1
#for num in range(1,11) :
#print(" \U0001f600" * num)
#times = 1
#while times < 11:
#print(" \U0001f600" * times)
#times += 1
#msg = input("say something: ")
#while msg != "stop copying me":
#print(msg)
#msg = input()
#print("you win!")
while True:
command = input("type 'exit' to exit:")
if (command == "exit"):
break
times = int(input("how many times do i have to tell yah?"))
for time in range(times):
print("claen your room!")
if time >= 3:
print("do you even listen anymore")
break
|
mevine/seen
|
jee.py
|
jee.py
|
py
| 1,511 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71504118267
|
from __future__ import annotations
from io import BufferedIOBase, BytesIO
from typing import List, Optional, Union
from helper import (
byte_to_int,
encode_varstr,
hash160,
int_to_byte,
int_to_little_endian,
little_endian_to_int,
read_varint,
sha256,
)
from op import (
decode_num,
encode_minimal_num,
is_number_op_code,
number_to_op_code,
op_code_to_number,
OP_0,
OP_CHECKLOCKTIMEVERIFY,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_CHECKSEQUENCEVERIFY,
OP_CHECKSIG,
OP_CHECKSIGVERIFY,
OP_DROP,
OP_DUP,
OP_EQUAL,
OP_EQUALVERIFY,
OP_FROMALTSTACK,
OP_HASH160,
OP_IF,
OP_NOTIF,
OP_PUSHDATA1,
OP_PUSHDATA2,
OP_TOALTSTACK,
OP_VERIFY,
OP_CODE_NAMES,
OP_CODE_FUNCTIONS,
)
from timelock import Locktime, Sequence
from witness import Witness
class Script(list):
def __add__(self, other: Script) -> Script:
return self.__class__(super().__add__(other))
def __radd__(self, other: Script) -> Script:
o = self.__class__(other)
return o + self
def __new__(cls,
                commands: Optional[List[Union[bytes, str]]] = None) -> Script:
if commands is None:
commands = []
for current in commands:
if type(current) not in (bytes, ):
raise ValueError(
f'Every command should be bytes or str, got {current} instead'
)
return super().__new__(cls, commands)
def __repr__(self) -> str:
result = ''
for current in self:
if OP_CODE_NAMES.get(current):
result += f'{OP_CODE_NAMES[current]} '
elif type(current) == str:
result += f'<{current}> '
else:
result += f'{current.hex()} '
return result
@classmethod
def parse(cls, s: BufferedIOBase) -> Script:
# get the length of the entire field
length = read_varint(s)
# initialize the commands array
commands = []
# initialize the number of bytes we've read to 0
count = 0
# loop until we've read length bytes
while count < length:
# get the current byte
current = s.read(1)
# increment the bytes we've read
count += 1
# convert the current byte to an integer
current_int = current[0]
# if the current byte is between 1 and 75 inclusive
if current_int <= 75:
# add the next n bytes as a command
commands.append(s.read(current_int))
count += current_int
elif current == OP_PUSHDATA1:
# op_pushdata1
data_length = byte_to_int(s.read(1))
commands.append(s.read(data_length))
count += data_length + 1
elif current == OP_PUSHDATA2:
# op_pushdata2
data_length = little_endian_to_int(s.read(2))
commands.append(s.read(data_length))
count += data_length + 2
else:
# add the command to the list of commands
commands.append(current)
if count != length:
raise SyntaxError(f'parsing script failed {commands}')
return cls(commands)
def miniscript(self):
from miniscript import MiniScript
return MiniScript.from_script(Script(self[:]))
def is_locktime_locked(self) -> bool:
'''Returns whether the script starts with
<locktime> OP_CLTV OP_DROP'''
return len(self) >= 3 and \
(is_number_op_code(self[0]) or len(self[0]) > 1) and \
self[1] == OP_CHECKLOCKTIMEVERIFY and self[2] == OP_DROP
def is_multisig(self) -> bool:
'''Returns whether the script follows the
OP_k <pubkey1>...<pubkeyn> OP_n OP_CHECKMULTISIG pattern'''
if self[-1] != OP_CHECKMULTISIG:
return False
if not is_number_op_code(self[-2]):
return False
n = op_code_to_number(self[-2])
if len(self) < n + 3:
return False
for current in self[-n - 2:-2]:
if len(current) != 33:
return False
if not is_number_op_code(self[-n - 3]):
return False
k = op_code_to_number(self[-n - 3])
if k < 1 or k > 15:
return False
if n < k or n > 15:
return False
return True
def is_multisig_timelock(self) -> bool:
'''Returns whether the script follows the
<locktime> OP_CLTV/OP_CSV OP_DROP OP_k <pubkey1>...<pubkeyn> OP_n OP_CHECKMULTISIG pattern'''
return (self.is_sequence_locked() or self.is_locktime_locked()) and \
self.is_multisig()
def is_p2pkh(self) -> bool:
'''Returns whether the script follows the
OP_DUP OP_HASH160 <20 byte hash> OP_EQUALVERIFY OP_CHECKSIG pattern.'''
# there should be exactly 5 commands
# OP_DUP, OP_HASH160, 20-byte hash, OP_EQUALVERIFY, OP_CHECKSIG
return len(self) == 5 and self[0] == OP_DUP and self[1] == OP_HASH160 \
and len(self[2]) == 20 and self[3] == OP_EQUALVERIFY \
and self[4] == OP_CHECKSIG
def is_p2sh(self) -> bool:
'''Returns whether the script follows the
OP_HASH160 <20 byte hash> OP_EQUAL pattern.'''
# there should be exactly 3 commands
# OP_HASH160, 20-byte hash, OP_EQUAL
return len(self) == 3 and self[0] == OP_HASH160 and len(self[1]) == 20 \
and self[2] == OP_EQUAL
def is_p2wpkh(self) -> bool:
'''Returns whether the script follows the
OP_0 <20 byte hash> pattern.'''
return len(self) == 2 and self[0] == OP_0 and len(self[1]) == 20
def is_p2wsh(self) -> bool:
'''Returns whether the script follows the
OP_0 <32 byte hash> pattern.'''
return len(self) == 2 and self[0] == OP_0 and len(self[1]) == 32
def is_segwit(self) -> bool:
return self.is_p2wpkh() or self.is_p2wsh()
def is_sequence_locked(self) -> bool:
'''Returns whether the script starts with
<sequence> OP_CSV OP_DROP'''
return len(self) >= 3 and \
(is_number_op_code(self[0]) or len(self[0]) > 1) and \
self[1] == OP_CHECKSEQUENCEVERIFY and self[2] == OP_DROP
def is_timelock(self) -> bool:
'''Returns whether the script follows the
locktime OP_CLTV OP_DROP <pubkey> OP_CHECKSIG pattern'''
return (self.is_sequence_locked() or self.is_locktime_locked()) and \
len(self) == 5 and len(self[3]) == 33 and self[4] == OP_CHECKSIG
def pubkeys(self) -> List[bytes]:
pubkeys = []
for item in self:
if len(item) == 33 and item[0] in (2, 3):
pubkeys.append(item)
return pubkeys
def raw_serialize(self) -> bytes:
# initialize what we'll send back
result = b''
# go through each command
for current in self:
if current == OP_0:
result += int_to_byte(0)
elif OP_CODE_NAMES.get(current) is None:
# this is an element
# get the length in bytes
length = len(current)
# for large lengths, we have to use a pushdata op code
                if length <= 75:
# turn the length into a single byte integer
result += int_to_byte(length)
elif length > 75 and length < 0x100:
# 76 is pushdata1
result += OP_PUSHDATA1
result += int_to_byte(length)
elif length >= 0x100 and length <= 520:
# 77 is pushdata2
result += OP_PUSHDATA2
result += int_to_little_endian(length, 2)
else:
raise ValueError('too long a command')
                result += current
            else:
                # any other op code is a single byte that can be added directly
                result += current
return result
def serialize(self) -> bytes:
return encode_varstr(self.raw_serialize())
class ScriptPubKey(Script):
'''Represents a ScriptPubKey in a transaction'''
@classmethod
def parse(cls, s: BufferedIOBase) -> ScriptPubKey:
script_pubkey = super().parse(s)
if script_pubkey.is_p2pkh():
return PKHScriptPubKey.from_hash(script_pubkey[2])
elif script_pubkey.is_p2sh():
return SHScriptPubKey.from_hash(script_pubkey[1])
elif script_pubkey.is_p2wpkh():
return WPKHScriptPubKey.from_hash(script_pubkey[1])
elif script_pubkey.is_p2wsh():
return WSHScriptPubKey.from_hash(script_pubkey[1])
else:
return script_pubkey
def redeem_script(self) -> RedeemScript:
'''Convert this ScriptPubKey to its RedeemScript equivalent'''
return RedeemScript(self)
class PKHScriptPubKey(ScriptPubKey):
@classmethod
def from_hash(cls, h160: bytes) -> PKHScriptPubKey:
if len(h160) != 20:
raise TypeError('h160 should be 20 bytes')
return cls([OP_DUP, OP_HASH160, h160, OP_EQUALVERIFY, OP_CHECKSIG])
def hash160(self) -> bytes:
return self[2]
class SHScriptPubKey(ScriptPubKey):
@classmethod
def from_hash(cls, h160: bytes) -> SHScriptPubKey:
if len(h160) != 20:
raise TypeError('h160 should be 20 bytes')
return cls([OP_HASH160, h160, OP_EQUAL])
def hash160(self) -> bytes:
return self[1]
class RedeemScript(Script):
'''Subclass that represents a RedeemScript for p2sh'''
def hash160(self) -> bytes:
'''Returns the hash160 of the serialization of the RedeemScript'''
return hash160(self.raw_serialize())
def script_pubkey(self) -> SHScriptPubKey:
'''Returns the ScriptPubKey that this RedeemScript corresponds to'''
return SHScriptPubKey.from_hash(self.hash160())
class SegwitPubKey(ScriptPubKey):
def hash(self) -> bytes:
return self[1]
class WPKHScriptPubKey(SegwitPubKey):
@classmethod
def from_hash(cls, h160: bytes) -> WPKHScriptPubKey:
if len(h160) != 20:
raise TypeError('h160 should be 20 bytes')
return cls([OP_0, h160])
class WSHScriptPubKey(SegwitPubKey):
@classmethod
def from_hash(cls, s256: bytes) -> WSHScriptPubKey:
if len(s256) != 32:
raise TypeError('s256 should be 32 bytes')
return cls([OP_0, s256])
class WitnessScript(Script):
'''Subclass that represents a WitnessScript for p2wsh'''
def redeem_script(self) -> RedeemScript:
return self.script_pubkey().redeem_script()
def script_pubkey(self) -> WSHScriptPubKey:
'''Generates the ScriptPubKey for p2wsh'''
# get the sha256 of the current script
# return new p2wsh script using p2wsh_script
return WSHScriptPubKey.from_hash(self.sha256())
def sha256(self) -> bytes:
'''Returns the sha256 of the raw serialization for witness program'''
return sha256(self.raw_serialize())
class MultiSigScript(Script):
@classmethod
def from_pubkeys(cls, k: int, sec_pubkeys: List[bytes]) -> MultiSigScript:
n = len(sec_pubkeys)
if k == 0 or k > n:
raise ValueError(f'cannot do {k} of {n} keys')
return cls([
number_to_op_code(k), *sorted(sec_pubkeys),
number_to_op_code(n), OP_CHECKMULTISIG
])
class MultiSigRedeemScript(RedeemScript, MultiSigScript):
pass
class MultiSigWitnessScript(WitnessScript, MultiSigScript):
pass
class TimelockScript(Script):
@classmethod
def from_time(cls,
locktime: Optional[Locktime] = None,
sequence: Optional[Sequence] = None) -> List[bytes]:
if locktime is not None:
return [
encode_minimal_num(locktime), OP_CHECKLOCKTIMEVERIFY, OP_DROP
]
elif sequence is not None:
return [
encode_minimal_num(sequence), OP_CHECKSEQUENCEVERIFY, OP_DROP
]
else:
raise ValueError('locktime or sequence required')
class SingleSigTimelockScript(TimelockScript):
@classmethod
def from_pubkey_time(
cls,
sec: bytes,
locktime: Optional[Locktime] = None,
sequence: Optional[Sequence] = None) -> SingleSigTimelockScript:
script = cls.from_time(locktime, sequence) + [sec, OP_CHECKSIG]
return cls(script)
class SingleSigTimelockRedeemScript(RedeemScript, SingleSigTimelockScript):
pass
class SingleSigTimelockWitnessScript(WitnessScript, SingleSigTimelockScript):
pass
class MultiSigTimelockScript(TimelockScript, MultiSigScript):
@classmethod
def from_pubkeys_time(
cls,
k: int,
sec_pubkeys: List[bytes],
locktime: Optional[Locktime] = None,
sequence: Optional[Sequence] = None) -> MultiSigTimelockScript:
script = cls.from_time(locktime, sequence) + cls.from_pubkeys(
k, sec_pubkeys)
return cls(script)
class MultiSigTimelockRedeemScript(RedeemScript, MultiSigTimelockScript):
pass
class MultiSigTimelockWitnessScript(WitnessScript, MultiSigTimelockScript):
pass
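# A brief, hypothetical round-trip sketch (not part of the original module): build a
# p2pkh ScriptPubKey from a made-up 20-byte hash, serialize it, and parse it back.
# It assumes the helpers imported above (encode_varstr, read_varint, ...) behave as
# their names suggest.
if __name__ == '__main__':
    fake_h160 = bytes(20)  # placeholder hash160 (all zero bytes), not a real address hash
    script_pubkey = PKHScriptPubKey.from_hash(fake_h160)
    serialized = script_pubkey.serialize()
    parsed = ScriptPubKey.parse(BytesIO(serialized))
    assert parsed.is_p2pkh()
    print(parsed)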
|
jimmysong/minipy
|
script.py
|
script.py
|
py
| 13,382 |
python
|
en
|
code
| 1 |
github-code
|
6
|
7626498457
|
vertices = []
arestas = []
matriz = []
class Grafo:
def __init__(self, no, noAux, prioridade):
self.no = no
self.noAux = noAux
self.prioridade = prioridade
grafo = open('arquivomatriz.txt', 'r')
for i in grafo:
linha = i.split()
arestas.append(Grafo(int(linha[0]), int(linha[1]), int(linha[2])))
grafo.close()
def Inserir(vector):
inserido = False
for i in range( len(vertices) ):
if (vector == vertices[i]):
inserido = True
break
return inserido
for i in range( len(arestas) ):
if(not Inserir(arestas[i].no)):
vertices.append(arestas[i].no)
if(not Inserir(arestas[i].noAux)):
vertices.append(arestas[i].noAux)
vertices = sorted(vertices)
for i in range( len(vertices) ): # fill the matrix with 0's
linha = []
for j in range( len(vertices) ):
linha.append(0)
matriz.append(linha)
for i in range( len(arestas) ): # build the weighted adjacency matrix
matriz[arestas[i].no][arestas[i].noAux] = arestas[i].prioridade
matriz[arestas[i].noAux][arestas[i].no] = arestas[i].prioridade
print()
print("Matriz Adja: ")
for i in range( len(matriz) ):
print(matriz[i])
print()
print("O grau de cada vértice é: ")
for i in range( len(matriz) ):
g = 0
for j in range( len(matriz[i]) ):
if(matriz[i][j] != 0):
g += 1
print('grau do {}: {}'.format(i,g) )
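# A short alternative sketch (not in the original script): the same degree counts can
# be read straight off the adjacency matrix with numpy, assuming numpy is available.
import numpy as np
adj = np.array(matriz)
graus = (adj != 0).sum(axis=1)
for i, g in enumerate(graus):
    print('grau do {}: {}'.format(i, g))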
|
gustavoadl06/Gustavo
|
6.py
|
6.py
|
py
| 1,535 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
36670049284
|
import matplotlib.pyplot as plt
# from mpl_toolkits.axes_grid1 import ImageGrid
# import numpy as np
from os import listdir
from os import chdir
from os import path
from PIL import Image
# import matplotlib.gridspec as gridspec
import argparse
parser = argparse.ArgumentParser(description="generate plot for report")
parser.add_argument("--input_dir", required=True, help="Input ROS bag.")
parser.add_argument("--rows", required=True, help="numer of rows in figure")
parser.add_argument("--cols", required=True, help="number of columns in figure")
args = parser.parse_args()
# chdir('/Volumes/macOS Big Sur/Users/pmvanderburg/matplotlib_test/')
chdir(args.input_dir)
files = listdir(args.input_dir)
files.sort()
# drop macOS Finder metadata instead of deleting from the list while iterating over it
files = [f for f in files if f != '.DS_Store']
for i, f in enumerate(files):
    print(i, f)
images = [Image.open(f) for f in files]
print(len(images))
max_rows = 7
max_cols = 3
# max_rows = 3
# max_cols = 2
methods=['Input image',
'640x480 N+FT',
'832x256 K+FT',
'640x480 N',
'832x256 N',
'640x480 K',
'832x256 K']
fig, axes = plt.subplots(nrows=7, ncols=3, figsize=(9,10),sharex=True, sharey=True)
for idx, image in enumerate(images):
# print(files[idx])
print(idx)
row = idx % max_rows
col = idx // max_rows
print(row,' row')
print(col,' col')
# if col>0:
# axes[row, col].axis("off")
axes[row,col].spines['bottom'].set_color('#ffffff')
axes[row,col].spines['top'].set_color('#ffffff')
axes[row,col].spines['right'].set_color('#ffffff')
axes[row,col].spines['left'].set_color('#ffffff')
if image.size==(1280, 720):
image = image.resize((640,480))
axes[row, col].imshow(image, cmap="gray", aspect="auto")
axes[row, 0].set_ylabel(methods[row])
plt.subplots_adjust(wspace=.05, hspace=.05)
plt.xticks([])
plt.yticks([])
# fig.savefig(path.join)
plt.show()
|
ThijsvdBurg/Husky_scripts
|
data_visualization/plot scripts/plot_results.py
|
plot_results.py
|
py
| 1,911 |
python
|
en
|
code
| 1 |
github-code
|
6
|
10858272527
|
# coding: utf-8
# In[1]:
from pandas import DataFrame, read_csv
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import sys
from time import time
import numpy as np
# In[2]:
df = pd.read_csv('lyrics.csv')
df.head(10)
# In[3]:
df['lyrics'].replace('', np.nan, inplace=True)
df.dropna(subset=['lyrics'], inplace=True)
ind_drop = df[df['genre'].apply(lambda x: x.startswith('Other'))].index
df = df.drop(ind_drop)
# In[4]:
ind_drop = df[df['genre'].apply(lambda x: x.startswith('Not Available'))].index
df = df.drop(ind_drop)
# In[5]:
ind_drop = df[df['lyrics'].apply(lambda x: x.startswith('INSTRUMENTAL'))].index
df = df.drop(ind_drop)
df.drop(columns=['index'])
ind_drop = df[df['lyrics'].apply(lambda x: x.startswith('instrumental'))].index
df = df.drop(ind_drop)
df.drop(columns=['index'])
# In[6]:
genre=df['genre'].values
lyrics=df['lyrics'].values
true_k = len(np.unique(genre))
print(np.unique(genre), "The total number of genres are", true_k)
#shaping:
lyrics = np.array(lyrics)[:,None]
print(lyrics.shape)
genre = np.array(genre)[:,None]
print(genre.shape)
# In[7]:
data = np.append(lyrics,genre,axis=1)
data.shape
print(data)
# In[8]:
np.random.shuffle(data)
data_test = data[10001:20001,]
data = data[:10000,]
# In[9]:
data_lyrics=data[:,0]
data_genre=data[:,1]
data_lyrics_test = data_test[:,0]
data_genre_test = data_test[:,1]
# print(data_lyrics)
# print(data_genre.shape)
# In[10]:
vectorizer = TfidfVectorizer(
max_df=0.75, # max doc freq (as a fraction) of any word to include in the vocabulary
    min_df=0.3, # min doc freq (as a fraction, since a float is given) of any word to include in the vocabulary
max_features=10000, # max number of words in the vocabulary
stop_words='english', # remove English stopwords
use_idf=True )
# In[11]:
labels={'Country':1, 'Electronic':2, 'Folk':3, 'Hip-Hop':4, 'Indie':5, 'Jazz':6,
'Metal':7, 'Pop':8, 'R&B':9, 'Rock':10}
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
vectorizer.fit(data_lyrics)
X = vectorizer.transform(data_lyrics)
Y = [labels[i] for i in data_genre]
X_test = vectorizer.transform(data_lyrics_test)
Y_test = [labels[i] for i in data_genre_test]
n_features = X.shape[1]
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
# In[12]:
doc_ind = 1 # Index of an example document
xi = X[doc_ind,:].todense()
term_ind = xi.argsort()[:, ::-1]
xi_sort = xi[0,term_ind]
terms = vectorizer.get_feature_names()
for i in range(n_features):
term = terms[term_ind[0,i]]
tfidf = xi[0,term_ind[0,i]]
print('{0:20s} {1:f} '.format(term, tfidf))
# In[13]:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=True)
# In[14]:
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
# In[15]:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
# In[16]:
labels={'Country':1, 'Electronic':2, 'Folk':3, 'Hip-Hop':4, 'Indie':5, 'Jazz':6,
'Metal':7, 'Pop':8, 'R&B':9, 'Rock':10}
print(labels.values)
# genre_names
# data_genre
genre_labels=[]
#print(genre_labels.shape)
for j,i in enumerate(data_genre):
x=labels[i]
#print(x)
    genre_labels.append(x)
#print(genre_labels)
# In[17]:
# note: raw k-means cluster indices are not aligned with the genre labels in Y_test,
# so this fraction understates performance; see the alignment sketch at the end
print((Y_test == km.predict(X_test)).sum() / len(Y_test))
# In[18]:
labelkm = km.labels_
print(labelkm.shape)
print(type(labelkm))
# In[19]:
#print(data_genre)
labelkm = km.labels_
from sklearn.metrics import confusion_matrix
C = confusion_matrix(genre_labels,labelkm)
Csum = np.sum(C,axis=0)
Cnorm = C / Csum[None,:]
print(Cnorm)
print(np.array_str(C, precision=3, suppress_small=True))
plt.imshow(C, interpolation='none')
plt.colorbar()
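# In[20]:
# A follow-up sketch (not in the original notebook): k-means cluster indices are
# arbitrary, so a fairer accuracy estimate first matches clusters to genres with the
# Hungarian algorithm (scipy is an assumed dependency).
from scipy.optimize import linear_sum_assignment
C_test = confusion_matrix(Y_test, km.predict(X_test) + 1, labels=list(labels.values()))
row_ind, col_ind = linear_sum_assignment(-C_test)  # maximize the matched counts
aligned_acc = C_test[row_ind, col_ind].sum() / C_test.sum()
print("accuracy after aligning clusters to genres:", aligned_acc)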
|
TejaishwaryaGagadam/music_genre_predictor
|
K_Means_Clustering.py
|
K_Means_Clustering.py
|
py
| 4,472 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10931063926
|
import unittest
import requests_mock
from alertaclient.api import Client
class PermissionTestCase(unittest.TestCase):
def setUp(self):
self.client = Client()
self.perm = """
{
"id": "584f38f4-b44e-4d87-9b61-c106d21bcc7a",
"permission": {
"href": "http://localhost:8080/perm/584f38f4-b44e-4d87-9b61-c106d21bcc7a",
"id": "584f38f4-b44e-4d87-9b61-c106d21bcc7a",
"match": "websys",
"scopes": [
"admin:users",
"admin:keys",
"write"
]
},
"status": "ok"
}
"""
@requests_mock.mock()
def test_permission(self, m):
m.post('http://localhost:8080/perm', text=self.perm)
perm = self.client.create_perm(role='websys', scopes=['admin:users', 'admin:keys', 'write'])
self.assertEqual(perm.match, 'websys')
self.assertEqual(sorted(perm.scopes), sorted(['admin:users', 'admin:keys', 'write']))
|
alerta/python-alerta-client
|
tests/unit/test_permissions.py
|
test_permissions.py
|
py
| 1,065 |
python
|
en
|
code
| 27 |
github-code
|
6
|