| Column | Type | Values / length | Nullable |
|---|---|---|---|
| hexsha | string | length 40 | |
| size | int64 | 5 to 2.06M | |
| ext | string | 10 classes | |
| lang | string | 1 class | |
| max_stars_repo_path | string | length 3 to 248 | |
| max_stars_repo_name | string | length 5 to 125 | |
| max_stars_repo_head_hexsha | string | length 40 to 78 | |
| max_stars_repo_licenses | list | length 1 to 10 | |
| max_stars_count | int64 | 1 to 191k | ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 | ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 | ⌀ |
| max_issues_repo_path | string | length 3 to 248 | |
| max_issues_repo_name | string | length 5 to 125 | |
| max_issues_repo_head_hexsha | string | length 40 to 78 | |
| max_issues_repo_licenses | list | length 1 to 10 | |
| max_issues_count | int64 | 1 to 67k | ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 | ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 | ⌀ |
| max_forks_repo_path | string | length 3 to 248 | |
| max_forks_repo_name | string | length 5 to 125 | |
| max_forks_repo_head_hexsha | string | length 40 to 78 | |
| max_forks_repo_licenses | list | length 1 to 10 | |
| max_forks_count | int64 | 1 to 105k | ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 | ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 | ⌀ |
| content | string | length 5 to 2.06M | |
| avg_line_length | float64 | 1 to 1.02M | |
| max_line_length | int64 | 3 to 1.03M | |
| alphanum_fraction | float64 | 0 to 1 | |
| count_classes | int64 | 0 to 1.6M | |
| score_classes | float64 | 0 to 1 | |
| count_generators | int64 | 0 to 651k | |
| score_generators | float64 | 0 to 1 | |
| count_decorators | int64 | 0 to 990k | |
| score_decorators | float64 | 0 to 1 | |
| count_async_functions | int64 | 0 to 235k | |
| score_async_functions | float64 | 0 to 1 | |
| count_documentation | int64 | 0 to 1.04M | |
| score_documentation | float64 | 0 to 1 | |

Each row below follows this schema, with fields separated by `|`; the `content` field holds the raw source of one Python file. (⌀ marks columns that may be null.)
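A minimal sketch of how rows with this schema might be queried once loaded into a pandas DataFrame; the Parquet file name and the choice of pandas are illustrative assumptions, not something the schema above specifies.

```python
import pandas as pd

# Assumed storage format and file name for the rows described above.
df = pd.read_parquet("data.parquet")

# Example query: keep class-heavy, reasonably documented files.
subset = df[(df["count_classes"] > 0) & (df["score_documentation"] > 0.2)]
print(subset[["max_stars_repo_name", "max_stars_repo_path", "size"]].head())
```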
b4148b82caffcb3d401203b514031ef55ddaf4b5 | 1,279 | py | Python | CodigoSOLID.py | JhonGalarza/SOLID | 5ee45f136a24cb2300837c8ed89accdc2f299c83 | [
"MIT"
]
| null | null | null | CodigoSOLID.py | JhonGalarza/SOLID | 5ee45f136a24cb2300837c8ed89accdc2f299c83 | [
"MIT"
]
| null | null | null | CodigoSOLID.py | JhonGalarza/SOLID | 5ee45f136a24cb2300837c8ed89accdc2f299c83 | [
"MIT"
]
| null | null | null | # INPUT DATA
ANIMAL = int(input("Which animal's characteristics do you want to know? 1.Leon 2.Ballena 3.Tucan? "))
class Animal:
    def __init__(self, ANIMAL):
        self.ANIMAL = ANIMAL
    def acciones_comun(self):
        comun = "Eats"
        return comun
    def sentido_vista(self):
        vista = "Can see"
        return vista
class Animal_Tierra:
    def acciones_Tierra(self):
        Tierra = "walks on four legs"
        return Tierra
class Animal_Agua:
    def acciones_Agua(self):
        return "Swims underwater"
class Animal_Aire:  # plain mixin (like Animal_Tierra/Animal_Agua) so Tucan's bases give a valid MRO
    def acciones_Aire(self):
        return "Flies"
class Leon(Animal, Animal_Tierra):
    def llamar(self):
        # Leon is a child class of Animal and adds the Animal_Tierra behaviour.
        caracteristicas = (self.acciones_comun(), self.sentido_vista(), self.acciones_Tierra())
        return caracteristicas
class Ballena(Animal, Animal_Agua):
    def llamar(self):
        caracteristicas = (self.acciones_comun(), self.sentido_vista(), self.acciones_Agua())
        return caracteristicas
class Tucan(Animal, Animal_Aire):
    def llamar(self):
        caracteristicas = (self.acciones_comun(), self.sentido_vista(), self.acciones_Aire())
        return caracteristicas
if ANIMAL == 1:
    print(Leon(ANIMAL).llamar())
elif ANIMAL == 2:
    print(Ballena(ANIMAL).llamar())
elif ANIMAL == 3:
    print(Tucan(ANIMAL).llamar())
 | 24.596154 | 120 | 0.620797 | 848 | 0.6625 | 0 | 0 | 0 | 0 | 0 | 0 | 343 | 0.267969 |
b414e74ae421f14965c6e966091b96bde22167db | 8,249 | py | Python | orca/topology/infra/k8s/__init__.py | filwie/orca | 84cfd53d309d85f7a7fb8649ba4abc8c2df9feac | [
"Apache-2.0"
]
| null | null | null | orca/topology/infra/k8s/__init__.py | filwie/orca | 84cfd53d309d85f7a7fb8649ba4abc8c2df9feac | [
"Apache-2.0"
]
| null | null | null | orca/topology/infra/k8s/__init__.py | filwie/orca | 84cfd53d309d85f7a7fb8649ba4abc8c2df9feac | [
"Apache-2.0"
]
| null | null | null | # Copyright 2020 OpenRCA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from orca.topology import bundle
from orca.topology.infra.istio import linker as istio_linker
from orca.topology.infra.k8s import cluster, linker, probe
def get_probes():
return [
bundle.ProbeBundle(
probe=probe.PodPullProbe,
linkers=[
linker.PodToServiceLinker,
linker.PodToReplicaSetLinker,
linker.PodToStatefulSetLinker,
linker.PodToDaemonSetLinker,
linker.PodToNodeLinker,
linker.ConfigMapToPodLinker,
linker.SecretToPodLinker,
linker.PersistentVolumeClaimToPodLinker
]
),
bundle.ProbeBundle(
probe=probe.PodPushProbe,
linkers=[
linker.PodToServiceLinker,
linker.PodToReplicaSetLinker,
linker.PodToStatefulSetLinker,
linker.PodToDaemonSetLinker,
linker.PodToNodeLinker,
linker.ConfigMapToPodLinker,
linker.SecretToPodLinker,
linker.PersistentVolumeClaimToPodLinker
]
),
bundle.ProbeBundle(
probe=probe.ServicePullProbe,
linkers=[
linker.PodToServiceLinker,
linker.EndpointsToServiceLinker,
istio_linker.VirtualServiceToServiceLinker,
istio_linker.DestinationRuleToServiceLinker,
linker.IngressToServiceLinker
]
),
bundle.ProbeBundle(
probe=probe.ServicePushProbe,
linkers=[
linker.PodToServiceLinker,
linker.EndpointsToServiceLinker,
istio_linker.VirtualServiceToServiceLinker,
istio_linker.DestinationRuleToServiceLinker,
linker.IngressToServiceLinker
]
),
bundle.ProbeBundle(
probe=probe.EndpointsPullProbe,
linkers=[
linker.EndpointsToServiceLinker
]
),
bundle.ProbeBundle(
probe=probe.EndpointsPushProbe,
linkers=[
linker.EndpointsToServiceLinker
]
),
bundle.ProbeBundle(
probe=probe.DeploymentPullProbe,
linkers=[
linker.DeploymentToHorizontalPodAutoscalerLinker,
linker.ReplicaSetToDeploymentLinker
]
),
bundle.ProbeBundle(
probe=probe.DeploymentPushProbe,
linkers=[
linker.DeploymentToHorizontalPodAutoscalerLinker,
linker.ReplicaSetToDeploymentLinker
]
),
bundle.ProbeBundle(
probe=probe.ReplicaSetPullProbe,
linkers=[
linker.PodToReplicaSetLinker,
linker.ReplicaSetToDeploymentLinker,
linker.ReplicaSetToHorizontalPodAutoscalerLinker
]
),
bundle.ProbeBundle(
probe=probe.ReplicaSetPushProbe,
linkers=[
linker.PodToReplicaSetLinker,
linker.ReplicaSetToDeploymentLinker,
linker.ReplicaSetToHorizontalPodAutoscalerLinker
]
),
bundle.ProbeBundle(
probe=probe.DaemonSetPullProbe,
linkers=[
linker.PodToDaemonSetLinker
]
),
bundle.ProbeBundle(
probe=probe.DaemonSetPushProbe,
linkers=[
linker.PodToDaemonSetLinker
]
),
bundle.ProbeBundle(
probe=probe.StatefulSetPullProbe,
linkers=[
linker.PodToStatefulSetLinker,
linker.StatefulSetToHorizontalPodAutoscalerLinker
]
),
bundle.ProbeBundle(
probe=probe.StatefulSetPushProbe,
linkers=[
linker.PodToStatefulSetLinker,
linker.StatefulSetToHorizontalPodAutoscalerLinker
]
),
bundle.ProbeBundle(
probe=probe.ConfigMapPullProbe,
linkers=[
linker.ConfigMapToPodLinker
]
),
bundle.ProbeBundle(
probe=probe.ConfigMapPushProbe,
linkers=[
linker.ConfigMapToPodLinker
]
),
bundle.ProbeBundle(
probe=probe.SecretPullProbe,
linkers=[
linker.SecretToPodLinker
]
),
bundle.ProbeBundle(
probe=probe.SecretPushProbe,
linkers=[
linker.SecretToPodLinker
]
),
bundle.ProbeBundle(
probe=probe.StorageClassPullProbe,
linkers=[
linker.PersistentVolumeToStorageClassLinker
]
),
bundle.ProbeBundle(
probe=probe.StorageClassPushProbe,
linkers=[
linker.PersistentVolumeToStorageClassLinker
]
),
bundle.ProbeBundle(
probe=probe.PersistentVolumePullProbe,
linkers=[
linker.PersistentVolumeToStorageClassLinker,
linker.PersistentVolumeToPersistentVolumeClaimLinker
]
),
bundle.ProbeBundle(
probe=probe.PersistentVolumePushProbe,
linkers=[
linker.PersistentVolumeToStorageClassLinker,
linker.PersistentVolumeToPersistentVolumeClaimLinker
]
),
bundle.ProbeBundle(
probe=probe.PersistentVolumeClaimPullProbe,
linkers=[
linker.PersistentVolumeToPersistentVolumeClaimLinker,
linker.PersistentVolumeClaimToPodLinker
]
),
bundle.ProbeBundle(
probe=probe.PersistentVolumeClaimPushProbe,
linkers=[
linker.PersistentVolumeToPersistentVolumeClaimLinker,
linker.PersistentVolumeClaimToPodLinker
]
),
bundle.ProbeBundle(
probe=probe.HorizontalPodAutoscalerPullProbe,
linkers=[
linker.DeploymentToHorizontalPodAutoscalerLinker,
linker.ReplicaSetToHorizontalPodAutoscalerLinker,
linker.StatefulSetToHorizontalPodAutoscalerLinker
]
),
bundle.ProbeBundle(
probe=probe.HorizontalPodAutoscalerPushProbe,
linkers=[
linker.DeploymentToHorizontalPodAutoscalerLinker,
linker.ReplicaSetToHorizontalPodAutoscalerLinker,
linker.StatefulSetToHorizontalPodAutoscalerLinker
]
),
bundle.ProbeBundle(
probe=probe.NodePullProbe,
linkers=[
linker.PodToNodeLinker,
linker.NodeToClusterLinker
]
),
bundle.ProbeBundle(
probe=probe.NodePushProbe,
linkers=[
linker.PodToNodeLinker,
linker.NodeToClusterLinker
]
),
bundle.ProbeBundle(
probe=probe.IngressPullProbe,
linkers=[
linker.IngressToServiceLinker
]
),
bundle.ProbeBundle(
probe=probe.IngressPushProbe,
linkers=[
linker.IngressToServiceLinker
]
),
bundle.ProbeBundle(
probe=cluster.ClusterProbe,
linkers=[
linker.NodeToClusterLinker
]
)
]
| 29.566308 | 74 | 0.562856 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 566 | 0.068614 |
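A rough sketch of how the registry returned by `get_probes()` above might be inspected; the `probe` and `linkers` attribute names on the bundle are assumptions inferred from the `ProbeBundle` constructor keywords, not confirmed by this file.

```python
# Hypothetical: dump the probe -> linker wiring assembled above.
for bundle_entry in get_probes():
    probe_cls = bundle_entry.probe         # assumed attribute name
    linker_classes = bundle_entry.linkers  # assumed attribute name
    print(probe_cls.__name__, "->", [cls.__name__ for cls in linker_classes])
```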
b415b852eb1504fe65a58d7db038c31b5386abda | 2,616 | py | Python | thelma/repositories/rdb/view.py | fogathmann/TheLMA | ac330a0005da4fea2f1387da9ff9938611ad1481 | [
"MIT"
]
| 1 | 2020-07-12T22:47:58.000Z | 2020-07-12T22:47:58.000Z | thelma/repositories/rdb/view.py | papagr/TheLMA | d2dc7a478ee5d24ccf3cc680888e712d482321d0 | [
"MIT"
]
| null | null | null | thelma/repositories/rdb/view.py | papagr/TheLMA | d2dc7a478ee5d24ccf3cc680888e712d482321d0 | [
"MIT"
]
| 1 | 2020-07-12T22:40:36.000Z | 2020-07-12T22:40:36.000Z | """
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Utilities to create/drop views.
Based on a recipe published in:
http://www.sqlalchemy.org/trac/wiki/UsageRecipes/Views
"""
from sqlalchemy.sql import table
from sqlalchemy.ext import compiler
from sqlalchemy.schema import DDLElement
__docformat__ = 'reStructuredText en'
__all__ = ['CreateView',
'DropView',
'view_factory',
]
class CreateView(DDLElement):
    def __init__(self, name, selectable): # pylint: disable=W0231
        self.name = name
        self.selectable = selectable
class DropView(DDLElement):
    def __init__(self, name): # pylint: disable=W0231
        self.name = name
@compiler.compiles(CreateView, 'postgresql')
def create_view_compile_postgresql(element, compiler, **kw): # pylint: disable=W0621,W0613
    selection = compiler.sql_compiler.process(element.selectable)
    stmt = "CREATE OR REPLACE VIEW %s AS %s" % (element.name, selection)
    # FIXME: we should not combine the statement and params here.
    #        it is a SQLAlchemy bug... report it.
    params = {}
    for k, v in element.selectable.compile().params.iteritems():
        params[k] = ("'%s'" % v) if isinstance(v, basestring) else v
    return stmt % params
@compiler.compiles(CreateView, 'sqlite')
def create_view_compile_sqlite(element, compiler, **kw): # pylint: disable=W0621,W0613
    # FIXME: duplicate code
    # FIXME: it seems that there is a bug in SQLAlchemy and creating views
    #        this way emits an exception
    selection = compiler.sql_compiler.process(element.selectable)
    stmt = "CREATE VIEW %s AS %s" % (element.name, selection)
    # FIXME: we should not combine the statement and params here.
    #        it is a SQLAlchemy bug... report it.
    params = {}
    for k, v in element.selectable.compile().params.iteritems():
        params[k] = ("'%s'" % v) if isinstance(v, basestring) else v
    return stmt % params
@compiler.compiles(DropView)
def drop_view_compile(element, compiler, **kw): # pylint: disable=W0621,W0613
    return "DROP VIEW %s" % (element.name)
def view_factory(name, metadata, selectable):
    if not hasattr(metadata, 'views'):
        metadata.views = {}
    metadata.views[name] = table(name)
    for c in selectable.c:
        c._make_proxy(metadata.views[name]) # pylint: disable=W0212
    CreateView(name, selectable).execute_at('after-create', metadata)
    DropView(name).execute_at('before-drop', metadata)
    return metadata.views[name]
| 33.974026 | 90 | 0.69419 | 263 | 0.100535 | 0 | 0 | 1,404 | 0.536697 | 0 | 0 | 976 | 0.373089 |
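A hedged usage sketch for `view_factory` defined above; it assumes the old-style SQLAlchemy API that the file itself targets (Python-2 idioms, `execute_at`, list-form `select`), and the table and column names are invented for illustration.

```python
# Illustrative only: names and the legacy SQLAlchemy calls are assumptions
# matched to the idioms used in view.py above.
import sqlalchemy as sa
from thelma.repositories.rdb.view import view_factory  # module path taken from the row metadata

metadata = sa.MetaData()
users = sa.Table('users', metadata,
                 sa.Column('id', sa.Integer, primary_key=True),
                 sa.Column('name', sa.String(50)),
                 sa.Column('active', sa.Boolean))

# The selectable the database view will wrap.
active_users = sa.select([users.c.id, users.c.name]).where(users.c.active == True)

# Registers CREATE VIEW / DROP VIEW DDL on the metadata and returns a table proxy.
active_users_view = view_factory('active_users_v', metadata, active_users)
```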
b415cd56b8b968d2043025ce5a7780e981f5488b | 960 | py | Python | msblog/models.py | designermanjeets/mscreativepixel | 8fefa48296c97fc541bc6d4f9ad8fa7048d0e377 | [
"Apache-2.0"
]
| null | null | null | msblog/models.py | designermanjeets/mscreativepixel | 8fefa48296c97fc541bc6d4f9ad8fa7048d0e377 | [
"Apache-2.0"
]
| null | null | null | msblog/models.py | designermanjeets/mscreativepixel | 8fefa48296c97fc541bc6d4f9ad8fa7048d0e377 | [
"Apache-2.0"
]
| null | null | null | from django.db import models
from datetime import datetime
import string, random
import uuid
# Create your models here.
class HeaderNavs(models.Model):
    title = models.CharField(max_length = 50)
    url = models.CharField(max_length = 50)
    def __str__(self):
        return self.title
    class Meta:
        verbose_name_plural = "HeaderNavs"
class Blogs(models.Model):
    title = models.CharField(max_length = 50)
    short_description = models.TextField(max_length = 100)
    description = models.TextField()
    created_at = models.DateTimeField(default=datetime.now, blank=True)
    avatar = models.ImageField(upload_to = 'static/img/avatar/', default = 'static/img/avatar_1.jpg')
    slug = models.CharField(max_length=40, blank=True, default=uuid.uuid4, unique=True)
    def __str__(self):
        return self.title
    class Meta:
        verbose_name_plural = "Blogs"
| 28.235294 | 114 | 0.660417 | 832 | 0.866667 | 0 | 0 | 0 | 0 | 0 | 0 | 90 | 0.09375 |
b415f8911ff14da18af621c103440493a6703472 | 1,281 | py | Python | Practical/Easy/HSV color wheel/colorwheel.py | saintwithataint/Pro-g-rammingChallenges4 | 3f720a375b89ee289237819c2dc89226634b7a5b | [
"Apache-2.0"
]
| 1 | 2022-03-16T16:47:22.000Z | 2022-03-16T16:47:22.000Z | Practical/Easy/HSV color wheel/colorwheel.py | saintwithataint/Pro-g-rammingChallenges4 | 3f720a375b89ee289237819c2dc89226634b7a5b | [
"Apache-2.0"
]
| null | null | null | Practical/Easy/HSV color wheel/colorwheel.py | saintwithataint/Pro-g-rammingChallenges4 | 3f720a375b89ee289237819c2dc89226634b7a5b | [
"Apache-2.0"
]
| 2 | 2022-02-02T18:02:03.000Z | 2022-03-16T16:47:34.000Z | import colour
import matplotlib.pyplot as plt
import numpy as np
COLOUR_STYLE = colour.plotting.colour_style()
COLOUR_STYLE.update(
{
"figure.figsize": (11, 11),
"legend.framealpha": colour.plotting.COLOUR_STYLE_CONSTANTS.opacity.low,
}
)
plt.style.use(COLOUR_STYLE)
plt.style.use("dark_background")
colour.utilities.describe_environment()
colour.utilities.filter_warnings(*[True] * 4)
def colour_wheel(samples=1024, clip_circle=True, method="Colour"):
xx, yy = np.meshgrid(
np.linspace(-1, 1, samples), np.linspace(-1, 1, samples)
)
S = np.sqrt(xx**2 + yy**2)
H = (np.arctan2(xx, yy) + np.pi) / (np.pi * 2)
HSV = colour.utilities.tstack([H, S, np.ones(H.shape)])
RGB = colour.HSV_to_RGB(HSV)
if clip_circle:
RGB[S > 1] = 0
A = np.where(S > 1, 0, 1)
else:
A = np.ones(S.shape)
if method.lower() == "matplotlib":
RGB = colour.utilities.orient(RGB, "90 CW")
elif method.lower() == "nuke":
RGB = colour.utilities.orient(RGB, "Flip")
RGB = colour.utilities.orient(RGB, "90 CW")
R, G, B = colour.utilities.tsplit(RGB)
return colour.utilities.tstack([R, G, B, A])
COLOUR_WHEEL = colour_wheel(method="Nuke")
colour.plotting.plot_image(COLOUR_WHEEL)
| 26.6875 | 80 | 0.640125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.081187 |
b4162ac39dacfccdd55b041dd156a4ebc43907ba | 40,090 | py | Python | kojen/smgen.py | kohjaen/kojen | e61855e48617e691d1fa0ddac4fdabac6b6a1eff | [
"MIT"
]
| 3 | 2020-07-12T08:17:42.000Z | 2022-02-11T15:44:49.000Z | kojen/smgen.py | kohjaen/kojen | e61855e48617e691d1fa0ddac4fdabac6b6a1eff | [
"MIT"
]
| null | null | null | kojen/smgen.py | kohjaen/kojen | e61855e48617e691d1fa0ddac4fdabac6b6a1eff | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'eugene'
'''
MIT License
Copyright (c) 2015 Eugene Grobbelaar (email : [email protected])
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
'''
Step 1) Load template files to memory
Step 2) Search and replace these tags in memory (including filenames).
<<<NAMESPACE>>>
<<<STATEMACHINENAME>>> or <<<CLASSNAME>>>
<<<AUTHOR>>>
Step 3) Search for the following pairs of tags
<<<PER_STATE_BEGIN>>>
<<<PER_STATE_END>>>
<<<PER_EVENT_BEGIN>>>
<<<PER_EVENT_END>>>
<<<PER_ACTION_BEGIN>>>
<<<PER_ACTION_END>>>
<<<PER_ACTION_SIGNATURE_BEGIN>>>
<<<PER_ACTION_SIGNATURE_END>>>
<<<PER_GUARD_BEGIN>>>
<<<PER_GUARD_END>>>
and duplicate the following for each item, replacing each tag with the item name
<<<STATENAME>>>
<<<EVENTNAME>>>
<<<ACTIONNAME>>>
<<<GUARDNAME>>>
These need to be expanded for event structs
<<<EVENTSIGNATURE>>>
<<<EVENTMEMBERSINSTANTIATE>>>
<<<EVENTMEMBERSDECLARE>>>
When looping <<<ALPH>>> should increment from a through Z.
When looping <<<NUM>>> should increment from 1 through 10000.
When reading the transition table, first state name (top, left) should be set to the value for this tag : <<<STATE_0>>>
Then, the transition table needs to go here, following the rules.
<<<TTT_BEGIN>>>
<<<TTT_END>>>
or
<<<TTT_LITE_BEGIN>>>
<<<TTT_LITE_END>>>
or
<<<TTT_LITE_SML_BEGIN>>>
<<<TTT_LITE_SML_END>>>
# EMBEDDED SM SUPPORT.
Step 4) In each <<PER_XXX tag, there might be more expansion required. The following tags apply in this pass
<<<PER_EVENT_CURRENT_NEXT_STATE_BEGIN>>>
<<<PER_EVENT_NEXT_STATE_END>>>
and the following replacement tags will be correctly set
<<<EVENTSTATECURRENT>>>
<<<EVENTSTATENEXT>>>
Also, the original SM only allows a single state-based action to happen.
I want there to be several actions allowed in a State, based on several events valid in that state.
These tags provide for that.
<<<PER_STATE_ACTION_EVENT_BEGIN>>>
<<<PER_STATE_ACTION_EVENT_END>>>
and the following replacement tags will be correctly set
<<<PER_STATE_ACTION>>>
<<<PER_STATE_EVENT>>>
# END EMBEDDED SM SUPPORT.
'''
__TAG_AUTHOR__ = '<<<AUTHOR>>>'
__TAG_GROUP__ = '<<<GROUP>>>'
__TAG_BRIEF__ = '<<<BRIEF>>>'
__TAG_NAMESPACE__ = '<<<NAMESPACE>>>'
__TAG_SM_NAME__ = '<<<STATEMACHINENAME>>>'
__TAG_SM_NAME_UPPER__ = '<<<STATEMACHINENAMEUPPER>>>'
__TAG_CLASS_NAME__ = '<<<CLASSNAME>>>'
__TAG_PyIFGen_NAME__ = '<<<PYIFGENNAME>>>'
__TAG_PS_BEGIN__ = "<<<PER_STATE_BEGIN>>>"
__TAG_PS_END__ = "<<<PER_STATE_END>>>"
__TAG_PE_BEGIN__ = "<<<PER_EVENT_BEGIN>>>"
__TAG_PE_END__ = "<<<PER_EVENT_END>>>"
__TAG_PA_BEGIN__ = "<<<PER_ACTION_BEGIN>>>"
__TAG_PA_END__ = "<<<PER_ACTION_END>>>"
__TAG_PASIG_BEGIN__ = "<<<PER_ACTION_SIGNATURE_BEGIN>>>"
__TAG_PASIG_END__ = "<<<PER_ACTION_SIGNATURE_END>>>"
__TAG_PG_BEGIN__ = "<<<PER_GUARD_BEGIN>>>"
__TAG_PG_END__ = "<<<PER_GUARD_END>>>"
__TAG_EVENT_SIGNATURE__ = "<<<EVENTSIGNATURE>>>"
__TAG_EVENT_MEMBERINST__ = "<<<EVENTMEMBERSINSTANTIATE>>>"
__TAG_LITE_EVENT_MEMBERINST__ = "<<<EVENTMEMBERSLITEINSTANTIATE>>>"
__TAG_EVENT_MEMBERDECL__ = "<<<EVENTMEMBERSDECLARE>>>"
__TAG_STATENAME__ = '<<<STATENAME>>>'
__TAG_EVENTNAME__ = '<<<EVENTNAME>>>'
__TAG_EVENTNAME_SMALL_CAMEL__ = '<<<EVENTNAMESMALLCAMEL>>>'
__TAG_ACTIONNAME__ = '<<<ACTIONNAME>>>'
__TAG_GUARDNAME__ = '<<<GUARDNAME>>>'
__TAG_ABC__ = '<<<ALPH>>>'
__TAG_123__ = '<<<NUM>>>'
__TAG_INIT_STATE__ = '<<<STATE_0>>>'
__TAG_TTT_BEGIN__ = '<<<TTT_BEGIN>>>'
__TAG_TTT_END___ = '<<<TTT_END>>>'
__TAG_TTT_LITE_BEGIN__ = '<<<TTT_LITE_BEGIN>>>'
__TAG_TTT_LITE_END__ = '<<<TTT_LITE_END>>>'
__TAG_TTT_LITE_SML_BEGIN__ = '<<<TTT_LITE_SML_BEGIN>>>'
__TAG_TTT_LITE_SML_END__ = '<<<TTT_LITE_SML_END>>>'
__TAG_DECLSPEC_DLL_EXPORT__ = "<<<DLL_EXPORT>>>"
# EMBEDDED SM SUPPORT.
__TAG_EVENT_CURNEX_ST_BEG__ = "<<<PER_EVENT_CURRENT_NEXT_STATE_BEGIN>>>"
__TAG_EVENT_CURNEX_ST_END__ = "<<<PER_EVENT_NEXT_STATE_END>>>"
__TAG_EVENT_ST_CUR__ = "<<<EVENTSTATECURRENT>>>"
__TAG_EVENT_ST_NXT__ = "<<<EVENTSTATENEXT>>>"
__TAG_PSAE_BEGIN__ = "<<<PER_STATE_ACTION_EVENT_BEGIN>>>"
__TAG_PSAE_END__ = "<<<PER_STATE_ACTION_EVENT_END>>>"
__TAG_PSAE_ACTION__ = "<<<PER_STATE_ACTION>>>"
__TAG_PSAE_EVENT__ = "<<<PER_STATE_EVENT>>>"
# END EMBEDDED SM SUPPORT.
# Python2 -> 3 shennanigans...try support both
try:
from interface_base import * # py2
except (ModuleNotFoundError, ImportError) as e:
from .interface_base import * # py3
try:
from .preservative import *
except (ModuleNotFoundError, ImportError) as e:
from preservative import *
try:
from .cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case
except (ModuleNotFoundError, ImportError) as e:
from cgen import CBASEGenerator, CCodeModel, alpha, __getnextalphabet__, __resetalphabet__, even_space, FileCopyUtil, caps, camel_case_small, camel_case
try:
from LanguageCPP import LanguageCPP
except (ModuleNotFoundError, ImportError) as e:
from .LanguageCPP import LanguageCPP
# Model that describes a state machine.
class CStateMachineModel:
def __init__(self):
self.statemachinename = ""
self.namespacename = ""
self.declspecdllexport = ""
self.pythoninterfacegeneratorfilename = ""
self.states = []
self.actions = []
self.events = []
self.guards = []
# EMBEDDED SM SUPPORT.
self.event_transitions_per_state = {} # ['event', ['next state,current state' , ...]]
self.actionevents_per_state = {} # ['state', [['event', 'action'] , ...]
# END EMBEDDED SM SUPPORT.
self.actionsignatures = OrderedDict()
# Transition Table Model uses State Machine Model to generate all code required for a working state machine.
class CTransitionTableModel(CStateMachineModel):
START_STATE = 0
EVENT = 1
NEXT_STATE = 2
ACTION = 3
GUARD = 4
def __init__(self, tt, nn, smn, dclspc = ""):
CStateMachineModel.__init__(self)
self.transition_table = tt
self.statemachinename = smn
self.namespacename = nn
self.declspecdllexport = dclspc
tstate = OrderedDict()
taction = OrderedDict()
tevent = OrderedDict()
tguard = OrderedDict()
# EMBEDDED SM SUPPORT. ['current state, event', 'next state']
tevent_transitions_tmp = {}
# END EMBEDDED SM SUPPORT.
# Filter
for tableline in self.transition_table:
if tableline[self.START_STATE] != "" and tableline[self.START_STATE].lower() != "none":
tstate[tableline[self.START_STATE]] = 0
if tableline[self.NEXT_STATE] != "" and tableline[self.NEXT_STATE].lower() != "none":
tstate[tableline[self.NEXT_STATE]] = 0
if tableline[self.EVENT] != "" and tableline[self.EVENT].lower() != "none":
tevent[tableline[self.EVENT]] = 0
# EMBEDDED SM SUPPORT. ['current state, event', 'next state']
'''
if tableline[self.NEXT_STATE] == "" or tableline[self.NEXT_STATE].lower() == "none":
raise Exception('Events that dont change state should re-enter the current state.\nPlease fix your transition table')
tevent_transitions_tmp[tableline[self.START_STATE] + ',' + tableline[self.EVENT]] = tableline[self.NEXT_STATE]
TODO : For the case below, how to support a different 'action' on the in-state-event???? Ie that event might have gotten the machine
to this state with a particular action, but perhaps the user has configured a different action for this event in-state???
'''
if tableline[self.NEXT_STATE] == "" or tableline[self.NEXT_STATE].lower() == "none":
tevent_transitions_tmp[tableline[self.START_STATE] + ',' + tableline[self.EVENT]] = tableline[self.START_STATE]
else:
tevent_transitions_tmp[tableline[self.START_STATE] + ',' + tableline[self.EVENT]] = tableline[self.NEXT_STATE]
# This is for in-state-actions based on events...
if tableline[self.ACTION] != "" and tableline[self.ACTION].lower() != "none":
if not (tableline[self.START_STATE] in self.actionevents_per_state):
self.actionevents_per_state[tableline[self.START_STATE]] = []
self.actionevents_per_state[tableline[self.START_STATE]].append([tableline[self.EVENT], tableline[self.ACTION]])
# END EMBEDDED SM SUPPORT.
if tableline[self.ACTION] != "" and tableline[self.ACTION].lower() != "none":
taction[tableline[self.ACTION]] = 0
if not ((tableline[self.ACTION] + tableline[self.EVENT]) in self.actionsignatures):
self.actionsignatures[tableline[self.ACTION] + tableline[self.EVENT]] = (tableline[self.ACTION], tableline[self.EVENT]) #, tableline[self.START_STATE],tableline[self.NEXT_STATE]))
if tableline[self.GUARD] != "" and tableline[self.GUARD].lower() != "none":
tguard[tableline[self.GUARD]] = 0
# Populate CStateMachineModel
for s in tstate:
self.states.append(s)
for e in tevent:
self.events.append(e)
for a in taction:
self.actions.append(a)
for g in tguard:
self.guards.append(g)
# EMBEDDED SM SUPPORT.
for e in tevent:
self.event_transitions_per_state[e] = []
for s in tstate:
key = s+','+e
if key in tevent_transitions_tmp:
self.event_transitions_per_state[e].append([tevent_transitions_tmp[key], s])
else:
self.event_transitions_per_state[e].append(['EVENT_IGNORED', s])
# END EMBEDDED SM SUPPORT.
def __getfirststate__(self):
if not self.transition_table:
return "NO TT PRESENT!"
return self.transition_table[0][0]
class CStateMachineGenerator(CBASEGenerator):
def __init__(self, inputfiledir, outputfiledir, events_interface=None, language=None, author='Anonymous', group='', brief=''):
CBASEGenerator.__init__(self,inputfiledir,outputfiledir,language, author, group, brief)
self.events_interface = events_interface
def __loadtemplates_firstfiltering__(self, smmodel):
"""
See baseclass implementation. This just prepares the dictionary of things to replace
for this type of codegeneration.
@param smmodel:
@return: cgen.CCodeModel, a dictionary -> {filename,[lines]}
"""
dict_to_replace_lines = {}
dict_to_replace_lines[__TAG_SM_NAME_UPPER__] = caps(smmodel.statemachinename)
dict_to_replace_lines[__TAG_SM_NAME__] = smmodel.statemachinename
dict_to_replace_lines[__TAG_CLASS_NAME__] = smmodel.statemachinename
dict_to_replace_lines[__TAG_PyIFGen_NAME__] = smmodel.pythoninterfacegeneratorfilename.replace('.py', '') # hack : for tcpgen simple templates,
if not dict_to_replace_lines[__TAG_PyIFGen_NAME__]:
dict_to_replace_lines[__TAG_PyIFGen_NAME__] = self.vpp_filename
dict_to_replace_lines[__TAG_NAMESPACE__] = smmodel.namespacename
dict_to_replace_lines[__TAG_AUTHOR__] = self.author
dict_to_replace_lines[__TAG_GROUP__] = self.group
dict_to_replace_lines[__TAG_BRIEF__] = self.brief
dict_to_replace_lines[__TAG_DECLSPEC_DLL_EXPORT__] = smmodel.declspecdllexport
dict_to_replace_filenames = {}
dict_to_replace_filenames["TEMPLATE_"] = smmodel.statemachinename
#dict_to_replace_filenames['.ty'] = '.py'
#dict_to_replace_filenames['.t#'] = '.cs'
#dict_to_replace_filenames['.t'] = '.h'
#dict_to_replace_filenames['.hpp'] = '.cpp' # there are no '.hpp' templates...but search and replace will apply '.t -> .h' first so '.tpp' becomes '.hpp'...grrr
return CBASEGenerator.__loadtemplates_firstfiltering__(self,dict_to_replace_lines,dict_to_replace_filenames)
def __get_event_signature__(self,name):
if self.events_interface is None or self.language is None:
return ""
for s in self.events_interface.Structs():
if s.Name == name:
return self.language.ParameterString(self.language.GetFactoryCreateParams(s, self.events_interface))
return ""
def __instantiate_event_struct_member(self, name, whitespace_cnt, is_ptr=True, instancename="data"):
if self.events_interface is None or self.language is None:
return ""
for s in self.events_interface.Structs():
if s.Name == name:
guts = self.language.InstantiateStructMembers(s, self.events_interface, '', instancename, self.language.Accessor(is_ptr))
result = ''
cnt = 0
for g in guts:
result = result + (whitespace_cnt*' ' if cnt > 0 else '') + g + '\n'
cnt = cnt + 1
return result
return ""
def __declare_event_struct_members(self, name, whitespace_cnt):
if self.events_interface is None or self.language is None:
return ""
for s in self.events_interface.Structs():
if s.Name == name:
guts = self.language.DeclareStructMembers(s, self.events_interface, '', False)
result = ''
cnt = 0
for g in guts:
result = result + ((whitespace_cnt+1)*' ' if cnt > 0 else ' ') + g + '\n'
cnt = cnt + 1
# remove last '\n'
result = result[:-1]
return result
return ""
def hasTag(self, line, tag):
return line.find(tag.replace("<<<", "").replace(">>>", "")) > 0
def hasMemberName(self, a):
return a.find("::") > 0
def extractMemberNameAndTag(self, a):
member = a[a.find("::"):a.find(">>>")].replace("::", "")
tag = a.strip()
return [tag, member]
def __innerexpand__secondfiltering__(self, names2x, lines2x, puthere):
global alpha
__resetalphabet__()
cnt = 0
for name in names2x:
for line in lines2x:
newline = line
newline = newline.replace(__TAG_STATENAME__, name)
newline = newline.replace(__TAG_EVENTNAME_SMALL_CAMEL__, camel_case_small(name))
newline = newline.replace(__TAG_EVENTNAME__, name)
newline = newline.replace(__TAG_ACTIONNAME__, name)
newline = newline.replace(__TAG_GUARDNAME__, name)
newline = newline.replace(__TAG_ABC__, chr(alpha))
newline = newline.replace(__TAG_123__, str(cnt))
# EMBEDDED SM SUPPORT.
newline = newline.replace(__TAG_EVENT_CURNEX_ST_BEG__, __TAG_EVENT_CURNEX_ST_BEG__ + '<<<' + name + '>>>') # put a marker (event name) for mapping
newline = newline.replace(__TAG_PSAE_BEGIN__, __TAG_PSAE_BEGIN__ + '<<<' + name + '>>>') # put a marker (state name) for mapping
# END EMBEDDED SM SUPPORT.
tabcnt = newline.count(' ')
newline = newline.replace(__TAG_EVENT_SIGNATURE__, self.__get_event_signature__(name))
# __TAG_EVENT_MEMBERINST__ -> PTR
if self.hasTag(newline,__TAG_EVENT_MEMBERINST__) and self.hasMemberName(newline):
line_member = self.extractMemberNameAndTag(newline)
newline = newline.replace(line_member[0],self.__instantiate_event_struct_member(name, tabcnt, True, line_member[1]))
else:
newline = newline.replace(__TAG_EVENT_MEMBERINST__, self.__instantiate_event_struct_member(name, tabcnt, True)) # PTR
# __TAG_LITE_EVENT_MEMBERINST__ -> NO PTR
if self.hasTag(newline,__TAG_LITE_EVENT_MEMBERINST__) and self.hasMemberName(newline):
line_member = self.extractMemberNameAndTag(newline)
newline = newline.replace(line_member[0],self.__instantiate_event_struct_member(name, tabcnt, False, line_member[1]))
else:
newline = newline.replace(__TAG_LITE_EVENT_MEMBERINST__, self.__instantiate_event_struct_member(name, tabcnt, False)) # NO PTR
newline = newline.replace(__TAG_EVENT_MEMBERDECL__, self.__declare_event_struct_members(name, tabcnt))
# END EMBEDDED SUPPORT
puthere.append(newline)
cnt = cnt + 1
__getnextalphabet__()
def __innerexpand_actionsignatures__(self, states2x, lines2x, puthere):
global alpha
__resetalphabet__()
cnt = 0
for key, (actionname, eventname) in states2x.items():
if eventname == "" or eventname.lower() == 'none':
eventname = "NONE"
elif eventname.lower() == 'any':
eventname = "ANY"
for line in lines2x:
puthere.append(line
.replace(__TAG_ACTIONNAME__, actionname)
.replace(__TAG_EVENTNAME_SMALL_CAMEL__, camel_case_small(eventname))
.replace(__TAG_EVENTNAME__, eventname)
.replace(__TAG_ABC__, chr(alpha))
.replace(__TAG_123__, str(cnt)))
cnt = cnt + 1
__getnextalphabet__()
def __transitiontable_replace_NONE__(self, val):
if val == "" or val.lower() == 'none':
val = "msmf::none"
return val
def __transitiontableLITE_guard_replace_NONE__(self, val):
tmp_val = val.replace('__', '')
if tmp_val == "" or tmp_val.lower() == 'none':
val = "boost::msm::gnone"
return val
def __transitiontableLITE_action_replace_NONE__(self, val):
tmp_val = val.replace('__', '')
if tmp_val == "" or tmp_val.lower() == 'none' or tmp_val.lower().find('::none<') > -1:
val = "boost::msm::none"
return val
''' This SM doesnt seem to allow 'none' transitions -> make it transition to the source state'''
def __transitiontableLITE_nextstate_replace_NONE__(self, val, source_state):
tmp_val = val.replace('__', '')
tmp_val = tmp_val.replace('msmf::', '')
if tmp_val == "" or tmp_val.lower() == 'none':
val = source_state
return val
def __expand_secondfiltering__(self, smmodel, cmmodel):
for file in cmmodel.filenames_to_lines:
ex_state = False
ex_event = False
ex_action = False
ex_actionsig = False
ex_guard = False
ex_tt = False
ex_tt_lite = False
ex_tt_lite_sml = False
snipped_to_expand = []
alllinesexpanded = []
for line in cmmodel.filenames_to_lines[file]:
begin = line.find(__TAG_PS_BEGIN__) > -1 or \
line.find(__TAG_PE_BEGIN__) > -1 or \
line.find(__TAG_PA_BEGIN__) > -1 or \
line.find(__TAG_PASIG_BEGIN__) > -1 or \
line.find(__TAG_PG_BEGIN__) > -1 or \
line.find(__TAG_TTT_BEGIN__) > -1 or \
line.find(__TAG_TTT_LITE_BEGIN__) > -1 or \
line.find(__TAG_TTT_LITE_SML_BEGIN__) > -1
ex_state = line.find(__TAG_PS_BEGIN__) > -1 or ex_state
ex_event = line.find(__TAG_PE_BEGIN__) > -1 or ex_event
ex_action = line.find(__TAG_PA_BEGIN__) > -1 or ex_action
ex_actionsig = line.find(__TAG_PASIG_BEGIN__) > -1 or ex_actionsig
ex_guard = line.find(__TAG_PG_BEGIN__) > -1 or ex_guard
ex_tt = line.find(__TAG_TTT_BEGIN__) > -1 or ex_tt
ex_tt_lite = line.find(__TAG_TTT_LITE_BEGIN__) > -1 or ex_tt_lite
ex_tt_lite_sml = line.find(__TAG_TTT_LITE_SML_BEGIN__) > -1 or ex_tt_lite_sml
if not ex_state and not ex_event and not ex_action and not ex_actionsig and not ex_guard and not ex_tt and not ex_tt_lite and not ex_tt_lite_sml:
alllinesexpanded.append(line.replace(__TAG_INIT_STATE__, smmodel.__getfirststate__()))
if ex_state and line.find(__TAG_PS_END__) > -1:
self.__innerexpand__secondfiltering__(smmodel.states, snipped_to_expand, alllinesexpanded)
snipped_to_expand = []
ex_state = False
if ex_event and line.find(__TAG_PE_END__) > -1:
self.__innerexpand__secondfiltering__(smmodel.events, snipped_to_expand, alllinesexpanded)
snipped_to_expand = []
ex_event = False
if ex_action and line.find(__TAG_PA_END__) > -1:
self.__innerexpand__secondfiltering__(smmodel.actions, snipped_to_expand, alllinesexpanded)
snipped_to_expand = []
ex_action = False
if ex_actionsig and line.find(__TAG_PASIG_END__) > -1:
self.__innerexpand_actionsignatures__(smmodel.actionsignatures, snipped_to_expand, alllinesexpanded)
snipped_to_expand = []
ex_actionsig = False
if ex_guard and line.find(__TAG_PG_END__) > -1:
self.__innerexpand__secondfiltering__(smmodel.guards, snipped_to_expand, alllinesexpanded)
snipped_to_expand = []
ex_guard = False
if ex_tt and line.find(__TAG_TTT_END___) > -1:
len_tt = len(smmodel.transition_table)
tt_out = " // " + len("msmf::Row < ") * ' ' + even_space("Start") + even_space("Event") + even_space("Next") + even_space("Action") + even_space("Guard") + '\n'
for i, ttline in enumerate(smmodel.transition_table):
tt_out += ' msmf::Row < '
tt_out += even_space(self.__transitiontable_replace_NONE__(ttline[smmodel.START_STATE])) + ','
tt_out += even_space(self.__transitiontable_replace_NONE__(ttline[smmodel.EVENT] )) + ','
tt_out += even_space(self.__transitiontable_replace_NONE__(ttline[smmodel.NEXT_STATE] )) + ','
tt_out += even_space(self.__transitiontable_replace_NONE__(ttline[smmodel.ACTION] )) + ','
tt_out += even_space(self.__transitiontable_replace_NONE__(ttline[smmodel.GUARD] )) + '> '
if i != len_tt-1:
tt_out += ","
tt_out += " // " + str(i) + '\n'
alllinesexpanded.append(tt_out)
tt_out = ""
ex_tt = False
if ex_tt_lite and line.find(__TAG_TTT_LITE_END__) > -1:
tt_out = " // " + even_space("Start + ") + even_space("Event") + even_space("[ Guard ] ") + even_space("/ Action") + even_space(" = Next") + '\n'
startStateHasEntryExit = {}
for i, ttline in enumerate(smmodel.transition_table):
if i == 0: # initial state
tt_out += " *"
else:
tt_out += " , "
tt_out += even_space(self.__transitiontable_replace_NONE__(ttline[smmodel.START_STATE])) + '+'
tt_out += even_space('event<' + self.__transitiontable_replace_NONE__(ttline[smmodel.EVENT]) + ">") + ' '
tt_out += even_space('['+self.__transitiontableLITE_guard_replace_NONE__('__'+ttline[smmodel.GUARD])+']') + ' / '
tt_out += even_space(self.__transitiontableLITE_action_replace_NONE__('__'+ttline[smmodel.ACTION]))
if ttline[smmodel.NEXT_STATE].lower() != 'none': # to not get transitions into/outof state on actions that dont change the state...
tt_out += ' = ' + even_space(self.__transitiontableLITE_nextstate_replace_NONE__(ttline[smmodel.NEXT_STATE], ttline[smmodel.START_STATE]))
tt_out += '\n'
alllinesexpanded.append(tt_out)
tt_out = ""
# State entry/exit, once only
if not (ttline[smmodel.START_STATE] in startStateHasEntryExit):
startStateHasEntryExit[ttline[smmodel.START_STATE]] = True
tt_out += " , "+ttline[smmodel.START_STATE]+" + msm::on_entry / __" + ttline[smmodel.START_STATE] + 'OnEntry\n'
tt_out += " , "+ttline[smmodel.START_STATE]+" + msm::on_exit / __" + ttline[smmodel.START_STATE] + 'OnExit'
tt_out += '\n'
alllinesexpanded.append(tt_out)
tt_out = ""
ex_tt_lite = False
if ex_tt_lite_sml and line.find(__TAG_TTT_LITE_SML_END__) > -1:
tt_out = " // " + even_space("Start + ") + even_space("Event") + even_space("[ Guard ] ") + even_space("/ Action", 100) + even_space(" = Next") + '\n'
startStateHasEntryExit = {}
for i, ttline in enumerate(smmodel.transition_table):
if i == 0: # initial state
tt_out += " *"
else:
tt_out += " , "
tt_out += even_space(self.__transitiontable_replace_NONE__(ttline[smmodel.START_STATE])) + '+'
tt_out += even_space('event<' + self.__transitiontable_replace_NONE__(ttline[smmodel.EVENT]) + ">") + ' '
tt_out += even_space('['+self.__transitiontableLITE_guard_replace_NONE__('__'+ttline[smmodel.GUARD])+']') + ' / '
#tt_out += even_space(self.__transitiontableLITE_action_replace_NONE__('call(this,&CONCRETE::' + ttline[smmodel.ACTION] + '<' + ttline[smmodel.EVENT] + ">)"), 100)
tt_out += even_space(self.__transitiontableLITE_action_replace_NONE__('__' + ttline[smmodel.ACTION]), 100)
if ttline[smmodel.NEXT_STATE].lower() != 'none': # to not get transitions into/outof state on actions that dont change the state...
tt_out += ' = ' + even_space(self.__transitiontableLITE_nextstate_replace_NONE__(ttline[smmodel.NEXT_STATE], ttline[smmodel.START_STATE]))
tt_out += '\n'
alllinesexpanded.append(tt_out)
tt_out = ""
# State entry/exit, once only
if not (ttline[smmodel.START_STATE] in startStateHasEntryExit):
startStateHasEntryExit[ttline[smmodel.START_STATE]] = True
tt_out += " , "+ttline[smmodel.START_STATE]+" + msm::on_entry<_> / __" + ttline[smmodel.START_STATE] + 'OnEntry\n'
tt_out += " , "+ttline[smmodel.START_STATE]+" + msm::on_exit<_> / __" + ttline[smmodel.START_STATE] + 'OnExit'
tt_out += '\n'
alllinesexpanded.append(tt_out)
tt_out = ""
ex_tt_lite_sml = False
if (ex_state or ex_event or ex_action or ex_actionsig or ex_guard or ex_tt or ex_tt_lite or ex_tt_lite_sml) and not begin:
snipped_to_expand.append(line)
cmmodel.filenames_to_lines[file] = alllinesexpanded
# EMBEDDED SM SUPPORT.
def __innerexpand__thirdfiltering__eventtransitionsperstate(self, namesmap3x, lines3x, puthere):
global alpha
__resetalphabet__()
cnt = 0
# First find the mapping marker
for _map in namesmap3x:
currentstate = _map[1]
nextstate = _map[0]
for line in lines3x:
#puthere.append(line.replace(__TAG_ABC__, chr(alpha)).replace(__TAG_123__, str(cnt)))
puthere.append(line.replace(__TAG_EVENT_ST_CUR__, currentstate).replace(__TAG_EVENT_ST_NXT__, nextstate).replace(__TAG_ABC__, chr(alpha)).replace(__TAG_123__, str(cnt)))
cnt = cnt + 1
__getnextalphabet__()
# this function is pretty much the same as the one above...
def __innerexpand__thirdfiltering__eventactionsperstate(self, namesmap3x, lines3x, puthere):
global alpha
__resetalphabet__()
cnt = 0
# First find the mapping marker
for _map in namesmap3x:
action = _map[1]
event = _map[0]
for line in lines3x:
# puthere.append(line.replace(__TAG_ABC__, chr(alpha)).replace(__TAG_123__, str(cnt)))
puthere.append(line.replace(__TAG_PSAE_ACTION__, action).replace(__TAG_PSAE_EVENT__, event).replace(__TAG_ABC__, chr(alpha)).replace(__TAG_123__, str(cnt)))
cnt = cnt + 1
__getnextalphabet__()
def __expand_thirdfiltering__(self, smmodel, cmmodel):
for file in cmmodel.filenames_to_lines:
ex_state = False
ex_event = False
#ex_action = False
#ex_guard = False
snippet_to_expand = []
alllinesexpanded = []
state_action_map = ''
event_map = ''
for line in cmmodel.filenames_to_lines[file]:
begin = line.find(__TAG_EVENT_CURNEX_ST_BEG__) > -1 or line.find(__TAG_PSAE_BEGIN__) > -1 #or line.find(__TAG_PA_BEGIN__) > -1 or line.find(__TAG_PG_BEGIN__) > -1
if begin:
event_map = line.replace(__TAG_EVENT_CURNEX_ST_BEG__, '').replace('<<<', '').replace('>>>', '').replace('\t', '').replace('\n', '').replace(" ","")
state_action_map = line.replace(__TAG_PSAE_BEGIN__, '').replace('<<<', '').replace('>>>', '').replace('\t', '').replace('\n', '').replace(" ","")
end_event = (line.find(__TAG_EVENT_CURNEX_ST_END__) > -1)
end_state = (line.find(__TAG_PSAE_END__) > -1)
ex_state = line.find(__TAG_PSAE_BEGIN__) > -1 or ex_state
ex_event = line.find(__TAG_EVENT_CURNEX_ST_BEG__) > -1 or ex_event
#ex_action = line.find(__TAG_PA_BEGIN__) > -1 or ex_action
#ex_guard = line.find(__TAG_PG_BEGIN__) > -1 or ex_guard
#if not ex_state and not ex_event and not ex_action and not ex_guard:
# alllinesexpanded.append(line.replace(__TAG_INIT_STATE__, smmodel.__getfirststate__()))
if ex_state and line.find(__TAG_PSAE_END__) > -1:
if state_action_map in smmodel.actionevents_per_state:
self.__innerexpand__thirdfiltering__eventactionsperstate(smmodel.actionevents_per_state[state_action_map], snippet_to_expand, alllinesexpanded)
snippet_to_expand = []
ex_state = False
if ex_event and line.find(__TAG_EVENT_CURNEX_ST_END__) > -1:
self.__innerexpand__thirdfiltering__eventtransitionsperstate(smmodel.event_transitions_per_state[event_map], snippet_to_expand, alllinesexpanded)
snippet_to_expand = []
ex_event = False
#if ex_action and line.find(__TAG_PA_END__) > -1:
# self.__innerexpand__thirdfiltering__(smmodel.actions, snippet_to_expand, alllinesexpanded)
# snippet_to_expand = []
# ex_action = False
#if ex_guard and line.find(__TAG_PG_END__) > -1:
# self.__innerexpand__thirdfiltering__(smmodel.guards, snippet_to_expand, alllinesexpanded)
# snippet_to_expand = []
# ex_guard = False
#if (ex_state or ex_event or ex_action or ex_guard) and not begin:
if (ex_event or ex_state) and not begin:
snippet_to_expand.append(line)
elif not begin and not end_event and not end_state: # Unlike the second pass, this needs to preserve what was done there...
alllinesexpanded.append(line)
cmmodel.filenames_to_lines[file] = alllinesexpanded
# END EMBEDDED SM SUPPORT.
''' Used for State Machine Generation
'''
def Generate(self, transitiontable, namespacenname, statemachinename, dclspc="", copyotherfiles = True):
print("*************************************")
print("******* SMGen ***********************")
print("*************************************")
print(" Output Dir : " + self.output_gen_file_dir)
print(" State Machine: " + statemachinename)
print(" Executing in : " + os.path.realpath(__file__))
print("*************************************")
sm = CTransitionTableModel(transitiontable, namespacenname, statemachinename, dclspc)
cm = self.__loadtemplates_firstfiltering__(sm)
self.__expand_secondfiltering__(sm, cm)
# EMBEDDED SM SUPPORT.
self.__expand_thirdfiltering__(sm, cm)
# END EMBEDDED SM SUPPORT.
# Preserve user tags.
self.__preserve_usertags_in_files__(cm)
'''
# Round-trip Code Preservation. Will load the code to preserve upon creation (if the output dir is not-empty/the same as the one in the compile path).
preservation = Preservative(self.output_gen_file_dir)
preservation.Emplace(cm.filenames_to_lines)
'''
# Write output to file.
self.__createoutput__(cm.filenames_to_lines)
# Copy non-autogenerated required files to output.
if isinstance(self.language, LanguageCPP) and copyotherfiles:
# Files...
files_to_copy = []
files_to_copy.append("allocator.h")
files_to_copy.append("allocator.cpp")
files_to_copy.append("basetypes.h")
files_to_copy.append("CMakeLists.txt")
files_to_copy.append("Fault.h")
files_to_copy.append("Fault.cpp")
files_to_copy.append("stl_allocator.h")
files_to_copy.append("thread_FreeRTOS.h")
files_to_copy.append("thread_FreeRTOS.cpp")
files_to_copy.append("threaded_dispatcher.h")
files_to_copy.append("threaded_dispatcher_FreeRTOS.h")
files_to_copy.append("threadsafe_queue.h")
files_to_copy.append("threadsafe_queue_FreeRTOS.h")
files_to_copy.append("waitcondition.h")
files_to_copy.append("waitcondition.cpp")
files_to_copy.append("xallocator.h")
files_to_copy.append("xallocator.cpp")
files_to_copy.append("xlist.h")
files_to_copy.append("xmap.h")
files_to_copy.append("xqueue.h")
files_to_copy.append("xset.h")
files_to_copy.append("xsstream.h")
files_to_copy.append("xstring.h")
allplatformsfrom = os.path.join(os.path.abspath(os.path.dirname(__file__)), os.path.join("allplatforms", "CPP"))
allplatformsto = os.path.join(os.path.abspath(self.output_gen_file_dir), "allplatforms")
FileCopyUtil(allplatformsfrom, allplatformsto, files_to_copy)
# Boost SML ...
smlfrom = os.path.join(allplatformsfrom, os.path.join("sml", os.path.join("include","boost")))
smlto = os.path.join(allplatformsto, "boost")
smlfiles_to_copy = []
smlfiles_to_copy.append("sml.hpp")
FileCopyUtil(smlfrom, smlto, smlfiles_to_copy)
# Tests...
testfiles_to_copy = []
testfiles_to_copy.append("CMakeLists.txt")
testfiles_to_copy.append("Test.ThreadingConcepts.cpp")
testfiles_to_copy.append("test_main.cpp")
tests_allplatformsfrom = os.path.join(allplatformsfrom, "testsuite")
tests_allplatformsto = os.path.join(allplatformsto, "testsuite")
FileCopyUtil(tests_allplatformsfrom, tests_allplatformsto, testfiles_to_copy)
# Micro Unit Test Framework
microunit_files_to_copy = []
microunit_files_to_copy.append("minunit.h")
microunit_files_to_copy.append("minunit.cpp")
microunit_allplatformsfrom = os.path.join(tests_allplatformsfrom, "minunit")
microunit_allplatformsto = os.path.join(tests_allplatformsto, "minunit")
FileCopyUtil(microunit_allplatformsfrom, microunit_allplatformsto, microunit_files_to_copy)
''' Used for Protocol Generation
'''
def GenerateProtocol(self, pythoninterfacegeneratorfilename, namespacenname, classname, dclspc="", preserve_dir=""):
sm = CTransitionTableModel([], namespacenname, classname, dclspc)
sm.pythoninterfacegeneratorfilename = pythoninterfacegeneratorfilename
cm = self.__loadtemplates_firstfiltering__(sm)
self.__expand_secondfiltering__(sm, cm)
# Round-trip Code Preservation. Will load the code to preserve upon creation (if the output dir is not-empty/the same as the one in the compile path).
# TCP gen might have a different output directory (typically COG will put files into an intermediate dir, and them copy them elsewhere
preservation = None
if preserve_dir == "":
preservation = Preservative(self.output_gen_file_dir)
else:
preservation = Preservative(preserve_dir)
preservation.Emplace(cm.filenames_to_lines)
# Write output to file.
self.__createoutput__(cm.filenames_to_lines)
# return the filenames
filenames = []
for filename in cm.filenames_to_lines.keys():
filenames.append(filename)
return filenames
| 50.301129 | 200 | 0.60464 | 33,363 | 0.832203 | 0 | 0 | 0 | 0 | 0 | 0 | 10,961 | 0.27341 |
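A minimal usage sketch for the generator defined above, based only on what is visible in this file: the `CStateMachineGenerator` constructor, the `Generate` signature, and the column order used by `CTransitionTableModel` (start state, event, next state, action, guard). The import path is inferred from the row's file path, and the directories and table contents are invented for illustration.

```python
# Hypothetical driver; template/output directories and the table are made up.
from kojen.smgen import CStateMachineGenerator

# Each row: [start_state, event, next_state, action, guard]
transition_table = [
    ["StateIdle",    "EventStart", "StateRunning", "OnStart", "None"],
    ["StateRunning", "EventStop",  "StateIdle",    "OnStop",  "None"],
    ["StateRunning", "EventTick",  "None",         "OnTick",  "None"],  # in-state action
]

gen = CStateMachineGenerator("templates_dir", "output_dir",
                             events_interface=None, language=None,
                             author="Anonymous")
gen.Generate(transition_table, "myns", "MyMachine", copyotherfiles=False)
```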
b419bda7c8455defc3ecb61092c5f3412e12801a | 1,744 | py | Python | roku/discovery.py | metagrapher/python-roku | 0cd209ec94531e7c4c29ca7f6a41a6199374c206 | [
"BSD-3-Clause"
]
| null | null | null | roku/discovery.py | metagrapher/python-roku | 0cd209ec94531e7c4c29ca7f6a41a6199374c206 | [
"BSD-3-Clause"
]
| null | null | null | roku/discovery.py | metagrapher/python-roku | 0cd209ec94531e7c4c29ca7f6a41a6199374c206 | [
"BSD-3-Clause"
]
| null | null | null | """
Code adapted from Dan Krause.
https://gist.github.com/dankrause/6000248
http://github.com/dankrause
"""
import socket
from http.client import HTTPResponse
from io import BytesIO
ST_DIAL = 'urn:dial-multiscreen-org:service:dial:1'
ST_ECP = 'roku:ecp'
class _FakeSocket(BytesIO):
    def makefile(self, *args, **kw):
        return self
class SSDPResponse(object):
    def __init__(self, response):
        self.location = response.getheader('location')
        self.usn = response.getheader('usn')
        self.st = response.getheader('st')
        self.cache = response.getheader('cache-control').split('=')[1]
    def __repr__(self):
        return '<SSDPResponse({location}, {st}, {usn})'.format(**self.__dict__)
def discover(timeout=2, retries=1, st=ST_ECP):
    group = ('239.255.255.250', 1900)
    message = '\r\n'.join([
        'M-SEARCH * HTTP/1.1',
        'HOST: {0}:{1}'.format(*group),
        'MAN: "ssdp:discover"',
        'ST: {st}', 'MX: 3', '', ''])
    socket.setdefaulttimeout(timeout)
    responses = {}
    for _ in range(retries):
        sock = socket.socket(
            socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
        m = message.format(st=st)
        sock.sendto(m.encode(), group)
        while 1:
            try:
                rhttp = HTTPResponse(_FakeSocket(sock.recv(1024)))
                rhttp.begin()
                if rhttp.status == 200:
                    rssdp = SSDPResponse(rhttp)
                    responses[rssdp.location] = rssdp
            except socket.timeout:
                break
    return responses.values()
| 26.830769 | 79 | 0.598624 | 465 | 0.266628 | 0 | 0 | 0 | 0 | 0 | 0 | 337 | 0.193234 |
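A small usage example for `discover()` above; the fields printed come straight from `SSDPResponse`, and the only assumption is that a Roku device is reachable on the local network.

```python
# Query the LAN for Roku devices via SSDP and print what answered.
if __name__ == '__main__':
    for response in discover(timeout=2, retries=2):
        # response.location is the device's ECP base URL reported in the SSDP reply.
        print(response.location, response.usn)
```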
b41a1df236c0501272e47ba309bb8f6eaa3a041a | 4,113 | py | Python | Approxilyzer/gem5/scripts/relyzer/run_gem5_gl.py | cornell-zhang/GLAIVE | 8e29ac621a95a25c19ccfeb5071a9d3595093ef7 | [
"BSD-3-Clause"
]
| 10 | 2020-11-21T04:13:33.000Z | 2022-01-03T23:08:09.000Z | Approxilyzer/gem5/scripts/relyzer/run_gem5_gl.py | cornell-zhang/GLAIVE | 8e29ac621a95a25c19ccfeb5071a9d3595093ef7 | [
"BSD-3-Clause"
]
| null | null | null | Approxilyzer/gem5/scripts/relyzer/run_gem5_gl.py | cornell-zhang/GLAIVE | 8e29ac621a95a25c19ccfeb5071a9d3595093ef7 | [
"BSD-3-Clause"
]
| null | null | null | import os, sys
from argparse import ArgumentParser
from datetime import datetime as dt
from pprint import pprint as pp
import shutil, glob
#from pyfiglet import figlet_format, Figlet
import datetime
'''
python run_gem5_gl.py -a radix -l inst
python run_gem5_gl.py -a radix -l bit
'''
def app(args):
if not args:
return []
else:
return args.split(',')
parser = ArgumentParser()
parser.add_argument('-a', "--apps", help='Target application names seperated by comma', \
dest='targetapp', required=True)
parser.add_argument('-l', "--info_level", help='Target application architecture', \
dest='info_level', default='bit')
args = parser.parse_args()
apps = app(args.targetapp)
level = args.info_level
#num = args.num_progs
src_dir = os.environ.get('GRAPHLEARN')
gem5_dir= os.environ.get('APPROXGEM5') + '/gem5/scripts/relyzer/'
dest_dir = os.environ.get('APPROXGEM5') + '/workloads/x86/apps/'
for app in apps:
app1 = app + '_' + level
os.chdir(gem5_dir)
if level == 'bit':
# cp result from src to dest
gl_src_file = src_dir + 'sdc_output' +'/' + app1 + '_post.txt'
gl_dest_file = dest_dir + app +'/' + app1 + '_post.txt'
cmd = 'cp ' + gl_src_file + ' ' + gl_dest_file
status = os.system(cmd)
if status != 0:
print('cp data in gl failure ' + app1)
exit(-1)
bit_rf_src_file = src_dir + 'sdc_output_ml_bit' +'/' + app1 + '_post_rf.txt'
bit_rf_dest_file = dest_dir + app +'/' + app1 + '_post_rf.txt'
cmd = 'cp ' + bit_rf_src_file + ' ' + bit_rf_dest_file
status = os.system(cmd)
if status != 0:
print('cp data in rf_bit failure ' + app1)
exit(-1)
bit_mlpc_src_file = src_dir + 'sdc_output_ml_bit' +'/' + app1 + '_post_mlpc.txt'
bit_mlpc_dest_file = dest_dir + app +'/' + app1 + '_post_mlpc.txt'
cmd = 'cp ' + bit_mlpc_src_file + ' ' + bit_mlpc_dest_file
status = os.system(cmd)
if status != 0:
print('cp data in mlpc_bit failure ' + app1)
exit(-1)
#call sdc_comp
print('this is for %s comp_sdc under graph learning ' % app)
cmd = 'python comp_sdc.py ' + app + ' ' + 'x86' + ' ' + 'gl'
status = os.system(cmd)
if status != 0:
print('sdc comp in gl_bit failure ' + app1)
exit(-1)
print('this is for %s comp_sdc under random forest learning ' % app)
cmd = 'python comp_sdc.py ' + app + ' ' + 'x86' + ' ' + 'rf'
status = os.system(cmd)
if status != 0:
print('sdc comp in rf_bit failure ' + app1)
exit(-1)
print('this is for %s comp_sdc under MLP learning ' % app)
cmd = 'python comp_sdc.py ' + app + ' ' + 'x86' + ' ' + 'mlpc'
status = os.system(cmd)
if status != 0:
print('sdc comp in mlpc_bit failure ' + app1)
exit(-1)
# call coverage_comp
log_file = src_dir + 'glog/' + app + '.log'
cmd = 'python sdc_coverage.py ' + app + ' ' + '5' + ' ' + '105' + ' > ' + log_file
status = os.system(cmd)
if status != 0:
print('coverage comp for all methods failure ' + app)
exit(-1)
elif level == 'inst':
inst_rf_src_file = src_dir + 'sdc_output_classic' +'/' + app1 + '_rf.sdclist'
inst_rf_dest_file = dest_dir + app +'/' + app1 + '_rf.sdclist'
cmd = 'cp ' + inst_rf_src_file + ' ' + inst_rf_dest_file
status = os.system(cmd)
if status != 0:
print('cp data in inst_rf failure ' + app1)
exit(-1)
inst_svm_src_file = src_dir + 'sdc_output_classic' +'/' + app1 + '_svm.sdclist'
inst_svm_dest_file = dest_dir + app +'/' + app1 + '_svm.sdclist'
cmd = 'cp ' + inst_svm_src_file + ' ' + inst_svm_dest_file
status = os.system(cmd)
if status != 0:
print('cp data in inst_svm failure ' + app1)
exit(-1)
| 32.904 | 90 | 0.556042 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,335 | 0.324581 |
b41ac0fb4f1e55fdca39a67f5c6756119ab70fed | 68 | py | Python | onnxsim/__init__.py | Wheest/onnx-simplifier | 70e3b52aadbc73fe01029dc7ba0d4965d8fc1a8c | [
"Apache-2.0"
]
| 1,977 | 2019-04-01T10:48:18.000Z | 2022-03-31T07:43:03.000Z | onnxsim/__init__.py | fedral/onnx-simplifier | 70e3b52aadbc73fe01029dc7ba0d4965d8fc1a8c | [
"Apache-2.0"
]
| 167 | 2019-05-03T08:21:15.000Z | 2022-03-31T10:21:03.000Z | onnxsim/__init__.py | fedral/onnx-simplifier | 70e3b52aadbc73fe01029dc7ba0d4965d8fc1a8c | [
"Apache-2.0"
]
| 251 | 2019-04-01T12:21:42.000Z | 2022-03-30T18:14:39.000Z | from onnxsim.onnx_simplifier import simplify
__version__ = '0.0.0'
| 17 | 44 | 0.794118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.102941 |
b41c9702fa909cdc15c31981b7aeb56a1df4c9bb | 534 | py | Python | src/commands/__init__.py | lysol/lvlss | ca068de516159be732d2cb8c4752dee4f4ef2e09 | [
"MIT"
]
| null | null | null | src/commands/__init__.py | lysol/lvlss | ca068de516159be732d2cb8c4752dee4f4ef2e09 | [
"MIT"
]
| null | null | null | src/commands/__init__.py | lysol/lvlss | ca068de516159be732d2cb8c4752dee4f4ef2e09 | [
"MIT"
]
| null | null | null | from quit import Quit
from set_name import SetName
from who import Who
from say import Say
from look import Look
from go import Go
from take import Take
from inventory import Inventory
from drop import Drop
from make import Make
from landfill import Landfill
from item_info import ItemInfo
from script import SetScript, GetScript
from image_editing import ImageEditing
all_commands = (Quit, SetName, Who, Say, Look,
Go, Take, Inventory, Drop, Make, Landfill,
SetScript, GetScript, ItemInfo, ImageEditing)
| 28.105263 | 50 | 0.773408 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b41db3bb0788a43b8d82ec7b22eb82e644666c44 | 2,141 | py | Python | Softmax.py | tranbamanh229289/Machine-and-Data-mining- | b43a3815b74365e6e5b05b49bb92f3db4606ffca | [
"Apache-2.0"
]
| null | null | null | Softmax.py | tranbamanh229289/Machine-and-Data-mining- | b43a3815b74365e6e5b05b49bb92f3db4606ffca | [
"Apache-2.0"
]
| null | null | null | Softmax.py | tranbamanh229289/Machine-and-Data-mining- | b43a3815b74365e6e5b05b49bb92f3db4606ffca | [
"Apache-2.0"
]
| null | null | null | import Common
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
RATIO = 0.8
EPOCHS = 500
LEARN_RATE = 0.01
INDENTIFICATION_RATE = 0.6
# Read training data
X_train, Y_train, X_test, Y_test,scale_train,scale_test = Common.process(RATIO)
def preprocessing (X_train,Y_train ,X_test ,Y_test):
X_train = np.concatenate((X_train, np.ones((X_train.shape[0], 1))), axis=1)
X_test = np.concatenate((X_test, np.ones((X_test.shape[0], 1))), axis=1)
Y_train = Common.onehot(Y_train)
Y_test = Common.onehot(Y_test)
return X_train,Y_train,X_test,Y_test
X_train, Y_train, X_test, Y_test =preprocessing(X_train,Y_train ,X_test ,Y_test)
def softmax(Z):
Z = np.array(Z, dtype=np.float32)
return (np.exp(Z) / np.sum(np.exp(Z), axis=1).reshape(Z.shape[0], 1))
# Cross Entropy
def cost(X, Y, W):
Z = np.array(np.dot(X, W), dtype=np.float32)
return -np.sum(Y * np.log(softmax(Z)))
def gradient(Y, X, W, learningrate, k):
loss = []
for i in range(k):
Z = np.array(np.dot(X, W), dtype=np.float32)
delta = np.dot((Y - softmax(Z)).T, X).T
W = W + learningrate * delta
loss.append(cost(X, Y, W))
return W, loss
W = np.zeros((5, 3))
W, loss = gradient(Y_train, X_train, W, LEARN_RATE, EPOCHS)
def accuracy(W, X_test, Y_test, ratio):
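    # Binarize the predicted class probabilities at `ratio`, then count the rows whose
    # one-hot prediction differs from Y_test; accuracy is the fraction of exact matches.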
Y_predict = softmax(np.dot(X_test, W))
Y_predict[np.where(Y_predict > ratio)] = 1
Y_predict[np.where(Y_predict < ratio)] = 0
result = np.sum(np.abs(Y_test - Y_predict), axis=1)
count = 0
for i in result:
if (i != 0):
count = count + 1
N = Y_test.shape[0]
acc = (N - count) / N
return acc, Y_predict
acc, Y_predict = accuracy(W, X_test, Y_test, IDENTIFICATION_RATE)
def graph_cost(loss, EPOCHS):
plt.title("Loss", size=20)
plt.xlabel('$epochs$', size=20)
plt.ylabel('$error$', size=20)
plt.plot(np.arange(EPOCHS), loss)
plt.show()
X_train=Common.inverse(scale_train ,X_train[:,:-1])
X_test=Common.inverse(scale_test,X_test[:,:-1])
graph_cost(loss, EPOCHS)
Common.graph_accuracy(X_test, Y_test, Y_predict)
print("Accuracy :")
print(acc * 100, "%")
| 29.328767 | 80 | 0.652032 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.03503 |
b41e6039b9544ca2bf93ee054b91393cabc444ec | 1,343 | py | Python | Wallpaper change.py | Arbazkhan4712/Wallpaper-Changer-using-Python | a221443bc7e7b5410f06653fa741b9d7af0fe10f | [ "MIT" ] | 4 | 2020-04-17T06:39:23.000Z | 2021-12-25T11:05:16.000Z | Wallpaper change.py | Arbazkhan4712/Wallpaper-Changer-using-Python | a221443bc7e7b5410f06653fa741b9d7af0fe10f | [ "MIT" ] | null | null | null | Wallpaper change.py | Arbazkhan4712/Wallpaper-Changer-using-Python | a221443bc7e7b5410f06653fa741b9d7af0fe10f | [ "MIT" ] | 3 | 2020-04-03T12:36:20.000Z | 2020-06-06T15:12:04.000Z |
import ctypes
import os
import time
from pynput.keyboard import Key,Controller
import Bing
def closeTerminal():
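    # Send Alt+F4 to the currently focused window (the terminal that launched the script).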
keyboard=Controller()
keyboard.press(Key.alt)
keyboard.press(Key.f4)
keyboard.release(Key.alt)
keyboard.release(Key.f4)
def changeWallpaper(image_path):
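    # Loop forever over the images under image_path, setting each .png/.jpg as the
    # Windows desktop wallpaper for 30 seconds; roughly every 6 hours re-download
    # the Bing wallpaper of the day into the same folder.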
start=time.time()
end=time.time()
while True:
for dirname,dirnames,filenames in os.walk(image_path):
for file_name in filenames:
if (end-start)//3600 > 6:
try:
Bing.wallpaper_of_the_day(image_path)
start=time.time()
except:
pass
if file_name.endswith('.png') or file_name.endswith('.jpg'):
image=os.path.join(image_path,dirname,file_name)
SPI_SETDESKTOPWALLPAPER=20
ctypes.windll.user32.SystemParametersInfoW(SPI_SETDESKTOPWALLPAPER,0,image,3)
time.sleep(30)
end=time.time()
def main():
closeTerminal()
#configure own folder
image_path = r'D:\Wallpapers'
try:
os.makedirs(image_path)
except:
pass
try:
Bing.wallpaper_of_the_day(image_path)
except:
pass
changeWallpaper(image_path)
if __name__=='__main__':
main()
| 26.86 | 97 | 0.581534 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.043931 |
b41e78f19f2060ee9b4a3efdc51b5e3c612a3ca4 | 968 | py | Python | tests/sensitivity/sf2/sf2_test.py | vic-c137/mpi-boids-simulation | a822f20f5c1cd7cd2a6261a53adeb24e2c0115ec | [ "Apache-2.0" ] | null | null | null | tests/sensitivity/sf2/sf2_test.py | vic-c137/mpi-boids-simulation | a822f20f5c1cd7cd2a6261a53adeb24e2c0115ec | [ "Apache-2.0" ] | null | null | null | tests/sensitivity/sf2/sf2_test.py | vic-c137/mpi-boids-simulation | a822f20f5c1cd7cd2a6261a53adeb24e2c0115ec | [ "Apache-2.0" ] | null | null | null |
# Import statements
import subprocess
from os import system
# Variable declarations
np = "10"
cexe = "./Boids"
nboids = "50"
nloops = "500"
k = "7"
maxv = "10"
acc = "1.25"
width = "1000"
height = "1000"
sf1 = "1"
sf2 = "32"
min = "50"
sf3 = "8"
sf4 = "10"
dataPath = "./data/"
jexe = "BoidModelTest"
bdata = "boid_data.boid"
# Test calls
collection = [0.125, 0.25, 0.5, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 1048576]
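# Sweep the sf2 separation factor over this range; each run launches the MPI boids
# simulation, converts the boid data with the Java BoidModelTest tool, and plots the
# result with gnuplot.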
for i in collection:
print "Running test %s" % (str(i))
boidData = "run"+str(i)+".boid"
gif = "run"+str(i)+".gif"
sf2 = str(i)
subprocess.call("mpirun -np " + np +" "+ cexe +" "+ nboids +" "+ nloops +" "+ k +" "+ maxv +" "+ acc +" "+ width +" "+ height +" "+ sf1 +" "+ sf2 +" "+ min +" "+ sf3 +" "+ sf4 + " > " + dataPath + boidData, shell=True)
subprocess.call("java " + jexe + " " + gif + " " + boidData, shell=True)
system('gnuplot ./data/boid_script.gp') | 31.225806 | 220 | 0.558884 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 303 | 0.313017 |
b41e7a6675758027f59252fdd90ad0a28c111058 | 976 | py | Python | flask_start/flask_start/public/email.py | kostekci/flask_start | fa279fc8907aff9868e2596f4ed9c4d9428d2f75 | [ "MIT" ] | null | null | null | flask_start/flask_start/public/email.py | kostekci/flask_start | fa279fc8907aff9868e2596f4ed9c4d9428d2f75 | [ "MIT" ] | 95 | 2021-09-13T21:23:12.000Z | 2022-03-31T21:22:32.000Z | flask_start/flask_start/public/email.py | kostekci/flask_start | fa279fc8907aff9868e2596f4ed9c4d9428d2f75 | [ "MIT" ] | null | null | null |
from flask_mail import Message
from flask import render_template
from flask_start.extensions import mail
'''
from threading import Thread
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
'''
def send_email(subject, sender, recipients, text_body, html_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
mail.send(msg)
#Thread(target=send_async_email, args=(app, msg)).start()
def send_password_reset_email(user):
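    # Generate a reset token for the user and email it using both the plain-text and
    # HTML reset_password_mail templates.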
token = user.get_reset_password_token()
send_email('Reset Your Password',
sender='[email protected]',
recipients=[user.email],
text_body=render_template('public/reset_password_mail.txt',
user=user, token=token),
html_body=render_template('public/reset_password_mail.html',
user=user, token=token))
| 32.533333 | 75 | 0.646516 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 280 | 0.286885 |
b41f08666a2d2b54abb8df40e1f44d9b70d9644a | 7,784 | py | Python | demo/trace_model.py | furkankirac/maskrcnn-benchmark | a348dc36600e577c3ba569320f3a6a8e15986f72 | [ "MIT" ] | null | null | null | demo/trace_model.py | furkankirac/maskrcnn-benchmark | a348dc36600e577c3ba569320f3a6a8e15986f72 | [ "MIT" ] | null | null | null | demo/trace_model.py | furkankirac/maskrcnn-benchmark | a348dc36600e577c3ba569320f3a6a8e15986f72 | [ "MIT" ] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from __future__ import division
import os
import numpy
from io import BytesIO
from matplotlib import pyplot
import requests
import torch
from PIL import Image
from maskrcnn_benchmark.config import cfg
from predictor import COCODemo
from maskrcnn_benchmark.structures.image_list import ImageList
if __name__ == "__main__":
# load config from file and command-line arguments
project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
cfg.merge_from_file(
os.path.join(project_dir,
"configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml"))
cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
cfg.freeze()
# prepare object that handles inference plus adds predictions on top of image
coco_demo = COCODemo(
cfg,
confidence_threshold=0.7,
show_mask_heatmaps=False,
masks_per_dim=2,
min_image_size=480,
)
def single_image_to_top_predictions(image):
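        # Convert the HWC uint8 image to CHW, normalize with the configured pixel mean,
        # run the detector on a one-image ImageList, and keep only detections whose
        # score reaches the demo's confidence threshold.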
image = image.float() / 255.0
image = image.permute(2, 0, 1)
# we are loading images with OpenCV, so we don't need to convert them
# to BGR, they are already! So all we need to do is to normalize
# by 255 if we want to convert to BGR255 format, or flip the channels
# if we want it to be in RGB in [0-1] range.
if cfg.INPUT.TO_BGR255:
image = image * 255
else:
image = image[[2, 1, 0]]
# we absolutely want fixed size (int) here (or we run into a tracing error (or bug?)
# or we might later decide to make things work with variable size...
image = image - torch.tensor(cfg.INPUT.PIXEL_MEAN)[:, None, None]
# should also do variance...
image_list = ImageList(image.unsqueeze(0), [(int(image.size(-2)), int(image.size(-1)))])
result, = coco_demo.model(image_list)
scores = result.get_field("scores")
keep = (scores >= coco_demo.confidence_threshold)
result = (result.bbox[keep],
result.get_field("labels")[keep],
result.get_field("mask")[keep],
scores[keep])
return result
@torch.jit.script
def my_paste_mask(mask, bbox, height, width, threshold=0.5, padding=1, contour=True, rectangle=False):
# type: (Tensor, Tensor, int, int, float, int, bool, bool) -> Tensor
padded_mask = torch.constant_pad_nd(mask, (padding, padding, padding, padding))
scale = 1.0 + 2.0 * float(padding) / float(mask.size(-1))
center_x = (bbox[2] + bbox[0]) * 0.5
center_y = (bbox[3] + bbox[1]) * 0.5
w_2 = (bbox[2] - bbox[0]) * 0.5 * scale
h_2 = (bbox[3] - bbox[1]) * 0.5 * scale # should have two scales?
bbox_scaled = torch.stack([center_x - w_2, center_y - h_2,
center_x + w_2, center_y + h_2], 0)
TO_REMOVE = 1
w = (bbox_scaled[2] - bbox_scaled[0] + TO_REMOVE).clamp(min=1).long()
h = (bbox_scaled[3] - bbox_scaled[1] + TO_REMOVE).clamp(min=1).long()
scaled_mask = torch.ops.maskrcnn_benchmark.upsample_bilinear(padded_mask.float(), h, w)
x0 = bbox_scaled[0].long()
y0 = bbox_scaled[1].long()
x = x0.clamp(min=0)
y = y0.clamp(min=0)
leftcrop = x - x0
topcrop = y - y0
w = torch.min(w - leftcrop, width - x)
h = torch.min(h - topcrop, height - y)
# mask = torch.zeros((height, width), dtype=torch.uint8)
# mask[y:y + h, x:x + w] = (scaled_mask[topcrop:topcrop + h, leftcrop:leftcrop + w] > threshold)
mask = torch.constant_pad_nd((scaled_mask[topcrop:topcrop + h, leftcrop:leftcrop + w] > threshold),
(int(x), int(width - x - w), int(y), int(height - y - h))) # int for the script compiler
if contour:
mask = mask.float()
# poor person's contour finding by comparing to smoothed
mask = (mask - torch.nn.functional.conv2d(mask.unsqueeze(0).unsqueeze(0),
torch.full((1, 1, 3, 3), 1.0 / 9.0), padding=1)[0, 0]).abs() > 0.001
if rectangle:
x = torch.arange(width, dtype=torch.long).unsqueeze(0)
y = torch.arange(height, dtype=torch.long).unsqueeze(1)
r = bbox.long()
# work around script not liking bitwise ops
rectangle_mask = ((((x == r[0]) + (x == r[2])) * (y >= r[1]) * (y <= r[3]))
+ (((y == r[1]) + (y == r[3])) * (x >= r[0]) * (x <= r[2])))
mask = (mask + rectangle_mask).clamp(max=1)
return mask
@torch.jit.script
def add_annotations(image, labels, scores, bboxes, class_names=','.join(coco_demo.CATEGORIES), color=torch.tensor([255, 255, 255], dtype=torch.long)):
# type: (Tensor, Tensor, Tensor, Tensor, str, Tensor) -> Tensor
result_image = torch.ops.maskrcnn_benchmark.add_annotations(image, labels, scores, bboxes, class_names, color)
return result_image
@torch.jit.script
def combine_masks(image, labels, masks, scores, bboxes, threshold=0.5, padding=1, contour=True, rectangle=False, palette=torch.tensor([33554431, 32767, 2097151])):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, float, int, bool, bool, Tensor) -> Tensor
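        # Overlay each instance mask onto the image with a per-class colour, then draw
        # the labels, scores and boxes via add_annotations.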
height = image.size(0)
width = image.size(1)
image_with_mask = image.clone()
for i in range(masks.size(0)):
color = ((palette * labels[i]) % 255).to(torch.uint8)
one_mask = my_paste_mask(masks[i, 0], bboxes[i], height, width, threshold, padding, contour, rectangle)
image_with_mask = torch.where(one_mask.unsqueeze(-1), color.unsqueeze(0).unsqueeze(0), image_with_mask)
image_with_mask = add_annotations(image_with_mask, labels, scores, bboxes)
return image_with_mask
def process_image_with_traced_model(image):
original_image = image
if coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY:
assert (image.size(0) % coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY == 0
and image.size(1) % coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY == 0)
boxes, labels, masks, scores = traced_model(image)
# todo: make this in one large thing
result_image = combine_masks(original_image, labels, masks, scores, boxes, 0.5, 1, rectangle=True)
return result_image
def fetch_image(url):
response = requests.get(url)
return Image.open(BytesIO(response.content)).convert("RGB")
if __name__ == "__main__":
pil_image = fetch_image(
url="http://farm3.staticflickr.com/2469/3915380994_2e611b1779_z.jpg")
# convert to BGR format
image = torch.from_numpy(numpy.array(pil_image)[:, :, [2, 1, 0]])
original_image = image
if coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY:
assert (image.size(0) % coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY == 0
and image.size(1) % coco_demo.cfg.DATALOADER.SIZE_DIVISIBILITY == 0)
for p in coco_demo.model.parameters():
p.requires_grad_(False)
traced_model = torch.jit.trace(single_image_to_top_predictions, (image,))
@torch.jit.script
def end_to_end_model(image):
boxes, labels, masks, scores = traced_model(image)
result_image = combine_masks(image, labels, masks, scores, boxes, 0.5, 1, rectangle=True)
return result_image
end_to_end_model.save('end_to_end_model.pt')
result_image = process_image_with_traced_model(original_image)
# self.show_mask_heatmaps not done
pyplot.imshow(result_image[:, :, [2, 1, 0]])
pyplot.show()
# second image
image2 = fetch_image(
url='http://farm4.staticflickr.com/3153/2970773875_164f0c0b83_z.jpg')
image2 = image2.resize((640, 480), Image.BILINEAR)
image2 = torch.from_numpy(numpy.array(image2)[:, :, [2, 1, 0]])
result_image2 = process_image_with_traced_model(image2)
# self.show_mask_heatmaps not done
pyplot.imshow(result_image2[:, :, [2, 1, 0]])
pyplot.show()
| 41.185185 | 163 | 0.654034 | 0 | 0 | 0 | 0 | 3,731 | 0.479317 | 0 | 0 | 1,564 | 0.200925 |
b42110e69fbba6f3cc1175f605afe65f09844634 | 5,211 | py | Python | validation_tests/analytical_exact/river_at_rest_varying_topo_width/numerical_varying_width.py | samcom12/anuga_core | f4378114dbf02d666fe6423de45798add5c42806 | [ "Python-2.0", "OLDAP-2.7" ] | 136 | 2015-05-07T05:47:43.000Z | 2022-02-16T03:07:40.000Z | validation_tests/analytical_exact/river_at_rest_varying_topo_width/numerical_varying_width.py | samcom12/anuga_core | f4378114dbf02d666fe6423de45798add5c42806 | [ "Python-2.0", "OLDAP-2.7" ] | 184 | 2015-05-03T09:27:54.000Z | 2021-12-20T04:22:48.000Z | validation_tests/analytical_exact/river_at_rest_varying_topo_width/numerical_varying_width.py | samcom12/anuga_core | f4378114dbf02d666fe6423de45798add5c42806 | [ "Python-2.0", "OLDAP-2.7" ] | 70 | 2015-03-18T07:35:22.000Z | 2021-11-01T07:07:29.000Z |
"""Simple water flow example using ANUGA
Water driven up a linear slope and time varying boundary,
similar to a beach environment
"""
#------------------------------------------------------------------------------
# Import necessary modules
#------------------------------------------------------------------------------
import sys
import anuga
from anuga import myid, finalize, distribute
from anuga import Domain as Domain
from math import cos
from numpy import zeros, ones, array, interp, polyval, ones_like, zeros_like
from numpy import where, logical_and
from time import localtime, strftime, gmtime
from scipy.interpolate import interp1d
from anuga.geometry.polygon import inside_polygon, is_inside_triangle
#from balanced_dev import *
#-------------------------------------------------------------------------------
# Copy scripts to time stamped output directory and capture screen
# output to file
#-------------------------------------------------------------------------------
time = strftime('%Y%m%d_%H%M%S',localtime())
#output_dir = 'varying_width'+time
output_dir = '.'
output_file = 'varying_width'
#anuga.copy_code_files(output_dir,__file__)
#start_screen_catcher(output_dir+'_')
args = anuga.get_args()
alg = args.alg
verbose = args.verbose
#------------------------------------------------------------------------------
# Setup domain
#------------------------------------------------------------------------------
dx = 1.
dy = dx
L = 1500.
W = 60.
#===============================================================================
# Create sequential domain
#===============================================================================
if myid == 0:
# structured mesh
points, vertices, boundary = anuga.rectangular_cross(int(L/dx), int(W/dy), L, W, (0.,-W/2.))
#domain = anuga.Domain(points, vertices, boundary)
domain = Domain(points, vertices, boundary)
domain.set_name(output_file)
domain.set_datadir(output_dir)
#------------------------------------------------------------------------------
# Setup Algorithm, either using command line arguments
# or override manually yourself
#------------------------------------------------------------------------------
domain.set_flow_algorithm(alg)
#------------------------------------------------------------------------------
# Setup initial conditions
#------------------------------------------------------------------------------
domain.set_quantity('friction', 0.0)
domain.set_quantity('stage', 12.0)
XX = array([0.,50.,100.,150.,250.,300.,350.,400.,425.,435.,450.,470.,475.,500.,
505.,530.,550.,565.,575.,600.,650.,700.,750.,800.,820.,900.,950.,
1000.,1500.])
ZZ = array([0.,0.,2.5,5.,5.,3.,5.,5.,7.5,8.,9.,9.,9.,9.1,9.,9.,6.,5.5,5.5,5.,
4.,3.,3.,2.3,2.,1.2,0.4,0.,0.])
WW = array([40.,40.,30.,30.,30.,30.,25.,25.,30.,35.,35.,40.,40.,40.,45.,45.,50.,
45.,40.,40.,30.,40.,40.,5.,40.,35.,25.,40.,40.])/2.
depth = interp1d(XX, ZZ)
width = interp1d(XX, WW)
def bed_elevation(x,y):
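        # Start from a 25 m high plateau and carve a channel whose depth and half-width
        # are interpolated from the XX/ZZ/WW profile tables above.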
z = 25.0*ones_like(x)
wid = width(x)
dep = depth(x)
z = where( logical_and(y < wid, y>-wid), dep, z)
return z
domain.set_quantity('elevation', bed_elevation)
else:
domain = None
#===========================================================================
# Create Parallel domain
#===========================================================================
domain = distribute(domain)
#-----------------------------------------------------------------------------
# Setup boundary conditions
#------------------------------------------------------------------------------
from math import sin, pi, exp
Br = anuga.Reflective_boundary(domain) # Solid reflective wall
#Bt = anuga.Transmissive_boundary(domain) # Continue all values on boundary
#Bd = anuga.Dirichlet_boundary([1,0.,0.]) # Constant boundary values
# Associate boundary tags with boundary objects
domain.set_boundary({'left': Br, 'right': Br, 'top': Br, 'bottom': Br})
#------------------------------------------------------------------------------
# Produce a documentation of parameters
#------------------------------------------------------------------------------
if myid == 0:
parameter_file=open('parameters.tex', 'w')
parameter_file.write('\\begin{verbatim}\n')
from pprint import pprint
pprint(domain.get_algorithm_parameters(),parameter_file,indent=4)
parameter_file.write('\\end{verbatim}\n')
parameter_file.close()
#------------------------------------------------------------------------------
# Evolve system through time
#------------------------------------------------------------------------------
import time
t0 = time.time()
for t in domain.evolve(yieldstep = 0.1, finaltime = 5.0):
#print(domain.timestepping_statistics(track_speeds=True))
if myid == 0 and verbose: print(domain.timestepping_statistics())
#vis.update()
if myid == 0 and verbose: print('That took %s sec' % str(time.time()-t0))
domain.sww_merge(delete_old=True)
finalize()
| 36.440559 | 96 | 0.459797 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,748 | 0.527346 |
b424151af9b357850be4c70639941f09ba348b96 | 253 | py | Python | src/temperature/urls.py | JohanGovers/home-mon-server | e22ee05508597f11b313f8fa600a4867ad15f759 | [ "MIT" ] | null | null | null | src/temperature/urls.py | JohanGovers/home-mon-server | e22ee05508597f11b313f8fa600a4867ad15f759 | [ "MIT" ] | null | null | null | src/temperature/urls.py | JohanGovers/home-mon-server | e22ee05508597f11b313f8fa600a4867ad15f759 | [ "MIT" ] | null | null | null |
from django.conf.urls import patterns, url
from temperature import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^save_temp_reading$', views.save_temp_reading, name='save_temp_reading'),
)
| 31.625 | 88 | 0.660079 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 0.217391 |
b425096bf56f11b8a01b6bd3c09874f67758b609 | 5,767 | py | Python | FictionTools/amitools/amitools/binfmt/elf/BinFmtELF.py | polluks/Puddle-BuildTools | c1762d53a33002b62d8cffe3db129505a387bec3 | [ "BSD-2-Clause" ] | 38 | 2021-06-18T12:56:15.000Z | 2022-03-12T20:38:40.000Z | FictionTools/amitools/amitools/binfmt/elf/BinFmtELF.py | polluks/Puddle-BuildTools | c1762d53a33002b62d8cffe3db129505a387bec3 | [ "BSD-2-Clause" ] | 2 | 2021-06-20T16:28:12.000Z | 2021-11-17T21:33:56.000Z | FictionTools/amitools/amitools/binfmt/elf/BinFmtELF.py | polluks/Puddle-BuildTools | c1762d53a33002b62d8cffe3db129505a387bec3 | [ "BSD-2-Clause" ] | 6 | 2021-06-18T18:18:36.000Z | 2021-12-22T08:01:32.000Z |
from amitools.binfmt.BinImage import *
from .ELFFile import *
from .ELF import *
from .ELFReader import ELFReader
from .DwarfDebugLine import DwarfDebugLine
class BinFmtELF:
"""Handle Amiga m68k binaries in ELF format (usually from AROS)"""
def is_image(self, path):
"""check if a given file is a supported ELF file"""
with open(path, "rb") as f:
return self.is_image_fobj(f)
def is_image_fobj(self, fobj):
"""check if a given fobj is a supported ELF file"""
try:
pos = fobj.tell()
# read identifier
ident = ELFIdentifier()
ident_data = fobj.read(16)
ident.parse(ident_data)
# read header
hdr = ELFHeader()
hdr_data = fobj.read(36)
hdr.parse(hdr_data)
# seek back
fobj.seek(pos, 0)
# check header
return self.is_supported_elf(ident, hdr)
except ELFParseError:
return False
def is_supported_elf(self, ident, hdr):
"""check ELF header if its a m68k binary"""
if hdr.machine != EM_68K:
return False
if ident.osabi not in (ELFOSABI_SYSV, ELFOSABI_AROS):
return False
return True
def load_image(self, path):
"""load a BinImage from an ELF file given via path"""
with open(path, "rb") as f:
return self.load_image_fobj(f)
def load_image_fobj(self, fobj):
"""load a BinImage from an ELF file given via file object"""
# read elf file
reader = ELFReader()
elf = reader.load(fobj)
# create bin image and assign elf file
bi = BinImage(BIN_IMAGE_TYPE_ELF)
bi.set_file_data(elf)
# walk through elf sections
sect_to_seg = {}
for sect in elf.sections:
# determine segment type
seg_type = None
name = sect.name_str
flags = 0
if name == b".text":
seg_type = SEGMENT_TYPE_CODE
elif name == b".data":
seg_type = SEGMENT_TYPE_DATA
elif name == b".rodata":
seg_type = SEGMENT_TYPE_DATA
flags = SEGMENT_FLAG_READ_ONLY
elif name == b".bss":
seg_type = SEGMENT_TYPE_BSS
# we got a segment
if seg_type is not None:
size = sect.header.size
data = sect.data
seg = Segment(seg_type, size, data, flags)
bi.add_segment(seg)
# assign section to segment
seg.set_file_data(sect)
sect_to_seg[sect] = seg
# now run through segments to add relocations
bi_segs = bi.get_segments()
for seg in bi_segs:
# retrieve associated ELF section
sect = seg.get_file_data()
# any relocations?
rela = sect.get_rela()
num_rela = len(rela)
if num_rela > 0:
self.add_elf_rela(sect, seg, sect_to_seg)
# any symbols?
symbols = sect.get_symbols()
num_syms = len(symbols)
if num_syms > 0:
self.add_elf_symbols(symbols, seg)
# try to add debug info
ddl = DwarfDebugLine()
got = ddl.decode(elf)
if got:
self.add_debug_line(ddl, bi, sect_to_seg)
return bi
def add_elf_rela(self, sect, seg, sect_to_seg):
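        # Translate the section's RELA entries into BinImage relocations, grouped by
        # the target segment each relocation refers to; sections that were not mapped
        # to a segment are skipped.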
for tgt_sect in sect.get_rela_sections():
# is this a relocation to a used section?
if tgt_sect in sect_to_seg:
to_seg = sect_to_seg[tgt_sect]
rl = Relocations(to_seg)
seg.add_reloc(to_seg, rl)
# add relocations
for rel in sect.get_rela_by_section(tgt_sect):
r = Reloc(rel.offset, addend=rel.section_addend)
rl.add_reloc(r)
def add_elf_symbols(self, symbols, seg):
symtab = SymbolTable()
seg.set_symtab(symtab)
for sym in symbols:
# add entry
off = sym.value
name = sym.name_str
file_sym = sym.file_sym
if file_sym is not None:
file_name = file_sym.name_str
else:
file_name = None
symbol = Symbol(off, name, file_name)
symtab.add_symbol(symbol)
def add_debug_line(self, ddl, bi, sect_to_seg):
seg_to_dl = {}
matrix = ddl.get_matrix()
for row in matrix:
sect = row.section
if sect in sect_to_seg:
segment = sect_to_seg[sect]
# fetch debug info
if segment in seg_to_dl:
dl, file_to_df = seg_to_dl[segment]
else:
dl = DebugLine()
file_to_df = {}
segment.set_debug_line(dl)
seg_to_dl[segment] = (dl, file_to_df)
# fetch file instance
fid = row.file
if fid in file_to_df:
df = file_to_df[fid]
else:
df = DebugLineFile(ddl.get_file_name(fid), ddl.get_file_dir(fid))
dl.add_file(df)
file_to_df[fid] = df
# add entry
e = DebugLineEntry(row.address, row.line)
df.add_entry(e)
# mini test
if __name__ == "__main__":
import sys
bf = BinFmtELF()
for a in sys.argv[1:]:
if bf.is_image(a):
print("loading", a)
bi = bf.load_image(a)
print(bi)
else:
print("NO ELF:", a)
| 31.686813 | 85 | 0.521415 | 5,341 | 0.926131 | 0 | 0 | 0 | 0 | 0 | 0 | 860 | 0.149124 |
b425e1b4a3766b7202ee32581542acc01753bfbd | 11,532 | py | Python | recordtransform.py | Andresfgomez970/Managing-.wav-files-in-python | 2bf344a3217efe9dc15349ef4be14f2e5cb53ace | [ "MIT" ] | null | null | null | recordtransform.py | Andresfgomez970/Managing-.wav-files-in-python | 2bf344a3217efe9dc15349ef4be14f2e5cb53ace | [ "MIT" ] | null | null | null | recordtransform.py | Andresfgomez970/Managing-.wav-files-in-python | 2bf344a3217efe9dc15349ef4be14f2e5cb53ace | [ "MIT" ] | null | null | null |
import pyaudio
import wave
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.pylab as plt
from scipy.io import wavfile
import cmath as cm
from scipy.fftpack import fft
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from scipy.io.wavfile import write
from scipy import signal
def recordaudio(chunk,formato,Channels,Rate,Record_seconds,
wave_output_name):
    '''Record audio from the default input device and save it as a WAV file.
    chunk: frames per buffer read from the input stream
    formato: PyAudio sample format (e.g. pyaudio.paInt16)
    Channels: number of channels to record
    Rate: sampling rate in frames per second
    Record_seconds: length of the recording in seconds
    wave_output_name: name of the output .wav file
'''
p=pyaudio.PyAudio()
stream=p.open(format=formato,
channels=Channels,
rate=Rate,
input=True,
frames_per_buffer=chunk)
print("Recording..")
frames=[]
for i in range(0,int(Rate/chunk*Record_seconds)):
data=stream.read(chunk)
frames.append(data)
print("Done recording.")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(wave_output_name, 'wb')
wf.setnchannels(Channels)
wf.setsampwidth(p.get_sample_size(formato))
wf.setframerate(Rate)
wf.writeframes(b''.join(frames))
wf.close()
def generatetones(secondpertone,steptone,littletone,maxtone,rate,name):
t = np.linspace(0,secondpertone,rate*secondpertone)
lista = []
for i in range(littletone,maxtone+steptone,steptone):
data = np.sin(t*i*2*np.pi)
lista += list(data)
scaled = np.int16(lista/np.max(np.abs(lista)) * 32000)
write(name, rate, scaled )
def getsignal(wave_output_name,Record_seconds):
fs, data = wavfile.read(wave_output_name)
#Necessary parameters to the fourier transform
try:
tamdata = data[:,0].size
except:
tamdata = data.size
dt = Record_seconds*1./tamdata
t = np.arange(0,Record_seconds-dt/2,dt)
try:
return t,data[:,0],dt
except:
return t,data,dt
def Fourier1(time, data,dt):
dataft = np.fft.fftshift(np.fft.fft(np.fft.fftshift(data)))*dt
freq = np.arange(-1/(2*dt),1/(2*dt)-1/(2*dt*time.size),1/(dt*time.size))
return freq,dataft
def dft(f,w,t,sign):
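    # Direct O(len(w) * len(t)) evaluation of the discrete Fourier transform at the
    # angular frequencies in w; `sign` selects the forward/inverse exponent convention.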
if type(f)==type(w):
F = f
else:
F = f(t)
DFT = []
for j in w:
r2 = 0
for i in range(len(t)):
r2 += F[i]*np.exp(1j*j*t[i]*sign)
DFT.append( (t[-1]-t[-2]) *r2)
return t,np.array(DFT)
def plotfourier(freq,dataft,fi,ff,norm):
if norm=='yes':
plt.plot(freq,abs(dataft)/abs(dataft).sum(),'b',linewidth='5')
plt.title('Normalized spectrum of frequencies',fontsize=25)
else:
plt.plot(freq,abs(dataft),'b',linewidth='5')
plt.title('Spectrum of frquencies',fontsize=25)
plt.xlim(fi,ff)
plt.ylabel('a.u.',fontsize=20)
plt.xlabel('Frecuencia (Hz)',fontsize=20)
plt.grid()
def recordtransform(chunk,formato,Channels,Rate,Record_seconds,wave_output_name,fi,ff,norm):
recordaudio(chunk,formato,Channels,Rate,Record_seconds,wave_output_name)
time, data,dt = getsignal(wave_output_name,Record_seconds)
freq,dataft = Fourier1(time, data,dt)
plotfourier(freq,dataft,fi,ff,norm)
plt.show()
def plotonlytransform(chunk,formato,Channels,Rate,Record_seconds,wave_output_name,fi,ff,norm):
time, data,dt = getsignal(wave_output_name,Record_seconds)
freq,dataft = Fourier1(time, data,dt)
plotfourier(freq,dataft,fi,ff,norm)
plt.show()
def zoomplotonlytransform(chunk,formato,Channels,Rate,Record_seconds,wave_output_name,fi,ff,norm):
time, data,dt = getsignal(wave_output_name,Record_seconds)
freq,dataft = Fourier1(time, data,dt)
plt.subplot(2,1,1)
plt.plot(freq,abs(dataft)/abs(dataft).sum(),'b',linewidth='5')
plt.title('Normalized spectrum of frequencies',fontsize=15)
plt.xlim(fi,ff)
plt.subplot(2,1,2)
plt.plot(freq,abs(dataft)/abs(dataft).sum(),'b',linewidth='5')
plt.title('Zoom to measured frequency',fontsize=15)
con1 = abs(dataft)==abs(dataft).max()
ft=abs(freq[con1])
ft = ft[0]
plt.xlim(ft-0.5,ft+0.5)
plt.ylabel('a.u.',fontsize=20)
plt.xlabel('Frecuencia (Hz)',fontsize=20)
plt.grid()
con1 = abs(dataft)==abs(dataft).max()
print ('Frequency found at maximum value: %.2f \n ' % (abs(freq[con1])) )
plt.show()
def comparing(chunk,formato,Channels,Rate,Record_seconds,wavename1,
wavename2,fi,ff,norm,tol):
time, data,dt = getsignal(wavename1,Record_seconds)
freq,dataft = Fourier1(time, data,dt)
time2, data2,dt = getsignal(wavename2,Record_seconds)
freq2,dataft2 = Fourier1(time2, data2,dt)
plt.figure(figsize=(20,10))
plt.subplot(2,2,1)
plt.plot(freq,abs(dataft)/abs(dataft).sum(),'b',linewidth='5')
plt.title('Normalized spectrum of frequencies',fontsize=15)
plt.xlim(fi,ff)
plt.ylabel('a.u.',fontsize=10)
plt.xlabel('Frecuencia (Hz)',fontsize=10)
plt.grid()
plt.subplot(2,2,2)
plt.plot(freq,abs(dataft)/abs(dataft).sum(),'b',linewidth='5')
plt.title('Zoom to measured frequency',fontsize=15)
con1 = abs(dataft)==abs(dataft).max()
ft1= abs(freq[con1])
plt.xlim(ft1-tol,ft1+tol)
plt.ylabel('a.u.',fontsize=10)
plt.xlabel('Frecuencia (Hz)',fontsize=10)
plt.grid()
plt.subplot(2,2,3)
plt.plot(freq2,abs(dataft2)/abs(dataft2).sum(),'b',linewidth='5')
plt.title('Normalized spectrum of frequencies',fontsize=15)
plt.xlim(fi,ff)
plt.ylabel('a.u.',fontsize=10)
plt.xlabel('Frecuencia (Hz)',fontsize=10)
plt.grid()
plt.subplot(2,2,4)
plt.plot(freq2,abs(dataft2)/abs(dataft2).sum(),'b',linewidth='5')
plt.title('Normalized spectrum of frequencies',fontsize=15)
con2 = abs(dataft2)==abs(dataft2).max()
ft2=abs(freq2[con2])
plt.xlim(ft2-tol,ft2+tol)
plt.ylabel('a.u.',fontsize=10)
plt.xlabel('Frecuencia (Hz)',fontsize=10)
plt.grid()
print ('The difference was of %.2f Hz' %(abs(ft1-ft2)) )
plt.show()
def f(wave_output_name,Record_seconds,time):
t,data,dt = getsignal(wave_output_name,Record_seconds)
datapersecond = len(data)/Record_seconds
freqtimes = []
dataft_times = []
times = []
for i in range(Record_seconds/time):
datai = data[i*time*datapersecond:(i+1)*time*datapersecond]
timei = t[i*time*datapersecond:(i+1)*time*datapersecond]
dataft = np.fft.fftshift(np.fft.fft(np.fft.fftshift(datai)))*dt
freq = np.arange(-1/(2*dt),1/(2*dt)-1/(2*dt*timei.size),1/(dt*timei.size))
freqtimes.append(freq)
dataft_times.append(dataft)
times.append( (i+1)*time )
fig = plt.figure()
ax = fig.gca(projection='3d')
# Make data.
X = times
Y = freqtimes
for i in range(len(times)):
#plt.plot(np.array([1,2]), np.array([1,2]), np.array([1,2]) ,'o')
plt.plot( np.ones(len(freqtimes[i]))*times[i] , freqtimes[i] , abs(dataft_times[i]))
ax.set_xlabel('Time')
ax.set_ylabel('Freq')
ax.set_zlabel('A.U.')
plt.show()
for i in range(1000,20000,1000):
plt.plot( i,freqtimes[i/1000].max() ,'ko')
plt.show()
for i in range(len(times)):
plt.plot(freqtimes[i], abs(dataft_times[i] ) )
plt.show()
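# NOTE: the four-argument f() defined below overrides this three-argument version;
# only the later definition is called in the examples under __main__.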
def f(wave_output_name,Record_seconds,time,Rate):
tm = 1./Rate
a = time%tm
if a>=tm/2.:
time = time + (tm - time%tm)
else:
time = time - time%tm
t,data,dt = getsignal(wave_output_name,Record_seconds)
datapersecond = len(data)/Record_seconds
freqtimes = []
dataft_times = []
times = []
for i in range( int(Record_seconds/time) ):
s1 , s2 = int(i*time*datapersecond),int( (i+1)*time*datapersecond)
datai = data[s1:s2]
timei = t[s1:s2]
dataft = np.fft.fftshift(np.fft.fft(np.fft.fftshift(datai)))*dt
freq = np.arange(-1/(2*dt),1/(2*dt)-1/(2*dt*timei.size),1/(dt*timei.size))
freqtimes.append(freq)
dataft_times.append(dataft)
times.append( (i+1)*time )
fig = plt.figure()
ax = fig.gca(projection='3d')
# Make data.
X = times
Y = freqtimes
for i in range(len(times)):
plt.plot( np.ones(len(freqtimes[i]))*times[i] , freqtimes[i] , abs(dataft_times[i]))
ax.set_xlabel('Time')
ax.set_ylabel('Freq')
ax.set_zlabel('A.U.')
if __name__ == "__main__":
chunk=1024 #number of frames
formato=pyaudio.paInt16 #format of the record
Channels=2 #Number of channels to record (this alter data)
Rate=16040 #Number of frames per second
    Record_seconds=38  # length of the recording
wavename1="records/test1withegeneratednoise.wav" #output file name
fi,ff=0,20000
norm = 'yes'
wavename2 = "records/test1.wav"
### Example 1
print("\nThe transform of the file 'test1withegeneratednoise.wav' is \n shown:\n")
plotonlytransform(chunk,formato,Channels,Rate,Record_seconds,wavename1,fi,ff,norm)
### Example 2
print("\nThe transform of the file '3200.wav' is shown and also a \n zoom to the maximum value of the fourirer tranform:\n")
### This part measure a given frequency that is already in a wave format in the program; in
### addition a zoom is made to it with some tolerance
Rate=44100
Record_seconds=4.99
wavename2 = "records/3200.wav"
fi, ff = 0, 10000
zoomplotonlytransform(chunk,formato,Channels,Rate,Record_seconds,wavename2,fi,ff,norm)
### Example 3
### This part record with the computer microphone and after that
### show the fourier transform of the record
#You could change the paramters of the record that is going to be made
Record_seconds=5
wave_output_name = 'recorded.wav'
recordtransform(chunk,formato,Channels,Rate,Record_seconds,wave_output_name,fi,ff,norm)
### Example 4
###This part plot the transform of the two wave files and permits
### to compare the amplitues and the frequencies at the maximum
### amplitude
Record_seconds= 3.0
wavename1="records/1000.wav"
wavename2="records/1000t.wav"
ft = 3265
tol = 3
comparing(chunk,formato,Channels,Rate,Record_seconds,wavename1,
wavename2,fi,ff,norm,tol)
### Example 4
###This is basically the short fourier transform
### it is important to know that the algorithm
### chose as step time the nearer on to the one that
### you give that satisfy being a multiple of the
### recorded seconds.
wave_output_name = "records/1000.wav"
Record_seconds = 3
time = 0.1
Rate = 46080
f(wave_output_name,Record_seconds,time,Rate)
plt.show()
### Example 5
###This algorithm compare the Fourier transform given by python
### with one that I made, it is a way to test the the programed is
### expected to work with some cases at least, a further analysis
### could explain the differences (The graphs were scales for a value)
### chosen at hand.
wavename = 'records/3265.wav'
Record_seconds = 3
t, data, dt = getsignal(wavename,Record_seconds)
freq, dataft = Fourier1(t, data,dt)
data = data[1000:1500]
t = t[1000:1500]
w = np.arange(-np.pi/dt,np.pi/dt,2*np.pi/(len(t)*dt) )
t, ft = dft(data,w,t,1)
plt.plot(w/(2*np.pi),abs(ft.real)/abs(ft.real).sum()*(0.0169/0.0881) ,'b')
plt.plot(freq,abs(dataft.real)/abs(dataft.real).sum() ,'g')
plt.show()
| 31.508197 | 129 | 0.644034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,506 | 0.217308 |
b4266c4983e7f09a613d7773116f8f267c2d1a3a | 2,994 | py | Python | AllSidesScraper/allsides.py | Epicrider/polibalance | 88a0adf54d09baeac3dcad36ce119640d6aa990b | [ "MIT" ] | null | null | null | AllSidesScraper/allsides.py | Epicrider/polibalance | 88a0adf54d09baeac3dcad36ce119640d6aa990b | [ "MIT" ] | null | null | null | AllSidesScraper/allsides.py | Epicrider/polibalance | 88a0adf54d09baeac3dcad36ce119640d6aa990b | [ "MIT" ] | null | null | null |
from bs4 import BeautifulSoup
import requests
from communityFeedback import *
from time import sleep
from rich.progress import track
import json
page = [
'https://www.allsides.com/media-bias/media-bias-ratings',
]
def table(full_table):
# The main table
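    # Each row becomes a dict with the news source, its AllSides bias rating, the info
    # page URL, the community agree/disagree votes, their ratio and the derived verdict.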
print('Web scraper is parsing the table!')
for url in page:
source = requests.get(url)
soup = BeautifulSoup(source.content, 'lxml')
main_table = soup.select('tbody tr')
for row in main_table:
f = dict() # dictionary
f['News Source'] = row.select_one('.source-title').text.strip()
f['AllSides Bias Rating'] = row.select_one(
'.views-field-field-bias-image a')['href'].split('/')[-1]
f['News Media Info'] = 'https://www.allsides.com' + \
row.select_one('.source-title a')['href']
f['Agree'] = int(row.select_one('.agree').text)
f['Disagree'] = int(row.select_one('.disagree').text)
f['Ratio'] = (f['Agree'] / f['Disagree'])
f['Community feedback'] = communityVote(f['Ratio'])
f['Ratio'] = "{:.3f}".format(f['Ratio'])
full_table.append(f) # adds it to the empty list
sleep(10) # this is due to the ten seconds before request in robots.txt
return full_table
def website(full_table):
# Enters into the info page and parses out the info
for f in track(full_table, description="Parsing..."):
source = requests.get(f['News Media Info'])
soup = BeautifulSoup(source.content, 'lxml')
try:
# getting the website link to news source
locate_html_class = soup.find('div', {'class': 'dynamic-grid'})
locate_paragraph = locate_html_class.find('a')['href']
f['News Source Site'] = locate_paragraph
except TypeError:
pass
try:
# getting the creation date of the news source
locate__html_class = soup.find('div', {'class': 'dynamic-grid'})
locate_paragraph = locate__html_class.find_all('p')[1].text.split('.')[-1].strip()
f['Established'] = locate_paragraph
except IndexError:
pass
try:
# Who the news source owned by
locate__html_class = soup.find('div', {'class': 'dynamic-grid'})
locate_paragraph = locate__html_class.find_all('p')[2].text.split(':')[-1].strip()
f['Owned by'] = locate_paragraph
except IndexError:
pass
sleep(10)
return full_table
def saving_data(full_table):
# Saves the data into a json file with no lines
with open('all-sides.json', 'w', newline="") as i:
json.dump(full_table, i)
def main():
# main function
full_table = [] # empty list
full_table = table(full_table)
full_table = website(full_table)
saving_data(full_table)
print('Parsing has finished!')
if __name__ == '__main__':
main()
| 32.193548 | 94 | 0.59352 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 961 | 0.320975 |
b426f99a8bac6c3327cab3da97ce79ef51269da3 | 1,068 | py | Python | commands/calc.py | periodicaidan/dalton-cli | 6a83e1a2675e335bf807c43c4201d78e5b389837 | [ "MIT" ] | 2 | 2018-12-21T19:09:49.000Z | 2018-12-22T10:41:36.000Z | commands/calc.py | periodicaidan/dalton-cli | 6a83e1a2675e335bf807c43c4201d78e5b389837 | [ "MIT" ] | null | null | null | commands/calc.py | periodicaidan/dalton-cli | 6a83e1a2675e335bf807c43c4201d78e5b389837 | [ "MIT" ] | null | null | null |
"""
File: commands/calc.py
Purpose: Performs calculations in response to user input, and outputs the result
"""
from sys import argv
import click
from calculator import *
from models import History
from models.Config import Config
from help_menus import calc_help
@click.group("calc", invoke_without_command=True)
@click.option("-M", "--mass-spec",
is_flag=True, default=False,
help="Get a theoretical mass spectrum of a molecule")
@click.option("-i", "--histogram",
is_flag=True, default=False,
help="Use with -M/--mass-spec to display the mass spec as a histogram")
@click.argument("formula", required=False)
def calc(mass_spec, histogram, formula):
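    # With no real arguments, show the calc help text; otherwise print either the
    # theoretical mass spectrum or the (possibly cached) mass of the given formula.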
config = Config.setup() # todo: Pass as context
if not any(locals().items()) or len(argv) == 2:
calc_help()
else:
if mass_spec:
click.echo(get_mass_spec(formula, histogram))
else:
mass = History.get(formula)["mass"] or get_mass(formula)
click.echo("%.3f %s" % (mass, config.units))
| 31.411765 | 85 | 0.652622 | 0 | 0 | 0 | 0 | 798 | 0.747191 | 0 | 0 | 310 | 0.290262 |
b42826894cb5a72b4000d0d8ef3a13b2f541b2b5 | 3,271 | py | Python | aot/meta_triggers/metatrigger_treasure.py | jaycheungchunman/age-of-triggers | f2a75685a0b0cc9e26132d4f52b6ed2c4798f6b4 | [ "MIT" ] | null | null | null | aot/meta_triggers/metatrigger_treasure.py | jaycheungchunman/age-of-triggers | f2a75685a0b0cc9e26132d4f52b6ed2c4798f6b4 | [ "MIT" ] | null | null | null | aot/meta_triggers/metatrigger_treasure.py | jaycheungchunman/age-of-triggers | f2a75685a0b0cc9e26132d4f52b6ed2c4798f6b4 | [ "MIT" ] | null | null | null |
from aot import *
from aot.model.trigger import *
from aot.model.condition import *
from aot.model.effect import *
from aot.meta_triggers.metatrigger import MetaTrigger
from aot.model.enums.resource import EnumResource
from aot.model.enums.player import PlayerEnum
from aot.model.enums.unit import UnitConstant, UnitType
class Treasure(MetaTrigger):
def __init__(self, x, y, unit, amount, resource, players=range(1, 9), create_the_unit=False,
trigger_name="treasure"):
self.players = players
self.trigger_name = trigger_name
self.x = x
self.amount = amount
self.resource = resource
self.unit = unit
self.y = y
self.create_the_unit = create_the_unit
def setup(self, scenario):
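        # Optionally place the treasure unit, then give every listed player a trigger:
        # when one of their units stands next to the treasure, tribute the reward,
        # announce it in chat, and remove the treasure object from the map.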
if self.create_the_unit:
scenario.units.new(owner=0, x=self.x, y=self.y)
for p in self.players:
t = Trigger(self.trigger_name+" (P{})".format(p), enable=True)
t.if_(ObjectInArea(0,
amount=1,
unit_cons=self.unit,
x1=max(0, self.x - 1), y1=max(0, self.y - 1),
x2=min(scenario.map.width, self.x + 1),
y2=min(scenario.map.height, self.y + 1)))\
.if_(ObjectInArea(p,
amount=1,
x1=max(0, self.x - 1), y1=max(0, self.y - 1),
x2=min(scenario.map.width, self.x + 1),
y2=min(scenario.map.height, self.y + 1))) \
.then_(Tribute(p, self.amount, self.resource, silent=False)) \
.then_(SendChat(player=p, message="You found a treasure !")) \
.then_(RemoveObject(player=PlayerEnum.GAIA.value,
unit_cons=self.unit, x1=self.x, x2=self.x, y1=self.y, y2=self.y))
scenario.triggers.add(t)
class TreasureLoot(Treasure):
def __init__(self, x, y, amount, players=range(1, 9), create_the_unit=True):
super().__init__(x=x, y=y, unit=UnitConstant.LOOT.value,
amount=amount,
create_the_unit=create_the_unit,
players=players, resource=EnumResource.GOLD.value,
trigger_name="TreasureLoot({},{})".format(x,y))
class TreasureLumber(Treasure):
def __init__(self, x, y, amount, players=range(1, 9), create_the_unit=True):
super().__init__(x=x, y=y, unit=UnitConstant.LUMBER.value,
create_the_unit=create_the_unit,
amount=amount,
players=players, resource=EnumResource.WOOD.value,
trigger_name="TreasureLumber({},{})".format(x,y))
class TreasureQuarry(Treasure):
def __init__(self, x, y, amount, players=range(1, 9), create_the_unit=True, ):
super().__init__(x=x, y=y, unit=UnitConstant.QUARRY.value,
create_the_unit=create_the_unit,
amount=amount,
players=players, resource=EnumResource.STONE.value,
trigger_name="TreasureQuarry({},{})".format(x,y))
| 45.430556 | 101 | 0.551819 | 2,939 | 0.898502 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.033323 |
b4283b91c4a94a15dbf38eab20ef16e0e0641f20 | 2,625 | py | Python | agent/lm_agent/server_interfaces/lsdyna.py | omnivector-solutions/license-manager | 9eb1e4569d692aef83a2388096e7413bc010be61 | [
"MIT"
]
| 2 | 2020-11-15T22:54:39.000Z | 2022-02-15T07:58:55.000Z | agent/lm_agent/server_interfaces/lsdyna.py | omnivector-solutions/license-manager | 9eb1e4569d692aef83a2388096e7413bc010be61 | [
"MIT"
]
| 2 | 2022-02-18T19:36:45.000Z | 2022-03-16T23:07:44.000Z | agent/lm_agent/server_interfaces/lsdyna.py | omnivector-solutions/license-manager | 9eb1e4569d692aef83a2388096e7413bc010be61 | [
"MIT"
]
| null | null | null | """LS-Dyna license server interface."""
import typing
from lm_agent.config import settings
from lm_agent.exceptions import LicenseManagerBadServerOutput
from lm_agent.parsing import lsdyna
from lm_agent.server_interfaces.license_server_interface import LicenseReportItem, LicenseServerInterface
from lm_agent.server_interfaces.utils import run_command
class LSDynaLicenseServer(LicenseServerInterface):
"""Extract license information from LS-Dyna license server."""
def __init__(self, license_servers: typing.List[str]):
"""Initialize the license server instance with the license server host and parser."""
self.license_servers = license_servers
self.parser = lsdyna.parse
def get_commands_list(self):
"""Generate a list of commands with the available license server hosts."""
host_ports = [(server.split(":")[1:]) for server in self.license_servers]
commands_to_run = []
for host, port in host_ports:
command_line = f"{settings.LSDYNA_PATH} -s {port}@{host} -R"
commands_to_run.append(command_line)
return commands_to_run
async def get_output_from_server(self):
"""Override abstract method to get output from Ls-Dyna license server."""
# get the list of commands for each license server host
commands_to_run = self.get_commands_list()
        # run each command in the list, one at a time, until one succeeds
for cmd in commands_to_run:
output = await run_command(cmd)
# try the next server if the previous didn't return the expected data
if output is None:
continue
return output
raise RuntimeError("None of the checks for LS-Dyna succeeded!")
async def get_report_item(self, product_feature: str):
"""Override abstract method to parse LS-Dyna license server output into License Report Item."""
server_output = await self.get_output_from_server()
parsed_output = self.parser(server_output)
(_, feature) = product_feature.split(".")
current_feature_item = parsed_output.get(feature)
# raise exception if parser didn't output license information
if current_feature_item is None:
raise LicenseManagerBadServerOutput("Invalid data returned from parser.")
report_item = LicenseReportItem(
product_feature=product_feature,
used=current_feature_item["used"],
total=current_feature_item["total"],
used_licenses=current_feature_item["uses"],
)
return report_item
| 38.602941 | 105 | 0.694476 | 2,269 | 0.864381 | 0 | 0 | 0 | 0 | 1,484 | 0.565333 | 826 | 0.314667 |
b428942d04da784eb0b105b8727b2b0340163593 | 2,634 | py | Python | examples/gan/gan_embeddings.py | ojmakhura/DIGITS | f34e62c245054b51ea51fcb8949d2ca777f162d1 | [
"BSD-3-Clause"
]
| null | null | null | examples/gan/gan_embeddings.py | ojmakhura/DIGITS | f34e62c245054b51ea51fcb8949d2ca777f162d1 | [
"BSD-3-Clause"
]
| null | null | null | examples/gan/gan_embeddings.py | ojmakhura/DIGITS | f34e62c245054b51ea51fcb8949d2ca777f162d1 | [
"BSD-3-Clause"
]
| null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
import argparse
import os
import pickle
import shutil
import numpy as np
import PIL.Image
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
TB_DIR = os.path.join(os.getcwd(), "gan-tb")
SPRITE_IMAGE_FILENAME = os.path.join(TB_DIR, "sprite.png")
def save_tb_embeddings(embeddings_filename):
f = open(embeddings_filename, 'rb')
embeddings = pickle.load(f)
images = embeddings['images']
zs = embeddings['zs']
# overwrite Tensorboard log dir if necessary
if os.path.exists(TB_DIR):
shutil.rmtree(TB_DIR)
os.makedirs(TB_DIR)
# create grid image
img_width, img_height = save_sprite_image(images)
with tf.device('cpu:0'):
# create embedding var
embedding_var = tf.Variable(initial_value=zs)
# save projector config
summary_writer = tf.summary.FileWriter(TB_DIR)
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = embedding_var.name
embedding.sprite.image_path = SPRITE_IMAGE_FILENAME
embedding.sprite.single_image_dim.extend([img_width, img_height])
projector.visualize_embeddings(summary_writer, config)
# save embeddings
sess = tf.Session()
sess.run(embedding_var.initializer)
saver = tf.train.Saver([embedding_var])
saver.save(sess, os.path.join(TB_DIR, 'model.ckpt'))
def save_sprite_image(images):
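    # Tile all embedding images into one roughly square sprite sheet (as required by
    # the TensorBoard projector) and return the per-thumbnail width and height.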
n_embeddings = images.shape[0]
grid_cols = int(np.sqrt(n_embeddings))
grid_rows = int(np.ceil(float(n_embeddings) / grid_cols))
img_height, img_width, img_channels = images[0].shape
grid_image = np.empty((img_height * grid_rows, img_width * grid_cols, img_channels))
for i, image in enumerate(images):
        row = i // grid_cols  # integer division: the row index must be an int for array slicing below
col = i % grid_cols
x = img_width * col
y = img_height * row
grid_image[y:y + img_height, x:x + img_width] = image
grid_image = PIL.Image.fromarray(grid_image.astype('uint8'))
grid_image.save(SPRITE_IMAGE_FILENAME)
return img_width, img_height
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Inference tool - DIGITS')
# Positional arguments
parser.add_argument(
'embeddings_file',
help='Embeddings pickle file')
args = vars(parser.parse_args())
try:
save_tb_embeddings(
args['embeddings_file'],
)
except Exception as e:
        print('%s: %s' % (type(e).__name__, e))  # Python 3 exceptions have no .message attribute
raise
| 29.595506 | 88 | 0.678056 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 395 | 0.149962 |
b42ac51788c29cf80e7fbe118ac8d2f98639006c | 30 | py | Python | punch_version.py | joshua-s/punch | c29751844ecf654cc21966a14842e8165e0bc300 | [ "ISC" ] | null | null | null | punch_version.py | joshua-s/punch | c29751844ecf654cc21966a14842e8165e0bc300 | [ "ISC" ] | null | null | null | punch_version.py | joshua-s/punch | c29751844ecf654cc21966a14842e8165e0bc300 | [ "ISC" ] | null | null | null |
major = 1
minor = 4
patch = 5
| 7.5 | 9 | 0.6 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b42bbf006e731b294f9bc03e50361e650c57e4a5 | 97 | py | Python | 17.py | yonghuatang/python | 6d53bf2f1e75ac03b07eb56ff83e26d8b5155bb1 | [ "MIT" ] | null | null | null | 17.py | yonghuatang/python | 6d53bf2f1e75ac03b07eb56ff83e26d8b5155bb1 | [ "MIT" ] | null | null | null | 17.py | yonghuatang/python | 6d53bf2f1e75ac03b07eb56ff83e26d8b5155bb1 | [ "MIT" ] | null | null | null |
from datetime import date
now = date.today()
print('The date today is', now, now.strftime("%A"))
| 24.25 | 51 | 0.701031 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.237113 |
b42bc72a01713bbb619aec869a9dad62431b9ce2 | 4,613 | py | Python | pyxtal/miscellaneous/from_ase_molecule.py | ubikpt/PyXtal | 32da046a2bde542279824d6377aea116b679a2e7 | [ "MIT" ] | 127 | 2018-09-21T22:27:17.000Z | 2022-03-30T21:11:49.000Z | pyxtal/miscellaneous/from_ase_molecule.py | ubikpt/PyXtal | 32da046a2bde542279824d6377aea116b679a2e7 | [ "MIT" ] | 171 | 2018-08-06T07:10:24.000Z | 2022-03-29T00:59:53.000Z | pyxtal/miscellaneous/from_ase_molecule.py | ubikpt/PyXtal | 32da046a2bde542279824d6377aea116b679a2e7 | [ "MIT" ] | 50 | 2018-08-12T22:50:46.000Z | 2022-03-23T07:52:47.000Z |
from pyxtal.molecule import *
from ase.build import molecule
from pymatgen.core import Molecule
def get_ase_mol(molname):
"""convert ase molecule to pymatgen style"""
ase_mol = molecule(molname)
pos = ase_mol.get_positions()
symbols = ase_mol.get_chemical_symbols()
return Molecule(symbols, pos)
if __name__ == "__main__":
# ---------------------------------------------------
for name in ["H2", "H2O", "HCl", "CS2", "C2Cl4", "PH3", "CH4", "C6H6", "C60"]:
mol = get_ase_mol(name)
pga = PointGroupAnalyzer(mol)
# Symmetrize the molecule using pymatgen
mol = pga.symmetrize_molecule()["sym_mol"]
pga = PointGroupAnalyzer(mol)
print(name, " has point group symmetry: ", pga.get_pointgroup())
# Check if orders of rotation are detected correctly
pg = pga.get_pointgroup()
for op in pg:
opa = OperationAnalyzer(op)
if opa.order == "irrational":
print(opa)
elif opa.order > 10:
print(opa)
# orientation_in_wyckoff_position(mol, sg, WP's index in sg)
# returns a list of orientations consistent with the WP's symmetry.
# We can choose any of these orientations at random using np.random.choice
# To use an orientation, do mol.apply_operation(orientation)
# Spacegroup 16, index 6 has .2. symmetry
# check 2 fold rotation
allowed = orientation_in_wyckoff_position(mol, 16, 6, randomize=True)
if allowed is not False:
print(
"Found " + str(len(allowed)) + " orientations for ",
name,
" with site symm 2",
)
for i, op in enumerate(allowed):
mo = deepcopy(mol)
mo.apply_operation(op)
filename = "xyz/" + name + "-" + str(i) + ".xyz"
mo.to(fmt="xyz", filename=filename)
# check reflection
allowed = orientation_in_wyckoff_position(mol, 25, 2, randomize=True)
if allowed is not False:
print(
"Found " + str(len(allowed)) + " orientations for ",
name,
" with site symm m",
)
for i, op in enumerate(allowed):
mo = deepcopy(mol)
mo.apply_operation(op)
filename = "xyz/" + name + "-" + str(i) + ".xyz"
mo.to(fmt="xyz", filename=filename)
# check 3 fold rotation
allowed = orientation_in_wyckoff_position(mol, 147, 4, randomize=True)
if allowed is not False:
print(
"Found " + str(len(allowed)) + " orientations for ",
name,
" with site symm 3",
)
for i, op in enumerate(allowed):
mo = deepcopy(mol)
mo.apply_operation(op)
filename = "xyz/" + name + "-" + str(i) + ".xyz"
mo.to(fmt="xyz", filename=filename)
# check -1
allowed = orientation_in_wyckoff_position(mol, 2, 2, randomize=True)
if allowed is not False:
print(
"Found " + str(len(allowed)) + " orientations for ",
name,
" with site symm -1",
)
for i, op in enumerate(allowed):
mo = deepcopy(mol)
mo.apply_operation(op)
filename = "xyz/" + name + "-" + str(i) + ".xyz"
mo.to(fmt="xyz", filename=filename)
# check 2/m
allowed = orientation_in_wyckoff_position(mol, 64, 6, randomize=True)
if allowed is not False:
print(
"Found " + str(len(allowed)) + " orientations for ",
name,
" with site symm 2/m",
)
for i, op in enumerate(allowed):
mo = deepcopy(mol)
mo.apply_operation(op)
filename = "xyz/" + name + "-" + str(i) + ".xyz"
mo.to(fmt="xyz", filename=filename)
# check 6
allowed = orientation_in_wyckoff_position(mol, 168, 3, randomize=True)
if allowed is not False:
print(
"Found " + str(len(allowed)) + " orientations for ",
name,
" with site symm 6",
)
for i, op in enumerate(allowed):
mo = deepcopy(mol)
mo.apply_operation(op)
filename = "xyz/" + name + "-" + str(i) + ".xyz"
mo.to(fmt="xyz", filename=filename)
| 36.904 | 82 | 0.506829 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,097 | 0.237806 |
b42d69d014401c8b0ab94e331591c7f7f7c7c313 | 2,650 | py | Python | my_project/evolution_forces.py | Abhigyan-Mishra/Quantum-Animation | 675ac367461f6f2b3e0cee3a99db9e1541567e7a | [ "MIT" ] | null | null | null | my_project/evolution_forces.py | Abhigyan-Mishra/Quantum-Animation | 675ac367461f6f2b3e0cee3a99db9e1541567e7a | [ "MIT" ] | null | null | null | my_project/evolution_forces.py | Abhigyan-Mishra/Quantum-Animation | 675ac367461f6f2b3e0cee3a99db9e1541567e7a | [ "MIT" ] | null | null | null |
from manimlib.imports import *
"""
TODO:
[ ] fix arrow head size
auto scale according to size?
have a default size, but, if the arrow size is too short, then shrink the head
[ ] slide the point according to the gradient
"""
class ParaboloidPlot(SpecialThreeDScene):
CONFIG = {
"three_d_axes_config": {
"num_axis_pieces": 1,
"number_line_config": {
"unit_size": 2,
"tick_frequency": 1,
"numbers_with_elongated_ticks": [0, 1, 2],
"stroke_width": 2,
},
"axis_config": {
"unit_size": 1,
"tick_frequency": 1,
"numbers_with_elongated_ticks": [],
"stroke_width": 2,
},
"x_min": 0,
"x_max": 7,
"y_min": 0,
"y_max": 7,
"z_min": 0,
"z_max": 7,
},
"init_camera_orientation": {
"phi": 80 * DEGREES,
# "theta": -135 * DEGREES,
"theta": 290 * DEGREES,
},
"paraboloid_config": {
"r_max": 1,
"center_point": 2*X_AXIS + 2*Y_AXIS,
},
"axes_center_point": -2.5*X_AXIS - 2.5*Y_AXIS - 0.75*Z_AXIS,
}
def construct(self):
self.init_camera()
self.init_axes()
self.init_paraboloid()
## add dot
x, y = 2.1, 2.9
# x, y = 2.1, 2.1
# x, y = 3, 2
z = self.paraboloid.get_value_at_point([x,y])
point = np.array([x,y,z])
sphere = Sphere(radius=0.05, fill_color=WHITE, checkerboard_colors=False)
sphere.shift(point)
sphere.shift(self.axes_center_point)
self.add(sphere)
self.rotate_phi()
## add force
gradient = self.paraboloid.get_gradient(point)
step = np.array([
gradient[0],
gradient[1],
gradient[0]**2 + gradient[1]**2
])
end = point - step
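        # The gradient step above is currently discarded: the arrow is pointed straight
        # at the paraboloid centre as a placeholder (see the TODO about sliding the
        # point along the gradient).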
end = self.paraboloid_config["center_point"]
force = Arrow3d(start=point, end=end)
force.shift(self.axes_center_point)
self.add(force)
self.wait()
self.rotate_phi()
self.wait()
def init_camera(self):
self.set_camera_orientation(**self.init_camera_orientation)
def init_axes(self):
self.axes = self.get_axes()
self.axes.x_axis.set_color(BLUE)
self.axes.y_axis.set_color(GREEN)
self.axes.z_axis.set_color(RED)
# self.set_axes_labels()
self.axes.shift(self.axes_center_point)
self.add(self.axes)
def init_paraboloid(self):
paraboloid = self.paraboloid = ParaboloidPolar(**self.paraboloid_config)
paraboloid.shift(self.axes_center_point)
self.add(paraboloid)
def rotate_phi(self, duration=2, degrees=+20):
# e.g. duration=2 ; degrees = 20
# going 20 degrees in 2 seconds
# 60 frames per seconds
# 20 degrees in 120 frames
rate = - degrees / (60*duration)
# it won't be exact, but it'll be close enough
self.begin_ambient_camera_rotation(rate=rate, about="phi")
self.wait(2)
self.stop_ambient_camera_rotation(about="phi")
| 24.311927 | 80 | 0.669057 | 2,418 | 0.912453 | 0 | 0 | 0 | 0 | 0 | 0 | 832 | 0.313962 |
b42dd19edf20cbabd2658c3670786d63ec526613 | 13,056 | py | Python | tests/python/tensor_graph/test/test_internal/performance/build_time_resnet.py | QinHan-Erin/AMOS | 634bf48edf4015e4a69a8c32d49b96bce2b5f16f | [ "Apache-2.0" ] | 22 | 2022-03-18T07:29:31.000Z | 2022-03-23T14:54:32.000Z | tests/python/tensor_graph/test/test_internal/performance/build_time_resnet.py | QinHan-Erin/AMOS | 634bf48edf4015e4a69a8c32d49b96bce2b5f16f | [ "Apache-2.0" ] | null | null | null | tests/python/tensor_graph/test/test_internal/performance/build_time_resnet.py | QinHan-Erin/AMOS | 634bf48edf4015e4a69a8c32d49b96bce2b5f16f | [ "Apache-2.0" ] | 2 | 2022-03-18T08:26:34.000Z | 2022-03-20T06:02:48.000Z |
import tvm
import sys
import time
import numpy as np
from tvm.tensor_graph.testing.models import resnet
from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, \
GraphTensor, GraphOp, PyTIRGraph
from tvm.tensor_graph.nn import CELoss, SGD
from tvm.tensor_graph.core.schedule_generator import ConnectedSet, GPUScheduleBaseSet, \
GPUScheduleMasterBaseSet, form_connected_sets, GPUScheduleMasterSet, \
SingleCut, form_cut_candidates, LayoutTransform
from tvm.tensor_graph.core.utils import flatten_tir_graph
from tvm.tensor_graph.core.space import PrimitiveSpace, PartitionSpace, ForwardGraphSpace
from tvm.tensor_graph.core.tuner import RandomPrimitiveTuner, RandomPartitionTuner, RandomForwardTuner
from tvm.tensor_graph.core.scheduler import PrimitiveScheduler as Scheduler
from tvm.tensor_graph.core.scheduler import schedule_all
from tvm.tensor_graph.core.build_graph import build_all
from tvm.tensor_graph.core.runtime import run_all
def test1():
print("test 1 ##############################")
batch = 64
img_shape = [batch, 3, 224, 224]
num_classes = 1000
label_shape = [batch, num_classes]
dtype = "float32"
model = resnet.resnet50(num_classes=1000)
img_tensor = GraphTensor(img_shape, dtype=dtype, name="image")
label_tensor = GraphTensor(label_shape, dtype=dtype, name="label")
# get output_tensor
output_tensor = model(img_tensor)
# get the weights tensors
weights_tensors = []
for w in model.weights():
weights_tensors.append(w)
# this is data
img_np = np.random.uniform(-1, 1, img_shape).astype(dtype)
label_np = np.random.uniform(-1, 1, [batch, num_classes]).astype(dtype)
ce_loss = CELoss(label_tensor)
sgd = SGD(0.002)
fwd_graph = ForwardGraph([img_tensor], [output_tensor], weights_tensors)
begin = time.time()
# change data layout
forward_space = ForwardGraphSpace()
forward_tuner = RandomForwardTuner(forward_space)
layout_generator = LayoutTransform(fwd_graph, forward_space, forward_tuner)
fgraph = layout_generator.generate()
after_layout = time.time()
# autodiff
bgraph = fgraph.make_backward(ce_loss, sgd)
after_autodiff = time.time()
# make tir graph
inputs = [x.tvm_tensor for x in bgraph.inputs]
weights = [x.tvm_tensor for x in bgraph.weights]
outputs = [x.tvm_tensor for x in bgraph.outputs]
# labels = [x.tvm_tensor for x in bgraph.labels]
# loss = bgraph.loss.tvm_tensor
# gradients = [x.tvm_tensor for x in bgraph.gradients]
# updates = [x.tvm_tensor for x in bgraph.updates]
labels = []
loss = None
gradients = []
lr = None
updates = []
tgraph = PyTIRGraph(
inputs,
labels,
outputs,
weights,
loss,
gradients,
lr,
updates)
after_tir_graph = time.time()
# subgraph partition
partition_space = PartitionSpace()
partition_tuner = RandomPartitionTuner(partition_space)
cut_candidates = form_cut_candidates(tgraph)
# print(cut_candidates)
for i, candidate in enumerate(cut_candidates):
name = "graph_cut_" + str(i)
partition_generator = SingleCut(tgraph, name, candidate, partition_space, partition_tuner)
partition_generator.generate()
# for op, stat in tgraph.op_stat_dict.items():
# print(op, " head=", stat.head)
tgraph.partition_graph()
after_partition = time.time()
print("num subgraphs:", len(tgraph.subgraphs))
target = "cuda"
dev = 0
# update the op stat dict of subgraphs
# do auto-schedule
total_build_trials = 0
build_time_record = []
for mark, subgraph in tgraph.subgraphs.items():
# print("subgraph", mark)
tensors = list(subgraph.outputs.keys()) + list(subgraph.loss.keys()) \
+ list(subgraph.gradients.keys()) + list(subgraph.updates.keys())
ops = [x.op for x in tensors]
op_list, down_graph = flatten_tir_graph(ops, output_first=True)
op_stat_dict = {}
for op in op_list:
v = tgraph.op_map[op]
if v in tgraph.op_stat_dict:
op_stat_dict[op] = tgraph.op_stat_dict[v]
c_list = form_connected_sets(subgraph, op_stat_dict, tensors, ops, down_graph)
# print("c_list_length=", len(c_list))
# print("check connected set")
# for connected_set in c_list:
# print(connected_set)
scheduler = Scheduler()
# sch = tgraph.schedules[mark]
for i, connected_set in enumerate(c_list):
name = "subgraph_" + str(mark) + "_connect_" + str(i)
assert not connected_set.empty()
build_success = False
for trial in range(10):
total_build_trials += 1
tgraph.create_schedule_for(mark=mark)
sch = tgraph.schedules[mark]
if connected_set.has_master():
if connected_set.iso_base():
PrimitiveScheduler = GPUScheduleMasterBaseSet
else:
PrimitiveScheduler = GPUScheduleMasterSet
primitive_generator = PrimitiveScheduler(
name, subgraph, connected_set, down_graph, op_stat_dict, scheduler)
else:
PrimitiveScheduler = GPUScheduleBaseSet
primitive_generator = PrimitiveScheduler(
name, connected_set, scheduler)
primitive_generator.generate(sch)
# try:
# print(tvm.lower(sch, tgraph.bufs[mark], simple_mode=True))
# except Exception as e:
# print(e)
# print("prologue")
# for p in connected_set.prologue:
# print(p.body)
# print("epilogue")
# for e in connected_set.epilogue:
# print(e.body)
# print("base")
# print(connected_set.base.body)
# print("master")
# print(connected_set.master.body)
# print(connected_set.master.input_tensors)
# for op, master in connected_set.prologue.items():
# in_input = False
# for inp in master.input_tensors:
# if op == inp.op:
# in_input = True
# break
# if not in_input:
# print(op, "not in the inputs of", master)
build_beg = time.time()
build_success = tgraph.build_for(target, mark=mark)
build_end = time.time()
build_time_record.append(build_end - build_beg)
if build_success:
break
if not build_success:
raise RuntimeError("Can't build for subgraph", mark)
after_schedule = time.time()
tgraph.set_inputs({bgraph.inputs[0].tvm_tensor: img_np})
# tgraph.set_labels({bgraph.labels[0].tvm_tensor: label_np})
# tgraph.set_lr(optimize_engine.get_lr())
tgraph.allocate_buffer(target, dev)
beg = time.time()
for mark in tgraph.call_order:
func = tgraph.functions[mark]
bufs = tgraph.bufs[mark]
real_bufs = [tgraph.tvm_array_dict[tgraph.subgraphs[mark].index[x]] for x in bufs]
func_beg = time.time()
func(*real_bufs)
func_end = time.time()
print((func_end - func_beg) * 1e3, "ms")
end = time.time()
print("End to end time:", (end - beg) * 1e3, "ms")
print("total build trails=", total_build_trials)
print("layout change time cost=", (after_layout - begin) * 1e3, "ms")
print("autodiff time cost=", (after_autodiff - after_layout) * 1e3, "ms")
print("make tir_graph time cost=", (after_tir_graph - after_autodiff) * 1e3, "ms")
print("subgraph partition time cost=", (after_partition - after_tir_graph) * 1e3, "ms")
print("schedule time cost=", (after_schedule - after_partition) * 1e3, "ms. average=",
(after_schedule - after_partition) * 1e3 / total_build_trials, "ms")
print("average build time cost=", np.array(build_time_record).mean() * 1e3, "ms")
print("total build time cost=", (after_schedule - begin) * 1e3, "ms")
print("Success!")
def test2(file=sys.stdout):
print("test 2 ##############################")
batch = 64
img_shape = [batch, 3, 224, 224]
num_classes = 1000
label_shape = [batch, num_classes]
dtype = "float32"
model = resnet.resnet50(num_classes=1000)
img_tensor = GraphTensor(img_shape, dtype=dtype, name="image")
label_tensor = GraphTensor(label_shape, dtype=dtype, name="label")
# get output_tensor
output_tensor = model(img_tensor)
# get the weights tensors
weights_tensors = []
for w in model.weights():
weights_tensors.append(w)
# this is data
img_np = np.random.uniform(-1, 1, img_shape).astype(dtype)
label_np = np.random.uniform(-1, 1, [batch, num_classes]).astype(dtype)
ce_loss = CELoss(label_tensor)
sgd = SGD(0.002)
fwd_graph = ForwardGraph([img_tensor], [output_tensor], weights_tensors)
tir_graph = schedule_all(fwd_graph, loss=ce_loss, optimizer=sgd, inference=False)
print(len(tir_graph.subgraphs))
print("different subgraphs:", len(set(tir_graph.subgraph_features.values())), file=file)
print("direrent ops:", len(set(tir_graph.op_feature_dict.values())), file=file)
print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
for k, v in tir_graph.op_map.items():
print(k.name, v.name, file=file)
print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
tmp = {}
for f in set(tir_graph.op_feature_dict.values()):
if f.split(")")[-1] not in tmp:
tmp[f.split(")")[-1]] = []
tmp[f.split(")")[-1]].append(f)
print("different kinds of ops:", len(tmp), file=file)
for k, v in tmp.items():
print(k, file=file)
for vv in v:
print(" ", vv, file=file)
print("####################################################", file=file)
tmp = {}
for f in set(tir_graph.subgraph_features.values()):
key = ";".join([x.split(")")[-1] for x in f.split(";")])
if key not in tmp:
tmp[key] = []
tmp[key].append(f)
print("different kinds of subgraphs:", len(tmp), file=file)
for k, v in tmp.items():
print(k, file=file)
for vv in v:
print(" ", vv, file=file)
for k, v in tir_graph.subgraph_features.items():
key = ";".join([x.split(")")[-1] for x in v.split(";")])
if key == "collect_3_dim4;grad_bn2d_to_conv2d_nchw_8;grad_bn2d_var_to_conv2d_nchw_10;grad_bn2d_mean_to_conv2d_nchw_2;collect_2_dim1":
i = 1
for op in tir_graph.subgraphs[k].op_list:
print(i, ". #####")
i += 1
print(op.body)
print(op.input_tensors)
break
# target = "cuda"
# dev = 0
# print("begin schedule")
# beg_build = time.time()
# build_all(fwd_graph, tir_graph, target=target, build_trial=10)
# end_build = time.time()
# print("num functions:", len(tir_graph.shared_functions))
# print("build time cost=", (end_build - beg_build) * 1e3, "ms")
# try:
# run_all(tir_graph, [img_np], [label_np], sgd.get_lr(), target=target, dev=dev)
# except Exception as e:
# print("run error:", e)
print("Success", file=file)
def test3():
print("test 3 ##############################")
batch = 64
img_shape = [batch, 3, 224, 224]
num_classes = 1000
label_shape = [batch, num_classes]
dtype = "float32"
model = resnet.resnet50(num_classes=1000)
img_tensor = GraphTensor(img_shape, dtype=dtype, name="image")
label_tensor = GraphTensor(label_shape, dtype=dtype, name="label")
# get output_tensor
output_tensor = model(img_tensor)
# get the weights tensors
weights_tensors = []
for w in model.weights():
weights_tensors.append(w)
# this is data
img_np = np.random.uniform(-1, 1, img_shape).astype(dtype)
label_np = np.random.uniform(-1, 1, [batch, num_classes]).astype(dtype)
ce_loss = CELoss(label_tensor)
sgd = SGD(0.002)
fwd_graph = ForwardGraph([img_tensor], [output_tensor], weights_tensors)
tir_graph = schedule_all(fwd_graph)
print(len(tir_graph.subgraphs))
print("different subgraphs:", len(set(tir_graph.subgraph_features.values())))
print("direrent ops:", len(set(tir_graph.op_feature_dict.values())))
tmp = {}
# for f in set(tir_graph.op_feature_dict.values()):
# if f.split(")")[-1] not in tmp:
# tmp[f.split(")")[-1]] = []
# tmp[f.split(")")[-1]].append(f)
# for k, v in tmp.items():
# print(k)
# for vv in v:
# print(" ", vv)
print("####################################################")
tmp = {}
# for f in set(tir_graph.subgraph_features.values()):
# key = ";".join([x.split(")")[-1] for x in f.split(";")])
# if key not in tmp:
# tmp[key] = []
# tmp[key].append(f)
print("different kinds of subgraphs:", len(tmp))
for k, v in tmp.items():
print(k)
for vv in v:
print(" ", vv)
# target = "cuda"
# dev = 1
# print("begin build")
# beg_build = time.time()
# build_all(fwd_graph, tir_graph, target=target, build_trial=10)
# end_build = time.time()
# print("num functions:", len(tir_graph.shared_functions))
# print("build time cost=", (end_build - beg_build) * 1e3, "ms")
# try:
# run_all(tir_graph, [img_np], target=target, dev=dev)
# except Exception as e:
# print("run error:", e)
print("Success")
if __name__ == "__main__":
with open("trace_resnet_subgraph.log", "w") as fout:
test2(file=fout)
# test3() | 32.157635 | 137 | 0.644455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,911 | 0.299556 |
b42f2c192af4e02268e2e461bdd471fe5bb67342 | 2,300 | py | Python | Python3/src/basicExample.py | emanuelen5/XPlaneConnect | 0d462ac306bc802a3b269227d3b98d2507abcd40 | ["Unlicense"] | 457 | 2015-01-02T14:21:11.000Z | 2022-03-27T02:56:47.000Z | Python3/src/basicExample.py | fseconomy/XPlaneConnect | 11a5f350bd6888873d293bf3c9f59b0fba1331c1 | ["Unlicense"] | 211 | 2015-03-24T16:41:33.000Z | 2022-03-27T18:36:11.000Z | Python3/src/basicExample.py | fseconomy/XPlaneConnect | 11a5f350bd6888873d293bf3c9f59b0fba1331c1 | ["Unlicense"] | 258 | 2015-01-01T17:02:27.000Z | 2022-03-31T19:36:03.000Z | from time import sleep
import xpc
def ex():
print("X-Plane Connect example script")
print("Setting up simulation")
with xpc.XPlaneConnect() as client:
# Verify connection
try:
# If X-Plane does not respond to the request, a timeout error
# will be raised.
client.getDREF("sim/test/test_float")
except:
print("Error establishing connection to X-Plane.")
print("Exiting...")
return
# Set position of the player aircraft
print("Setting position")
# Lat Lon Alt Pitch Roll Yaw Gear
posi = [37.524, -122.06899, 2500, 0, 0, 0, 1]
client.sendPOSI(posi)
# Set position of a non-player aircraft
print("Setting NPC position")
# Lat Lon Alt Pitch Roll Yaw Gear
posi = [37.52465, -122.06899, 2500, 0, 20, 0, 1]
client.sendPOSI(posi, 1)
# Set angle of attack, velocity, and orientation using the DATA command
print("Setting orientation")
data = [\
[18, 0, -998, 0, -998, -998, -998, -998, -998],\
[ 3, 130, 130, 130, 130, -998, -998, -998, -998],\
[16, 0, 0, 0, -998, -998, -998, -998, -998]\
]
client.sendDATA(data)
# Set control surfaces and throttle of the player aircraft using sendCTRL
print("Setting controls")
ctrl = [0.0, 0.0, 0.0, 0.8]
client.sendCTRL(ctrl)
# Pause the sim
print("Pausing")
client.pauseSim(True)
sleep(2)
# Toggle pause state to resume
print("Resuming")
client.pauseSim(False)
# Stow landing gear using a dataref
print("Stowing gear")
gear_dref = "sim/cockpit/switches/gear_handle_status"
client.sendDREF(gear_dref, 0)
# Let the sim run for a bit.
sleep(4)
# Make sure gear was stowed successfully
gear_status = client.getDREF(gear_dref)
if gear_status[0] == 0:
print("Gear stowed")
else:
print("Error stowing gear")
print("End of Python client example")
input("Press any key to exit...")
if __name__ == "__main__":
ex() | 31.944444 | 81 | 0.541304 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 956 | 0.415652 |
b42fa4f8536cb94842b8b435241c9e24e5dca076 | 27,419 | py | Python | venv/lib/python3.6/site-packages/pelican/readers.py | RyanHelgoth/CMPUT404-Lab5 | 82424bf5a9b80ff186bd69d224457c8b70a3bdf3 | ["Apache-2.0"] | null | null | null | venv/lib/python3.6/site-packages/pelican/readers.py | RyanHelgoth/CMPUT404-Lab5 | 82424bf5a9b80ff186bd69d224457c8b70a3bdf3 | ["Apache-2.0"] | null | null | null | venv/lib/python3.6/site-packages/pelican/readers.py | RyanHelgoth/CMPUT404-Lab5 | 82424bf5a9b80ff186bd69d224457c8b70a3bdf3 | ["Apache-2.0"] | null | null | null | import datetime
import logging
import os
import re
from collections import OrderedDict
from html import escape
from html.parser import HTMLParser
from io import StringIO
import docutils
import docutils.core
import docutils.io
from docutils.parsers.rst.languages import get_language as get_docutils_lang
from docutils.writers.html4css1 import HTMLTranslator, Writer
from pelican import rstdirectives # NOQA
from pelican.cache import FileStampDataCacher
from pelican.contents import Author, Category, Page, Tag
from pelican.plugins import signals
from pelican.utils import get_date, pelican_open, posixize_path
try:
from markdown import Markdown
except ImportError:
Markdown = False # NOQA
# Metadata processors have no way to discard an unwanted value, so we have
# them return this value instead to signal that it should be discarded later.
# This means that _filter_discardable_metadata() must be called on processed
# metadata dicts before use, to remove the items with the special value.
_DISCARD = object()
DUPLICATES_DEFINITIONS_ALLOWED = {
'tags': False,
'date': False,
'modified': False,
'status': False,
'category': False,
'author': False,
'save_as': False,
'url': False,
'authors': False,
'slug': False
}
METADATA_PROCESSORS = {
'tags': lambda x, y: ([
Tag(tag, y)
for tag in ensure_metadata_list(x)
] or _DISCARD),
'date': lambda x, y: get_date(x.replace('_', ' ')),
'modified': lambda x, y: get_date(x),
'status': lambda x, y: x.strip() or _DISCARD,
'category': lambda x, y: _process_if_nonempty(Category, x, y),
'author': lambda x, y: _process_if_nonempty(Author, x, y),
'authors': lambda x, y: ([
Author(author, y)
for author in ensure_metadata_list(x)
] or _DISCARD),
'slug': lambda x, y: x.strip() or _DISCARD,
}
logger = logging.getLogger(__name__)
def ensure_metadata_list(text):
"""Canonicalize the format of a list of authors or tags. This works
the same way as Docutils' "authors" field: if it's already a list,
those boundaries are preserved; otherwise, it must be a string;
if the string contains semicolons, it is split on semicolons;
otherwise, it is split on commas. This allows you to write
author lists in either "Jane Doe, John Doe" or "Doe, Jane; Doe, John"
format.
Regardless, all list items undergo .strip() before returning, and
empty items are discarded.
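    Illustrative examples (hypothetical values, shown only to make the
    splitting rules above concrete):

    >>> ensure_metadata_list('Doe, Jane; Doe, John')
    ['Doe, Jane', 'Doe, John']
    >>> ensure_metadata_list('pelican, pelican, python')
    ['pelican', 'python']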
"""
if isinstance(text, str):
if ';' in text:
text = text.split(';')
else:
text = text.split(',')
return list(OrderedDict.fromkeys(
[v for v in (w.strip() for w in text) if v]
))
def _process_if_nonempty(processor, name, settings):
"""Removes extra whitespace from name and applies a metadata processor.
If name is empty or all whitespace, returns _DISCARD instead.
"""
name = name.strip()
return processor(name, settings) if name else _DISCARD
def _filter_discardable_metadata(metadata):
"""Return a copy of a dict, minus any items marked as discardable."""
return {name: val for name, val in metadata.items() if val is not _DISCARD}
class BaseReader:
"""Base class to read files.
This class is used to process static files, and it can be inherited for
other types of file. A Reader class must have the following attributes:
- enabled: (boolean) tell if the Reader class is enabled. It
generally depends on the import of some dependency.
- file_extensions: a list of file extensions that the Reader will process.
- extensions: a list of extensions to use in the reader (typical use is
Markdown).
"""
enabled = True
file_extensions = ['static']
extensions = None
def __init__(self, settings):
self.settings = settings
def process_metadata(self, name, value):
if name in METADATA_PROCESSORS:
return METADATA_PROCESSORS[name](value, self.settings)
return value
def read(self, source_path):
"No-op parser"
content = None
metadata = {}
return content, metadata
class _FieldBodyTranslator(HTMLTranslator):
def __init__(self, document):
super().__init__(document)
self.compact_p = None
def astext(self):
return ''.join(self.body)
def visit_field_body(self, node):
pass
def depart_field_body(self, node):
pass
def render_node_to_html(document, node, field_body_translator_class):
visitor = field_body_translator_class(document)
node.walkabout(visitor)
return visitor.astext()
class PelicanHTMLWriter(Writer):
def __init__(self):
super().__init__()
self.translator_class = PelicanHTMLTranslator
class PelicanHTMLTranslator(HTMLTranslator):
def visit_abbreviation(self, node):
attrs = {}
if node.hasattr('explanation'):
attrs['title'] = node['explanation']
self.body.append(self.starttag(node, 'abbr', '', **attrs))
def depart_abbreviation(self, node):
self.body.append('</abbr>')
def visit_image(self, node):
# set an empty alt if alt is not specified
# avoids that alt is taken from src
node['alt'] = node.get('alt', '')
return HTMLTranslator.visit_image(self, node)
class RstReader(BaseReader):
"""Reader for reStructuredText files
By default the output HTML is written using
docutils.writers.html4css1.Writer and translated using a subclass of
docutils.writers.html4css1.HTMLTranslator. If you want to override it with
your own writer/translator (e.g. a HTML5-based one), pass your classes to
these two attributes. Look in the source code for details.
writer_class Used for writing contents
field_body_translator_class Used for translating metadata such
as article summary
"""
enabled = bool(docutils)
file_extensions = ['rst']
writer_class = PelicanHTMLWriter
field_body_translator_class = _FieldBodyTranslator
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
lang_code = self.settings.get('DEFAULT_LANG', 'en')
if get_docutils_lang(lang_code):
self._language_code = lang_code
else:
logger.warning("Docutils has no localization for '%s'."
" Using 'en' instead.", lang_code)
self._language_code = 'en'
def _parse_metadata(self, document, source_path):
"""Return the dict containing document metadata"""
formatted_fields = self.settings['FORMATTED_FIELDS']
output = {}
if document.first_child_matching_class(docutils.nodes.title) is None:
logger.warning(
'Document title missing in file %s: '
'Ensure exactly one top level section',
source_path)
for docinfo in document.traverse(docutils.nodes.docinfo):
for element in docinfo.children:
if element.tagname == 'field': # custom fields (e.g. summary)
name_elem, body_elem = element.children
name = name_elem.astext()
if name.lower() in formatted_fields:
value = render_node_to_html(
document, body_elem,
self.field_body_translator_class)
else:
value = body_elem.astext()
elif element.tagname == 'authors': # author list
name = element.tagname
value = [element.astext() for element in element.children]
else: # standard fields (e.g. address)
name = element.tagname
value = element.astext()
name = name.lower()
output[name] = self.process_metadata(name, value)
return output
def _get_publisher(self, source_path):
extra_params = {'initial_header_level': '2',
'syntax_highlight': 'short',
'input_encoding': 'utf-8',
'language_code': self._language_code,
'halt_level': 2,
'traceback': True,
'warning_stream': StringIO(),
'embed_stylesheet': False}
user_params = self.settings.get('DOCUTILS_SETTINGS')
if user_params:
extra_params.update(user_params)
pub = docutils.core.Publisher(
writer=self.writer_class(),
destination_class=docutils.io.StringOutput)
pub.set_components('standalone', 'restructuredtext', 'html')
pub.process_programmatic_settings(None, extra_params, None)
pub.set_source(source_path=source_path)
pub.publish()
return pub
def read(self, source_path):
"""Parses restructured text"""
pub = self._get_publisher(source_path)
parts = pub.writer.parts
content = parts.get('body')
metadata = self._parse_metadata(pub.document, source_path)
metadata.setdefault('title', parts.get('title'))
return content, metadata
class MarkdownReader(BaseReader):
"""Reader for Markdown files"""
enabled = bool(Markdown)
file_extensions = ['md', 'markdown', 'mkd', 'mdown']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
settings = self.settings['MARKDOWN']
settings.setdefault('extension_configs', {})
settings.setdefault('extensions', [])
for extension in settings['extension_configs'].keys():
if extension not in settings['extensions']:
settings['extensions'].append(extension)
if 'markdown.extensions.meta' not in settings['extensions']:
settings['extensions'].append('markdown.extensions.meta')
self._source_path = None
def _parse_metadata(self, meta):
"""Return the dict containing document metadata"""
formatted_fields = self.settings['FORMATTED_FIELDS']
# prevent metadata extraction in fields
self._md.preprocessors.deregister('meta')
output = {}
for name, value in meta.items():
name = name.lower()
if name in formatted_fields:
                # formatted metadata is a special case: join all list values
formatted_values = "\n".join(value)
# reset the markdown instance to clear any state
self._md.reset()
formatted = self._md.convert(formatted_values)
output[name] = self.process_metadata(name, formatted)
elif not DUPLICATES_DEFINITIONS_ALLOWED.get(name, True):
if len(value) > 1:
logger.warning(
'Duplicate definition of `%s` '
'for %s. Using first one.',
name, self._source_path)
output[name] = self.process_metadata(name, value[0])
elif len(value) > 1:
# handle list metadata as list of string
output[name] = self.process_metadata(name, value)
else:
# otherwise, handle metadata as single string
output[name] = self.process_metadata(name, value[0])
return output
def read(self, source_path):
"""Parse content and metadata of markdown files"""
self._source_path = source_path
self._md = Markdown(**self.settings['MARKDOWN'])
with pelican_open(source_path) as text:
content = self._md.convert(text)
if hasattr(self._md, 'Meta'):
metadata = self._parse_metadata(self._md.Meta)
else:
metadata = {}
return content, metadata
class HTMLReader(BaseReader):
"""Parses HTML files as input, looking for meta, title, and body tags"""
file_extensions = ['htm', 'html']
enabled = True
class _HTMLParser(HTMLParser):
def __init__(self, settings, filename):
super().__init__(convert_charrefs=False)
self.body = ''
self.metadata = {}
self.settings = settings
self._data_buffer = ''
self._filename = filename
self._in_top_level = True
self._in_head = False
self._in_title = False
self._in_body = False
self._in_tags = False
def handle_starttag(self, tag, attrs):
if tag == 'head' and self._in_top_level:
self._in_top_level = False
self._in_head = True
elif tag == 'title' and self._in_head:
self._in_title = True
self._data_buffer = ''
elif tag == 'body' and self._in_top_level:
self._in_top_level = False
self._in_body = True
self._data_buffer = ''
elif tag == 'meta' and self._in_head:
self._handle_meta_tag(attrs)
elif self._in_body:
self._data_buffer += self.build_tag(tag, attrs, False)
def handle_endtag(self, tag):
if tag == 'head':
if self._in_head:
self._in_head = False
self._in_top_level = True
elif self._in_head and tag == 'title':
self._in_title = False
self.metadata['title'] = self._data_buffer
elif tag == 'body':
self.body = self._data_buffer
self._in_body = False
self._in_top_level = True
elif self._in_body:
self._data_buffer += '</{}>'.format(escape(tag))
def handle_startendtag(self, tag, attrs):
if tag == 'meta' and self._in_head:
self._handle_meta_tag(attrs)
if self._in_body:
self._data_buffer += self.build_tag(tag, attrs, True)
def handle_comment(self, data):
self._data_buffer += '<!--{}-->'.format(data)
def handle_data(self, data):
self._data_buffer += data
def handle_entityref(self, data):
self._data_buffer += '&{};'.format(data)
def handle_charref(self, data):
self._data_buffer += '&#{};'.format(data)
def build_tag(self, tag, attrs, close_tag):
result = '<{}'.format(escape(tag))
for k, v in attrs:
result += ' ' + escape(k)
if v is not None:
# If the attribute value contains a double quote, surround
# with single quotes, otherwise use double quotes.
if '"' in v:
result += "='{}'".format(escape(v, quote=False))
else:
result += '="{}"'.format(escape(v, quote=False))
if close_tag:
return result + ' />'
return result + '>'
def _handle_meta_tag(self, attrs):
name = self._attr_value(attrs, 'name')
if name is None:
attr_list = ['{}="{}"'.format(k, v) for k, v in attrs]
attr_serialized = ', '.join(attr_list)
logger.warning("Meta tag in file %s does not have a 'name' "
"attribute, skipping. Attributes: %s",
self._filename, attr_serialized)
return
name = name.lower()
contents = self._attr_value(attrs, 'content', '')
if not contents:
contents = self._attr_value(attrs, 'contents', '')
if contents:
logger.warning(
"Meta tag attribute 'contents' used in file %s, should"
" be changed to 'content'",
self._filename,
extra={'limit_msg': "Other files have meta tag "
"attribute 'contents' that should "
"be changed to 'content'"})
if name == 'keywords':
name = 'tags'
if name in self.metadata:
# if this metadata already exists (i.e. a previous tag with the
# same name has already been specified then either convert to
# list or append to list
if isinstance(self.metadata[name], list):
self.metadata[name].append(contents)
else:
self.metadata[name] = [self.metadata[name], contents]
else:
self.metadata[name] = contents
@classmethod
def _attr_value(cls, attrs, name, default=None):
return next((x[1] for x in attrs if x[0] == name), default)
def read(self, filename):
"""Parse content and metadata of HTML files"""
with pelican_open(filename) as content:
parser = self._HTMLParser(self.settings, filename)
parser.feed(content)
parser.close()
metadata = {}
for k in parser.metadata:
metadata[k] = self.process_metadata(k, parser.metadata[k])
return parser.body, metadata
class Readers(FileStampDataCacher):
"""Interface for all readers.
This class contains a mapping of file extensions / Reader classes, to know
which Reader class must be used to read a file (based on its extension).
This is customizable both with the 'READERS' setting, and with the
    'readers_init' signal for plugins.
"""
def __init__(self, settings=None, cache_name=''):
self.settings = settings or {}
self.readers = {}
self.reader_classes = {}
for cls in [BaseReader] + BaseReader.__subclasses__():
if not cls.enabled:
logger.debug('Missing dependencies for %s',
', '.join(cls.file_extensions))
continue
for ext in cls.file_extensions:
self.reader_classes[ext] = cls
if self.settings['READERS']:
self.reader_classes.update(self.settings['READERS'])
signals.readers_init.send(self)
for fmt, reader_class in self.reader_classes.items():
if not reader_class:
continue
self.readers[fmt] = reader_class(self.settings)
# set up caching
cache_this_level = (cache_name != '' and
self.settings['CONTENT_CACHING_LAYER'] == 'reader')
caching_policy = cache_this_level and self.settings['CACHE_CONTENT']
load_policy = cache_this_level and self.settings['LOAD_CONTENT_CACHE']
super().__init__(settings, cache_name, caching_policy, load_policy)
@property
def extensions(self):
return self.readers.keys()
def read_file(self, base_path, path, content_class=Page, fmt=None,
context=None, preread_signal=None, preread_sender=None,
context_signal=None, context_sender=None):
"""Return a content object parsed with the given format."""
path = os.path.abspath(os.path.join(base_path, path))
source_path = posixize_path(os.path.relpath(path, base_path))
logger.debug(
'Read file %s -> %s',
source_path, content_class.__name__)
if not fmt:
_, ext = os.path.splitext(os.path.basename(path))
fmt = ext[1:]
if fmt not in self.readers:
raise TypeError(
'Pelican does not know how to parse %s', path)
if preread_signal:
logger.debug(
'Signal %s.send(%s)',
preread_signal.name, preread_sender)
preread_signal.send(preread_sender)
reader = self.readers[fmt]
metadata = _filter_discardable_metadata(default_metadata(
settings=self.settings, process=reader.process_metadata))
metadata.update(path_metadata(
full_path=path, source_path=source_path,
settings=self.settings))
metadata.update(_filter_discardable_metadata(parse_path_metadata(
source_path=source_path, settings=self.settings,
process=reader.process_metadata)))
reader_name = reader.__class__.__name__
metadata['reader'] = reader_name.replace('Reader', '').lower()
content, reader_metadata = self.get_cached_data(path, (None, None))
if content is None:
content, reader_metadata = reader.read(path)
reader_metadata = _filter_discardable_metadata(reader_metadata)
self.cache_data(path, (content, reader_metadata))
metadata.update(reader_metadata)
if content:
# find images with empty alt
find_empty_alt(content, path)
# eventually filter the content with typogrify if asked so
if self.settings['TYPOGRIFY']:
from typogrify.filters import typogrify
import smartypants
typogrify_dashes = self.settings['TYPOGRIFY_DASHES']
if typogrify_dashes == 'oldschool':
smartypants.Attr.default = smartypants.Attr.set2
elif typogrify_dashes == 'oldschool_inverted':
smartypants.Attr.default = smartypants.Attr.set3
else:
smartypants.Attr.default = smartypants.Attr.set1
# Tell `smartypants` to also replace " HTML entities with
# smart quotes. This is necessary because Docutils has already
# replaced double quotes with said entities by the time we run
# this filter.
smartypants.Attr.default |= smartypants.Attr.w
def typogrify_wrapper(text):
"""Ensures ignore_tags feature is backward compatible"""
try:
return typogrify(
text,
self.settings['TYPOGRIFY_IGNORE_TAGS'])
except TypeError:
return typogrify(text)
if content:
content = typogrify_wrapper(content)
if 'title' in metadata:
metadata['title'] = typogrify_wrapper(metadata['title'])
if 'summary' in metadata:
metadata['summary'] = typogrify_wrapper(metadata['summary'])
if context_signal:
logger.debug(
'Signal %s.send(%s, <metadata>)',
context_signal.name,
context_sender)
context_signal.send(context_sender, metadata=metadata)
return content_class(content=content, metadata=metadata,
settings=self.settings, source_path=path,
context=context)
def find_empty_alt(content, path):
"""Find images with empty alt
Create warnings for all images with empty alt (up to a certain number),
as they are really likely to be accessibility flaws.
"""
imgs = re.compile(r"""
(?:
# src before alt
<img
[^\>]*
src=(['"])(.*?)\1
[^\>]*
alt=(['"])\3
)|(?:
# alt before src
<img
[^\>]*
alt=(['"])\4
[^\>]*
src=(['"])(.*?)\5
)
""", re.X)
for match in re.findall(imgs, content):
logger.warning(
'Empty alt attribute for image %s in %s',
os.path.basename(match[1] + match[5]), path,
extra={'limit_msg': 'Other images have empty alt attributes'})
def default_metadata(settings=None, process=None):
metadata = {}
if settings:
for name, value in dict(settings.get('DEFAULT_METADATA', {})).items():
if process:
value = process(name, value)
metadata[name] = value
if 'DEFAULT_CATEGORY' in settings:
value = settings['DEFAULT_CATEGORY']
if process:
value = process('category', value)
metadata['category'] = value
if settings.get('DEFAULT_DATE', None) and \
settings['DEFAULT_DATE'] != 'fs':
if isinstance(settings['DEFAULT_DATE'], str):
metadata['date'] = get_date(settings['DEFAULT_DATE'])
else:
metadata['date'] = datetime.datetime(*settings['DEFAULT_DATE'])
return metadata
def path_metadata(full_path, source_path, settings=None):
metadata = {}
if settings:
if settings.get('DEFAULT_DATE', None) == 'fs':
metadata['date'] = datetime.datetime.fromtimestamp(
os.stat(full_path).st_mtime)
metadata['modified'] = metadata['date']
# Apply EXTRA_PATH_METADATA for the source path and the paths of any
# parent directories. Sorting EPM first ensures that the most specific
# path wins conflicts.
epm = settings.get('EXTRA_PATH_METADATA', {})
for path, meta in sorted(epm.items()):
# Enforce a trailing slash when checking for parent directories.
# This prevents false positives when one file or directory's name
# is a prefix of another's.
dirpath = posixize_path(os.path.join(path, ''))
if source_path == path or source_path.startswith(dirpath):
metadata.update(meta)
return metadata
def parse_path_metadata(source_path, settings=None, process=None):
r"""Extract a metadata dictionary from a file's path
>>> import pprint
>>> settings = {
... 'FILENAME_METADATA': r'(?P<slug>[^.]*).*',
... 'PATH_METADATA':
... r'(?P<category>[^/]*)/(?P<date>\d{4}-\d{2}-\d{2})/.*',
... }
>>> reader = BaseReader(settings=settings)
>>> metadata = parse_path_metadata(
... source_path='my-cat/2013-01-01/my-slug.html',
... settings=settings,
... process=reader.process_metadata)
>>> pprint.pprint(metadata) # doctest: +ELLIPSIS
{'category': <pelican.urlwrappers.Category object at ...>,
'date': datetime.datetime(2013, 1, 1, 0, 0),
'slug': 'my-slug'}
"""
metadata = {}
dirname, basename = os.path.split(source_path)
base, ext = os.path.splitext(basename)
subdir = os.path.basename(dirname)
if settings:
checks = []
for key, data in [('FILENAME_METADATA', base),
('PATH_METADATA', source_path)]:
checks.append((settings.get(key, None), data))
if settings.get('USE_FOLDER_AS_CATEGORY', None):
checks.append(('(?P<category>.*)', subdir))
for regexp, data in checks:
if regexp and data:
match = re.match(regexp, data)
if match:
# .items() for py3k compat.
for k, v in match.groupdict().items():
k = k.lower() # metadata must be lowercase
if v is not None and k not in metadata:
if process:
v = process(k, v)
metadata[k] = v
return metadata
| 36.607477 | 79 | 0.581276 | 19,598 | 0.71476 | 0 | 0 | 211 | 0.007695 | 0 | 0 | 7,556 | 0.275575 |
b4314fe64ec815899c36c9b326b930ecd497d54b | 4,017 | py | Python | xmuda/models/CP_v5.py | anhquancao/xmuda-extend | 4b670ec2f6766e3a624e81dbe5d97b209c1c4f76 | ["Apache-2.0"] | null | null | null | xmuda/models/CP_v5.py | anhquancao/xmuda-extend | 4b670ec2f6766e3a624e81dbe5d97b209c1c4f76 | ["Apache-2.0"] | null | null | null | xmuda/models/CP_v5.py | anhquancao/xmuda-extend | 4b670ec2f6766e3a624e81dbe5d97b209c1c4f76 | ["Apache-2.0"] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from xmuda.models.DDR import Bottleneck3D
from xmuda.models.LMSCNet import SegmentationHead, ASPP
import numpy as np
from xmuda.models.modules import Process, Upsample, Downsample
import math
from xmuda.data.utils.preprocess import create_voxel_position
class AggregationModule(nn.Module):
"""Aggregation Module"""
def __init__(self,
feature, out_feature):
super(AggregationModule, self).__init__()
dilations = [1, 2, 3] # kitti
# dilations = [1, 1, 1] # NYU
self.b1 = Bottleneck3D(feature, feature // 4, norm_layer=nn.BatchNorm3d, dilation=[dilations[0], dilations[0], dilations[0]])
self.b2 = Bottleneck3D(feature, feature // 4, norm_layer=nn.BatchNorm3d, dilation=[dilations[1], dilations[1], dilations[1]])
self.b3 = Bottleneck3D(feature, feature // 4, norm_layer=nn.BatchNorm3d, dilation=[dilations[2], dilations[2], dilations[2]])
self.resize = nn.Conv3d(feature * 4, out_feature, kernel_size=1, padding=0)
self.aspp = ASPP(out_feature, [1, 2, 3])
def forward(self, x):
x1 = self.b1(x)
x2 = self.b2(x1)
x3 = self.b3(x2)
x = torch.cat([x, x1, x2, x3], dim=1)
x = self.aspp(self.resize(x))
return x
class CPMegaVoxels(nn.Module):
def __init__(self, out_channels, feature, size,
n_relations=4,
bn_momentum=0.0003):
super().__init__()
self.size = size
self.n_relations = n_relations
print("n_relations", self.n_relations)
self.flatten_size = size[0] * size[1] * size[2]
self.context_feature = feature
self.agg = AggregationModule(feature, self.context_feature)
self.mega_context = nn.AvgPool3d(kernel_size=2, stride=2)
self.flatten_context_size = (size[0]//2) * (size[1]//2) * (size[2]//2)
self.context_prior_logits = nn.ModuleList([
nn.Sequential(
nn.Conv3d(self.context_feature, self.flatten_context_size, padding=0, kernel_size=1),
) for i in range(n_relations)
])
self.resize = nn.Sequential(
nn.Conv3d(self.context_feature * self.n_relations + feature, out_channels, kernel_size=3, padding=1),
)
self.mega_context_logit = nn.Sequential(
nn.Conv3d(self.context_feature, 12, kernel_size=1, padding=0)
)
def forward(self, input):
ret = {}
bs, c, h, w, d = input.shape
x_agg = self.agg(input)
# get the mega context
x_mega_context = self.mega_context(x_agg) # bs, 512, 7, 4, 7
x_mega_context = x_mega_context.reshape(bs, x_mega_context.shape[1], -1) # bs, 512, 196
x_mega_context = x_mega_context.permute(0, 2, 1) # bs, 196, 512
# get context prior map
x_context_prior_logits = []
x_context_rels = []
for rel in range(self.n_relations):
x_context_prior_logit = self.context_prior_logits[rel](x_agg) # bs, 784, 15, 9, 15
x_context_prior_logit = x_context_prior_logit.reshape(bs, 1, self.flatten_context_size, self.flatten_size)
x_context_prior_logits.append(x_context_prior_logit)
x_context_prior = torch.sigmoid(x_context_prior_logit).squeeze(dim=1).permute(0, 2, 1) # bs, 2025, 196
x_context_rel = torch.bmm(x_context_prior, x_mega_context) # bs, 2025, 1024
x_context_rels.append(x_context_rel)
x_context = torch.cat(x_context_rels, dim=2)
x_context = x_context.permute(0, 2, 1)
x_context = x_context.reshape(bs, self.context_feature * self.n_relations, self.size[0], self.size[1], self.size[2])
x = torch.cat([input, x_context], dim=1)
x = self.resize(x)
x_context_prior_logits = torch.cat(x_context_prior_logits, dim=1) # bs, n_relations, 196, 2025
ret["P_logits"] = x_context_prior_logits
ret["x"] = x
return ret
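# Minimal smoke test, kept under a __main__ guard so that importing this
# module is unchanged. The batch size, feature width, spatial size and number
# of relations below are illustrative assumptions, not values used by the
# xmuda project itself.
if __name__ == "__main__":
    _feature = 32
    _size = (8, 8, 8)
    _model = CPMegaVoxels(out_channels=_feature, feature=_feature,
                          size=_size, n_relations=2)
    _x = torch.rand(2, _feature, *_size)
    _ret = _model(_x)
    # "x" keeps the input resolution; "P_logits" holds one prior map per relation.
    print(_ret["x"].shape, _ret["P_logits"].shape)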
| 40.17 | 134 | 0.638536 | 3,689 | 0.918347 | 0 | 0 | 0 | 0 | 0 | 0 | 263 | 0.065472 |
b432caf11213235d03484242de9f5514f01637df | 10,511 | py | Python | gala/potential/potential/tests/helpers.py | ltlancas/gala | 2621bb599d67e74a85446abf72d5930ef70ca181 | ["MIT"] | 1 | 2021-10-14T03:36:15.000Z | 2021-10-14T03:36:15.000Z | gala/potential/potential/tests/helpers.py | ltlancas/gala | 2621bb599d67e74a85446abf72d5930ef70ca181 | ["MIT"] | null | null | null | gala/potential/potential/tests/helpers.py | ltlancas/gala | 2621bb599d67e74a85446abf72d5930ef70ca181 | ["MIT"] | null | null | null | # coding: utf-8
from __future__ import division, print_function
# Standard library
import time
# Third-party
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import derivative
from astropy.extern.six.moves import cPickle as pickle
import pytest
# Project
from ..io import load
from ..core import CompositePotential
from ....units import UnitSystem, DimensionlessUnitSystem
from ....dynamics import PhaseSpacePosition
from ....integrate import LeapfrogIntegrator
def partial_derivative(func, point, dim_ix=0, **kwargs):
xyz = np.array(point, copy=True)
def wraps(a):
xyz[dim_ix] = a
return func(xyz)
return derivative(wraps, point[dim_ix], **kwargs)
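# Illustrative usage (not part of the original test suite): estimate the
# partial derivative of f(x, y, z) = x * y**2 with respect to y at (1, 2, 3);
# the result should be close to 2*x*y = 4.
#
#   partial_derivative(lambda p: p[0] * p[1]**2, np.array([1., 2., 3.]),
#                      dim_ix=1, dx=1e-4)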
class PotentialTestBase(object):
name = None
potential = None # MUST SET THIS
tol = 1E-5
show_plots = False
@classmethod
def setup_class(cls):
if cls.name is None:
cls.name = cls.__name__[4:] # remove Test
print("Testing potential: {}".format(cls.name))
cls.w0 = np.array(cls.w0)
cls.ndim = cls.w0.size // 2
# TODO: need to test also quantity objects and phasespacepositions!
# these are arrays we will test the methods on:
w0_2d = np.repeat(cls.w0[:,None], axis=1, repeats=16)
w0_3d = np.repeat(w0_2d[...,None], axis=2, repeats=8)
w0_list = list(cls.w0)
w0_slice = w0_2d[:,:4]
cls.w0s = [cls.w0, w0_2d, w0_3d, w0_list, w0_slice]
cls._grad_return_shapes = [cls.w0[:cls.ndim].shape + (1,),
w0_2d[:cls.ndim].shape,
w0_3d[:cls.ndim].shape,
cls.w0[:cls.ndim].shape + (1,),
w0_slice[:cls.ndim].shape]
cls._hess_return_shapes = [(cls.ndim,) + cls.w0[:cls.ndim].shape + (1,),
(cls.ndim,) + w0_2d[:cls.ndim].shape,
(cls.ndim,) + w0_3d[:cls.ndim].shape,
(cls.ndim,) + cls.w0[:cls.ndim].shape + (1,),
(cls.ndim,) + w0_slice[:cls.ndim].shape]
cls._valu_return_shapes = [x[1:] for x in cls._grad_return_shapes]
def test_unitsystem(self):
assert isinstance(self.potential.units, UnitSystem)
def test_energy(self):
assert self.ndim == self.potential.ndim
for arr,shp in zip(self.w0s, self._valu_return_shapes):
v = self.potential.energy(arr[:self.ndim])
assert v.shape == shp
g = self.potential.energy(arr[:self.ndim], t=0.1)
g = self.potential.energy(arr[:self.ndim], t=0.1*self.potential.units['time'])
t = np.zeros(np.array(arr).shape[1:]) + 0.1
g = self.potential.energy(arr[:self.ndim], t=t)
g = self.potential.energy(arr[:self.ndim], t=t*self.potential.units['time'])
def test_gradient(self):
for arr,shp in zip(self.w0s, self._grad_return_shapes):
g = self.potential.gradient(arr[:self.ndim])
assert g.shape == shp
g = self.potential.gradient(arr[:self.ndim], t=0.1)
g = self.potential.gradient(arr[:self.ndim], t=0.1*self.potential.units['time'])
t = np.zeros(np.array(arr).shape[1:]) + 0.1
g = self.potential.gradient(arr[:self.ndim], t=t)
g = self.potential.gradient(arr[:self.ndim], t=t*self.potential.units['time'])
def test_hessian(self):
for arr,shp in zip(self.w0s, self._hess_return_shapes):
g = self.potential.hessian(arr[:self.ndim])
assert g.shape == shp
g = self.potential.hessian(arr[:self.ndim], t=0.1)
g = self.potential.hessian(arr[:self.ndim], t=0.1*self.potential.units['time'])
t = np.zeros(np.array(arr).shape[1:]) + 0.1
g = self.potential.hessian(arr[:self.ndim], t=t)
g = self.potential.hessian(arr[:self.ndim], t=t*self.potential.units['time'])
def test_mass_enclosed(self):
for arr,shp in zip(self.w0s, self._valu_return_shapes):
g = self.potential.mass_enclosed(arr[:self.ndim])
assert g.shape == shp
assert np.all(g > 0.)
g = self.potential.mass_enclosed(arr[:self.ndim], t=0.1)
g = self.potential.mass_enclosed(arr[:self.ndim], t=0.1*self.potential.units['time'])
t = np.zeros(np.array(arr).shape[1:]) + 0.1
g = self.potential.mass_enclosed(arr[:self.ndim], t=t)
g = self.potential.mass_enclosed(arr[:self.ndim], t=t*self.potential.units['time'])
def test_circular_velocity(self):
for arr,shp in zip(self.w0s, self._valu_return_shapes):
g = self.potential.circular_velocity(arr[:self.ndim])
assert g.shape == shp
assert np.all(g > 0.)
g = self.potential.circular_velocity(arr[:self.ndim], t=0.1)
g = self.potential.circular_velocity(arr[:self.ndim], t=0.1*self.potential.units['time'])
t = np.zeros(np.array(arr).shape[1:]) + 0.1
g = self.potential.circular_velocity(arr[:self.ndim], t=t)
g = self.potential.circular_velocity(arr[:self.ndim], t=t*self.potential.units['time'])
def test_repr(self):
pot_repr = repr(self.potential)
if isinstance(self.potential.units, DimensionlessUnitSystem):
assert "dimensionless" in pot_repr
else:
assert str(self.potential.units['length']) in pot_repr
assert str(self.potential.units['time']) in pot_repr
assert str(self.potential.units['mass']) in pot_repr
for k in self.potential.parameters.keys():
assert "{}=".format(k) in pot_repr
def test_compare(self):
# skip if composite potentials
if len(self.potential.parameters) == 0:
return
other = self.potential.__class__(units=self.potential.units,
**self.potential.parameters)
assert other == self.potential
pars = self.potential.parameters.copy()
for k in pars.keys():
if k != 0:
pars[k] = 1.1*pars[k]
other = self.potential.__class__(units=self.potential.units, **pars)
assert other != self.potential
# check that comparing to non-potentials works
assert not self.potential == "sup"
assert not self.potential == None
def test_plot(self):
p = self.potential
if self.show_plots:
f = p.plot_contours(grid=(np.linspace(-10., 10., 100), 0., 0.),
labels=["X"])
# f.suptitle("slice off from 0., won't have cusp")
# f.savefig(os.path.join(plot_path, "contour_x.png"))
f = p.plot_contours(grid=(np.linspace(-10., 10., 100),
np.linspace(-10., 10., 100),
0.),
cmap='Blues')
# f.savefig(os.path.join(plot_path, "contour_xy.png"))
f = p.plot_contours(grid=(np.linspace(-10., 10., 100),
1.,
np.linspace(-10., 10., 100)),
cmap='Blues', labels=["X", "Z"])
# f.savefig(os.path.join(plot_path, "contour_xz.png"))
plt.show()
plt.close('all')
def test_save_load(self, tmpdir):
"""
Test writing to a YAML file, and reading back in
"""
fn = str(tmpdir.join("{}.yml".format(self.name)))
self.potential.save(fn)
p = load(fn)
p.energy(self.w0[:self.w0.size//2])
p.gradient(self.w0[:self.w0.size//2])
def test_numerical_gradient_vs_gradient(self):
"""
Check that the value of the implemented gradient function is close to a
numerically estimated value. This is to check the coded-up version.
"""
dx = 1E-3 * np.sqrt(np.sum(self.w0[:self.w0.size//2]**2))
max_x = np.sqrt(np.sum([x**2 for x in self.w0[:self.w0.size//2]]))
grid = np.linspace(-max_x,max_x,8)
grid = grid[grid != 0.]
grids = [grid for i in range(self.w0.size//2)]
xyz = np.ascontiguousarray(np.vstack(map(np.ravel, np.meshgrid(*grids))).T)
def energy_wrap(xyz):
xyz = np.ascontiguousarray(xyz[None])
return self.potential._energy(xyz, t=np.array([0.]))[0]
num_grad = np.zeros_like(xyz)
for i in range(xyz.shape[0]):
num_grad[i] = np.squeeze([partial_derivative(energy_wrap, xyz[i], dim_ix=dim_ix, n=1, dx=dx, order=5)
for dim_ix in range(self.w0.size//2)])
grad = self.potential._gradient(xyz, t=np.array([0.]))
assert np.allclose(num_grad, grad, rtol=self.tol)
def test_orbit_integration(self):
"""
        Make sure we can integrate an orbit in this potential
"""
w0 = self.w0
w0 = np.vstack((w0,w0,w0)).T
t1 = time.time()
orbit = self.potential.integrate_orbit(w0, dt=1., n_steps=10000,
Integrator=LeapfrogIntegrator)
print("Integration time (10000 steps): {}".format(time.time() - t1))
if self.show_plots:
f = orbit.plot()
f.suptitle("Vector w0")
plt.show()
plt.close(f)
us = self.potential.units
w0 = PhaseSpacePosition(pos=w0[:self.ndim]*us['length'],
vel=w0[self.ndim:]*us['length']/us['time'])
orbit = self.potential.integrate_orbit(w0, dt=1., n_steps=10000,
Integrator=LeapfrogIntegrator)
if self.show_plots:
f = orbit.plot()
f.suptitle("Object w0")
plt.show()
plt.close(f)
def test_pickle(self, tmpdir):
fn = str(tmpdir.join("{}.pickle".format(self.name)))
with open(fn, "wb") as f:
pickle.dump(self.potential, f)
with open(fn, "rb") as f:
p = pickle.load(f)
p.energy(self.w0[:self.w0.size//2])
class CompositePotentialTestBase(PotentialTestBase):
@pytest.mark.skip(reason="Skip composite potential repr test")
def test_repr(self):
pass
@pytest.mark.skip(reason="Skip composite potential compare test")
def test_compare(self):
pass
| 38.785978 | 113 | 0.560936 | 9,806 | 0.932927 | 0 | 0 | 1,606 | 0.152792 | 0 | 0 | 1,137 | 0.108172 |
b4343b1a76985ec5d57d6a76843b7a4f2ed671b3 | 9,677 | py | Python | main.py | ailzy/Reinforcement-learning-in-portfolio-management- | 6d850bf52637482636ed8336480343e0e4cef1bd | ["MIT"] | null | null | null | main.py | ailzy/Reinforcement-learning-in-portfolio-management- | 6d850bf52637482636ed8336480343e0e4cef1bd | ["MIT"] | null | null | null | main.py | ailzy/Reinforcement-learning-in-portfolio-management- | 6d850bf52637482636ed8336480343e0e4cef1bd | ["MIT"] | 1 | 2019-05-13T00:54:08.000Z | 2019-05-13T00:54:08.000Z | # -*- coding: utf-8 -*-
from argparse import ArgumentParser
import json
import time
import pandas as pd
import tensorflow as tf
import numpy as np
import math
from decimal import Decimal
import matplotlib.pyplot as plt
from agents.ornstein_uhlenbeck import OrnsteinUhlenbeckActionNoise
eps=10e-8
epochs=0
M=0
class StockTrader():
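    """Per-episode bookkeeping helper: accumulates rewards, losses, wealth,
    portfolio weights and prices, and can write or plot the episode history."""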
def __init__(self):
self.reset()
def reset(self):
self.wealth = 10e3
self.total_reward = 0
self.ep_ave_max_q = 0
self.loss = 0
self.actor_loss=0
self.wealth_history = []
self.r_history = []
self.w_history = []
self.p_history = []
self.noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(M))
def update_summary(self,loss,r,q_value,actor_loss,w,p):
self.loss += loss
self.actor_loss+=actor_loss
self.total_reward+=r
self.ep_ave_max_q += q_value
self.r_history.append(r)
self.wealth = self.wealth * math.exp(r)
self.wealth_history.append(self.wealth)
self.w_history.extend([','.join([str(Decimal(str(w0)).quantize(Decimal('0.00'))) for w0 in w.tolist()[0]])])
self.p_history.extend([','.join([str(Decimal(str(p0)).quantize(Decimal('0.000'))) for p0 in p.tolist()])])
def write(self,epoch):
wealth_history = pd.Series(self.wealth_history)
r_history = pd.Series(self.r_history)
w_history = pd.Series(self.w_history)
p_history = pd.Series(self.p_history)
history = pd.concat([wealth_history, r_history, w_history, p_history], axis=1)
history.to_csv('result' + str(epoch) + '-' + str(math.exp(np.sum(self.r_history)) * 100) + '.csv')
def print_result(self,epoch,agent):
self.total_reward=math.exp(self.total_reward) * 100
        print('*-----Episode: {:d}, Reward:{:.6f}%, ep_ave_max_q:{:.2f}, actor_loss:{:.2f}-----*'.format(epoch, self.total_reward,self.ep_ave_max_q,self.actor_loss))
agent.write_summary(self.loss, self.total_reward,self.ep_ave_max_q,self.actor_loss, epoch)
agent.save_model()
def plot_result(self):
pd.Series(self.wealth_history).plot()
plt.show()
def action_processor(self,a,ratio):
a = np.clip(a + self.noise() * ratio, 0, 1)
a = a / (a.sum() + eps)
return a
def parse_info(info):
    return info['reward'], info['continue'], info['next state'], info['weight vector'], info['price'], info['risk']
def traversal(stocktrader,agent,env,epoch,noise_flag,framework,method,trainable):
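    """Roll out one episode: query the agent for portfolio weights at each
    step, optionally add exploration noise, store the transition, and train
    the agent at the end of the episode when training is enabled."""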
info = env.step(None,None)
r,contin,s,w1,p,risk=parse_info(info)
contin=1
t=0
while contin:
w2 = agent.predict(s,w1)
if noise_flag=='True':
w2=stocktrader.action_processor(w2,(epochs-epoch)/epochs)
env_info = env.step(w1, w2)
r, contin, s_next, w1, p,risk = parse_info(env_info)
if framework=='PG':
agent.save_transition(s,p,w2,w1)
else:
agent.save_transition(s, w2, r-risk, contin, s_next, w1)
loss, q_value,actor_loss=0,0,0
if framework=='DDPG':
if not contin and trainable=="True":
agent_info= agent.train(method,epoch)
loss, q_value=agent_info["critic_loss"],agent_info["q_value"]
if method=='model_based':
actor_loss=agent_info["actor_loss"]
elif framework=='PPO':
if not contin and trainable=="True":
agent_info = agent.train(method, epoch)
loss, q_value = agent_info["critic_loss"], agent_info["q_value"]
if method=='model_based':
actor_loss=agent_info["actor_loss"]
elif framework=='PG':
if not contin and trainable=="True":
agent.train()
stocktrader.update_summary(loss,r,q_value,actor_loss,w2,p)
s = s_next
t=t+1
def backtest(agent,env):
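    """Run the trained agent against simple baselines (Winner, UCRP, Losser)
    on the same environment and plot the resulting wealth curves."""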
print("starting to backtest......")
from agents.UCRP import UCRP
from agents.Winner import WINNER
from agents.Losser import LOSSER
agents=[]
agents.append(agent)
agents.append(WINNER())
agents.append(UCRP())
agents.append(LOSSER())
labels=['PG','Winner','UCRP','Losser']
wealths_result=[]
rs_result=[]
for i,agent in enumerate(agents):
info = env.step(None, None)
r, contin, s, w1, p, risk = parse_info(info)
contin = 1
wealth=10000
wealths = [wealth]
rs=[1]
while contin:
w2 = agent.predict(s, w1)
if i==0:
print(w2)
env_info = env.step(w1, w2)
r, contin, s_next, w1, p, risk = parse_info(env_info)
wealth=wealth*math.exp(r)
rs.append(math.exp(r)-1)
wealths.append(wealth)
s=s_next
print('finish one agent')
wealths_result.append(wealths)
rs_result.append(rs)
for i in range(len(agents)):
plt.plot(wealths_result[i],label=labels[i])
print(labels[i],' ',np.mean(rs_result[i]),' ',np.std(rs_result[i]))
plt.legend()
plt.show()
def parse_config(config,mode):
codes = config["session"]["codes"]
start_date = config["session"]["start_date"]
end_date = config["session"]["end_date"]
features = config["session"]["features"]
agent_config = config["session"]["agents"]
market = config["session"]["market_types"]
noise_flag, record_flag, plot_flag=config["session"]["noise_flag"],config["session"]["record_flag"],config["session"]["plot_flag"]
predictor, framework, window_length = agent_config
reload_flag, trainable=config["session"]['reload_flag'],config["session"]['trainable']
method=config["session"]['method']
global epochs
epochs = int(config["session"]["epochs"])
if mode=='test':
record_flag='True'
noise_flag='False'
plot_flag='True'
reload_flag='True'
trainable='False'
method='model_free'
print("*--------------------Training Status-------------------*")
print('Codes:',codes)
print("Date from",start_date,' to ',end_date)
print('Features:',features)
print("Agent:Noise(",noise_flag,')---Recoed(',noise_flag,')---Plot(',plot_flag,')')
print("Market Type:",market)
print("Predictor:",predictor," Framework:", framework," Window_length:",window_length)
print("Epochs:",epochs)
print("Trainable:",trainable)
print("Reloaded Model:",reload_flag)
print("Method",method)
print("Noise_flag",noise_flag)
print("Record_flag",record_flag)
print("Plot_flag",plot_flag)
return codes,start_date,end_date,features,agent_config,market,predictor, framework, window_length,noise_flag, record_flag, plot_flag,reload_flag,trainable,method
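# For reference, a sketch of the config.json layout that parse_config() reads.
# The key names match the lookups above; the example values (codes, dates,
# features, window length, ...) are placeholders, not values shipped with the
# repository.
#
# {
#   "session": {
#     "codes": ["code1", "code2"],
#     "start_date": "2015-01-01",
#     "end_date": "2018-01-01",
#     "features": ["close", "high", "low"],
#     "agents": ["CNN", "DDPG", "10"],   # [predictor, framework, window_length]
#     "market_types": "China",
#     "epochs": "100",
#     "noise_flag": "True",
#     "record_flag": "False",
#     "plot_flag": "False",
#     "reload_flag": "False",
#     "trainable": "True",
#     "method": "model_free"
#   }
# }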
def session(config,mode):
from data.environment import Environment
codes, start_date, end_date, features, agent_config, market,predictor, framework, window_length,noise_flag, record_flag, plot_flag,reload_flag,trainable,method=parse_config(config,mode)
env = Environment(start_date, end_date, codes, features, int(window_length),market)
global M
M=len(codes)+1
if framework == 'DDPG':
print("*-----------------Loading DDPG Agent---------------------*")
from agents.ddpg import DDPG
agent = DDPG(predictor, len(codes) + 1, int(window_length), len(features), '-'.join(agent_config), reload_flag,trainable)
elif framework == 'PPO':
print("*-----------------Loading PPO Agent---------------------*")
from agents.ppo import PPO
agent = PPO(predictor, len(codes) + 1, int(window_length), len(features), '-'.join(agent_config), reload_flag,trainable)
elif framework == 'PG':
print("*-----------------Loading PG Agent---------------------*")
from agents.pg import PG
agent = PG(len(codes) + 1, int(window_length), len(features), '-'.join(agent_config), reload_flag,trainable)
stocktrader=StockTrader()
if mode=='train':
print("Training with {:d}".format(epochs))
for epoch in range(epochs):
print("Now we are at epoch", epoch)
traversal(stocktrader,agent,env,epoch,noise_flag,framework,method,trainable)
if record_flag=='True':
stocktrader.write(epoch)
if plot_flag=='True':
stocktrader.plot_result()
agent.reset_buffer()
stocktrader.print_result(epoch,agent)
stocktrader.reset()
elif mode=='test':
backtest(agent, env)
def build_parser():
parser = ArgumentParser(description='Provide arguments for training different DDPG or PPO models in Portfolio Management')
parser.add_argument("--mode",dest="mode",help="download(China), train, test",metavar="MODE", default="train",required=True)
parser.add_argument("--model",dest="model",help="DDPG,PPO",metavar="MODEL", default="DDPG",required=False)
return parser
def main():
parser = build_parser()
args=vars(parser.parse_args())
with open('config.json') as f:
config=json.load(f)
if args['mode']=='download':
from data.download_data import DataDownloader
data_downloader=DataDownloader(config)
data_downloader.save_data()
else:
session(config,args['mode'])
if __name__=="__main__":
main() | 36.516981 | 190 | 0.602563 | 2,049 | 0.211739 | 0 | 0 | 0 | 0 | 0 | 0 | 1,492 | 0.15418 |
b43620ea470685e6e28c7e7bc58a0b84c3272e13 | 7,365 | py | Python | packages/structural_dhcp_mriqc/structural_dhcp_mriqc/utils/fs2gif.py | amakropoulos/structural-pipeline-measures | 70e22f9ad94cc57e72e510576cfc3129da83f7fc | [
"Apache-2.0"
]
| 2 | 2017-09-11T15:25:14.000Z | 2019-09-27T17:08:31.000Z | packages/structural_dhcp_mriqc/structural_dhcp_mriqc/utils/fs2gif.py | amakropoulos/structural-pipeline-measures | 70e22f9ad94cc57e72e510576cfc3129da83f7fc | [
"Apache-2.0"
]
| 6 | 2019-08-22T06:29:45.000Z | 2021-09-19T18:59:46.000Z | packages/structural_dhcp_mriqc/structural_dhcp_mriqc/utils/fs2gif.py | amakropoulos/structural-pipeline-measures | 70e22f9ad94cc57e72e510576cfc3129da83f7fc | [
"Apache-2.0"
]
| 1 | 2018-02-12T14:38:33.000Z | 2018-02-12T14:38:33.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: oesteban
# @Date: 2016-03-16 11:28:27
# @Last Modified by: oesteban
# @Last Modified time: 2016-04-04 13:50:50
"""
Batch export freesurfer results to animated gifs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import os.path as op
import subprocess as sp
from shutil import rmtree
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
from tempfile import mkdtemp
from errno import EEXIST
import glob
from six import string_types
import numpy as np
import nibabel as nb
from skimage import exposure
def main():
"""Entry point"""
parser = ArgumentParser(description='Batch export freesurfer results to animated gifs',
formatter_class=RawTextHelpFormatter)
g_input = parser.add_argument_group('Inputs')
g_input.add_argument('-S', '--subjects-dir', action='store', default=os.getcwd())
g_input.add_argument('-s', '--subject-id', action='store')
g_input.add_argument('-t', '--temp-dir', action='store')
g_input.add_argument('--keep-temp', action='store_true', default=False)
g_input.add_argument('--zoom', action='store_true', default=False)
g_input.add_argument('--hist-eq', action='store_true', default=False)
g_outputs = parser.add_argument_group('Outputs')
g_outputs.add_argument('-o', '--output-dir', action='store', default='fs2gif')
opts = parser.parse_args()
if opts.temp_dir is None:
tmpdir = mkdtemp()
else:
tmpdir = op.abspath(opts.temp_dir)
try:
os.makedirs(tmpdir)
except OSError as exc:
if exc.errno != EEXIST:
raise exc
out_dir = op.abspath(opts.output_dir)
try:
os.makedirs(out_dir)
except OSError as exc:
if exc.errno != EEXIST:
raise exc
subjects_dir = op.abspath(opts.subjects_dir)
subject_list = opts.subject_id
if subject_list is None:
subject_list = [name for name in os.listdir(subjects_dir)
if op.isdir(os.path.join(subjects_dir, name))]
elif isinstance(subject_list, string_types):
if '*' not in subject_list:
subject_list = [subject_list]
else:
all_dirs = [op.join(subjects_dir, name) for name in os.listdir(subjects_dir)
if op.isdir(os.path.join(subjects_dir, name))]
pattern = glob.glob(op.abspath(op.join(subjects_dir, opts.subject_id)))
subject_list = list(set(pattern).intersection(set(all_dirs)))
environ = os.environ.copy()
environ['SUBJECTS_DIR'] = subjects_dir
# tcl_file = pkgr.resource_filename('structural_dhcp_mriqc', 'data/fsexport.tcl')
tcl_contents = """
SetOrientation 0
SetCursor 0 128 128 128
SetDisplayFlag 3 0
SetDisplayFlag 22 1
set i 0
"""
for sub_path in subject_list:
subid = op.basename(sub_path)
tmp_sub = op.join(tmpdir, subid)
try:
os.makedirs(tmp_sub)
except OSError as exc:
if exc.errno != EEXIST:
raise exc
niifile = op.join(tmp_sub, '%s.nii.gz') % subid
ref_file = op.join(sub_path, 'mri', 'T1.mgz')
sp.call(['mri_convert', op.join(sub_path, 'mri', 'norm.mgz'), niifile],
cwd=tmp_sub)
data = nb.load(niifile).get_data()
data[data > 0] = 1
# Compute brain bounding box
indexes = np.argwhere(data)
bbox_min = indexes.min(0)
bbox_max = indexes.max(0) + 1
center = np.average([bbox_min, bbox_max], axis=0)
if opts.hist_eq:
modnii = op.join(tmp_sub, '%s.nii.gz' % subid)
ref_file = op.join(tmp_sub, '%s.mgz' % subid)
img = nb.load(niifile)
data = exposure.equalize_adapthist(img.get_data(), clip_limit=0.03)
nb.Nifti1Image(data, img.get_affine(), img.get_header()).to_filename(modnii)
sp.call(['mri_convert', modnii, ref_file], cwd=tmp_sub)
if not opts.zoom:
# Export tiffs for left hemisphere
tcl_file = op.join(tmp_sub, '%s.tcl' % subid)
with open(tcl_file, 'w') as tclfp:
tclfp.write(tcl_contents)
tclfp.write('for { set slice %d } { $slice < %d } { incr slice } {' % (bbox_min[2], bbox_max[2]))
tclfp.write(' SetSlice $slice\n')
tclfp.write(' RedrawScreen\n')
tclfp.write(' SaveTIFF [format "%s/%s-' % (tmp_sub, subid) + '%03d.tif" $i]\n')
tclfp.write(' incr i\n')
tclfp.write('}\n')
tclfp.write('QuitMedit\n')
sp.call(['tkmedit', subid, 'T1.mgz', 'lh.pial', '-aux-surface', 'rh.pial', '-tcl', tcl_file], env=environ)
# Convert to animated gif
sp.call(['convert', '-delay', '10', '-loop', '0', '%s/%s-*.tif' % (tmp_sub, subid),
'%s/%s.gif' % (out_dir, subid)])
else:
# Export tiffs for left hemisphere
tcl_file = op.join(tmp_sub, 'lh-%s.tcl' % subid)
with open(tcl_file, 'w') as tclfp:
tclfp.write(tcl_contents)
tclfp.write('SetZoomLevel 2')
tclfp.write('for { set slice %d } { $slice < %d } { incr slice } {' % (bbox_min[2], bbox_max[2]))
tclfp.write(' SetZoomCenter %d %d $slice\n' % (center[0] + 30, center[1] - 10))
tclfp.write(' SetSlice $slice\n')
tclfp.write(' RedrawScreen\n')
tclfp.write(' SaveTIFF [format "%s/%s-lh-' % (tmp_sub, subid) + '%03d.tif" $i]\n')
tclfp.write(' incr i\n')
tclfp.write('}\n')
tclfp.write('QuitMedit\n')
sp.call(['tkmedit', subid, 'norm.mgz', 'lh.white', '-tcl', tcl_file], env=environ)
# Export tiffs for right hemisphere
tcl_file = op.join(tmp_sub, 'rh-%s.tcl' % subid)
with open(tcl_file, 'w') as tclfp:
tclfp.write(tcl_contents)
tclfp.write('SetZoomLevel 2')
tclfp.write('for { set slice %d } { $slice < %d } { incr slice } {' % (bbox_min[2], bbox_max[2]))
tclfp.write(' SetZoomCenter %d %d $slice\n' % (center[0] - 30, center[1] - 10))
tclfp.write(' SetSlice $slice\n')
tclfp.write(' RedrawScreen\n')
tclfp.write(' SaveTIFF [format "%s/%s-rh-' % (tmp_sub, subid) + '%03d.tif" $slice]\n')
tclfp.write(' incr i\n')
tclfp.write('}\n')
tclfp.write('QuitMedit\n')
sp.call(['tkmedit', subid, 'norm.mgz', 'rh.white', '-tcl', tcl_file], env=environ)
# Convert to animated gif
sp.call(['convert', '-delay', '10', '-loop', '0', '%s/%s-lh-*.tif' % (tmp_sub, subid),
'%s/%s-lh.gif' % (out_dir, subid)])
sp.call(['convert', '-delay', '10', '-loop', '0', '%s/%s-rh-*.tif' % (tmp_sub, subid),
'%s/%s-rh.gif' % (out_dir, subid)])
if not opts.keep_temp:
try:
rmtree(tmp_sub)
except:
pass
if __name__ == '__main__':
main()
| 39.810811 | 118 | 0.567549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,933 | 0.262458 |
b4366804d5c82535ca7d92caff9e07608cd7136b | 10,751 | py | Python | DE_DataBase.py | almirjgomes/DE_DataBaseConnect | 2a369d77498c4c6c42b7447871472e5c4320b2ff | [
"MIT"
]
| null | null | null | DE_DataBase.py | almirjgomes/DE_DataBaseConnect | 2a369d77498c4c6c42b7447871472e5c4320b2ff | [
"MIT"
]
| null | null | null | DE_DataBase.py | almirjgomes/DE_DataBaseConnect | 2a369d77498c4c6c42b7447871472e5c4320b2ff | [
"MIT"
]
| null | null | null | import os
import sqlite3 as sq3
import cx_Oracle as ora
import pandas as pd
import psycopg2 as ps2
import mysql.connector as mysql
import sqlalchemy
# Responsibilities of this class:
# Only to connect to one of the databases specified below
# Known databases: SQLITE, ORACLE, MYSQL, POSTGRES
class DATABASE:
def __init__(self):
pass
    def ORACLE(self, string_conect: dict):
instancia = None
try:
            # Set the path to the ORACLE client library
if string_conect["path_library"] is None:
pathlib = os.getenv("ORACLE_LIB")
else:
pathlib = string_conect["path_library"]
            # Check whether the Oracle client library has already been initialised
try:
ora.init_oracle_client(lib_dir=pathlib)
except:
pass
            # do nothing (leave it like this so no error is raised)
database = string_conect["database"]
driver = "cx_oracle"
user = string_conect["username"]
pwd = string_conect["password"]
host = string_conect["host"]
port = string_conect["port"]
            # Determine the instance type: SID or SERVICE_NAME
if string_conect["sid"] is not None:
#dnsName = ora.makedsn(host=string_conect["host"], port=string_conect["port"], sid=string_conect["sid"])
dnsName = f"""{string_conect["host"]}:{string_conect["port"]}/{string_conect["sid"]}"""
else:
#dnsName = ora.makedsn(host=string_conect["host"], port=string_conect["port"], service_name=string_conect["service_name"])
dnsName = f"""{string_conect["host"]}:{string_conect["port"]}/{string_conect["service_name"]}"""
str_cnn = f"""{database.lower()}+{driver}://{user}:{pwd}@{dnsName}"""
engine = sqlalchemy.create_engine(str_cnn, arraysize=1000)
except Exception as error:
engine = error
finally:
return engine
def ORACLE_NAT(self, string_connect: dict):
pathlib, cnn = None, None
try:
            # Set the path to the ORACLE client library
if "library" in string_connect.keys():
if string_connect["library"] is None:
pathlib = os.getenv("ORACLE_LIB")
else:
pathlib = string_connect["library"]
else:
pathlib = os.getenv("ORACLE_LIB")
            # Check whether the Oracle client library has already been initialised
try:
ora.init_oracle_client(lib_dir=pathlib)
except:
pass
            # do nothing (leave it like this so no error is raised)
            # Determine the instance type: SID or SERVICE_NAME
if string_connect["sid"] is not None:
dnsName = ora.makedsn(host=string_connect["host"], port=string_connect["port"], sid=string_connect["sid"])
else:
dnsName = ora.makedsn(host=string_connect["host"], port=string_connect["port"], service_name=string_connect["service_name"])
            # Connect to the database instance
cnn = ora.connect(string_connect["username"], string_connect["password"], dnsName, threaded=True)
except Exception as error:
msg = f"""Falha ao tentar se conectar com o banco de dados ORACLE [{string_connect["name_conection"]}].\nErro: {error} """
cnn = msg
finally:
return cnn
def SQLITE(self, database):
DATABASE_NAME, result, msg, conn = None, False, None, None
try:
if os.path.isfile(database):
conn = sq3.connect(database)
msg = f"""SQLITE [{database}]- Conexao efetuada com sucesso!"""
else:
msg = f"""SQLITE [{database}]- Não existe no local informado!"""
raise Exception
except Exception as error:
msg = f"""Falha ao tentar conectar com o banco de dados SQLITE "{DATABASE_NAME}". Erro: {error} """
            conn = None
finally:
return conn
    def POSTGRES(self, string_connect: dict):
msg, cnn = None, None
try:
            # Connect to the database instance
cnn = ps2.connect(user=string_connect["username"], password=string_connect["password"], database=string_connect["instance"], host=string_connect["host"])
except Exception as error:
msg = f"""Falha ao tentar se conectar com o banco de dados POSTGRES.\n """
cnn = msg
finally:
return cnn
    def MYSQL(self, string_connect: dict):
msg, cnn = None, None
try:
            # Connect to the database instance
cnn = mysql.connect(user=string_connect["username"], password=string_connect["password"], database=string_connect["instance"], host=string_connect["host"])
except Exception as error:
msg = f"""Falha ao tentar se conectar com o banco de dados MYSQL.\n """
cnn = msg
finally:
return cnn
def METADATA(self,
conexao: object,
database: str,
nome_tabela: str,
alias: str = 'x',
quoted: bool = False,
rowid: bool = False,
join: str = None,
where: str = None,
orderby: str = None,
limit: int = 0
) -> str:
try:
querys = {"ORACLE": f"""Select * from all_tab_columns where table_name = '{nome_tabela}' order by column_id""""",
"POSTGRES": f"""Select * from information_schema.columns where table_name = '{nome_tabela}' order by ordinal_position""",
"SQLITE": f"""Select * from pragma_table_info('{nome_tabela}') order by cid""",
"MYSQL": f"""Select * from information_schema.columns where table_name = '{nome_tabela}' order by ordinal_position"""}
qry = querys[database]
df = pd.read_sql(con=conexao, sql=qry)
nom_owner, column_list = None, []
            # GET THE COLUMNS
for index, row in df.iterrows():
# -----------------------------------------
                # SQLITE database
if database == "SQLITE":
column = df.loc[index, "name"]
# OWNER
nom_owner = ""
# QUOTED
if quoted:
column_list.append(f"""{alias}.\"{column}\"""")
else:
column_list.append(f"""{alias}.{column}""")
# -----------------------------------------
                # ORACLE database
elif database == 'ORACLE':
column = df.loc[index, "column_name"]
# QUOTED
if quoted:
column_list.append(f"""{alias}.\"{column}\"""")
# OWNER
nom_owner = f"""\"{row.owner}"."""
else:
column_list.append(f"""{alias}.{column}""")
# OWNER
nom_owner = f"""{row.owner}."""
                # MYSQL database
elif database == "MYSQL":
column = df.loc[index, "column_name"]
# QUOTED
if quoted:
column_list.append(f"""{alias}.\"{column}\"""")
else:
column_list.append(f"""{alias}.{column}""")
# OWNER
nom_owner = ""
# -----------------------------------------
                # POSTGRES database
elif database == "POSTGRES":
column = df.loc[index, "column_name".lower()]
# QUOTED
if quoted:
column_list.append(f"""{alias}.\"{column}\"""")
else:
column_list.append(f"""{alias}.{column}""")
# OWNER
nom_owner = ""
# ROWID
if rowid:
# -----------------------------------------
                # SQLITE database
if database == "SQLITE":
column_list.append(f"""{alias}.ROWID ROWID_TABELA""")
# -----------------------------------------
                # ORACLE database
elif database == "ORACLE":
column_list.append(f"""rowidtochar({alias}.Rowid) "ROWID_TABELA" """)
# -----------------------------------------
                # MYSQL database
elif database == "MYSQL":
                    # not implemented
                    # need to identify which MYSQL column carries this information
pass
# -----------------------------------------
                # POSTGRES database
elif database == "POSTGRES":
column_list.append(f"""{alias}.row_number() OVER () ROWID_TABELA""")
            # Assemble the column list
colunas = "\n ,".join(column_list)
select = f"""select {colunas}"""
            # TABLE NAME
tabela = f"""\n from {nome_tabela.strip()} {alias.strip()}"""
# JOIN
if join is None:
join = ""
else:
join = f"""\n {join}"""
#WHERE
if where is None:
if database == "ORACLE" and limit > 0:
where = f"""\n where rownum <= {limit}"""
else:
where = ""
else:
if database == "ORACLE" and limit > 0:
where = f"""\n {where.strip()}\n and rownum <= {limit}"""
else:
where = f"""\n {where.strip()}"""
#ORDERBY
if orderby is None:
orderby = ""
else:
orderby = f"""\n {orderby.strip()}"""
# LIMIT
if database in ["MYSQL", "SQLITE", "POSTGRES"]:
if limit > 0:
limit = f"""\nlimit {limit}"""
else:
limit = ""
else:
limit = ""
qry = f"""{select}{tabela}{join}{where}{orderby}{limit}""".lstrip()
msg = qry
except Exception as error:
            msg = f"""{error} {qry}"""
finally:
return msg
if __name__ == "__main__":
pass
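# Usage sketch (illustrative only -- the SQLite file and table name below are
# hypothetical, not part of this repository):
#
#   db = DATABASE()
#   conn = db.SQLITE("/tmp/example.db")
#   qry = db.METADATA(conexao=conn, database="SQLITE",
#                     nome_tabela="customers", alias="c", limit=10)
#   df = pd.read_sql(con=conn, sql=qry)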
| 40.878327 | 167 | 0.474467 | 10,416 | 0.9683 | 0 | 0 | 0 | 0 | 0 | 0 | 3,792 | 0.352515 |
b4372d11f9380b54abe868161855c4d8eb68fe8d | 3,301 | py | Python | peter_lists/blog/views.py | pvize1/peter_lists | 77e9f30cfc45f500e059b7b163db541335180332 | [
"MIT"
]
| null | null | null | peter_lists/blog/views.py | pvize1/peter_lists | 77e9f30cfc45f500e059b7b163db541335180332 | [
"MIT"
]
| 8 | 2021-05-12T05:53:42.000Z | 2022-03-31T04:08:18.000Z | peter_lists/blog/views.py | pvize1/peter_lists | 77e9f30cfc45f500e059b7b163db541335180332 | [
"MIT"
]
| null | null | null | from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.views.generic import (
ListView,
DetailView,
CreateView,
UpdateView,
DeleteView,
)
from django.shortcuts import render
from django.db.models import Count
from django.db.models.functions import Trim, Lower
from django.urls import reverse_lazy
from .models import Blog
from .forms import EditBlogForm
def tag_count(blog_user, topn=0):
# TODO Move to model manager
raw_tags = (
Blog.blog.filter(user=blog_user)
.order_by("tag")
.values("tag")
.annotate(count=Count("tag"), tag_new=Trim(Lower("tag")))
)
count_tags = dict()
# TODO Split by tags with "," and those without
for record in raw_tags:
for tag in record["tag_new"].split(","):
k = tag.strip()
if len(k) > 0:
count_tags[k] = count_tags.get(k, 0) + record["count"]
# TODO Sort by value (desc) and then key (ascend) for common values
if topn == 0:
return {
k: count_tags[k]
for k in sorted(count_tags, key=count_tags.get, reverse=True)
}
else:
return {
k: count_tags[k]
for k in sorted(count_tags, key=count_tags.get, reverse=True)[:topn]
}
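# For example (hypothetical data): if a user's posts carry the tags
# "django, python" and "python", tag_count(user) returns
# {"python": 2, "django": 1}, and tag_count(user, topn=1) returns {"python": 2}.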
# Create your views here.
def BlogHome(request):
blog_all = Blog.blog.filter(user=request.user)
blogs = blog_all.order_by("-modified")[:3]
blog_count = blog_all.count()
tag_sorted = tag_count(request.user, topn=5)
return render(
request,
"blog/blog_home.html",
{"blogs": blogs, "tags": tag_sorted, "blog_count": blog_count},
)
class BlogListView(PermissionRequiredMixin, ListView):
model = Blog
paginate_by = 3
template_name = "blog/blog_list.html"
permission_required = "blog.view_blog"
def get_queryset(self):
return Blog.blog.filter(user=self.request.user)
def BlogAllTagsView(request):
# TODO turn into ListView with paginate
tag_sorted = tag_count(request.user)
return render(request, "blog/blog_tags.html", {"tags": tag_sorted})
class BlogTagListView(PermissionRequiredMixin, ListView):
model = Blog
paginate_by = 3
template_name = "blog/blog_list.html"
permission_required = "blog.view_blog"
def get_queryset(self):
return Blog.blog.filter(tag__contains=self.kwargs["tag_name"], user=self.request.user)
class BlogDetailView(PermissionRequiredMixin, DetailView):
model = Blog
template_name = "blog/blog_detail.html"
permission_required = "blog.view_blog"
class BlogCreateView(PermissionRequiredMixin, LoginRequiredMixin, CreateView):
form_class = EditBlogForm
model = Blog
action = "Add"
template_name = "blog/blog_form.html"
permission_required = "blog.add_blog"
class BlogUpdateView(PermissionRequiredMixin, LoginRequiredMixin, UpdateView):
form_class = EditBlogForm
model = Blog
action = "Edit"
template_name = "blog/blog_form.html"
permission_required = "blog.change_blog"
class BlogDeleteView(PermissionRequiredMixin, LoginRequiredMixin, DeleteView):
model = Blog
success_url = reverse_lazy("blog:list")
permission_required = "blog.delete_blog"
| 29.212389 | 94 | 0.684035 | 1,370 | 0.415026 | 0 | 0 | 0 | 0 | 0 | 0 | 567 | 0.171766 |
b4378b3e91302a7b53287f43ef0ed313d4ff8c2f | 1,992 | py | Python | tests/test_pythonpath.py | browniebroke/pytest-srcpaths | c0bf4a9b521c8f7af029f9923b344936cf425bf1 | [
"MIT"
]
| 26 | 2021-02-18T20:49:41.000Z | 2022-02-08T21:06:20.000Z | tests/test_pythonpath.py | browniebroke/pytest-srcpaths | c0bf4a9b521c8f7af029f9923b344936cf425bf1 | [
"MIT"
]
| null | null | null | tests/test_pythonpath.py | browniebroke/pytest-srcpaths | c0bf4a9b521c8f7af029f9923b344936cf425bf1 | [
"MIT"
]
| 2 | 2021-04-04T01:45:37.000Z | 2022-02-07T11:28:51.000Z | import sys
from typing import Generator
from typing import List
from typing import Optional
import pytest
from _pytest.pytester import Pytester
def test_one_dir_pythonpath(pytester: Pytester, file_structure) -> None:
pytester.makefile(".ini", pytest="[pytest]\npythonpath=sub\n")
result = pytester.runpytest("test_foo.py")
assert result.ret == 0
result.assert_outcomes(passed=1)
def test_two_dirs_pythonpath(pytester: Pytester, file_structure) -> None:
pytester.makefile(".ini", pytest="[pytest]\npythonpath=sub sub2\n")
result = pytester.runpytest("test_foo.py", "test_bar.py")
assert result.ret == 0
result.assert_outcomes(passed=2)
def test_unconfigure_unadded_dir_pythonpath(pytester: Pytester) -> None:
pytester.makeconftest(
"""
def pytest_configure(config):
config.addinivalue_line("pythonpath", "sub")
"""
)
pytester.makepyfile(
"""
import sys
def test_something():
pass
"""
)
result = pytester.runpytest()
result.assert_outcomes(passed=1)
def test_clean_up_pythonpath(pytester: Pytester) -> None:
"""Test that the srcpaths plugin cleans up after itself."""
pytester.makefile(".ini", pytest="[pytest]\npythonpath=I_SHALL_BE_REMOVED\n")
pytester.makepyfile(test_foo="""def test_foo(): pass""")
before: Optional[List[str]] = None
after: Optional[List[str]] = None
class Plugin:
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_unconfigure(self) -> Generator[None, None, None]:
nonlocal before, after
before = sys.path.copy()
yield
after = sys.path.copy()
result = pytester.runpytest_inprocess(plugins=[Plugin()])
assert result.ret == 0
assert before is not None
assert after is not None
assert any("I_SHALL_BE_REMOVED" in entry for entry in before)
assert not any("I_SHALL_BE_REMOVED" in entry for entry in after)
| 30.181818 | 81 | 0.676205 | 266 | 0.133534 | 899 | 0.451305 | 244 | 0.12249 | 0 | 0 | 476 | 0.238956 |
b4379f94d32e1eef87fdbc70ab371bde034c9874 | 1,735 | py | Python | coretemp.py | InScene/dht22-mqtt-daemon | 9a73715f4074f11222d1a6b263c12c897fadf0de | [
"MIT"
]
| null | null | null | coretemp.py | InScene/dht22-mqtt-daemon | 9a73715f4074f11222d1a6b263c12c897fadf0de | [
"MIT"
]
| null | null | null | coretemp.py | InScene/dht22-mqtt-daemon | 9a73715f4074f11222d1a6b263c12c897fadf0de | [
"MIT"
]
| null | null | null | #!/usr/bin/env python2
import paho.mqtt.client as mqtt
import time
import Adafruit_DHT
from configparser import ConfigParser
import json
config = ConfigParser(delimiters=('=', ))
config.read('config.ini')
sensor_type = config['sensor'].get('type', 'dht22').lower()
if sensor_type == 'dht22':
sensor = Adafruit_DHT.DHT22
elif sensor_type == 'dht11':
    sensor = Adafruit_DHT.DHT11
elif sensor_type == 'am2302':
sensor = Adafruit_DHT.AM2302
else:
raise Exception('Supported sensor types: DHT22, DHT11, AM2302')
pin = config['sensor'].get('pin', 10)
topic = config['mqtt'].get('topic', 'temperature/dht22')
decim_digits = config['sensor'].getint('decimal_digits', 2)
sleep_time = config['sensor'].getint('interval', 60)
user = config['mqtt'].get('user', 'guest')
password = config['mqtt'].get('password', 'guest')
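# A minimal config.ini sketch matching the keys read above (the values are
# illustrative placeholders, not real hosts or credentials):
#
#   [sensor]
#   type = dht22
#   pin = 4
#   interval = 60
#   decimal_digits = 2
#
#   [mqtt]
#   hostname = homeassistant
#   port = 1883
#   timeout = 60
#   topic = temperature/dht22
#   user = guest
#   password = guest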
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code {}".format(rc))
client = mqtt.Client()
client.on_connect = on_connect
client.username_pw_set(user, password)
client.connect(config['mqtt'].get('hostname', 'homeassistant'),
config['mqtt'].getint('port', 1883),
config['mqtt'].getint('timeout', 60))
client.loop_start()
while True:
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
if humidity is not None and temperature is not None:
data = {'temperature': round(temperature, decim_digits),
'humidity': round(humidity, decim_digits)}
client.publish(topic, json.dumps(data))
print('Published. Sleeping ...')
else:
print('Failed to get reading. Skipping ...')
time.sleep(sleep_time)
| 30.438596 | 79 | 0.688184 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 508 | 0.292795 |
b437d795dd924c40c4d023f3c55940133611431e | 663 | py | Python | mythril/support/support_utils.py | step21/mythril | d26a68e5473a57bd38091e1a5cad96a2b4e2c2ab | [
"MIT"
]
| null | null | null | mythril/support/support_utils.py | step21/mythril | d26a68e5473a57bd38091e1a5cad96a2b4e2c2ab | [
"MIT"
]
| 21 | 2019-04-12T17:54:51.000Z | 2021-11-04T18:47:45.000Z | mythril/support/support_utils.py | step21/mythril | d26a68e5473a57bd38091e1a5cad96a2b4e2c2ab | [
"MIT"
]
| 1 | 2021-09-06T03:14:58.000Z | 2021-09-06T03:14:58.000Z | """This module contains utility functions for the Mythril support package."""
from typing import Dict
class Singleton(type):
"""A metaclass type implementing the singleton pattern."""
_instances = {} # type: Dict
def __call__(cls, *args, **kwargs):
"""Delegate the call to an existing resource or a a new one.
This is not thread- or process-safe by default. It must be protected with
a lock.
:param args:
:param kwargs:
:return:
"""
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
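# Usage sketch (hypothetical class, not part of this module): any class created
# with this metaclass hands back the same instance on every call.
#
#   class Config(metaclass=Singleton):
#       pass
#
#   assert Config() is Config()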
| 28.826087 | 81 | 0.627451 | 558 | 0.841629 | 0 | 0 | 0 | 0 | 0 | 0 | 380 | 0.573152 |
b437ff845481fd16be2f8fc1d410e6c3c3a17c1d | 554 | py | Python | tests/functions/list/test_lists_map.py | sukovanej/mplisp | a3faf8c06936bcc5cde59899abf41a1b379090f5 | [
"MIT"
]
| null | null | null | tests/functions/list/test_lists_map.py | sukovanej/mplisp | a3faf8c06936bcc5cde59899abf41a1b379090f5 | [
"MIT"
]
| null | null | null | tests/functions/list/test_lists_map.py | sukovanej/mplisp | a3faf8c06936bcc5cde59899abf41a1b379090f5 | [
"MIT"
]
| null | null | null | import unittest
import mplisp.evaluator as evaluator
class TestListMap(unittest.TestCase):
    def test_map(self):
input1 = """
(map (lambda (x) (* 2 x)) (list 1 2 3))
"""
output1 = list(evaluator.evaluate(input1))
self.assertEqual(output1[0], [2, 4, 6])
    def test_map_2(self):
input1 = """
(import "sys")
(def a (list 1 2 3 4))
(map (lambda (x) (* 2 x)) a)
"""
output1 = list(evaluator.evaluate(input1))
self.assertEqual(output1[2], [2, 4, 6, 8])
| 22.16 | 50 | 0.534296 | 498 | 0.898917 | 0 | 0 | 0 | 0 | 0 | 0 | 169 | 0.305054 |
b4387eea371c6bde1ade7a6d0d94c1c04a7c6258 | 1,210 | py | Python | malpickle/main.py | erose1337/malpickle | 3c708426d5f5e33d3e232d77cbbfca0a955d6ebf | [
"MIT"
]
| null | null | null | malpickle/main.py | erose1337/malpickle | 3c708426d5f5e33d3e232d77cbbfca0a955d6ebf | [
"MIT"
]
| null | null | null | malpickle/main.py | erose1337/malpickle | 3c708426d5f5e33d3e232d77cbbfca0a955d6ebf | [
"MIT"
]
| null | null | null | import argparse
from __init__ import insert_code
def main():
parser = argparse.ArgumentParser(description="Inject code into pickle files")
parser.add_argument("pickle_file", help="The pickle file to inject code into")
parser.add_argument("code_file", help="The shell script to inject")
#parser.add_argument("-u", "--unittest", help="Only run the unit test; Ignores pickle_file and code_file", type=bool)
args = parser.parse_args()
# if args.unittest:
# return test_insert_code()
filename = args.pickle_file
code_file = args.code_file
with open(filename, "rb+") as pickle_file, open(code_file, 'r') as code_file:
saved_data = pickle_file.read()
_malpickle = insert_code(code_file.read(), saved_data)
pickle_file.truncate(0)
pickle_file.seek(0)
pickle_file.write(_malpickle)
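# Example invocation (hypothetical file names):
#   python main.py saved_model.pickle payload.sh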
def test_insert_code():
import pickle
shell_code = "echo RCE"
data = ({1 : ['a', None, (.1, 0xff)]}, object, tuple)
saved = pickle.dumps(data)
malpickle = insert_code(shell_code, saved)
output = pickle.loads(malpickle)
assert output == data, (output, data)
if __name__ == "__main__":
#test_insert_code()
main()
| 31.842105 | 121 | 0.680165 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 343 | 0.283471 |
b43894ad3119624561e61e4cdbc634a63ac5df12 | 1,923 | py | Python | src/redis_lock/django_cache.py | suligap/python-redis-lock | 369e95bb5e26284ef0944e551f93d9f2596e5345 | [
"BSD-2-Clause"
]
| null | null | null | src/redis_lock/django_cache.py | suligap/python-redis-lock | 369e95bb5e26284ef0944e551f93d9f2596e5345 | [
"BSD-2-Clause"
]
| null | null | null | src/redis_lock/django_cache.py | suligap/python-redis-lock | 369e95bb5e26284ef0944e551f93d9f2596e5345 | [
"BSD-2-Clause"
]
| null | null | null | from django.core.cache.backends.base import DEFAULT_TIMEOUT
from django_redis.cache import RedisCache as PlainRedisCache
from redis_lock import Lock
from redis_lock import reset_all
class RedisCache(PlainRedisCache):
@property
def __client(self):
try:
return self.client.get_client()
except Exception as exc:
raise NotImplementedError(
"RedisCache doesn't have a raw client: %r. "
"Use 'redis_cache.client.DefaultClient' as the CLIENT_CLASS !" % exc
)
def lock(self, key, expire=None, id=None):
return Lock(self.__client, key, expire=expire, id=id)
def locked_get_or_set(self, key, value_creator, version=None,
expire=None, id=None, lock_key=None,
timeout=DEFAULT_TIMEOUT):
"""
Fetch a given key from the cache. If the key does not exist, the key is added and
set to the value returned when calling `value_creator`. The creator function
is invoked inside of a lock.
"""
if lock_key is None:
lock_key = 'get_or_set:' + key
val = self.get(key, version=version)
if val is not None:
return val
with self.lock(lock_key, expire=expire, id=id):
# Was the value set while we were trying to acquire the lock?
val = self.get(key, version=version)
if val is not None:
return val
# Nope, create value now.
val = value_creator()
if val is None:
raise ValueError('`value_creator` must return a value')
self.set(key, val, timeout=timeout, version=version)
return val
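    # Usage sketch: `cache` is assumed to be a configured RedisCache instance and
    # `build_report` a hypothetical expensive loader -- it only runs when the key
    # is missing, and only in the process holding the lock:
    #
    #   value = cache.locked_get_or_set(
    #       "expensive:report",
    #       lambda: build_report(),
    #       timeout=300,
    #   )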
def reset_all(self):
"""
Forcibly deletes all locks if its remains (like a crash reason). Use this with care.
"""
reset_all(self.__client)
| 33.155172 | 92 | 0.598024 | 1,737 | 0.903276 | 0 | 0 | 322 | 0.167447 | 0 | 0 | 577 | 0.300052 |
b438f353825f2b371f64bd83071ca8831b7f58ce | 3,510 | py | Python | nets/facenet.py | QiongWang-l/llfr | 00f62f03dd2964add1ff1b007292d06afff708f4 | [
"MIT"
]
| null | null | null | nets/facenet.py | QiongWang-l/llfr | 00f62f03dd2964add1ff1b007292d06afff708f4 | [
"MIT"
]
| null | null | null | nets/facenet.py | QiongWang-l/llfr | 00f62f03dd2964add1ff1b007292d06afff708f4 | [
"MIT"
]
| null | null | null | import torch
import torch.nn as nn
from torch.nn import functional as F
from torchvision.models.utils import load_state_dict_from_url
from nets.inception_resnetv1 import InceptionResnetV1
from nets.mobilenet import MobileNetV1
class mobilenet(nn.Module):
def __init__(self, pretrained):
super(mobilenet, self).__init__()
self.model = MobileNetV1()
if pretrained:
state_dict = load_state_dict_from_url("https://github.com/bubbliiiing/facenet-pytorch/releases/download/v1.0/backbone_weights_of_mobilenetv1.pth", model_dir="model_data",
progress=True)
self.model.load_state_dict(state_dict)
del self.model.fc
del self.model.avg
def forward(self, x):
x = self.model.stage1(x)
x = self.model.stage2(x)
x = self.model.stage3(x)
return x
class inception_resnet(nn.Module):
def __init__(self, pretrained):
super(inception_resnet, self).__init__()
self.model = InceptionResnetV1()
if pretrained:
state_dict = load_state_dict_from_url("https://github.com/bubbliiiing/facenet-pytorch/releases/download/v1.0/backbone_weights_of_inception_resnetv1.pth", model_dir="model_data",
progress=True)
self.model.load_state_dict(state_dict)
def forward(self, x):
x = self.model.conv2d_1a(x)
x = self.model.conv2d_2a(x)
x = self.model.conv2d_2b(x)
x = self.model.maxpool_3a(x)
x = self.model.conv2d_3b(x)
x = self.model.conv2d_4a(x)
x = self.model.conv2d_4b(x)
x = self.model.repeat_1(x)
x = self.model.mixed_6a(x)
x = self.model.repeat_2(x)
x = self.model.mixed_7a(x)
x = self.model.repeat_3(x)
x = self.model.block8(x)
return x
class Facenet(nn.Module):
def __init__(self, backbone="mobilenet", dropout_keep_prob=0.5, embedding_size=128, num_classes=None, mode="train", pretrained=False):
super(Facenet, self).__init__()
if backbone == "mobilenet":
self.backbone = mobilenet(pretrained)
flat_shape = 1024
elif backbone == "inception_resnetv1":
self.backbone = inception_resnet(pretrained)
flat_shape = 1792
else:
raise ValueError('Unsupported backbone - `{}`, Use mobilenet, inception_resnetv1.'.format(backbone))
self.avg = nn.AdaptiveAvgPool2d((1,1))
self.Dropout = nn.Dropout(1 - dropout_keep_prob)
self.Bottleneck = nn.Linear(flat_shape, embedding_size,bias=False)
self.last_bn = nn.BatchNorm1d(embedding_size, eps=0.001, momentum=0.1, affine=True)
if mode == "train":
self.classifier = nn.Linear(embedding_size, num_classes)
def forward(self, x):
x = self.backbone(x)
x = self.avg(x)
x = x.view(x.size(0), -1)
x = self.Dropout(x)
x = self.Bottleneck(x)
x = self.last_bn(x)
x = F.normalize(x, p=2, dim=1)
return x
def forward_feature(self, x):
x = self.backbone(x)
x = self.avg(x)
x = x.view(x.size(0), -1)
x = self.Dropout(x)
x = self.Bottleneck(x)
before_normalize = self.last_bn(x)
x = F.normalize(before_normalize, p=2, dim=1)
return before_normalize, x
def forward_classifier(self, x):
x = self.classifier(x)
return x
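# Shape sketch (illustrative, assuming 160x160 RGB crops; other sizes also work
# because of the adaptive average pooling above):
#
#   model = Facenet(backbone="mobilenet", mode="train", num_classes=10)
#   emb = model(torch.randn(2, 3, 160, 160))   # -> (2, 128) L2-normalised embeddings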
| 37.340426 | 189 | 0.61396 | 3,267 | 0.930769 | 0 | 0 | 0 | 0 | 0 | 0 | 366 | 0.104274 |
b439967634fbd815c14f34a574722d653f74e466 | 367 | py | Python | distributed_social_network/posts/migrations/0003_auto_20190308_2055.py | leevtori/CMPUT404-project | 52214288855ae4b3f05b8d17e67a2686debffb19 | [
"Apache-2.0"
]
| null | null | null | distributed_social_network/posts/migrations/0003_auto_20190308_2055.py | leevtori/CMPUT404-project | 52214288855ae4b3f05b8d17e67a2686debffb19 | [
"Apache-2.0"
]
| 51 | 2019-03-22T00:31:06.000Z | 2021-06-10T21:17:30.000Z | distributed_social_network/posts/migrations/0003_auto_20190308_2055.py | leevtori/CMPUT404-project | 52214288855ae4b3f05b8d17e67a2686debffb19 | [
"Apache-2.0"
]
| 1 | 2019-02-08T01:33:57.000Z | 2019-02-08T01:33:57.000Z | # Generated by Django 2.1.7 on 2019-03-08 20:55
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('posts', '0002_auto_20190221_0234'),
]
operations = [
migrations.RenameField(
model_name='post',
old_name='visiblilty',
new_name='visibility',
),
]
| 19.315789 | 47 | 0.588556 | 282 | 0.768392 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.297003 |
b439fd956c9d132bc84b304fc1984cd145eb18b5 | 2,260 | py | Python | minify/migrations/0004_auto__del_unique_urlminify_short_url__add_unique_urlminify_short_url_s.py | djsan15/url-minifier | 00ff087dadc7e14015cc5640e135f8454afd11dc | [
"MIT"
]
| null | null | null | minify/migrations/0004_auto__del_unique_urlminify_short_url__add_unique_urlminify_short_url_s.py | djsan15/url-minifier | 00ff087dadc7e14015cc5640e135f8454afd11dc | [
"MIT"
]
| null | null | null | minify/migrations/0004_auto__del_unique_urlminify_short_url__add_unique_urlminify_short_url_s.py | djsan15/url-minifier | 00ff087dadc7e14015cc5640e135f8454afd11dc | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'URLMinify', fields ['short_url']
db.delete_unique(u'minify_urlminify', ['short_url'])
# Adding unique constraint on 'URLMinify', fields ['short_url', 'short_url_domain']
db.create_unique(u'minify_urlminify', ['short_url', 'short_url_domain_id'])
def backwards(self, orm):
# Removing unique constraint on 'URLMinify', fields ['short_url', 'short_url_domain']
db.delete_unique(u'minify_urlminify', ['short_url', 'short_url_domain_id'])
# Adding unique constraint on 'URLMinify', fields ['short_url']
db.create_unique(u'minify_urlminify', ['short_url'])
models = {
u'minify.domain': {
'Meta': {'object_name': 'Domain'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'minify.urlminify': {
'Meta': {'unique_together': "(('short_url', 'short_url_domain'),)", 'object_name': 'URLMinify'},
'date_added': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'long_url': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'long_url_domain': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'long_url_domain'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['minify.Domain']"}),
'short_url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'short_url_domain': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'short_url_domain'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['minify.Domain']"})
}
}
complete_apps = ['minify'] | 52.55814 | 217 | 0.615487 | 2,093 | 0.926106 | 0 | 0 | 0 | 0 | 0 | 0 | 1,437 | 0.635841 |
b43cafc5d4e3e3709f5f5f9476d5698dfa194510 | 1,182 | py | Python | Validation/EcalRecHits/test/EcalTBValidationData_cfg.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
]
| null | null | null | Validation/EcalRecHits/test/EcalTBValidationData_cfg.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
]
| null | null | null | Validation/EcalRecHits/test/EcalTBValidationData_cfg.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
]
| null | null | null | import FWCore.ParameterSet.Config as cms
process = cms.Process("h4ValidData")
# initialize MessageLogger
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:./ECALH4TB_data_hits.root')
)
process.tbValidData = cms.EDAnalyzer("EcalTBValidation",
rootfile = cms.untracked.string('EcalTBValidationData.root'),
eventHeaderProducer = cms.string('ecalTBunpack'),
hitProducer = cms.string('ecal2006TBWeightUncalibRecHit'),
digiCollection = cms.string(''),
tdcRecInfoCollection = cms.string('EcalTBTDCRecInfo'),
data_ = cms.untracked.int32(0),
digiProducer = cms.string('ecalUnsuppressedDigis'),
xtalInBeam = cms.untracked.int32(1104),
hitCollection = cms.string('EcalUncalibRecHitsEB'),
hodoRecInfoProducer = cms.string('ecal2006TBHodoscopeReconstructor'),
eventHeaderCollection = cms.string(''),
hodoRecInfoCollection = cms.string('EcalTBHodoscopeRecInfo'),
tdcRecInfoProducer = cms.string('ecal2006TBTDCReconstructor')
)
process.p = cms.Path(process.tbValidData)
| 36.9375 | 73 | 0.756345 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 367 | 0.310491 |
b43dc0c04bfb765d1057fbf1d173d5c4374ca965 | 1,948 | py | Python | database/domains.py | changyc9928/Genshin-Discord-Bot | be64481f43755c0031b469e79271ec7f0753cb0a | [
"MIT"
]
| null | null | null | database/domains.py | changyc9928/Genshin-Discord-Bot | be64481f43755c0031b469e79271ec7f0753cb0a | [
"MIT"
]
| null | null | null | database/domains.py | changyc9928/Genshin-Discord-Bot | be64481f43755c0031b469e79271ec7f0753cb0a | [
"MIT"
]
| null | null | null | import asyncio
from query_graphql import query_artifact_domains, query_weapon_materials_book
class Domains:
leylines = {
"Blossom of Revelation": "Character EXP Materials",
"Blossom of Wealth": "Mora"
}
weapon_domains = {}
talent_domains = {}
artifact_domains = {}
trounce_domains = {
"Wolf of the North Challenge": "Andrius (Lupus Boreas), Dominator of Wolves",
"Beneath the Dragon-Queller": "Azhdaha, Sealed Lord of Vishaps",
"Enter the Golden House": "Childe, Eleventh of the Fatui Harbingers",
"Narukami Island: Tenshukaku": "La Signora (Rosalyne-Kruzchka Lohefalter), The Fair Lady",
"End of the Oneiric Euthymia": "Magatsu Mitake Narukami no Mikoto, Raiden no Inazuma Tono"
}
world_bosses = {
"Anemo Hypostasis": None,
"Electro Hypostasis": None,
"Cryo Regisvine": None,
"Cryo Hypostasis": None,
"Oceanid": None,
"Pyro Regisvine": None,
"Geo Hypostasis": None,
"Primo Geovishap": None,
"Maguu Kenki": None,
"Pyro Hypostasis": None,
"Perpetual Mechanical Array": None,
"Hydro Hypostasis": None,
"Thunder Manifestation": None,
"Golden Wolflord": None,
"Bathysmal Vishap Herd": None,
"Ruin Serpent": None,
}
@staticmethod
async def initialize():
Domains.artifact_domains = await query_artifact_domains()
tuple = await query_weapon_materials_book()
Domains.weapon_domains = tuple[0]
Domains.talent_domains = tuple[1]
Domains.domains = {
"Ley Line Outcrops": Domains.leylines,
"Weapon Ascension Materials": Domains.weapon_domains,
"Talent Books": Domains.talent_domains,
"Artifacts": Domains.artifact_domains,
"Trounce Domains": Domains.trounce_domains,
"World Bosses": Domains.world_bosses
}
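# Usage sketch (the surrounding bot presumably awaits this inside its own event
# loop; asyncio.run is shown here only for illustration):
#
#   asyncio.run(Domains.initialize())
#   print(Domains.domains["Trounce Domains"])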
| 34.785714 | 98 | 0.627823 | 1,851 | 0.950205 | 0 | 0 | 606 | 0.311088 | 588 | 0.301848 | 834 | 0.428131 |
b43e6c43008ba217cff97642ff4168d07bf643bc | 23,644 | py | Python | policy.py | nyu-dl/dl4mt-simul-trans | 392ff3148e944be6fbc475d5285441807902e2e0 | [
"BSD-3-Clause"
]
| 34 | 2016-12-01T07:59:43.000Z | 2021-09-13T10:46:15.000Z | policy.py | yifanjun233/dl4mt-simul-trans | 392ff3148e944be6fbc475d5285441807902e2e0 | [
"BSD-3-Clause"
]
| 1 | 2020-09-14T08:35:00.000Z | 2020-09-14T08:35:00.000Z | policy.py | yifanjun233/dl4mt-simul-trans | 392ff3148e944be6fbc475d5285441807902e2e0 | [
"BSD-3-Clause"
]
| 18 | 2016-12-15T01:43:33.000Z | 2021-09-29T07:24:08.000Z | """
-- Policy Network for decision making [more general]
"""
from nmt_uni import *
from layers import _p
import os
import time, datetime
import cPickle as pkl
# hyper params
TINY = 1e-7
PI = numpy.pi
E = numpy.e
A = 0.2
B = 1
class Controller(object):
def __init__(self, trng,
options,
n_in=None, n_out=None,
recurrent=False, id=None):
self.WORK = options['workspace']
self.trng = trng
self.options = options
self.recurrent = recurrent
self.type = options.get('type', 'categorical')
self.n_hidden = 128
self.n_in = n_in
self.n_out = n_out
if self.options.get('layernorm', True):
self.rec = 'lngru'
else:
self.rec = 'gru'
if not n_in:
self.n_in = options['readout_dim']
if not n_out:
if self.type == 'categorical':
self.n_out = 2 # initially it is a WAIT/COMMIT action.
elif self.type == 'gaussian':
self.n_out = 100
else:
raise NotImplementedError
# build the policy network
print 'parameter initialization'
params = OrderedDict()
if not self.recurrent:
print 'building a feedforward controller'
params = get_layer('ff')[0](options, params, prefix='policy_net_in',
nin=self.n_in, nout=self.n_hidden)
else:
print 'building a recurrent controller'
params = get_layer(self.rec)[0](options, params, prefix='policy_net_in',
nin=self.n_in, dim=self.n_hidden)
params = get_layer('ff')[0](options, params, prefix='policy_net_out',
nin=self.n_hidden,
nout=self.n_out if self.type == 'categorical' else self.n_out * 2)
# bias the forget probability
# if self.n_out == 3:
# params[_p('policy_net_out', 'b')][-1] = -2
# for the baseline network.
params_b = OrderedDict()
# using a scalar baseline [**]
# params_b['b0'] = numpy.array(numpy.random.rand() * 0.0, dtype='float32')
# using a MLP as a baseline
params_b = get_layer('ff')[0](options, params_b, prefix='baseline_net_in',
nin=self.n_in, nout=128)
params_b = get_layer('ff')[0](options, params_b, prefix='baseline_net_out',
nin=128, nout=1)
if id is not None:
print 'reload the saved model: {}'.format(id)
params = load_params(self.WORK + '.policy/{}-{}.current.npz'.format(id, self.options['base']), params)
params_b = load_params(self.WORK + '.policy/{}-{}.current.npz'.format(id, self.options['base']), params_b)
else:
id = datetime.datetime.fromtimestamp(time.time()).strftime('%y%m%d-%H%M%S')
print 'start from a new model: {}'.format(id)
self.id = id
self.model = self.WORK + '.policy/{}-{}'.format(id, self.options['base'])
# theano shared params
tparams = init_tparams(params)
tparams_b = init_tparams(params_b)
self.tparams = tparams
self.tparams_b = tparams_b
# build the policy network
self.build_sampler(options=options)
self.build_discriminator(options=options)
print 'policy network'
for p in params:
print p, params[p].shape
def build_batchnorm(self, observation, mask=None):
raise NotImplementedError
def build_sampler(self, options):
# ==================================================================================== #
# Build Action function: samplers
# ==================================================================================== #
observation = tensor.matrix('observation', dtype='float32') # batch_size x readout_dim (seq_steps=1)
prev_hidden = tensor.matrix('p_hidden', dtype='float32')
if not self.recurrent:
hiddens = get_layer('ff')[1](self.tparams, observation,
options, prefix='policy_net_in',
activ='tanh')
else:
hiddens = get_layer(self.rec)[1](self.tparams, observation,
options, prefix='policy_net_in', mask=None,
one_step=True, _init_state=prev_hidden)[0]
act_inps = [observation, prev_hidden]
if self.type == 'categorical':
act_prob = get_layer('ff')[1](self.tparams, hiddens, options,
prefix='policy_net_out',
activ='softmax') # batch_size x n_out
act_prob2 = tensor.clip(act_prob, TINY, 1 - TINY)
# compiling the sampling function for action
# action = self.trng.binomial(size=act_prop.shape, p=act_prop)
action = self.trng.multinomial(pvals=act_prob).argmax(1) # 0, 1, ...
print 'build action sampling function [Discrete]'
self.f_action = theano.function(act_inps, [action, act_prob, hiddens, act_prob2],
on_unused_input='ignore') # action/dist/hiddens
elif self.type == 'gaussian':
_temp = get_layer('ff')[1](self.tparams, hiddens, options,
prefix='policy_net_out',
activ='linear'
) # batch_size x n_out
mean, log_std = _temp[:, :self.n_out], _temp[:, self.n_out:]
mean, log_std = -A * tanh(mean), -B-relu(log_std)
action0 = self.trng.normal(size=mean.shape, dtype='float32')
action = action0 * tensor.exp(log_std) + mean
print 'build action sampling function [Gaussian]'
self.f_action = theano.function(act_inps, [action, mean, log_std, hiddens],
on_unused_input='ignore') # action/dist/hiddens
else:
raise NotImplementedError
def build_discriminator(self, options):
# ==================================================================================== #
# Build Action Discriminator
# ==================================================================================== #
observations = tensor.tensor3('observations', dtype='float32')
mask = tensor.matrix('mask', dtype='float32')
if self.type == 'categorical':
actions = tensor.matrix('actions', dtype='int64')
elif self.type == 'gaussian':
actions = tensor.tensor3('actions', dtype='float32')
else:
raise NotImplementedError
if not self.recurrent:
hiddens = get_layer('ff')[1](self.tparams, observations,
options, prefix='policy_net_in',
activ='tanh')
else:
hiddens = get_layer(self.rec)[1](self.tparams, observations,
options, prefix='policy_net_in', mask=mask)[0]
act_inputs = [observations, mask]
if self.type == 'categorical':
act_probs = get_layer('ff')[1](self.tparams, hiddens, options, prefix='policy_net_out',
activ='softmax') # seq_steps x batch_size x n_out
act_probs = tensor.clip(act_probs, TINY, 1 - TINY)
print 'build action distribiution'
self.f_probs = theano.function(act_inputs, act_probs,
on_unused_input='ignore') # get the action probabilities
elif self.type == 'gaussian':
_temps = get_layer('ff')[1](self.tparams, hiddens, options,
prefix='policy_net_out',
activ='linear'
) # batch_size x n_out
means, log_stds = _temps[:, :, :self.n_out], _temps[:, :, self.n_out:]
means, log_stds = -A * tanh(means), -B-relu(log_stds)
act_probs = [means, log_stds]
print 'build Gaussian PDF'
self.f_pdf = theano.function(act_inputs, [means, log_stds],
on_unused_input='ignore') # get the action probabilities
else:
raise NotImplementedError
# ==================================================================================== #
# Build Baseline Network (Input-dependent Value Function) & Advantages
# ==================================================================================== #
print 'setup the advantages & baseline network'
reward = tensor.matrix('reward') # seq_steps x batch_size :: rewards for each steps
# baseline is estimated with a 2-layer neural network.
hiddens_b = get_layer('ff')[1](self.tparams_b, observations, options,
prefix='baseline_net_in',
activ='tanh')
baseline = get_layer('ff')[1](self.tparams_b, hiddens_b, options,
prefix='baseline_net_out',
activ='linear')[:, :, 0] # seq_steps x batch_size or batch_size
advantages = self.build_advantages(act_inputs, reward, baseline, normalize=True)
# ==================================================================================== #
# Build Policy Gradient (here we provide two options)
# ==================================================================================== #
if self.options['updater'] == 'REINFORCE':
print 'build RENIFROCE.'
self.build_reinforce(act_inputs, act_probs, actions, advantages)
elif self.options['updater'] == 'TRPO':
print 'build TRPO'
self.build_trpo(act_inputs, act_probs, actions, advantages)
else:
raise NotImplementedError
# ==================================================================================== #
# Controller Actions
# ==================================================================================== #
def random(self, states, p=0.5):
live_k = states.shape[0]
return (numpy.random.random(live_k) > p).astype('int64'), \
numpy.ones(live_k) * p
def action(self, states, prevhidden):
return self.f_action(states, prevhidden)
def init_hidden(self, n_samples=1):
return numpy.zeros((n_samples, self.n_hidden), dtype='float32')
def init_action(self, n_samples=1):
states0 = numpy.zeros((n_samples, self.n_in), dtype='float32')
return self.f_action(states0, self.init_hidden(n_samples))
def get_learner(self):
if self.options['updater'] == 'REINFORCE':
return self.run_reinforce
elif self.options['updater'] == 'TRPO':
return self.run_trpo
else:
raise NotImplementedError
@staticmethod
def kl(prob0, prob1):
p1 = (prob0 + TINY) / (prob1 + TINY)
# p2 = (1 - prob0 + TINY) / (1 - prob1 + TINY)
return tensor.sum(prob0 * tensor.log(p1), axis=-1)
@staticmethod
def _grab_prob(probs, X):
assert probs.ndim == 3
batch_size = probs.shape[1]
max_len = probs.shape[0]
vocab_size = probs.shape[2]
probs = probs.reshape((batch_size * max_len, vocab_size))
return probs[tensor.arange(batch_size * max_len), X.flatten(1)].reshape(X.shape) # advanced indexing
def cross(self, probs, actions):
# return tensor.log(probs) * actions + tensor.log(1 - probs) * (1 - actions)
return self._grab_prob(tensor.log(probs), actions)
def build_advantages(self, act_inputs, reward, baseline, normalize=True):
# TODO: maybe we need a discount factor gamma for advantages.
# TODO: we can also rewrite advantages with value functions (GAE)
# Advantages and Normalization the return
reward_adv = reward - baseline
mask = act_inputs[1]
if normalize:
reward_mean = tensor.sum(mask * reward_adv) / tensor.sum(mask)
reward_mean2 = tensor.sum(mask * (reward_adv ** 2)) / tensor.sum(mask)
reward_std = tensor.sqrt(tensor.maximum(reward_mean2 - reward_mean ** 2, TINY)) + TINY
# reward_std = tensor.maximum(reward_std, 1)
reward_c = reward_adv - reward_mean # independent mean
advantages = reward_c / reward_std
else:
advantages = reward_adv
print 'build advantages and baseline gradient'
L = tensor.sum(mask * (reward_adv ** 2)) / tensor.sum(mask)
dL = tensor.grad(L, wrt=itemlist(self.tparams_b))
lr = tensor.scalar(name='lr')
inps_b = act_inputs + [reward]
oups_b = [L, advantages]
f_adv, f_update_b = adam(lr, self.tparams_b, dL, inps_b, oups_b)
self.f_adv = f_adv
self.f_update_b = f_update_b
return advantages
# ===================================================================
    # Policy Gradient: REINFORCE with Adam
# ===================================================================
def build_reinforce(self, act_inputs, act_probs, actions, advantages):
mask = act_inputs[1]
if self.type == 'categorical':
negEntropy = tensor.sum(tensor.log(act_probs) * act_probs, axis=-1)
logLikelihood = self.cross(act_probs, actions)
elif self.type == 'gaussian':
means, log_stds = act_probs
negEntropy = -tensor.sum(log_stds + tensor.log(tensor.sqrt(2 * PI * E)), axis=-1)
actions0 = (actions - means) / tensor.exp(log_stds)
logLikelihood = -tensor.sum(log_stds, axis=-1) - \
0.5 * tensor.sum(tensor.sqr(actions0), axis=-1) - \
0.5 * means.shape[-1] * tensor.log(2 * PI)
else:
raise NotImplementedError
# tensor.log(act_probs) * actions + tensor.log(1 - act_probs) * (1 - actions)
H = tensor.sum(mask * negEntropy, axis=0).mean() * 0.001 # penalty
J = tensor.sum(mask * -logLikelihood * advantages, axis=0).mean() + H
dJ = grad_clip(tensor.grad(J, wrt=itemlist(self.tparams)))
print 'build REINFORCE optimizer'
lr = tensor.scalar(name='lr')
inps = act_inputs + [actions, advantages]
outps = [J, H]
if self.type == 'gaussian':
outps += [actions0.mean(), actions.mean()]
f_cost, f_update = adam(lr, self.tparams, dJ, inps, outps)
self.f_cost = f_cost
self.f_update = f_update
print 'done'
def run_reinforce(self, act_inputs, actions, reward, update=True, lr=0.0002):
# sub baseline
inps_adv = act_inputs + [reward]
L, advantages = self.f_adv(*inps_adv)
inps_reinfoce = act_inputs + [actions, advantages]
if self.type == 'gaussian':
J, H, m, s = self.f_cost(*inps_reinfoce)
info = {'J': J, 'G_norm': H, 'B_loss': L, 'Adv': advantages.mean(), 'm': m, 's': s}
else:
J, H = self.f_cost(*inps_reinfoce)
info = {'J': J, 'Entropy': H, 'B_loss': L, 'Adv': advantages.mean()}
info['advantages'] = advantages
if update: # update the parameters
self.f_update_b(lr)
self.f_update(lr)
return info
# ==================================================================================== #
# Trust Region Policy Optimization
# ==================================================================================== #
def build_trpo(self, act_inputs, act_probs, actions, advantages):
assert self.type == 'categorical', 'in this stage not support TRPO'
# probability distribution
mask = act_inputs[1]
probs = act_probs
probs_old = tensor.matrix(dtype='float32')
logp = self.cross(probs, actions)
logp_old = self.cross(probs_old, actions)
# policy gradient
J = tensor.sum(mask * -tensor.exp(logp - logp_old) * advantages, axis=0).mean()
dJ = flatgrad(J, self.tparams)
probs_fix = theano.gradient.disconnected_grad(probs)
kl_fix = tensor.sum(mask * self.kl(probs_fix, probs), axis=0).mean()
kl_grads = tensor.grad(kl_fix, wrt=itemlist(self.tparams))
ftangents = tensor.fvector(name='flat_tan')
shapes = [self.tparams[var].get_value(borrow=True).shape for var in self.tparams]
start = 0
tangents = []
for shape in shapes:
size = numpy.prod(shape)
tangents.append(tensor.reshape(ftangents[start:start + size], shape))
start += size
gvp = tensor.add(*[tensor.sum(g * t) for (g, t) in zipsame(kl_grads, tangents)])
        # Fisher-vector product
fvp = flatgrad(gvp, self.tparams)
entropy = tensor.sum(mask * -self.cross(probs, probs), axis=0).mean()
kl = tensor.sum(mask * self.kl(probs_old, probs), axis=0).mean()
print 'compile the functions'
inps = act_inputs + [actions, advantages, probs_old]
loss = [J, kl, entropy]
self.f_pg = theano.function(inps, dJ)
self.f_loss = theano.function(inps, loss)
self.f_fisher = theano.function([ftangents] + inps, fvp, on_unused_input='ignore')
# get/set flatten params
        print 'compiling flat updater'
self.get_flat = theano.function([], tensor.concatenate([self.tparams[v].flatten() for v in self.tparams]))
theta = tensor.vector()
start = 0
updates = []
for v in self.tparams:
p = self.tparams[v]
shape = p.shape
size = tensor.prod(shape)
updates.append((p, theta[start:start + size].reshape(shape)))
start += size
self.set_flat = theano.function([theta], [], updates=updates)
def run_trpo(self, act_inputs, actions, reward,
update=True, cg_damping=1e-3, max_kl=1e-2, lr=0.0002):
# sub baseline
inps_adv = act_inputs + [reward]
L, advantages = self.f_adv(*inps_adv)
self.f_update_b(lr)
# get current action distributions
probs = self.f_probs(*act_inputs)
inps = act_inputs + [actions, advantages, probs]
thprev = self.get_flat()
def fisher_vector_product(p):
return self.f_fisher(p, *inps) + cg_damping * p
g = self.f_pg(*inps)
losses_before = self.f_loss(*inps)
if numpy.allclose(g, 0):
print 'zero gradient, not updating'
else:
stepdir = self.cg(fisher_vector_product, -g)
shs = .5 * stepdir.dot(fisher_vector_product(stepdir))
lm = numpy.sqrt(shs / max_kl)
print "\nlagrange multiplier:", lm, "gnorm:", numpy.linalg.norm(g)
fullstep = stepdir / lm
neggdotstepdir = -g.dot(stepdir)
def loss(th):
self.set_flat(th)
return self.f_loss(*inps)[0]
print 'do line search'
success, theta = self.linesearch(loss, thprev, fullstep, neggdotstepdir / lm)
print "success", success
self.set_flat(theta)
losses_after = self.f_loss(*inps)
info = OrderedDict()
for (lname, lbefore, lafter) in zipsame(['J', 'KL', 'entropy'], losses_before, losses_after):
info[lname + "_before"] = lbefore
info[lname + "_after"] = lafter
# add the baseline loss into full information
info['B_loss'] = L
return info
@staticmethod
def linesearch(f, x, fullstep, expected_improve_rate, max_backtracks=10, accept_ratio=.1):
"""
Backtracking linesearch, where expected_improve_rate is the slope dy/dx at the initial point
"""
fval = f(x)
print "fval before", fval
for (_n_backtracks, stepfrac) in enumerate(.5 ** numpy.arange(max_backtracks)):
xnew = x + stepfrac * fullstep
newfval = f(xnew)
actual_improve = fval - newfval
expected_improve = expected_improve_rate * stepfrac
ratio = actual_improve / expected_improve
print "a/e/r", actual_improve, expected_improve, ratio
if ratio > accept_ratio and actual_improve > 0:
print "fval after", newfval
return True, xnew
return False, x
@staticmethod
def cg(f_Ax, b, cg_iters=10, callback=None, verbose=False, residual_tol=1e-10):
"""
Conjuctate Gradient
"""
p = b.copy()
r = b.copy()
x = numpy.zeros_like(b)
rdotr = r.dot(r)
fmtstr = "%10i %10.3g %10.3g"
titlestr = "%10s %10s %10s"
if verbose: print titlestr % ("iter", "residual norm", "soln norm")
for i in xrange(cg_iters):
if callback is not None:
callback(x)
if verbose: print fmtstr % (i, rdotr, numpy.linalg.norm(x))
z = f_Ax(p)
v = rdotr / p.dot(z)
x += v * p
r -= v * z
newrdotr = r.dot(r)
mu = newrdotr / rdotr
p = r + mu * p
rdotr = newrdotr
if rdotr < residual_tol:
break
if callback is not None:
callback(x)
if verbose: print fmtstr % (i + 1, rdotr, numpy.linalg.norm(x))
return x
# ====================================================================== #
# Save & Load
# ====================================================================== #
def save(self, history, it):
_params = OrderedDict()
_params = unzip(self.tparams, _params)
_params = unzip(self.tparams_b, _params)
print 'save the policy network >> {}'.format(self.model)
numpy.savez('%s.current' % (self.model),
history=history,
it=it,
**_params)
numpy.savez('{}.iter={}'.format(self.model, it),
history=history,
it=it,
**_params)
def load(self):
if os.path.exists(self.model):
print 'loading from the existing model (current)'
rmodel = numpy.load(self.model)
history = rmodel['history']
it = rmodel['it']
self.params = load_params(rmodel, self.params)
self.params_b = load_params(rmodel, self.params_b)
self.tparams = init_tparams(self.params)
self.tparams_b = init_tparams(self.params_b)
print 'the dataset need to go over {} lines'.format(it)
return history, it
else:
return [], -1
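# Illustrative usage sketch (hypothetical helper, not used elsewhere in this
# module): the cg() helper above solves A x = b for a symmetric positive-definite
# operator available only as a matrix-vector product f_Ax -- this is how
# run_trpo() applies the Fisher-vector product without ever materialising the
# Fisher matrix. The standalone function below mirrors the same iteration on a
# tiny dense system so its behaviour can be checked in isolation.
def _cg_demo():
    import numpy
    A = numpy.array([[4.0, 1.0], [1.0, 3.0]])
    b = numpy.array([1.0, 2.0])

    def f_Ax(p):
        # the only access to A that conjugate gradient needs
        return A.dot(p)

    x = numpy.zeros_like(b)
    r = b.copy()                      # residual b - A x (x starts at zero)
    p = r.copy()                      # search direction
    rdotr = r.dot(r)
    for _ in range(10):
        z = f_Ax(p)
        v = rdotr / p.dot(z)          # step length along p
        x += v * p
        r -= v * z
        newrdotr = r.dot(r)
        p = r + (newrdotr / rdotr) * p
        rdotr = newrdotr
        if rdotr < 1e-10:
            break
    # the exact solution of this 2x2 system is [1/11, 7/11]
    assert numpy.allclose(x, numpy.linalg.solve(A, b))
    return x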
| 39.871838 | 118 | 0.514084 | 23,398 | 0.989596 | 0 | 0 | 2,401 | 0.101548 | 0 | 0 | 5,344 | 0.226019 |
b43f15ecbdb1d9b59ec1324ee2719d330bd46baf | 3,637 | py | Python | src/app/drivers/pycolator/splitmerge.py | husensofteng/msstitch | a917ed24fbc8b018b3f2bbec31e852aa76cc715c | [
"MIT"
]
| null | null | null | src/app/drivers/pycolator/splitmerge.py | husensofteng/msstitch | a917ed24fbc8b018b3f2bbec31e852aa76cc715c | [
"MIT"
]
| null | null | null | src/app/drivers/pycolator/splitmerge.py | husensofteng/msstitch | a917ed24fbc8b018b3f2bbec31e852aa76cc715c | [
"MIT"
]
| null | null | null | from app.drivers.pycolator import base
from app.actions.pycolator import splitmerge as preparation
from app.readers import pycolator as readers
from app.drivers.options import pycolator_options
class SplitDriver(base.PycolatorDriver):
outfile = None
def run(self):
self.set_filter_types()
for filter_type, suffix in self.filter_types:
self.prepare()
self.set_features(filter_type)
self.outsuffix = suffix
self.write()
self.finish()
def set_options(self):
"""Since splitdriver splits into multiple files we cannot set an
output file"""
super().set_options()
del(self.options['-o'])
def set_features(self, filter_type):
"""Calls splitter to split percolator output into target/decoy
elements.
Writes two new xml files with features. Currently only psms and
peptides. Proteins not here, since one cannot do protein inference
before having merged and remapped multifraction data anyway.
"""
elements_to_split = {'psm': self.allpsms, 'peptide': self.allpeps}
self.features = self.splitfunc(elements_to_split, self.ns, filter_type)
class SplitTDDriver(SplitDriver):
command = 'splittd'
commandhelp = ('Splits target and decoy data, producing 2 output files')
def set_filter_types(self):
self.filter_types = [('target', '_target.xml'),
('decoy', '_decoy.xml')]
def set_features(self, filter_type):
self.splitfunc = preparation.split_target_decoy
super().set_features(filter_type)
class SplitProteinDriver(SplitDriver):
command = 'splitprotein'
    commandhelp = ('Splits input XML into multiple files based on '
'the protein headers specified. Each header class gets '
'its own output file')
def set_filter_types(self):
maxdigits = len(str(len(self.protheaders)))
self.filter_types = [(headers, '_h{i:0{dig}d}.xml'.format(
i=ix, dig=maxdigits))
for ix, headers in enumerate(self.protheaders)]
def set_features(self, filter_type):
self.splitfunc = preparation.split_protein_header_id_type
super().set_features(filter_type)
def set_options(self):
super().set_options()
options = self.define_options(['protheaders'], pycolator_options)
self.options.update(options)
class MergeDriver(base.PycolatorDriver):
"""Base class for merging multiple percolator fractions under different
sorts of filtering. It writes a single percolator out xml from
multiple fractions.
Namespace and static xml come from first percolator file.
Make sure fractions are from same percolator run."""
outsuffix = '_merged.xml'
command = 'merge'
commandhelp = 'Merges percolator xml files, nothing else.'
def parse_input(self, **kwargs):
super().parse_input(**kwargs)
self.mergefiles = self.fn[:]
self.fn = self.fn[0]
def set_options(self):
super().set_options()
options = self.define_options(['multifiles'], pycolator_options)
self.options.update(options)
def prepare(self):
self.ns, self.static_xml = self.prepare_percolator_output(self.fn)
def set_features(self):
""""Merge all psms and peptides"""
allpsms_str = readers.generate_psms_multiple_fractions_strings(
self.mergefiles, self.ns)
allpeps = preparation.merge_peptides(self.mergefiles, self.ns)
self.features = {'psm': allpsms_str, 'peptide': allpeps}
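# Illustrative sketch (hypothetical helper, not used by the drivers above):
# SplitProteinDriver zero-pads the per-header-class suffix to the width of the
# largest index, so two header classes give '_h0.xml' / '_h1.xml' while eleven
# give '_h00.xml' ... '_h10.xml'. The function below reproduces only that
# naming logic.
def _suffix_demo(protheaders):
    maxdigits = len(str(len(protheaders)))
    return ['_h{i:0{dig}d}.xml'.format(i=ix, dig=maxdigits)
            for ix, _ in enumerate(protheaders)]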
| 36.37 | 79 | 0.668958 | 3,431 | 0.94336 | 0 | 0 | 0 | 0 | 0 | 0 | 1,104 | 0.303547 |
b442fb148ab72708b2f20e85644d227c7977348c | 453 | py | Python | ejercicio 14.py | Davidpadilla1234/taller_estructura-secuencial | 3a65931ad75fd4902f406c6c872053169dad1a0b | [
"MIT"
]
| null | null | null | ejercicio 14.py | Davidpadilla1234/taller_estructura-secuencial | 3a65931ad75fd4902f406c6c872053169dad1a0b | [
"MIT"
]
| null | null | null | ejercicio 14.py | Davidpadilla1234/taller_estructura-secuencial | 3a65931ad75fd4902f406c6c872053169dad1a0b | [
"MIT"
]
| null | null | null | """
Inputs:
current reading ---> float ---> lect2
previous reading ---> float ---> lect1
value per kilowatt ---> float ---> valorkw
Outputs:
consumption ---> float ---> consumo
total bill ---> float ---> total
"""
lect2 = float(input("Enter the current reading: "))
lect1 = float(input("Enter the previous reading: "))
valorkw = float(input("Value per kilowatt: "))
consumo = lect2 - lect1
total = consumo * valorkw
print("The amount to pay is: " + str(total)) | 30.2 | 55 | 0.653422 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 279 | 0.615894 |
b4430cd61e95dcd15b900c13c175b1309fa0cc87 | 4,955 | py | Python | src/workers/correct.py | brainsqueeze/Image_correction | db19088fb101ce760601416d19622d46d76f482c | [
"MIT"
]
| 10 | 2017-08-31T06:16:56.000Z | 2022-03-12T19:44:50.000Z | src/workers/correct.py | brainsqueeze/Image_correction | db19088fb101ce760601416d19622d46d76f482c | [
"MIT"
]
| 2 | 2018-06-01T09:27:07.000Z | 2018-07-23T01:43:16.000Z | src/workers/correct.py | brainsqueeze/Image_correction | db19088fb101ce760601416d19622d46d76f482c | [
"MIT"
]
| 3 | 2018-10-24T04:59:10.000Z | 2021-09-03T10:37:35.000Z | # __author__ = 'Dave'
import cv2
from skimage import io
from skimage.transform import probabilistic_hough_line
import matplotlib.pyplot as plt
import os
import warnings
import random
import numpy as np
warnings.filterwarnings('ignore', category=RuntimeWarning)
class CorrectImage(object):
def __init__(self):
self.path = ""
self.name = ""
self.image = None
self.edges = None
self.lines = None
def _load_image(self, image):
"""
:param image: image file name (str)
:return: skimage image data
"""
filename = os.path.join(self.path, image)
return io.imread(filename)
def add_path(self, image_path):
"""
        Sets the base directory that image files are loaded from
:param image_path: (string)
"""
self.path = image_path + '/'
def add_image(self, filename):
"""
        Sets the image to process and runs the Hough transform on it
:param filename: (string)
"""
self.name = filename
self.hough_transform()
def _detect_edges(self, image, vary=False, plot=False):
"""
:param image: image file name (str)
:param vary: turn tunable plotting on
:param plot: turn plotting on
:return: detected edges with variable filters
"""
self.image = self._load_image(image)
if vary:
def nothing(x):
pass
cv2.namedWindow('image')
cv2.createTrackbar('th1', 'image', 0, 255, nothing)
cv2.createTrackbar('th2', 'image', 0, 255, nothing)
while True:
th1 = cv2.getTrackbarPos('th1', 'image')
th2 = cv2.getTrackbarPos('th2', 'image')
edges = cv2.Canny(self.image, th1, th2)
cv2.imshow('image', edges)
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
edges = cv2.Canny(self.image, 255, 255)
if plot:
cv2.namedWindow('image')
cv2.imshow('image', edges)
cv2.waitKey(5000)
cv2.destroyAllWindows()
return edges
def hough_transform(self, vary=False, plot=False):
"""
:param vary: turn edge detection tunable plotting on
:param plot: turn plotting on
:return: numpy array of probabilistically found straight lines
"""
if self.name == "":
raise ValueError('Missing image: you need to specify the image file using add_image.')
self.edges = self._detect_edges(self.name, vary=vary, plot=plot)
self.lines = probabilistic_hough_line(self.edges, threshold=10, line_length=5, line_gap=3)
if plot:
for line in self.lines:
p0, p1 = line
plt.plot((p0[0], p1[0]), (p0[1], p1[1]))
plt.show()
@staticmethod
def slope(lines):
"""
:param lines: array of coordinates (ie. [((x0, y0), (xf, yf)), ...]
:return: array of slope values with the same number of entries as lines
"""
# for doing vectorized subtraction across all line pairs,
# we need the first line of each pair to be the negative of itself
sign_op = np.ones_like(lines)
sign_op[:, :, 0] *= -1
# get the differences between x and y coordinates (start, end), respectively
slopes = np.sum(sign_op * lines, axis=2)
# compute the slopes of each line for every line pair
slopes = slopes[:, :, 0] / slopes[:, :, 1]
# turn infinite values to a finite, but very large value
slopes[np.isinf(slopes)] = 1e6
# this catches cases when the line - as defined - is actually a point and the slope doesn't exist
slopes[np.isnan(slopes)] = 0
return slopes
def line_pair(self, num_pairs):
"""
:param num_pairs: number of line pairs to take (int)
:return: line pairs (array)
"""
idx = np.random.randint(len(self.lines), size=num_pairs * 2)
lines = np.array(self.lines)[idx]
return lines.reshape(num_pairs, 2, 2, 2)
@staticmethod
def mutation(pairs, p_mutate=0.01):
"""
:param pairs: (numpy array with dimensions (n_pairs, 2, 2, 2)) pairs of lines
:param p_mutate: (float) probability of a mutation
:return: (numpy array with dimensions (n_pairs, 2, 2, 2)) pairs of lines with mutations
"""
for i in range(len(pairs)):
if p_mutate > random.random():
# column = np.random.randint(low=0, high=2)
for column in [0, 1]:
t = pairs[i, :, :, column]
low, high = np.min(t), np.max(t)
if high == low:
high *= 2
pairs[i, :, :, column] = np.random.randint(low=low, high=high, size=t.shape)
return pairs
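# Illustrative sketch (hypothetical helper, not used by the class above):
# CorrectImage.slope() expects *pairs* of lines shaped
# (n_pairs, 2 lines, 2 endpoints, 2 coords) and negates the first endpoint of
# every line so that one vectorised sum over the endpoint axis yields the
# (end - start) differences for all lines at once. The function below mirrors
# that computation on a single pair.
def _slope_demo():
    pair = np.array([[[[0, 0], [2, 4]],       # line 1: (0, 0) -> (2, 4)
                      [[1, 1], [3, 1]]]],     # line 2: (1, 1) -> (3, 1)
                    dtype=float)
    sign_op = np.ones_like(pair)
    sign_op[:, :, 0] *= -1                    # negate the start point of each line
    diffs = np.sum(sign_op * pair, axis=2)    # (end - start): [[[2., 4.], [2., 0.]]]
    ratios = diffs[:, :, 0] / diffs[:, :, 1]  # same ratio the class computes
    return ratios                             # [[0.5, inf]]; the class maps inf to 1e6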
| 32.598684 | 105 | 0.559435 | 4,688 | 0.946115 | 0 | 0 | 1,731 | 0.349344 | 0 | 0 | 1,829 | 0.369122 |
b443c0485b44fdad4aad919722875c535cf37d83 | 2,469 | py | Python | plot_scripts/CC_timeline_plot.py | idunnam/Thesis | a567a25aa037c949de285158804a6ee396fc0e6c | [
"MIT"
]
| null | null | null | plot_scripts/CC_timeline_plot.py | idunnam/Thesis | a567a25aa037c949de285158804a6ee396fc0e6c | [
"MIT"
]
| 1 | 2022-01-28T13:12:26.000Z | 2022-01-28T13:12:26.000Z | plot_scripts/CC_timeline_plot.py | idunnam/Thesis | a567a25aa037c949de285158804a6ee396fc0e6c | [
"MIT"
]
| null | null | null | """
This code is used for plotting individual timelines of seasonal CC for each CMIP5 and CMIP6 model
"""
import matplotlib.pyplot as plt
import xarray as xr
import numpy as np
import seaborn as sns
import pandas as pd
#=== Import SEB Anomalies ====
#from seasonal_SEB_components import *
ACCESS = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_ACCESS.nc')
HADGEM = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_HADGEM.nc')
CSIRO = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_CSIRO.nc')
IPSL = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_IPSL.nc')
MIROC5 = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_MIROC5.nc')
NORESM = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_NORESM.nc')
#CMIP6
CESM = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_CESM.nc')
CNRM_CM6 = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_CNRM_CM6.nc')
CNRM_ESM2 = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_CNRM_ESM2.nc')
MRI = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_MRI.nc')
UKMO = xr.open_dataset('/projects/NS9600K/idunnam/src/seas_UKMO.nc')
fig, axs = plt.subplots(1,2, sharey = True, figsize=(30, 10))
axs[0].plot(ACCESS.CC.year, ACCESS.CC.mean(dim=["X10_105","Y21_199"]), label='ACCESS')
axs[0].plot(HADGEM.CC.year, HADGEM.CC.mean(dim=["X10_105","Y21_199"]),label='HADGEM')
axs[0].plot(IPSL.CC.year, IPSL.CC.mean(dim=["X10_105","Y21_199"]),label='IPSL')
axs[0].plot(MIROC5.CC.year, MIROC5.CC.mean(dim=["X10_105","Y21_199"]),label='MIROC5')
axs[0].plot(NORESM.CC.year, NORESM.CC.mean(dim=["X10_105","Y21_199"]),label='NORESM')
axs[0].plot(CSIRO.CC.year, CSIRO.CC.mean(dim=["X10_105","Y21_199"]),label='CSIRO')
axs[0].legend(loc='upper left')
axs[0].set_xlabel('year')
axs[0].set_ylabel('CC')
axs[0].set_title('Cloud Cover - CMIP5 Models')
axs[1].plot(CESM.CC.year, CESM.CC.mean(dim=["X10_105","Y21_199"]), label='CESM')
axs[1].plot(CNRM_CM6.CC.year, CNRM_CM6.CC.mean(dim=["X10_105","Y21_199"]),label='CNRM_CM6')
axs[1].plot(CNRM_ESM2.CC.year, CNRM_ESM2.CC.mean(dim=["X10_105","Y21_199"]),label='CNRM_ESM2')
axs[1].plot(MRI.CC.year, MRI.CC.mean(dim=["X10_105","Y21_199"]),label='MRI')
axs[1].plot(UKMO.CC.year, UKMO.CC.mean(dim=["X10_105","Y21_199"]),label='UKMO')
axs[1].legend(loc='upper left')
axs[1].set_xlabel('year')
axs[1].set_ylabel('CC')
axs[1].set_title('Cloud Cover - CMIP6 Models')
sns.set_palette('colorblind')
plt.savefig('CC_test_2.png')
plt.show()
| 46.584906 | 97 | 0.722155 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,089 | 0.441069 |
b443e69cd16f1827fe9ba10cb1499425321f1ac2 | 1,059 | py | Python | manage.py | xinbingliang/dockertest | aca2a508658681a5e6b1beab714059bf1b43d9ed | [
"MIT"
]
| 30 | 2018-05-23T16:58:12.000Z | 2021-10-18T21:25:01.000Z | manage.py | xinbingliang/dockertest | aca2a508658681a5e6b1beab714059bf1b43d9ed | [
"MIT"
]
| 2 | 2019-12-01T13:32:50.000Z | 2019-12-01T13:32:53.000Z | manage.py | xinbingliang/dockertest | aca2a508658681a5e6b1beab714059bf1b43d9ed | [
"MIT"
]
| 136 | 2018-02-04T14:13:33.000Z | 2022-03-09T08:26:07.000Z | # manage.py
import unittest
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from skeleton.server import app, db
from skeleton.server.models import User
migrate = Migrate(app, db)
manager = Manager(app)
# migrations
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Runs the unit tests without coverage."""
tests = unittest.TestLoader().discover('tests', pattern='test*.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
else:
return 1
@manager.command
def create_db():
"""Creates the db tables."""
db.create_all()
@manager.command
def drop_db():
"""Drops the db tables."""
db.drop_all()
@manager.command
def create_admin():
"""Creates the admin user."""
db.session.add(User(email='[email protected]', password='admin', admin=True))
db.session.commit()
@manager.command
def create_data():
"""Creates sample data."""
pass
if __name__ == '__main__':
manager.run()
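# Example invocations (illustrative; Flask-Script exposes each decorated
# function under its own name):
#   python manage.py create_db      # create the db tables
#   python manage.py create_admin   # add the default admin user
#   python manage.py create_data    # load sample data (currently a no-op)
#   python manage.py test           # run the unit tests without coverage
#   python manage.py db migrate     # Flask-Migrate commands registered under 'db'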
| 18.578947 | 79 | 0.685552 | 0 | 0 | 0 | 0 | 699 | 0.660057 | 0 | 0 | 230 | 0.217186 |
b444035780c265816dfc1fd4e30cb0ee8b926672 | 610 | py | Python | client/middleware.py | uktrade/directory-forms-api | 078e38ddf7a761d2d34a0e1ab2dc3f20cd32e6aa | [
"MIT"
]
| null | null | null | client/middleware.py | uktrade/directory-forms-api | 078e38ddf7a761d2d34a0e1ab2dc3f20cd32e6aa | [
"MIT"
]
| 77 | 2018-10-29T14:38:37.000Z | 2022-03-23T14:20:39.000Z | client/middleware.py | uktrade/directory-forms-api | 078e38ddf7a761d2d34a0e1ab2dc3f20cd32e6aa | [
"MIT"
]
| 1 | 2021-08-05T10:20:17.000Z | 2021-08-05T10:20:17.000Z | import sigauth.middleware
import sigauth.helpers
from client import helpers
class SignatureCheckMiddleware(sigauth.middleware.SignatureCheckMiddlewareBase):
secret = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.request_checker = helpers.RequestSignatureChecker(self.secret)
def should_check(self, request):
if request.resolver_match.namespace in [
'admin', 'healthcheck', 'authbroker_client'
] or request.path_info.startswith('/admin/login'):
return False
return super().should_check(request)
| 30.5 | 80 | 0.703279 | 530 | 0.868852 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.086885 |
b444a932576d7caabe2a8eb3dc47c1e354d4d5e3 | 3,867 | py | Python | scripts/prepare-kernel-headers.py | sonicyang/mctest | 39c26c43e9fcf1fd94322effad4ca211d495339a | [
"BSD-2-Clause"
]
| 4 | 2017-05-22T07:05:33.000Z | 2020-10-22T02:34:48.000Z | scripts/prepare-kernel-headers.py | sonicyang/mctest | 39c26c43e9fcf1fd94322effad4ca211d495339a | [
"BSD-2-Clause"
]
| null | null | null | scripts/prepare-kernel-headers.py | sonicyang/mctest | 39c26c43e9fcf1fd94322effad4ca211d495339a | [
"BSD-2-Clause"
]
| 2 | 2020-02-19T13:23:16.000Z | 2020-12-08T02:26:16.000Z | import os
import subprocess
import errno
import shutil
import re
import sys
kernel_path = ''
install_path = ''
patch_rules = []
arch = ''
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def patch_rule_append(find_pattern, replace):
global patch_rules
patch_rules.append((find_pattern, replace))
def file_patch(infile):
with open(infile, 'r') as f:
lines = f.readlines()
with open(infile, 'w') as f:
global patch_rules
for line in lines:
for rule in patch_rules:
line = re.sub(rule[0], rule[1], line)
f.write(line)
def header_check(header):
global arch
unrelated_header_types =['drivers', 'tools', 'scripts', 'security',
'sound', 'drm', 'kvm', 'xen', 'scsi', 'video']
# skip unrelated architecture
arch_path = 'arch/' + arch
if 'arch/' in header and not arch_path in header:
return False
for h in unrelated_header_types:
if h in header:
return False
return True
def file_patch_and_install(src_path):
global kernel_path
global install_path
relative_path = src_path.split(kernel_path)[1]
file = relative_path.rsplit('/')[-1]
relative_dir = relative_path.split(file)[0]
dest_dir = install_path + relative_dir
if header_check(dest_dir) == False:
return
mkdir_p(dest_dir)
shutil.copy2(src_path, dest_dir)
dest_path = dest_dir + file
file_patch(dest_path)
def main():
"""Main function."""
argv = sys.argv
assert len(argv) == 4, 'Invalid arguments'
global kernel_path
global install_path
global arch
kernel_path = argv[1]
install_path = argv[2]
arch = argv[3]
    # avoid the conflict with the 'new' operator in C++
patch_rule_append('new', 'anew')
    # TODO: Add "extern "C"" to the function declarations in string_64.h
    # when we want to compile the module with C++ code.
if 'x86' in arch:
patch_rule_append('void \*memset\(void \*s, int c, size_t n\)\;',
'extern \"C\" {\nvoid *memset(void *s, int c, size_t n);')
patch_rule_append('int strcmp\(const char \*cs, const char \*ct\);',
'int strcmp(const char *cs, const char *ct);}')
# wrap the declaration of extern function with extern "C"
# e.g. extern void func(void); => extern "C" {void func(void);}
def wrapped_with_externC(matched):
func = matched.group(0).split('extern')[1]
return 'extern \"C\" {' + func + '}'
pattern = re.compile(r'^extern\s*[\w_][\w\d_]*[\s\*]*[\w_][\w\d_]*\(.*\);$')
patch_rule_append(pattern, wrapped_with_externC)
# avoid duplicated keyword definition
# e.g. typedef _Bool bool;
# => #ifndef __cplusplus
# typedef _Bool bool;
# #endif
def wrapped_with_ifndef_cpluscplus_macro(matched):
line = matched.group(0)
return '#ifndef __cplusplus\n' + line + '\n#endif\n'
pattern = re.compile(r'^\s*typedef.*\s*(false|true|bool);$')
patch_rule_append(pattern, wrapped_with_ifndef_cpluscplus_macro)
pattern = re.compile(r'^\s*(false|true|bool)\s*=.*$')
patch_rule_append(pattern, wrapped_with_ifndef_cpluscplus_macro)
# Use find command to find out all headers
find_cmd = 'find -L ' + kernel_path + ' -name *.h'
proc = subprocess.Popen(find_cmd, shell = True, stdout = subprocess.PIPE)
lines = proc.stdout.readlines()
for line in lines:
if line == '':
break
# Remove the newline character
src = line.replace('\n', "")
file_patch_and_install(src)
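# Illustrative sketch (hypothetical helper, not called anywhere): the regex
# rules registered in main() rewrite kernel headers so they can be included
# from C++. The function below applies two equivalent rules to sample lines to
# show the intended before/after effect.
def _patch_rules_demo():
    extern_pattern = re.compile(r'^extern\s*[\w_][\w\d_]*[\s\*]*[\w_][\w\d_]*\(.*\);$')
    bool_pattern = re.compile(r'^\s*typedef.*\s*(false|true|bool);$')

    line1 = re.sub(extern_pattern,
                   lambda m: 'extern "C" {' + m.group(0).split('extern')[1] + '}',
                   'extern void flush_tlb_all(void);')
    # line1 -> 'extern "C" { void flush_tlb_all(void);}'

    line2 = re.sub(bool_pattern,
                   lambda m: '#ifndef __cplusplus\n' + m.group(0) + '\n#endif\n',
                   'typedef _Bool bool;')
    # line2 -> the typedef wrapped in an '#ifndef __cplusplus' guard
    return line1, line2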
if __name__ == '__main__':
sys.exit(main())
| 27.820144 | 82 | 0.60693 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,131 | 0.292475 |
b446c92bc9ef0b8ec976811e71bda60bd2a8e30d | 18,912 | py | Python | model/loss.py | Daipuwei/YOLO-tf2 | 1b2e7133c99507573f419c8a367a8dba4abeae5b | [
"MIT"
]
| null | null | null | model/loss.py | Daipuwei/YOLO-tf2 | 1b2e7133c99507573f419c8a367a8dba4abeae5b | [
"MIT"
]
| null | null | null | model/loss.py | Daipuwei/YOLO-tf2 | 1b2e7133c99507573f419c8a367a8dba4abeae5b | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
# @Time    : 2021/9/18 11:19 PM
# @Author : DaiPuWei
# @Email : [email protected]
# @File : loss.py
# @Software: PyCharm
"""
This script defines the loss functions for the YOLO models. The classification loss currently supports label smoothing;
the localization loss supports MSE loss, GIoU Loss, DIoU Loss and CIoU Loss.
"""
import math
import tensorflow as tf
from tensorflow.keras import backend as K
# ---------------------------------------------------#
#   label smoothing
# ---------------------------------------------------#
def _smooth_labels(y_true, label_smoothing):
num_classes = tf.cast(K.shape(y_true)[-1], dtype=K.floatx())
label_smoothing = K.constant(label_smoothing, dtype=K.floatx())
return y_true * (1.0 - label_smoothing) + label_smoothing / num_classes
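# Illustrative check (hypothetical helper, not used elsewhere in this module):
# with three classes and label_smoothing = 0.1, a one-hot target [0, 1, 0]
# becomes y * (1 - 0.1) + 0.1 / 3 = [0.0333..., 0.9333..., 0.0333...].
def _smooth_labels_demo():
    # returns approximately [[0.0333, 0.9333, 0.0333]]
    return _smooth_labels(K.constant([[0., 1., 0.]]), 0.1)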
# ---------------------------------------------------#
#   decode each prediction feature layer into real values
# ---------------------------------------------------#
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
num_anchors = len(anchors)
# ---------------------------------------------------#
# [1, 1, 1, num_anchors, 2]
# ---------------------------------------------------#
anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])
# ---------------------------------------------------#
    #   build the x,y grid
# (13, 13, 1, 2)
# ---------------------------------------------------#
grid_shape = K.shape(feats)[1:3] # height, width
grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
[1, grid_shape[1], 1, 1])
grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
[grid_shape[0], 1, 1, 1])
grid = K.concatenate([grid_x, grid_y])
grid = K.cast(grid, K.dtype(feats))
# ---------------------------------------------------#
    #   reshape the predictions to (batch_size,13,13,3,85)
    #   85 can be split into 4 + 1 + 80
    #   4 are the centre / width-height adjustment parameters
    #   1 is the box confidence
    #   80 are the class confidences
# ---------------------------------------------------#
feats = K.reshape(feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])
# ---------------------------------------------------#
    #   convert the predictions into real values
    #   box_xy is the box centre
    #   box_wh is the box width and height
# ---------------------------------------------------#
box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[..., ::-1], K.dtype(feats))
box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[..., ::-1], K.dtype(feats))
box_confidence = K.sigmoid(feats[..., 4:5])
box_class_probs = K.sigmoid(feats[..., 5:])
# ---------------------------------------------------------------------#
    #   when computing the loss, return grid, feats, box_xy, box_wh
    #   when predicting, return box_xy, box_wh, box_confidence, box_class_probs
# ---------------------------------------------------------------------#
if calc_loss == True:
return grid, feats, box_xy, box_wh
return box_xy, box_wh, box_confidence, box_class_probs
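# Minimal numeric sketch (hypothetical helper, kept separate from the TF code
# above) of the decoding that yolo_head performs for a single anchor in grid
# cell (cx, cy) of a g x g feature map on an input of size input_size:
#   box_xy = (sigmoid(tx, ty) + (cx, cy)) / g        -> fraction of the image
#   box_wh = anchor * exp(tw, th) / input_size       -> fraction of the image
# The anchor size used here is just an example value.
def _yolo_head_cell_demo():
    import numpy as np
    g, input_size = 13.0, 416.0
    cx, cy = 6.0, 4.0                       # grid cell indices
    tx, ty, tw, th = 0.0, 0.0, 0.0, 0.0     # raw network outputs
    anchor_w, anchor_h = 116.0, 90.0        # one anchor, in input pixels
    sig = lambda z: 1.0 / (1.0 + np.exp(-z))
    box_xy = ((sig(tx) + cx) / g, (sig(ty) + cy) / g)           # (0.5, ~0.346)
    box_wh = (anchor_w * np.exp(tw) / input_size,
              anchor_h * np.exp(th) / input_size)               # (~0.279, ~0.216)
    return box_xy, box_wh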
# ---------------------------------------------------#
#   computes the IoU between each predicted box and the ground-truth boxes
# ---------------------------------------------------#
def box_iou(b_true, b_pred):
# 13,13,3,1,4
    # compute the top-left and bottom-right corners (ground-truth boxes)
b_true = K.expand_dims(b_true, -2)
b_true_xy = b_true[..., :2]
b_true_wh = b_true[..., 2:4]
b_true_wh_half = b_true_wh / 2.
b_true_mins = b_true_xy - b_true_wh_half
b_true_maxes = b_true_xy + b_true_wh_half
# 1,n,4
    # compute the top-left and bottom-right corners (predicted boxes)
b_pred = K.expand_dims(b_pred, 0)
b_pred_xy = b_pred[..., :2]
b_pred_wh = b_pred[..., 2:4]
b_pred_wh_half = b_pred_wh / 2.
b_pred_mins = b_pred_xy - b_pred_wh_half
b_pred_maxes = b_pred_xy + b_pred_wh_half
    # compute the intersection area
intersect_mins = K.maximum(b_true_mins, b_pred_mins)
intersect_maxes = K.minimum(b_true_maxes, b_pred_maxes)
intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
b_true_area = b_true_wh[..., 0] * b_true_wh[..., 1]
b_pred_area = b_pred_wh[..., 0] * b_pred_wh[..., 1]
iou = intersect_area / (b_true_area + b_pred_area - intersect_area)
return iou
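# Quick numeric check (hypothetical helper): for two boxes in the same
# centre/width/height (xywh) format that box_iou uses, a unit box at the
# origin and a unit box shifted by 0.5 in x overlap on half of their area,
# so IoU = 0.5 / (1 + 1 - 0.5) = 1/3.
def _iou_xywh_demo():
    import numpy as np
    def iou(b1, b2):
        b1, b2 = np.asarray(b1, float), np.asarray(b2, float)
        mins = np.maximum(b1[:2] - b1[2:] / 2, b2[:2] - b2[2:] / 2)
        maxes = np.minimum(b1[:2] + b1[2:] / 2, b2[:2] + b2[2:] / 2)
        wh = np.maximum(maxes - mins, 0.0)
        inter = wh[0] * wh[1]
        union = b1[2] * b1[3] + b2[2] * b2[3] - inter
        return inter / union
    assert abs(iou([0, 0, 1, 1], [0.5, 0, 1, 1]) - 1.0 / 3.0) < 1e-9
    return iou([0, 0, 1, 1], [0.5, 0, 1, 1])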
def box_giou(b_true, b_pred):
"""
Calculate GIoU loss on anchor boxes
Reference Paper:
"Generalized Intersection over Union: A Metric and A Loss for Bounding Box Regression"
https://arxiv.org/abs/1902.09630
Parameters
----------
b_true: GT boxes tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh
b_pred: predict boxes tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh
Returns
-------
giou: tensor, shape=(batch, feat_w, feat_h, anchor_num, 1)
"""
b_true_xy = b_true[..., :2]
b_true_wh = b_true[..., 2:4]
b_true_wh_half = b_true_wh / 2.
b_true_mins = b_true_xy - b_true_wh_half
b_true_maxes = b_true_xy + b_true_wh_half
b_pred_xy = b_pred[..., :2]
b_pred_wh = b_pred[..., 2:4]
b_pred_wh_half = b_pred_wh / 2.
b_pred_mins = b_pred_xy - b_pred_wh_half
b_pred_maxes = b_pred_xy + b_pred_wh_half
intersect_mins = K.maximum(b_true_mins, b_pred_mins)
intersect_maxes = K.minimum(b_true_maxes, b_pred_maxes)
intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
b_true_area = b_true_wh[..., 0] * b_true_wh[..., 1]
b_pred_area = b_pred_wh[..., 0] * b_pred_wh[..., 1]
union_area = b_true_area + b_pred_area - intersect_area
# calculate IoU, add epsilon in denominator to avoid dividing by 0
iou = intersect_area / (union_area + K.epsilon())
# get enclosed area
enclose_mins = K.minimum(b_true_mins, b_pred_mins)
enclose_maxes = K.maximum(b_true_maxes, b_pred_maxes)
enclose_wh = K.maximum(enclose_maxes - enclose_mins, 0.0)
enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
# calculate GIoU, add epsilon in denominator to avoid dividing by 0
giou = iou - 1.0 * (enclose_area - union_area) / (enclose_area + K.epsilon())
giou = K.expand_dims(giou, -1)
return giou
def box_diou(b_true, b_pred,use_ciou_loss=False):
"""
    Parameters
    ----------
    b_true: tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh
    b_pred: tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh
    Returns
    -------
    diou (or ciou when use_ciou_loss=True): tensor, shape=(batch, feat_w, feat_h, anchor_num, 1)
"""
    # top-left and bottom-right corners of b_true
b_true_xy = b_true[..., :2]
b_true_wh = b_true[..., 2:4]
b_true_wh_half = b_true_wh / 2.
b_true_mins = b_true_xy - b_true_wh_half
b_true_maxes = b_true_xy + b_true_wh_half
    # top-left and bottom-right corners of b_pred
b_pred_xy = b_pred[..., :2]
b_pred_wh = b_pred[..., 2:4]
b_pred_wh_half = b_pred_wh / 2.
b_pred_mins = b_pred_xy - b_pred_wh_half
b_pred_maxes = b_pred_xy + b_pred_wh_half
    # IoU between the ground-truth and predicted boxes
intersect_mins = K.maximum(b_true_mins, b_pred_mins)
intersect_maxes = K.minimum(b_true_maxes, b_pred_maxes)
intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
b1_area = b_true_wh[..., 0] * b_true_wh[..., 1]
b_pred_area = b_pred_wh[..., 0] * b_pred_wh[..., 1]
union_area = b1_area + b_pred_area - intersect_area
iou = intersect_area / K.maximum(union_area, K.epsilon())
    # squared distance between the box centres
center_distance = K.sum(K.square(b_true_xy - b_pred_xy), axis=-1)
    # top-left and bottom-right corners of the smallest box enclosing both boxes
enclose_mins = K.minimum(b_true_mins, b_pred_mins)
enclose_maxes = K.maximum(b_true_maxes, b_pred_maxes)
enclose_wh = K.maximum(enclose_maxes - enclose_mins, 0.0)
    # squared diagonal length of the enclosing box
enclose_diagonal = K.sum(K.square(enclose_wh), axis=-1)
diou = iou - 1.0 * (center_distance) / K.maximum(enclose_diagonal, K.epsilon())
if use_ciou_loss:
v = 4 * K.square(tf.math.atan2(b_true_wh[..., 0], K.maximum(b_true_wh[..., 1], K.epsilon()))
- tf.math.atan2(b_pred_wh[..., 0],K.maximum(b_pred_wh[..., 1],K.epsilon()))) / (math.pi * math.pi)
# a trick: here we add an non-gradient coefficient w^2+h^2 to v to customize it's back-propagate,
# to match related description for equation (12) in original paper
#
#
# v'/w' = (8/pi^2) * (arctan(wgt/hgt) - arctan(w/h)) * (h/(w^2+h^2)) (12)
# v'/h' = -(8/pi^2) * (arctan(wgt/hgt) - arctan(w/h)) * (w/(w^2+h^2))
#
# The dominator w^2+h^2 is usually a small value for the cases
# h and w ranging in [0; 1], which is likely to yield gradient
# explosion. And thus in our implementation, the dominator
# w^2+h^2 is simply removed for stable convergence, by which
# the step size 1/(w^2+h^2) is replaced by 1 and the gradient direction
# is still consistent with Eqn. (12).
v = v * tf.stop_gradient(b_pred_wh[..., 0] * b_pred_wh[..., 0] + b_pred_wh[..., 1] * b_pred_wh[..., 1])
alpha = v / K.maximum((1.0 - iou + v), K.epsilon())
diou = diou - alpha * v
diou = K.expand_dims(diou, -1)
diou = tf.where(tf.math.is_nan(diou), tf.zeros_like(diou), diou)
return diou
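# Worked numbers (illustrative) for the penalty terms above, using the xywh
# boxes b1 = (0, 0, 1, 1) and b2 = (0.5, 0, 1, 1), whose plain IoU is 1/3:
#   enclosing box C spans x in [-0.5, 1.0], y in [-0.5, 0.5]  -> area(C) = 1.5
#   GIoU = IoU - (area(C) - union) / area(C) = 1/3 - (1.5 - 1.5) / 1.5 = 1/3
#   DIoU = IoU - d^2 / c^2, with centre distance d = 0.5 and enclosing-box
#          diagonal c^2 = 1.5^2 + 1.0^2 = 3.25, so DIoU = 1/3 - 0.25/3.25 ~ 0.256
#   CIoU additionally subtracts alpha * v, where v measures the aspect-ratio
#   mismatch and is zero here because both boxes are 1 x 1.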
# ---------------------------------------------------#
#   loss computation
# ---------------------------------------------------#
def yolo_loss(args, anchors,num_classes,ignore_threshold=.5,label_smoothing=0.1,
use_giou_loss=False,use_diou_loss=False,use_ciou_loss=False,normalize=True,model_name='yolov3'):
    # choose the anchor mask and the number of output layers according to the yolo variant
if model_name == "yolov3": # yolov3
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
elif model_name == 'yolov3-spp': # yolov3-spp
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
elif model_name == 'yolov4': # yolov4
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
elif model_name == 'yolov4-csp': # yolov4-csp
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
elif model_name == 'yolov4-p5': # yolov4-p5
anchor_mask = [[8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]]
num_layers = 3
elif model_name == 'yolov4-p6': # yolov4-p6
anchor_mask = [[12, 13, 14, 15], [8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]]
num_layers = 4
elif model_name == 'yolov4-p7': # yolov4-p7
anchor_mask = [[16, 17, 18, 19], [12, 13, 14, 15], [8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]]
num_layers = 5
elif model_name == 'yolov3-tiny': # yolov3-tiny
anchor_mask = [[3, 4, 5], [0, 1, 2]]
num_layers = 2
elif model_name == 'yolov4-tiny': # yolov4-tiny
anchor_mask = [[3, 4, 5], [0, 1, 2]]
num_layers = 2
    else: # default to yolov3
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
    # separate the predictions from the ground truth; args is [*model_body.output, *y_true]
y_true = args[num_layers:]
yolo_outputs = args[:num_layers]
    # derive the input shape and grid shapes according to the yolo variant
if model_name == "yolov3": # yolov3
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*32, K.dtype(y_true[0]))
elif model_name == 'yolov3-spp': # yolov3-spp
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*32, K.dtype(y_true[0]))
elif model_name == 'yolov4': # yolov4
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*32, K.dtype(y_true[0]))
elif model_name == 'yolov4-csp': # yolov4-csp
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*32, K.dtype(y_true[0]))
elif model_name == 'yolov4-p5': # yolov4-p5
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*32, K.dtype(y_true[0]))
elif model_name == 'yolov4-p6': # yolov4-p6
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*64, K.dtype(y_true[0]))
elif model_name == 'yolov4-p7': # yolov4-p7
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*128, K.dtype(y_true[0]))
elif model_name == 'yolov3-tiny': # yolov3-tiny
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*32, K.dtype(y_true[0]))
elif model_name == 'yolov4-tiny': # yolov4-tiny
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*32, K.dtype(y_true[0]))
    else: # default to yolov3
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3]*32, K.dtype(y_true[0]))
grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[l])) for l in range(num_layers)]
loss = 0
num_pos = 0
m = K.shape(yolo_outputs[0])[0]
mf = K.cast(m, K.dtype(yolo_outputs[0]))
for l in range(num_layers):
# -----------------------------------------------------------#
        #   take the first feature map (m,13,13,3,85) as an example
        #   extract the positions in this feature map that contain an object: (m,13,13,3,1)
# -----------------------------------------------------------#
object_mask = y_true[l][..., 4:5]
true_class_probs = y_true[l][..., 5:]
        if label_smoothing: # apply label smoothing
true_class_probs = _smooth_labels(true_class_probs, label_smoothing)
# -----------------------------------------------------------#
        #   process the feature-map output of yolo_outputs and obtain four return values
        #   grid is the grid coordinates
        #   raw_pred is the raw, unprocessed prediction
        #   pred_xy is the decoded centre coordinates
        #   pred_wh is the decoded width and height
# -----------------------------------------------------------#
grid, raw_pred, pred_xy, pred_wh = yolo_head(yolo_outputs[l],
anchors[anchor_mask[l]], num_classes, input_shape, calc_loss=True)
        # pred_box is the decoded predicted box position
pred_box = K.concatenate([pred_xy, pred_wh])
# -----------------------------------------------------------#
        #   find the group of negative samples; the first step is to create an array, []
# -----------------------------------------------------------#
ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)
object_mask_bool = K.cast(object_mask, 'bool')
        # compute ignore_mask for each image
def loop_body(b, ignore_mask):
            # take out the n ground-truth boxes: n,4
true_box = tf.boolean_mask(y_true[l][b, ..., 0:4], object_mask_bool[b, ..., 0])
# -----------------------------------------------------------#
            #   compute the IoU between the predicted boxes and the ground-truth boxes
            #   pred_box are the coordinates of the predicted boxes
            #   true_box are the coordinates of the ground-truth boxes
            #   iou is the IoU between predicted and ground-truth boxes
# -----------------------------------------------------------#
iou = box_iou(pred_box[b], true_box)
            # best_iou is the maximum overlap of each feature point with the ground-truth boxes
best_iou = K.max(iou, axis=-1)
# -----------------------------------------------------------#
            #   if the largest IoU of a predicted box with the ground-truth boxes is below ignore_thresh,
            #   the predicted box is considered to have no matching ground-truth box.
            #   the purpose of this operation is:
            #   to ignore feature points whose predictions already match a ground-truth box closely,
            #   because such boxes are already fairly accurate and not suitable as negative samples.
# -----------------------------------------------------------#
ignore_mask = ignore_mask.write(b, K.cast(best_iou < ignore_threshold, K.dtype(true_box)))
return b + 1, ignore_mask
        # loop here over each image in the batch
_, ignore_mask = tf.while_loop(lambda b, *args: b < m, loop_body, [0, ignore_mask])
        # ignore_mask is used to pick out the feature points that serve as negative samples
ignore_mask = ignore_mask.stack()
ignore_mask = K.expand_dims(ignore_mask, -1)
        # the larger the ground-truth box, the smaller its weight; small boxes get a larger weight.
box_loss_scale = 2 - y_true[l][..., 2:3] * y_true[l][..., 3:4]
# ------------------------------------------------------------------------------#
        #   if this position originally contains a box, compute the cross-entropy between 1 and the confidence
        #   if this position originally contains no box, compute the cross-entropy between 0 and the confidence
        #   part of the samples are ignored here, namely those satisfying best_iou < ignore_thresh
        #   the purpose of this operation is:
        #   to ignore feature points whose predictions already match a ground-truth box closely,
        #   because such boxes are already fairly accurate and not suitable as negative samples.
# ------------------------------------------------------------------------------#
confidence_loss = object_mask * K.binary_crossentropy(object_mask, raw_pred[..., 4:5], from_logits=True) + \
(1 - object_mask) * K.binary_crossentropy(object_mask, raw_pred[..., 4:5],
from_logits=True) * ignore_mask
class_loss = object_mask * K.binary_crossentropy(true_class_probs, raw_pred[..., 5:], from_logits=True)
        # choose the localization loss according to the flags
        if use_giou_loss: # compute the GIoU loss
raw_true_box = y_true[l][..., 0:4]
giou = box_giou(raw_true_box, pred_box)
giou_loss = object_mask * box_loss_scale * (1 - giou)
giou_loss = K.sum(giou_loss)
location_loss = giou_loss
        elif use_diou_loss: # compute the DIoU loss
raw_true_box = y_true[l][..., 0:4]
diou = box_diou(pred_box, raw_true_box, use_ciou_loss=False)
diou_loss = object_mask * box_loss_scale * (1 - diou)
location_loss = diou_loss
        elif use_ciou_loss: # compute the CIoU loss
raw_true_box = y_true[l][..., 0:4]
ciou = box_diou(pred_box, raw_true_box,use_ciou_loss=True)
ciou_loss = object_mask * box_loss_scale * (1 - ciou)
location_loss = ciou_loss
        else: # standard YOLOv3 bounding-box localization loss
# Standard YOLOv3 location loss
# K.binary_crossentropy is helpful to avoid exp overflow.
raw_true_xy = y_true[l][..., :2] * grid_shapes[l][::-1] - grid
raw_true_wh = K.log(y_true[l][..., 2:4] / anchors[anchor_mask[l]] * input_shape[::-1])
raw_true_wh = K.switch(object_mask, raw_true_wh, K.zeros_like(raw_true_wh)) # avoid log(0)=-inf
box_loss_scale = 2 - y_true[l][..., 2:3] * y_true[l][..., 3:4]
xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(raw_true_xy, raw_pred[..., 0:2],
from_logits=True)
wh_loss = object_mask * box_loss_scale * 0.5 * K.square(raw_true_wh - raw_pred[..., 2:4])
xy_loss = K.sum(xy_loss)
wh_loss = K.sum(wh_loss)
location_loss = xy_loss + wh_loss
location_loss = K.sum(location_loss)
confidence_loss = K.sum(confidence_loss)
class_loss = K.sum(class_loss)
        # count the number of positive samples
num_pos += tf.maximum(K.sum(K.cast(object_mask, tf.float32)), 1)
loss += location_loss + confidence_loss + class_loss
loss = K.expand_dims(loss, axis=-1)
    # compute the YOLO model loss
if normalize:
loss = loss / num_pos
else:
loss = loss / mf
return loss | 46.239609 | 123 | 0.519882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,306 | 0.355558 |
b4470139b4eff5eadddd95183f7509c2d7a4cf79 | 59,405 | py | Python | electrum_vtc/tests/test_lnpeer.py | samdisk11/electrum | 4fffb4328a1764b5cd969b5c733e67bced2548a0 | [
"MIT"
]
| null | null | null | electrum_vtc/tests/test_lnpeer.py | samdisk11/electrum | 4fffb4328a1764b5cd969b5c733e67bced2548a0 | [
"MIT"
]
| 2 | 2022-01-11T17:19:40.000Z | 2022-01-14T16:32:23.000Z | electrum_vtc/tests/test_lnpeer.py | samdisk11/electrum | 4fffb4328a1764b5cd969b5c733e67bced2548a0 | [
"MIT"
]
| 2 | 2022-01-13T05:04:16.000Z | 2022-01-14T11:48:39.000Z | import asyncio
import tempfile
from decimal import Decimal
import os
from contextlib import contextmanager
from collections import defaultdict
import logging
import concurrent
from concurrent import futures
import unittest
from typing import Iterable, NamedTuple, Tuple, List, Dict
from aiorpcx import TaskGroup, timeout_after, TaskTimeout
import electrum_vtc as electrum
import electrum_vtc.trampoline
from electrum_vtc import bitcoin
from electrum_vtc import constants
from electrum_vtc.network import Network
from electrum_vtc.ecc import ECPrivkey
from electrum_vtc import simple_config, lnutil
from electrum_vtc.lnaddr import lnencode, LnAddr, lndecode
from electrum_vtc.bitcoin import COIN, sha256
from electrum_vtc.util import bh2u, create_and_start_event_loop, NetworkRetryManager, bfh
from electrum_vtc.lnpeer import Peer, UpfrontShutdownScriptViolation
from electrum_vtc.lnutil import LNPeerAddr, Keypair, privkey_to_pubkey
from electrum_vtc.lnutil import LightningPeerConnectionClosed, RemoteMisbehaving
from electrum_vtc.lnutil import PaymentFailure, LnFeatures, HTLCOwner
from electrum_vtc.lnchannel import ChannelState, PeerState, Channel
from electrum_vtc.lnrouter import LNPathFinder, PathEdge, LNPathInconsistent
from electrum_vtc.channel_db import ChannelDB
from electrum_vtc.lnworker import LNWallet, NoPathFound
from electrum_vtc.lnmsg import encode_msg, decode_msg
from electrum_vtc import lnmsg
from electrum_vtc.logging import console_stderr_handler, Logger
from electrum_vtc.lnworker import PaymentInfo, RECEIVED
from electrum_vtc.lnonion import OnionFailureCode
from electrum_vtc.lnutil import derive_payment_secret_from_payment_preimage
from electrum_vtc.lnutil import LOCAL, REMOTE
from electrum_vtc.invoices import PR_PAID, PR_UNPAID
from .test_lnchannel import create_test_channels
from .test_bitcoin import needs_test_with_all_chacha20_implementations
from . import TestCaseForTestnet
def keypair():
priv = ECPrivkey.generate_random_key().get_secret_bytes()
k1 = Keypair(
pubkey=privkey_to_pubkey(priv),
privkey=priv)
return k1
@contextmanager
def noop_lock():
yield
class MockNetwork:
def __init__(self, tx_queue):
self.callbacks = defaultdict(list)
self.lnwatcher = None
self.interface = None
user_config = {}
user_dir = tempfile.mkdtemp(prefix="electrum-lnpeer-test-")
self.config = simple_config.SimpleConfig(user_config, read_user_dir_function=lambda: user_dir)
self.asyncio_loop = asyncio.get_event_loop()
self.channel_db = ChannelDB(self)
self.channel_db.data_loaded.set()
self.path_finder = LNPathFinder(self.channel_db)
self.tx_queue = tx_queue
self._blockchain = MockBlockchain()
@property
def callback_lock(self):
return noop_lock()
def get_local_height(self):
return 0
def blockchain(self):
return self._blockchain
async def broadcast_transaction(self, tx):
if self.tx_queue:
await self.tx_queue.put(tx)
async def try_broadcasting(self, tx, name):
await self.broadcast_transaction(tx)
class MockBlockchain:
def height(self):
return 0
def is_tip_stale(self):
return False
class MockWallet:
def set_label(self, x, y):
pass
def save_db(self):
pass
def add_transaction(self, tx):
pass
def is_lightning_backup(self):
return False
def is_mine(self, addr):
return True
class MockLNWallet(Logger, NetworkRetryManager[LNPeerAddr]):
MPP_EXPIRY = 2 # HTLC timestamps are cast to int, so this cannot be 1
TIMEOUT_SHUTDOWN_FAIL_PENDING_HTLCS = 0
INITIAL_TRAMPOLINE_FEE_LEVEL = 0
def __init__(self, *, local_keypair: Keypair, chans: Iterable['Channel'], tx_queue, name):
self.name = name
Logger.__init__(self)
NetworkRetryManager.__init__(self, max_retry_delay_normal=1, init_retry_delay_normal=1)
self.node_keypair = local_keypair
self.network = MockNetwork(tx_queue)
self.taskgroup = TaskGroup()
self.lnwatcher = None
self.listen_server = None
self._channels = {chan.channel_id: chan for chan in chans}
self.payments = {}
self.logs = defaultdict(list)
self.wallet = MockWallet()
self.features = LnFeatures(0)
self.features |= LnFeatures.OPTION_DATA_LOSS_PROTECT_OPT
self.features |= LnFeatures.OPTION_UPFRONT_SHUTDOWN_SCRIPT_OPT
self.features |= LnFeatures.VAR_ONION_OPT
self.features |= LnFeatures.PAYMENT_SECRET_OPT
self.features |= LnFeatures.OPTION_TRAMPOLINE_ROUTING_OPT
self.pending_payments = defaultdict(asyncio.Future)
for chan in chans:
chan.lnworker = self
self._peers = {} # bytes -> Peer
# used in tests
self.enable_htlc_settle = True
self.enable_htlc_forwarding = True
self.received_mpp_htlcs = dict()
self.sent_htlcs = defaultdict(asyncio.Queue)
self.sent_htlcs_routes = dict()
self.sent_buckets = defaultdict(set)
self.trampoline_forwarding_failures = {}
self.inflight_payments = set()
self.preimages = {}
self.stopping_soon = False
self.downstream_htlc_to_upstream_peer_map = {}
self.logger.info(f"created LNWallet[{name}] with nodeID={local_keypair.pubkey.hex()}")
def get_invoice_status(self, key):
pass
@property
def lock(self):
return noop_lock()
@property
def channel_db(self):
return self.network.channel_db if self.network else None
@property
def channels(self):
return self._channels
@property
def peers(self):
return self._peers
def get_channel_by_short_id(self, short_channel_id):
with self.lock:
for chan in self._channels.values():
if chan.short_channel_id == short_channel_id:
return chan
def channel_state_changed(self, chan):
pass
def save_channel(self, chan):
print("Ignoring channel save")
def diagnostic_name(self):
return self.name
async def stop(self):
await LNWallet.stop(self)
if self.channel_db:
self.channel_db.stop()
await self.channel_db.stopped_event.wait()
async def create_routes_from_invoice(self, amount_msat: int, decoded_invoice: LnAddr, *, full_path=None):
return [r async for r in self.create_routes_for_payment(
amount_msat=amount_msat,
final_total_msat=amount_msat,
invoice_pubkey=decoded_invoice.pubkey.serialize(),
min_cltv_expiry=decoded_invoice.get_min_final_cltv_expiry(),
r_tags=decoded_invoice.get_routing_info('r'),
invoice_features=decoded_invoice.get_features(),
trampoline_fee_levels=defaultdict(int),
use_two_trampolines=False,
payment_hash=decoded_invoice.paymenthash,
payment_secret=decoded_invoice.payment_secret,
full_path=full_path)]
get_payments = LNWallet.get_payments
get_payment_info = LNWallet.get_payment_info
save_payment_info = LNWallet.save_payment_info
set_invoice_status = LNWallet.set_invoice_status
set_request_status = LNWallet.set_request_status
set_payment_status = LNWallet.set_payment_status
get_payment_status = LNWallet.get_payment_status
check_received_mpp_htlc = LNWallet.check_received_mpp_htlc
htlc_fulfilled = LNWallet.htlc_fulfilled
htlc_failed = LNWallet.htlc_failed
save_preimage = LNWallet.save_preimage
get_preimage = LNWallet.get_preimage
create_route_for_payment = LNWallet.create_route_for_payment
create_routes_for_payment = LNWallet.create_routes_for_payment
_check_invoice = staticmethod(LNWallet._check_invoice)
pay_to_route = LNWallet.pay_to_route
pay_to_node = LNWallet.pay_to_node
pay_invoice = LNWallet.pay_invoice
force_close_channel = LNWallet.force_close_channel
try_force_closing = LNWallet.try_force_closing
get_first_timestamp = lambda self: 0
on_peer_successfully_established = LNWallet.on_peer_successfully_established
get_channel_by_id = LNWallet.get_channel_by_id
channels_for_peer = LNWallet.channels_for_peer
_calc_routing_hints_for_invoice = LNWallet._calc_routing_hints_for_invoice
handle_error_code_from_failed_htlc = LNWallet.handle_error_code_from_failed_htlc
is_trampoline_peer = LNWallet.is_trampoline_peer
wait_for_received_pending_htlcs_to_get_removed = LNWallet.wait_for_received_pending_htlcs_to_get_removed
on_proxy_changed = LNWallet.on_proxy_changed
_decode_channel_update_msg = LNWallet._decode_channel_update_msg
_handle_chanupd_from_failed_htlc = LNWallet._handle_chanupd_from_failed_htlc
_on_maybe_forwarded_htlc_resolved = LNWallet._on_maybe_forwarded_htlc_resolved
class MockTransport:
def __init__(self, name):
self.queue = asyncio.Queue()
self._name = name
def name(self):
return self._name
async def read_messages(self):
while True:
yield await self.queue.get()
class NoFeaturesTransport(MockTransport):
"""
    This answers the init message with an init that doesn't signal any features.
Used for testing that we require DATA_LOSS_PROTECT.
"""
def send_bytes(self, data):
decoded = decode_msg(data)
print(decoded)
if decoded[0] == 'init':
self.queue.put_nowait(encode_msg('init', lflen=1, gflen=1, localfeatures=b"\x00", globalfeatures=b"\x00"))
class PutIntoOthersQueueTransport(MockTransport):
def __init__(self, keypair, name):
super().__init__(name)
self.other_mock_transport = None
self.privkey = keypair.privkey
def send_bytes(self, data):
self.other_mock_transport.queue.put_nowait(data)
def transport_pair(k1, k2, name1, name2):
t1 = PutIntoOthersQueueTransport(k1, name1)
t2 = PutIntoOthersQueueTransport(k2, name2)
t1.other_mock_transport = t2
t2.other_mock_transport = t1
return t1, t2
class PeerInTests(Peer):
DELAY_INC_MSG_PROCESSING_SLEEP = 0 # disable rate-limiting
high_fee_channel = {
'local_balance_msat': 10 * bitcoin.COIN * 1000 // 2,
'remote_balance_msat': 10 * bitcoin.COIN * 1000 // 2,
'local_base_fee_msat': 500_000,
'local_fee_rate_millionths': 500,
'remote_base_fee_msat': 500_000,
'remote_fee_rate_millionths': 500,
}
low_fee_channel = {
'local_balance_msat': 10 * bitcoin.COIN * 1000 // 2,
'remote_balance_msat': 10 * bitcoin.COIN * 1000 // 2,
'local_base_fee_msat': 1_000,
'local_fee_rate_millionths': 1,
'remote_base_fee_msat': 1_000,
'remote_fee_rate_millionths': 1,
}
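# Note: with these definitions, the fee for forwarding `amt` msat over a hop is
# roughly base_fee_msat + amt * fee_rate_millionths / 1_000_000, e.g. forwarding
# 100_000_000 msat costs about 500_000 + 50_000 msat on the high-fee channel but
# only about 1_000 + 100 msat on the low-fee one, which is what makes the
# alice->carol->dave side of the square graph below the cheaper route.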
GRAPH_DEFINITIONS = {
'square_graph': {
'alice': {
'channels': {
# we should use copies of channel definitions if
# we want to independently alter them in a test
'bob': high_fee_channel.copy(),
'carol': low_fee_channel.copy(),
},
},
'bob': {
'channels': {
'dave': high_fee_channel.copy(),
},
'config': {
'lightning_forward_payments': True,
'lightning_forward_trampoline_payments': True,
},
},
'carol': {
'channels': {
'dave': low_fee_channel.copy(),
},
'config': {
'lightning_forward_payments': True,
'lightning_forward_trampoline_payments': True,
},
},
'dave': {
},
}
}
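# The square graph defined above, roughly (alice pays dave in the multi-hop
# tests; bob and carol have forwarding / trampoline forwarding enabled):
#
#             alice
#            /     \
#     (high fee)  (low fee)
#          /         \
#        bob         carol
#          \         /
#     (high fee)  (low fee)
#            \     /
#             dave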
class Graph(NamedTuple):
workers: Dict[str, MockLNWallet]
peers: Dict[Tuple[str, str], Peer]
channels: Dict[Tuple[str, str], Channel]
class PaymentDone(Exception): pass
class SuccessfulTest(Exception): pass
class TestPeer(TestCaseForTestnet):
@classmethod
def setUpClass(cls):
super().setUpClass()
console_stderr_handler.setLevel(logging.DEBUG)
def setUp(self):
super().setUp()
self.asyncio_loop, self._stop_loop, self._loop_thread = create_and_start_event_loop()
self._lnworkers_created = [] # type: List[MockLNWallet]
def tearDown(self):
async def cleanup_lnworkers():
async with TaskGroup() as group:
for lnworker in self._lnworkers_created:
await group.spawn(lnworker.stop())
self._lnworkers_created.clear()
run(cleanup_lnworkers())
self.asyncio_loop.call_soon_threadsafe(self._stop_loop.set_result, 1)
self._loop_thread.join(timeout=1)
super().tearDown()
def prepare_peers(self, alice_channel: Channel, bob_channel: Channel):
k1, k2 = keypair(), keypair()
alice_channel.node_id = k2.pubkey
bob_channel.node_id = k1.pubkey
t1, t2 = transport_pair(k1, k2, alice_channel.name, bob_channel.name)
q1, q2 = asyncio.Queue(), asyncio.Queue()
w1 = MockLNWallet(local_keypair=k1, chans=[alice_channel], tx_queue=q1, name=bob_channel.name)
w2 = MockLNWallet(local_keypair=k2, chans=[bob_channel], tx_queue=q2, name=alice_channel.name)
self._lnworkers_created.extend([w1, w2])
p1 = PeerInTests(w1, k2.pubkey, t1)
p2 = PeerInTests(w2, k1.pubkey, t2)
w1._peers[p1.pubkey] = p1
w2._peers[p2.pubkey] = p2
# mark_open won't work if state is already OPEN.
# so set it to FUNDED
alice_channel._state = ChannelState.FUNDED
bob_channel._state = ChannelState.FUNDED
# this populates the channel graph:
p1.mark_open(alice_channel)
p2.mark_open(bob_channel)
return p1, p2, w1, w2, q1, q2
def prepare_chans_and_peers_in_graph(self, graph_definition) -> Graph:
keys = {k: keypair() for k in graph_definition}
txs_queues = {k: asyncio.Queue() for k in graph_definition}
channels = {} # type: Dict[Tuple[str, str], Channel]
transports = {}
workers = {} # type: Dict[str, MockLNWallet]
peers = {}
# create channels
for a, definition in graph_definition.items():
for b, channel_def in definition.get('channels', {}).items():
channel_ab, channel_ba = create_test_channels(
alice_name=a,
bob_name=b,
alice_pubkey=keys[a].pubkey,
bob_pubkey=keys[b].pubkey,
local_msat=channel_def['local_balance_msat'],
remote_msat=channel_def['remote_balance_msat'],
)
channels[(a, b)], channels[(b, a)] = channel_ab, channel_ba
transport_ab, transport_ba = transport_pair(keys[a], keys[b], channel_ab.name, channel_ba.name)
transports[(a, b)], transports[(b, a)] = transport_ab, transport_ba
# set fees
channel_ab.forwarding_fee_proportional_millionths = channel_def['local_fee_rate_millionths']
channel_ab.forwarding_fee_base_msat = channel_def['local_base_fee_msat']
channel_ba.forwarding_fee_proportional_millionths = channel_def['remote_fee_rate_millionths']
channel_ba.forwarding_fee_base_msat = channel_def['remote_base_fee_msat']
# create workers and peers
for a, definition in graph_definition.items():
channels_of_node = [c for k, c in channels.items() if k[0] == a]
workers[a] = MockLNWallet(local_keypair=keys[a], chans=channels_of_node, tx_queue=txs_queues[a], name=a)
self._lnworkers_created.extend(list(workers.values()))
# create peers
for ab in channels.keys():
peers[ab] = Peer(workers[ab[0]], keys[ab[1]].pubkey, transports[ab])
# add peers to workers
for a, w in workers.items():
for ab, peer_ab in peers.items():
if ab[0] == a:
w._peers[peer_ab.pubkey] = peer_ab
# set forwarding properties
for a, definition in graph_definition.items():
for property in definition.get('config', {}).items():
workers[a].network.config.set_key(*property)
# mark_open won't work if state is already OPEN.
# so set it to FUNDED
for channel_ab in channels.values():
channel_ab._state = ChannelState.FUNDED
# this populates the channel graph:
for ab, peer_ab in peers.items():
peer_ab.mark_open(channels[ab])
graph = Graph(
workers=workers,
peers=peers,
channels=channels,
)
for a in workers:
print(f"{a} -> pubkey {keys[a].pubkey}")
return graph
@staticmethod
async def prepare_invoice(
w2: MockLNWallet, # receiver
*,
amount_msat=100_000_000,
include_routing_hints=False,
) -> Tuple[LnAddr, str]:
amount_btc = amount_msat/Decimal(COIN*1000)
payment_preimage = os.urandom(32)
RHASH = sha256(payment_preimage)
info = PaymentInfo(RHASH, amount_msat, RECEIVED, PR_UNPAID)
w2.save_preimage(RHASH, payment_preimage)
w2.save_payment_info(info)
if include_routing_hints:
routing_hints = await w2._calc_routing_hints_for_invoice(amount_msat)
else:
routing_hints = []
trampoline_hints = []
for r in routing_hints:
node_id, short_channel_id, fee_base_msat, fee_proportional_millionths, cltv_expiry_delta = r[1][0]
if len(r[1])== 1 and w2.is_trampoline_peer(node_id):
trampoline_hints.append(('t', (node_id, fee_base_msat, fee_proportional_millionths, cltv_expiry_delta)))
invoice_features = w2.features.for_invoice()
if invoice_features.supports(LnFeatures.PAYMENT_SECRET_OPT):
payment_secret = derive_payment_secret_from_payment_preimage(payment_preimage)
else:
payment_secret = None
lnaddr1 = LnAddr(
paymenthash=RHASH,
amount=amount_btc,
tags=[('c', lnutil.MIN_FINAL_CLTV_EXPIRY_FOR_INVOICE),
('d', 'coffee'),
('9', invoice_features),
] + routing_hints + trampoline_hints,
payment_secret=payment_secret,
)
invoice = lnencode(lnaddr1, w2.node_keypair.privkey)
lnaddr2 = lndecode(invoice) # unlike lnaddr1, this now has a pubkey set
return lnaddr2, invoice
def test_reestablish(self):
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
for chan in (alice_channel, bob_channel):
chan.peer_state = PeerState.DISCONNECTED
async def reestablish():
await asyncio.gather(
p1.reestablish_channel(alice_channel),
p2.reestablish_channel(bob_channel))
self.assertEqual(alice_channel.peer_state, PeerState.GOOD)
self.assertEqual(bob_channel.peer_state, PeerState.GOOD)
gath.cancel()
        gath = asyncio.gather(reestablish(), p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
async def f():
await gath
with self.assertRaises(concurrent.futures.CancelledError):
run(f())
@needs_test_with_all_chacha20_implementations
def test_reestablish_with_old_state(self):
random_seed = os.urandom(32)
alice_channel, bob_channel = create_test_channels(random_seed=random_seed)
alice_channel_0, bob_channel_0 = create_test_channels(random_seed=random_seed) # these are identical
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
lnaddr, pay_req = run(self.prepare_invoice(w2))
async def pay():
result, log = await w1.pay_invoice(pay_req)
self.assertEqual(result, True)
gath.cancel()
gath = asyncio.gather(pay(), p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
async def f():
await gath
with self.assertRaises(concurrent.futures.CancelledError):
run(f())
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel_0, bob_channel)
for chan in (alice_channel_0, bob_channel):
chan.peer_state = PeerState.DISCONNECTED
async def reestablish():
await asyncio.gather(
p1.reestablish_channel(alice_channel_0),
p2.reestablish_channel(bob_channel))
self.assertEqual(alice_channel_0.peer_state, PeerState.BAD)
self.assertEqual(bob_channel._state, ChannelState.FORCE_CLOSING)
# wait so that pending messages are processed
#await asyncio.sleep(1)
gath.cancel()
gath = asyncio.gather(reestablish(), p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
async def f():
await gath
with self.assertRaises(concurrent.futures.CancelledError):
run(f())
@needs_test_with_all_chacha20_implementations
def test_payment(self):
"""Alice pays Bob a single HTLC via direct channel."""
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
async def pay(lnaddr, pay_req):
self.assertEqual(PR_UNPAID, w2.get_payment_status(lnaddr.paymenthash))
result, log = await w1.pay_invoice(pay_req)
self.assertTrue(result)
self.assertEqual(PR_PAID, w2.get_payment_status(lnaddr.paymenthash))
raise PaymentDone()
async def f():
async with TaskGroup() as group:
await group.spawn(p1._message_loop())
await group.spawn(p1.htlc_switch())
await group.spawn(p2._message_loop())
await group.spawn(p2.htlc_switch())
await asyncio.sleep(0.01)
lnaddr, pay_req = await self.prepare_invoice(w2)
invoice_features = lnaddr.get_features()
self.assertFalse(invoice_features.supports(LnFeatures.BASIC_MPP_OPT))
await group.spawn(pay(lnaddr, pay_req))
with self.assertRaises(PaymentDone):
run(f())
@needs_test_with_all_chacha20_implementations
def test_payment_race(self):
"""Alice and Bob pay each other simultaneously.
They both send 'update_add_htlc' and receive each other's update
before sending 'commitment_signed'. Neither party should fulfill
the respective HTLCs until those are irrevocably committed to.
"""
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
async def pay():
await asyncio.wait_for(p1.initialized, 1)
await asyncio.wait_for(p2.initialized, 1)
# prep
_maybe_send_commitment1 = p1.maybe_send_commitment
_maybe_send_commitment2 = p2.maybe_send_commitment
lnaddr2, pay_req2 = await self.prepare_invoice(w2)
lnaddr1, pay_req1 = await self.prepare_invoice(w1)
# create the htlc queues now (side-effecting defaultdict)
q1 = w1.sent_htlcs[lnaddr2.paymenthash]
q2 = w2.sent_htlcs[lnaddr1.paymenthash]
# alice sends htlc BUT NOT COMMITMENT_SIGNED
p1.maybe_send_commitment = lambda x: None
route1 = (await w1.create_routes_from_invoice(lnaddr2.get_amount_msat(), decoded_invoice=lnaddr2))[0][0]
amount_msat = lnaddr2.get_amount_msat()
await w1.pay_to_route(
route=route1,
amount_msat=amount_msat,
total_msat=amount_msat,
amount_receiver_msat=amount_msat,
payment_hash=lnaddr2.paymenthash,
min_cltv_expiry=lnaddr2.get_min_final_cltv_expiry(),
payment_secret=lnaddr2.payment_secret,
)
p1.maybe_send_commitment = _maybe_send_commitment1
# bob sends htlc BUT NOT COMMITMENT_SIGNED
p2.maybe_send_commitment = lambda x: None
route2 = (await w2.create_routes_from_invoice(lnaddr1.get_amount_msat(), decoded_invoice=lnaddr1))[0][0]
amount_msat = lnaddr1.get_amount_msat()
await w2.pay_to_route(
route=route2,
amount_msat=amount_msat,
total_msat=amount_msat,
amount_receiver_msat=amount_msat,
payment_hash=lnaddr1.paymenthash,
min_cltv_expiry=lnaddr1.get_min_final_cltv_expiry(),
payment_secret=lnaddr1.payment_secret,
)
p2.maybe_send_commitment = _maybe_send_commitment2
# sleep a bit so that they both receive msgs sent so far
await asyncio.sleep(0.2)
# now they both send COMMITMENT_SIGNED
p1.maybe_send_commitment(alice_channel)
p2.maybe_send_commitment(bob_channel)
htlc_log1 = await q1.get()
assert htlc_log1.success
htlc_log2 = await q2.get()
assert htlc_log2.success
raise PaymentDone()
async def f():
async with TaskGroup() as group:
await group.spawn(p1._message_loop())
await group.spawn(p1.htlc_switch())
await group.spawn(p2._message_loop())
await group.spawn(p2.htlc_switch())
await asyncio.sleep(0.01)
await group.spawn(pay())
with self.assertRaises(PaymentDone):
run(f())
#@unittest.skip("too expensive")
#@needs_test_with_all_chacha20_implementations
def test_payments_stresstest(self):
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
alice_init_balance_msat = alice_channel.balance(HTLCOwner.LOCAL)
bob_init_balance_msat = bob_channel.balance(HTLCOwner.LOCAL)
num_payments = 50
payment_value_msat = 10_000_000 # make it large enough so that there are actually HTLCs on the ctx
max_htlcs_in_flight = asyncio.Semaphore(5)
async def single_payment(pay_req):
async with max_htlcs_in_flight:
await w1.pay_invoice(pay_req)
async def many_payments():
async with TaskGroup() as group:
pay_reqs_tasks = [await group.spawn(self.prepare_invoice(w2, amount_msat=payment_value_msat))
for i in range(num_payments)]
async with TaskGroup() as group:
for pay_req_task in pay_reqs_tasks:
lnaddr, pay_req = pay_req_task.result()
await group.spawn(single_payment(pay_req))
gath.cancel()
gath = asyncio.gather(many_payments(), p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
async def f():
await gath
with self.assertRaises(concurrent.futures.CancelledError):
run(f())
self.assertEqual(alice_init_balance_msat - num_payments * payment_value_msat, alice_channel.balance(HTLCOwner.LOCAL))
self.assertEqual(alice_init_balance_msat - num_payments * payment_value_msat, bob_channel.balance(HTLCOwner.REMOTE))
self.assertEqual(bob_init_balance_msat + num_payments * payment_value_msat, bob_channel.balance(HTLCOwner.LOCAL))
self.assertEqual(bob_init_balance_msat + num_payments * payment_value_msat, alice_channel.balance(HTLCOwner.REMOTE))
@needs_test_with_all_chacha20_implementations
def test_payment_multihop(self):
graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
peers = graph.peers.values()
async def pay(lnaddr, pay_req):
self.assertEqual(PR_UNPAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
result, log = await graph.workers['alice'].pay_invoice(pay_req)
self.assertTrue(result)
self.assertEqual(PR_PAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
raise PaymentDone()
async def f():
async with TaskGroup() as group:
for peer in peers:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
lnaddr, pay_req = await self.prepare_invoice(graph.workers['dave'], include_routing_hints=True)
await group.spawn(pay(lnaddr, pay_req))
with self.assertRaises(PaymentDone):
run(f())
@needs_test_with_all_chacha20_implementations
def test_payment_multihop_with_preselected_path(self):
graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
peers = graph.peers.values()
async def pay(pay_req):
with self.subTest(msg="bad path: edges do not chain together"):
path = [PathEdge(start_node=graph.workers['alice'].node_keypair.pubkey,
end_node=graph.workers['carol'].node_keypair.pubkey,
short_channel_id=graph.channels[('alice', 'bob')].short_channel_id),
PathEdge(start_node=graph.workers['bob'].node_keypair.pubkey,
end_node=graph.workers['dave'].node_keypair.pubkey,
short_channel_id=graph.channels['bob', 'dave'].short_channel_id)]
with self.assertRaises(LNPathInconsistent):
await graph.workers['alice'].pay_invoice(pay_req, full_path=path)
with self.subTest(msg="bad path: last node id differs from invoice pubkey"):
path = [PathEdge(start_node=graph.workers['alice'].node_keypair.pubkey,
end_node=graph.workers['bob'].node_keypair.pubkey,
short_channel_id=graph.channels[('alice', 'bob')].short_channel_id)]
with self.assertRaises(LNPathInconsistent):
await graph.workers['alice'].pay_invoice(pay_req, full_path=path)
with self.subTest(msg="good path"):
path = [PathEdge(start_node=graph.workers['alice'].node_keypair.pubkey,
end_node=graph.workers['bob'].node_keypair.pubkey,
short_channel_id=graph.channels[('alice', 'bob')].short_channel_id),
PathEdge(start_node=graph.workers['bob'].node_keypair.pubkey,
end_node=graph.workers['dave'].node_keypair.pubkey,
short_channel_id=graph.channels['bob', 'dave'].short_channel_id)]
result, log = await graph.workers['alice'].pay_invoice(pay_req, full_path=path)
self.assertTrue(result)
self.assertEqual(
[edge.short_channel_id for edge in path],
[edge.short_channel_id for edge in log[0].route])
raise PaymentDone()
async def f():
async with TaskGroup() as group:
for peer in peers:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
lnaddr, pay_req = await self.prepare_invoice(graph.workers['dave'], include_routing_hints=True)
await group.spawn(pay(pay_req))
with self.assertRaises(PaymentDone):
run(f())
@needs_test_with_all_chacha20_implementations
def test_payment_multihop_temp_node_failure(self):
graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
graph.workers['bob'].network.config.set_key('test_fail_htlcs_with_temp_node_failure', True)
graph.workers['carol'].network.config.set_key('test_fail_htlcs_with_temp_node_failure', True)
peers = graph.peers.values()
async def pay(lnaddr, pay_req):
self.assertEqual(PR_UNPAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
result, log = await graph.workers['alice'].pay_invoice(pay_req)
self.assertFalse(result)
self.assertEqual(PR_UNPAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
self.assertEqual(OnionFailureCode.TEMPORARY_NODE_FAILURE, log[0].failure_msg.code)
raise PaymentDone()
async def f():
async with TaskGroup() as group:
for peer in peers:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
lnaddr, pay_req = await self.prepare_invoice(graph.workers['dave'], include_routing_hints=True)
await group.spawn(pay(lnaddr, pay_req))
with self.assertRaises(PaymentDone):
run(f())
@needs_test_with_all_chacha20_implementations
def test_payment_multihop_route_around_failure(self):
# Alice will pay Dave. Alice first tries A->C->D route, due to lower fees, but Carol
# will fail the htlc and get blacklisted. Alice will then try A->B->D and succeed.
graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
graph.workers['carol'].network.config.set_key('test_fail_htlcs_with_temp_node_failure', True)
peers = graph.peers.values()
async def pay(lnaddr, pay_req):
self.assertEqual(500000000000, graph.channels[('alice', 'bob')].balance(LOCAL))
self.assertEqual(500000000000, graph.channels[('dave', 'bob')].balance(LOCAL))
self.assertEqual(PR_UNPAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
result, log = await graph.workers['alice'].pay_invoice(pay_req, attempts=2)
self.assertEqual(2, len(log))
self.assertTrue(result)
self.assertEqual(PR_PAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
self.assertEqual([graph.channels[('alice', 'carol')].short_channel_id, graph.channels[('carol', 'dave')].short_channel_id],
[edge.short_channel_id for edge in log[0].route])
self.assertEqual([graph.channels[('alice', 'bob')].short_channel_id, graph.channels[('bob', 'dave')].short_channel_id],
[edge.short_channel_id for edge in log[1].route])
self.assertEqual(OnionFailureCode.TEMPORARY_NODE_FAILURE, log[0].failure_msg.code)
self.assertEqual(499899450000, graph.channels[('alice', 'bob')].balance(LOCAL))
await asyncio.sleep(0.2) # wait for COMMITMENT_SIGNED / REVACK msgs to update balance
self.assertEqual(500100000000, graph.channels[('dave', 'bob')].balance(LOCAL))
raise PaymentDone()
async def f():
async with TaskGroup() as group:
for peer in peers:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
lnaddr, pay_req = await self.prepare_invoice(graph.workers['dave'], include_routing_hints=True)
invoice_features = lnaddr.get_features()
self.assertFalse(invoice_features.supports(LnFeatures.BASIC_MPP_OPT))
await group.spawn(pay(lnaddr, pay_req))
with self.assertRaises(PaymentDone):
run(f())
@needs_test_with_all_chacha20_implementations
    def test_payment_with_temp_channel_failure_and_liquidity_hints(self):
# prepare channels such that a temporary channel failure happens at c->d
graph_definition = GRAPH_DEFINITIONS['square_graph'].copy()
graph_definition['alice']['channels']['carol']['local_balance_msat'] = 200_000_000
graph_definition['alice']['channels']['carol']['remote_balance_msat'] = 200_000_000
graph_definition['carol']['channels']['dave']['local_balance_msat'] = 50_000_000
graph_definition['carol']['channels']['dave']['remote_balance_msat'] = 200_000_000
graph_definition['alice']['channels']['bob']['local_balance_msat'] = 200_000_000
graph_definition['alice']['channels']['bob']['remote_balance_msat'] = 200_000_000
graph_definition['bob']['channels']['dave']['local_balance_msat'] = 200_000_000
graph_definition['bob']['channels']['dave']['remote_balance_msat'] = 200_000_000
graph = self.prepare_chans_and_peers_in_graph(graph_definition)
# the payment happens in two attempts:
# 1. along a->c->d due to low fees with temp channel failure:
# with chanupd: ORPHANED, private channel update
# c->d gets a liquidity hint and gets blocked
# 2. along a->b->d with success
amount_to_pay = 100_000_000
peers = graph.peers.values()
async def pay(lnaddr, pay_req):
self.assertEqual(PR_UNPAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
result, log = await graph.workers['alice'].pay_invoice(pay_req, attempts=3)
self.assertTrue(result)
self.assertEqual(2, len(log))
self.assertEqual(PR_PAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
self.assertEqual(OnionFailureCode.TEMPORARY_CHANNEL_FAILURE, log[0].failure_msg.code)
liquidity_hints = graph.workers['alice'].network.path_finder.liquidity_hints
pubkey_a = graph.workers['alice'].node_keypair.pubkey
pubkey_b = graph.workers['bob'].node_keypair.pubkey
pubkey_c = graph.workers['carol'].node_keypair.pubkey
pubkey_d = graph.workers['dave'].node_keypair.pubkey
# check liquidity hints for failing route:
hint_ac = liquidity_hints.get_hint(graph.channels[('alice', 'carol')].short_channel_id)
hint_cd = liquidity_hints.get_hint(graph.channels[('carol', 'dave')].short_channel_id)
self.assertEqual(amount_to_pay, hint_ac.can_send(pubkey_a < pubkey_c))
self.assertEqual(None, hint_ac.cannot_send(pubkey_a < pubkey_c))
self.assertEqual(None, hint_cd.can_send(pubkey_c < pubkey_d))
self.assertEqual(amount_to_pay, hint_cd.cannot_send(pubkey_c < pubkey_d))
# check liquidity hints for successful route:
hint_ab = liquidity_hints.get_hint(graph.channels[('alice', 'bob')].short_channel_id)
hint_bd = liquidity_hints.get_hint(graph.channels[('bob', 'dave')].short_channel_id)
self.assertEqual(amount_to_pay, hint_ab.can_send(pubkey_a < pubkey_b))
self.assertEqual(None, hint_ab.cannot_send(pubkey_a < pubkey_b))
self.assertEqual(amount_to_pay, hint_bd.can_send(pubkey_b < pubkey_d))
self.assertEqual(None, hint_bd.cannot_send(pubkey_b < pubkey_d))
raise PaymentDone()
async def f():
async with TaskGroup() as group:
for peer in peers:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
lnaddr, pay_req = await self.prepare_invoice(graph.workers['dave'], amount_msat=amount_to_pay, include_routing_hints=True)
await group.spawn(pay(lnaddr, pay_req))
with self.assertRaises(PaymentDone):
run(f())
def _run_mpp(self, graph, fail_kwargs, success_kwargs):
"""Tests a multipart payment scenario for failing and successful cases."""
self.assertEqual(500_000_000_000, graph.channels[('alice', 'bob')].balance(LOCAL))
self.assertEqual(500_000_000_000, graph.channels[('alice', 'carol')].balance(LOCAL))
amount_to_pay = 600_000_000_000
peers = graph.peers.values()
async def pay(
attempts=1,
alice_uses_trampoline=False,
bob_forwarding=True,
mpp_invoice=True
):
if mpp_invoice:
graph.workers['dave'].features |= LnFeatures.BASIC_MPP_OPT
if not bob_forwarding:
graph.workers['bob'].enable_htlc_forwarding = False
if alice_uses_trampoline:
if graph.workers['alice'].network.channel_db:
graph.workers['alice'].network.channel_db.stop()
await graph.workers['alice'].network.channel_db.stopped_event.wait()
graph.workers['alice'].network.channel_db = None
else:
assert graph.workers['alice'].network.channel_db is not None
lnaddr, pay_req = await self.prepare_invoice(graph.workers['dave'], include_routing_hints=True, amount_msat=amount_to_pay)
self.assertEqual(PR_UNPAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
result, log = await graph.workers['alice'].pay_invoice(pay_req, attempts=attempts)
if not bob_forwarding:
# reset to previous state, sleep 2s so that the second htlc can time out
graph.workers['bob'].enable_htlc_forwarding = True
await asyncio.sleep(2)
if result:
self.assertEqual(PR_PAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
raise PaymentDone()
else:
raise NoPathFound()
async def f(kwargs):
async with TaskGroup() as group:
for peer in peers:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
await group.spawn(pay(**kwargs))
with self.assertRaises(NoPathFound):
run(f(fail_kwargs))
with self.assertRaises(PaymentDone):
run(f(success_kwargs))
@needs_test_with_all_chacha20_implementations
def test_payment_multipart_with_timeout(self):
graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
self._run_mpp(graph, {'bob_forwarding': False}, {'bob_forwarding': True})
@needs_test_with_all_chacha20_implementations
def test_payment_multipart(self):
graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
self._run_mpp(graph, {'mpp_invoice': False}, {'mpp_invoice': True})
@needs_test_with_all_chacha20_implementations
def test_payment_trampoline(self):
async def turn_on_trampoline_alice():
if graph.workers['alice'].network.channel_db:
graph.workers['alice'].network.channel_db.stop()
await graph.workers['alice'].network.channel_db.stopped_event.wait()
graph.workers['alice'].network.channel_db = None
async def pay(lnaddr, pay_req):
self.assertEqual(PR_UNPAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
result, log = await graph.workers['alice'].pay_invoice(pay_req, attempts=10)
self.assertTrue(result)
self.assertEqual(PR_PAID, graph.workers['dave'].get_payment_status(lnaddr.paymenthash))
raise PaymentDone()
async def f():
await turn_on_trampoline_alice()
async with TaskGroup() as group:
for peer in peers:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
lnaddr, pay_req = await self.prepare_invoice(graph.workers['dave'], include_routing_hints=True)
await group.spawn(pay(lnaddr, pay_req))
for is_legacy in (True, False):
graph_definition = GRAPH_DEFINITIONS['square_graph'].copy()
# insert a channel from bob to carol for faster tests,
            # otherwise it fails randomly
graph_definition['bob']['channels']['carol'] = high_fee_channel
graph = self.prepare_chans_and_peers_in_graph(graph_definition)
peers = graph.peers.values()
if is_legacy:
# turn off trampoline features
graph.workers['dave'].features = graph.workers['dave'].features ^ LnFeatures.OPTION_TRAMPOLINE_ROUTING_OPT
# declare routing nodes as trampoline nodes
electrum_vtc.trampoline._TRAMPOLINE_NODES_UNITTESTS = {
graph.workers['bob'].name: LNPeerAddr(host="127.0.0.1", port=9735, pubkey=graph.workers['bob'].node_keypair.pubkey),
graph.workers['carol'].name: LNPeerAddr(host="127.0.0.1", port=9735, pubkey=graph.workers['carol'].node_keypair.pubkey),
}
with self.assertRaises(PaymentDone):
run(f())
@needs_test_with_all_chacha20_implementations
def test_payment_multipart_trampoline(self):
# single attempt will fail with insufficient trampoline fee
graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
electrum_vtc.trampoline._TRAMPOLINE_NODES_UNITTESTS = {
graph.workers['bob'].name: LNPeerAddr(host="127.0.0.1", port=9735, pubkey=graph.workers['bob'].node_keypair.pubkey),
graph.workers['carol'].name: LNPeerAddr(host="127.0.0.1", port=9735, pubkey=graph.workers['carol'].node_keypair.pubkey),
}
try:
self._run_mpp(
graph,
{'alice_uses_trampoline': True, 'attempts': 1},
{'alice_uses_trampoline': True, 'attempts': 30})
finally:
electrum_vtc.trampoline._TRAMPOLINE_NODES_UNITTESTS = {}
@needs_test_with_all_chacha20_implementations
def test_fail_pending_htlcs_on_shutdown(self):
"""Alice tries to pay Dave via MPP. Dave receives some HTLCs but not all.
Dave shuts down (stops wallet).
We test if Dave fails the pending HTLCs during shutdown.
"""
graph = self.prepare_chans_and_peers_in_graph(GRAPH_DEFINITIONS['square_graph'])
self.assertEqual(500_000_000_000, graph.channels[('alice', 'bob')].balance(LOCAL))
self.assertEqual(500_000_000_000, graph.channels[('alice', 'carol')].balance(LOCAL))
amount_to_pay = 600_000_000_000
peers = graph.peers.values()
graph.workers['dave'].MPP_EXPIRY = 120
graph.workers['dave'].TIMEOUT_SHUTDOWN_FAIL_PENDING_HTLCS = 3
async def pay():
graph.workers['dave'].features |= LnFeatures.BASIC_MPP_OPT
graph.workers['bob'].enable_htlc_forwarding = False # Bob will hold forwarded HTLCs
assert graph.workers['alice'].network.channel_db is not None
lnaddr, pay_req = await self.prepare_invoice(graph.workers['dave'], include_routing_hints=True, amount_msat=amount_to_pay)
try:
async with timeout_after(0.5):
result, log = await graph.workers['alice'].pay_invoice(pay_req, attempts=1)
except TaskTimeout:
# by now Dave hopefully received some HTLCs:
self.assertTrue(len(graph.channels[('dave', 'carol')].hm.htlcs(LOCAL)) > 0)
self.assertTrue(len(graph.channels[('dave', 'carol')].hm.htlcs(REMOTE)) > 0)
else:
self.fail(f"pay_invoice finished but was not supposed to. result={result}")
await graph.workers['dave'].stop()
# Dave is supposed to have failed the pending incomplete MPP HTLCs
self.assertEqual(0, len(graph.channels[('dave', 'carol')].hm.htlcs(LOCAL)))
self.assertEqual(0, len(graph.channels[('dave', 'carol')].hm.htlcs(REMOTE)))
raise SuccessfulTest()
async def f():
async with TaskGroup() as group:
for peer in peers:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
await group.spawn(pay())
with self.assertRaises(SuccessfulTest):
run(f())
@needs_test_with_all_chacha20_implementations
def test_close(self):
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
w1.network.config.set_key('dynamic_fees', False)
w2.network.config.set_key('dynamic_fees', False)
w1.network.config.set_key('fee_per_kb', 5000)
w2.network.config.set_key('fee_per_kb', 1000)
w2.enable_htlc_settle = False
lnaddr, pay_req = run(self.prepare_invoice(w2))
async def pay():
await asyncio.wait_for(p1.initialized, 1)
await asyncio.wait_for(p2.initialized, 1)
# alice sends htlc
route, amount_msat = (await w1.create_routes_from_invoice(lnaddr.get_amount_msat(), decoded_invoice=lnaddr))[0][0:2]
p1.pay(route=route,
chan=alice_channel,
amount_msat=lnaddr.get_amount_msat(),
total_msat=lnaddr.get_amount_msat(),
payment_hash=lnaddr.paymenthash,
min_final_cltv_expiry=lnaddr.get_min_final_cltv_expiry(),
payment_secret=lnaddr.payment_secret)
# alice closes
await p1.close_channel(alice_channel.channel_id)
gath.cancel()
async def set_settle():
await asyncio.sleep(0.1)
w2.enable_htlc_settle = True
gath = asyncio.gather(pay(), set_settle(), p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
async def f():
await gath
with self.assertRaises(concurrent.futures.CancelledError):
run(f())
@needs_test_with_all_chacha20_implementations
def test_close_upfront_shutdown_script(self):
alice_channel, bob_channel = create_test_channels()
# create upfront shutdown script for bob, alice doesn't use upfront
# shutdown script
bob_uss_pub = lnutil.privkey_to_pubkey(os.urandom(32))
bob_uss_addr = bitcoin.pubkey_to_address('p2wpkh', bh2u(bob_uss_pub))
bob_uss = bfh(bitcoin.address_to_script(bob_uss_addr))
# bob commits to close to bob_uss
alice_channel.config[HTLCOwner.REMOTE].upfront_shutdown_script = bob_uss
# but bob closes to some receiving address, which we achieve by not
# setting the upfront shutdown script in the channel config
bob_channel.config[HTLCOwner.LOCAL].upfront_shutdown_script = b''
p1, p2, w1, w2, q1, q2 = self.prepare_peers(alice_channel, bob_channel)
w1.network.config.set_key('dynamic_fees', False)
w2.network.config.set_key('dynamic_fees', False)
w1.network.config.set_key('fee_per_kb', 5000)
w2.network.config.set_key('fee_per_kb', 1000)
async def test():
async def close():
await asyncio.wait_for(p1.initialized, 1)
await asyncio.wait_for(p2.initialized, 1)
# bob closes channel with different shutdown script
await p1.close_channel(alice_channel.channel_id)
gath.cancel()
async def main_loop(peer):
async with peer.taskgroup as group:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
coros = [close(), main_loop(p1), main_loop(p2)]
gath = asyncio.gather(*coros)
await gath
with self.assertRaises(UpfrontShutdownScriptViolation):
run(test())
        # bob sends the same upfront_shutdown_script as he announced
alice_channel.config[HTLCOwner.REMOTE].upfront_shutdown_script = bob_uss
bob_channel.config[HTLCOwner.LOCAL].upfront_shutdown_script = bob_uss
p1, p2, w1, w2, q1, q2 = self.prepare_peers(alice_channel, bob_channel)
w1.network.config.set_key('dynamic_fees', False)
w2.network.config.set_key('dynamic_fees', False)
w1.network.config.set_key('fee_per_kb', 5000)
w2.network.config.set_key('fee_per_kb', 1000)
async def test():
async def close():
await asyncio.wait_for(p1.initialized, 1)
await asyncio.wait_for(p2.initialized, 1)
await p1.close_channel(alice_channel.channel_id)
gath.cancel()
async def main_loop(peer):
async with peer.taskgroup as group:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
coros = [close(), main_loop(p1), main_loop(p2)]
gath = asyncio.gather(*coros)
await gath
with self.assertRaises(concurrent.futures.CancelledError):
run(test())
def test_channel_usage_after_closing(self):
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, q1, q2 = self.prepare_peers(alice_channel, bob_channel)
lnaddr, pay_req = run(self.prepare_invoice(w2))
lnaddr = w1._check_invoice(pay_req)
route, amount_msat = run(w1.create_routes_from_invoice(lnaddr.get_amount_msat(), decoded_invoice=lnaddr))[0][0:2]
assert amount_msat == lnaddr.get_amount_msat()
run(w1.force_close_channel(alice_channel.channel_id))
# check if a tx (commitment transaction) was broadcasted:
assert q1.qsize() == 1
with self.assertRaises(NoPathFound) as e:
run(w1.create_routes_from_invoice(lnaddr.get_amount_msat(), decoded_invoice=lnaddr))
peer = w1.peers[route[0].node_id]
# AssertionError is ok since we shouldn't use old routes, and the
# route finding should fail when channel is closed
async def f():
min_cltv_expiry = lnaddr.get_min_final_cltv_expiry()
payment_hash = lnaddr.paymenthash
payment_secret = lnaddr.payment_secret
pay = w1.pay_to_route(
route=route,
amount_msat=amount_msat,
total_msat=amount_msat,
amount_receiver_msat=amount_msat,
payment_hash=payment_hash,
payment_secret=payment_secret,
min_cltv_expiry=min_cltv_expiry)
await asyncio.gather(pay, p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
with self.assertRaises(PaymentFailure):
run(f())
@needs_test_with_all_chacha20_implementations
def test_sending_weird_messages_that_should_be_ignored(self):
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
async def send_weird_messages():
await asyncio.wait_for(p1.initialized, 1)
await asyncio.wait_for(p2.initialized, 1)
# peer1 sends known message with trailing garbage
# BOLT-01 says peer2 should ignore trailing garbage
raw_msg1 = encode_msg('ping', num_pong_bytes=4, byteslen=4) + bytes(range(55))
p1.transport.send_bytes(raw_msg1)
await asyncio.sleep(0.05)
# peer1 sends unknown 'odd-type' message
# BOLT-01 says peer2 should ignore whole message
raw_msg2 = (43333).to_bytes(length=2, byteorder="big") + bytes(range(55))
p1.transport.send_bytes(raw_msg2)
await asyncio.sleep(0.05)
raise SuccessfulTest()
async def f():
async with TaskGroup() as group:
for peer in [p1, p2]:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
await group.spawn(send_weird_messages())
with self.assertRaises(SuccessfulTest):
run(f())
@needs_test_with_all_chacha20_implementations
def test_sending_weird_messages__unknown_even_type(self):
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
async def send_weird_messages():
await asyncio.wait_for(p1.initialized, 1)
await asyncio.wait_for(p2.initialized, 1)
# peer1 sends unknown 'even-type' message
# BOLT-01 says peer2 should close the connection
raw_msg2 = (43334).to_bytes(length=2, byteorder="big") + bytes(range(55))
p1.transport.send_bytes(raw_msg2)
await asyncio.sleep(0.05)
failing_task = None
async def f():
nonlocal failing_task
async with TaskGroup() as group:
await group.spawn(p1._message_loop())
await group.spawn(p1.htlc_switch())
failing_task = await group.spawn(p2._message_loop())
await group.spawn(p2.htlc_switch())
await asyncio.sleep(0.2)
await group.spawn(send_weird_messages())
with self.assertRaises(lnmsg.UnknownMandatoryMsgType):
run(f())
self.assertTrue(isinstance(failing_task.exception(), lnmsg.UnknownMandatoryMsgType))
@needs_test_with_all_chacha20_implementations
def test_sending_weird_messages__known_msg_with_insufficient_length(self):
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
async def send_weird_messages():
await asyncio.wait_for(p1.initialized, 1)
await asyncio.wait_for(p2.initialized, 1)
# peer1 sends known message with insufficient length for the contents
# BOLT-01 says peer2 should fail the connection
raw_msg1 = encode_msg('ping', num_pong_bytes=4, byteslen=4)[:-1]
p1.transport.send_bytes(raw_msg1)
await asyncio.sleep(0.05)
failing_task = None
async def f():
nonlocal failing_task
async with TaskGroup() as group:
await group.spawn(p1._message_loop())
await group.spawn(p1.htlc_switch())
failing_task = await group.spawn(p2._message_loop())
await group.spawn(p2.htlc_switch())
await asyncio.sleep(0.2)
await group.spawn(send_weird_messages())
with self.assertRaises(lnmsg.UnexpectedEndOfStream):
run(f())
self.assertTrue(isinstance(failing_task.exception(), lnmsg.UnexpectedEndOfStream))
def run(coro):
return asyncio.run_coroutine_threadsafe(coro, loop=asyncio.get_event_loop()).result()
| 46.775591 | 138 | 0.647908 | 55,415 | 0.932834 | 117 | 0.00197 | 35,810 | 0.602811 | 27,639 | 0.465264 | 6,700 | 0.112785 |
b4484ab703976e8f170a719cc81c5d0146cb13ba | 533 | py | Python | dictionaries/lab/06_students.py | Galchov/python-fundamentals | 4939bdd1c66a7b458fd9ffd0a01d714de26724b5 | [
"MIT"
]
| null | null | null | dictionaries/lab/06_students.py | Galchov/python-fundamentals | 4939bdd1c66a7b458fd9ffd0a01d714de26724b5 | [
"MIT"
]
| null | null | null | dictionaries/lab/06_students.py | Galchov/python-fundamentals | 4939bdd1c66a7b458fd9ffd0a01d714de26724b5 | [
"MIT"
]
| null | null | null | data = input()
courses = {}
while ":" in data:
student_name, id, course_name = data.split(":")
if course_name not in courses:
courses[course_name] = {}
courses[course_name][id] = student_name
data = input()
searched_course = data
searched_course_name_as_list = searched_course.split("_")
searched_course = " ".join(searched_course_name_as_list)
for course_name in courses:
if course_name == searched_course:
for id, name in courses[course_name].items():
print(f"{name} - {id}")
| 23.173913 | 57 | 0.669794 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.052533 |
b44863efc63447d4fc28f184aca9628762468a02 | 637 | py | Python | eth_tester/normalization/common.py | PabloLefort/eth-tester | 9a795cff7da3916062884e9c1e690545741e60c5 | [
"MIT"
]
| 215 | 2018-05-17T19:09:07.000Z | 2021-03-05T18:10:15.000Z | eth_tester/normalization/common.py | PabloLefort/eth-tester | 9a795cff7da3916062884e9c1e690545741e60c5 | [
"MIT"
]
| 1 | 2021-03-25T21:51:01.000Z | 2021-03-25T21:51:01.000Z | eth_tester/normalization/common.py | PabloLefort/eth-tester | 9a795cff7da3916062884e9c1e690545741e60c5 | [
"MIT"
]
| 1 | 2019-02-27T21:29:16.000Z | 2019-02-27T21:29:16.000Z | from cytoolz.functoolz import (
curry,
)
from eth_utils import (
to_dict,
to_tuple,
)
@curry
@to_dict
def normalize_dict(value, normalizers):
for key, item in value.items():
normalizer = normalizers[key]
yield key, normalizer(item)
@curry
@to_tuple
def normalize_array(value, normalizer):
"""
    This is just `map` but it's nice to have it return a consistent type
(tuple).
"""
for item in value:
yield normalizer(item)
@curry
def normalize_if(value, conditional_fn, normalizer):
if conditional_fn(value):
return normalizer(value)
else:
return value
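# Minimal usage sketch added for illustration (not part of the original
# module). The normalizers used here (int, hex) are arbitrary placeholders;
# real callers supply their own normalization callables.
if __name__ == "__main__":
    normalize_ints = normalize_array(normalizer=int)
    assert normalize_ints(["1", "2", "3"]) == (1, 2, 3)

    normalize_account = normalize_dict(normalizers={"balance": int, "nonce": int})
    assert normalize_account({"balance": "10", "nonce": "0"}) == {"balance": 10, "nonce": 0}

    # normalize_if only applies the normalizer when the predicate holds.
    hexlify_ints = normalize_if(conditional_fn=lambda v: isinstance(v, int), normalizer=hex)
    assert hexlify_ints(255) == "0xff"
    assert hexlify_ints("already-a-string") == "already-a-string"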
| 17.694444 | 71 | 0.657771 | 0 | 0 | 343 | 0.538462 | 529 | 0.830455 | 0 | 0 | 96 | 0.150706 |
b448742ef1c956bf4c670f1ca4c802b2271cb5bd | 1,030 | py | Python | je_open_cv/modules/image_operations.py | JE-Chen/Python-OPENCV-JE | d5dd3823f0a1cfc195da66bdcbe738c9bbdfc59b | [
"MIT"
]
| null | null | null | je_open_cv/modules/image_operations.py | JE-Chen/Python-OPENCV-JE | d5dd3823f0a1cfc195da66bdcbe738c9bbdfc59b | [
"MIT"
]
| null | null | null | je_open_cv/modules/image_operations.py | JE-Chen/Python-OPENCV-JE | d5dd3823f0a1cfc195da66bdcbe738c9bbdfc59b | [
"MIT"
]
| null | null | null | import cv2
'''
Utilities for basic image processing.
'''
# Get the image's row count, column count and number of channels
def get_image_properties(image):
total = [image.shape, image.size, image.dtype]
return total
def get_image_shape(image):
return image.shape
# Get the image size (total number of elements)
def get_image_size(image):
return image.size
# Get the image data type
def get_image_type(image):
return image.dtype
# Split the color channels
def split_image(image):
B, G, R = cv2.split(image)
return [B, G, R]
'''
The B,G,R channels of an image can be split into their individual planes when needed. Then,
the individual channels can be merged back together to form a BGR image again. This can be performed by:
b = img[:,:,0]
Suppose, you want to make all the red pixels to zero, you need not split like this and put it equal to zero.
You can simply use Numpy indexing which is faster.
img[:,:,2] = 0
'''
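# Illustrative sketch (not part of the original module): zeroing the red
# channel in place via NumPy indexing, as recommended in the note above.
# OpenCV stores channels in B, G, R order, so index 2 is red.
def zero_red_channel(image):
    image[:, :, 2] = 0
    return image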
# Merge the color channels
def merge_image(B, G, R):
return cv2.merge((B, G, R))
# Blend two images using alpha transparency
def image_Blending(image1, image1_Alpha, image2, image2_Alpha):
return cv2.addWeighted(image1, image1_Alpha, image2, image2_Alpha, 0)
| 18.727273 | 109 | 0.7 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 563 | 0.501783 |
b44950222260e5d85816513148e16767252becb1 | 9,124 | py | Python | Incident-Response/Tools/grr/grr/client/grr_response_client/vfs_handlers/ntfs.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
]
| 1 | 2021-07-24T17:22:50.000Z | 2021-07-24T17:22:50.000Z | Incident-Response/Tools/grr/grr/client/grr_response_client/vfs_handlers/ntfs.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
]
| 2 | 2022-02-28T03:40:31.000Z | 2022-02-28T03:40:52.000Z | Incident-Response/Tools/grr/grr/client/grr_response_client/vfs_handlers/ntfs.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
]
| 2 | 2022-02-25T08:34:51.000Z | 2022-03-16T17:29:44.000Z | #!/usr/bin/env python
"""Virtual filesystem module based on pyfsntfs."""
import stat
from typing import Any, Callable, Dict, Iterable, Optional, Text, Type
import pyfsntfs
from grr_response_client import client_utils
from grr_response_client.vfs_handlers import base as vfs_base
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import paths as rdf_paths
# Caches pyfsntfs.volume instances.
MOUNT_CACHE = utils.TimeBasedCache()
# See
# https://github.com/libyal/libfsntfs/blob/master/documentation/New%20Technologies%20File%20System%20(NTFS).asciidoc#file_attribute_flags
FILE_ATTRIBUTE_READONLY = 0x00000001
FILE_ATTRIBUTE_HIDDEN = 0x00000002
def _GetAlternateDataStreamCaseInsensitive(
fd: pyfsntfs.file_entry, name: Text) -> Optional[pyfsntfs.data_stream]:
name = name.lower()
for data_stream in fd.alternate_data_streams:
if data_stream.name.lower() == name:
return data_stream
class NTFSFile(vfs_base.VFSHandler):
"""VFSHandler implementation based on pyfsntfs."""
supported_pathtype = rdf_paths.PathSpec.PathType.NTFS
def __init__(self,
base_fd: Optional[vfs_base.VFSHandler],
handlers: Dict[Any, Type[vfs_base.VFSHandler]],
pathspec: Optional[rdf_paths.PathSpec] = None,
progress_callback: Optional[Callable[[], None]] = None):
super().__init__(
base_fd,
handlers=handlers,
pathspec=pathspec,
progress_callback=progress_callback)
# self.pathspec is initialized to a copy of base_fd
if base_fd is None:
raise ValueError("NTFS driver must have a file base.")
elif isinstance(base_fd, NTFSFile) and base_fd.IsDirectory():
self.volume = base_fd.volume
last_path = utils.JoinPath(self.pathspec.last.path, pathspec.path)
# Replace the last component with this one.
self.pathspec.Pop(-1)
self.pathspec.Append(pathspec)
self.pathspec.last.path = last_path
elif not base_fd.IsDirectory():
cache_key = base_fd.pathspec.SerializeToBytes()
try:
self.volume = MOUNT_CACHE.Get(cache_key)
except KeyError:
self.volume = pyfsntfs.volume()
self.volume.open_file_object(base_fd)
MOUNT_CACHE.Put(cache_key, self.volume)
self.pathspec.Append(pathspec)
elif base_fd.IsDirectory():
raise IOError("Base must be a file.")
self.fd = None
self.data_stream = None
# Try to open by "inode" number.
if pathspec is not None and pathspec.HasField("inode"):
# The lower 48 bits of the file_reference are the MFT index.
mft_index = pathspec.inode & ((1 << 48) - 1)
self.fd = self.volume.get_file_entry(mft_index)
      # If the file_reference changed, the MFT entry now points to a
      # different file. Reopen it by path.
if self.fd is not None and self.fd.file_reference != pathspec.inode:
self.fd = None
# Try to open by path
if self.fd is None:
path = self.pathspec.last.path
path = path.replace("/", "\\")
self.fd = self.volume.get_file_entry_by_path(path)
if self.fd is None:
raise IOError("Failed to open {}".format(path))
# Determine data stream
if pathspec is not None and pathspec.HasField("stream_name"):
if pathspec.path_options == rdf_paths.PathSpec.Options.CASE_LITERAL:
self.data_stream = self.fd.get_alternate_data_stream_by_name(
pathspec.stream_name)
else:
self.data_stream = _GetAlternateDataStreamCaseInsensitive(
self.fd, pathspec.stream_name)
if self.data_stream is None:
raise IOError("Failed to open data stream {} in {}.".format(
pathspec.stream_name, path))
self.pathspec.last.stream_name = self.data_stream.name
else:
if self.fd.has_default_data_stream():
self.data_stream = self.fd
# self.pathspec will be used for future access to this file.
# The name is now literal, so disable case-insensitive lookup (expensive).
self.pathspec.last.path_options = rdf_paths.PathSpec.Options.CASE_LITERAL
# Access the file by file_reference, to skip path lookups.
self.pathspec.last.inode = self.fd.file_reference
if not self.IsDirectory():
if self.data_stream is not None:
self.size = self.data_stream.get_size()
else:
self.size = 0
def Stat(self,
ext_attrs: bool = False,
follow_symlink: bool = True) -> rdf_client_fs.StatEntry:
return self._Stat(self.fd, self.data_stream, self.pathspec.Copy())
def Read(self, length: int) -> bytes:
self.data_stream.seek(self.offset)
data = self.data_stream.read(length)
self.offset += len(data)
return data
def IsDirectory(self) -> bool:
return self.fd.has_directory_entries_index()
def ListFiles(self,
ext_attrs: bool = False) -> Iterable[rdf_client_fs.StatEntry]:
del ext_attrs # Unused.
self._CheckIsDirectory()
for entry in self.fd.sub_file_entries:
pathspec = self.pathspec.Copy()
pathspec.last.path = utils.JoinPath(pathspec.last.path, entry.name)
pathspec.last.inode = entry.file_reference
pathspec.last.options = rdf_paths.PathSpec.Options.CASE_LITERAL
data_stream = entry if entry.has_default_data_stream() else None
yield self._Stat(entry, data_stream, pathspec.Copy())
# Create extra entries for alternate data streams
for data_stream in entry.alternate_data_streams:
pathspec.last.stream_name = data_stream.name
yield self._Stat(entry, data_stream, pathspec.Copy())
def ListNames(self) -> Iterable[Text]:
self._CheckIsDirectory()
for entry in self.fd.sub_file_entries:
yield entry.name
def _CheckIsDirectory(self) -> None:
if not self.IsDirectory():
raise IOError("{} is not a directory".format(
self.pathspec.CollapsePath()))
def _Stat(
self,
entry: pyfsntfs.file_entry,
data_stream: pyfsntfs.data_stream,
pathspec: rdf_paths.PathSpec,
) -> rdf_client_fs.StatEntry:
st = rdf_client_fs.StatEntry()
st.pathspec = pathspec
st.st_atime = rdfvalue.RDFDatetimeSeconds.FromDatetime(
entry.get_access_time())
st.st_mtime = rdfvalue.RDFDatetimeSeconds.FromDatetime(
entry.get_modification_time())
st.st_btime = rdfvalue.RDFDatetimeSeconds.FromDatetime(
entry.get_creation_time())
st.st_ctime = rdfvalue.RDFDatetimeSeconds.FromDatetime(
entry.get_entry_modification_time())
if entry.has_directory_entries_index():
st.st_mode = stat.S_IFDIR
else:
st.st_mode = stat.S_IFREG
if data_stream is not None:
st.st_size = data_stream.get_size()
flags = entry.file_attribute_flags
st.st_mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
if (flags & FILE_ATTRIBUTE_READONLY) == 0:
st.st_mode |= stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
if (flags & FILE_ATTRIBUTE_HIDDEN) == 0:
st.st_mode |= stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
return st
@classmethod
def Open(
cls,
fd: Optional[vfs_base.VFSHandler],
component: rdf_paths.PathSpec,
handlers: Dict[Any, Type[vfs_base.VFSHandler]],
pathspec: Optional[rdf_paths.PathSpec] = None,
progress_callback: Optional[Callable[[], None]] = None
) -> Optional[vfs_base.VFSHandler]:
# A Pathspec which starts with NTFS means we need to resolve the mount
# point at runtime.
if (fd is None and
component.pathtype == rdf_paths.PathSpec.PathType.NTFS and
pathspec is not None):
# We are the top level handler. This means we need to check the system
# mounts to work out the exact mount point and device we need to
# open. We then modify the pathspec so we get nested in the raw
# pathspec.
raw_pathspec, corrected_path = client_utils.GetRawDevice(component.path) # pytype: disable=attribute-error
# Insert the raw device before the component in the pathspec and correct
# the path
component.path = corrected_path
pathspec.Insert(0, component)
pathspec.Insert(0, raw_pathspec)
# Allow incoming pathspec to be given in the local system path
# conventions.
for component in pathspec:
if component.path:
component.path = client_utils.LocalPathToCanonicalPath(component.path)
# We have not actually opened anything in this iteration, but modified the
# pathspec. Next time we should be able to open it properly.
return fd
# If an inode is specified, just use it directly.
# This is necessary so that component.path is ignored.
elif component.HasField("inode"):
return NTFSFile(
fd, handlers, component, progress_callback=progress_callback)
else:
return super(NTFSFile, cls).Open(
fd=fd,
component=component,
handlers=handlers,
pathspec=pathspec,
progress_callback=progress_callback)
| 36.790323 | 137 | 0.694432 | 8,071 | 0.88459 | 923 | 0.101162 | 1,946 | 0.213284 | 0 | 0 | 1,778 | 0.194871 |
b44954b2c2b3e9462c5ae4cfc721ce64071a8588 | 1,184 | py | Python | 04.Encapsulation/Exe/pizza_maker/project/main.py | nmoskova/Python-OOP | 07327bcb93eee3a7db5d7c0bbdd1b54eb9e8b864 | [
"MIT"
]
| null | null | null | 04.Encapsulation/Exe/pizza_maker/project/main.py | nmoskova/Python-OOP | 07327bcb93eee3a7db5d7c0bbdd1b54eb9e8b864 | [
"MIT"
]
| null | null | null | 04.Encapsulation/Exe/pizza_maker/project/main.py | nmoskova/Python-OOP | 07327bcb93eee3a7db5d7c0bbdd1b54eb9e8b864 | [
"MIT"
]
| null | null | null | from encapsulation_04.exe.pizza_maker.project.dough import Dough
from encapsulation_04.exe.pizza_maker.project.pizza import Pizza
from encapsulation_04.exe.pizza_maker.project.topping import Topping
tomato_topping = Topping("Tomato", 60)
print(tomato_topping.topping_type)
print(tomato_topping.weight)
mushrooms_topping = Topping("Mushroom", 75)
print(mushrooms_topping.topping_type)
print(mushrooms_topping.weight)
mozzarella_topping = Topping("Mozzarella", 80)
print(mozzarella_topping.topping_type)
print(mozzarella_topping.weight)
cheddar_topping = Topping("Cheddar", 150)
pepperoni_topping = Topping("Pepperoni", 120)
white_flour_dough = Dough("White Flour", "Mixing", 200)
print(white_flour_dough.flour_type)
print(white_flour_dough.weight)
print(white_flour_dough.baking_technique)
whole_wheat_dough = Dough("Whole Wheat Flour", "Mixing", 200)
print(whole_wheat_dough.weight)
print(whole_wheat_dough.flour_type)
print(whole_wheat_dough.baking_technique)
p = Pizza("Margherita", whole_wheat_dough, 2)
p.add_topping(tomato_topping)
print(p.calculate_total_weight())
p.add_topping(mozzarella_topping)
print(p.calculate_total_weight())
p.add_topping(mozzarella_topping)
| 29.6 | 68 | 0.831081 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.092905 |
b4498ac05bf8ea7aa023efd2ecbb1bd7c7b56fb2 | 1,158 | py | Python | src/unicon/plugins/iosxe/cat9k/__init__.py | nielsvanhooy/unicon.plugins | 3416fd8223f070cbb67a2cbe604e3c5d13584318 | [
"Apache-2.0"
]
| null | null | null | src/unicon/plugins/iosxe/cat9k/__init__.py | nielsvanhooy/unicon.plugins | 3416fd8223f070cbb67a2cbe604e3c5d13584318 | [
"Apache-2.0"
]
| null | null | null | src/unicon/plugins/iosxe/cat9k/__init__.py | nielsvanhooy/unicon.plugins | 3416fd8223f070cbb67a2cbe604e3c5d13584318 | [
"Apache-2.0"
]
| null | null | null | """ cat9k IOS-XE connection implementation.
"""
__author__ = "Rob Trotter <[email protected]>"
from unicon.plugins.iosxe import (
IosXESingleRpConnection,
IosXEDualRPConnection,
IosXEServiceList,
HAIosXEServiceList)
from .statemachine import IosXECat9kSingleRpStateMachine, IosXECat9kDualRpStateMachine
from .settings import IosXECat9kSettings
from . import service_implementation as svc
class IosXECat9kServiceList(IosXEServiceList):
def __init__(self):
super().__init__()
self.reload = svc.Reload
self.rommon = svc.Rommon
class IosxeCat9kHAServiceList(HAIosXEServiceList):
def __init__(self):
super().__init__()
self.reload = svc.HAReloadService
class IosXECat9kSingleRpConnection(IosXESingleRpConnection):
platform = 'cat9k'
state_machine_class = IosXECat9kSingleRpStateMachine
subcommand_list = IosXECat9kServiceList
settings = IosXECat9kSettings()
class IosXECat9kDualRPConnection(IosXEDualRPConnection):
platform = 'cat9k'
subcommand_list = IosxeCat9kHAServiceList
settings = IosXECat9kSettings()
state_machine_class = IosXECat9kDualRpStateMachine
| 26.930233 | 86 | 0.768566 | 742 | 0.64076 | 0 | 0 | 0 | 0 | 0 | 0 | 90 | 0.07772 |
b44998685fc665e80493c8e5ef4cef6084f68ca9 | 4,875 | py | Python | ludopediaAnuncios.py | christianbobsin/LudopediaDataMiner | d136a40b024b3611a8a88371b4a47a673c782180 | [
"MIT"
]
| 2 | 2018-03-16T23:05:51.000Z | 2021-08-05T03:23:44.000Z | ludopediaAnuncios.py | christianbobsin/LudopediaDataMiner | d136a40b024b3611a8a88371b4a47a673c782180 | [
"MIT"
]
| null | null | null | ludopediaAnuncios.py | christianbobsin/LudopediaDataMiner | d136a40b024b3611a8a88371b4a47a673c782180 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
from lxml import html
from time import sleep
from datetime import datetime
import requests
import os
import sqlite3
import sys
# In the terminal run ~: python ludopedia.py [idIni] [regs]
# e.g. ~: python ludopedia.py 451 3000
con = sqlite3.connect('ludopedia.db')
cursor = con.cursor()
cursor.execute("""SELECT (ANUNCIO + 1) FROM JOGOS WHERE ANUNCIO=(SELECT MAX(ANUNCIO) FROM JOGOS WHERE TIPO='ANUNCIO') """)
anuncios = cursor.fetchall()
con.close()
idIni = int(anuncios[0][0])
#idIni = 75691
#regs = int(sys.argv[2])
regs = 9999
idMax = ( idIni + regs )
jogosAdicionados = 0
for id in range(idIni, idMax):
# 'http://www.ludopedia.com.br/anuncio?id_anuncio='+str(id)
#url = 'http://www.ludopedia.com.br/anuncio?id_anuncio=' % id
try:
page = requests.get('http://www.ludopedia.com.br/anuncio?id_anuncio='+str(id))
tree = html.fromstring(page.content)
except:
print 'nova tentativa em 10s'
sleep(10)
page = requests.get('http://www.ludopedia.com.br/anuncio?id_anuncio='+str(id))
tree = html.fromstring(page.content)
#jogoNome = tree.xpath('//div[@class="col-xs-10"]/h3/a/text()')
jogoNome = tree.xpath('//*[@id="page-content"]/div/div/div/div[2]/h3/a/text()')
#jogoFlavor = tree.xpath('//div[@class="col-xs-10"]/h3/span/text()')
jogoFlavor = tree.xpath('//*[@id="page-content"]/div/div/div/div[2]/h3/span/text()')
if len(jogoFlavor):
detalhes = jogoFlavor[0]
else:
detalhes = 'NA'
jogoPreco = tree.xpath('//span[@class="negrito proximo_lance"]/text()')
if len(jogoPreco):
jogoPreco =jogoPreco[0].split()
jogoPreco[1] = jogoPreco[1].replace('.','')
preco = float( jogoPreco[1].replace( ',','.' ) )
else:
preco = 0.0
status = tree.xpath('//td/span/text()')
validadeAnuncio = tree.xpath('//td/text()')
if len(validadeAnuncio):
validadeAnuncio[4] = validadeAnuncio[4].replace(',',' ')
data = validadeAnuncio[4].split()
ano = data[0].split('/')
hora = data[1].split(':')
data = datetime( int(ano[2]), int(ano[1]),int(ano[0]), int(hora[0]), int(hora[1]))
if ( data > datetime.now() and status[1] == 'Vendido'):
data = datetime.now()
else:
data = datetime( 1979, 8, 10 )
pessoa = tree.xpath('//td/a/text()')
if len(pessoa):
vendedor = pessoa[1]
if len(pessoa) < 3:
comprador = 'NA'
else:
comprador = pessoa[2]
current = id - idIni + 1
total = idMax - idIni
progress = (current/float(total))*100
#print str(current) + ' / ' + str(total) + " : " + "%.2f" % round(progress,2) + "%"
#print 'Id: ', id
#jogoCount = id - idIni
if len(jogoNome):
jogosAdicionados = jogosAdicionados + 1
if ( len(status[1]) > 15 ):
status[1] = 'Ativo'
#print 'Jogo: ', jogoNome[0]
#print 'Detalhes ', detalhes
#print 'Preco: ', str(preco)
#print 'Status: ', status[1]
#print 'Validade: ', data
#print 'Estado: ', validadeAnuncio[6]
#print 'Local: ', validadeAnuncio[8]
#print 'Vendedor: ', vendedor
#print 'Comprador:', comprador
print str( current ).zfill( 4 ) + ' '+ str ( id ) + ' ' + ano[2] + '-' +str( ano[1] ).zfill(2) + '-'+ str( ano[0] ).zfill(2) + ' ' + status[1] + '\t\t' + validadeAnuncio[6] + '\t' + str(preco) + '\t ' + jogoNome[0]
con = sqlite3.connect('ludopedia.db')
cursor = con.cursor()
cursor.execute("""INSERT INTO JOGOS ( ANUNCIO, JOGO, SUBTITULO, PRECO, STATUS, VALIDADE, ESTADO, ORIGEM, VENDEDOR, COMPRADOR, TIPO )
VALUES (?,?,?,?,?,?,?,?,?,?,?)""", (id, jogoNome[0], detalhes, preco, status[1], data, validadeAnuncio[6],
validadeAnuncio[8], vendedor, comprador, 'ANUNCIO' ) )
try:
con.commit()
except:
print 'Falha no Commit, tentando novamente em 10s.'
sleep(10)
con.commit()
con.close()
#print '-----------------------'
#print 'Jogos Adicionados: ' + str( jogosAdicionados )
#print '-----------------------'
else:
print str( current ).zfill( 4 ) + ' ' + str ( id ) + '\t ' + '-------' + ' \t ' + '-------' + ' \t ' + '------' + '\t ' + '---'
sleep(0.05)
#os.system('clear')
print '---------------------------------------------------------------'
print 'Jogos Adicionados: ' + str( jogosAdicionados )
print '---------------------------------------------------------------'
########################################################################
#sTable = sorted( table, key = getKey )
#print tabulate(sTable, tablefmt="plain" )
#f = open ( 'LudopediaLeaks %s-%s.csv' % ( idIni, idMax) , 'w' )
#for x in range ( 0, len( sTable ) ):
# row = "%s;%s;%s;%s;%s;%s;%s;%s;%s;%s" % ( sTable[x][0],
# sTable[x][1].encode('utf8'),
# sTable[x][2].encode('utf8'),
# sTable[x][3],
# sTable[x][4].encode('utf8'),
# sTable[x][5],
# sTable[x][6].encode('utf8'),
# sTable[x][7].encode('utf8'),
# sTable[x][8].encode('utf8'),
# sTable[x][9].encode('utf8') )
# print row
# f.write(row + '\n' )
#f.close()
| 28.676471 | 223 | 0.570256 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,564 | 0.525949 |
b449d07b5e029400778e8d16d3a55f2ee36130ff | 18,334 | py | Python | midterm/yolo_utils.py | ClarkBrun/emotic | ea4c1d846ac8aa18a902c0e68fb6e5dc5e1ae2d1 | [
"MIT"
]
| null | null | null | midterm/yolo_utils.py | ClarkBrun/emotic | ea4c1d846ac8aa18a902c0e68fb6e5dc5e1ae2d1 | [
"MIT"
]
| null | null | null | midterm/yolo_utils.py | ClarkBrun/emotic | ea4c1d846ac8aa18a902c0e68fb6e5dc5e1ae2d1 | [
"MIT"
]
| null | null | null | import cv2
import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
def to_cpu(tensor):
return tensor.detach().cpu()
def xywh2xyxy(x):
''' Convert bounding box from [x, y, w, h] to [x1, y1, x2, y2]
:param x: bounding boxes array
:return: Converted bounding box array
'''
y = x.new(x.shape)
y[..., 0] = x[..., 0] - x[..., 2] / 2
y[..., 1] = x[..., 1] - x[..., 3] / 2
y[..., 2] = x[..., 0] + x[..., 2] / 2
y[..., 3] = x[..., 1] + x[..., 3] / 2
return y
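# Worked example (added for illustration, not in the original file):
#   xywh2xyxy(torch.tensor([[50., 50., 20., 10.]]))
#   -> tensor([[40., 45., 60., 55.]])   # x1 = 50 - 20/2, y1 = 50 - 10/2, ...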
def bbox_iou(box1, box2, x1y1x2y2=True):
"""
Returns the IoU of two bounding boxes
"""
if not x1y1x2y2:
# Transform from center and width to exact coordinates
b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
else:
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
    # get the coordinates of the intersection rectangle
inter_rect_x1 = torch.max(b1_x1, b2_x1)
inter_rect_y1 = torch.max(b1_y1, b2_y1)
inter_rect_x2 = torch.min(b1_x2, b2_x2)
inter_rect_y2 = torch.min(b1_y2, b2_y2)
# Intersection area
inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(
inter_rect_y2 - inter_rect_y1 + 1, min=0
)
# Union Area
b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
iou = inter_area / (b1_area + b2_area - inter_area + 1e-16)
return iou
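# Worked example (added for illustration, not in the original file): two 11x11
# pixel boxes offset by 5 px along x overlap on a 6x11 region, so
#   bbox_iou(torch.tensor([[0., 0., 10., 10.]]), torch.tensor([[5., 0., 15., 10.]]))
# gives 66 / (121 + 121 - 66) ~= 0.375 (the "+ 1" terms treat coordinates as
# inclusive pixel indices).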
def rescale_boxes(boxes, current_dim, original_shape):
""" Rescales bounding boxes to the original shape """
orig_h, orig_w = original_shape
# The amount of padding that was added
pad_x = max(orig_h - orig_w, 0) * (current_dim / max(original_shape))
pad_y = max(orig_w - orig_h, 0) * (current_dim / max(original_shape))
# Image height and width after padding is removed
unpad_h = current_dim - pad_y
unpad_w = current_dim - pad_x
# Rescale bounding boxes to dimension of original image
boxes[:, 0] = ((boxes[:, 0] - pad_x // 2) / unpad_w) * orig_w
boxes[:, 1] = ((boxes[:, 1] - pad_y // 2) / unpad_h) * orig_h
boxes[:, 2] = ((boxes[:, 2] - pad_x // 2) / unpad_w) * orig_w
boxes[:, 3] = ((boxes[:, 3] - pad_y // 2) / unpad_h) * orig_h
return boxes
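# Worked example (added for illustration): mapping detections made on a
# 416x416 letterboxed input back to a 640x480 (w x h) frame gives
#   pad_x = 0, pad_y = (640 - 480) * 416 / 640 = 104
#   x' = (x / 416) * 640        y' = ((y - 52) / 312) * 480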
def non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.4):
"""
Removes detections with lower object confidence score than 'conf_thres' and performs
Non-Maximum Suppression to further filter detections.
Returns detections with shape:
(x1, y1, x2, y2, object_conf, class_score, class_pred)
"""
# From (center x, center y, width, height) to (x1, y1, x2, y2)
prediction[..., :4] = xywh2xyxy(prediction[..., :4])
output = [None for _ in range(len(prediction))]
for image_i, image_pred in enumerate(prediction):
# Filter out confidence scores below threshold
image_pred = image_pred[image_pred[:, 4] >= conf_thres]
# If none are remaining => process next image
if not image_pred.size(0):
continue
# Object confidence times class confidence
score = image_pred[:, 4] * image_pred[:, 5:].max(1)[0]
# Sort by it
image_pred = image_pred[(-score).argsort()]
class_confs, class_preds = image_pred[:, 5:].max(1, keepdim=True)
detections = torch.cat((image_pred[:, :5], class_confs.float(), class_preds.float()), 1)
# Perform non-maximum suppression
keep_boxes = []
while detections.size(0):
large_overlap = bbox_iou(detections[0, :4].unsqueeze(0), detections[:, :4]) > nms_thres
label_match = detections[0, -1] == detections[:, -1]
# Indices of boxes with lower confidence scores, large IOUs and matching labels
invalid = large_overlap & label_match
weights = detections[invalid, 4:5]
# Merge overlapping bboxes by order of confidence
detections[0, :4] = (weights * detections[invalid, :4]).sum(0) / weights.sum()
keep_boxes += [detections[0]]
detections = detections[~invalid]
if keep_boxes:
output[image_i] = torch.stack(keep_boxes)
return output
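# Minimal usage sketch (illustrative; `model` and `imgs` are assumed to be a
# Darknet model and a batch of 416x416 image tensors, neither defined here):
#
#   with torch.no_grad():
#       raw = model(imgs)                           # (batch, num_boxes, 5 + num_classes)
#       detections = non_max_suppression(raw, conf_thres=0.5, nms_thres=0.4)
#   # detections[i] is None or a tensor of rows
#   # (x1, y1, x2, y2, object_conf, class_score, class_pred)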
def parse_model_config(path):
"""Parses the yolo-v3 layer configuration file and returns module definitions"""
file = open(path, 'r')
lines = file.read().split('\n')
lines = [x for x in lines if x and not x.startswith('#')]
lines = [x.rstrip().lstrip() for x in lines] # get rid of fringe whitespaces
module_defs = []
for line in lines:
if line.startswith('['): # This marks the start of a new block
module_defs.append({})
module_defs[-1]['type'] = line[1:-1].rstrip()
if module_defs[-1]['type'] == 'convolutional':
module_defs[-1]['batch_normalize'] = 0
else:
key, value = line.split("=")
value = value.strip()
module_defs[-1][key.rstrip()] = value.strip()
return module_defs
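# Illustrative snippet of the Darknet-style cfg format this parser expects
# (values are only an example):
#
#   [convolutional]
#   batch_normalize=1
#   filters=32
#   size=3
#   stride=1
#   activation=leaky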
def parse_data_config(path):
"""Parses the data configuration file"""
options = dict()
options['gpus'] = '0,1,2,3'
options['num_workers'] = '10'
with open(path, 'r') as fp:
lines = fp.readlines()
for line in lines:
line = line.strip()
if line == '' or line.startswith('#'):
continue
key, value = line.split('=')
options[key.strip()] = value.strip()
return options
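# Illustrative .data file matching the key=value parser above (paths are
# placeholders):
#
#   classes=80
#   train=data/coco/trainvalno5k.txt
#   valid=data/coco/5k.txt
#   names=data/coco.names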
def create_modules(module_defs):
"""
Constructs module list of layer blocks from module configuration in module_defs
"""
hyperparams = module_defs.pop(0)
output_filters = [int(hyperparams["channels"])]
module_list = nn.ModuleList()
for module_i, module_def in enumerate(module_defs):
modules = nn.Sequential()
if module_def["type"] == "convolutional":
bn = int(module_def["batch_normalize"])
filters = int(module_def["filters"])
kernel_size = int(module_def["size"])
pad = (kernel_size - 1) // 2
modules.add_module(
f"conv_{module_i}",
nn.Conv2d(
in_channels=output_filters[-1],
out_channels=filters,
kernel_size=kernel_size,
stride=int(module_def["stride"]),
padding=pad,
bias=not bn,
),
)
if bn:
modules.add_module(f"batch_norm_{module_i}", nn.BatchNorm2d(filters, momentum=0.9, eps=1e-5))
if module_def["activation"] == "leaky":
modules.add_module(f"leaky_{module_i}", nn.LeakyReLU(0.1))
elif module_def["type"] == "maxpool":
kernel_size = int(module_def["size"])
stride = int(module_def["stride"])
if kernel_size == 2 and stride == 1:
modules.add_module(f"_debug_padding_{module_i}", nn.ZeroPad2d((0, 1, 0, 1)))
maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=int((kernel_size - 1) // 2))
modules.add_module(f"maxpool_{module_i}", maxpool)
elif module_def["type"] == "upsample":
upsample = Upsample(scale_factor=int(module_def["stride"]), mode="nearest")
modules.add_module(f"upsample_{module_i}", upsample)
elif module_def["type"] == "route":
layers = [int(x) for x in module_def["layers"].split(",")]
filters = sum([output_filters[1:][i] for i in layers])
modules.add_module(f"route_{module_i}", EmptyLayer())
elif module_def["type"] == "shortcut":
filters = output_filters[1:][int(module_def["from"])]
modules.add_module(f"shortcut_{module_i}", EmptyLayer())
elif module_def["type"] == "yolo":
anchor_idxs = [int(x) for x in module_def["mask"].split(",")]
# Extract anchors
anchors = [int(x) for x in module_def["anchors"].split(",")]
anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]
anchors = [anchors[i] for i in anchor_idxs]
num_classes = int(module_def["classes"])
img_size = int(hyperparams["height"])
# Define detection layer
yolo_layer = YOLOLayer(anchors, num_classes, img_size)
modules.add_module(f"yolo_{module_i}", yolo_layer)
# Register module list and number of output filters
module_list.append(modules)
output_filters.append(filters)
return hyperparams, module_list
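# Editor's note (hedged sketch): create_modules() consumes module_defs[0] as the
# [net] hyperparameter block, so it must provide at least 'channels' and 'height'.
# Typical wiring, mirroring Darknet.__init__ further below:
#   hyperparams, module_list = create_modules(parse_model_config("yolov3.cfg"))
#   assert isinstance(module_list, nn.ModuleList)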
class Upsample(nn.Module):
""" nn.Upsample is deprecated """
def __init__(self, scale_factor, mode="nearest"):
super(Upsample, self).__init__()
self.scale_factor = scale_factor
self.mode = mode
def forward(self, x):
x = F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode)
return x
class EmptyLayer(nn.Module):
"""Placeholder for 'route' and 'shortcut' layers"""
def __init__(self):
super(EmptyLayer, self).__init__()
class YOLOLayer(nn.Module):
"""Detection layer"""
def __init__(self, anchors, num_classes, img_dim=416):
super(YOLOLayer, self).__init__()
self.anchors = anchors
self.num_anchors = len(anchors)
self.num_classes = num_classes
self.ignore_thres = 0.5
self.mse_loss = nn.MSELoss()
self.bce_loss = nn.BCELoss()
self.obj_scale = 1
self.noobj_scale = 100
self.metrics = {}
self.img_dim = img_dim
self.grid_size = 0 # grid size
def compute_grid_offsets(self, grid_size, cuda=True):
self.grid_size = grid_size
g = self.grid_size
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
self.stride = self.img_dim / self.grid_size
# Calculate offsets for each grid
self.grid_x = torch.arange(g).repeat(g, 1).view([1, 1, g, g]).type(FloatTensor)
self.grid_y = torch.arange(g).repeat(g, 1).t().view([1, 1, g, g]).type(FloatTensor)
self.scaled_anchors = FloatTensor([(a_w / self.stride, a_h / self.stride) for a_w, a_h in self.anchors])
self.anchor_w = self.scaled_anchors[:, 0:1].view((1, self.num_anchors, 1, 1))
self.anchor_h = self.scaled_anchors[:, 1:2].view((1, self.num_anchors, 1, 1))
def forward(self, x, targets=None, img_dim=None):
# Tensors for cuda support
FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if x.is_cuda else torch.ByteTensor
self.img_dim = img_dim
num_samples = x.size(0)
grid_size = x.size(2)
prediction = (
x.view(num_samples, self.num_anchors, self.num_classes + 5, grid_size, grid_size)
.permute(0, 1, 3, 4, 2)
.contiguous()
)
# Get outputs
x = torch.sigmoid(prediction[..., 0]) # Center x
y = torch.sigmoid(prediction[..., 1]) # Center y
w = prediction[..., 2] # Width
h = prediction[..., 3] # Height
pred_conf = torch.sigmoid(prediction[..., 4]) # Conf
pred_cls = torch.sigmoid(prediction[..., 5:]) # Cls pred.
# If grid size does not match current we compute new offsets
if grid_size != self.grid_size:
self.compute_grid_offsets(grid_size, cuda=x.is_cuda)
# Add offset and scale with anchors
pred_boxes = FloatTensor(prediction[..., :4].shape)
pred_boxes[..., 0] = x.data + self.grid_x
pred_boxes[..., 1] = y.data + self.grid_y
pred_boxes[..., 2] = torch.exp(w.data) * self.anchor_w
pred_boxes[..., 3] = torch.exp(h.data) * self.anchor_h
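        # Editor's note: the four assignments above implement the standard YOLO
        # box decoding, expressed in grid units:
        #   b_x = sigmoid(t_x) + c_x        b_y = sigmoid(t_y) + c_y
        #   b_w = p_w * exp(t_w)            b_h = p_h * exp(t_h)
        # where (c_x, c_y) are the per-cell offsets (self.grid_x / self.grid_y)
        # and (p_w, p_h) are the anchor sizes already scaled by the stride.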
output = torch.cat(
(
pred_boxes.view(num_samples, -1, 4) * self.stride,
pred_conf.view(num_samples, -1, 1),
pred_cls.view(num_samples, -1, self.num_classes),
),
-1,
)
if targets is None:
return output, 0
else:
iou_scores, class_mask, obj_mask, noobj_mask, tx, ty, tw, th, tcls, tconf = build_targets(
pred_boxes=pred_boxes,
pred_cls=pred_cls,
target=targets,
anchors=self.scaled_anchors,
ignore_thres=self.ignore_thres,
)
# Loss : Mask outputs to ignore non-existing objects (except with conf. loss)
loss_x = self.mse_loss(x[obj_mask], tx[obj_mask])
loss_y = self.mse_loss(y[obj_mask], ty[obj_mask])
loss_w = self.mse_loss(w[obj_mask], tw[obj_mask])
loss_h = self.mse_loss(h[obj_mask], th[obj_mask])
loss_conf_obj = self.bce_loss(pred_conf[obj_mask], tconf[obj_mask])
loss_conf_noobj = self.bce_loss(pred_conf[noobj_mask], tconf[noobj_mask])
loss_conf = self.obj_scale * loss_conf_obj + self.noobj_scale * loss_conf_noobj
loss_cls = self.bce_loss(pred_cls[obj_mask], tcls[obj_mask])
total_loss = loss_x + loss_y + loss_w + loss_h + loss_conf + loss_cls
# Metrics
cls_acc = 100 * class_mask[obj_mask].mean()
conf_obj = pred_conf[obj_mask].mean()
conf_noobj = pred_conf[noobj_mask].mean()
conf50 = (pred_conf > 0.5).float()
iou50 = (iou_scores > 0.5).float()
iou75 = (iou_scores > 0.75).float()
detected_mask = conf50 * class_mask * tconf
precision = torch.sum(iou50 * detected_mask) / (conf50.sum() + 1e-16)
recall50 = torch.sum(iou50 * detected_mask) / (obj_mask.sum() + 1e-16)
recall75 = torch.sum(iou75 * detected_mask) / (obj_mask.sum() + 1e-16)
self.metrics = {
"loss": to_cpu(total_loss).item(),
"x": to_cpu(loss_x).item(),
"y": to_cpu(loss_y).item(),
"w": to_cpu(loss_w).item(),
"h": to_cpu(loss_h).item(),
"conf": to_cpu(loss_conf).item(),
"cls": to_cpu(loss_cls).item(),
"cls_acc": to_cpu(cls_acc).item(),
"recall50": to_cpu(recall50).item(),
"recall75": to_cpu(recall75).item(),
"precision": to_cpu(precision).item(),
"conf_obj": to_cpu(conf_obj).item(),
"conf_noobj": to_cpu(conf_noobj).item(),
"grid_size": grid_size,
}
return output, total_loss
class Darknet(nn.Module):
"""YOLOv3 object detection model"""
def __init__(self, config_path, img_size=416):
super(Darknet, self).__init__()
self.module_defs = parse_model_config(config_path)
self.hyperparams, self.module_list = create_modules(self.module_defs)
self.yolo_layers = [layer[0] for layer in self.module_list if hasattr(layer[0], "metrics")]
self.img_size = img_size
self.seen = 0
self.header_info = np.array([0, 0, 0, self.seen, 0], dtype=np.int32)
def forward(self, x, targets=None):
img_dim = x.shape[2]
loss = 0
layer_outputs, yolo_outputs = [], []
for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):
if module_def["type"] in ["convolutional", "upsample", "maxpool"]:
x = module(x)
elif module_def["type"] == "route":
x = torch.cat([layer_outputs[int(layer_i)] for layer_i in module_def["layers"].split(",")], 1)
elif module_def["type"] == "shortcut":
layer_i = int(module_def["from"])
x = layer_outputs[-1] + layer_outputs[layer_i]
elif module_def["type"] == "yolo":
x, layer_loss = module[0](x, targets, img_dim)
loss += layer_loss
yolo_outputs.append(x)
layer_outputs.append(x)
yolo_outputs = to_cpu(torch.cat(yolo_outputs, 1))
return yolo_outputs if targets is None else (loss, yolo_outputs)
def load_darknet_weights(self, weights_path):
"""Parses and loads the weights stored in 'weights_path'"""
# Open the weights file
with open(weights_path, "rb") as f:
header = np.fromfile(f, dtype=np.int32, count=5) # First five are header values
self.header_info = header # Needed to write header when saving weights
self.seen = header[3] # number of images seen during training
weights = np.fromfile(f, dtype=np.float32) # The rest are weights
# Establish cutoff for loading backbone weights
cutoff = None
if "darknet53.conv.74" in weights_path:
cutoff = 75
ptr = 0
for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):
if i == cutoff:
break
if module_def["type"] == "convolutional":
conv_layer = module[0]
if module_def["batch_normalize"]:
# Load BN bias, weights, running mean and running variance
bn_layer = module[1]
num_b = bn_layer.bias.numel() # Number of biases
# Bias
bn_b = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.bias)
bn_layer.bias.data.copy_(bn_b)
ptr += num_b
# Weight
bn_w = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.weight)
bn_layer.weight.data.copy_(bn_w)
ptr += num_b
# Running Mean
bn_rm = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.running_mean)
bn_layer.running_mean.data.copy_(bn_rm)
ptr += num_b
# Running Var
bn_rv = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.running_var)
bn_layer.running_var.data.copy_(bn_rv)
ptr += num_b
else:
# Load conv. bias
num_b = conv_layer.bias.numel()
conv_b = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(conv_layer.bias)
conv_layer.bias.data.copy_(conv_b)
ptr += num_b
# Load conv. weights
num_w = conv_layer.weight.numel()
conv_w = torch.from_numpy(weights[ptr : ptr + num_w]).view_as(conv_layer.weight)
conv_layer.weight.data.copy_(conv_w)
ptr += num_w
def save_darknet_weights(self, path, cutoff=-1):
"""
@:param path - path of the new weights file
@:param cutoff - save layers between 0 and cutoff (cutoff = -1 -> all are saved)
"""
fp = open(path, "wb")
self.header_info[3] = self.seen
self.header_info.tofile(fp)
# Iterate through layers
for i, (module_def, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):
if module_def["type"] == "convolutional":
conv_layer = module[0]
# If batch norm, load bn first
if module_def["batch_normalize"]:
bn_layer = module[1]
bn_layer.bias.data.cpu().numpy().tofile(fp)
bn_layer.weight.data.cpu().numpy().tofile(fp)
bn_layer.running_mean.data.cpu().numpy().tofile(fp)
bn_layer.running_var.data.cpu().numpy().tofile(fp)
# Load conv bias
else:
conv_layer.bias.data.cpu().numpy().tofile(fp)
# Load conv weights
conv_layer.weight.data.cpu().numpy().tofile(fp)
fp.close()
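# Editor's note (descriptive, inferred from the two weight methods above): the
# darknet weight file is a 5-value int32 header (the 4th value is the number of
# images seen) followed by one flat float32 array. For each convolutional layer,
# batch-norm parameters are stored in the order bias, weight, running_mean,
# running_var (or just the conv bias when batch norm is off), followed by the
# convolution weights.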
def prepare_yolo(model_dir):
''' Download yolo model files and load the model weights
:param model_dir: Directory path where to store yolo model weights and yolo model configuration file.
:return: Yolo model after loading model weights
'''
cfg_file = os.path.join(model_dir, 'yolov3.cfg')
if not os.path.exists(cfg_file):
download_command = 'wget https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3.cfg -O ' + cfg_file
os.system(download_command)
weight_file = os.path.join(model_dir, 'yolov3.weights')
if not os.path.exists(weight_file):
download_command = 'wget https://pjreddie.com/media/files/yolov3.weights -O ' + weight_file
os.system(download_command)
yolo_model = Darknet(cfg_file, 416)
yolo_model.load_darknet_weights(weight_file)
print ('prepared yolo model')
return yolo_model
# if __name__ == '__main__':
# prepare_yolo(model_dir = '/home/face-r/Steps_face_recognition/emotic/debug/models')
| 37.038384 | 115 | 0.678412 | 9,443 | 0.515054 | 0 | 0 | 0 | 0 | 0 | 0 | 4,020 | 0.219265 |
b44ac7b8e26906825e3b89cdfb277cf731bbe790 | 5,557 | py | Python | pytracking-master/ltr/train_settings/bbreg/atom.py | wsumel/AMMC | ef101878b4a97f07984186ea09146348c0526fa6 | ["Apache-2.0"] | 3 | 2021-12-02T11:34:37.000Z | 2021-12-19T09:30:10.000Z | pytracking-master/ltr/train_settings/bbreg/atom.py | wsumel/AMMC | ef101878b4a97f07984186ea09146348c0526fa6 | ["Apache-2.0"] | null | null | null | pytracking-master/ltr/train_settings/bbreg/atom.py | wsumel/AMMC | ef101878b4a97f07984186ea09146348c0526fa6 | ["Apache-2.0"] | null | null | null |
import torch.nn as nn
import torch.optim as optim
from ltr.dataset import Lasot, TrackingNet, MSCOCOSeq, Got10k
from ltr.data import processing, sampler, LTRLoader
import ltr.models.bbreg.atom as atom_models
from ltr import actors
from ltr.trainers import LTRTrainer
import ltr.data.transforms as tfm
def run(settings):
# Most common settings are assigned in the settings struct
settings.description = 'ATOM IoUNet with default settings, but additionally using GOT10k for training.'
settings.batch_size = 64
settings.num_workers = 8 #8
settings.print_interval = 1
settings.normalize_mean = [0.485, 0.456, 0.406]
settings.normalize_std = [0.229, 0.224, 0.225]
settings.search_area_factor = 5.0
settings.feature_sz = 18
settings.output_sz = settings.feature_sz * 16
settings.center_jitter_factor = {'train': 0, 'test': 4.5}
settings.scale_jitter_factor = {'train': 0, 'test': 0.5}
# Train datasets
lasot_train = Lasot(settings.env.lasot_dir, split='train')
got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')
trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))
# coco_train = MSCOCOSeq(settings.env.coco_dir,version='2017')
# Validation datasets
got10k_val = Got10k(settings.env.got10k_dir, split='votval')
# The joint augmentation transform, that is applied to the pairs jointly
transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))
# The augmentation transform applied to the training set (individually to each image in the pair)
transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))
# The augmentation transform applied to the validation set (individually to each image in the pair)
transform_val = tfm.Transform(tfm.ToTensor(),
tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))
# Data processing to do on the training pairs
proposal_params = {'min_iou': 0.1, 'boxes_per_frame': 16, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]}
data_processing_train = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,
output_sz=settings.output_sz,
center_jitter_factor=settings.center_jitter_factor,
scale_jitter_factor=settings.scale_jitter_factor,
mode='sequence',
proposal_params=proposal_params,
transform=transform_train,
joint_transform=transform_joint)
# Data processing to do on the validation pairs
data_processing_val = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,
output_sz=settings.output_sz,
center_jitter_factor=settings.center_jitter_factor,
scale_jitter_factor=settings.scale_jitter_factor,
mode='sequence',
proposal_params=proposal_params,
transform=transform_val,
joint_transform=transform_joint)
# The sampler for training
dataset_train = sampler.ATOMSampler([lasot_train, got10k_train, trackingnet_train], [1,1,1],
samples_per_epoch=1000*settings.batch_size, max_gap=50, processing=data_processing_train)
# dataset_train = sampler.ATOMSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [1,1,1,1],
# samples_per_epoch=1000*settings.batch_size, max_gap=50, processing=data_processing_train)
# The loader for training
loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,
shuffle=True, drop_last=True, stack_dim=1)
# The sampler for validation
dataset_val = sampler.ATOMSampler([got10k_val], [1], samples_per_epoch=500*settings.batch_size, max_gap=50,
processing=data_processing_val)
dataset_val.datatype = 'val'
# The loader for validation
loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,
shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)
# Create network and actor
net = atom_models.atom_resnet18(backbone_pretrained=True)
objective = nn.MSELoss()
actor = actors.AtomActor(net=net, objective=objective)
# Optimizer
optimizer = optim.Adam(actor.net.bb_regressor.parameters(), lr=1e-3)
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)  # after every 15 epochs, lr = lr * gamma
# Create trainer
trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)
# Run training (set fail_safe=False if you are debugging)
trainer.train(50, load_latest=False, fail_safe=True)
# trainer.train(50, load_latest=True, fail_safe=False)
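# Editor's note (hedged): this settings module is normally not executed directly.
# In the pytracking/LTR framework, training is typically launched through the
# repo's run_training entry point, roughly:
#   python run_training.py bbreg atom
# which constructs the `settings` object and calls run(settings). The exact
# script name and arguments may differ between framework versions.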
| 55.57 | 133 | 0.638114 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,250 | 0.22478 |
b44b2dc4ce40901657329bbb40489909361c416f | 281 | py | Python | exercise 8.6.py | tuyanyang/python_exercise | c1027c2451d7f3c0fd00152a5430386d930ef9ef | ["Apache-2.0"] | null | null | null | exercise 8.6.py | tuyanyang/python_exercise | c1027c2451d7f3c0fd00152a5430386d930ef9ef | ["Apache-2.0"] | null | null | null | exercise 8.6.py | tuyanyang/python_exercise | c1027c2451d7f3c0fd00152a5430386d930ef9ef | ["Apache-2.0"] | null | null | null |
nums = list()
while True:
nStr = input('Enter a number: ')
try:
if nStr == 'done':
break
n = float(nStr)
nums.append(n)
    except ValueError:  # anything float() cannot parse
print('Invalid input')
continue
print('Maximum: ',max(nums))
print('Minimum: ',min(nums))
| 21.615385 | 36 | 0.519573 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 61 | 0.217082 |
b44cdf1520f9983049c66891c92f13dc5a062fff | 5,899 | py | Python | gui/activity_list.py | keremkoseoglu/Kifu | bed7a15f71e2345c654b1adab07a5edecdbae342 | ["MIT"] | null | null | null | gui/activity_list.py | keremkoseoglu/Kifu | bed7a15f71e2345c654b1adab07a5edecdbae342 | ["MIT"] | 82 | 2020-06-25T09:45:01.000Z | 2022-03-31T09:35:31.000Z | gui/activity_list.py | keremkoseoglu/Kifu | bed7a15f71e2345c654b1adab07a5edecdbae342 | ["MIT"] | null | null | null |
""" Activity list window """
import tkinter
import tkinter.ttk
from model import activity, invoice
from model.activity import Activity
from model.company import Company
from gui.activity import ActivityWindow
from gui.activity_split import ActivitySplit
from gui.invoice import InvoiceWindow
from gui.popup_file import popup_email
from gui.prime_singleton import PrimeSingleton
from util import activity_xlsx_report, backup, date_time
import config
class ActivityListWindow(tkinter.Toplevel):
""" Activity list window """
_BUTTON_WIDTH = 150
_WINDOW_WIDTH = 1200
_WINDOW_HEIGHT = 400
_Y_SPACING = 10
def __init__(self):
# Initialization
tkinter.Toplevel.__init__(self)
self.wm_geometry(str(self._WINDOW_WIDTH) + "x" + str(self._WINDOW_HEIGHT))
# Build tree
self._tree = tkinter.ttk.Treeview(self)
tree_height = self._WINDOW_HEIGHT - config.CONSTANTS["GUI_CELL_HEIGHT"] - self._Y_SPACING
self._tree.place(x=0, y=0, width=self._WINDOW_WIDTH, height=tree_height)
cell_y = tree_height + self._Y_SPACING
self._tree["columns"] = ("Client", "Project", "Location", "GUID")
self._tree.heading("Client", text="Client")
self._tree.heading("Project", text="Project")
self._tree.heading("Location", text="Location")
self._tree.heading("GUID", text="GUID")
# Fill tree with data
self._activities = []
self._tree_content = {}
self._fill_tree_with_activities()
# Buttons
cell_x = 0
edit_button = tkinter.Button(self, text="Edit", command=self._edit_click)
edit_button.place(x=cell_x, y=cell_y)
cell_x += self._BUTTON_WIDTH
edit_button = tkinter.Button(self, text="Excel", command=self._excel_click)
edit_button.place(x=cell_x, y=cell_y)
cell_x += self._BUTTON_WIDTH
split_button = tkinter.Button(self, text="Split", command=self._split_click)
split_button.place(x=cell_x, y=cell_y)
cell_x += self._BUTTON_WIDTH
invoice_button = tkinter.Button(self, text="Invoice", command=self._invoice_click)
invoice_button.place(x=cell_x, y=cell_y)
cell_x += self._BUTTON_WIDTH
invoice_button = tkinter.Button(self, text="Delete", command=self._delete_click)
invoice_button.place(x=cell_x, y=cell_y)
cell_x += self._BUTTON_WIDTH
@property
def _first_selected_activity(self) -> activity.Activity:
selected_activities = self._selected_activities
if len(selected_activities) == 0:
return None
return selected_activities[0]
@property
def _selected_activities(self) -> []:
selected_activities = []
for selected_id in self._tree.selection():
selected_activity = self._tree_content[selected_id]
selected_activities.append(selected_activity)
return selected_activities
def _delete_click(self):
deletable_activities = self._selected_activities
if len(deletable_activities) == 0:
return
deletable_guids = []
for act in deletable_activities:
deletable_guids.append(act.guid)
backup.execute()
Activity.delete_activities(deletable_guids)
self._fill_tree_with_activities()
PrimeSingleton.get().refresh()
def _edit_click(self):
first_selected_activity = self._first_selected_activity
if first_selected_activity is None:
return
activity_window = ActivityWindow()
activity_window.fill_with_activity(first_selected_activity)
self.after(1, self.destroy())
activity_window.mainloop()
def _excel_click(self):
selected_activity_objects = self._selected_activities
xlsx_report = activity_xlsx_report.Report()
xlsx_report.generate_with_activity_objects(selected_activity_objects)
activity_company = Company(config.CONSTANTS["COMPANY_NAME_1E1"])
popup_email(recipients=activity_company.activity_emails,
subject="Bu ayki aktivitelerim",
attachment=xlsx_report.last_saved_files[0])
def _fill_tree_with_activities(self):
self._activities = Activity.get_activities()
self._activities["activities"] = sorted(
self._activities["activities"],
key=lambda x: x["date"],
reverse=False)
self._tree_content = {}
self._tree.delete(*self._tree.get_children())
for activity_line in self._activities["activities"]:
activity_obj = activity.Activity(activity_line)
project_obj = activity_obj.project
tree_val = (
project_obj.client.name,
project_obj.name,
activity_obj.location,
activity_obj.guid
)
id_in_tree = self._tree.insert(
'',
'end',
text=date_time.get_formatted_date(activity_obj.date),
value=tree_val
)
self._tree_content[id_in_tree] = activity_obj
self.update()
def _invoice_click(self):
selected_activities = self._selected_activities
if len(selected_activities) == 0:
return
new_invoice = invoice.get_invoice_obj_from_activities(selected_activities)
invoice_window = InvoiceWindow()
invoice_window.fill_with_invoice(new_invoice, browser=True, invoice_dir=True)
invoice_window.mainloop()
def _split_click(self):
first_selected_activity = self._first_selected_activity
if first_selected_activity is None:
return
activity_split = ActivitySplit()
activity_split.fill_with_activity(first_selected_activity)
self.after(1, self.destroy())
activity_split.mainloop()
| 34.098266 | 97 | 0.66418 | 5,447 | 0.923377 | 0 | 0 | 525 | 0.088998 | 0 | 0 | 369 | 0.062553 |
b44e0121e131edfd41c92b9e516f42e320c6b70f | 3,551 | py | Python | src/cactus/shared/commonTest.py | thiagogenez/cactus | 910234eb8bafca33e6a219079c8d988b6f43bc59 | ["MIT-0"] | 209 | 2016-11-12T14:16:50.000Z | 2022-03-30T04:44:11.000Z | src/cactus/shared/commonTest.py | thiagogenez/cactus | 910234eb8bafca33e6a219079c8d988b6f43bc59 | ["MIT-0"] | 468 | 2016-11-06T01:16:43.000Z | 2022-03-31T16:24:37.000Z | src/cactus/shared/commonTest.py | thiagogenez/cactus | 910234eb8bafca33e6a219079c8d988b6f43bc59 | ["MIT-0"] | 75 | 2017-03-09T22:19:27.000Z | 2022-03-14T22:03:33.000Z |
import os
import shutil
import unittest
from base64 import b64encode
from sonLib.bioio import TestStatus
from sonLib.bioio import getTempFile
from sonLib.bioio import getTempDirectory
from sonLib.bioio import system
from toil.job import Job
from toil.common import Toil
from cactus.shared.common import cactus_call, ChildTreeJob
class TestCase(unittest.TestCase):
def setUp(self):
self.testNo = TestStatus.getTestSetup(1, 5, 10, 100)
self.tempDir = getTempDirectory(os.getcwd())
self.tempFiles = []
unittest.TestCase.setUp(self)
def tearDown(self):
unittest.TestCase.tearDown(self)
system("rm -rf %s" % self.tempDir)
@TestStatus.shortLength
def testCactusCall(self):
inputFile = getTempFile(rootDir=self.tempDir)
with open("/dev/urandom", "rb") as randText:
with open(inputFile, 'w') as fh:
fh.write(b64encode(randText.read(1024)).decode())
with open(inputFile) as fh:
input = "".join(fh.read().split("\n"))
#Send input to container's stdin through a file, get output
#from stdout
output = "".join(cactus_call(infile=inputFile, check_output=True,
parameters=["docker_test_script"]).split("\n"))
self.assertEqual(input, output)
#Send input as string, get output from stdout
output = "".join(cactus_call(stdin_string=input, check_output=True,
parameters=["docker_test_script"]).split("\n"))
self.assertEqual(input, output)
@TestStatus.shortLength
def testCactusCallPipes(self):
inputFile = getTempFile(rootDir=self.tempDir)
with open(inputFile, 'w') as f:
f.write('foobar\n')
# using 'cat' here rather than infile is intentional; it tests
# whether the directory is mounted into containers correctly.
output = cactus_call(parameters=[['cat', inputFile],
['sed', 's/foo/baz/g'],
['awk', '{ print "quux" $0 }']],
check_output=True)
self.assertEqual(output, 'quuxbazbar\n')
@TestStatus.mediumLength
def testChildTreeJob(self):
"""Check that the ChildTreeJob class runs all children."""
numChildren = 100
flagDir = getTempDirectory()
options = Job.Runner.getDefaultOptions(getTempDirectory())
shutil.rmtree(options.jobStore)
with Toil(options) as toil:
toil.start(CTTestParent(flagDir, numChildren))
# Check that all jobs ran
for i in range(numChildren):
self.assertTrue(os.path.exists(os.path.join(flagDir, str(i))))
shutil.rmtree(flagDir)
class CTTestParent(ChildTreeJob):
def __init__(self, flagDir, numChildren):
self.flagDir = flagDir
self.numChildren = numChildren
super(CTTestParent, self).__init__()
def run(self, fileStore):
for i in range(self.numChildren):
self.addChild(CTTestChild(self.flagDir, i))
class CTTestChild(Job):
def __init__(self, flagDir, index):
self.flagDir = flagDir
self.index = index
super(CTTestChild, self).__init__()
def run(self, fileStore):
# Mark that this job has run using a flag file
path = os.path.join(self.flagDir, str(self.index))
with open(path, 'w') as f:
# Empty file
f.write('')
if __name__ == '__main__':
unittest.main()
| 34.475728 | 84 | 0.619825 | 3,167 | 0.891861 | 0 | 0 | 2,088 | 0.588003 | 0 | 0 | 561 | 0.157984 |
b44e0a41d16e0ba8bfc1be48250cce3e7506e1d1 | 7,185 | py | Python | porespy/networks/__getnet__.py | hfathian/porespy | 8747e675ba8e6410d8448492c70f6911e0eb816a | ["MIT"] | 3 | 2020-09-02T20:02:55.000Z | 2021-07-09T03:50:49.000Z | porespy/networks/__getnet__.py | hfathian/porespy | 8747e675ba8e6410d8448492c70f6911e0eb816a | ["MIT"] | null | null | null | porespy/networks/__getnet__.py | hfathian/porespy | 8747e675ba8e6410d8448492c70f6911e0eb816a | ["MIT"] | null | null | null |
import sys
import numpy as np
import openpnm as op
from tqdm import tqdm
import scipy.ndimage as spim
from porespy.tools import extend_slice
import openpnm.models.geometry as op_gm
def regions_to_network(im, dt=None, voxel_size=1):
r"""
Analyzes an image that has been partitioned into pore regions and extracts
the pore and throat geometry as well as network connectivity.
Parameters
----------
im : ND-array
An image of the pore space partitioned into individual pore regions.
Note that this image must have zeros indicating the solid phase.
dt : ND-array
The distance transform of the pore space. If not given it will be
calculated, but it can save time to provide one if available.
voxel_size : scalar
The resolution of the image, expressed as the length of one side of a
voxel, so the volume of a voxel would be **voxel_size**-cubed. The
default is 1, which is useful when overlaying the PNM on the original
image since the scale of the image is alway 1 unit lenth per voxel.
Returns
-------
A dictionary containing all the pore and throat size data, as well as the
network topological information. The dictionary names use the OpenPNM
convention (i.e. 'pore.coords', 'throat.conns') so it may be converted
directly to an OpenPNM network object using the ``update`` command.
"""
print('-' * 60, flush=True)
print('Extracting pore and throat information from image', flush=True)
from skimage.morphology import disk, ball
struc_elem = disk if im.ndim == 2 else ball
# if ~np.any(im == 0):
# raise Exception('The received image has no solid phase (0\'s)')
if dt is None:
dt = spim.distance_transform_edt(im > 0)
dt = spim.gaussian_filter(input=dt, sigma=0.5)
# Get 'slices' into im for each pore region
slices = spim.find_objects(im)
# Initialize arrays
Ps = np.arange(1, np.amax(im)+1)
Np = np.size(Ps)
p_coords = np.zeros((Np, im.ndim), dtype=float)
p_volume = np.zeros((Np, ), dtype=float)
p_dia_local = np.zeros((Np, ), dtype=float)
p_dia_global = np.zeros((Np, ), dtype=float)
p_label = np.zeros((Np, ), dtype=int)
p_area_surf = np.zeros((Np, ), dtype=int)
t_conns = []
t_dia_inscribed = []
t_area = []
t_perimeter = []
t_coords = []
# dt_shape = np.array(dt.shape)
# Start extracting size information for pores and throats
for i in tqdm(Ps, file=sys.stdout):
pore = i - 1
if slices[pore] is None:
continue
s = extend_slice(slices[pore], im.shape)
sub_im = im[s]
sub_dt = dt[s]
pore_im = sub_im == i
padded_mask = np.pad(pore_im, pad_width=1, mode='constant')
pore_dt = spim.distance_transform_edt(padded_mask)
s_offset = np.array([i.start for i in s])
p_label[pore] = i
p_coords[pore, :] = spim.center_of_mass(pore_im) + s_offset
p_volume[pore] = np.sum(pore_im)
p_dia_local[pore] = (2*np.amax(pore_dt)) - np.sqrt(3)
p_dia_global[pore] = 2*np.amax(sub_dt)
p_area_surf[pore] = np.sum(pore_dt == 1)
im_w_throats = spim.binary_dilation(input=pore_im, structure=struc_elem(1))
im_w_throats = im_w_throats*sub_im
Pn = np.unique(im_w_throats)[1:] - 1
for j in Pn:
if j > pore:
t_conns.append([pore, j])
vx = np.where(im_w_throats == (j + 1))
t_dia_inscribed.append(2*np.amax(sub_dt[vx]))
t_perimeter.append(np.sum(sub_dt[vx] < 2))
t_area.append(np.size(vx[0]))
t_inds = tuple([i+j for i, j in zip(vx, s_offset)])
temp = np.where(dt[t_inds] == np.amax(dt[t_inds]))[0][0]
if im.ndim == 2:
t_coords.append(tuple((t_inds[0][temp],
t_inds[1][temp])))
else:
t_coords.append(tuple((t_inds[0][temp],
t_inds[1][temp],
t_inds[2][temp])))
# Clean up values
Nt = len(t_dia_inscribed) # Get number of throats
if im.ndim == 2: # If 2D, add 0's in 3rd dimension
p_coords = np.vstack((p_coords.T, np.zeros((Np, )))).T
t_coords = np.vstack((np.array(t_coords).T, np.zeros((Nt, )))).T
net = {}
net['pore.all'] = np.ones((Np, ), dtype=bool)
net['throat.all'] = np.ones((Nt, ), dtype=bool)
net['pore.coords'] = np.copy(p_coords)*voxel_size
net['pore.centroid'] = np.copy(p_coords)*voxel_size
net['throat.centroid'] = np.array(t_coords)*voxel_size
net['throat.conns'] = np.array(t_conns)
net['pore.label'] = np.array(p_label)
net['pore.volume'] = np.copy(p_volume)*(voxel_size**3)
net['throat.volume'] = np.zeros((Nt, ), dtype=float)
net['pore.diameter'] = np.copy(p_dia_local)*voxel_size
net['pore.inscribed_diameter'] = np.copy(p_dia_local)*voxel_size
net['pore.equivalent_diameter'] = 2*((3/4*net['pore.volume']/np.pi)**(1/3))
net['pore.extended_diameter'] = np.copy(p_dia_global)*voxel_size
net['pore.surface_area'] = np.copy(p_area_surf)*(voxel_size)**2
net['throat.diameter'] = np.array(t_dia_inscribed)*voxel_size
net['throat.inscribed_diameter'] = np.array(t_dia_inscribed)*voxel_size
net['throat.area'] = np.array(t_area)*(voxel_size**2)
net['throat.perimeter'] = np.array(t_perimeter)*voxel_size
net['throat.equivalent_diameter'] = (np.array(t_area) * (voxel_size**2))**0.5
P12 = net['throat.conns']
PT1 = np.sqrt(np.sum(((p_coords[P12[:, 0]]-t_coords) * voxel_size)**2, axis=1))
PT2 = np.sqrt(np.sum(((p_coords[P12[:, 1]]-t_coords) * voxel_size)**2, axis=1))
net['throat.total_length'] = PT1 + PT2
PT1 = PT1-p_dia_local[P12[:, 0]]/2*voxel_size
PT2 = PT2-p_dia_local[P12[:, 1]]/2*voxel_size
net['throat.length'] = PT1 + PT2
dist = (p_coords[P12[:, 0]]-p_coords[P12[:, 1]])*voxel_size
net['throat.direct_length'] = np.sqrt(np.sum(dist**2, axis=1))
# Make a dummy openpnm network to get the conduit lengths
pn = op.network.GenericNetwork()
pn.update(net)
pn.add_model(propname='throat.endpoints',
model=op_gm.throat_endpoints.spherical_pores,
pore_diameter='pore.inscribed_diameter',
throat_diameter='throat.inscribed_diameter')
pn.add_model(propname='throat.conduit_lengths',
model=op_gm.throat_length.conduit_lengths)
pn.add_model(propname='pore.area',
model=op_gm.pore_area.sphere)
net['throat.endpoints.head'] = pn['throat.endpoints.head']
net['throat.endpoints.tail'] = pn['throat.endpoints.tail']
net['throat.conduit_lengths.pore1'] = pn['throat.conduit_lengths.pore1']
net['throat.conduit_lengths.pore2'] = pn['throat.conduit_lengths.pore2']
net['throat.conduit_lengths.throat'] = pn['throat.conduit_lengths.throat']
net['pore.area'] = pn['pore.area']
prj = pn.project
prj.clear()
wrk = op.Workspace()
wrk.close_project(prj)
return net
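# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# regions_to_network() expects an image in which 0 marks solid and every pore
# region carries a unique positive label. The demo below hand-builds two touching
# box-shaped regions purely for illustration (real workflows would obtain the
# labels from a watershed/SNOW-style partitioning) and loads the result into an
# OpenPNM network. Function name and sizes are hypothetical.
def _demo_regions_to_network():
    # Two adjacent pore regions (labels 1 and 2) inside a solid block
    regions = np.zeros((20, 20, 20), dtype=int)
    regions[2:18, 2:10, 2:18] = 1
    regions[2:18, 10:18, 2:18] = 2
    net = regions_to_network(regions, voxel_size=1e-6)
    pn = op.network.GenericNetwork()
    pn.update(net)
    return pn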
| 43.545455 | 83 | 0.622825 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,433 | 0.338622 |
b44ef5d465bb9fde348df90c5e65dba1ad7814be | 67,560 | py | Python | pandas/core/internals.py | lodagro/pandas | dfcf74679a273395cc9d7b3db78a1fbbc17c4f57 | ["PSF-2.0", "BSD-2-Clause", "BSD-3-Clause"] | null | null | null | pandas/core/internals.py | lodagro/pandas | dfcf74679a273395cc9d7b3db78a1fbbc17c4f57 | ["PSF-2.0", "BSD-2-Clause", "BSD-3-Clause"] | null | null | null | pandas/core/internals.py | lodagro/pandas | dfcf74679a273395cc9d7b3db78a1fbbc17c4f57 | ["PSF-2.0", "BSD-2-Clause", "BSD-3-Clause"] | null | null | null |
import itertools
from datetime import datetime
from numpy import nan
import numpy as np
from pandas.core.common import _possibly_downcast_to_dtype, isnull
from pandas.core.index import Index, MultiIndex, _ensure_index, _handle_legacy_indexes
from pandas.core.indexing import _check_slice_bounds, _maybe_convert_indices
import pandas.core.common as com
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.core.expressions as expressions
from pandas.tslib import Timestamp
from pandas.util import py3compat
class Block(object):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['items', 'ref_items', '_ref_locs', 'values', 'ndim']
is_numeric = False
is_bool = False
is_object = False
_can_hold_na = False
_downcast_dtype = None
def __init__(self, values, items, ref_items, ndim=2):
if issubclass(values.dtype.type, basestring):
values = np.array(values, dtype=object)
if values.ndim != ndim:
raise ValueError('Wrong number of dimensions')
if len(items) != len(values):
raise ValueError('Wrong number of items passed %d, indices imply %d'
% (len(items), len(values)))
self._ref_locs = None
self.values = values
self.ndim = ndim
self.items = _ensure_index(items)
self.ref_items = _ensure_index(ref_items)
def _gi(self, arg):
return self.values[arg]
@property
def ref_locs(self):
if self._ref_locs is None:
indexer = self.ref_items.get_indexer(self.items)
indexer = com._ensure_platform_int(indexer)
if (indexer == -1).any():
raise AssertionError('Some block items were not in block '
'ref_items')
self._ref_locs = indexer
return self._ref_locs
def set_ref_items(self, ref_items, maybe_rename=True):
"""
If maybe_rename=True, need to set the items for this guy
"""
if not isinstance(ref_items, Index):
raise AssertionError('block ref_items must be an Index')
if maybe_rename:
self.items = ref_items.take(self.ref_locs)
self.ref_items = ref_items
def __repr__(self):
shape = ' x '.join([com.pprint_thing(s) for s in self.shape])
name = type(self).__name__
result = '%s: %s, %s, dtype %s' % (
name, com.pprint_thing(self.items), shape, self.dtype)
if py3compat.PY3:
return unicode(result)
return com.console_encode(result)
def __contains__(self, item):
return item in self.items
def __len__(self):
return len(self.values)
def __getstate__(self):
# should not pickle generally (want to share ref_items), but here for
# completeness
return (self.items, self.ref_items, self.values)
def __setstate__(self, state):
items, ref_items, values = state
self.items = _ensure_index(items)
self.ref_items = _ensure_index(ref_items)
self.values = values
self.ndim = values.ndim
@property
def shape(self):
return self.values.shape
@property
def itemsize(self):
return self.values.itemsize
@property
def dtype(self):
return self.values.dtype
def copy(self, deep=True):
values = self.values
if deep:
values = values.copy()
return make_block(values, self.items, self.ref_items)
def merge(self, other):
if not self.ref_items.equals(other.ref_items):
raise AssertionError('Merge operands must have same ref_items')
# Not sure whether to allow this or not
# if not union_ref.equals(other.ref_items):
# union_ref = self.ref_items + other.ref_items
return _merge_blocks([self, other], self.ref_items)
def reindex_axis(self, indexer, axis=1, fill_value=np.nan, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
new_values = com.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
return make_block(new_values, self.items, self.ref_items)
def reindex_items_from(self, new_ref_items, copy=True):
"""
Reindex to only those items contained in the input set of items
E.g. if you have ['a', 'b'], and the input items is ['b', 'c', 'd'],
then the resulting items will be ['b']
Returns
-------
reindexed : Block
"""
new_ref_items, indexer = self.items.reindex(new_ref_items)
if indexer is None:
new_items = new_ref_items
new_values = self.values.copy() if copy else self.values
else:
masked_idx = indexer[indexer != -1]
new_values = com.take_nd(self.values, masked_idx, axis=0,
allow_fill=False)
new_items = self.items.take(masked_idx)
return make_block(new_values, new_items, new_ref_items)
def get(self, item):
loc = self.items.get_loc(item)
return self.values[loc]
def set(self, item, value):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
loc = self.items.get_loc(item)
self.values[loc] = value
def delete(self, item):
"""
Returns
-------
y : Block (new object)
"""
loc = self.items.get_loc(item)
new_items = self.items.delete(loc)
new_values = np.delete(self.values, loc, 0)
return make_block(new_values, new_items, self.ref_items)
def split_block_at(self, item):
"""
Split block into zero or more blocks around columns with given label,
for "deleting" a column without having to copy data by returning views
on the original array.
Returns
-------
generator of Block
"""
loc = self.items.get_loc(item)
if type(loc) == slice or type(loc) == int:
mask = [True] * len(self)
mask[loc] = False
else: # already a mask, inverted
mask = -loc
for s, e in com.split_ranges(mask):
yield make_block(self.values[s:e],
self.items[s:e].copy(),
self.ref_items)
def fillna(self, value, inplace=False, downcast=None):
if not self._can_hold_na:
if inplace:
return self
else:
return self.copy()
new_values = self.values if inplace else self.values.copy()
mask = com.isnull(new_values)
np.putmask(new_values, mask, value)
block = make_block(new_values, self.items, self.ref_items)
if downcast:
block = block.downcast()
return block
def downcast(self, dtypes = None):
""" try to downcast each item to the dict of dtypes if present """
if dtypes is None:
dtypes = dict()
values = self.values
blocks = []
for i, item in enumerate(self.items):
dtype = dtypes.get(item,self._downcast_dtype)
if dtype is None:
nv = _block_shape(values[i])
blocks.append(make_block(nv, [ item ], self.ref_items))
continue
nv = _possibly_downcast_to_dtype(values[i], np.dtype(dtype))
nv = _block_shape(nv)
blocks.append(make_block(nv, [ item ], self.ref_items))
return blocks
def astype(self, dtype, copy = True, raise_on_error = True):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
"""
try:
newb = make_block(com._astype_nansafe(self.values, dtype, copy = copy),
self.items, self.ref_items)
except:
if raise_on_error is True:
raise
newb = self.copy() if copy else self
if newb.is_numeric and self.is_numeric:
if (newb.shape != self.shape or
(not copy and newb.itemsize < self.itemsize)):
raise TypeError("cannot set astype for copy = [%s] for dtype "
"(%s [%s]) with smaller itemsize that current "
"(%s [%s])" % (copy, self.dtype.name,
self.itemsize, newb.dtype.name, newb.itemsize))
return newb
def convert(self, copy = True, **kwargs):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we are not an ObjectBlock here! """
return self.copy() if copy else self
def _can_hold_element(self, value):
raise NotImplementedError()
def _try_cast(self, value):
raise NotImplementedError()
def _try_cast_result(self, result):
""" try to cast the result to our original type,
we may have roundtripped thru object in the mean-time """
return result
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def to_native_types(self, slicer=None, na_rep='', **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:,slicer]
values = np.array(values,dtype=object)
mask = isnull(values)
values[mask] = na_rep
return values.tolist()
def replace(self, to_replace, value, inplace=False, filter=None):
""" replace the to_replace value with value, possible to create new blocks here
this is just a call to putmask """
mask = com.mask_missing(self.values, to_replace)
if filter is not None:
for i, item in enumerate(self.items):
if item not in filter:
mask[i] = False
if not mask.any():
if inplace:
return [ self ]
return [ self.copy() ]
return self.putmask(mask, value, inplace=inplace)
def putmask(self, mask, new, inplace=False):
""" putmask the data to the block; it is possible that we may create a new dtype of block
return the resulting block(s) """
new_values = self.values if inplace else self.values.copy()
# may need to align the new
if hasattr(new, 'reindex_axis'):
axis = getattr(new, '_het_axis', 0)
new = new.reindex_axis(self.items, axis=axis, copy=False).values.T
# may need to align the mask
if hasattr(mask, 'reindex_axis'):
axis = getattr(mask, '_het_axis', 0)
mask = mask.reindex_axis(self.items, axis=axis, copy=False).values.T
if self._can_hold_element(new):
new = self._try_cast(new)
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.any():
# need to go column by column
new_blocks = []
for i, item in enumerate(self.items):
m = mask[i]
# need a new block
if m.any():
n = new[i] if isinstance(new, np.ndarray) else new
# type of the new block
dtype, _ = com._maybe_promote(np.array(n).dtype)
                    # we need to explicitly astype here to make a copy
nv = new_values[i].astype(dtype)
# we create a new block type
np.putmask(nv, m, n)
else:
nv = new_values[i] if inplace else new_values[i].copy()
nv = _block_shape(nv)
new_blocks.append(make_block(nv, [ item ], self.ref_items))
return new_blocks
if inplace:
return [ self ]
return [ make_block(new_values, self.items, self.ref_items) ]
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, missing=None, coerce=False):
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return self
else:
return self.copy()
values = self.values if inplace else self.values.copy()
if values.ndim != 2:
raise NotImplementedError
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
if missing is None:
mask = None
else: # todo create faster fill func without masking
mask = com.mask_missing(transf(values), missing)
if method == 'pad':
com.pad_2d(transf(values), limit=limit, mask=mask)
else:
com.backfill_2d(transf(values), limit=limit, mask=mask)
return make_block(values, self.items, self.ref_items)
def take(self, indexer, axis=1):
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
new_values = com.take_nd(self.values, indexer, axis=axis,
allow_fill=False)
return make_block(new_values, self.items, self.ref_items)
def get_values(self, dtype):
return self.values
def diff(self, n):
""" return block for the diff of the values """
new_values = com.diff(self.values, n, axis=1)
return make_block(new_values, self.items, self.ref_items)
def shift(self, indexer, periods):
""" shift the block by periods, possibly upcast """
new_values = self.values.take(indexer, axis=1)
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(new_values)
if periods > 0:
new_values[:, :periods] = fill_value
else:
new_values[:, periods:] = fill_value
return make_block(new_values, self.items, self.ref_items)
def eval(self, func, other, raise_on_error = True, try_cast = False):
"""
evaluate the block; return result block from the result
Parameters
----------
func : how to combine self, other
other : a ndarray/object
raise_on_error : if True, raise when I can't perform the function, False by default (and just return
the data that we had coming in)
Returns
-------
a new block, the result of the func
"""
values = self.values
# see if we can align other
if hasattr(other, 'reindex_axis'):
axis = getattr(other, '_het_axis', 0)
other = other.reindex_axis(self.items, axis=axis, copy=True).values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim or values.shape == other.shape[::-1]:
values = values.T
is_transposed = True
values, other = self._try_coerce_args(values, other)
args = [ values, other ]
try:
result = self._try_coerce_result(func(*args))
except (Exception), detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values [%s]'
% (repr(other),str(detail)))
else:
# return the values
result = np.empty(values.shape,dtype='O')
result.fill(np.nan)
if not isinstance(result, np.ndarray):
raise TypeError('Could not compare [%s] with block values'
% repr(other))
if is_transposed:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return make_block(result, self.items, self.ref_items)
def where(self, other, cond, raise_on_error = True, try_cast = False):
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
raise_on_error : if True, raise when I can't perform the function, False by default (and just return
the data that we had coming in)
Returns
-------
a new block(s), the result of the func
"""
values = self.values
# see if we can align other
if hasattr(other,'reindex_axis'):
axis = getattr(other,'_het_axis',0)
other = other.reindex_axis(self.items, axis=axis, copy=True).values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim or values.shape == other.shape[::-1]:
values = values.T
is_transposed = True
# see if we can align cond
if not hasattr(cond,'shape'):
raise ValueError("where must have a condition that is ndarray like")
if hasattr(cond,'reindex_axis'):
axis = getattr(cond,'_het_axis',0)
cond = cond.reindex_axis(self.items, axis=axis, copy=True).values
else:
cond = cond.values
# may need to undo transpose of values
if hasattr(values, 'ndim'):
if values.ndim != cond.ndim or values.shape == cond.shape[::-1]:
values = values.T
is_transposed = not is_transposed
# our where function
def func(c,v,o):
if c.ravel().all():
return v
v, o = self._try_coerce_args(v, o)
try:
return self._try_coerce_result(expressions.where(c, v, o, raise_on_error=True))
except (Exception), detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values [%s]'
% (repr(o),str(detail)))
else:
# return the values
result = np.empty(v.shape,dtype='float64')
result.fill(np.nan)
return result
def create_block(result, items, transpose = True):
if not isinstance(result, np.ndarray):
raise TypeError('Could not compare [%s] with block values'
% repr(other))
if transpose and is_transposed:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return make_block(result, items, self.ref_items)
# see if we can operate on the entire block, or need item-by-item
if not self._can_hold_na:
axis = cond.ndim-1
result_blocks = []
for item in self.items:
loc = self.items.get_loc(item)
item = self.items.take([loc])
v = values.take([loc],axis=axis)
c = cond.take([loc],axis=axis)
o = other.take([loc],axis=axis) if hasattr(other,'shape') else other
result = func(c,v,o)
if len(result) == 1:
result = np.repeat(result,self.shape[1:])
result = _block_shape(result,ndim=self.ndim,shape=self.shape[1:])
result_blocks.append(create_block(result, item, transpose = False))
return result_blocks
else:
result = func(cond,values,other)
return create_block(result, self.items)
class NumericBlock(Block):
is_numeric = True
_can_hold_na = True
def _try_cast_result(self, result):
return _possibly_downcast_to_dtype(result, self.dtype)
class FloatBlock(NumericBlock):
_downcast_dtype = 'int64'
def _can_hold_element(self, element):
if isinstance(element, np.ndarray):
return issubclass(element.dtype.type, (np.floating, np.integer))
return isinstance(element, (float, int))
def _try_cast(self, element):
try:
return float(element)
except: # pragma: no cover
return element
def to_native_types(self, slicer=None, na_rep='', float_format=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:,slicer]
values = np.array(values,dtype=object)
mask = isnull(values)
values[mask] = na_rep
if float_format:
imask = (-mask).ravel()
values.flat[imask] = np.array([ float_format % val for val in values.ravel()[imask] ])
return values.tolist()
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return issubclass(value.dtype.type, np.floating) and value.dtype == self.dtype
class ComplexBlock(NumericBlock):
def _can_hold_element(self, element):
return isinstance(element, complex)
def _try_cast(self, element):
try:
return complex(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.complexfloating)
class IntBlock(NumericBlock):
_can_hold_na = False
def _can_hold_element(self, element):
if isinstance(element, np.ndarray):
return issubclass(element.dtype.type, np.integer)
return com.is_integer(element)
def _try_cast(self, element):
try:
return int(element)
except: # pragma: no cover
return element
def should_store(self, value):
return com.is_integer_dtype(value) and value.dtype == self.dtype
class BoolBlock(NumericBlock):
is_bool = True
_can_hold_na = False
def _can_hold_element(self, element):
return isinstance(element, (int, bool))
def _try_cast(self, element):
try:
return bool(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
class ObjectBlock(Block):
is_object = True
_can_hold_na = True
@property
def is_bool(self):
""" we can be a bool if we have only bool values but are of type object """
return lib.is_bool_array(self.values.ravel())
def convert(self, convert_dates = True, convert_numeric = True, copy = True):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
# attempt to create new type blocks
blocks = []
for i, c in enumerate(self.items):
values = self.get(c)
values = com._possibly_convert_objects(values, convert_dates=convert_dates, convert_numeric=convert_numeric)
values = _block_shape(values)
items = self.items.take([i])
newb = make_block(values, items, self.ref_items)
blocks.append(newb)
return blocks
def _can_hold_element(self, element):
return True
def _try_cast(self, element):
return element
def should_store(self, value):
return not issubclass(value.dtype.type,
(np.integer, np.floating, np.complexfloating,
np.datetime64, np.bool_))
_NS_DTYPE = np.dtype('M8[ns]')
_TD_DTYPE = np.dtype('m8[ns]')
class DatetimeBlock(Block):
_can_hold_na = True
def __init__(self, values, items, ref_items, ndim=2):
if values.dtype != _NS_DTYPE:
values = tslib.cast_to_nanoseconds(values)
Block.__init__(self, values, items, ref_items, ndim=ndim)
def _gi(self, arg):
return lib.Timestamp(self.values[arg])
def _can_hold_element(self, element):
return com.is_integer(element) or isinstance(element, datetime)
def _try_cast(self, element):
try:
return int(element)
except:
return element
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments
we are going to compare vs i8, so coerce to integer
values is always ndarra like, other may not be """
values = values.view('i8')
if isinstance(other, datetime):
other = lib.Timestamp(other).asm8.view('i8')
elif isnull(other):
other = tslib.iNaT
else:
other = other.view('i8')
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
if isinstance(result, np.ndarray):
if result.dtype == 'i8':
result = tslib.array_to_datetime(result.astype(object).ravel()).reshape(result.shape)
elif isinstance(result, np.integer):
result = lib.Timestamp(result)
return result
def to_native_types(self, slicer=None, na_rep=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:,slicer]
mask = isnull(values)
rvalues = np.empty(self.shape,dtype=object)
if na_rep is None:
na_rep = 'NaT'
rvalues[mask] = na_rep
imask = (-mask).ravel()
if self.dtype == 'datetime64[ns]':
rvalues.flat[imask] = np.array([ Timestamp(val)._repr_base for val in values.ravel()[imask] ],dtype=object)
elif self.dtype == 'timedelta64[ns]':
rvalues.flat[imask] = np.array([ lib.repr_timedelta64(val) for val in values.ravel()[imask] ],dtype=object)
return rvalues.tolist()
def should_store(self, value):
return issubclass(value.dtype.type, np.datetime64)
def set(self, item, value):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
loc = self.items.get_loc(item)
if value.dtype != _NS_DTYPE:
value = tslib.cast_to_nanoseconds(value)
self.values[loc] = value
def get_values(self, dtype):
if dtype == object:
flat_i8 = self.values.ravel().view(np.int64)
res = tslib.ints_to_pydatetime(flat_i8)
return res.reshape(self.values.shape)
return self.values
def make_block(values, items, ref_items):
dtype = values.dtype
vtype = dtype.type
klass = None
if issubclass(vtype, np.floating):
klass = FloatBlock
elif issubclass(vtype, np.complexfloating):
klass = ComplexBlock
elif issubclass(vtype, np.datetime64):
klass = DatetimeBlock
elif issubclass(vtype, np.integer):
klass = IntBlock
elif dtype == np.bool_:
klass = BoolBlock
# try to infer a datetimeblock
if klass is None and np.prod(values.shape):
flat = values.ravel()
inferred_type = lib.infer_dtype(flat)
if inferred_type == 'datetime':
# we have an object array that has been inferred as datetime, so
# convert it
try:
values = tslib.array_to_datetime(flat).reshape(values.shape)
klass = DatetimeBlock
except: # it already object, so leave it
pass
if klass is None:
klass = ObjectBlock
return klass(values, items, ref_items, ndim=values.ndim)
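# Editor's note (illustrative sketch against the legacy API defined above, not a
# definitive recipe): make_block() dispatches on the dtype of `values`, e.g.
#   items = Index(['a', 'b'])
#   make_block(np.random.randn(2, 3), items, items)          # -> FloatBlock
#   make_block(np.zeros((2, 3), dtype=bool), items, items)   # -> BoolBlock
# Every block owned by a single BlockManager shares the same ref_items object,
# which _verify_integrity() below enforces.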
# TODO: flexible with index=None and/or items=None
class BlockManager(object):
"""
Core internal data structure to implement DataFrame
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = ['axes', 'blocks', '_known_consolidated', '_is_consolidated']
def __init__(self, blocks, axes, do_integrity_check=True):
self.axes = [_ensure_index(ax) for ax in axes]
self.blocks = blocks
ndim = len(axes)
for block in blocks:
if ndim != block.values.ndim:
raise AssertionError(('Number of Block dimensions (%d) must '
'equal number of axes (%d)')
% (block.values.ndim, ndim))
if do_integrity_check:
self._verify_integrity()
self._consolidate_check()
@classmethod
def make_empty(self):
return BlockManager([], [[], []])
def __nonzero__(self):
return True
@property
def ndim(self):
return len(self.axes)
def set_axis(self, axis, value):
cur_axis = self.axes[axis]
value = _ensure_index(value)
if len(value) != len(cur_axis):
raise Exception('Length mismatch (%d vs %d)'
% (len(value), len(cur_axis)))
self.axes[axis] = value
if axis == 0:
for block in self.blocks:
block.set_ref_items(self.items, maybe_rename=True)
# make items read only for now
def _get_items(self):
return self.axes[0]
items = property(fget=_get_items)
def get_dtype_counts(self):
""" return a dict of the counts of dtypes in BlockManager """
self._consolidate_inplace()
counts = dict()
for b in self.blocks:
counts[b.dtype.name] = counts.get(b.dtype,0) + b.shape[0]
return counts
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [b.items for b in self.blocks]
axes_array = [ax for ax in self.axes]
return axes_array, block_values, block_items
def __setstate__(self, state):
# discard anything after 3rd, support beta pickling format for a little
# while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [_ensure_index(ax) for ax in ax_arrays]
self.axes = _handle_legacy_indexes(self.axes)
self._is_consolidated = False
self._known_consolidated = False
blocks = []
for values, items in zip(bvalues, bitems):
blk = make_block(values, items, self.axes[0])
blocks.append(blk)
self.blocks = blocks
def __len__(self):
return len(self.items)
def __repr__(self):
output = 'BlockManager'
for i, ax in enumerate(self.axes):
if i == 0:
output += '\nItems: %s' % ax
else:
output += '\nAxis %d: %s' % (i, ax)
for block in self.blocks:
output += '\n%s' % repr(block)
return output
@property
def shape(self):
return tuple(len(ax) for ax in self.axes)
def _verify_integrity(self):
mgr_shape = self.shape
tot_items = sum(len(x.items) for x in self.blocks)
for block in self.blocks:
if block.ref_items is not self.items:
raise AssertionError("Block ref_items must be BlockManager "
"items")
if block.values.shape[1:] != mgr_shape[1:]:
construction_error(tot_items,block.values.shape[1:],self.axes)
if len(self.items) != tot_items:
raise AssertionError('Number of manager items must equal union of '
'block items')
def apply(self, f, *args, **kwargs):
""" iterate over the blocks, collect and create a new block manager
Parameters
----------
f : the callable or function name to operate on at the block level
axes : optional (if not supplied, use self.axes)
filter : list, if supplied, only apply f to blocks whose items intersect the filter; other blocks are passed through unchanged
"""
axes = kwargs.pop('axes',None)
filter = kwargs.get('filter')
result_blocks = []
for blk in self.blocks:
if filter is not None:
kwargs['filter'] = set(kwargs['filter'])
if not blk.items.isin(filter).any():
result_blocks.append(blk)
continue
if callable(f):
applied = f(blk, *args, **kwargs)
else:
applied = getattr(blk,f)(*args, **kwargs)
if isinstance(applied,list):
result_blocks.extend(applied)
else:
result_blocks.append(applied)
bm = self.__class__(result_blocks, axes or self.axes)
bm._consolidate_inplace()
return bm
def where(self, *args, **kwargs):
return self.apply('where', *args, **kwargs)
def eval(self, *args, **kwargs):
return self.apply('eval', *args, **kwargs)
def putmask(self, *args, **kwargs):
return self.apply('putmask', *args, **kwargs)
def diff(self, *args, **kwargs):
return self.apply('diff', *args, **kwargs)
def interpolate(self, *args, **kwargs):
return self.apply('interpolate', *args, **kwargs)
def shift(self, *args, **kwargs):
return self.apply('shift', *args, **kwargs)
def fillna(self, *args, **kwargs):
return self.apply('fillna', *args, **kwargs)
def downcast(self, *args, **kwargs):
return self.apply('downcast', *args, **kwargs)
def astype(self, *args, **kwargs):
return self.apply('astype', *args, **kwargs)
def convert(self, *args, **kwargs):
return self.apply('convert', *args, **kwargs)
def replace(self, *args, **kwargs):
return self.apply('replace', *args, **kwargs)
def replace_list(self, src_lst, dest_lst, inplace=False):
""" do a list replace """
# figure out our mask a-priori to avoid repeated replacements
values = self.as_matrix()
def comp(s):
if isnull(s):
return isnull(values)
return values == s
masks = [ comp(s) for i, s in enumerate(src_lst) ]
result_blocks = []
for blk in self.blocks:
# its possible to get multiple result blocks here
# replace ALWAYS will return a list
rb = [ blk if inplace else blk.copy() ]
for i, d in enumerate(dest_lst):
new_rb = []
for b in rb:
# get our mask for this element, sized to this
# particular block
m = masks[i][b.ref_locs]
if m.any():
new_rb.extend(b.putmask(m, d, inplace=True))
else:
new_rb.append(b)
rb = new_rb
result_blocks.extend(rb)
bm = self.__class__(result_blocks, self.axes)
bm._consolidate_inplace()
return bm
def is_consolidated(self):
"""
Return True if no two blocks share the same dtype
"""
if not self._known_consolidated:
self._consolidate_check()
return self._is_consolidated
def _consolidate_check(self):
dtypes = [blk.dtype.type for blk in self.blocks]
self._is_consolidated = len(dtypes) == len(set(dtypes))
self._known_consolidated = True
@property
def is_mixed_type(self):
self._consolidate_inplace()
return len(self.blocks) > 1
@property
def is_numeric_mixed_type(self):
self._consolidate_inplace()
return all([ block.is_numeric for block in self.blocks ])
def get_numeric_data(self, copy=False, type_list=None, as_blocks = False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
type_list : tuple of type, default None
Numeric types by default (Float/Complex/Int but not Datetime)
"""
if type_list is None:
filter_blocks = lambda block: block.is_numeric
else:
type_list = self._get_clean_block_types(type_list)
filter_blocks = lambda block: isinstance(block, type_list)
maybe_copy = lambda b: b.copy() if copy else b
num_blocks = [maybe_copy(b) for b in self.blocks if filter_blocks(b)]
if as_blocks:
return num_blocks
if len(num_blocks) == 0:
return BlockManager.make_empty()
indexer = np.sort(np.concatenate([b.ref_locs for b in num_blocks]))
new_items = self.items.take(indexer)
new_blocks = []
for b in num_blocks:
b = b.copy(deep=False)
b.ref_items = new_items
new_blocks.append(b)
new_axes = list(self.axes)
new_axes[0] = new_items
return BlockManager(new_blocks, new_axes, do_integrity_check=False)
def _get_clean_block_types(self, type_list):
if not isinstance(type_list, tuple):
try:
type_list = tuple(type_list)
except TypeError:
type_list = (type_list,)
type_map = {int: IntBlock, float: FloatBlock,
complex: ComplexBlock,
np.datetime64: DatetimeBlock,
datetime: DatetimeBlock,
bool: BoolBlock,
object: ObjectBlock}
type_list = tuple([type_map.get(t, t) for t in type_list])
return type_list
def get_bool_data(self, copy=False, as_blocks=False):
return self.get_numeric_data(copy=copy, type_list=(BoolBlock,),
as_blocks=as_blocks)
def get_slice(self, slobj, axis=0, raise_on_error=False):
new_axes = list(self.axes)
if raise_on_error:
_check_slice_bounds(slobj, new_axes[axis])
new_axes[axis] = new_axes[axis][slobj]
if axis == 0:
new_items = new_axes[0]
if len(self.blocks) == 1:
blk = self.blocks[0]
newb = make_block(blk.values[slobj], new_items,
new_items)
new_blocks = [newb]
else:
return self.reindex_items(new_items)
else:
new_blocks = self._slice_blocks(slobj, axis)
return BlockManager(new_blocks, new_axes, do_integrity_check=False)
def _slice_blocks(self, slobj, axis):
new_blocks = []
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = slobj
slicer = tuple(slicer)
for block in self.blocks:
newb = make_block(block.values[slicer], block.items,
block.ref_items)
new_blocks.append(newb)
return new_blocks
def get_series_dict(self):
# For DataFrame
return _blocks_to_series_dict(self.blocks, self.axes[1])
def __contains__(self, item):
return item in self.items
@property
def nblocks(self):
return len(self.blocks)
def copy(self, deep=True):
"""
Make deep or shallow copy of BlockManager
Parameters
----------
deep : boolean, default True
If False, return shallow copy (do not copy data)
Returns
-------
copy : BlockManager
"""
copy_blocks = [block.copy(deep=deep) for block in self.blocks]
# copy_axes = [ax.copy() for ax in self.axes]
copy_axes = list(self.axes)
return BlockManager(copy_blocks, copy_axes, do_integrity_check=False)
def as_matrix(self, items=None):
if len(self.blocks) == 0:
mat = np.empty(self.shape, dtype=float)
elif len(self.blocks) == 1:
blk = self.blocks[0]
if items is None or blk.items.equals(items):
# if not, then just call interleave per below
mat = blk.values
else:
mat = self.reindex_items(items).as_matrix()
else:
if items is None:
mat = self._interleave(self.items)
else:
mat = self.reindex_items(items).as_matrix()
return mat
def _interleave(self, items):
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
dtype = _interleaved_dtype(self.blocks)
items = _ensure_index(items)
result = np.empty(self.shape, dtype=dtype)
itemmask = np.zeros(len(items), dtype=bool)
# By construction, all of the items should be covered by one of the
# blocks
if items.is_unique:
for block in self.blocks:
indexer = items.get_indexer(block.items)
if (indexer == -1).any():
raise AssertionError('Items must contain all block items')
result[indexer] = block.get_values(dtype)
itemmask[indexer] = 1
else:
for block in self.blocks:
mask = items.isin(block.items)
indexer = mask.nonzero()[0]
if (len(indexer) != len(block.items)):
raise AssertionError('All items must be in block items')
result[indexer] = block.get_values(dtype)
itemmask[indexer] = 1
if not itemmask.all():
raise AssertionError('Some items were not contained in blocks')
return result
def xs(self, key, axis=1, copy=True):
if axis < 1:
raise AssertionError('Can only take xs across axis >= 1, got %d'
% axis)
loc = self.axes[axis].get_loc(key)
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = loc
slicer = tuple(slicer)
new_axes = list(self.axes)
# could be an array indexer!
if isinstance(loc, (slice, np.ndarray)):
new_axes[axis] = new_axes[axis][loc]
else:
new_axes.pop(axis)
new_blocks = []
if len(self.blocks) > 1:
if not copy:
raise Exception('cannot get view of mixed-type or '
'non-consolidated DataFrame')
for blk in self.blocks:
newb = make_block(blk.values[slicer], blk.items, blk.ref_items)
new_blocks.append(newb)
elif len(self.blocks) == 1:
vals = self.blocks[0].values[slicer]
if copy:
vals = vals.copy()
new_blocks = [make_block(vals, self.items, self.items)]
return BlockManager(new_blocks, new_axes)
def fast_2d_xs(self, loc, copy=False):
"""
"""
if len(self.blocks) == 1:
result = self.blocks[0].values[:, loc]
if copy:
result = result.copy()
return result
if not copy:
raise Exception('cannot get view of mixed-type or '
'non-consolidated DataFrame')
dtype = _interleaved_dtype(self.blocks)
items = self.items
n = len(items)
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
for j, item in enumerate(blk.items):
i = items.get_loc(item)
result[i] = blk._gi((j, loc))
return result
def consolidate(self):
"""
Join together blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
new_blocks = _consolidate(self.blocks, self.items)
return BlockManager(new_blocks, self.axes)
def _consolidate_inplace(self):
if not self.is_consolidated():
self.blocks = _consolidate(self.blocks, self.items)
self._is_consolidated = True
self._known_consolidated = True
def get(self, item):
_, block = self._find_block(item)
return block.get(item)
def iget(self, i):
item = self.items[i]
if self.items.is_unique:
return self.get(item)
else:
# ugh
try:
inds, = (self.items == item).nonzero()
except AttributeError: # MultiIndex
inds, = self.items.map(lambda x: x == item).nonzero()
_, block = self._find_block(item)
try:
binds, = (block.items == item).nonzero()
except AttributeError: # MultiIndex
binds, = block.items.map(lambda x: x == item).nonzero()
for j, (k, b) in enumerate(zip(inds, binds)):
if i == k:
return block.values[b]
raise Exception('Cannot have duplicate column names '
'split across dtypes')
def get_scalar(self, tup):
"""
Retrieve single item
"""
item = tup[0]
_, blk = self._find_block(item)
# this could obviously be seriously sped up in cython
item_loc = blk.items.get_loc(item),
full_loc = item_loc + tuple(ax.get_loc(x)
for ax, x in zip(self.axes[1:], tup[1:]))
return blk.values[full_loc]
def delete(self, item):
i, _ = self._find_block(item)
loc = self.items.get_loc(item)
self._delete_from_block(i, item)
if com._is_bool_indexer(loc): # dupe keys may return mask
loc = [i for i, v in enumerate(loc) if v]
new_items = self.items.delete(loc)
self.set_items_norename(new_items)
self._known_consolidated = False
def set(self, item, value):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
"""
value = _block_shape(value,self.ndim-1)
if value.shape[1:] != self.shape[1:]:
raise AssertionError('Shape of new values must be compatible '
'with manager shape')
def _set_item(item, arr):
i, block = self._find_block(item)
if not block.should_store(value):
# delete from block, create and append new block
self._delete_from_block(i, item)
self._add_new_block(item, arr, loc=None)
else:
block.set(item, arr)
try:
loc = self.items.get_loc(item)
if isinstance(loc, int):
_set_item(self.items[loc], value)
else:
subset = self.items[loc]
if len(value) != len(subset):
raise AssertionError(
'Number of items to set did not match')
for i, (item, arr) in enumerate(zip(subset, value)):
_set_item(item, arr[None, :])
except KeyError:
# insert at end
self.insert(len(self.items), item, value)
self._known_consolidated = False
def insert(self, loc, item, value):
if item in self.items:
raise Exception('cannot insert %s, already exists' % item)
try:
new_items = self.items.insert(loc, item)
self.set_items_norename(new_items)
# new block
self._add_new_block(item, value, loc=loc)
except:
# so our insertion operation failed, so back out of the new items
# GH 3010
new_items = self.items.delete(loc)
self.set_items_norename(new_items)
# re-raise
raise
if len(self.blocks) > 100:
self._consolidate_inplace()
self._known_consolidated = False
def set_items_norename(self, value):
value = _ensure_index(value)
self.axes[0] = value
for block in self.blocks:
block.set_ref_items(value, maybe_rename=False)
def _delete_from_block(self, i, item):
"""
Delete and maybe remove the whole block
"""
block = self.blocks.pop(i)
for b in block.split_block_at(item):
self.blocks.append(b)
def _add_new_block(self, item, value, loc=None):
# Do we care about dtype at the moment?
# hm, elaborate hack?
if loc is None:
loc = self.items.get_loc(item)
new_block = make_block(value, self.items[loc:loc + 1].copy(),
self.items)
self.blocks.append(new_block)
def _find_block(self, item):
self._check_have(item)
for i, block in enumerate(self.blocks):
if item in block:
return i, block
def _check_have(self, item):
if item not in self.items:
raise KeyError('no item named %s' % com.pprint_thing(item))
def reindex_axis(self, new_axis, method=None, axis=0, copy=True):
new_axis = _ensure_index(new_axis)
cur_axis = self.axes[axis]
if new_axis.equals(cur_axis):
if copy:
result = self.copy(deep=True)
result.axes[axis] = new_axis
if axis == 0:
# patch ref_items, #1823
for blk in result.blocks:
blk.ref_items = new_axis
return result
else:
return self
if axis == 0:
if method is not None:
raise AssertionError('method argument not supported for '
'axis == 0')
return self.reindex_items(new_axis)
new_axis, indexer = cur_axis.reindex(new_axis, method)
return self.reindex_indexer(new_axis, indexer, axis=axis)
def reindex_indexer(self, new_axis, indexer, axis=1, fill_value=np.nan):
"""
pandas-indexer with -1's only.
"""
if axis == 0:
return self._reindex_indexer_items(new_axis, indexer, fill_value)
new_blocks = []
for block in self.blocks:
newb = block.reindex_axis(indexer, axis=axis, fill_value=fill_value)
new_blocks.append(newb)
new_axes = list(self.axes)
new_axes[axis] = new_axis
return BlockManager(new_blocks, new_axes)
def _reindex_indexer_items(self, new_items, indexer, fill_value):
# TODO: less efficient than I'd like
item_order = com.take_1d(self.items.values, indexer)
# keep track of what items aren't found anywhere
mask = np.zeros(len(item_order), dtype=bool)
new_blocks = []
for blk in self.blocks:
blk_indexer = blk.items.get_indexer(item_order)
selector = blk_indexer != -1
# update with observed items
mask |= selector
if not selector.any():
continue
new_block_items = new_items.take(selector.nonzero()[0])
new_values = com.take_nd(blk.values, blk_indexer[selector], axis=0,
allow_fill=False)
new_blocks.append(make_block(new_values, new_block_items,
new_items))
if not mask.all():
na_items = new_items[-mask]
na_block = self._make_na_block(na_items, new_items,
fill_value=fill_value)
new_blocks.append(na_block)
new_blocks = _consolidate(new_blocks, new_items)
return BlockManager(new_blocks, [new_items] + self.axes[1:])
def reindex_items(self, new_items, copy=True, fill_value=np.nan):
"""
"""
new_items = _ensure_index(new_items)
data = self
if not data.is_consolidated():
data = data.consolidate()
return data.reindex_items(new_items)
# TODO: this part could be faster (!)
new_items, indexer = self.items.reindex(new_items)
# could have some pathological (MultiIndex) issues here
new_blocks = []
if indexer is None:
for blk in self.blocks:
if copy:
new_blocks.append(blk.reindex_items_from(new_items))
else:
blk.ref_items = new_items
new_blocks.append(blk)
else:
for block in self.blocks:
newb = block.reindex_items_from(new_items, copy=copy)
if len(newb.items) > 0:
new_blocks.append(newb)
mask = indexer == -1
if mask.any():
extra_items = new_items[mask]
na_block = self._make_na_block(extra_items, new_items,
fill_value=fill_value)
new_blocks.append(na_block)
new_blocks = _consolidate(new_blocks, new_items)
return BlockManager(new_blocks, [new_items] + self.axes[1:])
def _make_na_block(self, items, ref_items, fill_value=np.nan):
# TODO: infer dtypes other than float64 from fill_value
block_shape = list(self.shape)
block_shape[0] = len(items)
dtype, fill_value = com._infer_dtype_from_scalar(fill_value)
block_values = np.empty(block_shape, dtype=dtype)
block_values.fill(fill_value)
na_block = make_block(block_values, items, ref_items)
return na_block
def take(self, indexer, axis=1, verify=True):
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
indexer = com._ensure_platform_int(indexer)
n = len(self.axes[axis])
if verify:
indexer = _maybe_convert_indices(indexer, n)
if ((indexer == -1) | (indexer >= n)).any():
raise Exception('Indices must be nonzero and less than '
'the axis length')
new_axes = list(self.axes)
new_axes[axis] = self.axes[axis].take(indexer)
new_blocks = []
for blk in self.blocks:
new_values = com.take_nd(blk.values, indexer, axis=axis,
allow_fill=False)
newb = make_block(new_values, blk.items, self.items)
new_blocks.append(newb)
return BlockManager(new_blocks, new_axes)
def merge(self, other, lsuffix=None, rsuffix=None):
if not self._is_indexed_like(other):
raise AssertionError('Must have same axes to merge managers')
this, other = self._maybe_rename_join(other, lsuffix, rsuffix)
cons_items = this.items + other.items
consolidated = _consolidate(this.blocks + other.blocks, cons_items)
new_axes = list(this.axes)
new_axes[0] = cons_items
return BlockManager(consolidated, new_axes)
def _maybe_rename_join(self, other, lsuffix, rsuffix, copydata=True):
to_rename = self.items.intersection(other.items)
if len(to_rename) > 0:
if not lsuffix and not rsuffix:
raise Exception('columns overlap: %s' % to_rename)
def lrenamer(x):
if x in to_rename:
return '%s%s' % (x, lsuffix)
return x
def rrenamer(x):
if x in to_rename:
return '%s%s' % (x, rsuffix)
return x
this = self.rename_items(lrenamer, copydata=copydata)
other = other.rename_items(rrenamer, copydata=copydata)
else:
this = self
return this, other
def _is_indexed_like(self, other):
"""
Check all axes except items
"""
if self.ndim != other.ndim:
raise AssertionError(('Number of dimensions must agree '
'got %d and %d') % (self.ndim, other.ndim))
for ax, oax in zip(self.axes[1:], other.axes[1:]):
if not ax.equals(oax):
return False
return True
def rename_axis(self, mapper, axis=1):
index = self.axes[axis]
if isinstance(index, MultiIndex):
new_axis = MultiIndex.from_tuples([tuple(mapper(y) for y in x) for x in index], names=index.names)
else:
new_axis = Index([mapper(x) for x in index], name=index.name)
if not new_axis.is_unique:
raise AssertionError('New axis must be unique to rename')
new_axes = list(self.axes)
new_axes[axis] = new_axis
return BlockManager(self.blocks, new_axes)
def rename_items(self, mapper, copydata=True):
new_items = Index([mapper(x) for x in self.items])
new_items.is_unique
new_blocks = []
for block in self.blocks:
newb = block.copy(deep=copydata)
newb.set_ref_items(new_items, maybe_rename=True)
new_blocks.append(newb)
new_axes = list(self.axes)
new_axes[0] = new_items
return BlockManager(new_blocks, new_axes)
def add_prefix(self, prefix):
f = (('%s' % prefix) + '%s').__mod__
return self.rename_items(f)
def add_suffix(self, suffix):
f = ('%s' + ('%s' % suffix)).__mod__
return self.rename_items(f)
@property
def block_id_vector(self):
# TODO
result = np.empty(len(self.items), dtype=int)
result.fill(-1)
for i, blk in enumerate(self.blocks):
indexer = self.items.get_indexer(blk.items)
if (indexer == -1).any():
raise AssertionError('Block items must be in manager items')
result.put(indexer, i)
if (result < 0).any():
raise AssertionError('Some items were not in any block')
return result
@property
def item_dtypes(self):
result = np.empty(len(self.items), dtype='O')
mask = np.zeros(len(self.items), dtype=bool)
for i, blk in enumerate(self.blocks):
indexer = self.items.get_indexer(blk.items)
result.put(indexer, blk.values.dtype.name)
mask.put(indexer, 1)
if not (mask.all()):
raise AssertionError('Some items were not in any block')
return result
def construction_error(tot_items, block_shape, axes):
""" raise a helpful message about our construction """
raise ValueError("Shape of passed values is %s, indices imply %s" % (
tuple(map(int, [tot_items] + list(block_shape))),
tuple(map(int, [len(ax) for ax in axes]))))
def create_block_manager_from_blocks(blocks, axes):
try:
# if we are passed values, make the blocks
if len(blocks) == 1 and not isinstance(blocks[0], Block):
blocks = [ make_block(blocks[0], axes[0], axes[0]) ]
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except (ValueError):
blocks = [ getattr(b,'values',b) for b in blocks ]
tot_items = sum(b.shape[0] for b in blocks)
construction_error(tot_items,blocks[0].shape[1:],axes)
def create_block_manager_from_arrays(arrays, names, axes):
try:
blocks = form_blocks(arrays, names, axes)
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except (ValueError):
construction_error(len(arrays),arrays[0].shape[1:],axes)
def form_blocks(arrays, names, axes):
# pre-filter out items if we passed it
items = axes[0]
if len(arrays) < len(items):
extra_items = items - Index(names)
else:
extra_items = []
# put "leftover" items in float bucket, where else?
# generalize?
float_items = []
complex_items = []
int_items = []
bool_items = []
object_items = []
datetime_items = []
for k, v in zip(names, arrays):
if issubclass(v.dtype.type, np.floating):
float_items.append((k, v))
elif issubclass(v.dtype.type, np.complexfloating):
complex_items.append((k, v))
elif issubclass(v.dtype.type, np.datetime64):
if v.dtype != _NS_DTYPE:
v = tslib.cast_to_nanoseconds(v)
if hasattr(v, 'tz') and v.tz is not None:
object_items.append((k, v))
else:
datetime_items.append((k, v))
elif issubclass(v.dtype.type, np.integer):
if v.dtype == np.uint64:
# HACK #2355 definite overflow
if (v > 2 ** 63 - 1).any():
object_items.append((k, v))
continue
int_items.append((k, v))
elif v.dtype == np.bool_:
bool_items.append((k, v))
else:
object_items.append((k, v))
blocks = []
if len(float_items):
float_blocks = _multi_blockify(float_items, items)
blocks.extend(float_blocks)
if len(complex_items):
complex_blocks = _simple_blockify(complex_items, items, np.complex128)
blocks.extend(complex_blocks)
if len(int_items):
int_blocks = _multi_blockify(int_items, items)
blocks.extend(int_blocks)
if len(datetime_items):
datetime_blocks = _simple_blockify(datetime_items, items, _NS_DTYPE)
blocks.extend(datetime_blocks)
if len(bool_items):
bool_blocks = _simple_blockify(bool_items, items, np.bool_)
blocks.extend(bool_blocks)
if len(object_items) > 0:
object_blocks = _simple_blockify(object_items, items, np.object_)
blocks.extend(object_blocks)
if len(extra_items):
shape = (len(extra_items),) + tuple(len(x) for x in axes[1:])
# empty items -> dtype object
block_values = np.empty(shape, dtype=object)
block_values.fill(nan)
na_block = make_block(block_values, extra_items, items)
blocks.append(na_block)
blocks = _consolidate(blocks, items)
return blocks
def _simple_blockify(tuples, ref_items, dtype):
""" return a single array of a block that has a single dtype; if dtype is not None, coerce to this dtype """
block_items, values = _stack_arrays(tuples, ref_items, dtype)
# CHECK DTYPE?
if dtype is not None and values.dtype != dtype: # pragma: no cover
values = values.astype(dtype)
return [ make_block(values, block_items, ref_items) ]
def _multi_blockify(tuples, ref_items, dtype = None):
""" return an array of blocks that potentially have different dtypes """
# group by dtype
grouper = itertools.groupby(tuples, lambda x: x[1].dtype)
new_blocks = []
for dtype, tup_block in grouper:
block_items, values = _stack_arrays(list(tup_block), ref_items, dtype)
block = make_block(values, block_items, ref_items)
new_blocks.append(block)
return new_blocks
def _stack_arrays(tuples, ref_items, dtype):
from pandas.core.series import Series
# fml
def _asarray_compat(x):
# asarray shouldn't be called on SparseSeries
if isinstance(x, Series):
return x.values
else:
return np.asarray(x)
def _shape_compat(x):
# sparseseries
if isinstance(x, Series):
return len(x),
else:
return x.shape
names, arrays = zip(*tuples)
# index may box values
items = ref_items[ref_items.isin(names)]
first = arrays[0]
shape = (len(arrays),) + _shape_compat(first)
stacked = np.empty(shape, dtype=dtype)
for i, arr in enumerate(arrays):
stacked[i] = _asarray_compat(arr)
return items, stacked
def _blocks_to_series_dict(blocks, index=None):
from pandas.core.series import Series
series_dict = {}
for block in blocks:
for item, vec in zip(block.items, block.values):
series_dict[item] = Series(vec, index=index, name=item)
return series_dict
def _interleaved_dtype(blocks):
if not len(blocks): return None
from collections import defaultdict
counts = defaultdict(lambda: [])
for x in blocks:
counts[type(x)].append(x)
def _lcd_dtype(l):
""" find the lowest dtype that can accomodate the given types """
m = l[0].dtype
for x in l[1:]:
if x.dtype.itemsize > m.itemsize:
m = x.dtype
return m
have_int = len(counts[IntBlock]) > 0
have_bool = len(counts[BoolBlock]) > 0
have_object = len(counts[ObjectBlock]) > 0
have_float = len(counts[FloatBlock]) > 0
have_complex = len(counts[ComplexBlock]) > 0
have_dt64 = len(counts[DatetimeBlock]) > 0
have_numeric = have_float or have_complex or have_int
if (have_object or
(have_bool and have_numeric) or
(have_numeric and have_dt64)):
return np.dtype(object)
elif have_bool:
return np.dtype(bool)
elif have_int and not have_float and not have_complex:
return _lcd_dtype(counts[IntBlock])
elif have_dt64 and not have_float and not have_complex:
return np.dtype('M8[ns]')
elif have_complex:
return np.dtype('c16')
else:
return _lcd_dtype(counts[FloatBlock])
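# --- illustrative aside (not part of the original module) --------------------
# A self-contained sketch of the "lowest common dtype" rule documented in
# _lcd_dtype above: among same-kind dtypes, the one with the largest itemsize
# wins.  It uses plain numpy dtypes instead of Block objects, so the names are
# illustrative only.
if __name__ == '__main__':
    import numpy as _np_demo
    _candidates = [_np_demo.dtype('int16'), _np_demo.dtype('int64'), _np_demo.dtype('int32')]
    _widest = _candidates[0]
    for _d in _candidates[1:]:
        if _d.itemsize > _widest.itemsize:
            _widest = _d
    assert _widest == _np_demo.dtype('int64')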
def _consolidate(blocks, items):
"""
Merge blocks having same dtype
"""
get_dtype = lambda x: x.dtype.name
# sort by dtype
grouper = itertools.groupby(sorted(blocks, key=get_dtype),
lambda x: x.dtype)
new_blocks = []
for dtype, group_blocks in grouper:
new_block = _merge_blocks(list(group_blocks), items, dtype)
new_blocks.append(new_block)
return new_blocks
def _merge_blocks(blocks, items, dtype=None):
if len(blocks) == 1:
return blocks[0]
if dtype is None:
if len(set([ b.dtype for b in blocks ])) != 1:
raise AssertionError("_merge_blocks are invalid!")
dtype = blocks[0].dtype
new_values = _vstack([ b.values for b in blocks ], dtype)
new_items = blocks[0].items.append([b.items for b in blocks[1:]])
new_block = make_block(new_values, new_items, items)
return new_block.reindex_items_from(items)
def _block_shape(values, ndim=1, shape=None):
""" guarantee the shape of the values to be at least 1 d """
if values.ndim == ndim:
if shape is None:
shape = values.shape
values = values.reshape(tuple((1,) + shape))
return values
def _vstack(to_stack, dtype):
# work around NumPy 1.6 bug
if dtype == _NS_DTYPE or dtype == _TD_DTYPE:
new_values = np.vstack([x.view('i8') for x in to_stack])
return new_values.view(dtype)
else:
return np.vstack(to_stack)
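# --- illustrative aside (not part of the original module) --------------------
# Hedged sketch of how the dtype-grouped blocks built by form_blocks() and
# merged by _consolidate() above surface through the public DataFrame
# constructor.  Treating `_data` as the BlockManager accessor is an assumption
# tied to the pandas era this module belongs to.
if __name__ == '__main__':
    import pandas as _pd_demo
    _df = _pd_demo.DataFrame({'a': [1, 2], 'b': [1.5, 2.5], 'c': ['x', 'y']})
    _mgr = _df._data                   # the BlockManager assembled via form_blocks()
    print(_mgr.get_dtype_counts())     # one consolidated block per dtype
    print(_mgr.nblocks)                # expected: 3 (int, float, object)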
| 33.004397 | 120 | 0.574571 | 57,414 | 0.849822 | 718 | 0.010628 | 2,283 | 0.033792 | 0 | 0 | 11,310 | 0.167407 |
b44f004ae7c6b3eb8725a6532e9b3868344a526e | 4,919 | py | Python | Sketches/MH/PipeBuilder/BuildViewer.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
]
| 12 | 2015-10-20T10:22:01.000Z | 2021-07-19T10:09:44.000Z | Sketches/MH/PipeBuilder/BuildViewer.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
]
| 2 | 2015-10-20T10:22:55.000Z | 2017-02-13T11:05:25.000Z | Sketches/MH/PipeBuilder/BuildViewer.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
]
| 6 | 2015-03-09T12:51:59.000Z | 2020-03-01T13:06:21.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
# Simple control window for a looping audio player
import pygame
from Axon.Ipc import producerFinished, shutdownMicroprocess
from Kamaelia.Visualisation.PhysicsGraph.TopologyViewerComponent import TopologyViewerComponent
from Kamaelia.Physics.Simple import SimpleLaws, Particle
import time
class ComponentParticle(Particle):
"""Version of Physics.Particle designed to represent components in a simple pipeline"""
def __init__(self, ID, position, name):
super(ComponentParticle,self).__init__(position=position, ID = ID )
self.radius = 20
self.labelText = name # strip up to the first pipe only
self.name = name
font = pygame.font.Font(None, 24)
self.label = font.render(self.labelText, False, (0,0,0))
self.left = 0
self.top = 0
self.selected = False
def render(self, surface):
"""Rendering passes. A generator method that renders in multiple passes.
Use yields to specify the pass at which the next stage of rendering
should take place.
Example that renders bonds 'behind' the blobs:
def render(self, surface):
yield 1
self.renderBonds(surface) # render bonds on pass 1
yield 5
self.renderSelf(surface) # render 'blob' on pass 5
If another particle type rendered, for example, on pass 3, then it
would be rendered on top of the bonds, but behind the blobs.
Use this mechanism to order rendering into layers.
"""
sx = int(self.pos[0]) - self.left
sy = int(self.pos[1]) - self.top
yield 1
phase = (time.time()*4) % 2.0
off = phase > 1.0
phase = phase % 1.0
for p in self.bondedTo:
ex = int(p.pos[0] -self.left)
ey = int(p.pos[1] - self.top)
# 'make a crawling dotted line' appearance, to give an animated indication
# directionality of the link
dx = ex-sx
dy = ey-sy
length = (dx*dx + dy*dy)**0.5
dx = dx/length
dy = dy/length
p=0
while p<length:
newp = min(length, p+ phase * 10.0 )
phase = 1.0
if not off:
pygame.draw.line( surface,
(128,128,255),
(sx+dx*p,sy+dy*p),
(sx+dx*newp,sy+dy*newp)
)
off = not off
p=newp
yield 2
if self.selected:
pygame.draw.circle(surface, (255,255,128), (sx,sy), self.radius)
else:
pygame.draw.circle(surface, (192,192,192), (sx,sy), self.radius)
surface.blit(self.label, (sx - self.label.get_width()/2, sy - self.label.get_height()/2))
def setOffset( self, (left,top) ):
"""Inform of a change to the coords of the top left of the drawing surface,
so that this entity can render, as if the top left had moved
"""
self.left = left
self.top = top
def select( self ):
"""Tell this particle it is selected"""
self.selected = True
def deselect( self ):
"""Tell this particle it is selected"""
self.selected = False
def BuildViewer(screensize = (800,600), fullscreen = False, transparency = None):
laws = SimpleLaws(bondLength=100)
return TopologyViewerComponent( screensize=screensize,
fullscreen=fullscreen,
caption = "The pipeline",
particleTypes = {"component":ComponentParticle},
laws = laws
)
| 36.708955 | 121 | 0.551332 | 3,211 | 0.652775 | 2,158 | 0.438707 | 0 | 0 | 0 | 0 | 2,134 | 0.433828 |
b44f498d26d9dd58f69d6d12b6ff289ae252ed43 | 2,076 | py | Python | examples/exp_example.py | physimals/avb | 16663a935de35e4042c77000ea47abd7e5cd16ad | [
"Apache-2.0"
]
| null | null | null | examples/exp_example.py | physimals/avb | 16663a935de35e4042c77000ea47abd7e5cd16ad | [
"Apache-2.0"
]
| null | null | null | examples/exp_example.py | physimals/avb | 16663a935de35e4042c77000ea47abd7e5cd16ad | [
"Apache-2.0"
]
| null | null | null | """
Example of usage of the AVB framework to infer a single exponential decay
model.
This uses the Python classes directly to infer the parameters for a single
instance of noisy data constructed as a Numpy array.
"""
import sys
import logging
import numpy as np
from vaby_avb import Avb
import vaby
# Uncomment line below to start the random number generator off with the same seed value
# each time, for repeatable results
#np.random.seed(0)
# Ground truth parameters
PARAMS_TRUTH = [42, 0.5]
NOISE_PREC_TRUTH = 0.1
NOISE_VAR_TRUTH = 1/NOISE_PREC_TRUTH
NOISE_STD_TRUTH = np.sqrt(NOISE_VAR_TRUTH)
print("Ground truth: a=%f, r=%f, noise=%f (precision)" % (PARAMS_TRUTH[0], PARAMS_TRUTH[1], NOISE_PREC_TRUTH))
# Create single exponential model
model = vaby.get_model_class("exp")(None)
# Observed data samples are generated from the ground-truth exponential
# decay curve plus Gaussian noise sampled by Numpy. Reducing the number of
# samples should make the inference less 'confident' - i.e. the output
# variances of the inferred parameters will increase
N = 100
DT = 0.02
t = np.array([float(t)*DT for t in range(N)])
DATA_CLEAN = model.evaluate(PARAMS_TRUTH, t).numpy()
DATA_NOISY = DATA_CLEAN + np.random.normal(0, NOISE_STD_TRUTH, [N])
print("Time values:")
print(t)
print("Data samples (clean):")
print(DATA_CLEAN)
print("Data samples (noisy):")
print(DATA_NOISY)
# Run Fabber as a comparison if desired
#import os
#import nibabel as nib
#niidata = DATA_NOISY.reshape((1, 1, 1, N))
#nii = nib.Nifti1Image(niidata, np.identity(4))
#nii.to_filename("data_noisy.nii.gz")
#os.system("fabber_exp --data=data_noisy --print-free-energy --output=fabberout --dt=%.3f --model=exp --num-exps=1 --method=vb --noise=white --overwrite --debug" % DT)
# Log to stdout
logging.getLogger().setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter('%(levelname)s : %(message)s'))
logging.getLogger().addHandler(handler)
# Run AVB inference
avb = Avb(t, vaby.DataModel(DATA_NOISY), model)
avb.run(method="leastsq", maxits=20, learning_rate=0.1, debug="--debug" in sys.argv)
| 33.483871 | 167 | 0.750482 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,201 | 0.578516 |
b44ff9fd50fe2d54276ded1d327434e0e7c23eab | 282 | py | Python | tests/test_classes/users.py | dialogs/python-bot-sdk | 737152e5ef8406af0b22600ef7cefa78da9035e8 | [
"Apache-2.0"
]
| 9 | 2019-01-22T09:59:12.000Z | 2021-05-08T10:59:00.000Z | tests/test_classes/users.py | dialogs/python-bot-sdk | 737152e5ef8406af0b22600ef7cefa78da9035e8 | [
"Apache-2.0"
]
| 29 | 2018-10-08T17:10:49.000Z | 2021-04-28T18:46:30.000Z | tests/test_classes/users.py | dialogs/python-bot-sdk | 737152e5ef8406af0b22600ef7cefa78da9035e8 | [
"Apache-2.0"
]
| 8 | 2019-01-22T09:49:32.000Z | 2022-01-26T18:55:52.000Z | from dialog_api.users_pb2 import RequestLoadFullUsers, ResponseLoadFullUsers, FullUser
class Users:
def LoadFullUsers(self, request: RequestLoadFullUsers) -> ResponseLoadFullUsers:
return ResponseLoadFullUsers(full_users=[FullUser(id=1, contact_info=[], about=None)])
| 40.285714 | 94 | 0.797872 | 192 | 0.680851 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b45067ac1c187f969ca36977c34f99d5b3112b27 | 3,965 | py | Python | aws_marketplace/creating_marketplace_products/src/training_specification.py | jerrypeng7773/amazon-sagemaker-examples | c5ddecce1f739a345465b9a38b064983a129141d | [
"Apache-2.0"
]
| 2,610 | 2020-10-01T14:14:53.000Z | 2022-03-31T18:02:31.000Z | aws_marketplace/creating_marketplace_products/src/training_specification.py | jerrypeng7773/amazon-sagemaker-examples | c5ddecce1f739a345465b9a38b064983a129141d | [
"Apache-2.0"
]
| 1,959 | 2020-09-30T20:22:42.000Z | 2022-03-31T23:58:37.000Z | aws_marketplace/creating_marketplace_products/src/training_specification.py | jerrypeng7773/amazon-sagemaker-examples | c5ddecce1f739a345465b9a38b064983a129141d | [
"Apache-2.0"
]
| 2,052 | 2020-09-30T22:11:46.000Z | 2022-03-31T23:02:51.000Z | import json
class TrainingSpecification:
template = """
{
"TrainingSpecification": {
"TrainingImage": "IMAGE_REPLACE_ME",
"SupportedHyperParameters": [
{
"Description": "Grow a tree with max_leaf_nodes in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes",
"Name": "max_leaf_nodes",
"Type": "Integer",
"Range": {
"IntegerParameterRangeSpecification": {
"MinValue": "1",
"MaxValue": "100000"
}
},
"IsTunable": true,
"IsRequired": false,
"DefaultValue": "100"
}
],
"SupportedTrainingInstanceTypes": INSTANCES_REPLACE_ME,
"SupportsDistributedTraining": false,
"MetricDefinitions": METRICS_REPLACE_ME,
"TrainingChannels": CHANNELS_REPLACE_ME,
"SupportedTuningJobObjectiveMetrics": TUNING_OBJECTIVES_REPLACE_ME
}
}
"""
def get_training_specification_dict(
self,
ecr_image,
supports_gpu,
supported_channels=None,
supported_metrics=None,
supported_tuning_job_objective_metrics=None,
):
return json.loads(
self.get_training_specification_json(
ecr_image,
supports_gpu,
supported_channels,
supported_metrics,
supported_tuning_job_objective_metrics,
)
)
def get_training_specification_json(
self,
ecr_image,
supports_gpu,
supported_channels=None,
supported_metrics=None,
supported_tuning_job_objective_metrics=None,
):
if supported_channels is None:
print("Please provide at least one supported channel")
raise ValueError("Please provide at least one supported channel")
if supported_metrics is None:
supported_metrics = []
if supported_tuning_job_objective_metrics is None:
supported_tuning_job_objective_metrics = []
return (
self.template.replace("IMAGE_REPLACE_ME", ecr_image)
.replace("INSTANCES_REPLACE_ME", self.get_supported_instances(supports_gpu))
.replace(
"CHANNELS_REPLACE_ME",
json.dumps([ob.__dict__ for ob in supported_channels], indent=4, sort_keys=True),
)
.replace(
"METRICS_REPLACE_ME",
json.dumps([ob.__dict__ for ob in supported_metrics], indent=4, sort_keys=True),
)
.replace(
"TUNING_OBJECTIVES_REPLACE_ME",
json.dumps(
[ob.__dict__ for ob in supported_tuning_job_objective_metrics],
indent=4,
sort_keys=True,
),
)
)
@staticmethod
def get_supported_instances(supports_gpu):
cpu_list = [
"ml.m4.xlarge",
"ml.m4.2xlarge",
"ml.m4.4xlarge",
"ml.m4.10xlarge",
"ml.m4.16xlarge",
"ml.m5.large",
"ml.m5.xlarge",
"ml.m5.2xlarge",
"ml.m5.4xlarge",
"ml.m5.12xlarge",
"ml.m5.24xlarge",
"ml.c4.xlarge",
"ml.c4.2xlarge",
"ml.c4.4xlarge",
"ml.c4.8xlarge",
"ml.c5.xlarge",
"ml.c5.2xlarge",
"ml.c5.4xlarge",
"ml.c5.9xlarge",
"ml.c5.18xlarge",
]
gpu_list = [
"ml.p2.xlarge",
"ml.p2.8xlarge",
"ml.p2.16xlarge",
"ml.p3.2xlarge",
"ml.p3.8xlarge",
"ml.p3.16xlarge",
]
list_to_return = cpu_list
if supports_gpu:
list_to_return = cpu_list + gpu_list
return json.dumps(list_to_return)
| 30.5 | 186 | 0.54174 | 3,950 | 0.996217 | 0 | 0 | 1,029 | 0.259521 | 0 | 0 | 1,556 | 0.392434 |
b450cdc3b9ccbf44259b53088cde193f44fcd78c | 483 | py | Python | Exercises1_12/R-1.12.py | opnsesame/Data-Structures-and-Algorithms-Exercises | 62f4066c6370225a41295ecb08e05258b08f6d7e | [
"Apache-2.0"
]
| null | null | null | Exercises1_12/R-1.12.py | opnsesame/Data-Structures-and-Algorithms-Exercises | 62f4066c6370225a41295ecb08e05258b08f6d7e | [
"Apache-2.0"
]
| null | null | null | Exercises1_12/R-1.12.py | opnsesame/Data-Structures-and-Algorithms-Exercises | 62f4066c6370225a41295ecb08e05258b08f6d7e | [
"Apache-2.0"
]
| null | null | null | #Python's random module includes a function choice(data) that returns a
#random element from a non-empty sequence. The random module includes a
#more basic function randrange, with parameterization similar to the
#built-in range function, that returns a random choice from the given
#range. Using only the randrange function, implement your own version of
#the choice function.
import random
lottoMax = list()
lottoMax = [random.randrange(1,50,1) for i in range(1,8)]
print(lottoMax) | 48.3 | 72 | 0.78882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 372 | 0.770186 |
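# --- added sketch (not in the original exercise file) --------------------------
# One possible answer to the exercise described above: re-implement the
# behaviour of random.choice using only random.randrange.  The name my_choice
# is illustrative.
def my_choice(data):
    # randrange(len(data)) yields a uniform index in [0, len(data))
    return data[random.randrange(len(data))]

print(my_choice(lottoMax))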
b45291fae69314faf72f4230d287a0ca6f8e5cfe | 10,093 | py | Python | database/mongo_connector.py | timburbank/openrvdas | ba77d3958075abd21ff94a396e4a97879962ac0c | [
"BSD-2-Clause"
]
| 1 | 2020-06-29T17:25:44.000Z | 2020-06-29T17:25:44.000Z | database/mongo_connector.py | timburbank/openrvdas | ba77d3958075abd21ff94a396e4a97879962ac0c | [
"BSD-2-Clause"
]
| null | null | null | database/mongo_connector.py | timburbank/openrvdas | ba77d3958075abd21ff94a396e4a97879962ac0c | [
"BSD-2-Clause"
]
| null | null | null | #!/usr/bin/env python3
"""Tables:
data: pk timestamp field_name field_value source_record
We don't know what type each value will have, so have a column for
int, float, str and bool and leave all but the appropriate value type
NULL. Docs claim that NULL values take no space, so...
Still so many ways we could make this more space efficient, most
obviously by partitioning field_name (and even timestamp?) into
foreign keys.
field_name - could store this in a separate table so that it's only
a foreign key in the data table. Something like:
fields: id field_name field_type
source_record - an id indexing a table where raw source records are
stored, so that we can re-parse and recreate whatever data we want
if needed.
Current implementation is simple and inefficient in both computation
and storage.
TODO: Allow wildcarding field selection, so client can specify 'S330*,Knud*'
"""
import logging
import sys
import json
sys.path.append('.')
from logger.utils.formats import Python_Record
from logger.utils.das_record import DASRecord
try:
import pymongo
MONGO_ENABLED = True
except ImportError:
MONGO_ENABLED = False
################################################################################
class MongoConnector:
# Name of table in which we will store mappings from record field
# names to the names of the tables containing those fields.
DATA_TABLE = 'data'
FIELD_TABLE = 'fields'
SOURCE_TABLE = 'source'
def __init__(self, database, host, user, password,
tail=False, save_source=True):
"""Interface to MongoConnector, to be imported by, e.g. DatabaseWriter."""
if not MONGO_ENABLED:
logging.warning('MongoClient not found, so MongoDB functionality not available.')
return
self.client = pymongo.MongoClient([host])
self.db = self.client[database]
self.save_source = save_source
# What's the next id we're supposed to read? Or if we've been
# reading by timestamp, what's the last timestamp we've seen?
self.next_id = 0
self.last_timestamp = None
# Create tables if they don't exist yet
if not self.SOURCE_TABLE in self.db.collection_names():
sourceCol = self.db[self.SOURCE_TABLE]
if not self.DATA_TABLE in self.db.collection_names():
dataCol = self.db[self.DATA_TABLE]
############################
def write_record(self, record):
"""Write record to table."""
# First, check that we've got something we can work with
if not record:
return
if not type(record) == DASRecord:
logging.error('write_record() received non-DASRecord as input. '
'Type: %s', type(record))
return
# If we're saving source records, we have to do a little
# legerdemain: after we've saved the record, we need to retrieve
# the id of the record we've just saved so that we can attach it
# to the data values we're about to save.
if self.save_source:
logging.debug('Inserting source into table')
logging.debug(record)
result = self.db[self.SOURCE_TABLE].insert_one(json.loads(record.as_json()))
# Get the id of the saved source record. Note: documentation
# *claims* that this is kept on a per-client basis, so it's safe
# even if another client does an intervening write.
source_id = result.inserted_id
else:
source_id = None
if not record.fields:
logging.info('DASRecord has no parsed fields. Skipping record.')
return
# Write one row for each field-value pair. Columns are:
# timestamp
# field_name
# int_value \
# float_value, \ Only one of these fields will be non-NULL,
# str_value / depending on the type of the value.
# bool_value /
values = []
for field_name, value in record.fields.items():
data_record = {
'timestamp': record.timestamp,
'field_name': field_name,
'int_value': None,
'float_value': None,
'str_value': None,
'bool_value': None,
'source_id': None
}
if type(value) is int:
data_record['int_value'] = value
elif type(value) is float:
data_record['float_value'] = value
elif type(value) is str:
data_record['str_value'] = value
elif type(value) is bool:
data_record['bool_value'] = True if value else False
elif value is None:
data_record['str_value'] = '""'
else:
logging.error('Unknown record value type (%s) for %s: %s',
type(value), field_name, value)
continue
# If we've saved this field's source record, append source's
# foreign key to row so we can look it up.
if source_id:
data_record['source_id'] = source_id
# Append this field's row to the list of rows we're about to save.
values.append(data_record)
# Build the SQL query
# fields = ['timestamp',
# 'field_name',
# 'int_value',
# 'float_value',
# 'str_value',
# 'bool_value']
# if source_id:
# fields.append('source')
if not values:
logging.warning('No values found in record %s', str(record))
# write_cmd = 'insert into `%s` (%s) values %s' % \
# (self.DATA_TABLE, ','.join(fields), ','.join(values))
logging.debug('Inserting record into table')
result = self.db[self.DATA_TABLE].insert_many(values)
# self.exec_sql_command(write_cmd)
############################
def read(self, field_list=None, start=None, num_records=1):
"""Read the next record from table. If start is specified, reset read
to start at that position."""
query = {}
projection = { '_id': 0 }
if start is None:
start = self.next_id
# If they haven't given us any fields, retrieve everything
if field_list:
query['field_name'] = { "$in": field_list.split(',') }
if num_records is None:
limit = 0
else:
limit = num_records
# query = 'select * from `%s` where %s' % (self.DATA_TABLE, condition)
results = list(self.db[self.DATA_TABLE].find(query, projection).skip(start).limit(limit))
if len(results) == 0:
return {}
output = {}
for result in results:
if not result['field_name'] in output:
output[result['field_name']] = []
if result['int_value'] is not None:
output[result['field_name']].append((result['timestamp'], result['int_value']))
elif result['float_value'] is not None:
output[result['field_name']].append((result['timestamp'], result['float_value']))
elif result['str_value'] is not None:
output[result['field_name']].append((result['timestamp'], result['str_value']))
elif result['bool_value'] is not None:
output[result['field_name']].append((result['timestamp'], result['bool_value']))
else:
output[result['field_name']].append((result['timestamp']))
self.next_id = start + len(results)
return output
# return self._process_query(query)
############################
def read_time(self, field_list=None, start_time=None, stop_time=None):
"""Read the next records from table based on timestamps. If start_time
is None, use the timestamp of the last read record. If stop_time is None,
read all records since then."""
query = {}
if start_time or stop_time:
query['timestamp'] = {}
if start_time is not None:
query['timestamp']['$gte'] = start_time
if stop_time is not None:
query['timestamp']['$lte'] = stop_time
# If they haven't given us any fields, retrieve everything
if field_list:
query['field_name'] = { "$in": field_list.split(',') }
logging.debug('read query: %s', query)
return list(self.db[self.DATA_TABLE].find(query).sort('timestamp', -1))
############################
def seek(self, offset=0, origin='current'):
"""Behavior is intended to mimic file seek() behavior but with
respect to records: 'offset' means number of records, and origin
is either 'start', 'current' or 'end'."""
num_rows = self.db[self.DATA_TABLE].count()
if origin == 'current':
self.next_id += offset
elif origin == 'start':
self.next_id = offset
elif origin == 'end':
self.next_id = num_rows + offset
self._next_id = min(num_rows, self.next_id)
logging.debug('Seek: next position %d', self.next_id)
############################
# def _num_rows(self, table_name):
# query = 'select count(1) from `%s`' % table_name
# cursor = self.connection.cursor()
# cursor.execute(query)
# num_rows = next(cursor)[0]
# return num_rows
############################
# def _process_query(self, query):
# cursor = self.connection.cursor()
# cursor.execute(query)
# results = {}
# for values in cursor:
# (id, timestamp, field_name,
# int_value, float_value, str_value, bool_value,
# source) = values
# if not field_name in results:
# results[field_name] = []
# if int_value is not None:
# val = int_value
# elif float_value is not None:
# val = float_value
# elif str_value is not None:
# val = str_value
# elif float_value is not None:
# val = int_value
# elif bool_value is not None:
# val = bool(bool_value)
# results[field_name].append((timestamp, val))
# self.next_id = id + 1
# self.last_timestamp = timestamp
# cursor.close()
# return results
############################
def delete_table(self, table_name):
"""Delete a table."""
# delete_cmd = 'drop table `%s`' % table_name
logging.info('Dropping table')
return self.db[table_name].drop()
# self.exec_sql_command(delete_cmd)
############################
def close(self):
"""Close connection."""
# self.connection.close()
self.client.close()
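# --- usage sketch (not part of the original module) ----------------------------
# Hedged example of the write/read round trip described in the module docstring.
# It assumes a MongoDB server reachable on localhost, and it assumes DASRecord
# accepts data_id/timestamp/fields keyword arguments; adjust both to your setup.
if __name__ == '__main__':
    connector = MongoConnector(database='test', host='localhost:27017',
                               user='', password='')
    demo_record = DASRecord(data_id='demo', timestamp=1600000000.0,
                            fields={'S330Speed': 8.5, 'KnudDepth': 120.0})
    connector.write_record(demo_record)
    print(connector.read(field_list='S330Speed,KnudDepth', num_records=None))
    connector.close()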
| 31.442368 | 93 | 0.618548 | 8,838 | 0.875656 | 0 | 0 | 0 | 0 | 0 | 0 | 5,546 | 0.54949 |
b4532acfa1818a77c2c7b5bc9830fae6dbfa670b | 10,204 | py | Python | ElectricRoute/flask_auth_app/project/__init__.py | luciatomainodelacr/TFM | dd989ba18e22d71f6c9c03e16b71110c7c23a9a8 | [
"MIT"
]
| 1 | 2021-05-26T15:45:06.000Z | 2021-05-26T15:45:06.000Z | ElectricRoute/flask_auth_app/project/__init__.py | luciatomainodelacr/TFM | dd989ba18e22d71f6c9c03e16b71110c7c23a9a8 | [
"MIT"
]
| null | null | null | ElectricRoute/flask_auth_app/project/__init__.py | luciatomainodelacr/TFM | dd989ba18e22d71f6c9c03e16b71110c7c23a9a8 | [
"MIT"
]
| 5 | 2021-03-07T19:20:26.000Z | 2021-04-11T18:26:12.000Z | # =============================================================================
# Application initialization: __init__.py
# =============================================================================
"""
This file is responsible for creating the application, which initializes the
database and registers the models.
To run it:
1) In a Linux terminal, go to the path:
>> cd Documentos/TFM/ElectricRoute/flask_auth_app
>> export FLASK_APP=project
>> export FLASK_DEBUG=1
>> If flask_app is to be launched locally: export DB_HOST=0.0.0.0
>> If flask_app is to be launched locally: export GRAFANA_HOST=0.0.0.0
>> flask run
2) Open the browser and go to http://localhost:5000/login
3) Enter an email and a password (any will do)
Example:
User: [email protected]
Password: blanca
"""
# Load the required libraries
from flask import Flask
from flask_login import LoginManager
from flask_mysqldb import MySQL
from os import environ
from .BE.Output import BaseDatos
# Initialize the MySQL database extension
db = MySQL()
# Create the app
def create_app():
app = Flask(__name__)
db_host = environ.get('DB_HOST')
app.config['MYSQL_HOST'] = db_host
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = 'root'
app.config['MYSQL_DB'] = 'tfm'
app.config['MYSQL_DATABASE_PORT'] = '3306'
app.secret_key = "123456789"
db.init_app(app)
login_manager = LoginManager()
login_manager.login_view = 'auth.login'
login_manager.init_app(app)
@login_manager.user_loader
def load_user(user_id):
# since the user_id is just the primary key of our user table, use it in the query for the user
#return User.query.get(int(user_id))
return 1
# blueprint for auth routes in our app
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint)
# blueprint for non-auth parts of app
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
return app
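# --- usage sketch (not part of the original file) -------------------------------
# Hedged alternative to the `flask run` steps in the docstring above: invoke the
# application factory directly.  It assumes DB_HOST has already been exported as
# described.
if __name__ == "__main__":
    create_app().run(host="0.0.0.0", port=5000, debug=True)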
| 129.164557 | 6,108 | 0.130243 | 0 | 0 | 0 | 0 | 220 | 0.021539 | 0 | 0 | 1,223 | 0.119738 |
b4533a5f5cb54cabae74aca99fd6222059d74d85 | 1,822 | py | Python | benchmarks/cifar10/benchmark_sample_creator.py | aarati-K/one-access | 928788a8729770c665cb94db6891dfaf4f32d1fc | [
"MIT"
]
| null | null | null | benchmarks/cifar10/benchmark_sample_creator.py | aarati-K/one-access | 928788a8729770c665cb94db6891dfaf4f32d1fc | [
"MIT"
]
| 12 | 2018-12-01T23:30:20.000Z | 2018-12-19T02:56:34.000Z | benchmarks/cifar10/benchmark_sample_creator.py | aarati-K/one-access | 928788a8729770c665cb94db6891dfaf4f32d1fc | [
"MIT"
]
| null | null | null | from store.cifar10 import Cifar10
import torchvision.transforms as transforms
import time
import matplotlib.pyplot as plt
batch_size = 1
rel_sample_size = 10000
ds = Cifar10(input_data_folder="/home/aarati/datasets/cifar-10-batches-py", \
max_batches=2, batch_size=batch_size, rel_sample_size=rel_sample_size, \
max_samples=1, transform=transforms.ToTensor())
ds.count_num_points()
ds.generate_IR()
all_times = []
for i in range(10):
start = time.time()
ds.initialize_samples()
end = time.time()
all_times.append(end-start)
s = ds.samples[0].get()
print(all_times)
# Sample creation time for sample size:
# 1: [0.349, 0.306, 0.431, 0.303, 0.18, 0.69, 0.557, 0.681, 0.424, 0.300]
# 10: [0.742, 0.685, 0.679, 0.676, 0.673, 0.676, 0.551, 0.673, 0.669, 0.670]
# 100: [0.713, 0.672, 0.668, 0.671, 0.668, 0.680, 0.682, 0.675, 0.673, 0.669]
# 1000: [0.738, 0.689, 0.704, 0.693, 0.684, 0.683, 0.678, 0.677, 0.700, 0.687]
# 10000: [0.765, 0.727, 0.717, 0.740, 0.723, 0.774, 0.720, 0.868, 0.724, 0.771]
# Plotting code
# x = [1, 10, 50, 100, 1000, 10000]
# y = [0.45, 0.702, 0.703, 0.708, 0.715, 0.746]
# plt.plot(x, y, color='b', marker='o', markerfacecolor='k', markersize=10, fillstyle='full', linewidth=3, linestyle='solid')
# plt.xscale('log')
# plt.ylim(0.40, 0.78)
# plt.xlabel("Reservoir Sample Size", fontsize=20, fontweight='semibold', fontname='serif')
# plt.ylabel("Creation Time (s)", fontsize=20, fontweight='semibold', fontname='serif')
# plt.xticks(x, [1, 10, '', 100, 1000, 10000])
# _, ticks = plt.xticks()
# for tick in ticks:
# tick.set_fontsize(16)
# tick.set_fontweight('medium')
# tick.set_fontname('serif')
# _, ticks = plt.yticks()
# for tick in ticks:
# tick.set_fontsize(16)
# tick.set_fontweight('medium')
# tick.set_fontname('serif')
# plt.show()
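# --- added sketch (not part of the original benchmark) --------------------------
# The timings above are for building reservoir samples.  For reference, this is
# the classic Algorithm R for reservoir sampling; the library's own logic inside
# initialize_samples() may differ in detail.
import random

def reservoir_sample(stream, k):
    reservoir = []
    for i, item in enumerate(stream):
        if i < k:
            reservoir.append(item)
        else:
            j = random.randrange(i + 1)   # uniform index in [0, i]
            if j < k:
                reservoir[j] = item
    return reservoir

print(len(reservoir_sample(range(100000), 10000)))   # -> 10000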
| 36.44 | 125 | 0.660263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,242 | 0.681668 |
b4544908bc703c4da565a916d491bf32413a078b | 1,095 | py | Python | beet/contrib/render.py | Arcensoth/beet | 8a20a6fbee8724d8c26fe7e9ade68dffa57406bd | [
"MIT"
]
| 46 | 2021-03-09T23:34:52.000Z | 2022-03-08T01:30:04.000Z | beet/contrib/render.py | Arcensoth/beet | 8a20a6fbee8724d8c26fe7e9ade68dffa57406bd | [
"MIT"
]
| 127 | 2021-02-24T00:41:44.000Z | 2022-03-31T05:14:31.000Z | beet/contrib/render.py | Arcensoth/beet | 8a20a6fbee8724d8c26fe7e9ade68dffa57406bd | [
"MIT"
]
| 9 | 2021-03-11T18:18:28.000Z | 2022-03-11T20:32:01.000Z | """Plugin that invokes the built-in template renderer."""
__all__ = [
"RenderOptions",
"render",
]
from typing import Dict, List
from pydantic import BaseModel
from beet import Context, configurable
class RenderOptions(BaseModel):
resource_pack: Dict[str, List[str]] = {}
data_pack: Dict[str, List[str]] = {}
def beet_default(ctx: Context):
ctx.require(render)
@configurable(validator=RenderOptions)
def render(ctx: Context, opts: RenderOptions):
"""Plugin that processes the data pack and the resource pack with Jinja."""
for groups, pack in zip([opts.resource_pack, opts.data_pack], ctx.packs):
for group, patterns in groups.items():
try:
proxy = getattr(pack, group)
file_paths = proxy.match(*patterns)
except:
raise ValueError(f"Invalid render group {group!r}.") from None
else:
for path in file_paths:
with ctx.override(render_path=path, render_group=group):
ctx.template.render_file(proxy[path])
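# --- configuration sketch (not part of the original plugin) ---------------------
# Hedged example of the options shape this plugin consumes: keys are pack
# container attributes and values are match patterns.  The specific group names
# and patterns below are illustrative only.
EXAMPLE_RENDER_OPTIONS = RenderOptions(
    data_pack={"functions": ["demo:*"]},
    resource_pack={"textures": ["minecraft:block/*"]},
)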
| 26.707317 | 79 | 0.630137 | 117 | 0.106849 | 0 | 0 | 701 | 0.640183 | 0 | 0 | 189 | 0.172603 |
b45497bfff55851d8e2cfb1521b54e7ce3939306 | 15,269 | py | Python | customtkinter/customtkinter_progressbar.py | thisSELFmySELF/CustomTkinter | 9edb25d6c4604300641212fe664097d71ed12d0c | [
"CC0-1.0"
]
| 1 | 2022-02-20T12:05:08.000Z | 2022-02-20T12:05:08.000Z | customtkinter/customtkinter_progressbar.py | thisSELFmySELF/CustomTkinter | 9edb25d6c4604300641212fe664097d71ed12d0c | [
"CC0-1.0"
]
| null | null | null | customtkinter/customtkinter_progressbar.py | thisSELFmySELF/CustomTkinter | 9edb25d6c4604300641212fe664097d71ed12d0c | [
"CC0-1.0"
]
| null | null | null | import sys
import tkinter
from .customtkinter_tk import CTk
from .customtkinter_frame import CTkFrame
from .appearance_mode_tracker import AppearanceModeTracker
from .customtkinter_color_manager import CTkColorManager
class CTkProgressBar(tkinter.Frame):
""" tkinter custom progressbar, always horizontal, values are from 0 to 1 """
def __init__(self, *args,
variable=None,
bg_color=None,
border_color="CTkColorManager",
fg_color="CTkColorManager",
progress_color="CTkColorManager",
width=160,
height=10,
border_width=0,
**kwargs):
super().__init__(*args, **kwargs)
# overwrite configure methods of master when master is tkinter widget, so that bg changes get applied on child CTk widget too
if isinstance(self.master, (tkinter.Tk, tkinter.Frame)) and not isinstance(self.master, (CTk, CTkFrame)):
master_old_configure = self.master.config
def new_configure(*args, **kwargs):
if "bg" in kwargs:
self.configure(bg_color=kwargs["bg"])
elif "background" in kwargs:
self.configure(bg_color=kwargs["background"])
# args[0] is dict when attribute gets changed by widget[<attribut>] syntax
elif len(args) > 0 and type(args[0]) == dict:
if "bg" in args[0]:
self.configure(bg_color=args[0]["bg"])
elif "background" in args[0]:
self.configure(bg_color=args[0]["background"])
master_old_configure(*args, **kwargs)
self.master.config = new_configure
self.master.configure = new_configure
AppearanceModeTracker.add(self.change_appearance_mode, self)
self.appearance_mode = AppearanceModeTracker.get_mode() # 0: "Light" 1: "Dark"
self.bg_color = self.detect_color_of_master() if bg_color is None else bg_color
self.border_color = CTkColorManager.PROGRESS_BG if border_color == "CTkColorManager" else border_color
self.fg_color = CTkColorManager.PROGRESS_BG if fg_color == "CTkColorManager" else fg_color
self.progress_color = CTkColorManager.MAIN if progress_color == "CTkColorManager" else progress_color
self.variable = variable
self.variable_callback_blocked = False
self.variabel_callback_name = None
self.width = width
self.height = self.calc_optimal_height(height)
self.border_width = round(border_width)
self.value = 0.5
self.configure(width=self.width, height=self.height)
self.canvas = tkinter.Canvas(master=self,
                                     highlightthickness=0,
width=self.width,
height=self.height)
self.canvas.place(x=0, y=0)
# Each time an item is resized due to pack position mode, the binding Configure is called on the widget
self.bind('<Configure>', self.update_dimensions)
self.draw() # initial draw
if self.variable is not None:
self.variabel_callback_name = self.variable.trace_add("write", self.variable_callback)
self.variable_callback_blocked = True
self.set(self.variable.get(), from_variable_callback=True)
self.variable_callback_blocked = False
def destroy(self):
AppearanceModeTracker.remove(self.change_appearance_mode)
if self.variable is not None:
self.variable.trace_remove("write", self.variabel_callback_name)
super().destroy()
def detect_color_of_master(self):
if isinstance(self.master, CTkFrame):
return self.master.fg_color
else:
return self.master.cget("bg")
@staticmethod
def calc_optimal_height(user_height):
if sys.platform == "darwin":
return user_height # on macOS just use given value (canvas has Antialiasing)
else:
            # make sure the value is always uneven (odd) for better rendering of the ovals
if user_height == 0:
return 0
elif user_height % 2 == 0:
return user_height + 1
else:
return user_height
def update_dimensions(self, event):
# only redraw if dimensions changed (for performance)
if self.width != event.width or self.height != event.height:
self.width = event.width
self.height = event.height
self.draw()
def draw(self, no_color_updates=False):
# decide the drawing method
if sys.platform == "darwin":
# on macOS draw button with polygons (positions are more accurate, macOS has Antialiasing)
self.draw_with_polygon_shapes()
else:
# on Windows and other draw with ovals (corner_radius can be optimised to look better than with polygons)
self.draw_with_ovals_and_rects()
if no_color_updates is False:
self.canvas.configure(bg=CTkColorManager.single_color(self.bg_color, self.appearance_mode))
self.canvas.itemconfig("border_parts", fill=CTkColorManager.single_color(self.border_color, self.appearance_mode))
self.canvas.itemconfig("inner_parts", fill=CTkColorManager.single_color(self.fg_color, self.appearance_mode))
self.canvas.itemconfig("progress_parts", fill=CTkColorManager.single_color(self.progress_color, self.appearance_mode))
def draw_with_polygon_shapes(self):
""" draw the progress bar parts with just three polygons that have a rounded border """
coordinate_shift = -1
width_reduced = -1
# create border button parts (only if border exists)
if self.border_width > 0:
if not self.canvas.find_withtag("border_parts"):
self.canvas.create_line((0, 0, 0, 0), tags=("border_line_1", "border_parts"))
self.canvas.coords("border_line_1",
(self.height / 2,
self.height / 2,
self.width - self.height / 2 + coordinate_shift,
self.height / 2))
self.canvas.itemconfig("border_line_1",
capstyle=tkinter.ROUND,
width=self.height + width_reduced)
self.canvas.lower("border_parts")
# create inner button parts
if not self.canvas.find_withtag("inner_parts"):
self.canvas.create_line((0, 0, 0, 0), tags=("inner_line_1", "inner_parts"))
self.canvas.coords("inner_line_1",
(self.height / 2,
self.height / 2,
self.width - self.height / 2 + coordinate_shift,
self.height / 2))
self.canvas.itemconfig("inner_line_1",
capstyle=tkinter.ROUND,
width=self.height - self.border_width * 2 + width_reduced)
# progress parts
if not self.canvas.find_withtag("progress_parts"):
self.canvas.create_line((0, 0, 0, 0), tags=("progress_line_1", "progress_parts"))
self.canvas.coords("progress_line_1",
(self.height / 2,
self.height / 2,
self.height / 2 + (self.width + coordinate_shift - self.height) * self.value,
self.height / 2))
self.canvas.itemconfig("progress_line_1",
capstyle=tkinter.ROUND,
width=self.height - self.border_width * 2 + width_reduced)
def draw_with_ovals_and_rects(self):
""" draw the progress bar parts with ovals and rectangles """
if sys.platform == "darwin":
oval_bottom_right_shift = 0
rect_bottom_right_shift = 0
else:
# ovals and rects are always rendered too large on Windows and need to be made smaller by -1
oval_bottom_right_shift = -1
rect_bottom_right_shift = -0
# frame_border
if self.border_width > 0:
if not self.canvas.find_withtag("border_parts"):
self.canvas.create_oval((0, 0, 0, 0), tags=("border_oval_1", "border_parts"), width=0)
self.canvas.create_rectangle((0, 0, 0, 0), tags=("border_rect_1", "border_parts"), width=0)
self.canvas.create_oval((0, 0, 0, 0), tags=("border_oval_2", "border_parts"), width=0)
self.canvas.coords("border_oval_1", (0,
0,
self.height + oval_bottom_right_shift,
self.height + oval_bottom_right_shift))
self.canvas.coords("border_rect_1", (self.height/2,
0,
self.width-(self.height/2) + rect_bottom_right_shift,
self.height + rect_bottom_right_shift))
self.canvas.coords("border_oval_2", (self.width-self.height,
0,
self.width + oval_bottom_right_shift,
self.height + oval_bottom_right_shift))
# foreground
if not self.canvas.find_withtag("inner_parts"):
self.canvas.create_oval((0, 0, 0, 0), tags=("inner_oval_1", "inner_parts"), width=0)
self.canvas.create_rectangle((0, 0, 0, 0), tags=("inner_rect_1", "inner_parts"), width=0)
self.canvas.create_oval((0, 0, 0, 0), tags=("inner_oval_2", "inner_parts"), width=0)
self.canvas.coords("inner_oval_1", (self.border_width,
self.border_width,
self.height-self.border_width + oval_bottom_right_shift,
self.height-self.border_width + oval_bottom_right_shift))
self.canvas.coords("inner_rect_1", (self.height/2,
self.border_width,
self.width-(self.height/2 + rect_bottom_right_shift),
self.height-self.border_width + rect_bottom_right_shift))
self.canvas.coords("inner_oval_2", (self.width-self.height+self.border_width,
self.border_width,
self.width-self.border_width + oval_bottom_right_shift,
self.height-self.border_width + oval_bottom_right_shift))
# progress parts
if not self.canvas.find_withtag("progress_parts"):
self.canvas.create_oval((0, 0, 0, 0), tags=("progress_oval_1", "progress_parts"), width=0)
self.canvas.create_rectangle((0, 0, 0, 0), tags=("progress_rect_1", "progress_parts"), width=0)
self.canvas.create_oval((0, 0, 0, 0), tags=("progress_oval_2", "progress_parts"), width=0)
self.canvas.coords("progress_oval_1", (self.border_width,
self.border_width,
self.height - self.border_width + oval_bottom_right_shift,
self.height - self.border_width + oval_bottom_right_shift))
self.canvas.coords("progress_rect_1", (self.height / 2,
self.border_width,
self.height / 2 + (self.width - self.height) * self.value + rect_bottom_right_shift,
self.height - self.border_width + rect_bottom_right_shift))
self.canvas.coords("progress_oval_2",
(self.height / 2 + (self.width - self.height) * self.value - self.height / 2 + self.border_width,
self.border_width,
self.height / 2 + (self.width - self.height) * self.value + self.height / 2 - self.border_width + oval_bottom_right_shift,
self.height - self.border_width + oval_bottom_right_shift))
def configure(self, *args, **kwargs):
require_redraw = False # some attribute changes require a call of self.draw() at the end
if "bg_color" in kwargs:
self.bg_color = kwargs["bg_color"]
del kwargs["bg_color"]
require_redraw = True
if "fg_color" in kwargs:
self.fg_color = kwargs["fg_color"]
del kwargs["fg_color"]
require_redraw = True
if "border_color" in kwargs:
self.border_color = kwargs["border_color"]
del kwargs["border_color"]
require_redraw = True
if "progress_color" in kwargs:
self.progress_color = kwargs["progress_color"]
del kwargs["progress_color"]
require_redraw = True
if "border_width" in kwargs:
self.border_width = kwargs["border_width"]
del kwargs["border_width"]
require_redraw = True
if "variable" in kwargs:
if self.variable is not None:
self.variable.trace_remove("write", self.variabel_callback_name)
self.variable = kwargs["variable"]
if self.variable is not None and self.variable != "":
self.variabel_callback_name = self.variable.trace_add("write", self.variable_callback)
self.set(self.variable.get(), from_variable_callback=True)
else:
self.variable = None
del kwargs["variable"]
super().configure(*args, **kwargs)
if require_redraw is True:
self.draw()
def variable_callback(self, var_name, index, mode):
if not self.variable_callback_blocked:
self.set(self.variable.get(), from_variable_callback=True)
def set(self, value, from_variable_callback=False):
self.value = value
if self.value > 1:
self.value = 1
elif self.value < 0:
self.value = 0
self.draw(no_color_updates=True)
if self.variable is not None and not from_variable_callback:
self.variable_callback_blocked = True
self.variable.set(round(self.value) if isinstance(self.variable, tkinter.IntVar) else self.value)
self.variable_callback_blocked = False
def change_appearance_mode(self, mode_string):
if mode_string.lower() == "dark":
self.appearance_mode = 1
elif mode_string.lower() == "light":
self.appearance_mode = 0
if isinstance(self.master, CTkFrame):
self.bg_color = self.master.fg_color
else:
self.bg_color = self.master.cget("bg")
self.draw()
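# Illustrative usage: a minimal sketch, assuming the packaged top-level imports
# of the customtkinter distribution are available.
#
#   import customtkinter
#   root = customtkinter.CTk()
#   bar = customtkinter.CTkProgressBar(master=root, width=200, height=12, border_width=2)
#   bar.pack(padx=20, pady=20)
#   bar.set(0.75)  # set() clamps values to the 0..1 range
#   root.mainloop()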
| 46.551829 | 150 | 0.568276 | 15,047 | 0.985461 | 0 | 0 | 475 | 0.031109 | 0 | 0 | 2,471 | 0.161831 |
b45613d6f68d15fc7e95c155cc5ae42470c0d2ab | 694 | py | Python | backtest/tests/test_strategy.py | Christakou/backtest | fa97f50b36a1d56fe667250169ed50a8d9121c3c | [
"MIT"
]
| null | null | null | backtest/tests/test_strategy.py | Christakou/backtest | fa97f50b36a1d56fe667250169ed50a8d9121c3c | [
"MIT"
]
| null | null | null | backtest/tests/test_strategy.py | Christakou/backtest | fa97f50b36a1d56fe667250169ed50a8d9121c3c | [
"MIT"
]
| null | null | null | import pytest
from backtest.strategy import BuyAndHoldEqualAllocation
@pytest.fixture
def strategy():
symbols = ('AAPL', 'GOOG')
strategy = BuyAndHoldEqualAllocation(relevant_symbols=symbols)
return strategy
def test_strategy_execute(strategy):
strategy.execute()
assert len(strategy.holdings) > 0
assert len(strategy.trades) > 0
def test_holdings_at(strategy):
strategy.execute()
    assert (strategy._holdings_at('2018-05-05') == {})
assert (strategy._holdings_at('2021-05-06') == {'AAPL': 7466})
assert (strategy._holdings_at('2021-05-07') == {'AAPL': 3862, 'GOOG': 209})
assert (strategy._holdings_at('2021-05-08') == {'AAPL': 3862, 'GOOG': 209}) | 33.047619 | 79 | 0.698847 | 0 | 0 | 0 | 0 | 149 | 0.214697 | 0 | 0 | 90 | 0.129683 |
b4561bfc43f0bcb4bcb4c7719b19ceba05dfa31d | 853 | py | Python | onnx/backend/test/case/node/constant.py | stillmatic/onnx | 8d5eb62d5299f6dcb6ac787f0ea8e6cf5b8331a7 | [
"Apache-2.0"
]
| null | null | null | onnx/backend/test/case/node/constant.py | stillmatic/onnx | 8d5eb62d5299f6dcb6ac787f0ea8e6cf5b8331a7 | [
"Apache-2.0"
]
| null | null | null | onnx/backend/test/case/node/constant.py | stillmatic/onnx | 8d5eb62d5299f6dcb6ac787f0ea8e6cf5b8331a7 | [
"Apache-2.0"
]
| null | null | null | # SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import onnx
from ..base import Base
from . import expect
class Constant(Base):
@staticmethod
def export(): # type: () -> None
values = np.random.randn(5, 5).astype(np.float32)
node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['values'],
value=onnx.helper.make_tensor(
name='const_tensor',
data_type=onnx.TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(float),
),
)
expect(node, inputs=[], outputs=[values],
name='test_constant')
| 25.088235 | 57 | 0.601407 | 584 | 0.684642 | 0 | 0 | 557 | 0.652989 | 0 | 0 | 102 | 0.119578 |
b4566ae71e0167b9625fba644e2226d4778cf90a | 4,337 | py | Python | nvd3/multiChart.py | areski/python-nvd3 | 1890ad28e13f9a35d8b338817ef161720576538b | [
"MIT"
]
| 442 | 2015-01-12T10:13:52.000Z | 2022-01-11T15:18:48.000Z | nvd3/multiChart.py | areski/python-nvd3 | 1890ad28e13f9a35d8b338817ef161720576538b | [
"MIT"
]
| 106 | 2015-01-11T20:27:50.000Z | 2021-11-05T17:18:15.000Z | nvd3/multiChart.py | areski/python-nvd3 | 1890ad28e13f9a35d8b338817ef161720576538b | [
"MIT"
]
| 161 | 2015-01-06T13:31:18.000Z | 2022-03-09T05:22:30.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Python-nvd3 is a Python wrapper for NVD3 graph library.
NVD3 is an attempt to build re-usable charts and chart components
for d3.js without taking away the power that d3.js gives you.
Project location : https://github.com/areski/python-nvd3
"""
from .NVD3Chart import NVD3Chart, TemplateMixin
class multiChart(TemplateMixin, NVD3Chart):
"""
A multiChart is a type of chart which combines several plots of the same or different types.
Python example::
from nvd3 import multiChart
type = "multiChart"
chart = multiChart(name=type, x_is_date=False, x_axis_format="AM_PM")
xdata = [1,2,3,4,5,6]
ydata = [115.5,160.5,108,145.5,84,70.5]
ydata2 = [48624,42944,43439,24194,38440,31651]
kwargs1 = {'color': 'black'}
kwargs2 = {'color': 'red'}
extra_serie = {"tooltip": {"y_start": "There is ", "y_end": " calls"}}
chart.add_serie(y=ydata, x=xdata, type='line', yaxis=1, name='visits', extra=extra_serie, **kwargs1)
extra_serie = {"tooltip": {"y_start": "", "y_end": " min"}}
chart.add_serie(y=ydata2, x=xdata, type='bar', yaxis=2,name='spend', extra=extra_serie, **kwargs2)
chart.buildhtml()
Javascript rendered to:
.. raw:: html
<div id="multichart"><svg style="height:450px;"></svg></div>
<script>
            data_multichart=[{"color": "black", "type": "line", "values": [{"y": 115.5, "x": 1}, {"y": 160.5, "x": 2}, {"y": 108, "x": 3}, {"y": 145.5, "x": 4}, {"y": 84, "x": 5}, {"y": 70.5, "x": 6}], "key": "visits", "yAxis": 1}, {"color": "red", "type": "bar", "values": [{"y": 48624, "x": 1}, {"y": 42944, "x": 2}, {"y": 43439, "x": 3}, {"y": 24194, "x": 4}, {"y": 38440, "x": 5}, {"y": 31651, "x": 6}], "key": "spend", "yAxis": 2}];
nv.addGraph(function() {
var chart = nv.models.multiChart();
chart.margin({top: 30, right: 60, bottom: 20, left: 60});
var datum = data_multichart;
chart.yAxis1
.tickFormat(d3.format(',.02f'));
chart.yAxis2
.tickFormat(d3.format(',.02f'));
chart.xAxis
.tickFormat(function(d) { return get_am_pm(parseInt(d)); });
function get_am_pm(d){
if (d > 12) {
d = d - 12; return (String(d) + 'PM');
}
else {
return (String(d) + 'AM');
}
};
chart.showLegend(true);
d3.select('#multichart svg')
.datum(datum)
.transition().duration(500)
.attr('height', 450)
.call(chart);
});
</script>
See the source code of this page, to see the underlying javascript.
"""
CHART_FILENAME = "./multichart.html"
template_chart_nvd3 = NVD3Chart.template_environment.get_template(CHART_FILENAME)
def __init__(self, **kwargs):
super(multiChart, self).__init__(**kwargs)
self.model = 'multiChart'
height = kwargs.get('height', 450)
width = kwargs.get('width', None)
if kwargs.get('x_is_date', False):
self.set_date_flag(True)
self.create_x_axis('xAxis',
format=kwargs.get('x_axis_format', '%d %b %Y'),
date=True)
self.set_custom_tooltip_flag(True)
else:
if kwargs.get('x_axis_format') == 'AM_PM':
self.x_axis_format = format = 'AM_PM'
else:
format = kwargs.get('x_axis_format', 'r')
self.create_x_axis('xAxis', format=format,
custom_format=kwargs.get('x_custom_format',
False))
self.create_y_axis(
'yAxis1',
format=kwargs.get('y1_axis_format', '.02f'),
custom_format=kwargs.get('y1_custom_format', False))
self.create_y_axis(
'yAxis2',
format=kwargs.get('y2_axis_format', '.02f'),
custom_format=kwargs.get('y2_custom_format', False))
        # must have a specified height, otherwise it superimposes both charts
self.set_graph_height(height)
if width:
self.set_graph_width(width)
| 34.975806 | 142 | 0.537929 | 3,992 | 0.920452 | 0 | 0 | 0 | 0 | 0 | 0 | 3,022 | 0.696795 |
b456f67dd7f5c131a9b03642b6c76bf26ca01173 | 4,114 | py | Python | utils/config.py | AlbertiPot/nar | eb081f0e1ee16c2b1eb5e6e2afd41254cd7dce28 | [
"BSD-3-Clause"
]
| 2 | 2022-02-08T06:45:41.000Z | 2022-02-09T03:49:54.000Z | utils/config.py | AlbertiPot/nar | eb081f0e1ee16c2b1eb5e6e2afd41254cd7dce28 | [
"BSD-3-Clause"
]
| null | null | null | utils/config.py | AlbertiPot/nar | eb081f0e1ee16c2b1eb5e6e2afd41254cd7dce28 | [
"BSD-3-Clause"
]
| null | null | null | """
Date: 2021/09/23
Target: config utilities for yml file.
implementation adapted from Slimmable: https://github.com/JiahuiYu/slimmable_networks.git
"""
import os
import yaml
class LoaderMeta(type):
"""
Constructor for supporting `!include`.
"""
def __new__(mcs, __name__, __bases__, __dict__):
"""Add include constructer to class."""
# register the include constructor on the class
cls = super().__new__(mcs, __name__, __bases__, __dict__)
cls.add_constructor('!include', cls.construct_include)
return cls
class Loader(yaml.Loader, metaclass=LoaderMeta):
"""
YAML Loader with `!include` constructor.
"""
def __init__(self, stream):
try:
self._root = os.path.split(stream.name)[0]
except AttributeError:
self._root = os.path.curdir
super().__init__(stream)
def construct_include(self, node):
"""
Include file referenced at node.
"""
filename = os.path.abspath(
os.path.join(self._root, self.construct_scalar(node)))
extension = os.path.splitext(filename)[1].lstrip('.')
with open(filename, 'r') as f:
if extension in ('yaml', 'yml'):
return yaml.load(f, Loader)
else:
return ''.join(f.readlines())
class AttrDict(dict):
"""
Dict as attribute trick.
"""
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
for key in self.__dict__:
value = self.__dict__[key]
if isinstance(value, dict):
self.__dict__[key] = AttrDict(value)
elif isinstance(value, list):
if isinstance(value[0], dict):
self.__dict__[key] = [AttrDict(item) for item in value]
else:
self.__dict__[key] = value
def yaml(self):
"""
Convert object to yaml dict and return.
"""
yaml_dict = {}
for key in self.__dict__:
value = self.__dict__[key]
if isinstance(value, AttrDict):
yaml_dict[key] = value.yaml()
elif isinstance(value, list):
if isinstance(value[0], AttrDict):
new_l = []
for item in value:
new_l.append(item.yaml())
yaml_dict[key] = new_l
else:
yaml_dict[key] = value
else:
yaml_dict[key] = value
return yaml_dict
def __repr__(self):
"""
Print all variables.
"""
ret_str = []
for key in self.__dict__:
value = self.__dict__[key]
if isinstance(value, AttrDict):
ret_str.append('{}:'.format(key))
child_ret_str = value.__repr__().split('\n')
for item in child_ret_str:
ret_str.append(' ' + item)
elif isinstance(value, list):
if isinstance(value[0], AttrDict):
ret_str.append('{}:'.format(key))
for item in value:
# treat as AttrDict above
child_ret_str = item.__repr__().split('\n')
for item in child_ret_str:
ret_str.append(' ' + item)
else:
ret_str.append('{}: {}'.format(key, value))
else:
ret_str.append('{}: {}'.format(key, value))
return '\n'.join(ret_str)
class Config(AttrDict):
def __init__(self, filename=None):
try:
with open(filename, 'r') as f:
cfg_dict = yaml.load(f, Loader)
except EnvironmentError:
            print('Please check the file with the name "%s".' % filename)
super(Config, self).__init__(cfg_dict)
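# Illustrative usage: a minimal sketch in which "app.yml" and the nested keys
# are hypothetical placeholders.
#
#   cfg = get_config('app.yml')       # or Config('app.yml') directly
#   print(cfg.train.batch_size)       # nested dicts are exposed as attributes
#   print(cfg.yaml())                 # convert back to a plain dict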
def get_config(config_file):
    assert os.path.exists(config_file), 'File {} does not exist.'.format(config_file)
return Config(config_file) | 33.177419 | 89 | 0.528439 | 3,784 | 0.919786 | 0 | 0 | 0 | 0 | 0 | 0 | 721 | 0.175255 |
b457086f7edbfe9899dbbd18e732ac94f61af7c3 | 2,798 | py | Python | src/api/fundings/entities.py | cbn-alpin/gefiproj-api | 35e3f00dd71bcdd9ad751307ac379aa78d1545cf | [
"MIT"
]
| 2 | 2020-10-15T15:16:08.000Z | 2020-11-06T10:41:13.000Z | src/api/fundings/entities.py | cbn-alpin/gefiproj-api | 35e3f00dd71bcdd9ad751307ac379aa78d1545cf | [
"MIT"
]
| 1 | 2020-11-14T19:40:14.000Z | 2020-11-14T19:40:14.000Z | src/api/fundings/entities.py | cbn-alpin/gefiproj-api | 35e3f00dd71bcdd9ad751307ac379aa78d1545cf | [
"MIT"
]
| null | null | null | from marshmallow import Schema, fields, validate
from sqlalchemy import Column, String, Integer, Float, Date, ForeignKey
from sqlalchemy.orm import relationship
from ..funders.entities import Funder, FunderSchema
from src.api import db
from src.shared.entity import Base
class Funding(Base, db.Model):
__tablename__ = 'financement'
id_f = Column(Integer, primary_key=True)
id_p = Column(Integer, nullable=False)
id_financeur = Column(Integer, ForeignKey('financeur.id_financeur'), nullable=False)
financeur = relationship("Funder")
montant_arrete_f = Column(Float, nullable=False)
statut_f = Column(String(250), nullable=False)
date_solde_f = Column(Date)
date_arrete_f = Column(Date)
date_limite_solde_f = Column(Date)
commentaire_admin_f = Column(String(250))
commentaire_resp_f = Column(String(250))
numero_titre_f = Column(String(250))
annee_titre_f = Column(String(250))
imputation_f = Column(String(250))
def __init__(self, id_p, id_financeur, montant_arrete_f, statut_f, date_solde_f = None, date_arrete_f=None,
date_limite_solde_f=None, commentaire_admin_f='', commentaire_resp_f='', numero_titre_f='',
annee_titre_f='', imputation_f='', id_f=''):
if id_f != '':
self.id_f = id_f
self.id_p = id_p
self.id_financeur = id_financeur
self.montant_arrete_f = montant_arrete_f
self.statut_f = statut_f
self.date_solde_f = date_solde_f
self.date_arrete_f = date_arrete_f
self.date_limite_solde_f = date_limite_solde_f
self.commentaire_admin_f = commentaire_admin_f
self.commentaire_resp_f = commentaire_resp_f
self.numero_titre_f = numero_titre_f
self.annee_titre_f = annee_titre_f
self.imputation_f = imputation_f
class FundingSchema(Schema):
id_f = fields.Integer()
id_p = fields.Integer(required=True)
id_financeur = fields.Integer(required=True)
financeur = fields.Nested(FunderSchema)
montant_arrete_f = fields.Float(required=True)
statut_f = fields.Str(validate=validate.OneOf(["ANTR", "ATR", "SOLDE"]), required=True)
date_solde_f = fields.Date(allow_none=True)
date_arrete_f = fields.Date(allow_none=True)
date_limite_solde_f = fields.Date(allow_none=True)
commentaire_admin_f = fields.Str(allow_none=True)
commentaire_resp_f = fields.Str(allow_none=True)
numero_titre_f = fields.Str(allow_none=True)
annee_titre_f = fields.Str(allow_none=True)
imputation_f = fields.Str(allow_none=True)
# TODO find solution to replace because option unknown=INCLUDE don't work in a list
difference = fields.Float(allow_none=True)
solde = fields.Float(allow_none=True)
nom_financeur = fields.Str(allow_none=True) | 43.046154 | 111 | 0.723016 | 2,521 | 0.901001 | 0 | 0 | 0 | 0 | 0 | 0 | 160 | 0.057184 |
b4572258e2d4b6f3324edd66303f6bcb5c6923f1 | 2,817 | py | Python | src/extensions/COMMANDS/CommitCommand.py | DMTF/python-redfish-utility | 4e084d47152aeaede576b32252a38b7dc4525dcb | [
"BSD-3-Clause"
]
| 15 | 2017-01-20T02:25:05.000Z | 2022-03-26T17:42:47.000Z | src/extensions/COMMANDS/CommitCommand.py | DMTF/python-redfish-utility | 4e084d47152aeaede576b32252a38b7dc4525dcb | [
"BSD-3-Clause"
]
| 5 | 2017-05-11T17:07:26.000Z | 2018-08-03T18:08:27.000Z | src/extensions/COMMANDS/CommitCommand.py | DMTF/python-redfish-utility | 4e084d47152aeaede576b32252a38b7dc4525dcb | [
"BSD-3-Clause"
]
| 11 | 2017-01-24T22:50:20.000Z | 2022-03-22T18:40:31.000Z | ###
# Copyright Notice:
# Copyright 2016 Distributed Management Task Force, Inc. All rights reserved.
# License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/python-redfish-utility/blob/master/LICENSE.md
###
""" Commit Command for RDMC """
import sys
from optparse import OptionParser
from rdmc_helper import ReturnCodes, InvalidCommandLineErrorOPTS, \
NoChangesFoundOrMadeError, NoCurrentSessionEstablished
from rdmc_base_classes import RdmcCommandBase
class CommitCommand(RdmcCommandBase):
""" Constructor """
def __init__(self, rdmcObj):
RdmcCommandBase.__init__(self,\
name='commit',\
usage='commit [OPTIONS]\n\n\tRun to apply all changes made during the' \
' current session\n\texample: commit',\
summary='Applies all the changes made during the current' \
' session.',\
aliases=[],\
optparser=OptionParser())
self.definearguments(self.parser)
self._rdmc = rdmcObj
self.logoutobj = rdmcObj.commandsDict["LogoutCommand"](rdmcObj)
def commitfunction(self, options=None):
""" Main commit worker function
:param options: command line options
:type options: list.
"""
self.commitvalidation()
sys.stdout.write("Committing changes...\n")
if not self._rdmc.app.commit(verbose=self._rdmc.opts.verbose):
raise NoChangesFoundOrMadeError("No changes found or made " \
"during commit operation.")
self.logoutobj.logoutfunction("")
def run(self, line):
""" Wrapper function for commit main function
:param line: command line input
:type line: string.
"""
try:
(options, _) = self._parse_arglist(line)
except:
if ("-h" in line) or ("--help" in line):
return ReturnCodes.SUCCESS
else:
raise InvalidCommandLineErrorOPTS("")
self.commitfunction(options)
#Return code
return ReturnCodes.SUCCESS
def commitvalidation(self):
""" Commit method validation function """
try:
self._rdmc.app.get_current_client()
except:
raise NoCurrentSessionEstablished("Please login and make setting" \
" changes before using commit command.")
def definearguments(self, customparser):
""" Wrapper function for new command main function
:param customparser: command line input
:type customparser: parser.
"""
if not customparser:
return
| 33.535714 | 127 | 0.587504 | 2,289 | 0.812567 | 0 | 0 | 0 | 0 | 0 | 0 | 1,096 | 0.389066 |
b4576da0f0a8dc0e5c329b27142e2588b0fb6f46 | 16,721 | py | Python | rosetta/views.py | evrenesat/ganihomes | eece2d8d957989b176cc5a36d723f676862f8d17 | [
"BSD-2-Clause"
]
| 24 | 2016-08-06T18:10:54.000Z | 2022-03-04T11:47:39.000Z | rosetta/views.py | evrenesat/ganihomes | eece2d8d957989b176cc5a36d723f676862f8d17 | [
"BSD-2-Clause"
]
| 1 | 2017-03-28T02:36:50.000Z | 2017-03-28T07:18:57.000Z | rosetta/views.py | evrenesat/ganihomes | eece2d8d957989b176cc5a36d723f676862f8d17 | [
"BSD-2-Clause"
]
| 13 | 2017-03-28T02:35:32.000Z | 2022-02-21T23:36:15.000Z | from django.conf import settings
from django.contrib.auth.decorators import user_passes_test
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.encoding import smart_unicode, iri_to_uri
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.cache import never_cache
from rosetta.conf import settings as rosetta_settings
from rosetta.polib import pofile
from rosetta.poutil import find_pos, pagination_range
from rosetta.signals import entry_changed, post_save
import re
import rosetta
import datetime
import unicodedata
import hashlib
import os
def home(request):
"""
Displays a list of messages to be translated
"""
def fix_nls(in_, out_):
"""Fixes submitted translations by filtering carriage returns and pairing
        newlines at the beginning and end of the translated string with the original
"""
if 0 == len(in_) or 0 == len(out_):
return out_
if "\r" in out_ and "\r" not in in_:
out_ = out_.replace("\r", '')
if "\n" == in_[0] and "\n" != out_[0]:
out_ = "\n" + out_
elif "\n" != in_[0] and "\n" == out_[0]:
out_ = out_.lstrip()
if "\n" == in_[-1] and "\n" != out_[-1]:
out_ = out_ + "\n"
elif "\n" != in_[-1] and "\n" == out_[-1]:
out_ = out_.rstrip()
return out_
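    # For example, fix_nls("\nfoo\n", "bar") returns "\nbar\n": the translation
    # inherits the leading/trailing newlines of the original msgid.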
version = rosetta.get_version(True)
if 'rosetta_i18n_fn' in request.session:
rosetta_i18n_fn = request.session.get('rosetta_i18n_fn')
rosetta_i18n_app = get_app_name(rosetta_i18n_fn)
rosetta_i18n_lang_code = request.session['rosetta_i18n_lang_code']
rosetta_i18n_lang_bidi = rosetta_i18n_lang_code.split('-')[0] in settings.LANGUAGES_BIDI
rosetta_i18n_write = request.session.get('rosetta_i18n_write', True)
if rosetta_i18n_write:
rosetta_i18n_pofile = pofile(rosetta_i18n_fn)
for entry in rosetta_i18n_pofile:
entry.md5hash = hashlib.md5(entry.msgid.encode("utf8") + entry.msgstr.encode("utf8")).hexdigest()
else:
rosetta_i18n_pofile = request.session.get('rosetta_i18n_pofile')
if 'filter' in request.GET:
if request.GET.get('filter') in ('untranslated', 'translated', 'fuzzy', 'all'):
filter_ = request.GET.get('filter')
request.session['rosetta_i18n_filter'] = filter_
return HttpResponseRedirect(reverse('rosetta-home'))
rosetta_i18n_filter = request.session.get('rosetta_i18n_filter', 'all')
if '_next' in request.POST:
rx = re.compile(r'^m_([0-9a-f]+)')
rx_plural = re.compile(r'^m_([0-9a-f]+)_([0-9]+)')
file_change = False
for key, value in request.POST.items():
md5hash = None
plural_id = None
if rx_plural.match(key):
md5hash = str(rx_plural.match(key).groups()[0])
# polib parses .po files into unicode strings, but
# doesn't bother to convert plural indexes to int,
# so we need unicode here.
plural_id = unicode(rx_plural.match(key).groups()[1])
elif rx.match(key):
md5hash = str(rx.match(key).groups()[0])
if md5hash is not None:
entry = rosetta_i18n_pofile.find(md5hash, 'md5hash')
# If someone did a makemessage, some entries might
# have been removed, so we need to check.
if entry:
old_msgstr = entry.msgstr
if plural_id is not None:
plural_string = fix_nls(entry.msgstr_plural[plural_id], value)
entry.msgstr_plural[plural_id] = plural_string
else:
entry.msgstr = fix_nls(entry.msgid, value)
is_fuzzy = bool(request.POST.get('f_%s' % md5hash, False))
old_fuzzy = 'fuzzy' in entry.flags
if old_fuzzy and not is_fuzzy:
entry.flags.remove('fuzzy')
elif not old_fuzzy and is_fuzzy:
entry.flags.append('fuzzy')
file_change = True
if old_msgstr != value or old_fuzzy != is_fuzzy:
entry_changed.send(sender=entry,
user=request.user,
old_msgstr=old_msgstr,
old_fuzzy=old_fuzzy,
pofile=rosetta_i18n_fn,
language_code=rosetta_i18n_lang_code,
)
else:
request.session['rosetta_last_save_error'] = True
if file_change and rosetta_i18n_write:
try:
# Provide defaults in case authorization is not required.
request.user.first_name = getattr(request.user, 'first_name', 'Anonymous')
request.user.last_name = getattr(request.user, 'last_name', 'User')
request.user.email = getattr(request.user, 'email', '[email protected]')
rosetta_i18n_pofile.metadata['Last-Translator'] = unicodedata.normalize('NFKD', u"%s %s <%s>" % (request.user.first_name, request.user.last_name, request.user.email)).encode('ascii', 'ignore')
rosetta_i18n_pofile.metadata['X-Translated-Using'] = u"django-rosetta %s" % rosetta.get_version(False)
rosetta_i18n_pofile.metadata['PO-Revision-Date'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M%z')
except UnicodeDecodeError:
pass
try:
rosetta_i18n_pofile.save()
rosetta_i18n_pofile.save_as_mofile(rosetta_i18n_fn.replace('.po', '.mo'))
post_save.send(sender=None, language_code=rosetta_i18n_lang_code, request=request)
# Try auto-reloading via the WSGI daemon mode reload mechanism
if rosetta_settings.WSGI_AUTO_RELOAD and \
'mod_wsgi.process_group' in request.environ and \
request.environ.get('mod_wsgi.process_group', None) and \
'SCRIPT_FILENAME' in request.environ and \
int(request.environ.get('mod_wsgi.script_reloading', '0')):
try:
os.utime(request.environ.get('SCRIPT_FILENAME'), None)
except OSError:
pass
# Try auto-reloading via uwsgi daemon reload mechanism
if rosetta_settings.UWSGI_AUTO_RELOAD:
try:
import uwsgi
# pretty easy right?
uwsgi.reload()
except:
# we may not be running under uwsgi :P
pass
except:
request.session['rosetta_i18n_write'] = False
request.session['rosetta_i18n_pofile'] = rosetta_i18n_pofile
# Retain query arguments
query_arg = ''
if 'query' in request.REQUEST:
query_arg = '?query=%s' % request.REQUEST.get('query')
if 'page' in request.GET:
if query_arg:
query_arg = query_arg + '&'
else:
query_arg = '?'
query_arg = query_arg + 'page=%d' % int(request.GET.get('page'))
return HttpResponseRedirect(reverse('rosetta-home') + iri_to_uri(query_arg))
rosetta_i18n_lang_name = _(request.session.get('rosetta_i18n_lang_name'))
rosetta_i18n_lang_code = request.session.get('rosetta_i18n_lang_code')
if 'query' in request.REQUEST and request.REQUEST.get('query', '').strip():
query = request.REQUEST.get('query').strip()
rx = re.compile(re.escape(query), re.IGNORECASE)
paginator = Paginator([e for e in rosetta_i18n_pofile if not e.obsolete and rx.search(smart_unicode(e.msgstr) + smart_unicode(e.msgid) + u''.join([o[0] for o in e.occurrences]))], rosetta_settings.MESSAGES_PER_PAGE)
else:
if rosetta_i18n_filter == 'untranslated':
paginator = Paginator(rosetta_i18n_pofile.untranslated_entries(), rosetta_settings.MESSAGES_PER_PAGE)
elif rosetta_i18n_filter == 'translated':
paginator = Paginator(rosetta_i18n_pofile.translated_entries(), rosetta_settings.MESSAGES_PER_PAGE)
elif rosetta_i18n_filter == 'fuzzy':
paginator = Paginator([e for e in rosetta_i18n_pofile.fuzzy_entries() if not e.obsolete], rosetta_settings.MESSAGES_PER_PAGE)
else:
paginator = Paginator([e for e in rosetta_i18n_pofile if not e.obsolete], rosetta_settings.MESSAGES_PER_PAGE)
if 'page' in request.GET and int(request.GET.get('page')) <= paginator.num_pages and int(request.GET.get('page')) > 0:
page = int(request.GET.get('page'))
else:
page = 1
messages = paginator.page(page).object_list
if rosetta_settings.MAIN_LANGUAGE and rosetta_settings.MAIN_LANGUAGE != rosetta_i18n_lang_code:
main_language = None
for language in settings.LANGUAGES:
if language[0] == rosetta_settings.MAIN_LANGUAGE:
main_language = _(language[1])
break
fl = ("/%s/" % rosetta_settings.MAIN_LANGUAGE).join(rosetta_i18n_fn.split("/%s/" % rosetta_i18n_lang_code))
po = pofile(fl)
main_messages = []
for message in messages:
message.main_lang = po.find(message.msgid).msgstr
needs_pagination = paginator.num_pages > 1
if needs_pagination:
if paginator.num_pages >= 10:
page_range = pagination_range(1, paginator.num_pages, page)
else:
page_range = range(1, 1 + paginator.num_pages)
ADMIN_MEDIA_PREFIX = settings.STATIC_URL
ENABLE_TRANSLATION_SUGGESTIONS = rosetta_settings.BING_APP_ID and rosetta_settings.ENABLE_TRANSLATION_SUGGESTIONS
BING_APP_ID = rosetta_settings.BING_APP_ID
MESSAGES_SOURCE_LANGUAGE_NAME = rosetta_settings.MESSAGES_SOURCE_LANGUAGE_NAME
MESSAGES_SOURCE_LANGUAGE_CODE = rosetta_settings.MESSAGES_SOURCE_LANGUAGE_CODE
if 'rosetta_last_save_error' in request.session:
del(request.session['rosetta_last_save_error'])
rosetta_last_save_error = True
return render_to_response('rosetta/pofile.html', locals(), context_instance=RequestContext(request))
else:
return list_languages(request)
home = never_cache(home)
home = user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)(home)
def download_file(request):
import zipfile
from StringIO import StringIO
# original filename
rosetta_i18n_fn = request.session.get('rosetta_i18n_fn', None)
# in-session modified catalog
rosetta_i18n_pofile = request.session.get('rosetta_i18n_pofile', None)
# language code
rosetta_i18n_lang_code = request.session.get('rosetta_i18n_lang_code', None)
if not rosetta_i18n_lang_code or not rosetta_i18n_pofile or not rosetta_i18n_fn:
return HttpResponseRedirect(reverse('rosetta-home'))
try:
if len(rosetta_i18n_fn.split('/')) >= 5:
offered_fn = '_'.join(rosetta_i18n_fn.split('/')[-5:])
else:
offered_fn = rosetta_i18n_fn.split('/')[-1]
po_fn = str(rosetta_i18n_fn.split('/')[-1])
mo_fn = str(po_fn.replace('.po', '.mo')) # not so smart, huh
zipdata = StringIO()
zipf = zipfile.ZipFile(zipdata, mode="w")
zipf.writestr(po_fn, unicode(rosetta_i18n_pofile).encode("utf8"))
zipf.writestr(mo_fn, rosetta_i18n_pofile.to_binary())
zipf.close()
zipdata.seek(0)
response = HttpResponse(zipdata.read())
response['Content-Disposition'] = 'attachment; filename=%s.%s.zip' % (offered_fn, rosetta_i18n_lang_code)
response['Content-Type'] = 'application/x-zip'
return response
except Exception:
return HttpResponseRedirect(reverse('rosetta-home'))
download_file = never_cache(download_file)
download_file = user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)(download_file)
def list_languages(request):
"""
Lists the languages for the current project, the gettext catalog files
that can be translated and their translation progress
"""
languages = []
if 'filter' in request.GET:
if request.GET.get('filter') in ('project', 'third-party', 'django', 'all'):
filter_ = request.GET.get('filter')
request.session['rosetta_i18n_catalog_filter'] = filter_
return HttpResponseRedirect(reverse('rosetta-pick-file'))
rosetta_i18n_catalog_filter = request.session.get('rosetta_i18n_catalog_filter', 'project')
third_party_apps = rosetta_i18n_catalog_filter in ('all', 'third-party')
django_apps = rosetta_i18n_catalog_filter in ('all', 'django')
project_apps = rosetta_i18n_catalog_filter in ('all', 'project')
has_pos = False
for language in settings.LANGUAGES:
pos = find_pos(language[0], project_apps=project_apps, django_apps=django_apps, third_party_apps=third_party_apps)
has_pos = has_pos or len(pos)
languages.append(
(language[0],
_(language[1]),
[(get_app_name(l), os.path.realpath(l), pofile(l)) for l in pos],
)
)
ADMIN_MEDIA_PREFIX = settings.STATIC_URL
version = rosetta.get_version(True)
return render_to_response('rosetta/languages.html', locals(), context_instance=RequestContext(request))
list_languages = never_cache(list_languages)
list_languages = user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)(list_languages)
def get_app_name(path):
app = path.split("/locale")[0].split("/")[-1]
return app
def lang_sel(request, langid, idx):
"""
Selects a file to be translated
"""
if langid not in [l[0] for l in settings.LANGUAGES]:
raise Http404
else:
rosetta_i18n_catalog_filter = request.session.get('rosetta_i18n_catalog_filter', 'project')
third_party_apps = rosetta_i18n_catalog_filter in ('all', 'third-party')
django_apps = rosetta_i18n_catalog_filter in ('all', 'django')
project_apps = rosetta_i18n_catalog_filter in ('all', 'project')
file_ = find_pos(langid, project_apps=project_apps, django_apps=django_apps, third_party_apps=third_party_apps)[int(idx)]
request.session['rosetta_i18n_lang_code'] = langid
request.session['rosetta_i18n_lang_name'] = unicode([l[1] for l in settings.LANGUAGES if l[0] == langid][0])
request.session['rosetta_i18n_fn'] = file_
po = pofile(file_)
for entry in po:
entry.md5hash = hashlib.md5(entry.msgid.encode("utf8") + entry.msgstr.encode("utf8")).hexdigest()
request.session['rosetta_i18n_pofile'] = po
try:
os.utime(file_, None)
request.session['rosetta_i18n_write'] = True
except OSError:
request.session['rosetta_i18n_write'] = False
return HttpResponseRedirect(reverse('rosetta-home'))
lang_sel = never_cache(lang_sel)
lang_sel = user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)(lang_sel)
def can_translate(user):
if not getattr(settings, 'ROSETTA_REQUIRES_AUTH', True):
return True
if not user.is_authenticated():
return False
elif user.is_superuser and user.is_staff:
return True
else:
try:
from django.contrib.auth.models import Group
translators = Group.objects.get(name='translators')
return translators in user.groups.all()
except Group.DoesNotExist:
return False
| 46.190608 | 227 | 0.609832 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,721 | 0.16273 |
b457b3b9a529a0063b2e3929017b0f26c1b32684 | 937 | py | Python | examples/experiment_pulse.py | HySynth/HySynth | b33b34ff5db138b5d007d0dcd32d53c9f8964b62 | [
"MIT"
]
| 4 | 2021-03-05T11:18:35.000Z | 2021-12-09T18:51:32.000Z | examples/experiment_pulse.py | HySynth/HySynth | b33b34ff5db138b5d007d0dcd32d53c9f8964b62 | [
"MIT"
]
| null | null | null | examples/experiment_pulse.py | HySynth/HySynth | b33b34ff5db138b5d007d0dcd32d53c9f8964b62 | [
"MIT"
]
| null | null | null | # to run this, add code from experiments_HSCC2021.py
def time_series_pulse():
path = Path(__file__).parent.parent / "data" / "real_data" / "datasets" / "basic_data"
filename1 = path / "pulse1-1.csv"
filename2 = path / "pulse1-2.csv"
filename3 = path / "pulse1-3.csv"
f1 = load_time_series(filename1, 1)
f2 = load_time_series(filename2, 1)
f3 = load_time_series(filename3, 1)
dataset = [f1, f2, f3]
return dataset
def parameters_pulse():
delta_ts = 0.02
deltas_ha = [0.1]
n_discrete_steps = 10
reachability_time_step = 1e-3
refinement_distance = 0.001
n_intermediate = 1
max_dwell_time = 4.0
min_dwell_time = None
n_simulations = 3
path_length = 6
time_step = 0.01
return delta_ts, deltas_ha, n_discrete_steps, reachability_time_step, refinement_distance, max_dwell_time,\
n_intermediate, n_simulations, path_length, time_step, min_dwell_time
| 32.310345 | 111 | 0.692636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.141942 |
b458e08685ebc8210a2ffae585d421bdf6eef379 | 13,813 | py | Python | ironic/drivers/modules/drac/management.py | Tehsmash/ironic | a34c351639e960af92a3608fbc9249dfce5c6057 | [
"Apache-2.0"
]
| null | null | null | ironic/drivers/modules/drac/management.py | Tehsmash/ironic | a34c351639e960af92a3608fbc9249dfce5c6057 | [
"Apache-2.0"
]
| null | null | null | ironic/drivers/modules/drac/management.py | Tehsmash/ironic | a34c351639e960af92a3608fbc9249dfce5c6057 | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
DRAC Management Driver
"""
from oslo.utils import excutils
from oslo.utils import importutils
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.drivers import base
from ironic.drivers.modules.drac import common as drac_common
from ironic.drivers.modules.drac import resource_uris
from ironic.openstack.common import log as logging
pywsman = importutils.try_import('pywsman')
LOG = logging.getLogger(__name__)
_BOOT_DEVICES_MAP = {
boot_devices.DISK: 'HardDisk',
boot_devices.PXE: 'NIC',
boot_devices.CDROM: 'Optical',
}
# IsNext constants
PERSISTENT = '1'
""" Is the next boot config the system will use. """
NOT_NEXT = '2'
""" Is not the next boot config the system will use. """
ONE_TIME_BOOT = '3'
""" Is the next boot config the system will use, one time boot only. """
def _get_next_boot_mode(node):
"""Get the next boot mode.
To see a list of supported boot modes see: http://goo.gl/aEsvUH
(Section 7.2)
:param node: an ironic node object.
:raises: DracClientError on an error from pywsman library.
:returns: a dictionary containing:
:instance_id: the instance id of the boot device.
:is_next: whether it's the next device to boot or not. One of
PERSISTENT, NOT_NEXT, ONE_TIME_BOOT constants.
"""
client = drac_common.get_wsman_client(node)
options = pywsman.ClientOptions()
filter_query = ('select * from DCIM_BootConfigSetting where IsNext=%s '
'or IsNext=%s' % (PERSISTENT, ONE_TIME_BOOT))
try:
doc = client.wsman_enumerate(resource_uris.DCIM_BootConfigSetting,
options, filter_query=filter_query)
except exception.DracClientError as exc:
with excutils.save_and_reraise_exception():
LOG.error(_LE('DRAC driver failed to get next boot mode for '
'node %(node_uuid)s. Reason: %(error)s.'),
{'node_uuid': node.uuid, 'error': exc})
items = drac_common.find_xml(doc, 'DCIM_BootConfigSetting',
resource_uris.DCIM_BootConfigSetting,
find_all=True)
# This list will have 2 items maximum, one for the persistent element
# and another one for the OneTime if set
boot_mode = None
for i in items:
instance_id = drac_common.find_xml(i, 'InstanceID',
resource_uris.DCIM_BootConfigSetting).text
is_next = drac_common.find_xml(i, 'IsNext',
resource_uris.DCIM_BootConfigSetting).text
boot_mode = {'instance_id': instance_id, 'is_next': is_next}
# If OneTime is set we should return it, because that's
# where the next boot device is
if is_next == ONE_TIME_BOOT:
break
return boot_mode
def _create_config_job(node):
"""Create a configuration job.
This method is used to apply the pending values created by
set_boot_device().
:param node: an ironic node object.
:raises: DracClientError on an error from pywsman library.
:raises: DracConfigJobCreationError on an error when creating the job.
"""
client = drac_common.get_wsman_client(node)
options = pywsman.ClientOptions()
options.add_selector('CreationClassName', 'DCIM_BIOSService')
options.add_selector('Name', 'DCIM:BIOSService')
options.add_selector('SystemCreationClassName', 'DCIM_ComputerSystem')
options.add_selector('SystemName', 'DCIM:ComputerSystem')
options.add_property('Target', 'BIOS.Setup.1-1')
options.add_property('ScheduledStartTime', 'TIME_NOW')
doc = client.wsman_invoke(resource_uris.DCIM_BIOSService,
options, 'CreateTargetedConfigJob')
return_value = drac_common.find_xml(doc, 'ReturnValue',
resource_uris.DCIM_BIOSService).text
# NOTE(lucasagomes): Possible return values are: RET_ERROR for error
# or RET_CREATED job created (but changes will be
# applied after the reboot)
# Boot Management Documentation: http://goo.gl/aEsvUH (Section 8.4)
if return_value == drac_common.RET_ERROR:
error_message = drac_common.find_xml(doc, 'Message',
resource_uris.DCIM_BIOSService).text
raise exception.DracConfigJobCreationError(error=error_message)
def _check_for_config_job(node):
"""Check if a configuration job is already created.
:param node: an ironic node object.
:raises: DracClientError on an error from pywsman library.
:raises: DracConfigJobCreationError if the job is already created.
"""
client = drac_common.get_wsman_client(node)
options = pywsman.ClientOptions()
try:
doc = client.wsman_enumerate(resource_uris.DCIM_LifecycleJob, options)
except exception.DracClientError as exc:
with excutils.save_and_reraise_exception():
LOG.error(_LE('DRAC driver failed to list the configuration jobs '
'for node %(node_uuid)s. Reason: %(error)s.'),
{'node_uuid': node.uuid, 'error': exc})
items = drac_common.find_xml(doc, 'DCIM_LifecycleJob',
resource_uris.DCIM_LifecycleJob,
find_all=True)
for i in items:
name = drac_common.find_xml(i, 'Name', resource_uris.DCIM_LifecycleJob)
if 'BIOS.Setup.1-1' not in name.text:
continue
job_status = drac_common.find_xml(i, 'JobStatus',
resource_uris.DCIM_LifecycleJob).text
# If job is already completed or failed we can
# create another one.
# Job Control Documentation: http://goo.gl/o1dDD3 (Section 7.2.3.2)
if job_status.lower() not in ('completed', 'failed'):
job_id = drac_common.find_xml(i, 'InstanceID',
resource_uris.DCIM_LifecycleJob).text
reason = (_('Another job with ID "%s" is already created '
'to configure the BIOS. Wait until existing job '
'is completed or is cancelled') % job_id)
raise exception.DracConfigJobCreationError(error=reason)
class DracManagement(base.ManagementInterface):
def get_properties(self):
return drac_common.COMMON_PROPERTIES
def validate(self, task):
"""Validate the driver-specific info supplied.
This method validates whether the 'driver_info' property of the
supplied node contains the required information for this driver to
manage the node.
:param task: a TaskManager instance containing the node to act on.
:raises: InvalidParameterValue if required driver_info attribute
is missing or invalid on the node.
"""
return drac_common.parse_driver_info(task.node)
def get_supported_boot_devices(self):
"""Get a list of the supported boot devices.
:returns: A list with the supported boot devices defined
in :mod:`ironic.common.boot_devices`.
"""
return list(_BOOT_DEVICES_MAP.keys())
def set_boot_device(self, task, device, persistent=False):
"""Set the boot device for a node.
Set the boot device to use on next reboot of the node.
:param task: a task from TaskManager.
:param device: the boot device, one of
:mod:`ironic.common.boot_devices`.
:param persistent: Boolean value. True if the boot device will
persist to all future boots, False if not.
Default: False.
:raises: DracClientError on an error from pywsman library.
:raises: InvalidParameterValue if an invalid boot device is
specified.
:raises: DracConfigJobCreationError on an error when creating the job.
"""
# Check for an existing configuration job
_check_for_config_job(task.node)
client = drac_common.get_wsman_client(task.node)
options = pywsman.ClientOptions()
filter_query = ("select * from DCIM_BootSourceSetting where "
"InstanceID like '%%#%s%%'" %
_BOOT_DEVICES_MAP[device])
try:
doc = client.wsman_enumerate(resource_uris.DCIM_BootSourceSetting,
options, filter_query=filter_query)
except exception.DracClientError as exc:
with excutils.save_and_reraise_exception():
LOG.error(_LE('DRAC driver failed to set the boot device '
'for node %(node_uuid)s. Can\'t find the ID '
'for the %(device)s type. Reason: %(error)s.'),
{'node_uuid': task.node.uuid, 'error': exc,
'device': device})
instance_id = drac_common.find_xml(doc, 'InstanceID',
resource_uris.DCIM_BootSourceSetting).text
source = 'OneTime'
if persistent:
source = drac_common.find_xml(doc, 'BootSourceType',
resource_uris.DCIM_BootSourceSetting).text
# NOTE(lucasagomes): Don't ask me why 'BootSourceType' is set
# for 'InstanceID' and 'InstanceID' is set for 'source'! You
# know enterprisey...
options = pywsman.ClientOptions()
options.add_selector('InstanceID', source)
options.add_property('source', instance_id)
doc = client.wsman_invoke(resource_uris.DCIM_BootConfigSetting,
options, 'ChangeBootOrderByInstanceID')
return_value = drac_common.find_xml(doc, 'ReturnValue',
resource_uris.DCIM_BootConfigSetting).text
# NOTE(lucasagomes): Possible return values are: RET_ERROR for error,
# RET_SUCCESS for success or RET_CREATED job
# created (but changes will be applied after
# the reboot)
# Boot Management Documentation: http://goo.gl/aEsvUH (Section 8.7)
if return_value == drac_common.RET_ERROR:
error_message = drac_common.find_xml(doc, 'Message',
resource_uris.DCIM_BootConfigSetting).text
raise exception.DracOperationError(operation='set_boot_device',
error=error_message)
# Create a configuration job
_create_config_job(task.node)
def get_boot_device(self, task):
"""Get the current boot device for a node.
Returns the current boot device of the node.
:param task: a task from TaskManager.
:raises: DracClientError on an error from pywsman library.
:returns: a dictionary containing:
:boot_device: the boot device, one of
:mod:`ironic.common.boot_devices` or None if it is unknown.
:persistent: Whether the boot device will persist to all
future boots or not, None if it is unknown.
"""
client = drac_common.get_wsman_client(task.node)
boot_mode = _get_next_boot_mode(task.node)
persistent = boot_mode['is_next'] == PERSISTENT
instance_id = boot_mode['instance_id']
options = pywsman.ClientOptions()
filter_query = ('select * from DCIM_BootSourceSetting where '
'PendingAssignedSequence=0 and '
'BootSourceType="%s"' % instance_id)
try:
doc = client.wsman_enumerate(resource_uris.DCIM_BootSourceSetting,
options, filter_query=filter_query)
except exception.DracClientError as exc:
with excutils.save_and_reraise_exception():
LOG.error(_LE('DRAC driver failed to get the current boot '
'device for node %(node_uuid)s. '
'Reason: %(error)s.'),
{'node_uuid': task.node.uuid, 'error': exc})
instance_id = drac_common.find_xml(doc, 'InstanceID',
resource_uris.DCIM_BootSourceSetting).text
boot_device = next((key for (key, value) in _BOOT_DEVICES_MAP.items()
if value in instance_id), None)
return {'boot_device': boot_device, 'persistent': persistent}
def get_sensors_data(self, task):
"""Get sensors data.
:param task: a TaskManager instance.
:raises: FailedToGetSensorData when getting the sensor data fails.
:raises: FailedToParseSensorData when parsing sensor data fails.
:returns: returns a consistent format dict of sensor data grouped by
sensor type, which can be processed by Ceilometer.
"""
raise NotImplementedError()
| 42.371166 | 79 | 0.625136 | 6,735 | 0.487584 | 0 | 0 | 0 | 0 | 0 | 0 | 6,619 | 0.479186 |
b45b0dfbed15df61de3d9471a3707d66d6c2d4b9 | 2,037 | py | Python | matrix_diagonalization/finite_barrier_square_well.py | coherent17/physics_calculation | cf94813778984f62b2c65174fb44bebb2e9c0d05 | [
"MIT"
]
| 1 | 2021-12-30T01:11:30.000Z | 2021-12-30T01:11:30.000Z | matrix_diagonalization/finite_barrier_square_well.py | coherent17/physics_calculation | cf94813778984f62b2c65174fb44bebb2e9c0d05 | [
"MIT"
]
| null | null | null | matrix_diagonalization/finite_barrier_square_well.py | coherent17/physics_calculation | cf94813778984f62b2c65174fb44bebb2e9c0d05 | [
"MIT"
]
| null | null | null | import numpy as np
import matplotlib.pyplot as plt
#grid number on half space (without the origin)
N=150
#total grid number = 2*N + 1 (with origin)
N_g=2*N+1
#finite barrier potential value = 300 (meV)
potential_value=300
#building potential:
def potential(potential_value):
V=np.zeros((1,N_g),dtype=float)
V[0,0:100]=potential_value
V[0,100:201]=0
V[0,201:]=potential_value
return V
# Hamiltonian matrix:
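# Finite-difference scheme: the second derivative in the kinetic term is
# approximated by (psi[i+1] - 2*psi[i] + psi[i-1]) / dx**2, so each diagonal
# element carries +1/dx**2 plus the local potential V[0, i], and the two
# off-diagonals carry -0.5/dx**2; the nested loops below build exactly that.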
def Hamiltonian(V):
H=np.zeros((N_g,N_g),dtype=float)
dx=10 #0.1 (nanometer)
for i in range(0,N_g):
for j in range(0,N_g):
if i==j:
x=dx*(i-N) #position
H[i,j]=1/(dx**2)+V[0,i]
elif j==i-1 or j==i+1:
H[i,j]=-0.5/(dx**2)
return H
V=potential(potential_value)
H=Hamiltonian(V)
#sort the eigenvalue and get the corresponding eigenvector
eigenvalue,eigenvector=np.linalg.eig(H)
idx=np.argsort(eigenvalue)
eigenvalue=eigenvalue[idx]
eigenvector=eigenvector[:,idx]
#visualize
fig=plt.figure(figsize=(18,6))
ax1=fig.add_subplot(131)
x=np.linspace(0,10,11)
ax1.plot(x,eigenvalue[0:11],'r.',label='numerical')
ax1.set_xlabel('n')
ax1.set_ylabel('$E_n (meV)$')
ax1.set_title('eigen energies')
ax1.grid(True)
ax1.legend()
ax2=fig.add_subplot(132)
x=np.linspace(-5,5,301)
# x/lambda_0
x=x/(np.sqrt(2)*10**(10-9)/np.pi)
y1=eigenvector[:,0]
y2=eigenvector[:,1]
y3=eigenvector[:,2]
y4=eigenvector[:,3]
y5=eigenvector[:,4]
ax2.plot(x,(y1),label='$Ψ_{n=0}(x)$')
ax2.plot(x,(y2),label='$Ψ_{n=1}(x)$')
ax2.plot(x,(y3),label='$Ψ_{n=2}(x)$')
ax2.set_xlabel('position ($x/λ_0$) ')
ax2.set_ylabel('wavefunction')
ax2.set_title('wave function in different eigen state')
ax2.legend()
ax2.grid(True)
ax3=fig.add_subplot(133)
ax3.plot(x,(y1**2),label='$Ψ^2_{n=0}(x)$')
ax3.plot(x,(y2**2),label='$Ψ^2_{n=1}(x)$')
ax3.plot(x,(y3**2),label='$Ψ^2_{n=2}(x)$')
ax3.set_xlabel('position ($x/λ_0$) ')
ax3.set_ylabel('square wavefunction')
ax3.set_title('probability distribution in finite barrier well')
ax3.grid(True)
ax3.legend()
plt.show() | 25.78481 | 64 | 0.661757 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 588 | 0.287531 |
b45b6c7e93b004510cd39ca579e1ae1a135f82e4 | 30 | py | Python | SVDD/__init__.py | SolidusAbi/SVDD-Python | ce2b834bf31cfdbbbebc08c8a1bac8c37b081d0e | [
"MIT"
]
| null | null | null | SVDD/__init__.py | SolidusAbi/SVDD-Python | ce2b834bf31cfdbbbebc08c8a1bac8c37b081d0e | [
"MIT"
]
| null | null | null | SVDD/__init__.py | SolidusAbi/SVDD-Python | ce2b834bf31cfdbbbebc08c8a1bac8c37b081d0e | [
"MIT"
]
| null | null | null | from .BaseSVDD import BaseSVDD | 30 | 30 | 0.866667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b45bc4fbd6c30a7f20f9e98d80b94da630c9831f | 739 | py | Python | setup.py | SteveLTN/iex-api-python | f2c5c8f461ee22d7fc58bc335e397454de4cee7e | [
"MIT"
]
| null | null | null | setup.py | SteveLTN/iex-api-python | f2c5c8f461ee22d7fc58bc335e397454de4cee7e | [
"MIT"
]
| null | null | null | setup.py | SteveLTN/iex-api-python | f2c5c8f461ee22d7fc58bc335e397454de4cee7e | [
"MIT"
]
| null | null | null | import setuptools
import glob
import os
required = [
"requests",
"pandas",
"arrow",
"socketIO-client-nexus"
]
setuptools.setup(name='iex-api-python',
version="0.0.5",
description='Fetch data from the IEX API',
long_description=open('README.md').read().strip(),
author='Daniel E. Cook',
author_email='[email protected]',
url='http://www.github.com/danielecook/iex-api-python',
packages=['iex'],
install_requires=required,
keywords=['finance', 'stock', 'market', 'market-data', 'IEX', 'API'],
license='MIT License',
zip_safe=False)
| 30.791667 | 86 | 0.525034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 265 | 0.358593 |
b45ee2f14f2b2080d0f96be546d04753ee6aacf6 | 3,825 | py | Python | src/models/train_model.py | 4c697361/e-commerce | ddf88ec2c3f020bce129195452dfa9b03fb285d4 | ["MIT"] | 3 | 2020-01-01T06:36:38.000Z | 2020-12-08T02:57:33.000Z | src/models/train_model.py | L2Data/e-commerce | ddf88ec2c3f020bce129195452dfa9b03fb285d4 | ["MIT"] | null | null | null | src/models/train_model.py | L2Data/e-commerce | ddf88ec2c3f020bce129195452dfa9b03fb285d4 | ["MIT"]
| 3 | 2020-02-28T01:22:07.000Z | 2020-07-19T13:38:59.000Z | import os
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
from keras.callbacks import ModelCheckpoint, EarlyStopping
import src.utils.utils as ut
import src.utils.model_utils as mu
import src.models.model as md
import src.models.data_generator as dg
import src.data.dataframe as dat
def train(classmode, modelmode, batch_size, epochs, learning_rate):
train = dat.read_df(os.path.join(ut.dirs.processed_dir, ut.df_names.train_df))
nclasses = mu.ref_n_classes(classmode)
valid = dat.read_df(os.path.join(ut.dirs.processed_dir, ut.df_names.valid_df))
traindata = dg.DataSequence(train,
ut.dirs.train_dir,
batch_size=batch_size,
classmode=classmode,
modelmode=modelmode)
validdata = dg.DataSequence(valid,
ut.dirs.validation_dir,
batch_size=batch_size,
classmode=classmode,
modelmode=modelmode)
model = md.custom(classmode, modelmode, nclasses).make_compiled_model(learning_rate)
model.summary()
save_model_to = os.path.join(ut.dirs.model_dir, classmode + '_' + modelmode + '.h5')
Checkpoint = ModelCheckpoint(save_model_to,
monitor='val_loss',
verbose=0,
save_best_only=False,
save_weights_only=False,
mode='auto',
period=1)
Earlystop = EarlyStopping(monitor='val_loss',
min_delta=0,
patience=5,
verbose=0,
mode='auto',
baseline=None)
model.fit_generator(generator=traindata,
steps_per_epoch=len(train)//batch_size,
validation_data=validdata,
validation_steps=len(valid)//batch_size,
epochs=epochs,
callbacks=[mu.TrainValTensorBoard(write_graph=False),
Checkpoint],
#verbose=1,
use_multiprocessing=False,
workers=1)
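# Editor's note (illustrative, not part of the original script): in recent Keras/TF 2.x
# releases Model.fit accepts Sequence objects directly and fit_generator is deprecated;
# assuming that newer API, an equivalent call would look roughly like:
#
#     model.fit(traindata,
#               validation_data=validdata,
#               epochs=epochs,
#               callbacks=[mu.TrainValTensorBoard(write_graph=False), Checkpoint],
#               workers=1)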
@click.command()
@click.option('--classmode', type=str, default=ut.params.classmode,
help='choose a classmode:\n\
multilabel, multiclass\n\
(default: multilabel)')
@click.option('--modelmode', type=str, default=ut.params.modelmode,
help='choose a modelmode:\n\
image, text, combined\n\
(default: combined)')
@click.option('--ep', type=float, default=ut.params.epochs,
help='number of epochs (default: {})'.
format(ut.params.epochs))
@click.option('--lr', type=float, default=ut.params.learning_rate,
help='learning rate (default: {})'.
format(ut.params.learning_rate))
@click.option('--bs', type=int, default=ut.params.batch_size,
help='batch size (default: {})'.
format(ut.params.batch_size))
def main(classmode, modelmode, bs, ep, lr):
classmode, modelmode = ut.check_modes(classmode, modelmode)
train(classmode, modelmode, bs, ep, lr)
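# Editor's note (illustrative): an example invocation using the click options defined above;
# the module path is an assumption based on this repository's import layout.
#
#     python -m src.models.train_model --classmode multilabel --modelmode combined --bs 32 --ep 10 --lr 0.001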
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
project_dir = Path(__file__).resolve().parents[2]
load_dotenv(find_dotenv())
main()
| 39.43299 | 88 | 0.536209 | 0 | 0 | 0 | 0 | 1,118 | 0.292288 | 0 | 0 | 499 | 0.130458 |
b45f1b6d99b2cd670602d4dd9541964dcf882b2c | 3,412 | py | Python | Jobs/pm_match.py | Shantanu48114860/DPN-SA | 43ffedf26a950563e029954b7bb87ce5b4f7bc55 | ["MIT"] | 2 | 2021-02-18T04:21:34.000Z | 2022-03-09T11:16:17.000Z | Jobs/pm_match.py | Shantanu48114860/DPN-SA | 43ffedf26a950563e029954b7bb87ce5b4f7bc55 | ["MIT"] | null | null | null | Jobs/pm_match.py | Shantanu48114860/DPN-SA | 43ffedf26a950563e029954b7bb87ce5b4f7bc55 | ["MIT"]
| 1 | 2020-12-13T08:32:04.000Z | 2020-12-13T08:32:04.000Z | """
MIT License
Copyright (c) 2020 Shantanu Ghosh
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy
from matplotlib import pyplot
import pandas as pd
import os
from Propensity_socre_network import Propensity_socre_network
from Utils import Utils
from dataloader import DataLoader
def draw(treated_ps_list, control_ps_list, bins1):
pyplot.hist(treated_ps_list, bins1, alpha=0.5, label='treated')
pyplot.hist(control_ps_list, bins1, alpha=0.5, label='control')
pyplot.legend(loc='upper right')
pyplot.show()
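# Editor's note (illustrative, not part of the original script): with 139 treated and 608
# control units (see the counts noted below), raw-count histograms are hard to compare
# directly; matplotlib's density normalization is one assumed alternative:
#
#     pyplot.hist(treated_ps_list, bins1, alpha=0.5, density=True, label='treated')
#     pyplot.hist(control_ps_list, bins1, alpha=0.5, density=True, label='control')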
csv_path = "Dataset/ihdp_sample.csv"
# 139 treated
# 747 - 139 = 608 control
# 747 total
split_size = 0.8
device = Utils.get_device()
dL = DataLoader()
np_covariates_X_train, np_covariates_X_test, np_covariates_Y_train, np_covariates_Y_test = \
dL.preprocess_data_from_csv(csv_path, split_size)
ps_train_set = dL.convert_to_tensor(np_covariates_X_train, np_covariates_Y_train)
train_parameters_NN = {
"epochs": 75,
"lr": 0.001,
"batch_size": 32,
"shuffle": True,
"train_set": ps_train_set,
"model_save_path": "./Propensity_Model/NN_PS_model_iter_id_"
+ str(1) + "_epoch_{0}_lr_{1}.pth"
}
# ps using NN
ps_net_NN = Propensity_socre_network()
print("############### Propensity Score neural net Training ###############")
ps_net_NN.train(train_parameters_NN, device, phase="train")
# eval
eval_parameters_NN = {
"eval_set": ps_train_set,
"model_path": "./Propensity_Model/NN_PS_model_iter_id_{0}_epoch_75_lr_0.001.pth"
.format(1)
}
ps_score_list_NN = ps_net_NN.eval_return_complete_list(eval_parameters_NN, device, phase="eval")
treated_ps_list = [d["prop_score"] for d in ps_score_list_NN if d['treatment'] == 1]
control_ps_list = [d["prop_score"] for d in ps_score_list_NN if d['treatment'] == 0]
for ps_dict in treated_ps_list:
print(ps_dict)
print("--------------")
for ps_dict in control_ps_list:
print(ps_dict)
print("treated: " + str(len(treated_ps_list)))
print("control: " + str(len(control_ps_list)))
print("total: " + str(len(treated_ps_list) + len(control_ps_list)))
bins1 = numpy.linspace(0, 1, 100)
bins2 = numpy.linspace(0, 0.2, 100)
bins3 = numpy.linspace(0.2, 0.5, 100)
bins4 = numpy.linspace(0.5, 1, 100)
draw(treated_ps_list, control_ps_list, bins1)
draw(treated_ps_list, control_ps_list, bins2)
draw(treated_ps_list, control_ps_list, bins3)
draw(treated_ps_list, control_ps_list, bins4)
| 33.45098 | 96 | 0.750586 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,592 | 0.466589 |
b45fb7ad9c5a50822623857ace28d82c3b6d8f02 | 15,182 | py | Python | varcode/effects/effect_prediction.py | openvax/varcode | a51a7dd0868ef05aee4962e66a9226ab70935a3d | ["Apache-2.0"] | 39 | 2018-01-12T20:58:43.000Z | 2022-03-18T04:54:16.000Z | varcode/effects/effect_prediction.py | hammerlab/varcode | a51a7dd0868ef05aee4962e66a9226ab70935a3d | ["Apache-2.0"] | 217 | 2015-01-02T19:04:02.000Z | 2017-09-09T23:21:15.000Z | varcode/effects/effect_prediction.py | openvax/varcode | a51a7dd0868ef05aee4962e66a9226ab70935a3d | ["Apache-2.0"]
| 18 | 2018-02-06T13:32:44.000Z | 2022-02-28T04:49:20.000Z | # Copyright (c) 2016-2019. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import logging
from Bio.Seq import reverse_complement
from pyensembl import Transcript
from ..common import groupby_field
from .transcript_helpers import interval_offset_on_transcript
from .effect_helpers import changes_exonic_splice_site
from .effect_collection import EffectCollection
from .effect_prediction_coding import predict_variant_coding_effect_on_transcript
from .effect_classes import (
Failure,
Intergenic,
Intragenic,
NoncodingTranscript,
IncompleteTranscript,
FivePrimeUTR,
ThreePrimeUTR,
Intronic,
IntronicSpliceSite,
SpliceAcceptor,
SpliceDonor,
StartLoss,
ExonLoss,
ExonicSpliceSite,
)
logger = logging.getLogger(__name__)
def predict_variant_effects(variant, raise_on_error=False):
"""Determine the effects of a variant on any transcripts it overlaps.
Returns an EffectCollection object.
Parameters
----------
variant : Variant
raise_on_error : bool
Raise an exception if we encounter an error while trying to
determine the effect of this variant on a transcript, or simply
log the error and continue.
"""
# if this variant isn't overlapping any genes, return a
# Intergenic effect
# TODO: look for nearby genes and mark those as Upstream and Downstream
# effects
try:
gene_ids = variant.gene_ids
transcripts = variant.transcripts
except:
if raise_on_error:
raise
else:
return []
if len(gene_ids) == 0:
effects = [Intergenic(variant)]
else:
# list of all MutationEffects for all genes & transcripts
effects = []
# group transcripts by their gene ID
transcripts_grouped_by_gene = \
groupby_field(transcripts, 'gene_id')
# want effects in the list grouped by the gene they come from
for gene_id in sorted(gene_ids):
if gene_id not in transcripts_grouped_by_gene:
# intragenic variant overlaps a gene but not any transcripts
gene = variant.genome.gene_by_id(gene_id)
effects.append(Intragenic(variant, gene))
else:
# gene ID has transcripts overlapped by this variant
for transcript in transcripts_grouped_by_gene[gene_id]:
if raise_on_error:
effect = predict_variant_effect_on_transcript(
variant=variant,
transcript=transcript)
else:
effect = predict_variant_effect_on_transcript_or_failure(
variant=variant,
transcript=transcript)
effects.append(effect)
return EffectCollection(effects)
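# Editor's sketch (illustrative, not part of the original module): a minimal example of how
# predict_variant_effects might be driven from a Variant object. The Variant constructor
# arguments and the `ensembl=` keyword are assumptions based on varcode's public API, and
# the coordinates are placeholders only.
def _example_predict_variant_effects():
    from pyensembl import ensembl_grch38
    from varcode import Variant
    variant = Variant(contig="7", start=140753336, ref="A", alt="T", ensembl=ensembl_grch38)
    effects = predict_variant_effects(variant)
    return effects.top_priority_effect()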
def predict_variant_effect_on_transcript_or_failure(variant, transcript):
"""
Try predicting the effect of a variant on a particular transcript but
suppress raised exceptions by converting them into `Failure` effect
values.
"""
try:
return predict_variant_effect_on_transcript(
variant=variant,
transcript=transcript)
except (AssertionError, ValueError) as error:
logger.warn(
"Encountered error annotating %s for %s: %s",
variant,
transcript,
error)
return Failure(variant, transcript)
def predict_variant_effect_on_transcript(variant, transcript):
"""Return the transcript effect (such as FrameShift) that results from
applying this genomic variant to a particular transcript.
Parameters
----------
transcript : Transcript
Transcript we're going to apply mutation to.
"""
if transcript.__class__ is not Transcript:
raise TypeError(
"Expected %s : %s to have type Transcript" % (
transcript, type(transcript)))
# check for non-coding transcripts first, since
# every non-coding transcript is "incomplete".
if not transcript.is_protein_coding:
return NoncodingTranscript(variant, transcript)
if not transcript.complete:
return IncompleteTranscript(variant, transcript)
# since we're using inclusive base-1 coordinates,
# checking for overlap requires special logic for insertions
is_insertion = variant.is_insertion
# determine if any exons are deleted, and if not,
# what is the closest exon and how far is this variant
# from that exon (overlapping the exon = 0 distance)
completely_lost_exons = []
# list of which (exon #, Exon) pairs this mutation overlaps
overlapping_exon_numbers_and_exons = []
distance_to_nearest_exon = float("inf")
start_in_exon = False
end_in_exon = False
nearest_exon = None
variant_start = variant.trimmed_base1_start
variant_end = variant.trimmed_base1_end
for i, exon in enumerate(transcript.exons):
if variant_start <= exon.start and variant_end >= exon.end:
completely_lost_exons.append(exon)
if is_insertion and exon.strand == "+" and variant_end == exon.end:
# insertions after an exon don't overlap the exon
distance = 1
elif is_insertion and exon.strand == "-" and variant_start == exon.start:
distance = 1
else:
distance = exon.distance_to_interval(variant_start, variant_end)
if distance == 0:
overlapping_exon_numbers_and_exons.append((i + 1, exon))
# start is contained in current exon
if exon.start <= variant_start <= exon.end:
start_in_exon = True
# end is contained in current exon
if exon.end >= variant_end >= exon.start:
end_in_exon = True
elif distance < distance_to_nearest_exon:
distance_to_nearest_exon = distance
nearest_exon = exon
if len(overlapping_exon_numbers_and_exons) == 0:
intronic_effect_class = choose_intronic_effect_class(
variant=variant,
nearest_exon=nearest_exon,
distance_to_exon=distance_to_nearest_exon)
return intronic_effect_class(
variant=variant,
transcript=transcript,
nearest_exon=nearest_exon,
distance_to_exon=distance_to_nearest_exon)
elif len(completely_lost_exons) > 0 or (
len(overlapping_exon_numbers_and_exons) > 1):
# if spanning multiple exons, or completely deleted an exon
# then consider that an ExonLoss mutation
exons = [exon for (_, exon) in overlapping_exon_numbers_and_exons]
return ExonLoss(variant, transcript, exons)
assert len(overlapping_exon_numbers_and_exons) == 1
exon_number, exon = overlapping_exon_numbers_and_exons[0]
exonic_effect_annotation = exonic_transcript_effect(
variant, exon, exon_number, transcript)
    # simple case: both start and end are in the same exon
if start_in_exon and end_in_exon:
return exonic_effect_annotation
elif isinstance(exonic_effect_annotation, ExonicSpliceSite):
# if mutation bleeds over into intro but even just
# the exonic portion got annotated as an exonic splice site
# then return it
return exonic_effect_annotation
return ExonicSpliceSite(
variant=variant,
transcript=transcript,
exon=exon,
alternate_effect=exonic_effect_annotation)
def choose_intronic_effect_class(
variant,
nearest_exon,
distance_to_exon):
"""
Infer effect of variant which does not overlap any exon of
the given transcript.
"""
assert distance_to_exon > 0, \
"Expected intronic effect to have distance_to_exon > 0, got %d" % (
distance_to_exon,)
if nearest_exon.strand == "+":
# if exon on positive strand
start_before = variant.trimmed_base1_start < nearest_exon.start
start_same = variant.trimmed_base1_start == nearest_exon.start
before_exon = start_before or (variant.is_insertion and start_same)
else:
# if exon on negative strand
end_after = variant.trimmed_base1_end > nearest_exon.end
end_same = variant.trimmed_base1_end == nearest_exon.end
before_exon = end_after or (variant.is_insertion and end_same)
# distance cutoffs based on consensus splice sequences from
# http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2947103/
# 5' splice site: MAG|GURAGU consensus
# M is A or C; R is purine; | is the exon-intron boundary
# 3' splice site: YAG|R
if distance_to_exon <= 2:
if before_exon:
# 2 last nucleotides of intron before exon are the splice
# acceptor site, typically "AG"
return SpliceAcceptor
else:
# 2 first nucleotides of intron after exon are the splice donor
# site, typically "GT"
return SpliceDonor
elif not before_exon and distance_to_exon <= 6:
# variants in nucleotides 3-6 at start of intron aren't as certain
# to cause problems as nucleotides 1-2 but still implicated in
# alternative splicing
return IntronicSpliceSite
elif before_exon and distance_to_exon <= 3:
# nucleotide -3 before exon is part of the 3' splicing
# motif but allows for more degeneracy than the -2, -1 nucleotides
return IntronicSpliceSite
else:
# intronic mutation unrelated to splicing
return Intronic
def exonic_transcript_effect(variant, exon, exon_number, transcript):
"""Effect of this variant on a Transcript, assuming we already know
that this variant overlaps some exon of the transcript.
Parameters
----------
variant : Variant
exon : pyensembl.Exon
Exon which this variant overlaps
exon_number : int
Index (starting from 1) of the given exon in the transcript's
sequence of exons.
transcript : pyensembl.Transcript
"""
genome_ref = variant.trimmed_ref
genome_alt = variant.trimmed_alt
variant_start = variant.trimmed_base1_start
variant_end = variant.trimmed_base1_end
# clip mutation to only affect the current exon
if variant_start < exon.start:
# if mutation starts before current exon then only look
# at nucleotides which overlap the exon
logger.info('Mutation in variant %s starts before exon %s', variant, exon)
assert len(genome_ref) > 0, "Unexpected insertion into intron"
n_skip_start = exon.start - variant_start
genome_ref = genome_ref[n_skip_start:]
genome_alt = genome_alt[n_skip_start:]
genome_start = exon.start
else:
genome_start = variant_start
if variant_end > exon.end:
# if mutation goes past exon end then only look at nucleotides
# which overlap the exon
logger.info('Mutation in variant %s ends after exon %s', variant, exon)
n_skip_end = variant_end - exon.end
genome_ref = genome_ref[:-n_skip_end]
genome_alt = genome_alt[:len(genome_ref)]
genome_end = exon.end
else:
genome_end = variant_end
transcript_offset = interval_offset_on_transcript(
genome_start, genome_end, transcript)
if transcript.on_backward_strand:
cdna_ref = reverse_complement(genome_ref)
cdna_alt = reverse_complement(genome_alt)
else:
cdna_ref = genome_ref
cdna_alt = genome_alt
n_ref = len(cdna_ref)
expected_ref = str(
transcript.sequence[transcript_offset:transcript_offset + n_ref])
if cdna_ref != expected_ref:
raise ValueError(
("Found ref nucleotides '%s' in sequence"
" of %s at offset %d (chromosome positions %d:%d)"
" but variant %s has '%s'") % (
expected_ref,
transcript,
transcript_offset,
genome_start,
genome_end,
variant,
cdna_ref))
utr5_length = min(transcript.start_codon_spliced_offsets)
# does the variant start inside the 5' UTR?
if utr5_length > transcript_offset:
# does the variant end after the 5' UTR, within the coding region?
if utr5_length < transcript_offset + n_ref:
# TODO: we *might* lose the Kozak sequence or the start codon
# but without looking at the modified sequence how can we tell
# for sure that this is a start-loss variant?
return StartLoss(variant, transcript)
else:
# if variant contained within 5' UTR
return FivePrimeUTR(variant, transcript)
utr3_offset = max(transcript.stop_codon_spliced_offsets) + 1
if transcript_offset >= utr3_offset:
return ThreePrimeUTR(variant, transcript)
exon_start_offset = interval_offset_on_transcript(
exon.start, exon.end, transcript)
exon_end_offset = exon_start_offset + len(exon) - 1
# Further below we're going to try to predict exonic splice site
# modifications, which will take this effect_annotation as their
# alternative hypothesis for what happens if splicing doesn't change.
# If the mutation doesn't affect an exonic splice site, then
# we'll just return this effect.
coding_effect_annotation = predict_variant_coding_effect_on_transcript(
variant=variant,
transcript=transcript,
trimmed_cdna_ref=cdna_ref,
trimmed_cdna_alt=cdna_alt,
transcript_offset=transcript_offset)
if changes_exonic_splice_site(
transcript=transcript,
transcript_ref=cdna_ref,
transcript_alt=cdna_alt,
transcript_offset=transcript_offset,
exon_start_offset=exon_start_offset,
exon_end_offset=exon_end_offset,
exon_number=exon_number):
return ExonicSpliceSite(
variant=variant,
transcript=transcript,
exon=exon,
alternate_effect=coding_effect_annotation)
return coding_effect_annotation
| 37.210784 | 85 | 0.652022 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,168 | 0.340403 |
b46021d4ffc65a61c7d142b7d9d9cca8ff758a1a | 8,244 | py | Python | configs.py | platonic-realm/UM-PDD | 747171ec3d87e54fb70a2cf4472b8da9fbd2f67b | ["MIT"] | null | null | null | configs.py | platonic-realm/UM-PDD | 747171ec3d87e54fb70a2cf4472b8da9fbd2f67b | ["MIT"] | null | null | null | configs.py | platonic-realm/UM-PDD | 747171ec3d87e54fb70a2cf4472b8da9fbd2f67b | ["MIT"]
| 1 | 2021-06-13T22:41:23.000Z | 2021-06-13T22:41:23.000Z | # OS-Level Imports
import os
import sys
import multiprocessing
from multiprocessing import cpu_count
# Library Imports
import tensorflow as tf
from tensorflow.keras import mixed_precision
from tensorflow.python.distribute.distribute_lib import Strategy
# Internal Imports
from Utils.enums import Environment, Accelerator
# Global Configuration Variables
environment = Environment.GoogleColab
accelerator = Accelerator.GPU
strategy = None
cpu_no = multiprocessing.cpu_count()
batch_size = 64
latent_dim = 100
epochs = 10
supervised_samples_ratio = 0.05
save_interval = 17
super_batches = 1
unsuper_batches = 1
prefetch_no = tf.data.AUTOTUNE
eager_execution = True
model_summary = False
resume_training = False
result_path = './results/'
dataset_path = './dataset/'
def parse_args():
global environment
global accelerator
global batch_size
global latent_dim
global epochs
global supervised_samples_ratio
global save_interval
global super_batches
global unsuper_batches
global prefetch_no
global eager_execution
global model_summary
for arg in sys.argv:
if arg.lower().__contains__("envi"):
param = arg[arg.index("=") + 1:]
if param.lower() == "local":
environment = Environment.Local
elif param.lower() == "colab":
environment = Environment.GoogleColab
elif param.lower() == "research":
environment = Environment.GoogleResearch
if arg.lower().__contains__("accel"):
param = arg[arg.index("=") + 1:]
if param.lower() == "gpu":
accelerator = Accelerator.GPU
elif param.lower() == "tpu":
accelerator = Accelerator.TPU
if arg.lower().__contains__("batch"):
param = arg[arg.index("=") + 1:]
batch_size = int(param)
if arg.lower().__contains__("epoch"):
param = arg[arg.index("=") + 1:]
epochs = int(param)
if arg.lower().__contains__("sample_ratio"):
param = arg[arg.index("=") + 1:]
supervised_samples_ratio = float(param)
if arg.lower().__contains__("save_interval"):
param = arg[arg.index("=") + 1:]
save_interval = int(param)
if arg.lower().__contains__("super_batches"):
param = arg[arg.index("=") + 1:]
super_batches = int(param)
if arg.lower().__contains__("unsuper_batches"):
param = arg[arg.index("=") + 1:]
unsuper_batches = int(param)
if arg.lower().__contains__("eager"):
param = arg[arg.index("=") + 1:]
if param.lower().__contains__("false"):
eager_execution = False
else:
eager_execution = True
if arg.lower().__contains__("model_sum"):
param = arg[arg.index("=") + 1:]
if param.lower().__contains__("false"):
model_summery = False
else:
model_summery = True
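# Editor's note (illustrative, not part of the original file): parse_args scans sys.argv for
# simple key=value tokens rather than using argparse, so a typical launch might look like
#
#     python main.py environment=colab accelerator=tpu batch=128 epoch=20 sample_ratio=0.1
#
# where "main.py" is a hypothetical entry point; only the key=value format matters here.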
def print_args():
global environment
global accelerator
global batch_size
global latent_dim
global epochs
global supervised_samples_ratio
global save_interval
global super_batches
global unsuper_batches
global prefetch_no
global eager_execution
global model_summary
print(environment)
print(accelerator)
print("Batch Size: ", batch_size)
print("Epochs: ", epochs)
print("Supervised Ratio: ", supervised_samples_ratio)
print("Save Interval: ", save_interval)
print("Supervised Batches per Interval: ", super_batches)
print("Unsupervised Batches per Interval: ", unsuper_batches)
print("Eager Execution: ", eager_execution)
print("Print Model Summery: ", model_summary)
def configure(enable_xla: bool = True,
print_device_placement: bool = False,
enable_eager_execution: bool = True,
only_cpu: bool = False,
enable_memory_growth: bool = True,
enable_mixed_float16: bool = False):
global environment
global accelerator
global batch_size
global latent_dim
global epochs
global supervised_samples_ratio
global save_interval
global super_batches
global unsuper_batches
global prefetch_no
global eager_execution
global model_summary
global strategy
global result_path
global dataset_path
# Configurations
#########################################################
# To enable xla compiler
if enable_xla:
os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'
#########################################################
# To print out on which device operation is taking place
if print_device_placement:
tf.debugging.set_log_device_placement(True)
#########################################################
# To disable eager execution and use graph functions
if not enable_eager_execution:
tf.compat.v1.disable_eager_execution()
#########################################################
# To disable GPUs
if only_cpu:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
#########################################################
# Setting memory growth
gpus = tf.config.list_physical_devices('GPU')
if enable_memory_growth and gpus:
try:
tf.config.experimental.set_memory_growth(gpus[0], True)
except Exception as ex:
# Invalid device or cannot modify virtual devices once initialized.
pass
#########################################################
# Create 2 virtual GPUs with 1GB memory each
# if gpus:
# try:
# tf.config.experimental.set_virtual_device_configuration(
# gpus[0],
# [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024),
# tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024)])
# logical_gpus = tf.config.experimental.list_logical_devices('GPU')
# print(len(gpus), "Physical GPU,", len(logical_gpus), "Logical GPUs")
# except RuntimeError as e:
# # Virtual devices must be set before GPUs have been initialized
# print(e)
#########################################################
# Using mixed_precision to activate Tensor Cores
if enable_mixed_float16:
mixed_precision.set_global_policy('mixed_float16')
#########################################################
# Configurations
# House keeping
#########################################################
    # Storing the default TF strategy, we will use it in case we don't set our own
strategy = tf.distribute.get_strategy()
if environment == Environment.Local:
accelerator = Accelerator.GPU
if accelerator == Accelerator.TPU and \
(environment == Environment.GoogleColab or environment == Environment.GoogleResearch):
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(resolver)
# This is the TPU initialization code that has to be called at the beginning of program.
tf.tpu.experimental.initialize_tpu_system(resolver)
print("TPUs: ", tf.config.list_logical_devices('TPU'))
strategy = tf.distribute.TPUStrategy(resolver)
if environment == Environment.GoogleColab and accelerator == Accelerator.GPU:
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
dataset_path = '/content/drive/MyDrive/Share/UM-PDD/dataset/'
result_path = '/content/drive/MyDrive/Share/UM-PDD/results/'
if environment == Environment.GoogleColab and accelerator == Accelerator.TPU:
dataset_path = '/content/dataset/'
result_path = '/content/drive/MyDrive/Share/UM-PDD/results/'
#########################################################
# House keeping
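# Editor's sketch (illustrative, not part of the original file): a typical call order for
# this module, with model construction placed under the distribution strategy selected by
# configure(). `build_model` is a hypothetical stand-in for the project's model factory.
def example_usage(build_model):
    parse_args()
    print_args()
    configure()
    with strategy.scope():  # `strategy` is rebound by configure()
        model = build_model(latent_dim)
    return model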
| 36.96861 | 99 | 0.589034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,458 | 0.298156 |
b4605592d622fde2874b27f05a1e575beae84ca9 | 136 | py | Python | Legacy/Audit_Sweep/daily_audit_cron.py | QualiSystemsLab/Power-Management | f90f5971d80f17f45c8ac3f43ff93c0071572dd0 | ["Apache-2.0"] | null | null | null | Legacy/Audit_Sweep/daily_audit_cron.py | QualiSystemsLab/Power-Management | f90f5971d80f17f45c8ac3f43ff93c0071572dd0 | ["Apache-2.0"] | null | null | null | Legacy/Audit_Sweep/daily_audit_cron.py | QualiSystemsLab/Power-Management | f90f5971d80f17f45c8ac3f43ff93c0071572dd0 | ["Apache-2.0"]
| null | null | null | from power_audit import PowerAudit
def main():
local = PowerAudit()
local.full_audit()
if __name__ == '__main__':
main()
| 13.6 | 34 | 0.661765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.073529 |
b4620451d250c59f2d44e900d7695fc1a4e00f84 | 258 | py | Python | python/hayate/store/actions.py | tao12345666333/Talk-Is-Cheap | 7b2c5959828b6d8bbbad8144b9b97f9b77c6b34c | ["MIT"] | 4 | 2016-04-14T02:11:35.000Z | 2019-05-30T10:18:41.000Z | python/hayate/store/actions.py | tao12345666333/Talk-Is-Cheap | 7b2c5959828b6d8bbbad8144b9b97f9b77c6b34c | ["MIT"] | 8 | 2016-07-21T16:02:17.000Z | 2021-09-23T02:49:34.000Z | python/hayate/store/actions.py | tao12345666333/Talk-Is-Cheap | 7b2c5959828b6d8bbbad8144b9b97f9b77c6b34c | ["MIT"]
| 2 | 2017-02-17T05:02:02.000Z | 2017-11-08T12:22:09.000Z | from turbo.flux import Mutation, register, dispatch, register_dispatch
import mutation_types
@register_dispatch('user', mutation_types.INCREASE)
def increase(rank):
pass
def decrease(rank):
return dispatch('user', mutation_types.DECREASE, rank)
| 19.846154 | 70 | 0.77907 | 0 | 0 | 0 | 0 | 80 | 0.310078 | 0 | 0 | 12 | 0.046512 |