seq_id (string) | text (string) | repo_name (string) | sub_path (string) | file_name (string) | file_ext (string) | file_size_in_byte (int64) | program_lang (string) | lang (string) | doc_type (string) | stars (int64) | dataset (string) | pt (string) | api (list)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
73907153148
|
import tkinter as tk
import os
from style import fnt, ACTIVE_BG, BG, FG, ACCENT
class Option(tk.Checkbutton):
def __init__(self, parent, filename):
# Pull option content from file
with open(filename, 'r') as f:
self.content = f.readlines()
# Grab description of option from first line in file
title = self.content[0].replace('%', '').strip()
self.state = tk.IntVar()
super().__init__(parent, fg=FG, font=fnt(10), highlightbackground=BG,
activebackground=ACTIVE_BG, activeforeground=FG,
bg=BG, selectcolor=ACTIVE_BG, text=title, variable=self.state)
def make(self):
# If the option is selected, return content
if self.state.get():
return ''.join(self.content)
return ''
class Selector(tk.Frame):
def __init__(self, parent, folder):
super().__init__(parent, bg=BG)
title = tk.Label(self, font=fnt(10), text=f' Select {folder} ', fg=FG, bg=ACCENT)
title.pack(anchor=tk.W)
# Frame to hold Options
option_frame = tk.Frame(self, bg=BG, borderwidth=2, relief=tk.RIDGE)
option_frame.pack(fill=tk.X)
# Get path to the folder this module resides in
folder_path = os.path.join(os.path.dirname(__file__), folder)
# Create all option checkbuttons, providing an absolute path to each file
self.options = [Option(option_frame, os.path.join(folder_path, filename))
for filename in os.listdir(folder_path)]
# Place all checkbuttons
        for option in self.options:
            option.pack(anchor=tk.W)
def make(self):
# Get the result of this selector as a string
return ''.join([option.make() for option in self.options])
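# A minimal usage sketch (an assumption, not part of the original module): wire a
# Selector to a hypothetical "packages" folder of option files and print the result.
if __name__ == '__main__':
    root = tk.Tk()
    selector = Selector(root, 'packages')  # 'packages' is a placeholder folder name
    selector.pack(fill=tk.X)
    button = tk.Button(root, text='Make', command=lambda: print(selector.make()))
    button.pack(anchor=tk.W)
    root.mainloop()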
|
johnathan-coe/TexInit
|
widgets.py
|
widgets.py
|
py
| 1,828 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "tkinter.Checkbutton",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "tkinter.IntVar",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "style.FG",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "style.fnt",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "style.BG",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "style.ACTIVE_BG",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "style.FG",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "style.BG",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "style.ACTIVE_BG",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "tkinter.Frame",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "style.BG",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "tkinter.Label",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "style.fnt",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "style.FG",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "style.ACCENT",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "tkinter.W",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Frame",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "style.BG",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "tkinter.RIDGE",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "tkinter.X",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "tkinter.W",
"line_number": 46,
"usage_type": "attribute"
}
] |
72699110267
|
import os
import requests
class RegistryHandler(object):
get_repos_url = '/v2/_catalog'
get_tags_url = '/v2/{repo}/tags/list'
get_digests_url = '/v2/{repo}/manifests/{tag}'
delete_digest_url = '/v2/{repo}/manifests/{digest}'
def __init__(self, host):
self.host = host
def get_repos(self):
url = f'{self.host}{self.get_repos_url}'
res = requests.get(url).json()
return res['repositories']
def get_tags(self, repo):
url = f'{self.host}{self.get_tags_url.format(repo=repo)}'
res = requests.get(url).json()
return res['tags']
def get_digest(self, repo, tag):
headers = {"Accept": "application/vnd.docker.distribution.manifest.v2+json"}
url = f'{self.host}{self.get_digests_url.format(repo=repo, tag=tag)}'
resp = requests.get(url, headers=headers)
return resp.headers['Docker-Content-Digest']
def delete_digest(self, repo, digest):
url = f'{self.host}{self.delete_digest_url.format(repo=repo, digest=digest)}'
requests.delete(url)
if __name__ == '__main__':
rh = RegistryHandler('http://10.204.112.43:5001')
repos = rh.get_repos()
for repo in repos:
tags = rh.get_tags(repo)
if not tags:
continue
delete_tags = sorted(
filter(lambda tag: '.' in tag, tags),
key=lambda tag: ''.join([f'{int(n):04d}' for n in tag.split('.')])
)[:-1]
for tag in delete_tags:
try:
digest = rh.get_digest(repo, tag)
rh.delete_digest(repo, digest)
except Exception as e:
print(f'{repo}:{tag} delete fail: {e}')
os.system("docker exec `docker ps | grep registry | awk '{print $1}'` registry garbage-collect /etc/docker/registry/config.yml")
os.system("systemcel restart docker `docker ps | grep registry | awk '{print $1}'`")
# docker exec -it $ registry sh -c 'registry garbage-collect /etc/docker/registry/config.yml'
# curl -I -H "Accept: application/vnd.docker.distribution.manifest.v2+json" 10.204.114.43:5001/v2/$ImageName/manifests/$tag
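# Worked example of the version-sort key above (values hypothetical): each dotted
# tag is zero-padded per segment, so lexicographic order matches numeric order:
#   '1.2.9'  -> '000100020009'
#   '1.2.10' -> '000100020010'
# sorted(...)[:-1] therefore selects every tag except the newest for deletion,
# retaining only the latest version of each repo.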
|
zzyy8678/stady_python
|
delete_regestry.py
|
delete_regestry.py
|
py
| 2,144 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "requests.delete",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 55,
"usage_type": "call"
}
] |
35282991136
|
import subprocess
import argparse
import datetime
import json
import time
def get_options():
parser = argparse.ArgumentParser(
description='Provision a Kubernetes cluster in GKE.')
parser.add_argument(
'-c', '--cluster', type=str, default=None,
help='K8s cluster to configure'
)
parser.add_argument(
'-i', '--image', type=str, default='',
help='Base distro OS image used in nodes.'
)
parser.add_argument(
'-z', '--zone', type=str, default=None,
help='Zone where the GPU cluster is running in.'
)
args = parser.parse_args()
return args
def run_cmd(cmd):
    output = b''  # bytes, since subprocess.check_output returns bytes
    try:
        output = subprocess.check_output(cmd)
    except subprocess.CalledProcessError as e:
        print("Error running command: {}: {}".format(cmd, e))
return output
def wait_for_gpus(cluster_name, timeout=datetime.timedelta(minutes=15)):
''' Wait until nodes are available in GPU cluster. '''
cmd = [
'kubectl', 'get', 'nodes',
'-l', 'cloud.google.com/gke-nodepool={}-gpu-pool'.format(cluster_name),
'-o=json'
]
end_time = datetime.datetime.now() + timeout
print('Waiting for GPUs to be ready ', end='')
while datetime.datetime.now() <= end_time:
output = run_cmd(cmd)
items = json.loads(output.decode('UTF-8')).get("items", [])
for i in items:
gpus = int(i['status']['capacity'].get('nvidia.com/gpu', '0'))
if gpus > 0:
print('OK')
return
print('.', end='')
time.sleep(10)
if __name__ == '__main__':
opts = get_options()
print('Getting credentials for cluster ...')
run_cmd(['gcloud', 'container', 'clusters', 'get-credentials', opts.cluster, '--zone', opts.zone])
print('Enabling Application CRD...')
app_crd_path = 'https://raw.githubusercontent.com/GoogleCloudPlatform/marketplace-k8s-app-tools/master/crd/app-crd.yaml'
run_cmd(['kubectl', 'apply', '-f', app_crd_path])
print('Enabling GPUs in GPU cluster...')
nv_daemonset = 'https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/nvidia-driver-installer/cos/daemonset-preloaded.yaml'
run_cmd(['kubectl', 'apply', '-f', nv_daemonset])
wait_for_gpus(opts.cluster)
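# For reference (an assumption about kubectl's output shape, not part of the
# original script): wait_for_gpus expects `kubectl get nodes -o=json` to return
#   {"items": [{"status": {"capacity": {"nvidia.com/gpu": "1", ...}}}, ...]}
# so any node reporting a nonzero GPU capacity ends the wait loop.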
|
NVIDIA/nvindex-cloud
|
provision/gke/finalize.py
|
finalize.py
|
py
| 2,346 |
python
|
en
|
code
| 10 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "subprocess.check_output",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "subprocess.CalledProcessError",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 60,
"usage_type": "call"
}
] |
30358219721
|
import unittest
from traits.api import HasTraits, Int, Str, Tuple
from traitsui.api import Item, View
from traits.testing.api import UnittestTools
from traitsui.tests._tools import (
BaseTestMixin,
create_ui,
requires_toolkit,
reraise_exceptions,
ToolkitName,
)
class TupleEditor(HasTraits):
"""Dialog containing a Tuple of two Int's."""
tup = Tuple(Int, Int, Str)
traits_view = View(
Item(label="Enter 4 and 6, then press OK"), Item("tup"), buttons=["OK"]
)
class TestTupleEditor(BaseTestMixin, unittest.TestCase, UnittestTools):
def setUp(self):
BaseTestMixin.setUp(self)
def tearDown(self):
BaseTestMixin.tearDown(self)
@requires_toolkit([ToolkitName.qt, ToolkitName.wx])
def test_value_update(self):
# Regression test for #179
model = TupleEditor()
with create_ui(model):
with self.assertTraitChanges(model, "tup", count=1):
model.tup = (3, 4, "nono")
@requires_toolkit([ToolkitName.qt])
def test_qt_tuple_editor(self):
# Behavior: when editing the text of a tuple editor,
        # the value gets updated immediately.
from pyface import qt
val = TupleEditor()
with reraise_exceptions(), create_ui(val) as ui:
# the following is equivalent to clicking in the text control of
# the range editor, enter a number, and clicking ok without
# defocusing
# text element inside the spin control
lineedits = ui.control.findChildren(qt.QtGui.QLineEdit)
lineedits[0].setFocus()
lineedits[0].clear()
lineedits[0].insert("4")
lineedits[1].setFocus()
lineedits[1].clear()
lineedits[1].insert("6")
lineedits[2].setFocus()
lineedits[2].clear()
lineedits[2].insert("fun")
# if all went well, the tuple trait has been updated and its value
# is (4, 6, "fun")
self.assertEqual(val.tup, (4, 6, "fun"))
if __name__ == "__main__":
# Executing the file opens the dialog for manual testing
val = TupleEditor()
val.configure_traits()
print(val.tup)
|
enthought/traitsui
|
traitsui/tests/editors/test_tuple_editor.py
|
test_tuple_editor.py
|
py
| 2,229 |
python
|
en
|
code
| 290 |
github-code
|
6
|
[
{
"api_name": "traits.api.HasTraits",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "traits.api.Tuple",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "traits.api.Int",
"line_number": 19,
"usage_type": "argument"
},
{
"api_name": "traits.api.Str",
"line_number": 19,
"usage_type": "argument"
},
{
"api_name": "traitsui.api.View",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "traitsui.api.Item",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "traitsui.tests._tools.BaseTestMixin",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "unittest.TestCase",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "traits.testing.api.UnittestTools",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "traitsui.tests._tools.BaseTestMixin.setUp",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "traitsui.tests._tools.BaseTestMixin",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "traitsui.tests._tools.BaseTestMixin.tearDown",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "traitsui.tests._tools.BaseTestMixin",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "traitsui.tests._tools.create_ui",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "traitsui.tests._tools.requires_toolkit",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "traitsui.tests._tools.ToolkitName.qt",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "traitsui.tests._tools.ToolkitName",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "traitsui.tests._tools.ToolkitName.wx",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "traitsui.tests._tools.reraise_exceptions",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "traitsui.tests._tools.create_ui",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pyface.qt.QtGui",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "pyface.qt",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "traitsui.tests._tools.requires_toolkit",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "traitsui.tests._tools.ToolkitName.qt",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "traitsui.tests._tools.ToolkitName",
"line_number": 41,
"usage_type": "name"
}
] |
14505164780
|
import typing
from qittle.http.client import ABCHTTPClient, AiohttpClient
from .abc import ABCSessionManager
class SessionManager(ABCSessionManager):
def __init__(self, http_client: typing.Optional[typing.Type[ABCHTTPClient]] = None):
self.http_client = http_client or AiohttpClient
self._active_session: typing.Optional[ABCHTTPClient] = None
async def __aenter__(self) -> ABCHTTPClient:
self._active_session = self.http_client()
return self._active_session
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self._active_session.close()
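# A minimal usage sketch (assumed driver code, not part of the original module);
# the methods available on the yielded client depend on the ABCHTTPClient API:
#
#   import asyncio
#
#   async def main():
#       async with SessionManager() as session:
#           ...  # issue requests through `session` here
#
#   asyncio.run(main())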
|
cyanlabs-org/qittle
|
qittle/http/session/manager.py
|
manager.py
|
py
| 621 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "abc.ABCSessionManager",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "typing.Type",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "qittle.http.client.ABCHTTPClient",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "qittle.http.client.AiohttpClient",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "qittle.http.client.ABCHTTPClient",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "qittle.http.client.ABCHTTPClient",
"line_number": 12,
"usage_type": "name"
}
] |
26038424036
|
from __future__ import annotations
from typing import ClassVar
from pants.core.util_rules.environments import EnvironmentField
from pants.engine.target import (
COMMON_TARGET_FIELDS,
BoolField,
Dependencies,
DictStringToStringField,
IntField,
MultipleSourcesField,
SpecialCasedDependencies,
StringField,
StringSequenceField,
Target,
ValidNumbers,
)
from pants.util.strutil import help_text
class AdhocToolDependenciesField(Dependencies):
pass
class AdhocToolRunnableField(StringField):
alias: ClassVar[str] = "runnable"
required = True
help = help_text(
lambda: f"""
Address to a target that can be invoked by the `run` goal (and does not set
`run_in_sandbox_behavior=NOT_SUPPORTED`). This will be executed along with any arguments
specified by `{AdhocToolArgumentsField.alias}`, in a sandbox with that target's transitive
dependencies, along with the transitive dependencies specified by
`{AdhocToolExecutionDependenciesField.alias}`.
"""
)
class AdhocToolOutputFilesField(StringSequenceField):
alias: ClassVar[str] = "output_files"
required = False
default = ()
help = help_text(
lambda: f"""
Specify the output files to capture, relative to the value of
`{AdhocToolWorkdirField.alias}`.
For directories, use `{AdhocToolOutputDirectoriesField.alias}`. At least one of
        `{AdhocToolOutputFilesField.alias}` and `{AdhocToolOutputDirectoriesField.alias}` must be
specified.
Relative paths (including `..`) may be used, as long as the path does not ascend further
than the build root.
"""
)
class AdhocToolOutputDirectoriesField(StringSequenceField):
alias: ClassVar[str] = "output_directories"
required = False
default = ()
help = help_text(
lambda: f"""
Specify full directories (including recursive descendants) of output to capture, relative
to the value of `{AdhocToolWorkdirField.alias}`.
For individual files, use `{AdhocToolOutputFilesField.alias}`. At least one of
        `{AdhocToolOutputFilesField.alias}` and `{AdhocToolOutputDirectoriesField.alias}` must be
specified.
Relative paths (including `..`) may be used, as long as the path does not ascend further
than the build root.
"""
)
class AdhocToolOutputDependenciesField(AdhocToolDependenciesField):
supports_transitive_excludes = True
alias: ClassVar[str] = "output_dependencies"
help = help_text(
lambda: f"""
Any dependencies that need to be present (as transitive dependencies) whenever the outputs
of this target are consumed (including as dependencies).
See also `{AdhocToolExecutionDependenciesField.alias}` and
`{AdhocToolRunnableDependenciesField.alias}`.
"""
)
class AdhocToolExecutionDependenciesField(SpecialCasedDependencies):
alias: ClassVar[str] = "execution_dependencies"
required = False
default = None
help = help_text(
lambda: f"""
The execution dependencies for this command.
Dependencies specified here are those required to make the command complete successfully
(e.g. file inputs, packages compiled from other targets, etc), but NOT required to make
the outputs of the command useful. Dependencies that are required to use the outputs
produced by this command should be specified using the
`{AdhocToolOutputDependenciesField.alias}` field.
If this field is specified, dependencies from `{AdhocToolOutputDependenciesField.alias}`
will not be added to the execution sandbox.
See also `{AdhocToolOutputDependenciesField.alias}` and
`{AdhocToolRunnableDependenciesField.alias}`.
"""
)
class AdhocToolRunnableDependenciesField(SpecialCasedDependencies):
alias: ClassVar[str] = "runnable_dependencies"
required = False
default = None
help = help_text(
lambda: f"""
The runnable dependencies for this command.
Dependencies specified here are those required to exist on the `PATH` to make the command
complete successfully (interpreters specified in a `#!` command, etc). Note that these
dependencies will be made available on the `PATH` with the name of the target.
See also `{AdhocToolOutputDependenciesField.alias}` and
`{AdhocToolExecutionDependenciesField.alias}`.
"""
)
class AdhocToolSourcesField(MultipleSourcesField):
# We solely register this field for codegen to work.
alias: ClassVar[str] = "_sources"
uses_source_roots = False
expected_num_files = 0
class AdhocToolArgumentsField(StringSequenceField):
alias: ClassVar[str] = "args"
default = ()
help = help_text(
lambda: f"Extra arguments to pass into the `{AdhocToolRunnableField.alias}` field."
)
class AdhocToolStdoutFilenameField(StringField):
alias: ClassVar[str] = "stdout"
default = None
help = help_text(
lambda: f"""
A filename to capture the contents of `stdout` to. Relative paths are
relative to the value of `{AdhocToolWorkdirField.alias}`, absolute paths
start at the build root.
"""
)
class AdhocToolStderrFilenameField(StringField):
alias: ClassVar[str] = "stderr"
default = None
help = help_text(
lambda: f"""
A filename to capture the contents of `stderr` to. Relative paths are
relative to the value of `{AdhocToolWorkdirField.alias}`, absolute paths
start at the build root.
"""
)
class AdhocToolTimeoutField(IntField):
alias: ClassVar[str] = "timeout"
default = 30
help = "Command execution timeout (in seconds)."
valid_numbers = ValidNumbers.positive_only
class AdhocToolExtraEnvVarsField(StringSequenceField):
alias: ClassVar[str] = "extra_env_vars"
help = help_text(
"""
Additional environment variables to provide to the process.
Entries are strings in the form `ENV_VAR=value` to use explicitly; or just
`ENV_VAR` to copy the value of a variable in Pants's own environment.
"""
)
class AdhocToolLogOutputField(BoolField):
alias: ClassVar[str] = "log_output"
default = False
help = "Set to true if you want the output logged to the console."
class AdhocToolWorkdirField(StringField):
alias: ClassVar[str] = "workdir"
default = "."
help = help_text(
"""
Sets the working directory for the process.
Values are relative to the build root, except in the following cases:
* `.` specifies the location of the `BUILD` file.
* Values beginning with `./` are relative to the location of the `BUILD` file.
* `/` or the empty string specifies the build root.
* Values beginning with `/` are also relative to the build root.
"""
)
class AdhocToolNamedCachesField(DictStringToStringField):
alias = "experimental_named_caches"
help = help_text(
"""
Named caches to construct for the execution.
See https://www.pantsbuild.org/docs/reference-global#named_caches_dir.
The keys of the mapping are the directory name to be created in the named caches dir.
The values are the name of the symlink (relative to the sandbox root) in the sandbox which
        points to the subdirectory in the named caches dir.
NOTE: The named caches MUST be handled with great care. Processes accessing the named caches
can be run in parallel, and can be cancelled at any point in their execution (and
potentially restarted). That means that _every_ operation modifying the contents of the cache
MUST be concurrency and cancellation safe.
"""
)
class AdhocToolOutputRootDirField(StringField):
alias: ClassVar[str] = "root_output_directory"
default = "/"
help = help_text(
"""
Adjusts the location of files output by this target, when consumed as a dependency.
Values are relative to the build root, except in the following cases:
* `.` specifies the location of the `BUILD` file.
* Values beginning with `./` are relative to the location of the `BUILD` file.
* `/` or the empty string specifies the build root.
* Values beginning with `/` are also relative to the build root.
"""
)
class AdhocToolTarget(Target):
alias: ClassVar[str] = "adhoc_tool"
core_fields = (
*COMMON_TARGET_FIELDS,
AdhocToolRunnableField,
AdhocToolArgumentsField,
AdhocToolExecutionDependenciesField,
AdhocToolOutputDependenciesField,
AdhocToolRunnableDependenciesField,
AdhocToolLogOutputField,
AdhocToolOutputFilesField,
AdhocToolOutputDirectoriesField,
AdhocToolSourcesField,
AdhocToolTimeoutField,
AdhocToolExtraEnvVarsField,
AdhocToolWorkdirField,
AdhocToolOutputRootDirField,
AdhocToolStdoutFilenameField,
AdhocToolStderrFilenameField,
EnvironmentField,
)
help = help_text(
lambda: f"""
Execute any runnable target for its side effects.
Example BUILD file:
{AdhocToolTarget.alias}(
{AdhocToolRunnableField.alias}=":python_source",
{AdhocToolArgumentsField.alias}=[""],
{AdhocToolExecutionDependenciesField.alias}=[":scripts"],
{AdhocToolOutputDirectoriesField.alias}=["results/"],
{AdhocToolOutputFilesField.alias}=["logs/my-script.log"],
)
shell_sources(name="scripts")
"""
)
# ---
# `system_binary` target
# ---
class SystemBinaryNameField(StringField):
alias: ClassVar[str] = "binary_name"
required = True
help = "The name of the binary to find."
class SystemBinaryExtraSearchPathsField(StringSequenceField):
alias: ClassVar[str] = "extra_search_paths"
default = ()
help = help_text(
"""
Extra search paths to look for the binary. These take priority over Pants' default
search paths.
"""
)
class SystemBinaryFingerprintPattern(StringField):
alias: ClassVar[str] = "fingerprint"
required = False
default = None
help = help_text(
"""
A regular expression which will be used to match the fingerprint outputs from
candidate binaries found during the search process.
"""
)
class SystemBinaryFingerprintArgsField(StringSequenceField):
alias: ClassVar[str] = "fingerprint_args"
default = ()
help = help_text(
"Specifies arguments that will be used to run the binary during the search process."
)
class SystemBinaryFingerprintDependenciesField(AdhocToolRunnableDependenciesField):
alias: ClassVar[str] = "fingerprint_dependencies"
help = help_text(
"""
Specifies any runnable dependencies that need to be available on the `PATH` when the binary
is run, so that the search process may complete successfully. The name of the target must
be the name of the runnable dependency that is called by this binary.
"""
)
class SystemBinaryTarget(Target):
alias: ClassVar[str] = "system_binary"
core_fields = (
*COMMON_TARGET_FIELDS,
SystemBinaryNameField,
SystemBinaryExtraSearchPathsField,
SystemBinaryFingerprintPattern,
SystemBinaryFingerprintArgsField,
SystemBinaryFingerprintDependenciesField,
)
help = help_text(
lambda: f"""
A system binary that can be run with `pants run` or consumed by `{AdhocToolTarget.alias}`.
Pants will search for binaries with name `{SystemBinaryNameField.alias}` in the search
paths provided, as well as default search paths. If
`{SystemBinaryFingerprintPattern.alias}` is specified, each binary that is located will be
executed with the arguments from `{SystemBinaryFingerprintArgsField.alias}`. Any binaries
whose output does not match the pattern will be excluded.
The first non-excluded binary will be the one that is resolved.
"""
)
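# A hypothetical BUILD sketch (not in the original source) showing the two target
# types together; field values are illustrative only:
#
#   system_binary(
#       name="bash",
#       binary_name="bash",
#       fingerprint=r"GNU bash, version 5\..*",
#       fingerprint_args=["--version"],
#   )
#
#   adhoc_tool(
#       runnable=":bash",
#       args=["scripts/build.sh"],
#       execution_dependencies=[":scripts"],
#       output_directories=["dist/"],
#   )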
|
pantsbuild/pants
|
src/python/pants/backend/adhoc/target_types.py
|
target_types.py
|
py
| 12,321 |
python
|
en
|
code
| 2,896 |
github-code
|
6
|
[
{
"api_name": "pants.engine.target.Dependencies",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "pants.engine.target.StringField",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "typing.ClassVar",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "pants.util.strutil.help_text",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.StringSequenceField",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "typing.ClassVar",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "pants.util.strutil.help_text",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.StringSequenceField",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "typing.ClassVar",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "pants.util.strutil.help_text",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "typing.ClassVar",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "pants.util.strutil.help_text",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.SpecialCasedDependencies",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "typing.ClassVar",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "pants.util.strutil.help_text",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.SpecialCasedDependencies",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "typing.ClassVar",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "pants.util.strutil.help_text",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.MultipleSourcesField",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "typing.ClassVar",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "pants.engine.target.StringSequenceField",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "typing.ClassVar",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "pants.util.strutil.help_text",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.StringField",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "typing.ClassVar",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "pants.util.strutil.help_text",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.StringField",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "typing.ClassVar",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "pants.util.strutil.help_text",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.IntField",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "typing.ClassVar",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "pants.engine.target.ValidNumbers.positive_only",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "pants.engine.target.ValidNumbers",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "pants.engine.target.StringSequenceField",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "typing.ClassVar",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "pants.util.strutil.help_text",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.BoolField",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "typing.ClassVar",
"line_number": 195,
"usage_type": "name"
},
{
"api_name": "pants.engine.target.StringField",
"line_number": 200,
"usage_type": "name"
},
{
"api_name": "typing.ClassVar",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "pants.util.strutil.help_text",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.DictStringToStringField",
"line_number": 217,
"usage_type": "name"
},
{
"api_name": "pants.util.strutil.help_text",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.StringField",
"line_number": 236,
"usage_type": "name"
},
{
"api_name": "typing.ClassVar",
"line_number": 237,
"usage_type": "name"
},
{
"api_name": "pants.util.strutil.help_text",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.Target",
"line_number": 253,
"usage_type": "name"
},
{
"api_name": "typing.ClassVar",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "pants.engine.target.COMMON_TARGET_FIELDS",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "pants.core.util_rules.environments.EnvironmentField",
"line_number": 272,
"usage_type": "name"
},
{
"api_name": "pants.util.strutil.help_text",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.StringField",
"line_number": 298,
"usage_type": "name"
},
{
"api_name": "typing.ClassVar",
"line_number": 299,
"usage_type": "name"
},
{
"api_name": "pants.engine.target.StringSequenceField",
"line_number": 304,
"usage_type": "name"
},
{
"api_name": "typing.ClassVar",
"line_number": 305,
"usage_type": "name"
},
{
"api_name": "pants.util.strutil.help_text",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.StringField",
"line_number": 315,
"usage_type": "name"
},
{
"api_name": "typing.ClassVar",
"line_number": 316,
"usage_type": "name"
},
{
"api_name": "pants.util.strutil.help_text",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.StringSequenceField",
"line_number": 327,
"usage_type": "name"
},
{
"api_name": "typing.ClassVar",
"line_number": 328,
"usage_type": "name"
},
{
"api_name": "pants.util.strutil.help_text",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "typing.ClassVar",
"line_number": 336,
"usage_type": "name"
},
{
"api_name": "pants.util.strutil.help_text",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "pants.engine.target.Target",
"line_number": 346,
"usage_type": "name"
},
{
"api_name": "typing.ClassVar",
"line_number": 347,
"usage_type": "name"
},
{
"api_name": "pants.engine.target.COMMON_TARGET_FIELDS",
"line_number": 349,
"usage_type": "name"
},
{
"api_name": "pants.util.strutil.help_text",
"line_number": 356,
"usage_type": "call"
}
] |
2578089451
|
from collections import defaultdict
def func(nums1, nums2):
    totals = defaultdict(int)  # renamed from `hash` to avoid shadowing the builtin
    while nums1:
        np1 = nums1.pop()
        totals[np1[0]] += np1[1]
    while nums2:
        np2 = nums2.pop()
        totals[np2[0]] += np2[1]
    return sorted([[key, value] for key, value in totals.items()])
nums1 = [[2,4],[3,6],[5,5]]
nums2 = [[1,3],[4,3]]
print(func(nums1, nums2))
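# For the sample inputs above this prints:
#   [[1, 3], [2, 4], [3, 6], [4, 3], [5, 5]]
# (each id paired with the sum of its values across both input arrays, sorted by id).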
|
mayo516/Algorithm
|
주리머/2-2w/wc/(성공) 6362. Merge Two 2D Arrays by Summing Values.py
|
(성공) 6362. Merge Two 2D Arrays by Summing Values.py
|
py
| 409 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "collections.defaultdict",
"line_number": 3,
"usage_type": "call"
}
] |
28158074215
|
"""Destroy unused AMIs in your AWS account.
Usage:
ami_destroyer.py <requiredtag> [options]
Arguments:
<requiredtag> Tag required for an AMI to be cleaned up in the form tag:NameOfTag
Options:
  --retain=<retain>    Number of images to retain, sorted newest to oldest [default: 2]
--regions=<regions> A comma-separated list of AWS Regions to run against [default: us-east-1]
--help Show this help string
--dryrun List the AMIs that'll be destroyed by this script
"""
import sys
import logging
from operator import itemgetter
from docopt import docopt
import boto3
import botocore.exceptions as botoex
_LOGGER = logging.Logger("ami-destroyer")
def setup_logging():
_LOGGER.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s : %(levelname)s - %(message)s')
handler.setFormatter(formatter)
_LOGGER.addHandler(handler)
def get_account_id():
acctid = sts.get_caller_identity().get('Account')
_LOGGER.info("Retrieving Account ID: {}".format(acctid))
return acctid
def get_curated_images(tagname, accountid):
_LOGGER.info("Retrieving AMIs with {}".format(tagname))
return ec2.images.filter(
Owners=[accountid],
Filters=[
{
'Name':tagname,
'Values':['True']
}
]
)
def sort_curated_images(curatedimages):
_LOGGER.info("Sorting tagged AMIs into a nice dictionary of lists of dictionaries")
sortedimages = {}
for i in curatedimages:
for tag in i.tags:
if tag['Key'] == 'Name':
iname = tag['Value']
break
else:
iname = "nonametag"
if iname not in sortedimages:
sortedimages[iname] = []
sortedimages[iname].append({
'creation_date': i.creation_date,
'ami_id': i.image_id,
'snapshot_id': i.block_device_mappings[0]['Ebs']['SnapshotId']
})
sortedimages[iname] = sorted(
sortedimages[iname],
key=itemgetter('creation_date'),
reverse=True
)
return sortedimages
def prune_sorted_images(images, retain):
for family in images:
_LOGGER.info(
"Found {} tagged images for type {}. Retaining the latest {}".format(
len(images[family]),
family,
retain
)
)
images[family] = images[family][retain:]
if not images[family]:
_LOGGER.info("No images to prune for {}".format(family))
return images
def destroy_ami(ami_id, family, dryrun):
try:
ec2.Image(ami_id).deregister(DryRun=dryrun)
_LOGGER.info("Family: {} - Deregistered {}".format(family, ami_id))
except botoex.ClientError as e:
_LOGGER.warning("{} - {}".format(ami_id, e))
def destroy_snapshot(snapshot_id, family, dryrun):
try:
ec2.Snapshot(snapshot_id).delete(DryRun=dryrun)
_LOGGER.info("Family: {} - Deleted {}".format(family, snapshot_id))
except botoex.ClientError as e:
_LOGGER.warning("{} - {}".format(snapshot_id, e))
def run(tag, retain, dryrun):
acctid = get_account_id()
curatedimages = get_curated_images(tag, acctid)
sortedimages = sort_curated_images(curatedimages)
if sortedimages:
        prunedimages = prune_sorted_images(sortedimages, retain)
for family in prunedimages:
if prunedimages[family]:
for ami in prunedimages[family]:
destroy_ami(ami['ami_id'], family, dryrun)
destroy_snapshot(ami['snapshot_id'], family, dryrun)
else:
_LOGGER.error("No tagged images to prune")
if __name__ == '__main__':
args = docopt(__doc__)
requiredtag = args['<requiredtag>']
dryrun = args['--dryrun']
numretain = int(args['--retain'])
regions = args['--regions']
setup_logging()
for r in regions.split(','):
_LOGGER.info("##### Running cleanup for region {} #####".format(r))
ec2 = boto3.resource('ec2', region_name=r)
sts = boto3.client('sts')
run(requiredtag, numretain, dryrun)
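# Example invocation (tag name hypothetical; note that the filter above only
# matches AMIs whose tag value is the string 'True'):
#   python amidestroyer.py tag:AMICleanup --retain=3 --regions=us-east-1,us-west-2 --dryrun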
|
crielly/amidestroyer
|
amidestroyer.py
|
amidestroyer.py
|
py
| 4,275 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "logging.Logger",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "logging.StreamHandler",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "logging.Formatter",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "operator.itemgetter",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "botocore.exceptions.ClientError",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "botocore.exceptions",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "botocore.exceptions.ClientError",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "botocore.exceptions",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "docopt.docopt",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "boto3.resource",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 126,
"usage_type": "call"
}
] |
3625906365
|
from django.shortcuts import render, redirect, get_object_or_404
from django.views.generic import DetailView, View, UpdateView, ListView, TemplateView
from django.core.urlresolvers import reverse
from django.urls import reverse_lazy
from django.contrib import messages
from django.http import Http404
from .models import Checkout
from .forms import UpdateCheckoutForm
from products.models import Product
from accounts.models import Account
from django.shortcuts import render_to_response
from django.template import RequestContext
# class CheckoutView(DetailView):
# template_name = 'accounts/profile.html'
# success_url = reverse_lazy('orders:checkout')
# queryset = ''
#
# def get_object(self, **kwargs):
# id = self.kwargs['id']
# product = get_object_or_404(Product, id=id, publish=True)
# checkout = Checkout.objects.create(
# user=self.request.user,
# name=product.name,
# price=product.price,
# quantity=product.quantity,
# discount=product.discount,
# )
# return get_object_or_404(Product, id=id, publish=True)
#
# def get_context_data(self, **kwargs):
# context = super(CheckoutView, self).get_context_data(**kwargs)
# context['title'] = 'Profile'
# context['orders'] = Checkout.objects.filter(user=self.request.user)
# return context
# add to cart form
class CheckoutView(View):
template_name = 'orders/checkout.html'
def post(self, request, id):
id = self.kwargs['id']
qs = Checkout.objects.filter(product_id=id, status='waiting')
if qs.exists():
            messages.success(request, 'You cannot add this product because it was already added before!')
return redirect('orders:checkout')
else:
product = get_object_or_404(Product, id=id, publish=True)
checkout = Checkout.objects.create(
user=self.request.user,
product_id=id,
name=product.name,
slug=product.slug,
price=product.price,
quantity=1,
discount=product.discount,
image=product.image,
)
messages.success(request, 'Successfully Added!')
return redirect('orders:checkout')
# all orders page that submitted
class OrdersView(ListView):
template_name = 'orders/orders.html'
queryset = ''
def get_context_data(self, **kwargs):
context = super(OrdersView, self).get_context_data(**kwargs)
context['title'] = 'Pending Orders'
if self.request.user.is_authenticated:
context['orders'] = Checkout.objects.filter(user=self.request.user, status='pending')
else:
raise Http404
return context
# all orders page that submitted & accepted
class AcceptedOrdersView(ListView):
template_name = 'orders/orders.html'
queryset = ''
def get_context_data(self, **kwargs):
context = super(AcceptedOrdersView, self).get_context_data(**kwargs)
context['title'] = 'Accepted Orders'
if self.request.user.is_authenticated:
context['orders'] = Checkout.objects.filter(user=self.request.user, status='accepted')
else:
raise Http404
return context
# all orders page that submitted & rejected
class RejectedOrdersView(ListView):
template_name = 'orders/orders.html'
queryset = ''
def get_context_data(self, **kwargs):
context = super(RejectedOrdersView, self).get_context_data(**kwargs)
context['title'] = 'Rejected Orders'
if self.request.user.is_authenticated:
context['orders'] = Checkout.objects.filter(user=self.request.user, status='rejected')
else:
raise Http404
return context
# checkout page
class CheckoutOrderView(ListView):
template_name = 'orders/checkout.html'
queryset = ''
def get_context_data(self, **kwargs):
context = super(CheckoutOrderView, self).get_context_data(**kwargs)
context['title'] = 'Cart'
if self.request.user.is_authenticated:
qs = Checkout.objects.filter(user=self.request.user, status='waiting')
context['orders'] = qs
orders = qs
total = 0
for order in orders:
total += order.price * order.quantity
context['total'] = total
else:
raise Http404
return context
# # update order page
# class CheckoutUpdateView(UpdateView):
# form_class = UpdateCheckoutForm
# model = Checkout
# template_name = 'orders/update_order.html'
# success_url = reverse_lazy('orders:checkout')
#
# # def get_success_url(self):
# # return reverse('orders:checkout')
#
# def get_context_data(self, **kwargs):
# context = super(CheckoutUpdateView, self).get_context_data(**kwargs)
# context['title'] = 'Update Order {}'.format(Checkout.objects.filter(id=self.kwargs['pk']).first().name)
# return context
# update order page
class CheckoutUpdateView(View):
template_name = 'orders/checkout.html'
def post(self, request, pk):
quantity = int(request.POST['quantity'])
product_id = request.POST['product_id']
available = Product.objects.filter(id=product_id).first()
        if quantity > available.quantity:
            messages.success(request, 'Quantity exceeds the available quantity ({}) for product: {}'.format(available.quantity, available.name))
            return redirect('orders:checkout')
        if quantity < 1:
            messages.success(request, 'Quantity cannot be less than 1 for product: {}'.format(available.name))
            return redirect('orders:checkout')
qs = Checkout.objects.filter(id=pk, status='waiting')
if qs.exists() and qs.count() == 1:
product_quantity = qs.first()
product_quantity.quantity = quantity
product_quantity.save()
            messages.success(request, 'Successfully Updated!')
return redirect('orders:checkout')
# delete order
class OrderDeleteView(View):
template_name = 'orders/checkout.html'
def post(self, request, id):
username = self.request.user
if username is None:
raise Http404
else:
qs = Checkout.objects.filter(id=id)
if qs.exists() and qs.count() == 1:
order = qs.first()
order.delete()
return redirect('orders:checkout')
# Buy orders
class BuyOrdersView(View):
template_name = 'orders/orders.html'
def post(self, request):
username = self.request.user
if username is None:
raise Http404
else:
user_id = Account.objects.filter(user=username).first()
account = Account.objects.filter(user=username)
qs = Checkout.objects.filter(user=username, status='waiting')
if account.exists():
user = account.first()
if user.gender is None \
or user.country is None \
or user.region is None \
or user.address1 is None \
or user.phone_number1 is None \
or user.phone_number2 is None:
                    messages.success(request, 'Add your information first to complete your order!')
return redirect('accounts:update', pk=user_id.id)
if qs.exists():
for order in qs:
order.status = 'pending'
order.save()
product = Product.objects.filter(id=order.product_id).first()
product.quantity -= order.quantity
product.number_of_sales += 1
product.save()
return redirect('orders:thank')
class BuyThankView(TemplateView):
template_name = "orders/thank.html"
def get_context_data(self, **kwargs):
context = super(BuyThankView, self).get_context_data(**kwargs)
context['title'] = 'Thank You'
return context
def handler404(request):
response = render_to_response('404.html', {}, context_instance=RequestContext(request))
response.status_code = 404
return response
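# A hypothetical urls.py wiring for these views (an assumption, not part of the
# original app; the 'checkout' and 'thank' names match the redirects used above,
# the rest are illustrative):
#
#   from django.conf.urls import url
#   from . import views
#
#   urlpatterns = [
#       url(r'^checkout/$', views.CheckoutOrderView.as_view(), name='checkout'),
#       url(r'^add/(?P<id>\d+)/$', views.CheckoutView.as_view(), name='add'),
#       url(r'^update/(?P<pk>\d+)/$', views.CheckoutUpdateView.as_view(), name='update'),
#       url(r'^delete/(?P<id>\d+)/$', views.OrderDeleteView.as_view(), name='delete'),
#       url(r'^buy/$', views.BuyOrdersView.as_view(), name='buy'),
#       url(r'^thank/$', views.BuyThankView.as_view(), name='thank'),
#   ]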
|
tegarty/E-Commerce_django
|
orders/views.py
|
views.py
|
py
| 8,371 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "django.views.generic.View",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "models.Checkout.objects.filter",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "models.Checkout.objects",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "models.Checkout",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "products.models.Product",
"line_number": 54,
"usage_type": "argument"
},
{
"api_name": "models.Checkout.objects.create",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "models.Checkout.objects",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "models.Checkout",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "django.views.generic.ListView",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "models.Checkout.objects.filter",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "models.Checkout.objects",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "models.Checkout",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "django.http.Http404",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "django.views.generic.ListView",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "models.Checkout.objects.filter",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "models.Checkout.objects",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "models.Checkout",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "django.http.Http404",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "django.views.generic.ListView",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "models.Checkout.objects.filter",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "models.Checkout.objects",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "models.Checkout",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "django.http.Http404",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "django.views.generic.ListView",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "models.Checkout.objects.filter",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "models.Checkout.objects",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "models.Checkout",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "django.http.Http404",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "django.views.generic.View",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "products.models.Product.objects.filter",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "products.models.Product.objects",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "products.models.Product",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "models.Checkout.objects.filter",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "models.Checkout.objects",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "models.Checkout",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "django.views.generic.View",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "django.http.Http404",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "models.Checkout.objects.filter",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "models.Checkout.objects",
"line_number": 183,
"usage_type": "attribute"
},
{
"api_name": "models.Checkout",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "django.views.generic.View",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "django.http.Http404",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "accounts.models.Account.objects.filter",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "accounts.models.Account.objects",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "accounts.models.Account",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "accounts.models.Account.objects.filter",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "accounts.models.Account.objects",
"line_number": 200,
"usage_type": "attribute"
},
{
"api_name": "accounts.models.Account",
"line_number": 200,
"usage_type": "name"
},
{
"api_name": "models.Checkout.objects.filter",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "models.Checkout.objects",
"line_number": 201,
"usage_type": "attribute"
},
{
"api_name": "models.Checkout",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 210,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "products.models.Product.objects.filter",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "products.models.Product.objects",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "products.models.Product",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "django.views.generic.TemplateView",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "django.template.RequestContext",
"line_number": 237,
"usage_type": "call"
}
] |
4222865674
|
#!/usr/bin/python
import os
import SimpleITK as sitk
import numpy as np
import scipy.ndimage.interpolation
import skimage.exposure
import skimage.filters
import skimage.transform
path="//Users//zhangyuwei//Desktop//test"
ShrinkFactor = 4
for i in os.walk(path):
for j in range(len(i[2])):
if os.path.splitext(i[2][j])[1] == ".gz" :
print(os.path.splitext(i[2][j])[0])
nifti_file = sitk.ReadImage(os.path.join(i[0],i[2][j]))
mask_img = sitk.BinaryThreshold(nifti_file, 80, 5000)
mask_filename = "globalmask_" + os.path.splitext(i[2][j])[0] + ".gz"
output_filename = "N4ITKcorrected_" + os.path.splitext(i[2][j])[0] + ".gz"
output_biasname1 = "bias_in_" + os.path.splitext(i[2][j])[0] + ".gz"
output_biasname2 = "bias_out_" + os.path.splitext(i[2][j])[0] + ".gz"
sitk.WriteImage(mask_img, mask_filename)
nifti_shape = sitk.GetArrayFromImage(nifti_file)
nifti_shape = nifti_shape.shape
# Call and initialize an N4 corrector instance.
corrector = sitk.N4BiasFieldCorrectionImageFilter()
            # N4 parameters must be configured via method calls; the original used
            # attribute assignment ('corrector.SetX = v'), which silently overwrites
            # the bound method and leaves the filter at its defaults.
            corrector.SetMaximumNumberOfIterations([50])
            corrector.SetNumberOfHistogramBins(128)
            corrector.SetSplineOrder(10)
            corrector.SetConvergenceThreshold(0.001)
            corrector.SetNumberOfControlPoints([8] * nifti_file.GetDimension())
print("> Initializing Compelete!")
if ShrinkFactor > 1 :
shrinked_img = sitk.Shrink(nifti_file, [ShrinkFactor] * nifti_file.GetDimension())
shrinked_mask = sitk.Shrink(mask_img, [ShrinkFactor] * nifti_file.GetDimension())
shrinked_img = sitk.Cast(shrinked_img, sitk.sitkFloat32)
#shrinked_mask = sitk.Cast(shrinked_mask, sitk.sitkFloat32)
print("> Starting Execution...")
corrected_img = corrector.Execute(shrinked_img, shrinked_mask)
print("> Execution Complete!")
# Estimate the bias field of corrected image
re_corrected = corrector.Execute(corrected_img, shrinked_mask)
print("> Corrected Bias Estimation Complete!")
corrected_img = sitk.GetArrayFromImage(corrected_img)
corrected_img[corrected_img == 0] = 0.001
re_corrected = sitk.GetArrayFromImage(re_corrected)
re_corrected[re_corrected == 0] = 0.001
shrinked_img = sitk.GetArrayFromImage(shrinked_img)
# Generate biasfield
shrinked_bias = shrinked_img / corrected_img
corrected_bias = corrected_img / re_corrected
# Output
output_bias = scipy.ndimage.zoom(shrinked_bias, np.array(nifti_shape) / shrinked_bias.shape)
output_bias2 = scipy.ndimage.zoom(corrected_bias, np.array(nifti_shape) / shrinked_bias.shape)
output_img = sitk.GetArrayFromImage(nifti_file) / output_bias
output_bias = sitk.GetImageFromArray(output_bias)
output_bias2 = sitk.GetImageFromArray(output_bias2)
output_img = sitk.Cast(sitk.GetImageFromArray(output_img), sitk.sitkUInt16)
sitk.WriteImage(output_img, output_filename)
sitk.WriteImage(output_bias, output_biasname1)
sitk.WriteImage(output_bias2, output_biasname2)
print("> Save Complete!")
else:
source_img = sitk.Shrink(nifti_file, [ShrinkFactor] * nifti_file.GetDimension())
mask_img = sitk.Shrink(mask_img, [ShrinkFactor] * mask_img.GetDimension())
source_img = sitk.Cast(source_img, sitk.sitkFloat32)
output_img = corrector.Execute(source_img, mask_img)
output_img = sitk.Cast(output_img, sitk.sitkUInt16)
sitk.WriteImage(output_img, output_filename)
#biasfield_img = source_img / output_img
#biasfield_img[biasfield_img < 0.5] = 0.5
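# Pipeline summary (added for clarity): the volume is shrunk by ShrinkFactor,
# N4-corrected at low resolution, the recovered bias field (input / corrected)
# is zoomed back to the full-resolution shape with scipy.ndimage.zoom, and the
# original image is divided by that upsampled field to produce the output.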
|
20zzyw/Radiomic-Toolbox
|
N4ITK_instance.py
|
N4ITK_instance.py
|
py
| 4,354 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.walk",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "SimpleITK.ReadImage",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "SimpleITK.BinaryThreshold",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "SimpleITK.WriteImage",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "SimpleITK.GetArrayFromImage",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "SimpleITK.N4BiasFieldCorrectionImageFilter",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "SimpleITK.Shrink",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "SimpleITK.Shrink",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "SimpleITK.Cast",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "SimpleITK.sitkFloat32",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "SimpleITK.GetArrayFromImage",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "SimpleITK.GetArrayFromImage",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "SimpleITK.GetArrayFromImage",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.interpolation.ndimage.zoom",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.interpolation.ndimage",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "scipy.ndimage.interpolation",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.interpolation.ndimage.zoom",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.interpolation.ndimage",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "scipy.ndimage.interpolation",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "SimpleITK.GetArrayFromImage",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "SimpleITK.GetImageFromArray",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "SimpleITK.GetImageFromArray",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "SimpleITK.Cast",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "SimpleITK.GetImageFromArray",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "SimpleITK.sitkUInt16",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "SimpleITK.WriteImage",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "SimpleITK.WriteImage",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "SimpleITK.WriteImage",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "SimpleITK.Shrink",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "SimpleITK.Shrink",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "SimpleITK.Cast",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "SimpleITK.sitkFloat32",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "SimpleITK.Cast",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "SimpleITK.sitkUInt16",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "SimpleITK.WriteImage",
"line_number": 87,
"usage_type": "call"
}
] |
35383876896
|
import json
from copy import deepcopy
from random import randint
import discord
from discord.ext import tasks, commands
def getData():
with open("data.json", "r") as levelsFile:
return json.loads(levelsFile.read())
def setData(_dict):
with open("data.json", "w") as levelsFile:
levelsFile.write(json.dumps(_dict))
levelxp = [0, 300, 900, 2700, 6500, 14000, 23000, 34000, 48000, 64000, 85000, 100000, 120000, 140000, 165000, 195000, 225000, 265000, 305000, 355000]
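# levelxp matches the D&D 5e cumulative XP thresholds. The repeated
# proficiency-bonus ternaries below (2 if level < 5 else 3 ...) are equivalent
# to this hypothetical helper (a sketch, not used by the original code):
# def proficiency_bonus(level):
#     return 2 + max(0, level - 1) // 4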
class DND(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.ac = randint(1, 20)
self.base = {
"xp": 0,
"level": 0,
"health": 20,
"class": "",
"inventory": [],
"stats": {
"str": {"base": 0, "mod": 0},
"dex": {"base": 0, "mod": 0},
"con": {"base": 0, "mod": 0},
"int": {"base": 0, "mod": 0},
"wis": {"base": 0, "mod": 0},
"cha": {"base": 0, "mod": 0}
}
}
self.hell_ac.start()
def cog_unload(self):
self.hell_ac.cancel()
@commands.Cog.listener()
async def on_message(self, message):
if not message.author.bot:
try:
data = getData()
try:
level = data[message.author.name]["level"]
data[message.author.name]["xp"] += (randint(1, 10))
except KeyError:
data[message.author.name] = deepcopy(self.base)  # copy, so users don't share the template dict
level = data[message.author.name]["level"]
data[message.author.name]["xp"] += randint(1, 10)
bonus = 2 if level < 5 else (3 if level < 9 else (4 if level < 13 else (5 if level < 17 else 6)))
if message.channel.name == "hell":
roll = randint(1, 20) + bonus
if roll <= self.ac:
await message.delete()
await message.channel.send("(%s > %s+%s) **%s** tried to send a message, but failed the roll!" % (self.ac, roll - bonus, bonus, message.author.name))
else:
await message.delete()
await message.channel.send("(%s < %s+%s) **%s**: %s" % (self.ac, roll - bonus, bonus, message.author.name, message.content))
else:
try:
words = data["banned words"]
except KeyError:
data["banned words"] = []
words = data["banned words"]
for word in words:
if word in message.content.lower() and not (message.content.startswith("2m.unban") or message.content.startswith("2m.ban")):
data[message.author.name]["health"] -= 1
await message.channel.send("Uh oh! You fucking idiot. You just said '%s'.\n\nDie." % word)
if level < len(levelxp) and data[message.author.name]["xp"] >= levelxp[level]:  # guard against indexing past level 20
data[message.author.name]["level"] += 1
await message.channel.send("**%s** levelled up to level %s!" % (message.author.name, data[message.author.name]["level"]))
if data[message.author.name]["health"] <= 0:
data[message.author.name] = deepcopy(self.base)
await message.channel.send("Oop, **%s** is dead. Now you gotta reroll stats!" % message.author.name)
setData(data)
except Exception:
pass  # never let a malformed message crash the bot
@tasks.loop(minutes=5)
async def hell_ac(self):
hell = discord.utils.get(self.bot.get_guild(677689511525875715).channels, name="hell")
if randint(1, 100) <= 33:
ac = randint(1, 20)
self.ac = ac
await hell.send("__**Hell's AC is now %s!**__" % ac)
@commands.command(brief="Roll up your stats.")
async def rollstats(self, ctx, *, order):
data = getData()
try:
if not (0 in (data[ctx.author.name]["stats"][key]["base"] for key in data[ctx.author.name]["stats"])):
return await ctx.send("You've already rolled your stats! Theres no going back now.")
except KeyError:
data[ctx.author.name] = deepcopy(self.base)
order = order.split(" ")
for item in order:
if item not in ["str", "dex", "con", "int", "wis", "cha"]:
return await ctx.send("Please use the correct stat names!\nThey are:\n%s" % ("\n".join(["str", "dex", "con", "int", "wis", "cha"])))
final = []
allrolls = []
for i in range(6):
allrolls.append([randint(1, 6) for x in range(4)])
for arr in range(len(allrolls)):
del allrolls[arr][allrolls[arr].index(min(allrolls[arr]))]
allrolls[arr] = sum(allrolls[arr])
allrolls.sort(reverse=True)
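# The block above is the classic "4d6 drop the lowest" method: roll four d6
# per stat, discard the minimum, sum the rest, and assign the sorted totals
# to the stats in the order the player supplied.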
for i in range(6):
num = allrolls[i]
tempnum = allrolls[i]
if tempnum % 2 == 1:
tempnum -= 1
bonuses = {
0: -5,
2: -4,
4: -3,
6: -2,
8: -1,
10: 0,
12: 1,
14: 2,
16: 3,
18: 4,
20: 5,
22: 6,
24: 7,
26: 8,
28: 9,
30: 10
}
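# The table above reproduces the standard modifier formula (score - 10) // 2;
# tempnum rounds odd scores down to the nearest even key first.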
final.append("%s -> %s (%s)" % (order[i], num, bonuses[tempnum] if num < 10 else ("+%s" % bonuses[tempnum])))
data[ctx.author.name]["stats"][order[i]] = {"base": num, "mod": bonuses[tempnum]}
await ctx.send("\n".join(final))
setData(data)
@commands.command(brief="Get the AC of hell.")
async def getac(self, ctx):
await ctx.send("Hell's AC is currently **%s**!" % self.ac)
@commands.command(brief="Get information on a level.")
async def levelinfo(self, ctx, level: int):
if not level > 20:
await ctx.send("__**Level %s Information**__\nNeeded XP: %s\nProficiency Bonus: %s" % (level, levelxp[level - 1], "+2" if level < 5 else ("+3" if level < 9 else ("+4" if level < 13 else ("+5" if level < 17 else "+6")))))
else:
await ctx.send("That level is too high! Level 20 is the maximum.")
print("%s GOT LEVEL INFORMATION. (%s)" % (ctx.author.name, level))
@commands.command(brief="Get your current level and XP.")
async def stats(self, ctx):
data = getData()
level = data[ctx.author.name]["level"]
bonus = 2 if level < 5 else (3 if level < 9 else (4 if level < 13 else (5 if level < 17 else 6)))
await ctx.send(
"__**%s's Information**__\nHealth: %s\nLevel: %s\nXP: %s\nProficiency Bonus: +%s\n\n%s" % (ctx.author.name, data[ctx.author.name]["health"], data[ctx.author.name]["level"], data[ctx.author.name]["xp"], bonus, "\n".join(
["%s: %s (%s)" % (key, data[ctx.author.name]["stats"][key]["base"], data[ctx.author.name]["stats"][key]["mod"]) for key in data[ctx.author.name]["stats"]])))
print("%s GOT THEIR LEVEL INFORMATION." % ctx.author.name)
def setup(bot):
bot.add_cog(DND(bot))
|
JONKKKK/Codes
|
2MS2A/dnd.py
|
dnd.py
|
py
| 7,250 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "json.loads",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Cog.listener",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "discord.utils.get",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "discord.utils",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "discord.ext.tasks.loop",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "discord.ext.tasks",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 162,
"usage_type": "name"
}
] |
13276813866
|
import sys
from os.path import join
from PyQt5 import uic
from PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton, QListWidgetItem, QWidget
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import Qt
import sqlite3
from functools import partial
class EditInfo(QWidget):
def __init__(self, id) -> None:
super().__init__()
uic.loadUi(join("task5", "addEditCoffeeForm.ui"), self)
self.id = id
self.edit.clicked.connect(self.update)
def update(self):
db = sqlite3.connect(join("task5", "coffee.sqlite"))
cur = db.cursor()
try:
name = self.bookTitle.text()
elem1 = self.bookAuthor.text()
elem2 = self.bookReleaseYear.text()
desc = self.bookGenre.text()
for i in [name, elem1, elem2, desc]:
if not (bool(i) and bool(i.strip())):
raise ValueError
for w in i:
if w.isdigit():
raise ValueError
cs = float(self.costl.text())
vl = float(self.volumel.text())
except ValueError:
self.errors.setText("Введены некорректные данные!!!")
else:
cur.execute(f'''
UPDATE coffee SET name_of_the_variety = ?, degree_of_roasting = ?,
ground_or_in_grains = ?,
taste_description = ?, cost = ?, packing_volume = ?
WHERE id = ?''',
[name, elem1, elem2, desc, cs, vl, self.id])
db.commit()
db.close()
def addNewCoffeeSort(self):
# req = INSERT INTO coffee (name_of_the_variety, degree_of_roasting, ground_or_in_grains, taste_description, cost, packing_volume)
# VALUES ("1", "1", "1", "1", 1, 1)
db = sqlite3.connect(join("task5", "coffee.sqlite"))
cur = db.cursor()
try:
name = self.bookTitle.text()
elem1 = self.bookAuthor.text()
elem2 = self.bookReleaseYear.text()
desc = self.bookGenre.text()
for i in [name, elem1, elem2, desc]:
if not (bool(i) and bool(i.strip())):
raise ValueError
for w in i:
if w.isdigit():
raise ValueError
cs = float(self.costl.text())
vl = float(self.volumel.text())
except ValueError:
self.errors.setText("Invalid data entered!!!")
else:
cur.execute(f'''
INSERT INTO coffee (name_of_the_variety, degree_of_roasting, ground_or_in_grains, taste_description, cost, packing_volume)
VALUES (?, ?, ?, ?, ?, ?)''',
[name, elem1, elem2, desc, cs, vl])
db.commit()
db.close()
class AddNewInfo(QWidget):
def __init__(self) -> None:
super().__init__()
uic.loadUi(join("task5", "addEditCoffeeForm2.ui"), self)
self.add.clicked.connect(self.addNewItem)
def addNewItem(self):
db = sqlite3.connect(join("task5", "coffee.sqlite"))
cur = db.cursor()
try:
name = self.bookTitle.text()
elem1 = self.bookAuthor.text()
elem2 = self.bookReleaseYear.text()
desc = self.bookGenre.text()
for i in [name, elem1, elem2, desc]:
if not (bool(i) and bool(i.strip())):
raise ValueError
for w in i:
if w.isdigit():
raise ValueError
cs = float(self.costl.text())
vl = float(self.volumel.text())
except ValueError:
self.errors.setText("Введены некорректные данные!!!")
else:
cur.execute(f'''
INSERT INTO coffee (name_of_the_variety, degree_of_roasting, ground_or_in_grains, taste_description, cost, packing_volume)
VALUES (?, ?, ?, ?, ?, ?)''',
[name, elem1, elem2, desc, cs, vl])
db.commit()
db.close()
class MyWidget(QMainWindow):
def __init__(self) -> None:
super().__init__()
uic.loadUi(join("task5", "main.ui"), self)
self.btn.clicked.connect(self.search)
self.addNewCoffe.clicked.connect(self.addSomeInfo)
def search(self):
self.listWidget.clear()
# searchText = self.lineEdit.text()
# db = sqlite3.connect(join("QT_Standalone", "task7", "books.db"))
db = sqlite3.connect(join("task5", "coffee.sqlite"))
cur = db.cursor()
res = cur.execute(
f'''SELECT * FROM coffee''')
for elem in res:
# print(elem)
btn = QPushButton(f"{elem[1]}(нажми для большей информации)", self)
clickFunc = partial(self.some, elem[0], elem[1],
elem[2], elem[3], elem[4], elem[5], elem[6])
btn.clicked.connect(clickFunc)
item = QListWidgetItem()
item.setSizeHint(btn.sizeHint())
self.listWidget.addItem(item)
self.listWidget.setItemWidget(item, btn)
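# Pattern note: a QListWidget row can host an arbitrary widget -- create a
# placeholder QListWidgetItem sized to the button's hint, add it, then bind
# the button to the row with setItemWidget().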
def some(self, id, name, author, year, genre, cost, volume):
self.pop = EditInfo(id)
# the required input fields are below
self.pop.bookAuthor.setText(author)
self.pop.bookTitle.setText(name)
self.pop.bookGenre.setText(genre)
self.pop.bookReleaseYear.setText(str(year))
self.pop.costl.setText(str(cost))
self.pop.volumel.setText(str(volume))
self.pop.show()
def addSomeInfo(self):
self.pop2 = AddNewInfo()
self.pop2.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = MyWidget()
ex.show()
# print(eval("9!"))
sys.exit(app.exec_())
|
QBoff/Moscow-Kiper
|
task5/main.py
|
main.py
|
py
| 5,993 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "PyQt5.uic.loadUi",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "PyQt5.uic",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "PyQt5.uic.loadUi",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "PyQt5.uic",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "PyQt5.uic.loadUi",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "PyQt5.uic",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QPushButton",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QListWidgetItem",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 167,
"usage_type": "call"
}
] |
31791424883
|
import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name='bhamcal',
version='0.1',
license='GPL 3',
python_requires='>=3',
author='Justin Chadwell',
author_email='[email protected]',
url='https://github.com/jedevc/bhamcal',
description='A timetable extractor for University of Birmingham',
long_description=long_description,
long_description_content_type='text/markdown',
packages=setuptools.find_packages(),
install_requires=[
'click',
'colorama',
'beautifulsoup4',
'pytz',
'google-api-python-client',
'google-auth-httplib2',
'google-auth-oauthlib'
],
extras_require={
'browser': ['selenium']
},
entry_points={
'console_scripts': [
'bhamcal=bhamcal:main'
]
},
)
|
jedevc/bhamcal
|
setup.py
|
setup.py
|
py
| 881 |
python
|
en
|
code
| 12 |
github-code
|
6
|
[
{
"api_name": "setuptools.setup",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 20,
"usage_type": "call"
}
] |
23142628393
|
# Neuon AI - PlantCLEF 2020
import tensorflow as tf
from preprocessing import inception_preprocessing
slim = tf.contrib.slim
import numpy as np
import cv2
from nets.inception_v4 import inception_v4
from nets import inception_utils
from PIL import Image
from six.moves import cPickle
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity
# ============================================================= #
# Directories
# ============================================================= #
image_dir_parent_train = "PlantCLEF2020TrainingData"
image_dir_parent_test = "PlantCLEF2020TrainingData"
# raw strings below keep Windows backslashes from being read as escapes (\r, \040, ...)
checkpoint_model = r"checkpoints\run16\040000.ckpt"
species_name_map_csv = r"list\clef2020_herbarium_species.csv"
classmap_txt = r"list\clef2020_herbarium_species_classid_map_to_index.txt"
herbarium_dictionary_file = "mean_emb_dict_997_herb_500_run16_40k_crops.pkl"
test_image = r"PlantCLEF2020TrainingData\photo\373\5859.jpg"
# ============================================================= #
# Parameters
# ============================================================= #
topN = 5 # Number of predictions to output
batch = 10
# Assign batch = 10,
# 10 variations of flipped cropped imgs (center, top left, top right, bottom left, bottom right,
# center flipped, top left flipped, top right flipped, bottom left flipped, bottom right flipped)
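# (These 10 crops are exactly what read_img() below produces, so `batch` must
# equal the number of crops returned per image.)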
numclasses1 = 997 # Class number of Herbarium network
numclasses2 = 10000 # Class number of Field network
input_size = (299,299,3) # Image input size
# ============================================================= #
# Load data
# ============================================================= #
# ----- Read herbarium dictionary pkl file ----- #
with open(herbarium_dictionary_file,'rb') as fid1:
herbarium_dictionary = cPickle.load(fid1)
# ----- Map species index to folder ----- #
with open(classmap_txt,'r') as fid:
classmap = [x.strip().split(' ')[0] for x in fid.readlines()]
# ----- Map species name to index ----- #
species_name_map_df = pd.read_csv(species_name_map_csv, sep=',')
species_list = species_name_map_df['species'].to_list()
# ============================================================= #
# Run network / validate image
# ============================================================= #
# ----- Initiate tensors ----- #
x1 = tf.placeholder(tf.float32,(batch,) + input_size)
x2 = tf.placeholder(tf.float32,(batch,) + input_size)
y1 = tf.placeholder(tf.int32,(batch,))
y2 = tf.placeholder(tf.int32,(batch,))
is_training = tf.placeholder(tf.bool)
is_train = tf.placeholder(tf.bool, name="is_training")
# ----- Image preprocessing methods ----- #
train_preproc = lambda xi: inception_preprocessing.preprocess_image(
xi,input_size[0],input_size[1],is_training=True)
test_preproc = lambda xi: inception_preprocessing.preprocess_image(
xi,input_size[0],input_size[1],is_training=False)
def data_in_train1():
return tf.map_fn(fn = train_preproc,elems = x1,dtype=np.float32)
def data_in_test1():
return tf.map_fn(fn = test_preproc,elems = x1,dtype=np.float32)
def data_in_train2():
return tf.map_fn(fn = train_preproc,elems = x2,dtype=np.float32)
def data_in_test2():
return tf.map_fn(fn = test_preproc,elems = x2,dtype=np.float32)
data_in1 = tf.cond(
is_training,
true_fn = data_in_train1,
false_fn = data_in_test1
)
data_in2 = tf.cond(
is_training,
true_fn = data_in_train2,
false_fn = data_in_test2
)
def read_img(img_path):
img = []
try:
current_img = img_path
im = cv2.imread(current_img)
if im is None:
im = cv2.cvtColor(np.asarray(Image.open(current_img).convert('RGB')),cv2.COLOR_RGB2BGR)
im = cv2.resize(im,(input_size[0:2]))
if np.ndim(im) == 2:
im = cv2.cvtColor(im,cv2.COLOR_GRAY2RGB)
else:
im = cv2.cvtColor(im,cv2.COLOR_BGR2RGB)
# Center and Corner crops
im1 = im[0:260,0:260,:]
im2 = im[0:260,-260:,:]
im3 = im[-260:,0:260,:]
im4 = im[-260:,-260:,:]
im5 = im[19:279,19:279,:]
imtemp = [cv2.resize(ims,(input_size[0:2])) for ims in (im1,im2,im3,im4,im5)]
img.extend(imtemp)
# Flip image
flip_img = cv2.flip(im, 1)
flip_im1 = flip_img[0:260,0:260,:]
flip_im2 = flip_img[0:260,-260:,:]
flip_im3 = flip_img[-260:,0:260,:]
flip_im4 = flip_img[-260:,-260:,:]
flip_im5 = flip_img[19:279,19:279,:]
flip_imtemp = [cv2.resize(imf,(input_size[0:2])) for imf in (flip_im1,flip_im2,flip_im3,flip_im4,flip_im5)]
img.extend(flip_imtemp)
except Exception:
print("Exception found: Image not read...")
img = np.asarray(img,dtype=np.float32)/255.0
return img
# ----- Construct network 1 ----- #
with slim.arg_scope(inception_utils.inception_arg_scope()):
logits,endpoints = inception_v4(data_in1,
num_classes=numclasses1,
is_training=is_training,
scope='herbarium')
herbarium_embs = endpoints['PreLogitsFlatten']
herbarium_bn = tf.layers.batch_normalization(herbarium_embs, training=is_train)
herbarium_feat = tf.contrib.layers.fully_connected(
inputs=herbarium_bn,
num_outputs=500,
activation_fn=None,
normalizer_fn=None,
trainable=True,
scope='herbarium'
)
herbarium_feat = tf.math.l2_normalize(
herbarium_feat,
axis=1
)
# ----- Construct network 2 ----- #
with slim.arg_scope(inception_utils.inception_arg_scope()):
logits2,endpoints2 = inception_v4(data_in2,
num_classes=numclasses2,
is_training=is_training,
scope='field')
field_embs = endpoints2['PreLogitsFlatten']
field_bn = tf.layers.batch_normalization(field_embs, training=is_train)
field_feat = tf.contrib.layers.fully_connected(
inputs=field_bn,
num_outputs=500,
activation_fn=None,
normalizer_fn=None,
trainable=True,
scope='field'
)
field_feat = tf.math.l2_normalize(
field_feat,
axis=1
)
feat_concat = tf.concat([herbarium_feat, field_feat], 0)
variables_to_restore = slim.get_variables_to_restore()
restorer = tf.train.Saver(variables_to_restore)
# ----- Run session ----- #
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
restorer.restore(sess, checkpoint_model)
test_image = read_img(test_image)
sample_embedding = sess.run(
field_feat,
feed_dict = {
x2:test_image,
is_training : False,
is_train : False
}
)
# Average center + corner crop embeddings
averaged_flip = np.mean(sample_embedding, axis=0)
reshaped_emb_sample = averaged_flip.reshape(1,500)
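# Averaging the 10 crop/flip embeddings is a simple test-time augmentation;
# the mean is reshaped to (1, 500) to match the embedding heads defined above.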
print('Getting herbarium dictionary...')
herbarium_emb_list = []
for herbarium_class, herbarium_emb in herbarium_dictionary.items():
herbarium_emb_list.append(np.squeeze(herbarium_emb))
herbarium_emb_list = np.array(herbarium_emb_list)
print('Comparing sample embedding with herbarium embeddings...')
similarity = cosine_similarity(reshaped_emb_sample, herbarium_emb_list)
print('Getting probability distribution...')
similarity_distribution = []
for sim in similarity:
new_distribution = []
for d in sim:
new_similarity = 1 - d # 1 - cosine value (d)
new_distribution.append(new_similarity)
similarity_distribution.append(new_distribution)
similarity_distribution = np.array(similarity_distribution)
# Apply inverse weighting with power of 5
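# Worked example: distances [0.1, 0.2] give raw weights 1/0.1**5 = 100000 and
# 1/0.2**5 = 3125, which normalize to roughly [0.9697, 0.0303] -- the smaller
# cosine distance dominates the distribution.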
probability_list = []
for d in similarity_distribution:
inverse_weighting = (1/np.power(d,5))/np.sum(1/np.power(d,5))
probability_list.append(inverse_weighting)
probability_list = np.array(probability_list)
print('Getting topN predictions...')
for prediction in probability_list:
topN_class_list = prediction.argsort()[-topN:][::-1]
topN_probability_list = np.sort(prediction)[-topN:][::-1]
counter = 0
for cl, prob in zip(topN_class_list, topN_probability_list):
counter += 1
class_index = classmap[int(cl)]
pred_name = species_list[int(cl)]
print('\nPREDICTION:', counter)
print('Species:', pred_name)
print('Class index (folder):', class_index)
print('Probability:', prob)
|
NeuonAI/plantclef2020_challenge
|
validate_image.py
|
validate_image.py
|
py
| 9,376 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "tensorflow.contrib",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "six.moves.cPickle.load",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "six.moves.cPickle",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "tensorflow.int32",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "tensorflow.int32",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "tensorflow.bool",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "tensorflow.bool",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "preprocessing.inception_preprocessing.preprocess_image",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "preprocessing.inception_preprocessing",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "preprocessing.inception_preprocessing.preprocess_image",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "preprocessing.inception_preprocessing",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "tensorflow.map_fn",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.map_fn",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.map_fn",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.map_fn",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.cond",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "tensorflow.cond",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "cv2.COLOR_RGB2BGR",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "numpy.ndim",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_GRAY2RGB",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "cv2.flip",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "nets.inception_utils.inception_arg_scope",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "nets.inception_utils",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "nets.inception_v4.inception_v4",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "tensorflow.layers.batch_normalization",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "tensorflow.layers",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.contrib.layers.fully_connected",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "tensorflow.contrib",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.math.l2_normalize",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "tensorflow.math",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "nets.inception_utils.inception_arg_scope",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "nets.inception_utils",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "nets.inception_v4.inception_v4",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "tensorflow.layers.batch_normalization",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "tensorflow.layers",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.contrib.layers.fully_connected",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "tensorflow.contrib",
"line_number": 174,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.math.l2_normalize",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "tensorflow.math",
"line_number": 182,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.concat",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.Saver",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 190,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.Session",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "tensorflow.global_variables_initializer",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "numpy.squeeze",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.pairwise.cosine_similarity",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "numpy.sort",
"line_number": 241,
"usage_type": "call"
}
] |
25798704755
|
#! python3
import sys
import win32api
from PyQt5.QtWidgets import QApplication, QWidget, \
QToolTip, QPushButton, QMessageBox, QDesktopWidget, \
QMainWindow, QAction, QMenu, QStatusBar
from PyQt5.QtGui import QIcon, QFont
from PyQt5.QtCore import QCoreApplication
'''
# procedural version
app = QApplication(sys.argv)
w = QWidget()
w.resize(300, 150)
w.setFixedSize(700, 400)
x, y = win32api.GetSystemMetrics(0), win32api.GetSystemMetrics(1)
w.move((x - 250) / 2, (y - 150) / 2)
w.setWindowTitle('Convert images to jpg 1920*1080')
w.show()
sys.exit(app.exec_())
'''
x, y = 0, 0
class Example(QMainWindow):
def __init__(self):
super().__init__()  # call the parent class constructor
self.initUI()
def initUI(self):
global x,y
QToolTip.setFont(QFont('SansSerif', 10))
self.setToolTip('This is a <b>QWidget</b> widget')
btn = QPushButton('Quit', self)
btn.setToolTip('This is a <b>QPushButton</b> widget')
# btn.clicked.connect(QCoreApplication.instance().quit) #clicked btn and exit app
btn.clicked.connect(QApplication.instance().exit) #clicked btn and exit app
btn.resize(btn.sizeHint())
btn.move(50, 50)
exitAct:QAction = QAction(QIcon('46.jpg'), 'Exit', self)
exitAct.setShortcut('Ctrl+Q')
exitAct.setStatusTip('Exit application')
exitAct.triggered.connect(QApplication.instance().quit)
menuBar = self.menuBar()
fileMenu = menuBar.addMenu('File')
fileMenu.addAction(exitAct)
self.toolbar = self.addToolBar('Exit')
self.toolbar.addAction(exitAct)
# add a submenu to the File menu
subMenu = QMenu('Import', self)
subAct = QAction('Import file', self)
subMenu.addAction(subAct)
fileMenu.addMenu(subMenu)
# checkable menu action
viewSBar:QAction = QAction('View statusbar', self, checkable=True)
viewSBar.setStatusTip('View statusbar')
viewSBar.setChecked(True)
viewSBar.triggered.connect(self.toggleMenu)
fileMenu.addAction(viewSBar)
# x, y = win32api.GetSystemMetrics(0), win32api.GetSystemMetrics(1)
# self.setGeometry((x-700)/2, (y-400)/2, 700, 400)
self.center()
self.setFixedSize(700, 400)
self.setWindowTitle('Convert images to jpg 1080')
self.statusbar = self.statusBar()
self.statusbar.showMessage('Ready')
self.setWindowIcon(QIcon('46.jpg'))
self.show()
def center(self):
qr = self.frameGeometry()#get self RECT
cp = QDesktopWidget().availableGeometry().center()#
qr.moveCenter(cp)
self.move(qr.topLeft())
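# Centering trick: take the window's frame rect, move its center onto the
# screen's available-geometry center, then move the window to the rect's
# new top-left corner.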
# Called automatically when the widget is about to close
# (virtual function override)
def closeEvent(self, event):
reply = QMessageBox.question(self, 'Message', 'Are you sure to quit',
QMessageBox.Yes|QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
# virtual function override
def contextMenuEvent(self, event):
cmenu = QMenu(self)
newAct = cmenu.addAction('New')
opnAct = cmenu.addAction('Open')
quitAct = cmenu.addAction('Quit')
# determine which action was selected
print(type(event), event.pos())
action = cmenu.exec_(self.mapToGlobal(event.pos()))
if action == quitAct:
QApplication.instance().quit()
def toggleMenu(self, state):
print(state)
if state:
self.statusbar.show()
else:
self.statusbar.hide()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
|
JcobCN/PyLearn
|
pyQt5.py
|
pyQt5.py
|
py
| 3,718 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QToolTip.setFont",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QToolTip",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtGui.QFont",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QPushButton",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication.instance",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QAction",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtGui.QIcon",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication.instance",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QMenu",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QAction",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QAction",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtGui.QIcon",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QDesktopWidget",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QMessageBox.question",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QMessageBox",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QMessageBox.Yes",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets.QMessageBox",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QMessageBox.No",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets.QMessageBox.Yes",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets.QMessageBox",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QMenu",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication.instance",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 118,
"usage_type": "call"
}
] |
34777830541
|
# Set up logging
import sys
import logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
level=logging.WARNING,
)
logger = logging.getLogger(__name__)
from typing import Optional
from dataclasses import dataclass, field
import os
import json
from contextlib import nullcontext
from alive_progress import alive_bar
from transformers.hf_argparser import HfArgumentParser
from transformers.models.auto import AutoConfig, AutoTokenizer, AutoModelForSeq2SeqLM
from seq2seq.utils.pipeline import ConversationalText2SQLGenerationPipeline, Text2SQLGenerationPipeline, Text2SQLInput, ConversationalText2SQLInput
from seq2seq.utils.picard_model_wrapper import PicardArguments, PicardLauncher, with_picard
from seq2seq.utils.dataset import DataTrainingArguments
@dataclass
class PredictionOutputArguments:
"""
Arguments pertaining to execution.
"""
model_path: str = field(
default="tscholak/cxmefzzi",
metadata={"help": "Path to pretrained model"},
)
cache_dir: Optional[str] = field(
default="/tmp",
metadata={"help": "Where to cache pretrained models and data"},
)
db_path: str = field(
default="database",
metadata={"help": "Where to to find the sqlite files"},
)
inputs_path: str = field(default="data/dev.json", metadata={"help": "Where to find the inputs"})
output_path: str = field(
default="predicted_sql.txt", metadata={"help": "Where to write the output queries"}
)
device: int = field(
default=0,
metadata={
"help": "Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU. A non-negative value will run the model on the corresponding CUDA device id."
},
)
conversational: bool = field(default=False, metadata={"help": "Whether or not the inputs are conversations"})
def main():
# See all possible arguments by passing the --help flag to this program.
parser = HfArgumentParser((PicardArguments, PredictionOutputArguments, DataTrainingArguments))
picard_args: PicardArguments
prediction_output_args: PredictionOutputArguments
data_training_args: DataTrainingArguments
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
picard_args, prediction_output_args, data_training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
picard_args, prediction_output_args, data_training_args = parser.parse_args_into_dataclasses()
if os.path.isfile(prediction_output_args.output_path):
raise RuntimeError("file `{}` already exists".format(prediction_output_args.output_path))
# Initialize config
config = AutoConfig.from_pretrained(
prediction_output_args.model_path,
cache_dir=prediction_output_args.cache_dir,
max_length=data_training_args.max_target_length,
num_beams=data_training_args.num_beams,
num_beam_groups=data_training_args.num_beam_groups,
diversity_penalty=data_training_args.diversity_penalty,
)
# Initialize tokenizer
tokenizer = AutoTokenizer.from_pretrained(
prediction_output_args.model_path,
cache_dir=prediction_output_args.cache_dir,
use_fast=True,
)
# Initialize Picard if necessary
with PicardLauncher() if picard_args.launch_picard else nullcontext(None):
# Get Picard model class wrapper
if picard_args.use_picard:
model_cls_wrapper = lambda model_cls: with_picard(
model_cls=model_cls, picard_args=picard_args, tokenizer=tokenizer
)
else:
model_cls_wrapper = lambda model_cls: model_cls
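# with_picard wraps the model class so that decoding is checked by the PICARD
# incremental parser; the identity lambda keeps the unconstrained model when
# PICARD is disabled.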
# Initialize model
model = model_cls_wrapper(AutoModelForSeq2SeqLM).from_pretrained(
prediction_output_args.model_path,
config=config,
cache_dir=prediction_output_args.cache_dir,
)
if prediction_output_args.conversational:
conversational_text2sql(model, tokenizer, prediction_output_args, data_training_args)
else:
text2sql(model, tokenizer, prediction_output_args, data_training_args)
def get_pipeline_kwargs(
model, tokenizer: AutoTokenizer, prediction_output_args: PredictionOutputArguments, data_training_args: DataTrainingArguments
) -> dict:
return {
"model": model,
"tokenizer": tokenizer,
"db_path": prediction_output_args.db_path,
"prefix": data_training_args.source_prefix,
"normalize_query": data_training_args.normalize_query,
"schema_serialization_type": data_training_args.schema_serialization_type,
"schema_serialization_with_db_id": data_training_args.schema_serialization_with_db_id,
"schema_serialization_with_db_content": data_training_args.schema_serialization_with_db_content,
"device": prediction_output_args.device,
}
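# Both pipelines below share these kwargs; only the input type differs
# (Text2SQLInput vs. ConversationalText2SQLInput).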
def text2sql(model, tokenizer, prediction_output_args, data_training_args):
# Initialize generation pipeline
pipe = Text2SQLGenerationPipeline(**get_pipeline_kwargs(model, tokenizer, prediction_output_args, data_training_args))
with open(prediction_output_args.inputs_path) as fp:
questions = json.load(fp)
with alive_bar(len(questions)) as bar:
for question in questions:
try:
outputs = pipe(inputs=Text2SQLInput(question["question"],question["db_id"]))
output = outputs[0]
query = output["generated_text"]
except Exception as e:
logger.error(e)
query = ""
logger.info("writing `{}` to `{}`".format(query, prediction_output_args.output_path))
bar.text(query)
bar()
with open(prediction_output_args.output_path, "a") as fp:
fp.write(query + "\n")
def conversational_text2sql(model, tokenizer, prediction_output_args, data_training_args):
# Initialize generation pipeline
pipe = ConversationalText2SQLGenerationPipeline(
**get_pipeline_kwargs(model, tokenizer, prediction_output_args, data_training_args)
)
with open(prediction_output_args.inputs_path) as fp:
conversations = json.load(fp)
length = sum(len(conversation["interaction"]) for conversation in conversations)
with alive_bar(length) as bar:
for conversation in conversations:
utterances = []
for turn in conversation["interaction"]:
utterances.extend((utterance.strip() for utterance in turn["utterance"].split(sep="|")))
try:
outputs = pipe(
inputs=ConversationalText2SQLInput(list(utterances),
db_id=conversation["database_id"])
)
output = outputs[0]
query = output["generated_text"]
except Exception as e:
logger.error(e)
query = ""
logger.info("writing `{}` to `{}`".format(query, prediction_output_args.output_path))
bar.text(query)
bar()
with open(prediction_output_args.output_path, "a") as fp:
fp.write(query + "\n")
with open(prediction_output_args.output_path, "a") as fp:
fp.write("\n")
if __name__ == "__main__":
main()
|
ServiceNow/picard
|
seq2seq/prediction_output.py
|
prediction_output.py
|
py
| 7,647 |
python
|
en
|
code
| 299 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "logging.StreamHandler",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "logging.WARNING",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "dataclasses.field",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "transformers.hf_argparser.HfArgumentParser",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "seq2seq.utils.picard_model_wrapper.PicardArguments",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "seq2seq.utils.dataset.DataTrainingArguments",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "seq2seq.utils.picard_model_wrapper.PicardArguments",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "seq2seq.utils.dataset.DataTrainingArguments",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "transformers.models.auto.AutoConfig.from_pretrained",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "transformers.models.auto.AutoConfig",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "transformers.models.auto.AutoTokenizer.from_pretrained",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "transformers.models.auto.AutoTokenizer",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "seq2seq.utils.picard_model_wrapper.PicardLauncher",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "contextlib.nullcontext",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "seq2seq.utils.picard_model_wrapper.with_picard",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "transformers.models.auto.AutoModelForSeq2SeqLM",
"line_number": 101,
"usage_type": "argument"
},
{
"api_name": "transformers.models.auto.AutoTokenizer",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "seq2seq.utils.dataset.DataTrainingArguments",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "seq2seq.utils.pipeline.Text2SQLGenerationPipeline",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "alive_progress.alive_bar",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "seq2seq.utils.pipeline.Text2SQLInput",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "seq2seq.utils.pipeline.ConversationalText2SQLGenerationPipeline",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "alive_progress.alive_bar",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "seq2seq.utils.pipeline.ConversationalText2SQLInput",
"line_number": 169,
"usage_type": "call"
}
] |
10139749320
|
from django.shortcuts import render, redirect, reverse, get_object_or_404
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import View
from .forms import (CreateCourseForm,
CreateCourseRegistrationForm,
CreateDepartmentForm,
CreateRegistrationForm)
from .models import Course, CourseRegistration, Department, Registration
import datetime
from django.utils import timezone
class CreateCourseView(LoginRequiredMixin, View):
"""
View for admin to create new Course.
"""
template_name = 'academicInfo/create_course.html'
create_course_form = CreateCourseForm
def get(self, request, *args, **kwargs):
# Render this page only for the Admin.
if hasattr(request.user, 'staff') and request.user.staff.is_admin:
create_course_form = self.create_course_form()
return render(request, self.template_name, {'create_course_form' : create_course_form})
else:
return redirect('home')
def post(self, request, *args, **kwargs):
create_course_form = CreateCourseForm(request.POST)
if create_course_form.is_valid():
course = create_course_form.save()
course.save()
return redirect('view_course')
return render(request, self.template_name, {'create_course_form' : create_course_form})
class CreateCourseRegistrationView(LoginRequiredMixin, View):
"""
View for admin to add Course to the Registration.
"""
template_name = 'academicInfo/create_course_registration.html'
create_course_registration_form = CreateCourseRegistrationForm
def get(self, request, *args, **kwargs):
# Render this page only for the Admin.
if hasattr(request.user, 'staff') and request.user.staff.is_admin:
create_course_registration_form = self.create_course_registration_form()
return render(request, self.template_name, {'create_course_registration_form' : create_course_registration_form})
else:
return redirect('home')
def post(self, request, *args, **kwargs):
create_course_registration_form = CreateCourseRegistrationForm(request.POST)
if create_course_registration_form.is_valid():
# Add course to registration only if this course is not added already
# in this registration.
course_registration = create_course_registration_form.save(commit=False)
# Check if the registration has already started.
if course_registration.registration.startTime <= timezone.now():
create_course_registration_form.add_error('registration',
'The registration has already started, you cannot add course to it now.')
return render(request, self.template_name, {'create_course_registration_form' : create_course_registration_form})
courses_in_registration = course_registration.registration.courseregistration_set.all()
similar_course_registration = courses_in_registration.filter(course=course_registration.course,
semester=course_registration.semester)
# Check if course is not already present in the same registration and semester.
if len(similar_course_registration) == 0:
course_registration.save()
return render(request, self.template_name, {'create_course_registration_form' : create_course_registration_form,
'success': 'Successfully added course to the registration.'})
else:
create_course_registration_form.add_error('course', 'This course is already added in this semester.')
create_course_registration_form.add_error('semester', 'This semester already has this course.')
return render(request, self.template_name, {'create_course_registration_form' : create_course_registration_form})
class CreateDepartmentView(LoginRequiredMixin, View):
"""
View for admin to add new Department.
"""
template_name = 'academicInfo/create_department.html'
create_department_form = CreateDepartmentForm
def get(self, request, *args, **kwargs):
# Render this page only for the Admin.
if hasattr(request.user, 'staff') and request.user.staff.is_admin:
create_department_form = self.create_department_form()
return render(request, self.template_name, {'create_department_form' : create_department_form})
else:
return redirect('home')
def post(self, request, *args, **kwargs):
create_department_form = CreateDepartmentForm(request.POST)
# Check if Department with same name does not already exist.
if create_department_form.is_valid():
department = create_department_form.save()
department.save()
return redirect('view_department')
else:
return render(request, self.template_name, {'create_department_form' : create_department_form})
class CreateRegistrationView(LoginRequiredMixin, View):
"""
View for admin to create new Registration.
"""
template_name = 'academicInfo/create_registration.html'
create_registration_form = CreateRegistrationForm
def get(self, request, *args, **kwargs):
# Render this page only for the Admin.
if hasattr(request.user, 'staff') and request.user.staff.is_admin:
create_registration_form = self.create_registration_form()
return render(request, self.template_name, {'create_registration_form' : create_registration_form})
else:
return redirect('home')
def post(self, request, *args, **kwargs):
create_registration_form = CreateRegistrationForm(request.POST)
# Check if the Registration form is valid.
if create_registration_form.is_valid():
days = int(request.POST['days'])
hours = int(request.POST['hours'])
minutes = int(request.POST['minutes'])
# Check if duration is 0 or not.
if days + hours + minutes == 0:
# Duration cannot be 0.
return render(request, self.template_name, {'create_registration_form' : create_registration_form,
'error' : 'Duration cannot be 0.'})
startTime = create_registration_form.cleaned_data['startTime']
duration = datetime.timedelta(days=days, hours=hours, minutes=minutes)
endTime = startTime + duration
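# e.g. days=1, hours=2, minutes=30 gives a 26.5-hour window: a registration
# starting 2021-01-01 09:00 would end 2021-01-02 11:30.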
registration = Registration.objects.create(name=create_registration_form.cleaned_data['name'],
startTime=startTime,
duration=duration,
endTime=endTime)
registration.save()
return redirect('registration')
return render(request, self.template_name, {'create_registration_form' : create_registration_form})
class RegistrationsView(View):
"""
View for everyone to view all the registrations.
"""
template_name = 'academicInfo/registration.html'
def get(self, request, *args, **kwargs):
time = timezone.now()
future_registrations = Registration.objects.filter(startTime__gt=time).order_by('startTime')
present_registrations = Registration.objects.filter(
endTime__gt=time
).exclude(startTime__gt=time).order_by('endTime')
past_registrations = Registration.objects.filter(endTime__lt=time)
return render(request, self.template_name, {'future_registrations': future_registrations,
'present_registrations': present_registrations,
'past_registrations': past_registrations})
class LiveRegistrationView(LoginRequiredMixin, View):
"""
View for student to register and unregister from live registrations.
"""
template_name = 'academicInfo/live_registration.html'
def get(self, request, *args, **kwargs):
# Render this page only for the students.
if hasattr(request.user, 'student'):
registration = get_object_or_404(Registration, pk=self.kwargs['registration_id'])
time = timezone.now()
# Check if registration is currently live.
if registration.startTime < time and registration.endTime > time:
student = request.user.student
# Show courses which are in either current semester or the next semester of student.
courses_in_registration = registration.courseregistration_set.all()
course_registration = courses_in_registration.filter(
semester__gt=student.get_student_semester
).exclude(semester__gt=student.get_student_semester+1)
return render(request, self.template_name, {'course_registration' : course_registration,
'student_courses' : student.courseregistration_set.all()})
else:
return redirect('registration')
else:
return redirect('home')
def post(self, request, *args, **kwargs):
# Only students should be allowed to register.
if hasattr(request.user, 'student'):
course_registration = get_object_or_404(CourseRegistration,
pk=request.POST['course_registration_id'])
registration = course_registration.registration
currTime = timezone.now()
student = request.user.student
semester = student.get_student_semester
# If student wants to register for the course.
if 'Register' in request.POST:
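# Students may register only while the window is open and only for courses
# offered in their current or next semester (range(semester, semester + 2)).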
if (currTime > registration.startTime and
currTime < registration.endTime and
course_registration.semester in range(semester, semester+2)):
if (not student in course_registration.students.all() and
course_registration.remaining_seats > 0):
course_registration.students.add(student)
return redirect(reverse('live_registration',
kwargs={'registration_id' : registration.id}))
else:
return redirect('home')
# If student wants to unregister from the course.
elif 'UnRegister' in request.POST:
if (currTime > registration.startTime and
currTime < registration.endTime and
student in course_registration.students.all()):
course_registration.students.remove(student)
return redirect(reverse('live_registration',
kwargs={'registration_id' : registration.id}))
else:
return redirect('home')
class DepartmentsView(LoginRequiredMixin, View):
"""
View for admin to see departments and add new department.
"""
template_name = 'academicInfo/departments.html'
def get(self, request, *args, **kwargs):
# Render this page only for the Admin.
if hasattr(request.user, 'staff') and request.user.staff.is_admin:
departments = Department.objects.all()
return render(request, self.template_name, {'departments' : departments})
else:
return redirect('home')
class CourseView(LoginRequiredMixin, View):
"""
View for admin to see Courses and add new Course.
"""
template_name = 'academicInfo/courses.html'
def get(self, request, *args, **kwargs):
# Render this page only for the Admin.
if hasattr(request.user, 'staff') and request.user.staff.is_admin:
courses = Course.objects.all()
return render(request, self.template_name, {'courses' : courses})
else:
return redirect('home')
|
shreygoel7/Pinocchio
|
Pinocchio/academicInfo/views.py
|
views.py
|
py
| 12,563 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.views.generic.View",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "forms.CreateCourseForm",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "forms.CreateCourseForm",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "django.views.generic.View",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "forms.CreateCourseRegistrationForm",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "forms.CreateCourseRegistrationForm",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "django.views.generic.View",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "forms.CreateDepartmentForm",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "forms.CreateDepartmentForm",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "django.views.generic.View",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "forms.CreateRegistrationForm",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "forms.CreateRegistrationForm",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "models.Registration.objects.create",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "models.Registration.objects",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "models.Registration",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "django.views.generic.View",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "models.Registration.objects.filter",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "models.Registration.objects",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "models.Registration",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "models.Registration.objects.filter",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "models.Registration.objects",
"line_number": 180,
"usage_type": "attribute"
},
{
"api_name": "models.Registration",
"line_number": 180,
"usage_type": "name"
},
{
"api_name": "models.Registration.objects.filter",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "models.Registration.objects",
"line_number": 183,
"usage_type": "attribute"
},
{
"api_name": "models.Registration",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "django.views.generic.View",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "models.Registration",
"line_number": 201,
"usage_type": "argument"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 202,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "models.CourseRegistration",
"line_number": 227,
"usage_type": "argument"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 230,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.reverse",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.reverse",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "django.views.generic.View",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "models.Department.objects.all",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "models.Department.objects",
"line_number": 274,
"usage_type": "attribute"
},
{
"api_name": "models.Department",
"line_number": 274,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "django.views.generic.View",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "models.Course.objects.all",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "models.Course.objects",
"line_number": 291,
"usage_type": "attribute"
},
{
"api_name": "models.Course",
"line_number": 291,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 294,
"usage_type": "call"
}
] |
24132755409
|
#!/usr/bin/env python
import argparse
def filter_sam(out_fn, in_fn, chromosome):
with open(out_fn, 'w') as donor_out:
for line in open(in_fn, 'r'):
if line.startswith("@SQ"):
if "SN:{}\t".format(chromosome) in line:
donor_out.write(line)
elif line.startswith("@"):
donor_out.write(line)
else:
fields = line.strip('\n').split('\t')
if fields[2] == chromosome:
donor_out.write(line)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Filter specified chromosome')
parser.add_argument('--target', help='target filename')
parser.add_argument('--source', help='source filename')
parser.add_argument('--chromosome', help='name of chromosome')
args = parser.parse_args()
filter_sam(args.target, args.source, args.chromosome)
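# Usage sketch (file names are assumed; the flags come from the argparse
# setup above):
#
#   python filter_sam.py --source input.sam --target chr1.sam --chromosome chr1
#
# This keeps the @SQ header line for chr1, all other header lines, and every
# alignment whose RNAME field (column 3) equals chr1.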
|
supernifty/reference-bias
|
bin/filter_sam.py
|
filter_sam.py
|
py
| 832 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 19,
"usage_type": "call"
}
] |
1419172936
|
"""scene.py module"""
# Michael Gresham
# CPSC 386-01
# 2021-11-29
# [email protected]
# @Michael-Gresham
#
# Lab 03-00
#
# My scene class
# Holds all the scenes that are present in snek game.
import pygame
from pygame.constants import SCRAP_SELECTION
from random import randint
import os
import pickle
from datetime import datetime
class Scene:
"""General Scene class that's inherited by all other Scene types."""
def __init__(self, screen, clock, background_color=(0, 0, 0)):
self._is_valid = True
self._frame_rate = 60
self._screen = screen
self._background = pygame.Surface(self._screen.get_size())
self._background_color = background_color
self._background.fill(self._background_color)
self._clock = clock
def is_valid(self):
"""If game state is valid return true."""
return self._is_valid
def frame_rate(self):
"""return the frame rate of the game."""
return self._frame_rate
def start_scene(self):
"""method driver for the class"""
pass
def end_scene(self):
"""Does nothing here but meant to return next scene."""
pass
def update(self):
"""update the display of the scene."""
pygame.display.update()
def draw(self):
"""Display the screen background onto the screen."""
self._screen.blit(self._background, (0, 0))
def process_event(self, event):
"""Handles all the events at the particular scene."""
if event.type == pygame.QUIT:
self._is_valid = False
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
print("Good bye!")
self._is_valid = False
class TitleScene(Scene):
"""Class which handles the title screen of snake."""
def __init__(self, screen, clock, title_size, background_color=(0, 0, 0)):
# class initializer. Initializes basic displays.
super().__init__(screen, clock, background_color)
(w, h) = self._screen.get_size()
self._speed = [0.5, 1, 2]
self._title_name = "Snek Game"
self._title_size = title_size
self._title_color = [50, 50, 50]
title_font = pygame.font.Font(
pygame.font.get_default_font(), self._title_size
)
self._title = title_font.render(
self._title_name, True, self._title_color
)
self._title_pos = self._title.get_rect(center=(w // 2, h // 4))
instruction_name = "Press any key to continue"
self._instruction_size = title_size // 4
instruction_font = pygame.font.Font(
pygame.font.get_default_font(), self._instruction_size
)
self._instruction = instruction_font.render(
instruction_name, True, (255, 255, 0)
)
self._instruction_pos = self._instruction.get_rect(
center=(w // 2, h // 2 + h // 4)
)
self._reverse = False
def start_scene(self):
"""Method driver of the class. Calls other method in order to run the scene."""
while True:
self.draw()
self.update()
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
return self.end_scene()
self.process_event(event)
def draw(self):
"""redraws the background, title name, and instructions and updates screen."""
super().draw()
self._screen.blit(self._title, self._title_pos)
self._screen.blit(self._instruction, self._instruction_pos)
self.display_rules()
pygame.display.update()
def display_rules(self):
"""Displays the instructions for Snake Game."""
instructions = [
"Press arrow keys to change the direction of the snake.",
"Goal of game is to survive and eat as many apples as possible.",
"Every apple you eat extends the length of the snake.",
"Try not to hit yourself or hit into a wall or its game over.",
"Overall score is based off time survived and apples eaten",
"Good Luck!",
]
(w, h) = self._screen.get_size()
height = (h // 4) + 100
width = w // 4
count = 0
for instruction in instructions:
instruction_font = pygame.font.Font(
pygame.font.get_default_font(), 15
)
instruction_render = instruction_font.render(
instruction, True, (255, 255, 255)
)
if count == 5:
self._screen.blit(instruction_render, (w // 2 - 50, height))
else:
self._screen.blit(instruction_render, (width, height))
height += 50
count += 1
def update(self):
"""Updates the color of title text and updates background."""
super().update()
for x in range(3):
self._title_color[x] += 1 * self._speed[x]
if self._title_color[x] <= 0 or self._title_color[x] >= 255:
self._reverse = not self._reverse
self._speed[x] *= -1
if self._title_color[x] > 255:
self._title_color[x] = 255
if self._title_color[x] < 0:
self._title_color[x] = 0
title_font = pygame.font.Font(
pygame.font.get_default_font(), self._title_size
)
self._title = title_font.render(
self._title_name, True, self._title_color
)
def end_scene(self):
"""returns the next scene."""
return "Level1"
pass
def process_event(self, event):
"""handles the exit button."""
if event.type == pygame.QUIT:
print("Goodbye!")
pygame.quit()
class GameScene(Scene):
"""Start of the GameScene Class"""
def __init__(self, screen, clock, background_color=(0, 0, 0)):
"""
This function initializes the GameScene class setting
the snake game board and the display as well.
"""
super().__init__(screen, clock, background_color)
# sets the board and initializes location of snake and apple.
self.direction = None
self.start_ticks = pygame.time.get_ticks()
self.score = 0
self.time = 0
self.snake_size = 1
self.snake = []
# self.player = player
self.offset = 100
(w, h) = self._screen.get_size()
self.board = []
for x in range(0, ((h - 100) // 20)):
row = []
for y in range(0, (w // 20)):
if (
x == 0
or y == 0
or y == (w // 20) - 1
or x == ((h - 100) // 20) - 1
):
row.append("border")
elif x == ((h - 100) // 20) // 2 and y == (w // 20) // 2:
row.append("snek")
self.snake.append((x, y))
elif (
x == ((((h - 100) // 20) // 2) + (((h - 100) // 20) // 4))
and y == (w // 20) // 2
):
row.append("apple")
else:
row.append("empty")
self.board.append(row)
self.timer = "Timer: " + str(self.time)
title_font = pygame.font.Font(pygame.font.get_default_font(), 25)
self._title_time = title_font.render(self.timer, True, (255, 255, 255))
self._timer_pos = self._title_time.get_rect(
center=(w // 4, self.offset // 2)
)
self.title_score = "Score: " + str(self.score)
self._title_score = title_font.render(
self.title_score, True, (255, 255, 255)
)
self._score_pos = self._title_score.get_rect(
center=(w // 2 + w // 4, self.offset // 2)
)
def start_scene(self):
"""method driver that drives the game logic."""
self.__init__(self._screen, self._clock)
while True:
# Get the elapsed game time in milliseconds.
milliseconds = pygame.time.get_ticks() - self.start_ticks
next_scene = self.move()
if next_scene is not None:
return next_scene
self.draw()
self.update(milliseconds)
for event in pygame.event.get():
self.process_event(event)
def update(self, milliseconds):
"""handles updating the timer, background, and score."""
if (milliseconds // 1000) > self.time:
self.time = milliseconds // 1000
self.timer = "Timer: " + str(self.time)
title_font = pygame.font.Font(pygame.font.get_default_font(), 25)
self._title_time = title_font.render(
self.timer, True, (255, 255, 255)
)
if (self.time % 3) == 0 and self.time != 0:
self.score += 1
self.title_score = "Score: " + str(self.score)
title_font = pygame.font.Font(pygame.font.get_default_font(), 25)
self._title_score = title_font.render(
self.title_score, True, (255, 255, 255)
)
pygame.display.update()
def create_apple(self):
"""Handles the logic that places a new apple when one is eaten."""
valid = False
while not valid:
row = randint(1, len(self.board) - 1)
column = randint(1, len(self.board[0]) - 1)
if self.board[row][column] == "empty":
self.board[row][column] = "apple"
valid = True
def move(self):
"""Handles the movement logic of the snake. and loss conditions."""
if self.direction is None:
pass
else:
row = self.snake[0][0]
column = self.snake[0][1]
added = False
if self.direction == "up":
row -= 1
elif self.direction == "down":
row += 1
elif self.direction == "left":
column -= 1
elif self.direction == "right":
column += 1
if (
self.board[row][column] == "border"
or self.board[row][column] == "snek"
):
return self.end_scene()
if (
self.board[row][column] != "border"
and self.board[row][column] != "apple"
):
self.board[row][column] = "snek"
self.snake.insert(0, (row, column))
if self.board[row][column] == "apple":
print("hello World")
added = True
self.score += 10
self.create_apple()
self.board[row][column] = "snek"
self.snake.insert(0, (row, column))
milliseconds = pygame.time.get_ticks() - self.start_ticks
self.draw()
self.update(milliseconds)
if not added:
(x, y) = self.snake.pop()
self.board[x][y] = "empty"
self._clock.tick(10)
def end_scene(self):
"""returns next scene which is end scene."""
return "End Scene"
def draw(self):
"""displays the score, time, and the game screen on pygame display."""
super().draw()
self._screen.blit(self._title_score, self._score_pos)
self._screen.blit(self._title_time, self._timer_pos)
for x in range(0, len(self.board)):
for y in range(0, len(self.board[0])):
if self.board[x][y] == "border":
pygame.draw.rect(
self._screen,
(164, 116, 73),
pygame.Rect((y * 20), (x * 20) + self.offset, 20, 20),
)
elif self.board[x][y] == "empty":
pygame.draw.rect(
self._screen,
(0, 154, 23),
pygame.Rect((y * 20), (x * 20) + self.offset, 20, 20),
)
elif self.board[x][y] == "apple":
pygame.draw.rect(
self._screen,
(0, 154, 23),
pygame.Rect((y * 20), (x * 20) + self.offset, 20, 20),
)
pygame.draw.circle(
self._screen,
(255, 0, 0),
((y * 20) + 10, (x * 20) + 10 + self.offset),
10,
)
elif self.board[x][y] == "snek":
pygame.draw.rect(
self._screen,
(0, 0, 255),
pygame.Rect((y * 20), (x * 20) + self.offset, 20, 20),
)
def process_event(self, event):
"""handle various events in game: movement and exit button."""
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP and self.direction != "down":
self.direction = "up"
if event.key == pygame.K_DOWN and self.direction != "up":
self.direction = "down"
if event.key == pygame.K_LEFT and self.direction != "right":
self.direction = "left"
if event.key == pygame.K_RIGHT and self.direction != "left":
self.direction = "right"
class End_Scene(Scene):
"""The end screen of snake, handles leader board and reset logic."""
main_dir = os.path.split(os.path.abspath(__file__))[0]
data_dir = os.path.join(main_dir, "data")
def __init__(self, screen, clock, background_color=(0, 0, 0)):
"""
This function initializes the end scene by setting
up visual text and visual instructions.
"""
super().__init__(screen, clock, background_color)
self.player_score = 0
self.player_time = 0
(w, h) = self._screen.get_size()
self.leaderboard = []
# code for Game over screen.
self._title_name = "Leader Board"
self._title_size = 60
self._title_color = [255, 255, 255]
title_font = pygame.font.Font(
pygame.font.get_default_font(), self._title_size
)
self._title = title_font.render(
self._title_name, True, self._title_color
)
self._title_pos = self._title.get_rect(center=(w // 2, h // 8))
self._score_name = " Date Score Time"
self._score_size = 30
self._score_color = [255, 255, 255]
title_font = pygame.font.Font(
pygame.font.get_default_font(), self._score_size
)
self._score = title_font.render(
self._score_name, True, self._score_color
)
self._score_pos = self._title.get_rect(center=(w // 4, h // 4))
def draw(self):
"""draws the leaderboard and options onto the screen."""
super().draw()
self._screen.blit(self._score, self._score_pos)
self._screen.blit(self._title, self._title_pos)
count = min(10, len(self.leaderboard))
# Get the screen size once, before the loop, so it is also available to the
# restart/exit prompts below even when the leaderboard is empty.
(w, h) = self._screen.get_size()
for x in range(0, count):
date = self.leaderboard[x][2].strftime("%d/%m/%Y %H:%M:%S")
record_name = "{0:<2} {1:<10} {2:<10} {3:<30}".format(
str(x + 1), date, self.leaderboard[x][0], self.leaderboard[x][1]
)
record_size = 25
record_color = (255, 255, 255)
record_font = pygame.font.SysFont("couriernew", record_size, 1, 0)
record = record_font.render(record_name, True, record_color)
record_pos = self._title.get_rect(
center=(w // 4 + 10, h // 4 + (30 * (x + 1)))
)
self._screen.blit(record, record_pos)
restart_title = "Press Space to play again!"
restart_size = 20
restart_color = (255, 255, 255)
restart_font = pygame.font.Font(
pygame.font.get_default_font(), restart_size
)
restart = restart_font.render(restart_title, True, restart_color)
restart_pos = self._title.get_rect(
center=(w // 2, h // 2 + h // 4)
)
self._screen.blit(restart, restart_pos)
restart_title = "Press Escape to exit the game!"
restart_size = 20
restart_color = (255, 255, 255)
restart_font = pygame.font.Font(
pygame.font.get_default_font(), restart_size
)
restart = restart_font.render(restart_title, True, restart_color)
restart_pos = self._title.get_rect(
center=(w // 2, h // 2 + h // 4 + 50)
)
self._screen.blit(restart, restart_pos)
def pickle_in_player(self):
"""takes player game records and puts it in pickle file."""
game_record = []
game_date = datetime.now()
game_record.append(self.player_score)
game_record.append(self.player_time)
game_record.append(game_date)
with open(self.data_dir + "/leaderboard.pickle", "ab") as fh:
pickle.dump(game_record, fh, pickle.HIGHEST_PROTOCOL)
def load_in(self):
"""loads in all game records."""
with open(self.data_dir + "/leaderboard.pickle", "rb") as fh:
while True:
try:
yield pickle.load(fh)
except EOFError:
break
def start_scene(self, score, time):
"""method driver that handles End Scene logic."""
self.player_score = score
self.player_time = time
self.pickle_in_player()
self.leaderboard = list(self.load_in())
self.leaderboard.sort(key=lambda l: l[0], reverse=True)
self.draw()
self.update()
while True:
for event in pygame.event.get():
next_scene = self.process_event(event)
if next_scene is not None:
return next_scene
def process_event(self, event):
"""handles the event in end screen: new game and exit game."""
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
return "exit"
if event.key == pygame.K_SPACE:
return "Title"
|
Michael-Gresham/Portfolio
|
cpsc-386-04-snake-Michael-Gresham-main/scene.py
|
scene.py
|
py
| 19,048 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.Surface",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pygame.display.update",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "pygame.KEYDOWN",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_ESCAPE",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.Font",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.get_default_font",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.Font",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.get_default_font",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "pygame.KEYDOWN",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.Font",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.get_default_font",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.Font",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.get_default_font",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 174,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "pygame.time.get_ticks",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 190,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.Font",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 221,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.get_default_font",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "pygame.time.get_ticks",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 239,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 245,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.Font",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 254,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.get_default_font",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "pygame.font.Font",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 261,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.get_default_font",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "pygame.display.update",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 265,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "pygame.time.get_ticks",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 314,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 337,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "pygame.draw.rect",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 343,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "pygame.draw.rect",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 349,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "pygame.draw.circle",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 354,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 361,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "pygame.QUIT",
"line_number": 370,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "pygame.KEYDOWN",
"line_number": 373,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_UP",
"line_number": 374,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_DOWN",
"line_number": 376,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_LEFT",
"line_number": 378,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_RIGHT",
"line_number": 380,
"usage_type": "attribute"
},
{
"api_name": "os.path.split",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 387,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 388,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.Font",
"line_number": 404,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 404,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.get_default_font",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 405,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.Font",
"line_number": 415,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 415,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.get_default_font",
"line_number": 416,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 416,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.SysFont",
"line_number": 442,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 442,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.Font",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 451,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.get_default_font",
"line_number": 452,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 452,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.Font",
"line_number": 463,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 463,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.get_default_font",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 464,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 475,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 475,
"usage_type": "name"
},
{
"api_name": "pickle.dump",
"line_number": 480,
"usage_type": "call"
},
{
"api_name": "pickle.HIGHEST_PROTOCOL",
"line_number": 480,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "pygame.font.get_fonts",
"line_number": 493,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 493,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 505,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 505,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 514,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 515,
"usage_type": "call"
},
{
"api_name": "pygame.KEYDOWN",
"line_number": 517,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_ESCAPE",
"line_number": 518,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_SPACE",
"line_number": 520,
"usage_type": "attribute"
}
] |
2088985509
|
#!/usr/bin/env python
"""@namespace IMP.pmi.tools
Miscellaneous utilities.
"""
from __future__ import print_function, division
import IMP
import IMP.algebra
import IMP.isd
import IMP.pmi
import IMP.pmi.topology
try:
from collections.abc import MutableSet # needs Python 3.3 or later
except ImportError:
from collections import MutableSet
import itertools
import math
import sys
import ast
try:
from time import process_time # needs python 3.3 or later
except ImportError:
from time import clock as process_time
import RMF
import IMP.rmf
from collections import defaultdict, OrderedDict
import warnings
import numpy
def _get_system_for_hier(hier):
"""Given a hierarchy, return the System that created it, or None"""
# If we are given the raw particle, get the corresponding Hierarchy
# decorator if available
if hier and not hasattr(hier, 'get_parent'):
if IMP.atom.Hierarchy.get_is_setup(hier):
hier = IMP.atom.Hierarchy(hier)
else:
return None
while hier:
# See if we labeled the Python object directly with the System
if hasattr(hier, '_pmi2_system'):
h = hier._pmi2_system()
if h:
return h
# Otherwise (maybe we got a new Python wrapper around the same C++
# object), try all extant systems
for s in IMP.pmi.topology.System._all_systems:
if s.hier == hier:
return s
# Try the next level up in the hierarchy
hier = hier.get_parent()
def _all_protocol_outputs(hier):
"""Iterate over all (ProtocolOutput, State) pairs for the
given hierarchy"""
system = _get_system_for_hier(hier)
if system:
for state in system.states:
for p in state._protocol_output:
yield p
def _add_pmi_provenance(p):
"""Tag the given particle as being created by the current version
of PMI."""
IMP.core.add_imp_provenance(p)
IMP.core.add_software_provenance(
p, name="IMP PMI module", version=IMP.pmi.get_module_version(),
location="https://integrativemodeling.org")
IMP.core.add_script_provenance(p)
def _get_restraint_set_keys():
if not hasattr(_get_restraint_set_keys, 'pmi_rs_key'):
_get_restraint_set_keys.pmi_rs_key = IMP.ModelKey("PMI restraints")
_get_restraint_set_keys.rmf_rs_key = IMP.ModelKey("RMF restraints")
return (_get_restraint_set_keys.pmi_rs_key,
_get_restraint_set_keys.rmf_rs_key)
def _add_restraint_sets(model, mk, mk_rmf):
rs = IMP.RestraintSet(model, "All PMI restraints")
rs_rmf = IMP.RestraintSet(model, "All PMI RMF restraints")
model.add_data(mk, rs)
model.add_data(mk_rmf, rs_rmf)
return rs, rs_rmf
def add_restraint_to_model(model, restraint, add_to_rmf=False):
"""Add a PMI restraint to the model.
Since Model.add_restraint() no longer exists (in modern IMP restraints
should be added to a ScoringFunction instead) store them instead in
a RestraintSet, and keep a reference to it in the Model.
If `add_to_rmf` is True, also add the restraint to a separate list
of restraints that will be written out to RMF files (by default, most
PMI restraints are not)."""
mk, mk_rmf = _get_restraint_set_keys()
if model.get_has_data(mk):
rs = IMP.RestraintSet.get_from(model.get_data(mk))
rs_rmf = IMP.RestraintSet.get_from(model.get_data(mk_rmf))
else:
rs, rs_rmf = _add_restraint_sets(model, mk, mk_rmf)
rs.add_restraint(restraint)
if add_to_rmf:
rs_rmf.add_restraint(restraint)
def get_restraint_set(model, rmf=False):
"""Get a RestraintSet containing all PMI restraints added to the model.
If `rmf` is True, return only the subset of these restraints that
should be written out to RMF files."""
mk, mk_rmf = _get_restraint_set_keys()
if not model.get_has_data(mk):
warnings.warn("no restraints added to model yet",
IMP.pmi.ParameterWarning)
_add_restraint_sets(model, mk, mk_rmf)
if rmf:
return IMP.RestraintSet.get_from(model.get_data(mk_rmf))
else:
return IMP.RestraintSet.get_from(model.get_data(mk))
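# Illustrative only (the model and restraint objects are assumed): restraints
# are registered once, then collected back from the Model's data:
#
#   add_restraint_to_model(model, my_restraint, add_to_rmf=True)
#   rs = get_restraint_set(model)                # all PMI restraints
#   rs_rmf = get_restraint_set(model, rmf=True)  # only the RMF-written subset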
class Stopwatch(object):
"""Collect timing information.
Add an instance of this class to outputobjects to get timing information
in a stat file."""
def __init__(self, isdelta=True):
"""Constructor.
@param isdelta if True (the default) then report the time since the
last use of this class; if False, report cumulative time."""
self.starttime = process_time()
self.label = "None"
self.isdelta = isdelta
def set_label(self, labelstr):
self.label = labelstr
def get_output(self):
output = {}
if self.isdelta:
newtime = process_time()
output["Stopwatch_" + self.label + "_delta_seconds"] \
= str(newtime - self.starttime)
self.starttime = newtime
else:
output["Stopwatch_" + self.label + "_elapsed_seconds"] \
= str(process_time() - self.starttime)
return output
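# Illustrative only (the label is an arbitrary example): with isdelta=True a
# Stopwatch reports the time since its last get_output() call:
#
#   sw = Stopwatch()
#   sw.set_label("sampling")
#   ...                       # do some work
#   stats = sw.get_output()   # {"Stopwatch_sampling_delta_seconds": "..."}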
class SetupNuisance(object):
def __init__(self, m, initialvalue, minvalue, maxvalue, isoptimized=True,
name=None):
p = IMP.Particle(m)
if name:
p.set_name(name)
nuisance = IMP.isd.Scale.setup_particle(p, initialvalue)
if minvalue:
nuisance.set_lower(minvalue)
if maxvalue:
nuisance.set_upper(maxvalue)
# m.add_score_state(IMP.core.SingletonConstraint(IMP.isd.NuisanceRangeModifier(),None,nuisance))
nuisance.set_is_optimized(nuisance.get_nuisance_key(), isoptimized)
self.nuisance = nuisance
def get_particle(self):
return self.nuisance
class SetupWeight(object):
def __init__(self, m, isoptimized=True, nweights_or_weights=None):
pw = IMP.Particle(m)
if isinstance(nweights_or_weights, int):
self.weight = IMP.isd.Weight.setup_particle(
pw, nweights_or_weights
)
else:
try:
nweights_or_weights = list(nweights_or_weights)
self.weight = IMP.isd.Weight.setup_particle(
pw, nweights_or_weights
)
except (TypeError, IMP.UsageException):
self.weight = IMP.isd.Weight.setup_particle(pw)
self.weight.set_weights_are_optimized(isoptimized)
def get_particle(self):
return self.weight
class SetupSurface(object):
def __init__(self, m, center, normal, isoptimized=True):
p = IMP.Particle(m)
self.surface = IMP.core.Surface.setup_particle(p, center, normal)
self.surface.set_coordinates_are_optimized(isoptimized)
self.surface.set_normal_is_optimized(isoptimized)
def get_particle(self):
return self.surface
def get_cross_link_data(directory, filename, dist, omega, sigma,
don=None, doff=None, prior=0, type_of_profile="gofr"):
(distmin, distmax, ndist) = dist
(omegamin, omegamax, nomega) = omega
(sigmamin, sigmamax, nsigma) = sigma
filen = IMP.isd.get_data_path("CrossLinkPMFs.dict")
with open(filen) as xlpot:
dictionary = ast.literal_eval(xlpot.readline())
xpot = dictionary[directory][filename]["distance"]
pot = dictionary[directory][filename][type_of_profile]
dist_grid = get_grid(distmin, distmax, ndist, False)
omega_grid = get_log_grid(omegamin, omegamax, nomega)
sigma_grid = get_log_grid(sigmamin, sigmamax, nsigma)
if don is not None and doff is not None:
xlmsdata = IMP.isd.CrossLinkData(
dist_grid,
omega_grid,
sigma_grid,
xpot,
pot,
don,
doff,
prior)
else:
xlmsdata = IMP.isd.CrossLinkData(
dist_grid,
omega_grid,
sigma_grid,
xpot,
pot)
return xlmsdata
def get_grid(gmin, gmax, ngrid, boundaries):
grid = []
dx = (gmax - gmin) / float(ngrid)
for i in range(0, ngrid + 1):
if not boundaries and i == 0:
continue
if not boundaries and i == ngrid:
continue
grid.append(gmin + float(i) * dx)
return grid
def get_log_grid(gmin, gmax, ngrid):
grid = []
for i in range(0, ngrid + 1):
grid.append(gmin * math.exp(float(i) / ngrid * math.log(gmax / gmin)))
return grid
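# Example (assumed values): get_log_grid() walks the exponent linearly, so
# get_log_grid(1.0, 1000.0, 3) yields [1.0, 10.0, 100.0, 1000.0], while
# get_grid(0.0, 10.0, 5, True) yields the evenly spaced
# [0.0, 2.0, 4.0, 6.0, 8.0, 10.0].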
def cross_link_db_filter_parser(inputstring):
'''
example '"{ID_Score}" > 28 AND "{Sample}" ==
"%10_1%" OR ":Sample}" == "%10_2%" OR ":Sample}"
== "%10_3%" OR ":Sample}" == "%8_1%" OR ":Sample}" == "%8_2%"'
'''
import pyparsing as pp
operator = pp.Regex(">=|<=|!=|>|<|==|in").setName("operator")
value = pp.QuotedString(
'"') | pp.Regex(
r"[+-]?\d+(:?\.\d*)?(:?[eE][+-]?\d+)?")
identifier = pp.Word(pp.alphas, pp.alphanums + "_")
comparison_term = identifier | value
condition = pp.Group(comparison_term + operator + comparison_term)
expr = pp.operatorPrecedence(condition, [
("OR", 2, pp.opAssoc.LEFT, ),
("AND", 2, pp.opAssoc.LEFT, ),
])
parsedstring = str(expr.parseString(inputstring)) \
.replace("[", "(") \
.replace("]", ")") \
.replace(",", " ") \
.replace("'", " ") \
.replace("%", "'") \
.replace("{", "float(entry['") \
.replace("}", "'])") \
.replace(":", "str(entry['") \
.replace("}", "'])") \
.replace("AND", "and") \
.replace("OR", "or")
return parsedstring
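# Illustrative only (assumed input): the parser rewrites the quoted filter
# syntax into a Python expression over an `entry` dict, e.g.
#
#   expr = cross_link_db_filter_parser('"{ID_Score}" > 28')
#   # expr is a string along the lines of "((float(entry['ID_Score']) > 28))"
#   # that can later be eval()'d against each database entry.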
def open_file_or_inline_text(filename):
try:
fl = open(filename, "r")
except IOError:
fl = filename.split("\n")
return fl
def get_ids_from_fasta_file(fastafile):
ids = []
with open(fastafile) as ff:
for line in ff:
if line[0] == ">":
ids.append(line[1:-1])
return ids
def get_closest_residue_position(hier, resindex, terminus="N"):
'''
This function works with plain hierarchies, as read from a PDB file;
it does not handle multi-scale hierarchies.
'''
p = []
niter = 0
while len(p) == 0:
niter += 1
sel = IMP.atom.Selection(hier, residue_index=resindex,
atom_type=IMP.atom.AT_CA)
if terminus == "N":
resindex += 1
if terminus == "C":
resindex -= 1
if niter >= 10000:
print("get_closest_residue_position: exiting while loop "
"without result")
break
p = sel.get_selected_particles()
if len(p) == 1:
return IMP.core.XYZ(p[0]).get_coordinates()
elif len(p) == 0:
print("get_closest_residue_position: got NO residues for hierarchy "
"%s and residue %i" % (hier, resindex))
raise Exception(
"get_closest_residue_position: got NO residues for hierarchy "
"%s and residue %i" % (hier, resindex))
else:
raise ValueError(
"got multiple residues for hierarchy %s and residue %i; the list "
"of particles is %s"
% (hier, resindex, str([pp.get_name() for pp in p])))
def get_residue_gaps_in_hierarchy(hierarchy, start, end):
'''
Return the residue index gaps and contiguous segments in the hierarchy.
@param hierarchy hierarchy to examine
@param start first residue index
@param end last residue index
@return A list of lists of the form
[[1,100,"cont"],[101,120,"gap"],[121,200,"cont"]]
'''
gaps = []
for n, rindex in enumerate(range(start, end + 1)):
sel = IMP.atom.Selection(hierarchy, residue_index=rindex,
atom_type=IMP.atom.AT_CA)
if len(sel.get_selected_particles()) == 0:
if n == 0:
# set the initial condition
rindexgap = start
rindexcont = start - 1
if rindexgap == rindex - 1:
# residue is contiguous with the previously discovered gap
gaps[-1][1] += 1
else:
# residue is not contiguous with the previously discovered gap
# hence create a new gap tuple
gaps.append([rindex, rindex, "gap"])
# update the index of the last residue gap
rindexgap = rindex
else:
if n == 0:
# set the initial condition
rindexgap = start - 1
rindexcont = start
if rindexcont == rindex - 1:
# residue is contiguous with the previously discovered
# continuous part
gaps[-1][1] += 1
else:
# residue is not contiguous with the previously discovered
# continuous part, hence create a new cont tuple
gaps.append([rindex, rindex, "cont"])
# update the index of the last residue gap
rindexcont = rindex
return gaps
class map(object):
def __init__(self):
self.map = {}
def set_map_element(self, xvalue, yvalue):
self.map[xvalue] = yvalue
def get_map_element(self, invalue):
if isinstance(invalue, float):
n = 0
mindist = 1
for x in self.map:
dist = (invalue - x) * (invalue - x)
if n == 0:
mindist = dist
minx = x
if dist < mindist:
mindist = dist
minx = x
n += 1
return self.map[minx]
elif isinstance(invalue, str):
return self.map[invalue]
else:
raise TypeError("wrong type for map")
def select_by_tuple_2(hier, tuple_selection, resolution):
"""New tuple format: molname OR (start,stop,molname,copynum,statenum)
Copy and state are optional. Can also use 'None' for them which will
get all. You can also pass -1 for stop which will go to the end.
Returns the particles
"""
kwds = {} # going to accumulate keywords
kwds['resolution'] = resolution
if isinstance(tuple_selection, str):
kwds['molecule'] = tuple_selection
elif isinstance(tuple_selection, tuple):
rbegin = tuple_selection[0]
rend = tuple_selection[1]
kwds['molecule'] = tuple_selection[2]
try:
copynum = tuple_selection[3]
if copynum is not None:
kwds['copy_index'] = copynum
except: # noqa: E722
pass
try:
statenum = tuple_selection[4]
if statenum is not None:
kwds['state_index'] = statenum
except: # noqa: E722
pass
if rend == -1:
if rbegin > 1:
s = IMP.atom.Selection(hier, **kwds)
s -= IMP.atom.Selection(hier,
residue_indexes=range(1, rbegin),
**kwds)
return s.get_selected_particles()
else:
kwds['residue_indexes'] = range(rbegin, rend+1)
s = IMP.atom.Selection(hier, **kwds)
return s.get_selected_particles()
def get_db_from_csv(csvfilename, encoding=None):
if sys.version_info[0] == 2:
def open_with_encoding(fname, encoding):
return open(fname)
else:
open_with_encoding = open
import csv
outputlist = []
with open_with_encoding(csvfilename, encoding=encoding) as fh:
csvr = csv.DictReader(fh)
for ls in csvr:
outputlist.append(ls)
return outputlist
def get_prot_name_from_particle(p, list_of_names):
'''Return the component name provided a particle and a list of names'''
root = p
protname = root.get_name()
is_a_bead = False
while protname not in list_of_names:
root0 = root.get_parent()
if root0 == IMP.atom.Hierarchy():
return (None, None)
protname = root0.get_name()
# check if that is a bead
# this piece of code might be dangerous if
# the hierarchy was called Bead :)
if "Beads" in protname:
is_a_bead = True
root = root0
return (protname, is_a_bead)
def get_residue_indexes(hier):
'''
Retrieve the residue indexes for the given particle.
The particle must be an instance of Fragment, Residue, Atom or Molecule;
otherwise an empty list is returned.
'''
resind = []
if IMP.atom.Fragment.get_is_setup(hier):
resind = IMP.atom.Fragment(hier).get_residue_indexes()
elif IMP.atom.Residue.get_is_setup(hier):
resind = [IMP.atom.Residue(hier).get_index()]
elif IMP.atom.Atom.get_is_setup(hier):
a = IMP.atom.Atom(hier)
resind = [IMP.atom.Residue(a.get_parent()).get_index()]
elif IMP.atom.Molecule.get_is_setup(hier):
resind_tmp = IMP.pmi.tools.OrderedSet()
for lv in IMP.atom.get_leaves(hier):
if IMP.atom.Fragment.get_is_setup(lv) or \
IMP.atom.Residue.get_is_setup(lv) or \
IMP.atom.Atom.get_is_setup(lv):
for ind in get_residue_indexes(lv):
resind_tmp.add(ind)
resind = list(resind_tmp)
else:
resind = []
return resind
def sort_by_residues(particles):
particles_residues = [(p, list(IMP.pmi.tools.get_residue_indexes(p)))
for p in particles]
sorted_particles_residues = sorted(
particles_residues,
key=lambda tup: tup[1])
particles = [p[0] for p in sorted_particles_residues]
return particles
#
# Parallel Computation
#
def scatter_and_gather(data):
"""Synchronize data over a parallel run"""
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
number_of_processes = comm.size
comm.Barrier()
if rank != 0:
comm.send(data, dest=0, tag=11)
elif rank == 0:
for i in range(1, number_of_processes):
data_tmp = comm.recv(source=i, tag=11)
if isinstance(data, list):
data += data_tmp
elif isinstance(data, dict):
data.update(data_tmp)
else:
raise TypeError("data not supported, use list or dictionaries")
for i in range(1, number_of_processes):
comm.send(data, dest=i, tag=11)
if rank != 0:
data = comm.recv(source=0, tag=11)
return data
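# Illustrative only: this is meant to run under MPI, e.g. (assumed script
# name)
#
#   mpirun -np 4 python my_sampling_script.py
#
# where every rank passes its local list or dict to scatter_and_gather() and
# each rank returns with the merged data.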
#
# Lists and iterators
#
def sublist_iterator(ls, lmin=1, lmax=None):
'''
Yield all sublists of length >= lmin and <= lmax
'''
if lmax is None:
lmax = len(ls)
n = len(ls)
for i in range(n):
for j in range(i + lmin, min(n + 1, i + 1 + lmax)):
yield ls[i:j]
def flatten_list(ls):
return [item for sublist in ls for item in sublist]
def list_chunks_iterator(list, length):
""" Yield successive length-sized chunks from a list.
"""
for i in range(0, len(list), length):
yield list[i:i + length]
def chunk_list_into_segments(seq, num):
seq = list(seq)
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
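# Example (assumed data): the three helpers above chunk differently:
#
#   list(sublist_iterator([1, 2, 3], lmax=2))       # [1],[1,2],[2],[2,3],[3]
#   list(list_chunks_iterator([1, 2, 3, 4, 5], 2))  # [1,2],[3,4],[5]
#   chunk_list_into_segments(range(5), 2)           # [[0, 1], [2, 3, 4]]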
class Segments(object):
''' This class stores integers
in ordered compact lists, e.g.:
[[1,2,3],[6,7,8]]
The methods help split and merge the internal lists.
Example:
s=Segments([1,2,3]) is [[1,2,3]]
s.add(4) is [[1,2,3,4]] (add right)
s.add(3) is [[1,2,3,4]] (item already existing)
s.add(7) is [[1,2,3,4],[7]] (new list)
s.add([8,9]) is [[1,2,3,4],[7,8,9]] (add item right)
s.add([5,6]) is [[1,2,3,4,5,6,7,8,9]] (merge)
s.remove(3) is [[1,2],[4,5,6,7,8,9]] (split)
etc.
'''
def __init__(self, index):
'''index can be an integer or a list of integers'''
if isinstance(index, int):
self.segs = [[index]]
elif isinstance(index, list):
self.segs = [[index[0]]]
for i in index[1:]:
self.add(i)
else:
raise TypeError("index must be an int or list of ints")
def add(self, index):
'''index can be an integer or a list of integers'''
if isinstance(index, (int, numpy.int32, numpy.int64)):
mergeleft = None
mergeright = None
for n, s in enumerate(self.segs):
if index in s:
return 0
else:
if s[0]-index == 1:
mergeleft = n
if index-s[-1] == 1:
mergeright = n
if mergeright is None and mergeleft is None:
self.segs.append([index])
if mergeright is not None and mergeleft is None:
self.segs[mergeright].append(index)
if mergeleft is not None and mergeright is None:
self.segs[mergeleft] = [index]+self.segs[mergeleft]
if mergeleft is not None and mergeright is not None:
self.segs[mergeright] = \
self.segs[mergeright]+[index]+self.segs[mergeleft]
del self.segs[mergeleft]
for n in range(len(self.segs)):
self.segs[n].sort()
self.segs.sort(key=lambda tup: tup[0])
elif isinstance(index, list):
for i in index:
self.add(i)
else:
raise TypeError("index must be an int or list of ints")
def remove(self, index):
'''index can be an integer'''
for n, s in enumerate(self.segs):
if index in s:
if s[0] == index:
self.segs[n] = s[1:]
elif s[-1] == index:
self.segs[n] = s[:-1]
else:
i = self.segs[n].index(index)
self.segs[n] = s[:i]
self.segs.append(s[i+1:])
# sort each segment and drop any that became empty; deleting by index
# while iterating over range(len(...)) could skip entries or overrun
self.segs = [sorted(s) for s in self.segs if len(s) > 0]
self.segs.sort(key=lambda tup: tup[0])
def get_flatten(self):
''' Return a flattened list '''
return [item for sublist in self.segs for item in sublist]
def __repr__(self):
ret_tmp = "["
for seg in self.segs:
ret_tmp += str(seg[0])+"-"+str(seg[-1])+","
ret = ret_tmp[:-1]+"]"
return ret
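# Example (assumed values), following the class docstring:
#
#   s = Segments([1, 2, 3])   # repr: [1-3]
#   s.add(4)                  # [1-4]
#   s.add(7)                  # [1-4,7-7]
#   s.add([5, 6])             # [1-7]
#   s.remove(3)               # [1-2,4-7]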
#
# Tools to simulate data
#
def normal_density_function(expected_value, sigma, x):
return (
1 / math.sqrt(2 * math.pi) / sigma *
math.exp(-(x - expected_value) ** 2 / 2 / sigma / sigma)
)
def log_normal_density_function(expected_value, sigma, x):
return (
1 / math.sqrt(2 * math.pi) / sigma / x *
math.exp(-(math.log(x / expected_value) ** 2 / 2 / sigma / sigma))
)
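# Quick check (assumed values): at x equal to the expected value the normal
# density reduces to 1 / (sqrt(2*pi) * sigma), so
# normal_density_function(0.0, 1.0, 0.0) is about 0.3989.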
def print_multicolumn(list_of_strings, ncolumns=2, truncate=40):
ls = list_of_strings
cols = ncolumns
# pad ls with blanks so its length is a multiple of cols
while len(ls) % cols:
ls.append(" ")
split = [ls[i:i + len(ls) // cols]
for i in range(0, len(ls), len(ls) // cols)]
for row in zip(*split):
print("".join(str.ljust(i, truncate) for i in row))
class ColorChange(object):
'''Convert color codes between hexadecimal and RGB'''
def __init__(self):
self._NUMERALS = '0123456789abcdefABCDEF'
self._HEXDEC = dict((v, int(v, 16)) for v in
(x+y for x in self._NUMERALS
for y in self._NUMERALS))
self.LOWERCASE, self.UPPERCASE = 'x', 'X'
def rgb(self, triplet):
return (float(self._HEXDEC[triplet[0:2]]),
float(self._HEXDEC[triplet[2:4]]),
float(self._HEXDEC[triplet[4:6]]))
def triplet(self, rgb, lettercase=None):
if lettercase is None:
lettercase = self.LOWERCASE
return format(rgb[0] << 16 | rgb[1] << 8 | rgb[2], '06'+lettercase)
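# Example (assumed values): rgb() takes a 6-character hex triplet and returns
# float channels; triplet() goes the other way but expects integer channels.
#
#   cc = ColorChange()
#   cc.rgb("ff8000")           # (255.0, 128.0, 0.0)
#   cc.triplet((255, 128, 0))  # 'ff8000'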
# -------------- Collections --------------- #
class OrderedSet(MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
if last:
key = self.end[1][0]
else:
key = self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
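# Example (assumed values): insertion order is preserved, unlike a plain set.
#
#   s = OrderedSet([3, 1, 2, 1])
#   list(s)   # [3, 1, 2]
#   s.pop()   # 2  (last=True pops from the end)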
class OrderedDefaultDict(OrderedDict):
"""Store objects in order they were added, but with default type.
Source: http://stackoverflow.com/a/4127426/2608793
"""
def __init__(self, *args, **kwargs):
if not args:
self.default_factory = None
else:
if not (args[0] is None or callable(args[0])):
raise TypeError('first argument must be callable or None')
self.default_factory = args[0]
args = args[1:]
super(OrderedDefaultDict, self).__init__(*args, **kwargs)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = default = self.default_factory()
return default
def __reduce__(self): # optional, for pickle support
args = (self.default_factory,) if self.default_factory else ()
if sys.version_info[0] >= 3:
return self.__class__, args, None, None, self.items()
else:
return self.__class__, args, None, None, self.iteritems()
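# Example (assumed values): behaves like defaultdict but keeps insertion
# order even on Python 2, where plain dicts were unordered.
#
#   d = OrderedDefaultDict(list)
#   d["b"].append(1)
#   d["a"].append(2)
#   list(d)   # ['b', 'a']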
# -------------- PMI2 Tools --------------- #
def set_coordinates_from_rmf(hier, rmf_fn, frame_num=0):
"""Extract frame from RMF file and fill coordinates. Must be identical
topology.
@param hier The (System) hierarchy to fill (e.g. after you've built it)
@param rmf_fn The file to extract from
@param frame_num The frame number to extract
"""
rh = RMF.open_rmf_file_read_only(rmf_fn)
IMP.rmf.link_hierarchies(rh, [hier])
IMP.rmf.load_frame(rh, RMF.FrameID(frame_num))
del rh
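# Illustrative only (assumed file name):
#
#   set_coordinates_from_rmf(root_hier, "models.rmf3", frame_num=10)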
def input_adaptor(stuff, pmi_resolution=0, flatten=False, selection_tuple=None,
warn_about_slices=True):
"""Adapt things for PMI (degrees of freedom, restraints, ...)
Returns list of list of hierarchies, separated into Molecules if possible.
The input can be a list, or a list of lists (iterable of ^1 or
iterable of ^2)
(iterable of ^2) Hierarchy -> returns input as list of list of hierarchies,
only one entry, not grouped by molecules.
(iterable of ^2) PMI::System/State/Molecule/TempResidue ->
returns residue hierarchies, grouped in molecules, at requested
resolution
@param stuff Can be one of the following inputs:
IMP Hierarchy, PMI System/State/Molecule/TempResidue, or a
list/set (of list/set) of them.
Must be uniform input, however. No mixing object types.
@param pmi_resolution For selecting, only does it if you pass PMI
objects. Set it to "all" if you want all resolutions!
@param flatten Set to True if you just want all hierarchies in one list.
@param warn_about_slices Print a warning if you are requesting only part
of a bead. Sometimes you just don't care!
@note since this relies on IMP::atom::Selection, this will not return
any objects if they weren't built! But there should be no problem
if you request unbuilt residues - they should be ignored.
"""
if stuff is None:
return stuff
if hasattr(stuff, '__iter__'):
if len(stuff) == 0:
return stuff
thelist = list(stuff)
# iter of iter of should be ok
if all(hasattr(el, '__iter__') for el in thelist):
thelist = [i for sublist in thelist for i in sublist]
elif any(hasattr(el, '__iter__') for el in thelist):
raise Exception('input_adaptor: input_object must be a list '
'or a list of lists')
stuff = thelist
else:
stuff = [stuff]
# check that it is a hierarchy homogeneously:
try:
is_hierarchy = all(IMP.atom.Hierarchy.get_is_setup(s) for s in stuff)
except (NotImplementedError, TypeError):
is_hierarchy = False
# get the other types homogeneously
is_system = all(isinstance(s, IMP.pmi.topology.System) for s in stuff)
is_state = all(isinstance(s, IMP.pmi.topology.State) for s in stuff)
is_molecule = all(isinstance(s, IMP.pmi.topology.Molecule) for s in stuff)
is_temp_residue = all(isinstance(s, IMP.pmi.topology.TempResidue)
for s in stuff)
# now that things are ok, do selection if requested
hier_list = []
pmi_input = False
if is_system or is_state or is_molecule or is_temp_residue:
# if PMI, perform selection using gathered indexes
pmi_input = True
# key is Molecule object, value are residues
indexes_per_mol = OrderedDefaultDict(list)
if is_system:
for system in stuff:
for state in system.get_states():
mdict = state.get_molecules()
for molname in mdict:
for copy in mdict[molname]:
indexes_per_mol[copy] += \
[r.get_index() for r in copy.get_residues()]
elif is_state:
for state in stuff:
mdict = state.get_molecules()
for molname in mdict:
for copy in mdict[molname]:
indexes_per_mol[copy] += [r.get_index()
for r in copy.get_residues()]
elif is_molecule:
for molecule in stuff:
indexes_per_mol[molecule] += [r.get_index()
for r in molecule.get_residues()]
else: # is_temp_residue
for tempres in stuff:
indexes_per_mol[tempres.get_molecule()].append(
tempres.get_index())
for mol in indexes_per_mol:
if pmi_resolution == 'all':
# because you select from the molecule,
# this will start the search from the base resolution
ps = select_at_all_resolutions(
mol.get_hierarchy(), residue_indexes=indexes_per_mol[mol])
else:
sel = IMP.atom.Selection(mol.get_hierarchy(),
resolution=pmi_resolution,
residue_indexes=indexes_per_mol[mol])
ps = sel.get_selected_particles()
# check that you don't have any incomplete fragments!
if warn_about_slices:
rset = set(indexes_per_mol[mol])
for p in ps:
if IMP.atom.Fragment.get_is_setup(p):
fset = set(IMP.atom.Fragment(p).get_residue_indexes())
if not fset <= rset:
minset = min(fset)
maxset = max(fset)
found = fset & rset
minf = min(found)
maxf = max(found)
resbreak = maxf if minf == minset else minset-1
warnings.warn(
'You are trying to select only part of the '
'bead %s:%i-%i. The residues you requested '
'are %i-%i. You can fix this by: '
'1) requesting the whole bead/none of it; or'
'2) break the bead up by passing '
'bead_extra_breaks=[\'%i\'] in '
'molecule.add_representation()'
% (mol.get_name(), minset, maxset, minf, maxf,
resbreak), IMP.pmi.ParameterWarning)
hier_list.append([IMP.atom.Hierarchy(p) for p in ps])
elif is_hierarchy:
        # hierarchy input: gather particles at the requested resolution
ps = []
if pmi_resolution == 'all':
for h in stuff:
ps += select_at_all_resolutions(h)
else:
for h in stuff:
ps += IMP.atom.Selection(
h, resolution=pmi_resolution).get_selected_particles()
hier_list = [IMP.atom.Hierarchy(p) for p in ps]
if not flatten:
hier_list = [hier_list]
else:
raise Exception('input_adaptor: you passed something of wrong type '
'or a list with mixed types')
if flatten and pmi_input:
return [h for sublist in hier_list for h in sublist]
else:
return hier_list
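# A minimal usage sketch (names such as `mol`, `mol1`, `mol2` are
# illustrative, not defined in this file): given built PMI Molecules,
# the calls below would return per-molecule lists of resolution-1
# hierarchies, or one flat list across all inputs.
#
#   beads = input_adaptor(mol, pmi_resolution=1)
#   flat = input_adaptor([mol1, mol2], pmi_resolution='all', flatten=True)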
def get_sorted_segments(mol):
"""Returns sequence-sorted segments array, each containing the first
particle the last particle and the first residue index."""
from operator import itemgetter
hiers = IMP.pmi.tools.input_adaptor(mol)
if len(hiers) > 1:
raise ValueError("only pass stuff from one Molecule, please")
hiers = hiers[0]
segs = []
for h in hiers:
try:
start = IMP.atom.Hierarchy(h).get_children()[0]
except IndexError:
start = IMP.atom.Hierarchy(h)
try:
end = IMP.atom.Hierarchy(h).get_children()[-1]
except IndexError:
end = IMP.atom.Hierarchy(h)
startres = IMP.pmi.tools.get_residue_indexes(start)[0]
segs.append((start, end, startres))
return sorted(segs, key=itemgetter(2))
def display_bonds(mol):
"""Decorate the sequence-consecutive particles from a PMI2 molecule
with a bond, so that they appear connected in the rmf file"""
    sorted_segments = get_sorted_segments(mol)
    for x in range(len(sorted_segments) - 1):
        last = sorted_segments[x][1]
        first = sorted_segments[x + 1][0]
p1 = last.get_particle()
p2 = first.get_particle()
if not IMP.atom.Bonded.get_is_setup(p1):
IMP.atom.Bonded.setup_particle(p1)
if not IMP.atom.Bonded.get_is_setup(p2):
IMP.atom.Bonded.setup_particle(p2)
if not IMP.atom.get_bond(IMP.atom.Bonded(p1), IMP.atom.Bonded(p2)):
IMP.atom.create_bond(
IMP.atom.Bonded(p1),
IMP.atom.Bonded(p2), 1)
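# Usage sketch (assuming `mol` is a built PMI2 molecule): decorating the
# consecutive beads with bonds makes the chain render as connected in RMF:
#
#   display_bonds(mol)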
def get_all_leaves(list_of_hs):
""" Just get the leaves from a list of hierarchies """
lvs = list(itertools.chain.from_iterable(
IMP.atom.get_leaves(item) for item in list_of_hs))
return lvs
def select_at_all_resolutions(hier=None, hiers=None, **kwargs):
"""Perform selection using the usual keywords but return ALL
resolutions (BEADS and GAUSSIANS).
Returns in flat list!
"""
if hiers is None:
hiers = []
if hier is not None:
hiers.append(hier)
if len(hiers) == 0:
warnings.warn("You passed nothing to select_at_all_resolutions()",
IMP.pmi.ParameterWarning)
return []
ret = OrderedSet()
for hsel in hiers:
try:
htest = IMP.atom.Hierarchy.get_is_setup(hsel)
except: # noqa: E722
raise Exception('select_at_all_resolutions: you have to pass '
'an IMP Hierarchy')
if not htest:
raise Exception('select_at_all_resolutions: you have to pass '
'an IMP Hierarchy')
if 'resolution' in kwargs or 'representation_type' in kwargs:
raise Exception("don't pass resolution or representation_type "
"to this function")
selB = IMP.atom.Selection(hsel, resolution=IMP.atom.ALL_RESOLUTIONS,
representation_type=IMP.atom.BALLS,
**kwargs)
selD = IMP.atom.Selection(hsel, resolution=IMP.atom.ALL_RESOLUTIONS,
representation_type=IMP.atom.DENSITIES,
**kwargs)
ret |= OrderedSet(selB.get_selected_particles())
ret |= OrderedSet(selD.get_selected_particles())
return list(ret)
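# Sketch of a typical call, using standard IMP.atom.Selection keywords
# (the molecule name is illustrative): gather residues 1-10 of one
# molecule at every resolution, beads and densities alike.
#
#   ps = select_at_all_resolutions(root_hier, molecule='Rpb1',
#                                  residue_indexes=range(1, 11))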
def get_particles_within_zone(hier,
target_ps,
sel_zone,
entire_residues,
exclude_backbone):
"""Utility to retrieve particles from a hierarchy within a
zone around a set of ps.
@param hier The hierarchy in which to look for neighbors
@param target_ps The particles for zoning
@param sel_zone The maximum distance
@param entire_residues If True, will grab entire residues
@param exclude_backbone If True, will only return sidechain particles
"""
test_sel = IMP.atom.Selection(hier)
backbone_types = ['C', 'N', 'CB', 'O']
if exclude_backbone:
test_sel -= IMP.atom.Selection(
hier, atom_types=[IMP.atom.AtomType(n) for n in backbone_types])
test_ps = test_sel.get_selected_particles()
nn = IMP.algebra.NearestNeighbor3D([IMP.core.XYZ(p).get_coordinates()
for p in test_ps])
zone = set()
for target in target_ps:
zone |= set(nn.get_in_ball(IMP.core.XYZ(target).get_coordinates(),
sel_zone))
zone_ps = [test_ps[z] for z in zone]
if entire_residues:
final_ps = set()
for z in zone_ps:
final_ps |= set(IMP.atom.Hierarchy(z).get_parent().get_children())
zone_ps = [h.get_particle() for h in final_ps]
return zone_ps
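# Sketch (assuming `hier` and `target_ps` already exist): collect all
# sidechain particles within 8 A of the targets, expanded to whole residues.
#
#   zone_ps = get_particles_within_zone(hier, target_ps, sel_zone=8.0,
#                                       entire_residues=True,
#                                       exclude_backbone=True)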
def get_rbs_and_beads(hiers):
"""Returns unique objects in original order"""
rbs = set()
beads = []
rbs_ordered = []
if not hasattr(hiers, '__iter__'):
hiers = [hiers]
for p in get_all_leaves(hiers):
if IMP.core.RigidMember.get_is_setup(p):
rb = IMP.core.RigidMember(p).get_rigid_body()
if rb not in rbs:
rbs.add(rb)
rbs_ordered.append(rb)
elif IMP.core.NonRigidMember.get_is_setup(p):
rb = IMP.core.NonRigidMember(p).get_rigid_body()
if rb not in rbs:
rbs.add(rb)
rbs_ordered.append(rb)
beads.append(p)
else:
beads.append(p)
return rbs_ordered, beads
def get_molecules(input_objects):
"This function returns the parent molecule hierarchies of given objects"
stuff = input_adaptor(input_objects, pmi_resolution='all', flatten=True)
molecules = set()
for h in stuff:
is_root = False
is_molecule = False
while not (is_molecule or is_root):
root = IMP.atom.get_root(h)
if root == h:
is_root = True
is_molecule = IMP.atom.Molecule.get_is_setup(h)
if is_molecule:
molecules.add(IMP.atom.Molecule(h))
h = h.get_parent()
return list(molecules)
def get_molecules_dictionary(input_objects):
moldict = defaultdict(list)
for mol in IMP.pmi.tools.get_molecules(input_objects):
name = mol.get_name()
moldict[name].append(mol)
for mol in moldict:
moldict[mol].sort(key=lambda x: IMP.atom.Copy(x).get_copy_index())
return moldict
def get_molecules_dictionary_by_copy(input_objects):
moldict = defaultdict(dict)
for mol in IMP.pmi.tools.get_molecules(input_objects):
name = mol.get_name()
c = IMP.atom.Copy(mol).get_copy_index()
moldict[name][c] = mol
return moldict
def get_selections_dictionary(input_objects):
moldict = IMP.pmi.tools.get_molecules_dictionary(input_objects)
seldict = defaultdict(list)
for name, mols in moldict.items():
for m in mols:
seldict[name].append(IMP.atom.Selection(m))
return seldict
def get_densities(input_objects):
"""Given a list of PMI objects, returns all density hierarchies within
these objects. The output of this function can be inputted into
things such as EM restraints. This function is intended to gather
density particles appended to molecules (and not other hierarchies
which might have been appended to the root node directly).
"""
# Note that Densities can only be selected at the Root or Molecule
# level and not at the Leaves level.
# we'll first get all molecule hierarchies corresponding to the leaves.
molecules = get_molecules(input_objects)
densities = []
for i in molecules:
densities += IMP.atom.Selection(
i, representation_type=IMP.atom.DENSITIES).get_selected_particles()
return densities
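# Sketch: the particles gathered here are what EM restraints typically
# consume; the restraint class and GMM filename below are assumptions,
# not something defined in this file.
#
#   densities = get_densities(mols)
#   gem = IMP.pmi.restraints.em.GaussianEMRestraint(densities,
#                                                   'target.gmm.txt')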
def shuffle_configuration(objects,
max_translation=300., max_rotation=2.0 * math.pi,
avoidcollision_rb=True, avoidcollision_fb=False,
cutoff=10.0, niterations=100,
bounding_box=None,
excluded_rigid_bodies=[],
hierarchies_excluded_from_collision=[],
hierarchies_included_in_collision=[],
verbose=False,
return_debug=False):
"""Shuffle particles. Used to restart the optimization.
The configuration of the system is initialized by placing each
rigid body and each bead randomly in a box. If `bounding_box` is
specified, the particles are placed inside this box; otherwise, each
particle is displaced by up to max_translation angstroms, and randomly
rotated. Effort is made to place particles far enough from each other to
prevent any steric clashes.
@param objects Can be one of the following inputs:
IMP Hierarchy, PMI System/State/Molecule/TempResidue, or
a list/set of them
@param max_translation Max translation (rbs and flexible beads)
@param max_rotation Max rotation (rbs only)
@param avoidcollision_rb check if the particle/rigid body was
placed close to another particle; uses the optional
arguments cutoff and niterations
@param avoidcollision_fb Advanced. Generally you want this False because
it's hard to shuffle beads.
@param cutoff Distance less than this is a collision
@param niterations How many times to try avoiding collision
@param bounding_box Only shuffle particles within this box.
Defined by ((x1,y1,z1),(x2,y2,z2)).
@param excluded_rigid_bodies Don't shuffle these rigid body objects
@param hierarchies_excluded_from_collision Don't count collision
with these bodies
@param hierarchies_included_in_collision Hierarchies that are not
shuffled, but should be included in collision calculation
(for fixed regions)
@param verbose Give more output
@note Best to only call this function after you've set up degrees
of freedom
For debugging purposes, returns: <shuffled indexes>,
<collision avoided indexes>
"""
# checking input
hierarchies = IMP.pmi.tools.input_adaptor(objects,
pmi_resolution='all',
flatten=True)
rigid_bodies, flexible_beads = get_rbs_and_beads(hierarchies)
if len(rigid_bodies) > 0:
mdl = rigid_bodies[0].get_model()
elif len(flexible_beads) > 0:
mdl = flexible_beads[0].get_model()
else:
raise Exception("Could not find any particles in the hierarchy")
if len(rigid_bodies) == 0:
print("shuffle_configuration: rigid bodies were not initialized")
# gather all particles
gcpf = IMP.core.GridClosePairsFinder()
gcpf.set_distance(cutoff)
# Add particles from excluded hierarchies to excluded list
collision_excluded_hierarchies = IMP.pmi.tools.input_adaptor(
hierarchies_excluded_from_collision, pmi_resolution='all',
flatten=True)
collision_included_hierarchies = IMP.pmi.tools.input_adaptor(
hierarchies_included_in_collision, pmi_resolution='all', flatten=True)
collision_excluded_idxs = set(
leaf.get_particle().get_index()
for h in collision_excluded_hierarchies
for leaf in IMP.core.get_leaves(h))
collision_included_idxs = set(
leaf.get_particle().get_index()
for h in collision_included_hierarchies
for leaf in IMP.core.get_leaves(h))
    # Exclude collisions with Gaussians
all_idxs = [] # expand to representations?
for p in IMP.pmi.tools.get_all_leaves(hierarchies):
if IMP.core.XYZ.get_is_setup(p):
all_idxs.append(p.get_particle_index())
if IMP.core.Gaussian.get_is_setup(p):
collision_excluded_idxs.add(p.get_particle_index())
if bounding_box is not None:
((x1, y1, z1), (x2, y2, z2)) = bounding_box
        lb = IMP.algebra.Vector3D(x1, y1, z1)
        ub = IMP.algebra.Vector3D(x2, y2, z2)
        bb = IMP.algebra.BoundingBox3D(lb, ub)
all_idxs = set(all_idxs) | collision_included_idxs
all_idxs = all_idxs - collision_excluded_idxs
debug = []
print('shuffling', len(rigid_bodies), 'rigid bodies')
for rb in rigid_bodies:
if rb not in excluded_rigid_bodies:
# gather particles to avoid with this transform
if avoidcollision_rb:
rb_idxs = set(rb.get_member_particle_indexes()) - \
collision_excluded_idxs
other_idxs = all_idxs - rb_idxs
debug.append([rb, other_idxs if avoidcollision_rb else set()])
# iterate, trying to avoid collisions
niter = 0
while niter < niterations:
rbxyz = (rb.get_x(), rb.get_y(), rb.get_z())
# local transform
if bounding_box:
translation = IMP.algebra.get_random_vector_in(bb)
# First move to origin
transformation_orig = IMP.algebra.Transformation3D(
IMP.algebra.get_identity_rotation_3d(),
-IMP.core.XYZ(rb).get_coordinates())
IMP.core.transform(rb, transformation_orig)
rotation = IMP.algebra.get_random_rotation_3d()
transformation = IMP.algebra.Transformation3D(rotation,
translation)
else:
transformation = \
IMP.algebra.get_random_local_transformation(
rbxyz, max_translation, max_rotation)
IMP.core.transform(rb, transformation)
# check collisions
if avoidcollision_rb and other_idxs:
mdl.update()
npairs = len(gcpf.get_close_pairs(mdl,
list(other_idxs),
list(rb_idxs)))
if npairs == 0:
break
else:
niter += 1
if verbose:
print("shuffle_configuration: rigid body placed "
"close to other %d particles, trying "
"again..." % npairs)
print("shuffle_configuration: rigid body name: "
+ rb.get_name())
if niter == niterations:
raise ValueError(
"tried the maximum number of iterations to "
"avoid collisions, increase the distance "
"cutoff")
else:
break
print('shuffling', len(flexible_beads), 'flexible beads')
for fb in flexible_beads:
# gather particles to avoid
if avoidcollision_fb:
fb_idxs = set(IMP.get_indexes([fb]))
other_idxs = all_idxs - fb_idxs
if not other_idxs:
continue
# iterate, trying to avoid collisions
niter = 0
while niter < niterations:
if bounding_box:
translation = IMP.algebra.get_random_vector_in(bb)
transformation = IMP.algebra.Transformation3D(translation)
else:
fbxyz = IMP.core.XYZ(fb).get_coordinates()
transformation = IMP.algebra.get_random_local_transformation(
fbxyz, max_translation, max_rotation)
# For gaussians, treat this fb as an rb
if IMP.core.NonRigidMember.get_is_setup(fb):
memb = IMP.core.NonRigidMember(fb)
xyz = memb.get_internal_coordinates()
if bounding_box:
# 'translation' is the new desired position in global
# coordinates; we need to convert that to internal
# coordinates first using the rigid body's ref frame
rf = memb.get_rigid_body().get_reference_frame()
glob_to_int = rf.get_transformation_from()
memb.set_internal_coordinates(
glob_to_int.get_transformed(translation))
else:
xyz_transformed = transformation.get_transformed(xyz)
memb.set_internal_coordinates(xyz_transformed)
if niter == 0:
debug.append(
[xyz, other_idxs if avoidcollision_fb else set()])
else:
d = IMP.core.XYZ(fb)
if bounding_box:
# Translate to origin first
if IMP.core.RigidBody.get_is_setup(fb.get_particle()):
IMP.core.transform(
IMP.core.RigidBody(fb.get_particle()),
-d.get_coordinates())
else:
IMP.core.transform(d, -d.get_coordinates())
d = IMP.core.XYZ(fb)
if niter == 0:
debug.append(
[d, other_idxs if avoidcollision_fb else set()])
if IMP.core.RigidBody.get_is_setup(fb.get_particle()):
IMP.core.transform(
IMP.core.RigidBody(fb.get_particle()), transformation)
else:
IMP.core.transform(d, transformation)
if avoidcollision_fb:
mdl.update()
npairs = len(gcpf.get_close_pairs(mdl,
list(other_idxs),
list(fb_idxs)))
if npairs == 0:
break
else:
niter += 1
print("shuffle_configuration: floppy body placed close "
"to other %d particles, trying again..." % npairs)
if niter == niterations:
raise ValueError(
"tried the maximum number of iterations to avoid "
"collisions, increase the distance cutoff")
else:
break
if return_debug:
return debug
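# Sketch: shuffle every rigid body and bead inside a 200 A cube centred on
# the origin, keeping one rigid body fixed (`root_hier` and `fixed_rb` are
# illustrative names):
#
#   shuffle_configuration(root_hier,
#                         bounding_box=((-100, -100, -100), (100, 100, 100)),
#                         excluded_rigid_bodies=[fixed_rb])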
class ColorHierarchy(object):
def __init__(self, hier):
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
self.mpl = mpl
self.plt = plt
hier.ColorHierarchy = self
self.hier = hier
mols = IMP.pmi.tools.get_molecules(IMP.atom.get_leaves(self.hier))
self.mols = [IMP.pmi.topology.PMIMoleculeHierarchy(mol)
for mol in mols]
self.method = self.nochange
self.scheme = None
self.first = None
self.last = None
def nochange(self):
pass
def get_color(self, fl):
return IMP.display.Color(*self.scheme(fl)[0:3])
def get_log_scale(self, fl):
import math
eps = 1.0
return math.log(fl+eps)
def color_by_resid(self):
self.method = self.color_by_resid
self.scheme = self.mpl.cm.rainbow
for mol in self.mols:
self.first = 1
self.last = len(IMP.pmi.topology.PMIMoleculeHierarchy(
mol).get_residue_indexes())
for p in IMP.atom.get_leaves(mol):
if IMP.atom.Residue.get_is_setup(p):
ri = IMP.atom.Residue(p).get_index()
c = self.get_color(float(ri)/self.last)
IMP.display.Colored(p).set_color(c)
if IMP.atom.Fragment.get_is_setup(p):
ris = IMP.atom.Fragment(p).get_residue_indexes()
avr = sum(ris)/len(ris)
c = self.get_color(float(avr)/self.last)
IMP.display.Colored(p).set_color(c)
def color_by_uncertainty(self):
self.method = self.color_by_uncertainty
self.scheme = self.mpl.cm.jet
ps = IMP.atom.get_leaves(self.hier)
unc_dict = {}
for p in ps:
if IMP.pmi.Uncertainty.get_is_setup(p):
u = IMP.pmi.Uncertainty(p).get_uncertainty()
unc_dict[p] = u
self.first = self.get_log_scale(1.0)
self.last = self.get_log_scale(100.0)
for p in unc_dict:
value = self.get_log_scale(unc_dict[p])
if value >= self.last:
value = self.last
if value <= self.first:
value = self.first
c = self.get_color((value-self.first) / (self.last-self.first))
IMP.display.Colored(p).set_color(c)
def get_color_bar(self, filename):
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
plt.clf()
fig = plt.figure(figsize=(8, 3))
ax1 = fig.add_axes([0.05, 0.80, 0.9, 0.15])
cmap = self.scheme
norm = mpl.colors.Normalize(vmin=0.0, vmax=1.0)
if self.method == self.color_by_uncertainty:
angticks = [1.0, 2.5, 5.0, 10.0, 25.0, 50.0, 100.0]
vvalues = []
marks = []
for at in angticks:
vvalue = (self.get_log_scale(at)-self.first) \
/ (self.last-self.first)
if vvalue <= 1.0 and vvalue >= 0.0:
vvalues.append(vvalue)
marks.append(str(at))
cb1 = mpl.colorbar.ColorbarBase(
ax1, cmap=cmap, norm=norm, ticks=vvalues,
orientation='horizontal')
print(self.first, self.last, marks, vvalues)
cb1.ax.set_xticklabels(marks)
        cb1.set_label('Angstrom')
plt.savefig(filename, dpi=150, transparent=True)
plt.show()
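# Sketch (names illustrative): color a hierarchy by per-particle
# uncertainty, then save the matching color bar:
#
#   ch = ColorHierarchy(root_hier)
#   ch.color_by_uncertainty()
#   ch.get_color_bar('uncertainty_bar.png')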
def color2rgb(colorname):
"""Given a Chimera color name or hex color value, return RGB"""
d = {'aquamarine': (0.4980392156862745, 1.0, 0.8313725490196079),
'black': (0.0, 0.0, 0.0),
'blue': (0.0, 0.0, 1.0),
'brown': (0.6470588235, 0.16470588235294117, 0.16470588235294117),
'chartreuse': (0.4980392156862745, 1.0, 0.0),
'coral': (1.0, 0.4980392156862745, 0.3137254901960784),
'cornflower blue': (0.39215686, 0.58431372549, 0.9294117647058824),
'cyan': (0.0, 1.0, 1.0),
'dark cyan': (0.0, 0.5450980392156862, 0.5450980392156862),
'dark gray': (0.6627450980, 0.6627450980392157, 0.6627450980392157),
'dark green': (0.0, 0.39215686274509803, 0.0),
'dark khaki': (0.74117647, 0.7176470588235294, 0.4196078431372549),
'dark magenta': (0.5450980392156862, 0.0, 0.5450980392156862),
'dark olive green': (0.333333333, 0.419607843, 0.1843137254901961),
'dark red': (0.5450980392156862, 0.0, 0.0),
'dark slate blue': (0.28235294, 0.239215686, 0.5450980392156862),
'dark slate gray': (0.1843137, 0.30980392, 0.30980392156862746),
'deep pink': (1.0, 0.0784313725490196, 0.5764705882352941),
'deep sky blue': (0.0, 0.7490196078431373, 1.0),
'dim gray': (0.41176470, 0.4117647058823529, 0.4117647058823529),
'dodger blue': (0.11764705882352941, 0.5647058823529412, 1.0),
'firebrick': (0.6980392, 0.13333333333333333, 0.13333333333333333),
'forest green': (0.13333333, 0.5450980392156862, 0.13333333333333333),
'gold': (1.0, 0.8431372549019608, 0.0),
'goldenrod': (0.85490196, 0.6470588235294118, 0.12549019607843137),
'gray': (0.7450980392156863, 0.7450980392156863, 0.7450980392156863),
'green': (0.0, 1.0, 0.0),
'hot pink': (1.0, 0.4117647058823529, 0.7058823529411765),
'khaki': (0.9411764705882353, 0.9019607843137255, 0.5490196078431373),
'light blue': (0.67843137, 0.8470588235294118, 0.9019607843137255),
'light gray': (0.82745098, 0.8274509803921568, 0.8274509803921568),
'light green': (0.56470588, 0.9333333333333333, 0.5647058823529412),
'light sea green': (0.125490, 0.6980392156862745, 0.6666666666666666),
'lime green': (0.1960784, 0.803921568627451, 0.19607843137254902),
'magenta': (1.0, 0.0, 1.0),
'medium blue': (0.1960784, 0.19607843137254902, 0.803921568627451),
'medium purple': (0.57647, 0.4392156862745098, 0.8588235294117647),
'navy blue': (0.0, 0.0, 0.5019607843137255),
'olive drab': (0.4196078, 0.5568627450980392, 0.13725490196078433),
'orange red': (1.0, 0.27058823529411763, 0.0),
'orange': (1.0, 0.4980392156862745, 0.0),
'orchid': (0.85490196, 0.4392156862745098, 0.8392156862745098),
'pink': (1.0, 0.7529411764705882, 0.796078431372549),
'plum': (0.8666666666666667, 0.6274509803921569, 0.8666666666666667),
'purple': (0.62745098, 0.12549019607843137, 0.9411764705882353),
'red': (1.0, 0.0, 0.0),
'rosy brown': (0.7372549, 0.5607843137254902, 0.5607843137254902),
'salmon': (0.980392, 0.5019607843137255, 0.4470588235294118),
'sandy brown': (0.956862745, 0.6431372549019608, 0.3764705882352941),
'sea green': (0.18039, 0.5450980392156862, 0.3411764705882353),
'sienna': (0.6274509, 0.3215686274509804, 0.17647058823529413),
'sky blue': (0.52941176, 0.807843137254902, 0.9215686274509803),
'slate gray': (0.439215686, 0.50196078, 0.5647058823529412),
'spring green': (0.0, 1.0, 0.4980392156862745),
'steel blue': (0.2745098, 0.50980392, 0.70588235),
'tan': (0.8235294117647058, 0.7058823529411765, 0.5490196078431373),
'turquoise': (0.25098039, 0.87843137, 0.81568627),
'violet red': (0.81568627, 0.125490196, 0.56470588235),
'white': (1.0, 1.0, 1.0),
'yellow': (1.0, 1.0, 0.0)}
if colorname.startswith('#'):
return tuple(int(colorname[i:i+2], 16) / 255. for i in (1, 3, 5))
else:
return d[colorname]
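# Both call forms below would return (1.0, 0.0, 0.0):
#
#   color2rgb('red')
#   color2rgb('#ff0000')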
|
salilab/pmi
|
pyext/src/tools.py
|
tools.py
|
py
| 60,875 |
python
|
en
|
code
| 12 |
github-code
|
6
|
[
{
"api_name": "IMP.atom.Hierarchy.get_is_setup",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Hierarchy",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.add_imp_provenance",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.add_software_provenance",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.get_module_version",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.add_script_provenance",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "IMP.ModelKey",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "IMP.ModelKey",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "IMP.RestraintSet",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "IMP.RestraintSet",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "IMP.RestraintSet.get_from",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "IMP.RestraintSet",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "IMP.RestraintSet.get_from",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "IMP.RestraintSet",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "warnings.warn",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "IMP.RestraintSet.get_from",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "IMP.RestraintSet",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "IMP.RestraintSet.get_from",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "IMP.RestraintSet",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "time.clock",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "time.clock",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "time.clock",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "IMP.Particle",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "IMP.isd.Scale.setup_particle",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "IMP.isd",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "IMP.Particle",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "IMP.isd.Weight.setup_particle",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "IMP.isd",
"line_number": 183,
"usage_type": "attribute"
},
{
"api_name": "IMP.isd.Weight.setup_particle",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "IMP.isd",
"line_number": 189,
"usage_type": "attribute"
},
{
"api_name": "IMP.UsageException",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "IMP.isd.Weight.setup_particle",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "IMP.isd",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "IMP.Particle",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "IMP.core.Surface.setup_particle",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 204,
"usage_type": "attribute"
},
{
"api_name": "IMP.isd.get_data_path",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "IMP.isd",
"line_number": 219,
"usage_type": "attribute"
},
{
"api_name": "ast.literal_eval",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "IMP.isd.CrossLinkData",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "IMP.isd",
"line_number": 231,
"usage_type": "attribute"
},
{
"api_name": "IMP.isd.CrossLinkData",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "IMP.isd",
"line_number": 241,
"usage_type": "attribute"
},
{
"api_name": "math.exp",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "pyparsing.Regex",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "pyparsing.QuotedString",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "pyparsing.Regex",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "pyparsing.Word",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "pyparsing.alphas",
"line_number": 282,
"usage_type": "attribute"
},
{
"api_name": "pyparsing.alphanums",
"line_number": 282,
"usage_type": "attribute"
},
{
"api_name": "pyparsing.Group",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "pyparsing.operatorPrecedence",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "pyparsing.opAssoc",
"line_number": 287,
"usage_type": "attribute"
},
{
"api_name": "pyparsing.opAssoc",
"line_number": 288,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Selection",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 332,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom",
"line_number": 333,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.XYZ",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 347,
"usage_type": "attribute"
},
{
"api_name": "pyparsing.get_name",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "IMP.atom.Selection",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 374,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom",
"line_number": 375,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Selection",
"line_number": 466,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 466,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Selection",
"line_number": 467,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 467,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Selection",
"line_number": 473,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 473,
"usage_type": "attribute"
},
{
"api_name": "sys.version_info",
"line_number": 478,
"usage_type": "attribute"
},
{
"api_name": "csv.DictReader",
"line_number": 486,
"usage_type": "call"
},
{
"api_name": "IMP.atom.Hierarchy",
"line_number": 499,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 499,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Fragment.get_is_setup",
"line_number": 520,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 520,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Fragment",
"line_number": 521,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 521,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Residue.get_is_setup",
"line_number": 522,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 522,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Residue",
"line_number": 523,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 523,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Atom.get_is_setup",
"line_number": 524,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 524,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Atom",
"line_number": 525,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 525,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Residue",
"line_number": 526,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 526,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Molecule.get_is_setup",
"line_number": 527,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 527,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.tools.OrderedSet",
"line_number": 528,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 528,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.get_leaves",
"line_number": 529,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 529,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Fragment.get_is_setup",
"line_number": 530,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 530,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Residue.get_is_setup",
"line_number": 531,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 531,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Atom.get_is_setup",
"line_number": 532,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 532,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.tools.get_residue_indexes",
"line_number": 542,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 542,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 558,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 558,
"usage_type": "name"
},
{
"api_name": "numpy.int32",
"line_number": 653,
"usage_type": "attribute"
},
{
"api_name": "numpy.int64",
"line_number": 653,
"usage_type": "attribute"
},
{
"api_name": "math.sqrt",
"line_number": 722,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 722,
"usage_type": "attribute"
},
{
"api_name": "math.exp",
"line_number": 723,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 729,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 729,
"usage_type": "attribute"
},
{
"api_name": "math.exp",
"line_number": 730,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 730,
"usage_type": "call"
},
{
"api_name": "collections.MutableSet",
"line_number": 770,
"usage_type": "name"
},
{
"api_name": "collections.OrderedDict",
"line_number": 832,
"usage_type": "name"
},
{
"api_name": "sys.version_info",
"line_number": 854,
"usage_type": "attribute"
},
{
"api_name": "RMF.open_rmf_file_read_only",
"line_number": 870,
"usage_type": "call"
},
{
"api_name": "IMP.rmf.link_hierarchies",
"line_number": 871,
"usage_type": "call"
},
{
"api_name": "IMP.rmf",
"line_number": 871,
"usage_type": "attribute"
},
{
"api_name": "IMP.rmf.load_frame",
"line_number": 872,
"usage_type": "call"
},
{
"api_name": "IMP.rmf",
"line_number": 872,
"usage_type": "attribute"
},
{
"api_name": "RMF.FrameID",
"line_number": 872,
"usage_type": "call"
},
{
"api_name": "IMP.atom.Hierarchy.get_is_setup",
"line_number": 923,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 923,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi",
"line_number": 927,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi",
"line_number": 928,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi",
"line_number": 929,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi",
"line_number": 930,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Selection",
"line_number": 971,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 971,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Fragment.get_is_setup",
"line_number": 980,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 980,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Fragment",
"line_number": 981,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 981,
"usage_type": "attribute"
},
{
"api_name": "warnings.warn",
"line_number": 989,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 998,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Hierarchy",
"line_number": 999,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 999,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Selection",
"line_number": 1008,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1008,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Hierarchy",
"line_number": 1010,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1010,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.tools.input_adaptor",
"line_number": 1028,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 1028,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Hierarchy",
"line_number": 1035,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1035,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Hierarchy",
"line_number": 1037,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1037,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Hierarchy",
"line_number": 1040,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1040,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Hierarchy",
"line_number": 1042,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1042,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.tools.get_residue_indexes",
"line_number": 1044,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 1044,
"usage_type": "attribute"
},
{
"api_name": "operator.itemgetter",
"line_number": 1046,
"usage_type": "call"
},
{
"api_name": "IMP.atom.Bonded.get_is_setup",
"line_number": 1060,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1060,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Bonded.setup_particle",
"line_number": 1061,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1061,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Bonded.get_is_setup",
"line_number": 1062,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1062,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Bonded.setup_particle",
"line_number": 1063,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1063,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.get_bond",
"line_number": 1065,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1065,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Bonded",
"line_number": 1065,
"usage_type": "call"
},
{
"api_name": "IMP.atom.create_bond",
"line_number": 1066,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1066,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Bonded",
"line_number": 1067,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1067,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Bonded",
"line_number": 1068,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1068,
"usage_type": "attribute"
},
{
"api_name": "itertools.chain.from_iterable",
"line_number": 1073,
"usage_type": "call"
},
{
"api_name": "itertools.chain",
"line_number": 1073,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.get_leaves",
"line_number": 1074,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1074,
"usage_type": "attribute"
},
{
"api_name": "warnings.warn",
"line_number": 1089,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 1090,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Hierarchy.get_is_setup",
"line_number": 1095,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1095,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Selection",
"line_number": 1105,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1105,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom",
"line_number": 1106,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Selection",
"line_number": 1108,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1108,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom",
"line_number": 1109,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Selection",
"line_number": 1130,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1130,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Selection",
"line_number": 1133,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1133,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.AtomType",
"line_number": 1134,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1134,
"usage_type": "attribute"
},
{
"api_name": "IMP.algebra.NearestNeighbor3D",
"line_number": 1136,
"usage_type": "call"
},
{
"api_name": "IMP.algebra",
"line_number": 1136,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.XYZ",
"line_number": 1136,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1136,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.XYZ",
"line_number": 1140,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1140,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Hierarchy",
"line_number": 1146,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1146,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.RigidMember.get_is_setup",
"line_number": 1159,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1159,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.RigidMember",
"line_number": 1160,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1160,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.NonRigidMember.get_is_setup",
"line_number": 1164,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1164,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.NonRigidMember",
"line_number": 1165,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1165,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.get_root",
"line_number": 1183,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1183,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Molecule.get_is_setup",
"line_number": 1186,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1186,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Molecule",
"line_number": 1188,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1188,
"usage_type": "attribute"
},
{
"api_name": "collections.defaultdict",
"line_number": 1194,
"usage_type": "call"
},
{
"api_name": "IMP.pmi.tools.get_molecules",
"line_number": 1195,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 1195,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Copy",
"line_number": 1200,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1200,
"usage_type": "attribute"
},
{
"api_name": "collections.defaultdict",
"line_number": 1205,
"usage_type": "call"
},
{
"api_name": "IMP.pmi.tools.get_molecules",
"line_number": 1206,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 1206,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Copy",
"line_number": 1208,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1208,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.tools.get_molecules_dictionary",
"line_number": 1214,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 1214,
"usage_type": "attribute"
},
{
"api_name": "collections.defaultdict",
"line_number": 1215,
"usage_type": "call"
},
{
"api_name": "IMP.atom.Selection",
"line_number": 1218,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1218,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Selection",
"line_number": 1235,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1235,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom",
"line_number": 1236,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 1241,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.tools.input_adaptor",
"line_number": 1285,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 1285,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.GridClosePairsFinder",
"line_number": 1299,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1299,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.tools.input_adaptor",
"line_number": 1303,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 1303,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.tools.input_adaptor",
"line_number": 1307,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 1307,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.get_leaves",
"line_number": 1313,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1313,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.get_leaves",
"line_number": 1318,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1318,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.tools.get_all_leaves",
"line_number": 1322,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 1322,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.XYZ.get_is_setup",
"line_number": 1323,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1323,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.Gaussian.get_is_setup",
"line_number": 1325,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1325,
"usage_type": "attribute"
},
{
"api_name": "IMP.algebra.Vector3D",
"line_number": 1330,
"usage_type": "call"
},
{
"api_name": "IMP.algebra",
"line_number": 1330,
"usage_type": "attribute"
},
{
"api_name": "IMP.algebra.Vector3D",
"line_number": 1331,
"usage_type": "call"
},
{
"api_name": "IMP.algebra",
"line_number": 1331,
"usage_type": "attribute"
},
{
"api_name": "IMP.algebra.BoundingBox3D",
"line_number": 1332,
"usage_type": "call"
},
{
"api_name": "IMP.algebra",
"line_number": 1332,
"usage_type": "attribute"
},
{
"api_name": "IMP.algebra.get_random_vector_in",
"line_number": 1354,
"usage_type": "call"
},
{
"api_name": "IMP.algebra",
"line_number": 1354,
"usage_type": "attribute"
},
{
"api_name": "IMP.algebra.Transformation3D",
"line_number": 1356,
"usage_type": "call"
},
{
"api_name": "IMP.algebra",
"line_number": 1356,
"usage_type": "attribute"
},
{
"api_name": "IMP.algebra.get_identity_rotation_3d",
"line_number": 1357,
"usage_type": "call"
},
{
"api_name": "IMP.algebra",
"line_number": 1357,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.XYZ",
"line_number": 1358,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1358,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.transform",
"line_number": 1359,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1359,
"usage_type": "attribute"
},
{
"api_name": "IMP.algebra.get_random_rotation_3d",
"line_number": 1360,
"usage_type": "call"
},
{
"api_name": "IMP.algebra",
"line_number": 1360,
"usage_type": "attribute"
},
{
"api_name": "IMP.algebra.Transformation3D",
"line_number": 1361,
"usage_type": "call"
},
{
"api_name": "IMP.algebra",
"line_number": 1361,
"usage_type": "attribute"
},
{
"api_name": "IMP.algebra.get_random_local_transformation",
"line_number": 1366,
"usage_type": "call"
},
{
"api_name": "IMP.algebra",
"line_number": 1366,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.transform",
"line_number": 1369,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1369,
"usage_type": "attribute"
},
{
"api_name": "IMP.get_indexes",
"line_number": 1399,
"usage_type": "call"
},
{
"api_name": "IMP.algebra.get_random_vector_in",
"line_number": 1408,
"usage_type": "call"
},
{
"api_name": "IMP.algebra",
"line_number": 1408,
"usage_type": "attribute"
},
{
"api_name": "IMP.algebra.Transformation3D",
"line_number": 1409,
"usage_type": "call"
},
{
"api_name": "IMP.algebra",
"line_number": 1409,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.XYZ",
"line_number": 1411,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1411,
"usage_type": "attribute"
},
{
"api_name": "IMP.algebra.get_random_local_transformation",
"line_number": 1412,
"usage_type": "call"
},
{
"api_name": "IMP.algebra",
"line_number": 1412,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.NonRigidMember.get_is_setup",
"line_number": 1416,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1416,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.NonRigidMember",
"line_number": 1417,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1417,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.XYZ",
"line_number": 1434,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1434,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.RigidBody.get_is_setup",
"line_number": 1437,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1437,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.transform",
"line_number": 1438,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1438,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.RigidBody",
"line_number": 1439,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1439,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.transform",
"line_number": 1442,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1442,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.XYZ",
"line_number": 1443,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1443,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.RigidBody.get_is_setup",
"line_number": 1447,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1447,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.transform",
"line_number": 1448,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1448,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.RigidBody",
"line_number": 1449,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1449,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.transform",
"line_number": 1451,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 1451,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.use",
"line_number": 1479,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1482,
"usage_type": "name"
},
{
"api_name": "IMP.pmi.tools.get_molecules",
"line_number": 1486,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 1486,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.get_leaves",
"line_number": 1486,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1486,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.topology.PMIMoleculeHierarchy",
"line_number": 1487,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 1487,
"usage_type": "attribute"
},
{
"api_name": "IMP.display.Color",
"line_number": 1498,
"usage_type": "call"
},
{
"api_name": "IMP.display",
"line_number": 1498,
"usage_type": "attribute"
},
{
"api_name": "math.log",
"line_number": 1503,
"usage_type": "call"
},
{
"api_name": "IMP.pmi.topology.PMIMoleculeHierarchy",
"line_number": 1510,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 1510,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.get_leaves",
"line_number": 1512,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1512,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Residue.get_is_setup",
"line_number": 1513,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1513,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Residue",
"line_number": 1514,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1514,
"usage_type": "attribute"
},
{
"api_name": "IMP.display.Colored",
"line_number": 1516,
"usage_type": "call"
},
{
"api_name": "IMP.display",
"line_number": 1516,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Fragment.get_is_setup",
"line_number": 1517,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1517,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Fragment",
"line_number": 1518,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1518,
"usage_type": "attribute"
},
{
"api_name": "IMP.display.Colored",
"line_number": 1521,
"usage_type": "call"
},
{
"api_name": "IMP.display",
"line_number": 1521,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.get_leaves",
"line_number": 1526,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 1526,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.Uncertainty.get_is_setup",
"line_number": 1529,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 1529,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.Uncertainty",
"line_number": 1530,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 1530,
"usage_type": "attribute"
},
{
"api_name": "IMP.display.Colored",
"line_number": 1541,
"usage_type": "call"
},
{
"api_name": "IMP.display",
"line_number": 1541,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.use",
"line_number": 1545,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 1547,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1547,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 1548,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1548,
"usage_type": "name"
},
{
"api_name": "matplotlib.colors.Normalize",
"line_number": 1552,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors",
"line_number": 1552,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.colorbar.ColorbarBase",
"line_number": 1564,
"usage_type": "call"
},
{
"api_name": "matplotlib.colorbar",
"line_number": 1564,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 1570,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1570,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 1571,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 1571,
"usage_type": "name"
}
] |
60843869
|
# -*- coding:utf-8 -*-
import time
import pickle
from .utils import Logger
class Scheduler(object):
spider = None
def __init__(self, crawler):
self.settings = crawler.settings
self.logger = Logger.from_crawler(crawler)
if self.settings.getbool("CUSTOM_REDIS"):
from custom_redis.client import Redis
else:
from redis import Redis
self.redis_conn = Redis(self.settings.get("REDIS_HOST"),
self.settings.getint("REDIS_PORT"))
self.queue_name = None
self.queues = {}
@classmethod
def from_crawler(cls, crawler):
return cls(crawler)
def open(self, spider):
self.spider = spider
self.queue_name = self.settings.get(
"TASK_QUEUE_TEMPLATE", "%s:request:queue") % spider.name
spider.set_redis(self.redis_conn)
def enqueue_request(self, request):
request.callback = getattr(
request.callback, "__name__", request.callback)
request.errback = getattr(
request.errback, "__name__", request.errback)
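        # NOTE: the (name, value, score) argument order below follows the
        # redis-py 2.x zadd convention (redis-py >= 3.0 expects a mapping);
        # the negated priority makes higher-priority requests pop first.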
self.redis_conn.zadd(
self.queue_name,
pickle.dumps(request),
-int(request.meta["priority"]))
self.logger.debug("Crawlid: %s, url: %s added to queue. " % (
request.meta['crawlid'], request.url))
def next_request(self):
self.logger.debug(
"length of queue %s is %s" % (
self.queue_name, self.redis_conn.zcard(self.queue_name)))
item = None
if self.settings.getbool("CUSTOM_REDIS"):
item = self.redis_conn.zpop(self.queue_name)
else:
pipe = self.redis_conn.pipeline()
pipe.multi()
pipe.zrange(self.queue_name, 0, 0).zremrangebyrank(
self.queue_name, 0, 0)
result, _ = pipe.execute()
if result:
item = result[0]
if item:
request = pickle.loads(item)
request.callback = request.callback and getattr(
self.spider, request.callback)
request.errback = request.errback and getattr(
self.spider, request.errback)
return request
def close(self, reason):
self.logger.info("Closing Spider: %s. " % self.spider.name)
def has_pending_requests(self):
return False
class SingleTaskScheduler(Scheduler):
def __init__(self, crawler):
super(SingleTaskScheduler, self).__init__(crawler)
self.queue_name = "%s:single:queue"
def has_pending_requests(self):
return self.redis_conn.zcard(self.queue_name) > 0
|
ShichaoMa/structure_spider
|
structor/scheduler.py
|
scheduler.py
|
py
| 2,682 |
python
|
en
|
code
| 29 |
github-code
|
6
|
[
{
"api_name": "utils.Logger.from_crawler",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "utils.Logger",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "redis.Redis",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pickle.dumps",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pickle.loads",
"line_number": 63,
"usage_type": "call"
}
] |
36229690900
|
from typing import Optional
'''
1373. Maximum Sum BST in Binary Tree
DFS:
Accumulate subtree sums while checking the BST property on the way up.
Once a subtree is not a BST, propagate the 520520 sentinel immediately.
'''
null = None
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def maxSumBST(self, root: Optional[TreeNode]) -> int:
res = 0
def dfs(node):
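            # 520520 / -520520 act as +/- infinity sentinels: an empty child
            # reports (0, 520520, -520520) so it never violates the BST
            # check, while an invalid subtree reports (0, -520520, 520520)
            # to poison every ancestor.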
nonlocal res
if node is None:
return 0, 520520, -520520
sum_left, min_left, max_left = dfs(node.left)
sum_right, min_right, max_right = dfs(node.right)
if min_left == -520520 or min_right == -520520 or max_left == 520520 or max_right == 520520:
return 0, -520520, 520520
if max_left >= node.val or min_right <= node.val:
return 0, -520520, 520520
sum1 = sum_left + sum_right + node.val
res = max(res, sum1)
return sum1, min(node.val, min_left), max(node.val, max_right)
_, _, _ = dfs(root)
return res
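# The driver below passed a raw level-order list straight into maxSumBST,
# which expects a TreeNode; this helper (an addition, not part of the
# original solution) builds the tree so the example actually runs.
def build_tree(values):
    # Build a binary tree from a LeetCode-style level-order list.
    if not values or values[0] is None:
        return None
    root = TreeNode(values[0])
    queue = [root]
    i = 1
    while queue and i < len(values):
        node = queue.pop(0)
        if values[i] is not None:
            node.left = TreeNode(values[i])
            queue.append(node.left)
        i += 1
        if i < len(values) and values[i] is not None:
            node.right = TreeNode(values[i])
            queue.append(node.right)
        i += 1
    return root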
s = Solution()
print(s.maxSumBST(build_tree(
    [1, 4, 3, 2, 4, 2, 5, null, null, null, null, null, null, 4, 6])))  # 20
|
z-w-wang/Leetcode-Problemlist
|
DailyProblem/Tree/1373.2023-05-20.py
|
1373.2023-05-20.py
|
py
| 1,207 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "typing.Optional",
"line_number": 16,
"usage_type": "name"
}
] |
1336912689
|
#!/usr/bin/env python3
from http.server import ThreadingHTTPServer
from os.path import dirname, realpath
from .httpHandler import HTTPApiHandler
from .fileCacher import cacheFile
from ..query import QueryHandler
def startServer(port):
HTTPApiHandler.queryHandler = QueryHandler()
filesPath = dirname(realpath(__file__)) + "/files"
cacheFile(None, filesPath + "/404.html", "text/html")
cacheFile("/404_styles.css", filesPath + "/404_styles.css", "text/css")
cacheFile("/404.png", filesPath + "/404.png", "image/png")
cacheFile("/index.html", filesPath + "/index.html", "text/html")
cacheFile("/rest-caller.js", filesPath + "/rest-caller.js", "text/javascript")
cacheFile("/styles.css", filesPath + "/styles.css", "text/css")
cacheFile("/roboto.css", filesPath + "/roboto.css", "text/css")
cacheFile("/Roboto-Regular.ttf", filesPath + "/Roboto-Regular.ttf", "application/octet-stream")
cacheFile("/Roboto-Medium.ttf", filesPath + "/Roboto-Medium.ttf", "application/octet-stream")
httpd = ThreadingHTTPServer(('', port), HTTPApiHandler)
try:
print("Server started")
httpd.serve_forever()
except Exception as e:
print("\nException occurred, stopping: {}".format(e))
except KeyboardInterrupt:
print("\nGracefully stopping server...")
httpd.server_close()
print("Server stopped")
if __name__ == "__main__":
startServer(8080)
|
wheelerd/uni-chatbot
|
stockbot/web_frontend/__main__.py
|
__main__.py
|
py
| 1,427 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "httpHandler.HTTPApiHandler.queryHandler",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "httpHandler.HTTPApiHandler",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "query.QueryHandler",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "fileCacher.cacheFile",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "fileCacher.cacheFile",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "fileCacher.cacheFile",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "fileCacher.cacheFile",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "fileCacher.cacheFile",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "fileCacher.cacheFile",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "fileCacher.cacheFile",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "fileCacher.cacheFile",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "fileCacher.cacheFile",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "http.server.ThreadingHTTPServer",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "httpHandler.HTTPApiHandler",
"line_number": 20,
"usage_type": "argument"
}
] |
1425636890
|
# -*- coding: utf-8 -*-
# import needed modules
from requests import get
from csv import writer, reader
from datetime import date
import sys
# %% define function for export/save data to *.csv file
def write_csv(data, filepath):
with open(filepath, 'w', newline='') as csv_file:
write = writer(csv_file)
for element in data:
write.writerow(element.split(","))
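# Each cached row in checked_days.csv has the form "YYYY-MM-DD,precip,snow",
# e.g. "2022-10-10,0.4,0.0", matching how rows are serialized further below.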
# %% check arguments passed by user by sys.argv
if len(sys.argv) == 3:
api_key = sys.argv[1]
user_input_date = sys.argv[2]
elif len(sys.argv) == 2:
api_key = sys.argv[1]
user_input_date = str(date.today())
elif len(sys.argv) < 2:
print("\nPodano za mało argumentów za pomocą 'sys.argv'.\n"
"Program do działania wymaga dwóch lub trzech argumentów:\n"
"rain_forecast.py << klucz API >> << data w formacie YYYY-MM-DD >>\n"
"lub\n"
"rain_forecast.py << klucz API >>.\n\n"
"Działanie programu zakończone.")
sys.exit()
else:
print("\nPodano za dużo argumentów za pomocą 'sys.argv'.\n"
"Program do działania wymaga dwóch lub trzech argumentów:\n"
"rain_forecast.py << klucz API >> << data w formacie YYYY-MM-DD >>\n"
"lub\n"
"rain_forecast.py << klucz API >>.\n\n"
"Działanie programu zakończone.")
sys.exit()
if len(user_input_date) != 10:
print("\nNieprawidłowy format daty.\n"
"Prawidłowy format to YYYY-MM-DD, np. 2022-10-10.\n\n"
"Działanie programu zakończone.")
sys.exit()
# %% variables to downlad data from API
url = "https://weatherbit-v1-mashape.p.rapidapi.com/forecast/daily"
# coordinates for Poznań
latitude = 52.40692
longitude = 16.92993
querystring = {"lat": latitude, "lon": longitude}
headers = {
"X-RapidAPI-Key": f"{api_key}",
"X-RapidAPI-Host": "weatherbit-v1-mashape.p.rapidapi.com"
}
# %% variables to open/read data from *.csv file
FILEPATH = "checked_days.csv"
with open(FILEPATH, newline='') as f:
    lines_from_csv = list(reader(f))
dictionary_CSV = {}
# %% load data from *.csv file to dict
for element in lines_from_csv:
dictionary_CSV[f"{element[0]}"] = {
'precip': float(element[1]),
'snow': float(element[2])
}
# %% main 'if/elif/else' statetments
if user_input_date in dictionary_CSV.keys():
if (dictionary_CSV[f'{user_input_date}']['precip'] > 0 and
dictionary_CSV[f'{user_input_date}']['snow'] > 0):
print(f"\nW dniu {user_input_date} w Poznaniu "
"będzie padać śnieg z deszczem :(")
elif dictionary_CSV[f'{user_input_date}']['precip'] > 0:
print(f"\nW dniu {user_input_date} w Poznaniu "
"będzie padać deszcz! Zabierz ze sobą parasol :)")
elif dictionary_CSV[f'{user_input_date}']['snow'] > 0:
print(f"\nW dniu {user_input_date} w Poznaniu "
"będzie padać śnieg! Ubierz coś ciepłego :)")
else:
print(f"\nW dniu {user_input_date} w Poznaniu nie będzie padać! "
"Miłego dnia :)")
else:
print("\nPobieram dane z API.")
dictionary_API = {}
r = get(url, headers=headers, params=querystring)
response = r.json()
weather_forecast_data = response['data']
for day in weather_forecast_data:
dictionary_API[f"{day['datetime']}"] = {
'precip': day['precip'],
'snow': day['snow']
}
if user_input_date in dictionary_API.keys():
if (dictionary_API[f'{user_input_date}']['precip'] > 0 and
dictionary_API[f'{user_input_date}']['snow'] > 0):
print(f"\nW dniu {user_input_date} w Poznaniu "
"będzie padać śnieg z deszczem :(")
elif dictionary_API[f'{user_input_date}']['precip'] > 0:
print(f"\nW dniu {user_input_date} w Poznaniu "
"będzie padać deszcz! Zabierz ze sobą parasol :)")
elif dictionary_API[f'{user_input_date}']['snow'] > 0:
print(f"\nW dniu {user_input_date} w Poznaniu "
"będzie padać śnieg! Ubierz coś ciepłego :)")
else:
print(f"\nW dniu {user_input_date} w Poznaniu nie będzie padać! "
"Miłego dnia :)")
else:
print(f"\nNie wiem czy w dniu {user_input_date} będzie padać "
"w Poznaniu!")
# write/save data to *.csv file
list_for_write_csv = []
    for key in dictionary_API:
        string = (f"{key},{dictionary_API[key]['precip']},"
                  f"{dictionary_API[key]['snow']}")
        list_for_write_csv.append(string)
write_csv(list_for_write_csv, FILEPATH)
# %%
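# Example invocation (sketch, matching the argv handling above):
#   python rain_forecast.py <RAPIDAPI_KEY> 2022-10-10
# or, defaulting to today's date:
#   python rain_forecast.py <RAPIDAPI_KEY>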
|
filrat2/rain_forecast
|
rain_forecast.py
|
rain_forecast.py
|
py
| 4,691 |
python
|
pl
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "csv.writer",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "datetime.date.today",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 105,
"usage_type": "call"
}
] |
71429998588
|
from django.db.models import Q
from django.shortcuts import get_object_or_404
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.exceptions import NotFound, PermissionDenied, ParseError
from .models import Letterlist, Letter
from . import serializers
from users.models import User
# /me/
class ChattingList(APIView):
permission_classes = [IsAuthenticated]
@swagger_auto_schema(
operation_summary="채팅방 목록 조회",
responses={
200: openapi.Response(
description="Succfull Response",
schema=serializers.ChatroomSerialzier(many=True),
)
},
)
def get(self, request):
chatlist = Letterlist.objects.filter(user=request.user).order_by("-updated_at")
chatlist = [i for i in chatlist if request.user not in i.ignore_by.all()]
serializer = serializers.ChatroomSerialzier(
chatlist,
many=True,
context={"request": request},
)
return Response(serializer.data)
# /<int:pk>/ GET
class ChattingRoom(APIView):
permission_classes = [IsAuthenticated]
@swagger_auto_schema(
operation_summary="해당 채팅방의 쪽지 기록 조회",
responses={
200: openapi.Response(
description="Successful Response",
schema=serializers.MessageSerialzier(),
),
400: openapi.Response(description="Not Found Pk"),
403: openapi.Response(description="Permission Denied"),
},
)
def get(self, request, pk):
chat = Letter.objects.filter(room__pk=pk)
if chat:
if request.user not in chat[0].room.user.all():
raise PermissionDenied
chat = [i for i in chat if request.user not in i.delete_by.all()]
serializer = serializers.MessageSerialzier(
chat,
many=True,
context={"request": request},
)
return Response(serializer.data)
raise NotFound
@swagger_auto_schema(
operation_summary="쪽지방 차단",
responses={
204: openapi.Response(
description="Successful Response",
),
403: openapi.Response(description="Sender != request.user"),
404: openapi.Response(description="Not Found Pk"),
},
)
def delete(self, request, pk):
letter = get_object_or_404(Letterlist, pk=pk)
user = [i for i in letter.user.all()]
if request.user in user:
letter.ignore_by.add(request.user)
letter.save()
return Response("Ok", status=204)
else:
raise PermissionDenied
# /message/ POST -> send a message
class MessageSend(APIView):
permission_classes = [IsAuthenticated]
@swagger_auto_schema(
operation_summary="쪽지 전송",
request_body=openapi.Schema(
type=openapi.TYPE_OBJECT,
required=["receiver", "text"],
properties={
"receiver": openapi.Schema(
type=openapi.TYPE_INTEGER,
description="보내는 유저의 pk ",
),
"text": openapi.Schema(
type=openapi.TYPE_STRING,
description="전송하는 메세지",
),
},
),
responses={
201: openapi.Response(
description="Successful Response",
),
400: openapi.Response(description="Data Error"),
404: openapi.Response(description="Not Found Pk"),
},
)
def post(self, request):
serializer = serializers.MessageSerialzier(data=request.data)
if serializer.is_valid():
receiver = request.data.get("receiver")
if not receiver:
raise ParseError("required receiver")
if receiver == str(request.user.pk):
raise ParseError("can't send to yourself")
message = serializer.save(sender=request.user, receiver=receiver)
return Response("Successful Response", status=201)
else:
return Response(serializer.errors, status=400)
class MessageDelete(APIView):
permission_classes = [IsAuthenticated]
@swagger_auto_schema(
operation_summary="쪽지 삭제",
responses={
204: openapi.Response(
description="Successful Response",
),
400: openapi.Response(description="Not Found Pk"),
403: openapi.Response(description="Sender != request.user"),
},
)
def delete(self, request, pk):
letter = get_object_or_404(Letter, pk=pk)
user = [i for i in letter.room.user.all()]
if request.user in user:
letter.delete_by.add(request.user)
letter.save()
# if letter.sender == request.user:
# # letter.delete()
return Response("Ok", status=204)
else:
raise PermissionDenied
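# Example URLconf wiring for these views (a sketch based on the route comments
# above; the project's actual urls.py may differ):
#   from django.urls import path
#   from . import views
#   urlpatterns = [
#       path("me/", views.ChattingList.as_view()),
#       path("<int:pk>/", views.ChattingRoom.as_view()),
#       path("message/", views.MessageSend.as_view()),
#       path("message/<int:pk>/", views.MessageDelete.as_view()),
#   ]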
|
izunaaaaa/CurB_Backend
|
letterlist/views.py
|
views.py
|
py
| 5,352 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "rest_framework.views.APIView",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "rest_framework.permissions.IsAuthenticated",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "models.Letterlist.objects.filter",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "models.Letterlist.objects",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "models.Letterlist",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "drf_yasg.utils.swagger_auto_schema",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi.Response",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "rest_framework.views.APIView",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "rest_framework.permissions.IsAuthenticated",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "models.Letter.objects.filter",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "models.Letter.objects",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "models.Letter",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "rest_framework.exceptions.PermissionDenied",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "rest_framework.exceptions.NotFound",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "drf_yasg.utils.swagger_auto_schema",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi.Response",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "drf_yasg.openapi.Response",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "drf_yasg.openapi.Response",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "models.Letterlist",
"line_number": 80,
"usage_type": "argument"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "rest_framework.exceptions.PermissionDenied",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "drf_yasg.utils.swagger_auto_schema",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi.Response",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "drf_yasg.openapi.Response",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "drf_yasg.openapi.Response",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "rest_framework.views.APIView",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "rest_framework.permissions.IsAuthenticated",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "rest_framework.exceptions.ParseError",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "rest_framework.exceptions.ParseError",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "drf_yasg.utils.swagger_auto_schema",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi.Schema",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "drf_yasg.openapi.TYPE_OBJECT",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "drf_yasg.openapi.Schema",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "drf_yasg.openapi.TYPE_INTEGER",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "drf_yasg.openapi.Schema",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "drf_yasg.openapi.TYPE_STRING",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "drf_yasg.openapi.Response",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "drf_yasg.openapi.Response",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "drf_yasg.openapi.Response",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "rest_framework.views.APIView",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "rest_framework.permissions.IsAuthenticated",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "models.Letter",
"line_number": 146,
"usage_type": "argument"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "rest_framework.exceptions.PermissionDenied",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "drf_yasg.utils.swagger_auto_schema",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi.Response",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "drf_yasg.openapi.Response",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "drf_yasg.openapi.Response",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 142,
"usage_type": "name"
}
] |
21618000912
|
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
class check_box_single():
def __init__(self):
self.driver = webdriver.Chrome('./chromedriver')
self.driver.implicitly_wait(10)
self.driver.get("https://www.seleniumeasy.com/test/basic-checkbox-demo.html")
self.driver.maximize_window()
def push_button(self):
        button = self.driver.find_element(By.XPATH, "//*[@id=\"isAgeSelected\"]")
self.driver.execute_script("arguments[0].click();", button)
def extract_text(self):
elements = self.driver.find_element(By.ID, 'txtAge')
msg2 = str(elements.text)
return msg2
def validation(self, msg2):
if msg2:
print('Avem afisare')
if 'Success' in msg2:
return True
else:
return False
else:
return False
def msj_check(driver):
    button = driver.find_element(By.XPATH, "//*[@id=\"isAgeSelected\"]")
driver.execute_script("arguments[0].click();", button)
elements = driver.find_element(By.ID, 'txtAge')
msg2 = str(elements.text)
if msg2:
print('Avem afisare')
if 'Success' in msg2:
print('Contine')
print(msg2)
else:
print("Afisare incorecta!")
else:
print ('False')
ob = check_box_single()
ob.push_button()
msg2 = ob.extract_text()
print(ob.validation(msg2))
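# Note: the Options import above is currently unused; a headless run could be
# wired up like this (sketch):
#   opts = Options()
#   opts.add_argument("--headless")
#   driver = webdriver.Chrome('./chromedriver', options=opts)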
|
CorozelEmanuel/Luxoft2021-proiect1
|
Ceausu Ionut Marian/Selenium1/exemple/checkboxsingle.py
|
checkboxsingle.py
|
py
| 1,572 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 36,
"usage_type": "name"
}
] |
30590549707
|
import numpy as np
from pathlib import Path
from PIL import Image, ImageDraw, ImageFont
from tqdm import tqdm
SQSIZE = 60
SPACING = 15
def make_col(start, end, n=5):
"""Create a column of numbers."""
nums = np.random.choice(np.arange(start, end+1), size=n, replace=False)
return nums
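# Example: make_col(1, 15) draws five unique numbers from 1..15,
# e.g. array([ 3, 11,  7,  1, 14]) (output varies per run).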
def generate_card():
"""Create a bingo card and save it as PNG image."""
# Create five columns
cols = np.array([make_col(15*i + 1, 15*i + 15) for i in range(5)])
# Replace the center cell by the median of the first column
# so that it ends up in the middle when sorting the columns
cols[2, 2] = np.median(np.r_[
cols[2, :2],
cols[2, 3:]
])
# Sort the columns
rows = np.sort(cols.T, axis=0)
rows[2, 2] = -1
cols = rows.T
# Create the bingo image and fill the background with a light color
bgcolor = tuple(np.random.randint(200, 255) for _ in range(3))
textcolor = tuple(np.random.randint(50, 150) for _ in range(3))
img_width = 5 * SQSIZE + 6 * SPACING
img_height = 6 * SQSIZE + 7 * SPACING
img = Image.new("RGB", (img_width, img_height), color=bgcolor)
draw = ImageDraw.Draw(img)
topfont = ImageFont.truetype(r"C:\Windows\Fonts\CALIST.TTF", size=int(SQSIZE * 0.75))
numfont = ImageFont.truetype(r"C:\Windows\Fonts\CALIST.TTF", size=SQSIZE // 2)
for rowidx in range(5):
# Show one letter from 'BINGO' at the top of the column
x0 = SPACING + SQSIZE // 4 + (SPACING + SQSIZE) * rowidx
y0 = SPACING
draw.text((x0, y0), "BINGO"[rowidx], font=topfont, fill=textcolor)
for colidx in range(5):
# Create a square to put the number in
x0 = SPACING + (SPACING + SQSIZE) * rowidx
y0 = SPACING + (SPACING + SQSIZE) * (colidx + 1)
x1 = x0 + SQSIZE
y1 = y0 + SQSIZE
draw.rectangle([x0, y0, x1, y1], outline=(0, 0, 0))
# Create the text for the number
text = str(rows[colidx, rowidx])
textcoords = (x0+SPACING, y0+SPACING)
# For single-digit numbers, move the text to center it
if rows[colidx, rowidx] < 10:
textcoords = (x0 + int(SPACING * 1.5), y0 + SPACING)
font = numfont
# For the center box: other text and font size
if rowidx == colidx == 2:
text = "BONUS"
font = ImageFont.truetype(r"C:\Windows\Fonts\CALIST.TTF", size=SQSIZE // 5)
textcoords = (x0 + SPACING // 2 + 1, y0 + int(SPACING * 1.5))
# Put the number in the square
            draw.text(textcoords, text, font=font, fill=textcolor)
# Create a filename with a number that doesn't exist yet
bingodir = Path(__file__).parent
volgnr = 0
while True:
fn = bingodir / f"kaart{volgnr:03d}.png"
if not fn.is_file():
break
volgnr += 1
# Finally, save the image
img.save(fn)
if __name__ == "__main__":
for _ in tqdm(range(150)):
generate_card()
|
Lewistrick/bingogenerator
|
bingo.py
|
bingo.py
|
py
| 3,060 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.random.choice",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.r_",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "numpy.sort",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.new",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "PIL.ImageDraw.Draw",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "PIL.ImageDraw",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "PIL.ImageFont.truetype",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFont",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "PIL.ImageFont.truetype",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFont",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "PIL.ImageFont.truetype",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFont",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 90,
"usage_type": "call"
}
] |
6484276041
|
import os
import shutil
import time
import configparser
from PIL import Image
STEP = 10
# from lib.utils import Id2name
# INPUT_DIR = r"data/MH_01_easy/mav0/cam0/data"
# OUTPUT_DIR = r"./imgs"
config = configparser.ConfigParser()
config.read("config.ini", encoding="utf-8")
INPUT_DIR = config["DEFAULT"]["SIMULATOR_IMG_DIR"]
OUTPUT_DIR = config["DEFAULT"]["IMGS_FROM_SERVER"]
imgs = os.listdir(INPUT_DIR)
imgs.sort()
# for i in range (len(os.listdir(INPUT_DIR))):
# shutil.copy("{}/output{}.jpg".format(INPUT_DIR, i), "{}/output{}.jpg".format(OUTPUT_DIR, i))
# time.sleep(0.5)
##for i in range(140, len(os.listdir(INPUT_DIR))):
# for i in range(140, 300):
# img = Id2name(i)
# shutil.copy("{}/{}".format(INPUT_DIR, img), "{}/output{}.jpg".format(OUTPUT_DIR, i-1))
# time.sleep(0.25)
# for i in range(0, 600):
# shutil.copy("{}/img{}.jpg".format(INPUT_DIR, i), "{}/output{}.jpg".format(OUTPUT_DIR, i))
# time.sleep(0.25)
### EuRoC Machine Hall
for i in range(0, len(imgs), STEP):
img = imgs[i]
# shutil.copy("{}/{}".format(INPUT_DIR, img), "{}/output{}.jpg".format(OUTPUT_DIR, i))
im = Image.open("{}/{}".format(INPUT_DIR, img))
rgb_im = im.convert("RGB")
rgb_im.save("{}/{}.jpg".format(OUTPUT_DIR, img[:-4])) # jpg
time.sleep(1)
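# config.ini sketch (section/keys taken from the reads above; example values
# borrowed from the commented-out EuRoC paths):
#   [DEFAULT]
#   SIMULATOR_IMG_DIR = data/MH_01_easy/mav0/cam0/data
#   IMGS_FROM_SERVER = ./imgs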
|
franioli/COLMAP_SLAM
|
simulator.py
|
simulator.py
|
py
| 1,283 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "configparser.ConfigParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 44,
"usage_type": "call"
}
] |
73029609789
|
from django.db import models
class ShowManager(models.Manager):
def basic_validator(self, postData):
errors = {}
if len(postData['show_title']) < 2:
errors["show_title"] = "Show title should be at least 2 characters!"
if len(postData['show_network']) < 3:
errors["show_network"] = "Network should be at least 3 characters"
if len(postData['show_description']) < 10:
errors["show_description"] = "description should be at least 10 characters"
return errors
class Show(models.Model):
title = models.CharField(max_length=255)
network = models.CharField(max_length=30)
release_date = models.CharField(max_length=10, null=True)
description = models.TextField(null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = ShowManager()
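# Example view-side usage of the custom manager (sketch):
#   errors = Show.objects.basic_validator(request.POST)
#   if errors:
#       ...  # e.g. flash each message and redirect back to the form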
|
Jgomez1996/deployment_test
|
shows_app/models.py
|
models.py
|
py
| 904 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.db.models.Manager",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 22,
"usage_type": "name"
}
] |
2516918892
|
import netCDF4 as netCDF
from extraction_utils import basic, getCoordinateVariable
import json
import matplotlib.pyplot as plt
import decimal
import numpy as np
import traceback
class ImageStats(object):
"""docstring for ImageStats"""
def __init__(self, filename, variable):
super(ImageStats, self).__init__()
self.filename = filename
self.variable = variable
def process(self):
#print "running basic processing on %s" % self.filename
# create three arrays, 1D lat 1D lon 2D data
#print "processing image"
netcdf_file = netCDF.Dataset(self.filename, "r")
#variable = np.ma.masked_array(netcdf_file.variables[self.variable])
variable = np.ma.array(netcdf_file.variables[self.variable][:])
lats = getCoordinateVariable(netcdf_file, "Lat")
lons = getCoordinateVariable(netcdf_file, "Lon")
time_dim_index = netcdf_file.variables[self.variable].dimensions.index('time')
if 'depth' in netcdf_file.variables[self.variable].dimensions:
depth_dim_index = netcdf_file.variables[self.variable].dimensions.index('depth')
var_list = []
lat_list = []
lon_list = []
#print variable.shape
if(len(variable.shape) > 3 ):
#print "hmmmm"
#print variable.shape
#print variable
#print np.nanmean(variable, axis=time_dim_index).shape
var_list = [[float(x) if not np.isinf(x) and not np.isnan(x) else None for x in y ] for y in np.nanmean(variable, axis=time_dim_index)[0]]
#print var_list
lat_list = [float(x) for x in lats]
lon_list = [float(x) for x in lons]
elif(len(variable.shape) > 2 ):
#print variable.shape
#print variable
#print np.nanmean(variable, axis=time_dim_index)
var_list = [[float(x) if not np.isinf(x) and not np.isnan(x) else None for x in y ] for y in np.nanmean(variable, axis=time_dim_index)]
#var_list = [[float(x) for x in y] for y in variable[0]]
#print var_list
lat_list = [float(x) for x in lats]
lon_list = [float(x) for x in lons]
else:
var_list = [list(x) for x in variable]
lat_list = [float(x) for x in lats]
lon_list = [float(x) for x in lons]
#print len(lat_list)
#print len(lon_list)
#print len(var_list)
#print len(var_list[0])
#print lat_list
_ret = {}
_ret['vars'] = ['Data','Latitudes','Longitudes']
_ret['data'] = []
_ret['data'].append(var_list)
_ret['data'].append(lat_list)
_ret['data'].append(lon_list)
#print json.dumps(_ret )
return json.dumps(_ret )
#netcdf_variable = netcdf_file[variable]
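# Example usage (sketch; the file path and variable name are placeholders):
#   stats = ImageStats("/path/to/file.nc", "chlor_a")
#   payload = json.loads(stats.process())  # keys: 'vars' and 'data'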
|
pmlrsg/GISportal
|
plotting/data_extractor/analysis_types/image_stats.py
|
image_stats.py
|
py
| 2,466 |
python
|
en
|
code
| 71 |
github-code
|
6
|
[
{
"api_name": "netCDF4.Dataset",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.ma.array",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.ma",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "extraction_utils.getCoordinateVariable",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "extraction_utils.getCoordinateVariable",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.isinf",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.nanmean",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.isinf",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.nanmean",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 70,
"usage_type": "call"
}
] |
75276539706
|
import numpy as np
import pandas as pd
import torch
from torch.autograd import Variable
def testing(group_test, y_test, model):
rmse = 0
j = 1
result = []
while j <= 100:
x_test = group_test.get_group(j).to_numpy()
data_predict = 0
for t in range(x_test.shape[0]): # iterate to the end of each sequence
if t == 0:
continue
elif t == x_test.shape[0] - 1: # for last one row append a zero padding
X_test = np.append(x_test[t - 1:, 2:], [np.zeros(14)], axis=0)
else:
X_test = x_test[t - 1:t + 2, 2:]
X_test_tensors = Variable(torch.Tensor(X_test))
X_test_tensors_final = X_test_tensors.reshape((1, 1, X_test_tensors.shape[0], X_test_tensors.shape[1]))
test_predict = model.forward(X_test_tensors_final, t)
data_predict = test_predict.data.numpy()[-1]
            # block for linearly decreasing the RUL after each iteration
if data_predict - 1 < 0:
data_predict = 0
else:
data_predict -= 1
result.append(data_predict)
rmse += np.power((data_predict - y_test.to_numpy()[j - 1]), 2)
j += 1
rmse = np.sqrt(rmse / 100)
result = y_test.join(pd.DataFrame(result))
result = result.sort_values('RUL', ascending=False)
return rmse, result
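# Example call (sketch; assumes a trained `model` and C-MAPSS-style test data,
# grouped by engine id into the 100 units iterated above):
#   rmse, result = testing(group_test, y_test, model)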
|
jiaxiang-cheng/PyTorch-Transformer-for-RUL-Prediction
|
testing.py
|
testing.py
|
py
| 1,420 |
python
|
en
|
code
| 140 |
github-code
|
6
|
[
{
"api_name": "numpy.append",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 43,
"usage_type": "call"
}
] |
26776312630
|
"""Apply Perl::Critic tool and gather results."""
import argparse
import logging
import subprocess
from typing import List, Optional
from statick_tool.issue import Issue
from statick_tool.package import Package
from statick_tool.tool_plugin import ToolPlugin
class PerlCriticToolPlugin(ToolPlugin):
"""Apply Perl::Critic tool and gather results."""
def get_name(self) -> str:
"""Get name of tool."""
return "perlcritic"
def gather_args(self, args: argparse.Namespace) -> None:
"""Gather arguments."""
args.add_argument(
"--perlcritic-bin",
dest="perlcritic_bin",
type=str,
help="perlcritic binary path",
)
def get_file_types(self) -> List[str]:
"""Return a list of file types the plugin can scan."""
return ["perl_src"]
def process_files(
self, package: Package, level: str, files: List[str], user_flags: List[str]
) -> Optional[List[str]]:
"""Run tool and gather output."""
perlcritic_bin = "perlcritic"
if self.plugin_context and self.plugin_context.args.perlcritic_bin is not None:
perlcritic_bin = self.plugin_context.args.perlcritic_bin
flags = ["--nocolor", "--verbose=%f:::%l:::%p:::%m:::%s\n"]
flags += self.get_user_flags(level)
try:
            output = subprocess.check_output(
                [perlcritic_bin] + flags + files,
                stderr=subprocess.STDOUT,
                universal_newlines=True,
            )
except subprocess.CalledProcessError as ex:
output = ex.output
if ex.returncode != 2:
logging.warning("perlcritic failed! Returncode = %d", ex.returncode)
logging.warning("%s exception: %s", self.get_name(), ex.output)
return []
except OSError as ex:
logging.warning("Couldn't find %s! (%s)", perlcritic_bin, ex)
return []
logging.debug("%s", output)
return output.splitlines()
def parse_output(
self, total_output: List[str], package: Optional[Package] = None
) -> List[Issue]:
"""Parse tool output and report issues."""
issues: List[Issue] = []
# Load the plugin mapping if possible
warnings_mapping = self.load_mapping()
for line in total_output:
split_line = line.strip().split(":::")
# Should split into five segments, anything less is invalid.
if len(split_line) < 5:
continue
cert_reference = None
if split_line[2].replace("::", "__") in warnings_mapping:
cert_reference = warnings_mapping[split_line[2].replace("::", "__")]
issues.append(
Issue(
split_line[0],
split_line[1],
self.get_name(),
split_line[2],
split_line[4],
split_line[3],
cert_reference,
)
)
return issues
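# Example of one raw line this parser expects, following the --verbose template
# "%f:::%l:::%p:::%m:::%s" used above (a sketch, not captured output):
#   lib/Foo.pm:::42:::Subroutines::ProhibitExplicitReturnUndef:::"return" statement with explicit "undef":::5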
|
sscpac/statick
|
statick_tool/plugins/tool/perlcritic_tool_plugin.py
|
perlcritic_tool_plugin.py
|
py
| 3,117 |
python
|
en
|
code
| 66 |
github-code
|
6
|
[
{
"api_name": "statick_tool.tool_plugin.ToolPlugin",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "argparse.Namespace",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "statick_tool.package.Package",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "subprocess.check_output",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "subprocess.STDOUT",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "subprocess.CalledProcessError",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "logging.warning",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "statick_tool.package.Package",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "statick_tool.issue.Issue",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "statick_tool.issue.Issue",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "statick_tool.issue.Issue",
"line_number": 66,
"usage_type": "name"
}
] |
29279214986
|
from setuptools import setup, find_packages
VERSION = '0.0.19'
DESCRIPTION = 'ParkingLot is a Python service imitating a parking-lot-like system.'
LONG_DESCRIPTION = 'The service indicates whether a vehicle is allowed to enter the parking lot.'
# Setting up
setup(
name="parkinglot",
version=VERSION,
author="PsychoRover",
description=DESCRIPTION,
long_description_content_type="text/markdown",
long_description=LONG_DESCRIPTION,
packages=find_packages(),
install_requires=['psycopg2', 'ocrspace'],
keywords=['python', 'parking', 'lot', 'moon', 'parkinglot'],
classifiers=[
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Operating System :: Unix",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
]
)
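# Packaging sketch (standard setuptools flow; commands are assumptions, not
# project scripts):
#   python -m build
#   twine upload dist/*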
|
PsychoRover/parking-lot
|
setup.py
|
setup.py
|
py
| 935 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "setuptools.setup",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 16,
"usage_type": "call"
}
] |
8246901300
|
"""
Module containing the rheologies, fault setup, and ODE cycles code
for the 2D subduction case.
"""
# general imports
import json
import configparser
import numpy as np
import pandas as pd
from scipy.integrate import solve_ivp
from numba import njit, objmode, float64, int64, boolean
from scipy.interpolate import interp1d
from warnings import warn
from abc import ABC
# seqeas imports
from .kernels2d import Glinedisp, Klinedisp
class Rheology(ABC):
"""
Abstract base class for rheologies.
"""
class NonlinearViscous(Rheology):
r"""
Implement a nonlinear viscous fault rheology, where the velocity :math:`v` is
:math:`v = \tau^n / \alpha_n` given the shear stress :math:`\tau`, a strength
constant :math:`\alpha_n`, and a constant exponent :math:`n`.
"""
def __init__(self, n, alpha_n, n_mid=None, alpha_n_mid=None, mid_transition=None,
n_deep=None, alpha_n_deep=None, deep_transition=None,
deep_transition_width=None, n_boundary=None, alpha_n_boundary=None):
r"""
Setup the rheology parameters for a given fault.
Parameters
----------
alpha_n : float
Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m]
n : float
            Power-law exponent :math:`n` [-]
        Notes
        -----
        The optional ``*_mid``, ``*_deep``, and ``*_boundary`` arguments, together
        with the transition depths and width, define log-linearly depth-varying
        values of both parameters.
        """
# input check
assert not np.logical_xor(deep_transition is None, deep_transition_width is None)
# set number of variables
self.n_vars = 2
""" Number of variables to track by rheology [-] """
# initialization
self._n = float(n)
self._n_mid = float(n_mid) if n_mid is not None else self.n
self._n_deep = float(n_deep) if n_deep is not None else self.n_mid
self.n_boundary = float(n_boundary) if n_boundary is not None else self.n_deep
""" Power-law exponent :math:`n` [-] """
self.alpha_n = float(alpha_n)
self.alpha_n_mid = (float(alpha_n_mid) if alpha_n_mid is not None
else self.alpha_n)
self.alpha_n_deep = (float(alpha_n_deep) if alpha_n_deep is not None
else self.alpha_n_mid)
self.alpha_n_boundary = (float(alpha_n_boundary) if alpha_n_boundary is not None
else self.alpha_n_deep)
r""" Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m] """
self.mid_transition = None if mid_transition is None else float(mid_transition)
""" Depth [m] for the middle transition point """
self.deep_transition = None if deep_transition is None else float(deep_transition)
""" (Upper) Depth [m] for the deep transition point """
self.deep_transition_width = (None if deep_transition_width is None
else float(deep_transition_width))
""" (Downdip) Width [m] of the deep transition point """
@property
def alpha_n(self):
r""" Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m] """
return self._alpha_n
@alpha_n.setter
def alpha_n(self, alpha_n):
self._alpha_n = float(alpha_n)
self._A = self.calc_A(self._alpha_n, self._n)
@property
def alpha_n_mid(self):
r""" Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m] """
return self._alpha_n_mid
@alpha_n_mid.setter
def alpha_n_mid(self, alpha_n_mid):
self._alpha_n_mid = float(alpha_n_mid)
self._A_mid = self.calc_A(self._alpha_n_mid, self._n_mid)
@property
def alpha_n_deep(self):
r""" Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m] """
return self._alpha_n_deep
@alpha_n_deep.setter
def alpha_n_deep(self, alpha_n_deep):
self._alpha_n_deep = float(alpha_n_deep)
self._A_deep = self.calc_A(self._alpha_n_deep, self._n_deep)
@property
def n(self):
""" Power-law exponent :math:`n` [-] """
return self._n
@n.setter
def n(self, n):
self._n = float(n)
self._A = self.calc_A(self._alpha_n, self._n)
@property
def n_mid(self):
""" Power-law exponent :math:`n` [-] """
return self._n_mid
@n_mid.setter
def n_mid(self, n_mid):
self._n_mid = float(n_mid)
self._A_mid = self.calc_A(self._alpha_n_mid, self._n_mid)
@property
def n_deep(self):
""" Power-law exponent :math:`n` [-] """
return self._n_deep
@n_deep.setter
def n_deep(self, n_deep):
self._n_deep = float(n_deep)
self._A_deep = self.calc_A(self._alpha_n_deep, self._n_deep)
@property
def A(self):
r""" Rescaled strength term :math:`A = \alpha_n^{1/n}` [Pa * (s/m)^(1/n)] """
return self._A
@property
def A_mid(self):
r""" Rescaled strength term :math:`A = \alpha_n^{1/n}` [Pa * (s/m)^(1/n)] """
return self._A_mid
@property
def A_deep(self):
r""" Rescaled strength term :math:`A = \alpha_n^{1/n}` [Pa * (s/m)^(1/n)] """
return self._A_deep
@staticmethod
def calc_A(alpha_n, n):
""" Calculate A from alpha_n and n """
return alpha_n ** (1 / n)
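    # Numeric check (sketch): with alpha_n = 1e12 Pa^n*s/m and n = 3,
    # calc_A returns (1e12)**(1/3) = 1e4 Pa*(s/m)^(1/3).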
def get_param_vectors(self, patch_depths, v_eff):
r"""
Calculate the depth-dependent arrays of :math:`\alpha_n`, :math:`n`, and :math:`A`,
assuming :math:`\alpha_n` and :math:`\alpha_{n,eff}` vary log-linearly with depth,
and :math:`n` adapts between the transition points.
"""
assert np.all(np.diff(patch_depths) >= 0)
# start knots list
knots = [patch_depths[0]]
vals_alpha_n = [self.alpha_n]
vals_n = [self.n]
# add optional mid transition
if self.mid_transition is not None:
knots.append(patch_depths[np.argmin(np.abs(patch_depths - self.mid_transition))])
vals_alpha_n.append(self.alpha_n_mid)
vals_n.append(self.n_mid)
# add optional deep transition
if self.deep_transition is not None:
knots.append(patch_depths[np.argmin(np.abs(patch_depths - self.deep_transition))])
vals_alpha_n.append(self.alpha_n_deep)
vals_n.append(self.n_deep)
knots.append(patch_depths[np.argmin(np.abs(patch_depths
- self.deep_transition
- self.deep_transition_width))])
vals_alpha_n.append(self.alpha_n_boundary)
vals_n.append(self.n_boundary)
# add final value
knots.append(patch_depths[-1])
vals_alpha_n.append(self.alpha_n_boundary)
vals_alpha_n = np.array(vals_alpha_n)
vals_n.append(self.n_boundary)
vals_n = np.array(vals_n)
vals_alpha_eff = SubductionSimulation.get_alpha_eff(vals_alpha_n, vals_n, v_eff)
# interpolate alpha_n and alpha_eff
alpha_n_vec = 10**interp1d(knots, np.log10(vals_alpha_n))(patch_depths)
alpha_eff_vec = 10**interp1d(knots, np.log10(vals_alpha_eff))(patch_depths)
# get n and A
n_vec = SubductionSimulation.get_n(alpha_n_vec, alpha_eff_vec, v_eff)
A_vec = alpha_n_vec ** (1 / n_vec)
return alpha_n_vec, n_vec, A_vec
class RateStateSteadyLogarithmic(Rheology):
r"""
Implement a steady-state rate-and-state rheology using the ageing law (effectively
becoming a rate-dependent rheology) with velocity in logarithmic space defined by
:math:`f_{ss} = f_0 + (a - b) * \zeta = \tau / \sigma_E`
where :math:`f_{ss}` is the steady-state friction, :math:`f_0` is a reference
friction, :math:`a` and :math:`b` are the rate-and-state frictional parameters,
:math:`\zeta = \log (v / v_0)` is the logarithmic velocity, :math:`\tau` is the shear
stress, and :math:`\sigma_E` is the effective fault normal stress.
"""
def __init__(self, v_0, alpha_h, alpha_h_mid=None, mid_transition=None,
alpha_h_deep=None, deep_transition=None, deep_transition_width=None,
alpha_h_boundary=None):
r"""
Setup the rheology parameters for a given fault.
Parameters
----------
v_0 : float
Reference velocity [m/s] used for the transformation into logarithmic space.
alpha_h : float
Rate-and-state parameter :math:`(a - b) * \sigma_E`,
where :math:`a` and :math:`b` [-] are the rate-and-state frictional properties,
            and :math:`\sigma_E` [Pa] is effective fault normal stress.
        Notes
        -----
        The optional ``alpha_h_mid``, ``alpha_h_deep``, and ``alpha_h_boundary``
        arguments, together with the transition depths and width, define a
        log-linearly depth-varying :math:`\alpha_h`.
        """
# input check
assert not np.logical_xor(deep_transition is None, deep_transition_width is None)
assert float(v_0) > 0, "RateStateSteadyLogarithmic needs to have positive v_0."
# set number of variables
self.n_vars = 2
""" Number of variables to track by rheology [-] """
# initialization
self.v_0 = float(v_0)
""" Reference velocity :math:`v_0` [m/s] """
self.alpha_h = float(alpha_h)
r""" Rate-and-state parameter :math:`(a - b) * \sigma_E` [Pa] """
self.alpha_h_mid = (float(alpha_h_mid) if alpha_h_mid is not None
else self.alpha_h)
r""" Middle rate-and-state parameter :math:`(a - b) * \sigma_E` [Pa] """
self.alpha_h_deep = (float(alpha_h_deep) if alpha_h_deep is not None
else self.alpha_h_mid)
r""" Deep rate-and-state parameter :math:`(a - b) * \sigma_E` [Pa] """
self.alpha_h_boundary = (float(alpha_h_boundary) if alpha_h_boundary is not None
else self.alpha_h_deep)
r""" Boundary-layer rate-and-state parameter :math:`(a - b) * \sigma_E` [Pa] """
self.mid_transition = None if mid_transition is None else float(mid_transition)
""" Depth [m] for the middle transition point """
self.deep_transition = None if deep_transition is None else float(deep_transition)
""" (Upper) Depth [m] for the deep transition point """
self.deep_transition_width = (None if deep_transition_width is None
else float(deep_transition_width))
""" (Downdip) Width [m] of the deep transition point """
def get_param_vectors(self, patch_depths):
r"""
Calculate the depth-dependent array of :math:`\alpha_h`, assuming it
varies log-linearly with depth.
"""
assert np.all(np.diff(patch_depths) >= 0)
# start knots list
knots = [patch_depths[0]]
vals_alpha_h = [self.alpha_h]
# add optional mid transition
if self.mid_transition is not None:
knots.append(patch_depths[np.argmin(np.abs(patch_depths - self.mid_transition))])
vals_alpha_h.append(self.alpha_h_mid)
# add optional deep transition
if self.deep_transition is not None:
knots.append(patch_depths[np.argmin(np.abs(patch_depths - self.deep_transition))])
vals_alpha_h.append(self.alpha_h_deep)
knots.append(patch_depths[np.argmin(np.abs(patch_depths
- self.deep_transition
- self.deep_transition_width))])
vals_alpha_h.append(self.alpha_h_boundary)
# add final value
knots.append(patch_depths[-1])
vals_alpha_h.append(self.alpha_h_boundary)
vals_alpha_h = np.array(vals_alpha_h)
# interpolate alpha_n and alpha_eff
alpha_h_vec = 10**interp1d(knots, np.log10(vals_alpha_h))(patch_depths)
return alpha_h_vec
@njit(float64[:](float64[:], float64[:], float64[:], float64[:]), cache=True)
def dvdt_plvis(dtaudt, v, A, n):
r"""
Calculate the velocity derivative for a power-law viscous rheology.
From :math:`v = \tau^n / \alpha_n` we get:
:math:`\frac{dv}{dt} = \frac{n}{\alpha_n} \tau^{n-1} \frac{d \tau}{dt}`
where
:math:`\tau^{n-1} = \left( \alpha_n v \right)^{\frac{n-1}{n}}`
simplifying to
:math:`\frac{dv}{dt} = \frac{n}{A} v^{1-\frac{1}{n}} \frac{d \tau}{dt}`
Parameters
----------
dtaudt : numpy.ndarray
1D array of the shear stress derivative
v : numpy.ndarray
1D array of the current velocity
A : numpy.ndarray
Rescaled nonlinear viscous rheology strength constant
n : numpy.ndarray
Power-law exponent
Returns
-------
dvdt : numpy.ndarray
1D array of the velocity derivative.
"""
signs = np.sign(v)
return (n / A) * (signs * v)**(1 - 1 / n) * dtaudt
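# Sanity check: for n = 1, A = alpha_n and the expression reduces to
# dv/dt = dtau/dt / alpha_n, i.e. the linear (Newtonian) viscous limit.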
@njit(float64[:](float64[:], float64[:]), cache=True)
def dzetadt_rdlog(dtaudt, alpha_h_vec):
r"""
Return the velocity derivative in logarithmic space given the current traction
rate in linear space.
Taking the derivative of the steady-state friction gives an explicit
formulation for the slip acceleration :math:`\frac{d \zeta}{dt}`:
:math:`\frac{df_{ss}}{dt} = (a-b) \frac{d \zeta}{dt}`
Recognizing that :math:`\tau = f_{ss} \sigma_E` and assuming
constant effective normal stress leads to
:math:`\frac{d \tau}{dt} = \sigma_E \frac{df_{ss}}{dt}`, which
can be rearranged to give the final expression
:math:`\frac{d \zeta}{dt} = \frac{1}{(a-b) \sigma_E} \frac{d \tau}{dt}`
Parameters
----------
dtaudt : numpy.ndarray
Traction derivative :math:`\frac{d \tau}{dt}` [Pa/s] in linear space
alpha_h_vec : float
Rate-and-state parameter :math:`(a - b) * \sigma_E`
Returns
-------
dzetadt : numpy.ndarray
Velocity derivative :math:`\frac{d \zeta}{dt}` [1/s] in logarithmic space.
"""
return dtaudt / alpha_h_vec
@njit(float64[:](float64[:], float64[:], float64[:], float64[:], float64[:]), cache=True)
def get_new_vel_plvis(v_minus, delta_tau, alpha_n, n, A):
r"""
Calculate the instantaneous velocity change due to an instantaneous stress change
to the fault patches. It is derived from:
:math:`\tau_{+} = \tau_{-} + \Delta \tau`
and plugging in the relationship :math:`v = \tau^n / \alpha_n`, we get
:math:`\sqrt[n]{\alpha_n v_{+}} = \sqrt[n]{\alpha_n v_{-}} + \Delta \tau`
and finally
:math:`v_{+} = \frac{\left( A \sqrt[n]{v_{-}} + \Delta \tau \right)^n}{\alpha_n}`
Parameters
----------
v_minus : numpy.ndarray
Initial velocity :math:`v_{-}` [m/s]
delta_tau : numpy.ndarray
Traction stress change :math:`\Delta \tau` [Pa]
alpha_n : numpy.ndarray
Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m]
n : numpy.ndarray
Power-law exponent :math:`n` [-]
A : numpy.ndarray
Rescaled strength term :math:`A = \alpha_n^{1/n}` [Pa * (s/m)^(1/n)]
Returns
-------
v_plus : numpy.ndarray
Velocity :math:`v_{+}` [m/s] after stress change
"""
signs = np.sign(v_minus)
temp = A * (signs * v_minus)**(1 / n) + (signs * delta_tau)
return np.abs(temp) ** (n - 1) * temp / alpha_n * signs
@njit(float64[:](float64[:], float64[:], float64[:]), cache=True)
def get_new_vel_rdlog(zeta_minus, delta_tau, alpha_h_vec):
r"""
Calculate the instantaneous velocity change (in logarithmic space) due to an
instantaneous stress change to the fault patches. We can kickstart the
    derivation from the expression in ``RateStateSteadyLinear.get_new_vel``:
:math:`\log (v_{+}/v_0) = \log (v_{-}/v_0) + \Delta\tau / \alpha_h`
and realize that we only have to plug in our definition for :math:`\zeta`
to give us the final result
:math:`\zeta_{+} = \zeta_{-} + \Delta\tau / \alpha_h`
Parameters
----------
zeta_minus : numpy.ndarray
Initial velocity :math:`\zeta_{-}` [-] in logarithmic space
delta_tau : numpy.ndarray, optional
Traction stress change :math:`\Delta \tau` [Pa]
alpha_h_vec : numpy.ndarray
Rate-and-state parameter :math:`(a - b) * \sigma_E`
Returns
-------
zeta_plus : numpy.ndarray
Velocity :math:`\zeta_{+}` [-] in logarithmic space after stress change
See Also
--------
alpha_h
"""
return zeta_minus + delta_tau / alpha_h_vec
@njit(float64[:](float64, float64[:], float64, float64[:, ::1], float64[:, ::1],
float64[:], float64[:], float64), cache=True)
def flat_ode_plvis(t, state, v_plate, K_int, K_ext, A_upper, n_upper, mu_over_2vs):
r"""
Flattened ODE derivative function for a subduction fault with
powerlaw-viscous rheology in the upper plate interface, and an imposed
constant plate velocity at the lower interface (which can be ignored).
Parameters
----------
t : float
Current time (needs to be in function call for solve_ivp).
state : numpy.ndarray
1D array with the current state of the creeping fault patches,
containing (in order) the upper cumulative slip and upper velocity.
v_plate : float
Plate velocity.
K_int : numpy.ndarray
2D array with the stress kernel mapping creeping patches to themselves.
K_ext : numpy.ndarray
2D array with the stress kernel mapping the effect of the locked
patches onto the creeping patches.
A_upper : numpy.ndarray
Upper plate interface rescaled nonlinear viscous rheology strength constant
n_upper : numpy.ndarray
Upper plate interface power-law exponent
mu_over_2vs : float
Radiation damping factor
Returns
-------
dstatedt : numpy.ndarray
1D array with the state derivative.
"""
# get number of variables within state
# (depends on rheology, so is hardcoded here)
n_vars_upper = 2
n_creeping_upper = state.size // n_vars_upper
assert K_int.shape == (n_creeping_upper, n_creeping_upper)
assert K_ext.shape[0] == n_creeping_upper
# extract total velocities
v = state[n_creeping_upper:]
# get shear strain rate
signs = np.sign(v)
temp = mu_over_2vs * (n_upper / A_upper) * (signs * v)**(1 - 1 / n_upper)
dtaudt = (K_int @ (v - v_plate) - np.sum(K_ext * v_plate, axis=1)
) / (1 + temp)
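    # the (1 + temp) denominator applies quasi-dynamic radiation damping
    # implicitly: dtau/dt includes -mu/(2*v_s) * dv/dt, and substituting
    # dv/dt = (n/A) * v**(1 - 1/n) * dtau/dt yields this closed form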
# get ODE
dstatedt = np.concatenate((v, dvdt_plvis(dtaudt, v, A_upper, n_upper)))
# return
return dstatedt
@njit(float64[:](float64, float64[:], float64, float64[:, ::1], float64[:, ::1],
float64, float64[:], float64), cache=True)
def flat_ode_rdlog(t, state, v_plate, K_int, K_ext, v_0, alpha_h_vec, mu_over_2vs):
r"""
Flattened ODE derivative function for a subduction fault with
    rate-dependent (log-space) rheology in the upper plate interface, and an imposed
constant plate velocity at the lower interface (which can be ignored).
Parameters
----------
t : float
Current time (needs to be in function call for solve_ivp).
state : numpy.ndarray
1D array with the current state of the creeping fault patches,
containing (in order) the upper cumulative slip and upper velocity.
v_plate : float
Plate velocity.
K_int : numpy.ndarray
2D array with the stress kernel mapping creeping patches to themselves.
K_ext : numpy.ndarray
2D array with the stress kernel mapping the effect of the locked
patches onto the creeping patches.
v_0 : float
Reference velocity [m/s]
alpha_h_vec : numpy.ndarray
Rate-and-state parameter :math:`(a - b) * \sigma_E`
mu_over_2vs : float
Radiation damping factor
Returns
-------
dstatedt : numpy.ndarray
1D array with the state derivative.
"""
# get number of variables within state
# (depends on rheology, so is hardcoded here)
n_vars_upper = 2
n_creeping_upper = state.size // n_vars_upper
assert K_int.shape == (n_creeping_upper, n_creeping_upper)
assert K_ext.shape[0] == n_creeping_upper
# extract total velocities
zeta = state[n_creeping_upper:]
v = v_0 * np.exp(zeta)
# get shear strain rate
temp = mu_over_2vs * v / alpha_h_vec
dtaudt = (K_int @ (v - v_plate) - np.sum(K_ext * v_plate, axis=1)
) / (1 + temp)
# get ODE
dstatedt = np.concatenate((v, dzetadt_rdlog(dtaudt, alpha_h_vec)))
# return
return dstatedt
@njit(float64[:](float64, float64[:], int64, float64[:], float64[:, ::1], float64[:, ::1],
float64, float64, float64, float64), cache=True)
def flat_ode_plvis_plvis(t, state, n_creeping_upper, v_plate_vec, K_int, K_ext,
A_upper, n_upper, A_lower, n_lower):
"""
Flattened ODE derivative function for a subduction fault with
powerlaw-viscous rheology in both the upper and lower plate interface.
Parameters
----------
t : float
Current time (needs to be in function call for solve_ivp).
state : numpy.ndarray
1D array with the current state of the creeping fault patches,
containing (in order) the upper cumulative slip, upper velocity,
lower cumulative slip, lower velocity.
n_creeping_upper : int
Number of creeping patches in the upper plate interface.
The number of creeping patches in the lower plate interface can then
be derived from the size of ``state``.
    v_plate_vec : numpy.ndarray
        Plate velocity for each creeping patch.
K_int : numpy.ndarray
2D array with the stress kernel mapping creeping patches to themselves.
K_ext : numpy.ndarray
2D array with the stress kernel mapping the effect of the locked
patches onto the creeping patches.
A_upper : float
Upper plate interface rescaled nonlinear viscous rheology strength constant
n_upper : float
Upper plate interface power-law exponent
A_lower : float
Lower plate interface rescaled nonlinear viscous rheology strength constant
n_lower : float
Lower plate interface power-law exponent
Returns
-------
dstatedt : numpy.ndarray
1D array with the state derivative.
"""
# get number of variables within state
# (depends on rheology, so is hardcoded here)
n_vars_upper, n_vars_lower = 2, 2
n_state_upper = n_vars_upper * n_creeping_upper
n_state_lower = state.size - n_state_upper
n_creeping_lower = n_state_lower // n_vars_lower
n_creeping = n_creeping_lower + n_creeping_upper
assert K_int.shape[0] == K_int.shape[1] == n_creeping
assert K_ext.shape[0] == n_creeping
# split up state
state_upper = state[:n_state_upper]
state_lower = state[n_state_upper:]
# extract total velocities
v_upper = state_upper[n_creeping_upper:]
v_lower = state_lower[n_creeping_lower:]
# get shear strain rate
v = np.concatenate((v_upper, v_lower))
dtaudt = (K_int @ (v - v_plate_vec) - np.sum(K_ext * v_plate_vec[0], axis=1))
dtaudt_upper = dtaudt[:n_creeping_upper]
dtaudt_lower = dtaudt[n_creeping_upper:]
# get individual rheologies' ODE
dstatedt_upper = \
np.concatenate((v_upper, dvdt_plvis(dtaudt_upper, v_upper,
np.ones_like(v_upper) * A_upper,
np.ones_like(v_upper) * n_upper)))
dstatedt_lower = \
np.concatenate((v_lower, dvdt_plvis(dtaudt_lower, v_lower,
np.ones_like(v_lower) * A_lower,
                                            np.ones_like(v_lower) * n_lower)))
# concatenate and return
return np.concatenate((dstatedt_upper, dstatedt_lower))
@njit(float64[:](float64, float64[:], int64, float64[:], float64[:, ::1], float64[:, ::1],
float64, float64, float64, float64), cache=True)
def flat_ode_rdlog_plvis(t, state, n_creeping_upper, v_plate_vec, K_int, K_ext,
v_0, alpha_h_upper, A_lower, n_lower):
r"""
Flattened ODE derivative function for a subduction fault with
rate-dependent (log-space) rheology in the upper and nonlinear viscous
rheology in the lower plate interface.
Parameters
----------
t : float
Current time (needs to be in function call for solve_ivp).
state : numpy.ndarray
1D array with the current state of the creeping fault patches,
containing (in order) the upper cumulative slip, upper velocity,
lower cumulative slip, lower velocity.
n_creeping_upper : int
Number of creeping patches in the upper plate interface.
The number of creeping patches in the lower plate interface can then
be derived from the size of ``state``.
v_plate_vec : numpy.ndarray
Plate velocity for all creeping patches [m/s]
K_int : numpy.ndarray
2D array with the stress kernel mapping creeping patches to themselves.
K_ext : numpy.ndarray
2D array with the stress kernel mapping the effect of the locked
patches onto the creeping patches.
v_0 : float
Reference velocity [m/s]
alpha_h_upper : float
Upper interface rate-and-state parameter :math:`(a - b) * \sigma_E` [Pa]
A_lower : float
Lower plate interface rescaled nonlinear viscous rheology strength constant
n_lower : float
Lower plate interface power-law exponent
Returns
-------
dstatedt : numpy.ndarray
1D array with the state derivative.
"""
# get number of variables within state
# (depends on rheology, so is hardcoded here)
n_vars_upper, n_vars_lower = 2, 2
n_state_upper = n_vars_upper * n_creeping_upper
n_state_lower = state.size - n_state_upper
n_creeping_lower = n_state_lower // n_vars_lower
n_creeping = n_creeping_lower + n_creeping_upper
assert K_int.shape[0] == K_int.shape[1] == n_creeping
assert K_ext.shape[0] == n_creeping
# split up state
state_upper = state[:n_state_upper]
state_lower = state[n_state_upper:]
# extract total velocities
v_upper = v_0 * np.exp(state_upper[n_creeping_upper:])
v_lower = state_lower[n_creeping_lower:]
# get shear strain rate
v = np.concatenate((v_upper, v_lower))
dtaudt = (K_int @ (v - v_plate_vec) - np.sum(K_ext * v_plate_vec[0], axis=1))
dtaudt_upper = dtaudt[:n_creeping_upper]
dtaudt_lower = dtaudt[n_creeping_upper:]
# get individual rheologies' ODE
dstatedt_upper = \
np.concatenate((v_upper, dzetadt_rdlog(dtaudt_upper,
np.ones_like(v_upper) * alpha_h_upper)))
dstatedt_lower = \
np.concatenate((v_lower, dvdt_plvis(dtaudt_lower, v_lower,
np.ones_like(v_lower) * A_lower,
np.ones_like(v_lower) * n_lower)))
# concatenate and return
return np.concatenate((dstatedt_upper, dstatedt_lower))
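# Sketch of the log-velocity convention used by the rate-dependent interface:
# the state stores zeta = log(v / v_0), so velocities are recovered with
# v = v_0 * exp(zeta). The values here are hypothetical.
def _example_zeta_convention():
    v_0 = 1e-6                 # hypothetical reference velocity [m/s]
    v = np.array([1e-9, 1e-8])
    zeta = np.log(v / v_0)
    assert np.allclose(v_0 * np.exp(zeta), v)
    return zeta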
# simple rk4
@njit(float64[:, :](float64, float64, float64[:], float64[:], int64, float64[:],
float64[:, ::1], float64[:, ::1], float64, float64, float64, float64),
cache=True)
def myrk4(ti, tf, state0, t_eval, n_creeping_upper, v_plate_vec,
K_int, K_ext, A_upper, n_upper, A_lower, n_lower):
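"""
Simple fixed-step classic Runge-Kutta (RK4) integrator for
``flat_ode_plvis_plvis``. Assumes ``t_eval`` is uniformly spaced,
since the step size is taken from its first interval.
"""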
h = t_eval[1] - t_eval[0]
num_state = state0.size
num_eval = t_eval.size
sol = np.zeros((num_eval, num_state))
sol[0, :] = state0
# classic fixed-step RK4; the ODE is autonomous (the derivative function
# ignores its time argument), so passing ti at every stage is harmless
for i in range(1, num_eval):
cur = sol[i-1, :]
k1 = flat_ode_plvis_plvis(ti, cur, n_creeping_upper, v_plate_vec, K_int, K_ext,
A_upper, n_upper, A_lower, n_lower)
cur = sol[i-1, :] + (h / 2) * k1
k2 = flat_ode_plvis_plvis(ti, cur, n_creeping_upper, v_plate_vec, K_int, K_ext,
A_upper, n_upper, A_lower, n_lower)
cur = sol[i-1, :] + (h / 2) * k2
k3 = flat_ode_plvis_plvis(ti, cur, n_creeping_upper, v_plate_vec, K_int, K_ext,
A_upper, n_upper, A_lower, n_lower)
cur = sol[i-1, :] + h * k3
k4 = flat_ode_plvis_plvis(ti, cur, n_creeping_upper, v_plate_vec, K_int, K_ext,
A_upper, n_upper, A_lower, n_lower)
sol[i, :] = sol[i-1, :] + (h / 6) * (k1 + 2 * k2 + 2 * k3 + k4)
return sol
@njit(float64[:, :](float64[:], int64[:], int64[:], int64, int64, float64[:, ::1],
float64[:, ::1], float64[:], float64[:], float64[:, ::1], float64[:, ::1],
float64[:], float64[:], float64[:], float64), cache=True)
def flat_run_plvis(t_eval, i_break, i_eq,
n_creeping_upper, n_creeping_lower, K_int, K_ext,
v_plate_vec, v_init, slip_taper, delta_tau_bounded,
alpha_n_vec, n_vec, A_vec, mu_over_2vs):
r"""
Run the simulation.
Parameters
----------
t_eval : numpy.ndarray
Evaluation times [s]
i_break : numpy.ndarray
Integer indices of cycle breaks [-]
i_eq : numpy.ndarray
Integer indices of earthquakes within sequence [-]
n_creeping_upper : int
Number [-] of creeping patches in the upper fault interface
n_creeping_lower : int
Number [-] of creeping patches in the lower fault interface
K_int : numpy.ndarray
Internal stress kernel [Pa/m]
K_ext : numpy.ndarray
External stress kernel [Pa/m]
v_plate_vec : numpy.ndarray
Plate velocity for all creeping patches [m/s]
v_init : numpy.ndarray
Initial velocity of the fault patches, in the dimensions of the rheology
slip_taper : numpy.ndarray
Compensating coseismic tapered slip on creeping patches [m]
delta_tau_bounded : numpy.ndarray
Bounded coseismic stress change [Pa]
alpha_n_vec : numpy.ndarray
Upper plate interface nonlinear viscous rheology strength constant [Pa^n * s/m]
at each patch
n_vec : numpy.ndarray
Upper plate interface power-law exponent [-] at each patch
A_vec : numpy.ndarray
Rescaled upper plate interface nonlinear viscous rheology strength constant
[Pa * (s/m)^(1/n)] at each patch
mu_over_2vs : float
Radiation damping factor :math:`\mu / 2 v_s`, where :math:`\mu` is the shear
modulus [Pa] and :math:`v_s` is the shear wave velocity [m/s]
Returns
-------
full_state : numpy.ndarray
Timeseries of the full state variable at all evaluation timesteps.
"""
# initialize parameters
n_vars_upper, n_vars_lower = 2, 2
n_state_upper = n_vars_upper * n_creeping_upper
n_state_lower = n_vars_lower * n_creeping_lower
n_eval = t_eval.size
n_slips = delta_tau_bounded.shape[1]
# initialize arrays
s_minus_upper = np.zeros((n_vars_upper - 1) * n_creeping_upper)
s_minus_lower = np.zeros(n_creeping_lower)
v_minus_upper = v_init[:n_creeping_upper]
v_minus_lower = v_plate_vec[n_creeping_upper:]
full_state = np.empty((n_state_upper + n_state_lower, n_eval))
full_state[:] = np.NaN
state_plus = np.concatenate((s_minus_upper, v_minus_upper, s_minus_lower, v_minus_lower))
# make flat ODE function arguments
args = (v_plate_vec[0], K_int[:n_creeping_upper, :n_creeping_upper].copy(),
K_ext[:n_creeping_upper, :], A_vec, n_vec, mu_over_2vs)
# integrate
spun_up = 0
i_slip = 0
steps = np.sort(np.concatenate((i_eq, i_break)))
i = 0
atol = np.ones(n_state_upper) * 1e-6
atol[n_creeping_upper:] = 1e-15
while i < steps.size - 1:
# print(f"{i+1}/{steps.size - 1}")
# get indices
ji, jf = steps[i], steps[i+1]
ti, tf = t_eval[ji], t_eval[jf]
# call integrator
with objmode(sol="float64[:, :]", success="boolean"):
sol = solve_ivp(flat_ode_plvis,
t_span=[ti, tf],
y0=state_plus[:n_state_upper],
t_eval=t_eval[ji:jf + 1],
method="LSODA", rtol=1e-6, atol=atol, args=args)
success = sol.success
if success:
sol = sol.y
else:
sol = np.empty((1, 1))
if not success:
raise RuntimeError("Integrator failed.")
# save state to output array
full_state[:n_state_upper, ji:jf + 1] = sol
# fill in the imposed lower state
full_state[n_state_upper:n_state_upper + n_creeping_lower, ji:jf + 1] = \
np.ascontiguousarray(v_plate_vec[n_creeping_upper:]).reshape((-1, 1)) \
* np.ascontiguousarray(t_eval[ji:jf + 1]).reshape((1, -1))
full_state[n_state_upper + n_creeping_lower:, ji:jf + 1] = \
np.ascontiguousarray(v_plate_vec[n_creeping_upper:]).reshape((-1, 1))
# can already stop here if this is the last interval
if i == steps.size - 2:
break
# at the end of a full cycle, check the early stopping criteria
if (not spun_up) and (i > n_slips) and (jf in i_break):
old_full_state = full_state[:, steps[i-2*n_slips-1]:steps[i-n_slips]]
new_full_state = full_state[:, steps[i-n_slips]:steps[i+1]]
old_state_upper = old_full_state[:n_state_upper, :]
new_state_upper = new_full_state[:n_state_upper, :]
old_v_upper = old_state_upper[-n_creeping_upper:, -1]
new_v_upper = new_state_upper[-n_creeping_upper:, -1]
lhs_upper = np.abs(old_v_upper - new_v_upper)
rhs_upper = (1e-3) * np.abs(v_plate_vec[0]) + (1e-3) * np.abs(new_v_upper)
stop_now = np.all(lhs_upper <= rhs_upper)
if stop_now:
spun_up = jf
# advance i to the last cycle (don't forget the general advance later)
i = steps.size - n_slips - 3
elif spun_up and (jf in i_break):
break
# apply step change only if there is one
if (jf in i_eq):
# note: sol only contains the upper-interface state here, so the
# "lower" slices below are empty and only kept for symmetry
state_upper, state_lower = sol[:n_state_upper, -1], sol[n_state_upper:, -1]
s_minus_upper = state_upper[:-n_creeping_upper]
v_minus_upper = state_upper[-n_creeping_upper:]
s_minus_lower = state_lower[:-n_creeping_lower]
v_minus_lower = state_lower[-n_creeping_lower:]
s_plus_upper = s_minus_upper.ravel().copy()
s_plus_upper[:n_creeping_upper] += slip_taper[:, i_slip]
s_plus_lower = s_minus_lower.ravel()
v_plus_upper = get_new_vel_plvis(v_minus_upper,
delta_tau_bounded[:n_creeping_upper, i_slip],
alpha_n_vec, n_vec, A_vec)
v_plus_lower = v_minus_lower.ravel()
state_plus = np.concatenate((s_plus_upper, v_plus_upper,
s_plus_lower, v_plus_lower))
i_slip = (i_slip + 1) % n_slips
else:
state_plus = sol[:, -1]
# advance
i += 1
# warn if we never spun up
if not spun_up:
print(f"Simulation did not spin up after {len(i_break) - 1} cycles!")
# done
return full_state
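# A standalone sketch (not part of the original code) of the spin-up test used
# in the loop above: two consecutive cycles count as converged when their
# end-of-cycle velocities agree within a combined absolute/relative tolerance
# tied to the plate velocity (1e-3 in flat_run_plvis and flat_run_rdlog).
def _example_spinup_converged(old_v, new_v, v_plate, tol=1e-3):
    lhs = np.abs(old_v - new_v)
    rhs = tol * np.abs(v_plate) + tol * np.abs(new_v)
    return bool(np.all(lhs <= rhs))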
@njit(float64[:, :](float64[:], int64[:], int64[:], int64, int64, float64[:, ::1],
float64[:, ::1], float64[:], float64[:], float64[:, ::1], float64[:, ::1],
float64, float64[:], float64), cache=True)
def flat_run_rdlog(t_eval, i_break, i_eq,
n_creeping_upper, n_creeping_lower, K_int, K_ext,
v_plate_vec, v_init, slip_taper, delta_tau_bounded,
v_0, alpha_h_vec, mu_over_2vs):
r"""
Run the simulation.
Parameters
----------
t_eval : numpy.ndarray
Evaluation times [s]
i_break : numpy.ndarray
Integer indices of cycle breaks [-]
i_eq : numpy.ndarray
Integer indices of earthquakes within sequence [-]
n_creeping_upper : int
Number [-] of creeping patches in the upper fault interface
n_creeping_lower : int
Number [-] of creeping patches in the lower fault interface
K_int : numpy.ndarray
Internal stress kernel [Pa/m]
K_ext : numpy.ndarray
External stress kernel [Pa/m]
v_plate_vec : numpy.ndarray
Plate velocity for all creeping patches [m/s]
v_init : numpy.ndarray
Initial velocity of the fault patches, in the dimensions of the rheology
slip_taper : numpy.ndarray
Compensating coseismic tapered slip on creeping patches [m]
delta_tau_bounded : numpy.ndarray
Bounded coseismic stress change [Pa]
v_0 : float
Reference velocity [m/s]
alpha_h_vec : numpy.ndarray
Upper interface rate-and-state parameter :math:`(a - b) * \sigma_E` [Pa]
mu_over_2vs : float
Radiation damping factor :math:`\mu / 2 v_s`, where :math:`\mu` is the shear
modulus [Pa] and :math:`v_s` is the shear wave velocity [m/s]
Returns
-------
full_state : numpy.ndarray
Timeseries of the full state variable at all evaluation timesteps.
"""
# initialize parameters
n_vars_upper, n_vars_lower = 2, 2
n_state_upper = n_vars_upper * n_creeping_upper
n_state_lower = n_vars_lower * n_creeping_lower
n_eval = t_eval.size
n_slips = delta_tau_bounded.shape[1]
# initialize arrays
s_minus_upper = np.zeros((n_vars_upper - 1) * n_creeping_upper)
s_minus_lower = np.zeros(n_creeping_lower)
assert np.all(v_init[:n_creeping_upper] > 0)
zeta_minus_upper = np.log(v_init[:n_creeping_upper] / v_0)
v_minus_lower = v_plate_vec[n_creeping_upper:]
full_state = np.empty((n_state_upper + n_state_lower, n_eval))
full_state[:] = np.NaN
state_plus = np.concatenate((s_minus_upper, zeta_minus_upper,
s_minus_lower, v_minus_lower))
# make flat ODE function arguments
args = (v_plate_vec[0], K_int[:n_creeping_upper, :n_creeping_upper].copy(),
K_ext[:n_creeping_upper, :], v_0, alpha_h_vec, mu_over_2vs)
# integrate
spun_up = 0
i_slip = 0
steps = np.sort(np.concatenate((i_eq, i_break)))
i = 0
while i < steps.size - 1:
# print(f"{i+1}/{steps.size - 1}")
# get indices
ji, jf = steps[i], steps[i+1]
ti, tf = t_eval[ji], t_eval[jf]
# call integrator
with objmode(sol="float64[:, :]", success="boolean"):
sol = solve_ivp(flat_ode_rdlog,
t_span=[ti, tf],
y0=state_plus[:n_state_upper],
t_eval=t_eval[ji:jf + 1],
method="LSODA", args=args)
success = sol.success
if success:
sol = sol.y
else:
sol = np.empty((1, 1))
if not success:
raise RuntimeError("Integrator failed.")
# save state to output array
full_state[:n_state_upper, ji:jf + 1] = sol
# fill in the imposed lower state
full_state[n_state_upper:n_state_upper + n_creeping_lower, ji:jf + 1] = \
np.ascontiguousarray(v_plate_vec[n_creeping_upper:]).reshape((-1, 1)) \
* np.ascontiguousarray(t_eval[ji:jf + 1]).reshape((1, -1))
full_state[n_state_upper + n_creeping_lower:, ji:jf + 1] = \
np.ascontiguousarray(v_plate_vec[n_creeping_upper:]).reshape((-1, 1))
# can already stop here if this is the last interval
if i == steps.size - 2:
break
# at the end of a full cycle, check the early stopping criteria
if (not spun_up) and (i > n_slips) and (jf in i_break):
old_full_state = full_state[:, steps[i-2*n_slips-1]:steps[i-n_slips]]
new_full_state = full_state[:, steps[i-n_slips]:steps[i+1]]
old_state_upper = old_full_state[:n_state_upper, :]
new_state_upper = new_full_state[:n_state_upper, :]
old_v_upper = v_0 * np.exp(old_state_upper[-n_creeping_upper:, -1])
new_v_upper = v_0 * np.exp(new_state_upper[-n_creeping_upper:, -1])
lhs_upper = np.abs(old_v_upper - new_v_upper)
rhs_upper = (1e-3) * np.abs(v_plate_vec[0]) + (1e-3) * np.abs(new_v_upper)
stop_now = np.all(lhs_upper <= rhs_upper)
if stop_now:
spun_up = jf
# advance i to the last cycle (don't forget the general advance later)
i = steps.size - n_slips - 3
elif spun_up and (jf in i_break):
break
# apply step change only if there is one
if (jf in i_eq):
# note: sol only contains the upper-interface state here, so the
# "lower" slices below are empty and only kept for symmetry
state_upper, state_lower = sol[:n_state_upper, -1], sol[n_state_upper:, -1]
s_minus_upper = state_upper[:-n_creeping_upper]
zeta_minus_upper = state_upper[-n_creeping_upper:]
s_minus_lower = state_lower[:-n_creeping_lower]
v_minus_lower = state_lower[-n_creeping_lower:]
s_plus_upper = s_minus_upper.ravel().copy()
s_plus_upper[:n_creeping_upper] += slip_taper[:, i_slip]
s_plus_lower = s_minus_lower.ravel()
zeta_plus_upper = get_new_vel_rdlog(zeta_minus_upper,
delta_tau_bounded[:n_creeping_upper, i_slip],
alpha_h_vec)
v_plus_lower = v_minus_lower.ravel()
state_plus = np.concatenate((s_plus_upper, zeta_plus_upper,
s_plus_lower, v_plus_lower))
i_slip = (i_slip + 1) % n_slips
else:
state_plus = sol[:, -1]
# advance
i += 1
# warn if we never spun up
if not spun_up:
print(f"Simulation did not spin up after {len(i_break) - 1} cycles!")
full_state[n_creeping_upper:n_state_upper, :] = \
v_0 * np.exp(full_state[n_creeping_upper:n_state_upper, :])
# done
return full_state
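# Hedged sketch of the coseismic velocity update in log space: assuming the
# steady-state rate dependence v = v_0 * exp(tau / alpha_h) that underlies
# dzetadt_rdlog, an instantaneous stress step delta_tau shifts the state by
# delta_tau / alpha_h (this mirrors what get_new_vel_rdlog is expected to do).
def _example_coseismic_step_rdlog(zeta_minus, delta_tau, alpha_h):
    return zeta_minus + delta_tau / alpha_h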
@njit(float64[:, :](float64[:], int64[:], int64[:], int64, int64, float64[:, ::1],
float64[:, ::1], float64[:], float64[:], float64[:, ::1], float64[:, ::1],
float64, float64, float64, float64, boolean), cache=True)
def flat_run_plvis_plvis(t_eval, i_break, i_eq,
n_creeping_upper, n_creeping_lower, K_int, K_ext,
v_plate_vec, v_init, slip_taper, delta_tau_bounded,
alpha_n_upper, n_upper, alpha_n_lower, n_lower,
simple_rk4):
"""
Run the simulation.
Parameters
----------
t_eval : numpy.ndarray
Evaluation times [s]
i_break : numpy.ndarray
Integer indices of cycle breaks [-]
i_eq : numpy.ndarray
Integer indices of earthquakes within sequence [-]
n_creeping_upper : int
Number [-] of creeping patches in the upper fault interface
n_creeping_lower : int
Number [-] of creeping patches in the lower fault interface
K_int : numpy.ndarray
Internal stress kernel [Pa/m]
K_ext : numpy.ndarray
External stress kernel [Pa/m]
v_plate_vec : numpy.ndarray
Plate velocity for all creeping patches [m/s]
v_init : numpy.ndarray
Initial velocity of the fault patches, in the dimensions of the rheology
slip_taper : numpy.ndarray
Compensating coseismic tapered slip on creeping patches [m]
delta_tau_bounded : numpy.ndarray
Bounded coseismic stress change [Pa]
alpha_n_upper : float
Upper plate interface nonlinear viscous rheology strength constant [Pa^n * s/m]
n_upper : float
Upper plate interface power-law exponent [-]
alpha_n_lower : float
Lower plate interface nonlinear viscous rheology strength constant [Pa^n * s/m]
n_lower : float
Lower plate interface power-law exponent [-]
simple_rk4 : bool
Whether to use the simple fixed-step RK4 integrator instead of the
adaptive ``solve_ivp`` solver
Returns
-------
full_state : numpy.ndarray
Timeseries of the full state variable at all evaluation timesteps.
"""
# initialize parameters
n_vars_upper, n_vars_lower = 2, 2
n_state_upper = n_vars_upper * n_creeping_upper
n_state_lower = n_vars_lower * n_creeping_lower
A_upper = alpha_n_upper ** (1 / n_upper)
A_lower = alpha_n_lower ** (1 / n_lower)
n_eval = t_eval.size
n_slips = delta_tau_bounded.shape[1]
# initialize arrays
s_minus_upper = np.zeros((n_vars_upper - 1) * n_creeping_upper)
s_minus_lower = np.zeros(n_creeping_lower)
v_minus_upper = v_init[:n_creeping_upper]
# if isinstance(self.fault.upper_rheo, rheologies.RateStateSteadyLogarithmic):
# v_minus_upper = self.fault.upper_rheo.v2zeta(v_minus_upper)
v_minus_lower = v_init[n_creeping_upper:]
full_state = np.empty((n_state_upper + n_state_lower, n_eval))
full_state[:] = np.NaN
state_plus = np.concatenate((s_minus_upper, v_minus_upper, s_minus_lower, v_minus_lower))
# make flat ODE function arguments
args = (n_creeping_upper, v_plate_vec, K_int, K_ext,
A_upper, n_upper, A_lower, n_lower)
# integrate
spun_up = 0
i_slip = 0
steps = np.sort(np.concatenate((i_eq, i_break)))
i = 0
while i < steps.size - 1:
# get indices
ji, jf = steps[i], steps[i+1]
ti, tf = t_eval[ji], t_eval[jf]
# call integrator
if simple_rk4:
sol = myrk4(ti, tf, state_plus, t_eval[ji:jf + 1], *args).T
else:
with objmode(sol="float64[:, :]", success="boolean"):
sol = solve_ivp(flat_ode_plvis_plvis,
t_span=[ti, tf],
y0=state_plus,
t_eval=t_eval[ji:jf + 1],
method="RK45", rtol=1e-9, atol=1e-12, args=args)
success = sol.success
sol = sol.y
if not success:
raise RuntimeError("Integrator failed.")
# save state to output array
full_state[:, ji:jf + 1] = sol
# can already stop here if this is the last interval
if i == steps.size - 2:
break
# at the end of a full cycle, check the early stopping criteria
if (not spun_up) and (i > n_slips) and (jf in i_break):
old_full_state = full_state[:, steps[i-2*n_slips-1]:steps[i-n_slips]]
new_full_state = full_state[:, steps[i-n_slips]:steps[i+1]]
old_state_upper = old_full_state[:n_state_upper, :]
old_state_lower = old_full_state[n_state_upper:, :]
new_state_upper = new_full_state[:n_state_upper, :]
new_state_lower = new_full_state[n_state_upper:, :]
old_v_upper = old_state_upper[-n_creeping_upper:, -1]
old_v_lower = old_state_lower[-n_creeping_lower:, -1]
new_v_upper = new_state_upper[-n_creeping_upper:, -1]
new_v_lower = new_state_lower[-n_creeping_lower:, -1]
# if isinstance(self.fault.upper_rheo, rheologies.RateStateSteadyLogarithmic):
# old_v_upper = self.fault.upper_rheo.zeta2v(old_v_upper)
# new_v_upper = self.fault.upper_rheo.zeta2v(new_v_upper)
lhs_upper = np.abs(old_v_upper - new_v_upper)
lhs_lower = np.abs(old_v_lower - new_v_lower)
rhs_upper = (1e-4) * np.abs(v_plate_vec[0]) + (1e-4) * np.abs(new_v_upper)
rhs_lower = (1e-4) * np.abs(v_plate_vec[-1]) + (1e-4) * np.abs(new_v_lower)
stop_now = np.all(lhs_upper <= rhs_upper) & np.all(lhs_lower <= rhs_lower)
if stop_now:
spun_up = jf
# advance i to the last cycle (don't forget the general advance later)
i = steps.size - n_slips - 3
elif spun_up and (jf in i_break):
break
# apply step change only if there is one
if (jf in i_eq):
state_upper, state_lower = sol[:n_state_upper, -1], sol[n_state_upper:, -1]
s_minus_upper = state_upper[:-n_creeping_upper]
v_minus_upper = state_upper[-n_creeping_upper:]
s_minus_lower = state_lower[:-n_creeping_lower]
v_minus_lower = state_lower[-n_creeping_lower:]
s_plus_upper = s_minus_upper.ravel().copy()
s_plus_upper[:n_creeping_upper] += slip_taper[:, i_slip]
s_plus_lower = s_minus_lower.ravel()
v_plus_upper = get_new_vel_plvis(v_minus_upper,
delta_tau_bounded[:n_creeping_upper, i_slip],
np.ones(n_creeping_upper) * alpha_n_upper,
np.ones(n_creeping_upper) * n_upper,
np.ones(n_creeping_upper) * A_upper)
v_plus_lower = get_new_vel_plvis(v_minus_lower,
delta_tau_bounded[n_creeping_upper:, i_slip],
np.ones(n_creeping_lower) * alpha_n_lower,
np.ones(n_creeping_lower) * n_lower,
np.ones(n_creeping_lower) * A_lower)
state_plus = np.concatenate((s_plus_upper, v_plus_upper,
s_plus_lower, v_plus_lower))
i_slip = (i_slip + 1) % n_slips
else:
state_plus = sol[:, -1]
# advance
i += 1
# warn if we never spun up
if not spun_up:
print(f"Simulation did not spin up after {len(i_break) - 1} cycles!")
# if isinstance(self.fault.upper_rheo, rheologies.RateStateSteadyLogarithmic):
# vel_upper = self.fault.upper_rheo.zeta2v(vel_upper)
# done
return full_state
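# Sketch of the rescaling A = alpha_n**(1/n) used above: with the nonlinear
# viscous steady state tau = (alpha_n * v)**(1/n) (the same relation used for
# delta_tau_max_from_v_max further below), the rescaled constant gives
# tau = A * v**(1/n). All values here are hypothetical.
def _example_plvis_rescaling(alpha_n=1e17, n=3.0, v=1e-9):
    A = alpha_n ** (1 / n)
    tau = A * v ** (1 / n)
    assert np.isclose(tau ** n / alpha_n, v)
    return tau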
@njit(float64[:, :](float64[:], int64[:], int64[:], int64, int64, float64[:, ::1],
float64[:, ::1], float64[:], float64[:], float64[:, ::1], float64[:, ::1],
float64, float64, float64, float64, boolean), cache=True)
def flat_run_rdlog_plvis(t_eval, i_break, i_eq,
n_creeping_upper, n_creeping_lower, K_int, K_ext,
v_plate_vec, v_init, slip_taper, delta_tau_bounded,
v_0, alpha_h_upper, alpha_n_lower, n_lower,
simple_rk4):
r"""
Run the simulation.
Parameters
----------
t_eval : numpy.ndarray
Evaluation times [s]
i_break : numpy.ndarray
Integer indices of cycle breaks [-]
i_eq : numpy.ndarray
Integer indices of earthquakes within sequence [-]
n_creeping_upper : int
Number [-] of creeping patches in the upper fault interface
n_creeping_lower : int
Number [-] of creeping patches in the lower fault interface
K_int : numpy.ndarray
Internal stress kernel [Pa/m]
K_ext : numpy.ndarray
External stress kernel [Pa/m]
v_plate_vec : numpy.ndarray
Plate velocity for all creeping patches [m/s]
v_init : numpy.ndarray
Initial velocity of the fault patches, in the dimensions of the rheology
slip_taper : numpy.ndarray
Compensating coseismic tapered slip on creeping patches [m]
delta_tau_bounded : numpy.ndarray
Bounded coseismic stress change [Pa]
v_0 : float
Reference velocity [m/s]
alpha_h_upper : float
Upper interface rate-and-state parameter :math:`(a - b) * \sigma_E` [Pa]
alpha_n_lower : float
Lower plate interface nonlinear viscous rheology strength constant [Pa^n * s/m]
n_lower : float
Lower plate interface power-law exponent [-]
simple_rk4 : bool
Whether to use the simple fixed-step RK4 integrator instead of the
adaptive ``solve_ivp`` solver
Returns
-------
full_state : numpy.ndarray
Timeseries of the full state variable at all evaluation timesteps.
"""
# initialize parameters
n_vars_upper, n_vars_lower = 2, 2
n_state_upper = n_vars_upper * n_creeping_upper
n_state_lower = n_vars_lower * n_creeping_lower
A_lower = alpha_n_lower ** (1 / n_lower)
n_eval = t_eval.size
n_slips = delta_tau_bounded.shape[1]
# initialize arrays
s_minus_upper = np.zeros((n_vars_upper - 1) * n_creeping_upper)
s_minus_lower = np.zeros(n_creeping_lower)
assert np.all(v_init[:n_creeping_upper] > 0)
v_minus_upper = np.log(v_init[:n_creeping_upper] / v_0)
# if isinstance(self.fault.upper_rheo, rheologies.RateStateSteadyLogarithmic):
# v_minus_upper = self.fault.upper_rheo.v2zeta(v_minus_upper)
v_minus_lower = v_init[n_creeping_upper:]
full_state = np.empty((n_state_upper + n_state_lower, n_eval))
full_state[:] = np.NaN
state_plus = np.concatenate((s_minus_upper, v_minus_upper, s_minus_lower, v_minus_lower))
# make flat ODE function arguments
args = (n_creeping_upper, v_plate_vec, K_int, K_ext,
v_0, alpha_h_upper, A_lower, n_lower)
# integrate
spun_up = 0
i_slip = 0
steps = np.sort(np.concatenate((i_eq, i_break)))
i = 0
while i < steps.size - 1:
# get indices
ji, jf = steps[i], steps[i+1]
ti, tf = t_eval[ji], t_eval[jf]
# call integrator
if simple_rk4:
sol = myrk4(ti, tf, state_plus, t_eval[ji:jf + 1], *args).T
else:
with objmode(sol="float64[:, :]", success="boolean"):
sol = solve_ivp(flat_ode_rdlog_plvis,
t_span=[ti, tf],
y0=state_plus,
t_eval=t_eval[ji:jf + 1],
method="RK45", rtol=1e-9, atol=1e-12, args=args)
success = sol.success
sol = sol.y
if not success:
raise RuntimeError("Integrator failed.")
# save state to output array
full_state[:, ji:jf + 1] = sol
# can already stop here if this is the last interval
if i == steps.size - 2:
break
# at the end of a full cycle, check the early stopping criteria
if (not spun_up) and (i > n_slips) and (jf in i_break):
old_full_state = full_state[:, steps[i-2*n_slips-1]:steps[i-n_slips]]
new_full_state = full_state[:, steps[i-n_slips]:steps[i+1]]
old_state_upper = old_full_state[:n_state_upper, :]
old_state_lower = old_full_state[n_state_upper:, :]
new_state_upper = new_full_state[:n_state_upper, :]
new_state_lower = new_full_state[n_state_upper:, :]
old_v_upper = v_0 * np.exp(old_state_upper[-n_creeping_upper:, -1])
old_v_lower = old_state_lower[-n_creeping_lower:, -1]
new_v_upper = v_0 * np.exp(new_state_upper[-n_creeping_upper:, -1])
new_v_lower = new_state_lower[-n_creeping_lower:, -1]
# if isinstance(self.fault.upper_rheo, rheologies.RateStateSteadyLogarithmic):
# old_v_upper = self.fault.upper_rheo.zeta2v(old_v_upper)
# new_v_upper = self.fault.upper_rheo.zeta2v(new_v_upper)
lhs_upper = np.abs(old_v_upper - new_v_upper)
lhs_lower = np.abs(old_v_lower - new_v_lower)
rhs_upper = (1e-4) * np.abs(v_plate_vec[0]) + (1e-4) * np.abs(new_v_upper)
rhs_lower = (1e-4) * np.abs(v_plate_vec[-1]) + (1e-4) * np.abs(new_v_lower)
stop_now = np.all(lhs_upper <= rhs_upper) & np.all(lhs_lower <= rhs_lower)
if stop_now:
spun_up = jf
# advance i to the last cycle (don't forget the general advance later)
i = steps.size - n_slips - 3
elif spun_up and (jf in i_break):
break
# apply step change only if there is one
if (jf in i_eq):
state_upper, state_lower = sol[:n_state_upper, -1], sol[n_state_upper:, -1]
s_minus_upper = state_upper[:-n_creeping_upper]
zeta_minus_upper = state_upper[-n_creeping_upper:]
s_minus_lower = state_lower[:-n_creeping_lower]
v_minus_lower = state_lower[-n_creeping_lower:]
s_plus_upper = s_minus_upper.ravel().copy()
s_plus_upper[:n_creeping_upper] += slip_taper[:, i_slip]
s_plus_lower = s_minus_lower.ravel()
zeta_plus_upper = get_new_vel_rdlog(zeta_minus_upper,
delta_tau_bounded[:n_creeping_upper, i_slip],
np.ones(n_creeping_upper) * alpha_h_upper)
v_plus_lower = get_new_vel_plvis(v_minus_lower,
delta_tau_bounded[n_creeping_upper:, i_slip],
np.ones(n_creeping_lower) * alpha_n_lower,
np.ones(n_creeping_lower) * n_lower,
np.ones(n_creeping_lower) * A_lower)
state_plus = np.concatenate((s_plus_upper, zeta_plus_upper,
s_plus_lower, v_plus_lower))
i_slip = (i_slip + 1) % n_slips
else:
state_plus = sol[:, -1]
# advance
i += 1
# warn if we never spun up
if not spun_up:
print(f"Simulation did not spin up after {len(i_break) - 1} cycles!")
full_state[n_creeping_upper:n_state_upper, :] = \
v_0 * np.exp(full_state[n_creeping_upper:n_state_upper, :])
# if isinstance(self.fault.upper_rheo, rheologies.RateStateSteadyLogarithmic):
# vel_upper = self.fault.upper_rheo.zeta2v(vel_upper)
# done
return full_state
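# Convenience sketch (hypothetical helper, not part of the original code) for
# unpacking the run output; the row layout of full_state is
# [slip_upper, v_upper, slip_lower, v_lower], as also assumed by
# get_surface_displacements_plvis_plvis below.
def _example_split_full_state(full_state, n_up, n_lo):
    slip_upper = full_state[:n_up, :]
    v_upper = full_state[n_up:2 * n_up, :]
    slip_lower = full_state[2 * n_up:2 * n_up + n_lo, :]
    v_lower = full_state[2 * n_up + n_lo:, :]
    return slip_upper, v_upper, slip_lower, v_lower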
@njit(float64[:, :](float64[:, ::1], int64, int64, float64[:, ::1], float64[:, ::1]),
cache=True)
# optional(float64[:, ::1]), optional(float64[:, ::1])))
def get_surface_displacements_plvis_plvis(full_state, n_creeping_upper, n_creeping_lower,
G_surf, deep_creep_slip): # , locked_slip):
"""
Calculate the surface displacements given the output of ``run``.
Parameters
----------
full_state : numpy.ndarray
Timeseries of the full state variable, as returned by the flat run functions.
n_creeping_upper : int
Number [-] of creeping patches in the upper fault interface
n_creeping_lower : int
Number [-] of creeping patches in the lower fault interface
G_surf : numpy.ndarray
Surface displacements Green's matrix [-] (its dimensions must match whether
`locked_slip` and/or `deep_creep_slip` are passed to the function)
deep_creep_slip : numpy.ndarray
Timeseries of slip [m] on the deep creep patches
locked_slip : numpy.ndarray, optional
Timeseries of slip [m] on the locked patches (currently disabled in the
jitted function signature)
Returns
-------
surf_disp : numpy.ndarray
Surface displacement timeseries.
"""
# extract timeseries from solution
slip_upper = full_state[:n_creeping_upper, :]
slip_lower = full_state[2 * n_creeping_upper:2 * n_creeping_upper + n_creeping_lower, :]
# add the locked and deep patches to the combined upper & lower slip history matrix
slips_all = np.concatenate((slip_upper, slip_lower), axis=0)
# if locked_slip is not None:
# slips_all = np.concatenate((locked_slip[:, :slip_upper.shape[1]], slips_all),
# axis=0)
# if deep_creep_slip is not None:
slips_all = np.concatenate((slips_all, deep_creep_slip), axis=0)
# calculate all surface displacements for the provided slip histories
surf_disps = G_surf @ slips_all
return surf_disps
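# Shape sketch (hypothetical sizes, not part of the original code) for the
# surface-displacement mapping: the Green's matrix columns must cover the
# upper and lower creeping patches plus the three deep creep patches; the
# output inherits the matrix's row dimension (e.g., stacked displacement
# components per station).
def _example_surface_disp_shapes(n_up=3, n_lo=2, n_rows=8, n_t=10):
    full_state = np.zeros((2 * (n_up + n_lo), n_t))
    deep_creep = np.zeros((3, n_t))
    G_surf = np.zeros((n_rows, n_up + n_lo + 3))
    disp = get_surface_displacements_plvis_plvis(full_state, n_up, n_lo,
                                                 G_surf, deep_creep)
    return disp.shape  # (n_rows, n_t)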
class Fault2D():
"""
Base class for the subduction fault mesh.
"""
def __init__(self, theta, D_lock, H, nu, E, v_s, halflen,
upper_rheo, n_upper, lower_rheo, n_lower_left,
n_lower_right, halflen_factor_lower,
D_max=None, x1_pretrench=None):
"""
Define the fault mesh of the subduction zone fault system, based on the
Elastic Subducting Plate Model (ESPM) of [kanda2010]_.
Parameters
----------
theta : float
Dip angle [rad] of the plate interface (positive).
D_lock : float
Locking depth [m] of the upper plate interface (positive).
H : float
Subducting plate thickness [m].
nu : float
Poisson's ratio [-] of the fault zone.
E : float
Young's modulus [Pa] of the fault zone.
v_s : float
Shear wave velocity [m/s] in the fault zone.
halflen : float
Fault patch half-length [m], used for all locked patches.
If ``D_max`` and ``x1_pretrench`` are not set, this length is also used for all
creeping patches, otherwise, this is their minimum half-length.
upper_rheo : Rheology
Upper plate interface's rheology.
n_upper : int
Number [-] of patches on upper plate interface.
lower_rheo : Rheology
Lower plate interface's rheology. Pass ``None`` if it should not be
simulated but instead be prescribed to move at the plate velocity.
n_lower_left : int
Number [-] of patches on lower plate interface (left of the bend).
n_lower_right : int
Number [-] of patches on lower plate interface (right of the bend).
halflen_factor_lower : float
Factor used to get a different minimum half-length of the patches on the lower
plate interface.
D_max : float, optional
Maximum depth [m] of the upper plate interface (positive).
If set, this makes the mesh use linearly-increasing patch sizes away from the
locked zone. (``x1_pretrench`` must be set as well.)
x1_pretrench : float, optional
Horizontal distance [m] of the lower plate interface before the trench (positive).
If set, this makes the mesh use linearly-increasing patch sizes away from the
locked zone. (``D_max`` must be set as well.)
References
----------
.. [kanda2010] Kanda, R. V. S., & Simons, M. (2010).
*An elastic plate model for interseismic deformation in subduction zones.*
Journal of Geophysical Research: Solid Earth, 115(B3).
doi:`10.1029/2009JB006611 <https://doi.org/10.1029/2009JB006611>`_.
"""
# initialize
self.theta = float(theta)
""" Subducting plate dip angle [rad] """
assert 0 < self.theta < np.pi / 2
self.D_lock = float(D_lock)
""" Theoretical locking depth [m] of the upper plate interface """
assert self.D_lock > 0
self.H = float(H)
""" Subducting plate thickness [m] """
assert self.H >= 0
self.nu = float(nu)
""" Poisson's ratio [-] of the fault zone """
self.E = float(E)
""" Young's modulus [Pa] of the fault zone """
self.halflen = float(halflen)
""" Fault patch half-length [m] on upper interface """
assert self.halflen > 0
self.upper_rheo = upper_rheo
""" Upper plate interface's rheology """
assert isinstance(self.upper_rheo, Rheology)
self.n_upper = int(n_upper)
""" Number [-] of patches on upper plate interface """
assert self.n_upper >= 1
self.lower_rheo = lower_rheo
""" Lower plate interface's rheology """
assert isinstance(self.lower_rheo, Rheology) or \
(self.lower_rheo is None)
self.n_lower_left = int(n_lower_left)
""" Number [-] of patches on lower plate interface (left of bend) """
assert self.n_lower_left >= 1
self.n_lower_right = int(n_lower_right)
""" Number [-] of patches on lower plate interface (right of bend) """
assert self.n_lower_right >= 1
self.halflen_factor_lower = float(halflen_factor_lower)
""" Prefactor [-] to change the lower interface half-length """
assert self.halflen_factor_lower >= 1
self.lower_halflen = self.halflen * self.halflen_factor_lower
""" Fault patch half-length [m] on lower interface """
if self.lower_rheo is not None:
assert self.H >= 2 * self.lower_halflen, "Plate too thin for given patch sizes."
self.v_s = float(v_s)
""" Shear wave velocity [m/s] in the fault zone """
self.mu_over_2vs = self.E / (2 * (1 + self.nu) * 2 * self.v_s)
""" Radiation damping term [Pa * s/m] """
# switch between constant or linearly-varying patch sizes
if (D_max is not None) and (x1_pretrench is not None):
D_max = float(D_max)
x1_pretrench = float(x1_pretrench)
assert D_max > 0
assert x1_pretrench > 0
variable_mesh = True
else:
D_max = None
x1_pretrench = None
variable_mesh = False
self.D_max = D_max
""" Maximum depth [m] of the upper plate interface (optional) """
self.x1_pretrench = x1_pretrench
""" Horizontal distance [m] of the lower plate interface before the trench (optional) """
self.variable_mesh = variable_mesh
""" Flag whether the creeping patches are linearly-varying in size, or not """
# create mesh, centered about the x2 axis
if self.variable_mesh:
# project the locking depth onto dip angle
L_lock = self.D_lock / np.sin(self.theta)
# get number of locked and creeping patches on upper interface
n_lock = int(L_lock // (2 * self.halflen))
n_creep_up = self.n_upper - n_lock
assert n_creep_up > 0, "Current geometry yields no upper creeping patches."
# project maximum interface depth onto dip angle
L_max = self.D_max / np.sin(self.theta)
# get length of creeping segment that needs to be linearly varying
delta_L = L_max - n_lock * 2 * self.halflen
# get linear half-length increase necessary given the number of patches
# and length of creeping segment, on all three interface regions
delta_h_upper = ((delta_L - 2 * self.halflen * n_creep_up) /
(n_creep_up**2 - n_creep_up))
delta_h_lower_right = \
((L_max - 2 * self.lower_halflen * self.n_lower_right) /
(self.n_lower_right**2 - self.n_lower_right))
delta_h_lower_left = \
((self.x1_pretrench - 2 * self.lower_halflen * self.n_lower_left) /
(self.n_lower_left**2 - self.n_lower_left))
# check that we're not running into numerical problems from starkly
# increasing patch sizes
if any([d > 0.2 for d in [delta_h_upper / self.halflen,
delta_h_lower_right / self.lower_halflen,
delta_h_lower_left / self.lower_halflen]]):
raise ValueError("Half-length increase greater than 20%.")
# build vector of half-lengths
halflen_vec = np.concatenate([
np.ones(n_lock) * self.halflen,
self.halflen + np.arange(n_creep_up) * delta_h_upper,
(self.lower_halflen + np.arange(self.n_lower_left) * delta_h_lower_left)[::-1],
self.lower_halflen + np.arange(self.n_lower_right) * delta_h_lower_right])
else:
# build half-length vector from constant size
halflen_vec = np.ones(self.n_upper + self.n_lower_left + self.n_lower_right
) * self.halflen
halflen_vec[self.n_upper:] *= self.halflen_factor_lower
self.halflen_vec = halflen_vec
""" Half-lengths [m] for each patch in the fault """
s = self.H * np.tan(self.theta / 2)
R = np.array([[np.cos(-self.theta), -np.sin(-self.theta)],
[np.sin(-self.theta), np.cos(-self.theta)]])
# upper plate interface
upper_right_x1 = np.concatenate([[0], np.cumsum(2*self.halflen_vec[:self.n_upper])])
upper_right_x2 = np.zeros_like(upper_right_x1)
upper_right = R @ np.stack([upper_right_x1, upper_right_x2], axis=0)
# lower left plate interface
temp = self.halflen_vec[self.n_upper + self.n_lower_left - 1:self.n_upper - 1:-1]
lower_left_x1 = -s - np.concatenate([[0], np.cumsum(2*temp)])[::-1]
lower_left_x2 = -self.H * np.ones(self.n_lower_left + 1)
lower_left = np.stack([lower_left_x1, lower_left_x2], axis=0)
# lower right
lower_right_x1 = np.concatenate([
[0], np.cumsum(2*self.halflen_vec[self.n_upper + self.n_lower_left:])])
lower_right_x2 = np.zeros_like(lower_right_x1)
lower_right = (R @ np.stack([lower_right_x1, lower_right_x2], axis=0)
- np.array([[s], [self.H]]))
# concatenate mesh parts
self.end_upper = upper_right
""" 2-element coordinates of upper fault patch endpoints [m] """
self.end_lower = np.concatenate([lower_left, lower_right[:, 1:]], axis=1)
""" 2-element coordinates of lower fault patch endpoints [m] """
self.end = np.concatenate([self.end_upper, self.end_lower], axis=1)
""" 2-element coordinates of fault patch endpoints [m] """
self.mid = np.concatenate([upper_right[:, :-1] + upper_right[:, 1:],
lower_left[:, :-1] + lower_left[:, 1:],
lower_right[:, :-1] + lower_right[:, 1:]],
axis=1) / 2
""" 2-element coordinates of fault patch midpoints [m] """
self.mid_x1 = self.mid[0, :]
""" :math:`x_1` coordinates of fault patch midpoints [m] """
self.mid_x2 = self.mid[1, :]
""" :math:`x_2` coordinates of fault patch midpoints [m] """
# access subparts
self.ix_upper = np.arange(self.mid_x1.size) < upper_right_x1.size
""" Mask of upper fault interface patches """
self.ix_lower = ~self.ix_upper
""" Mask of lower fault interface patches (if existing) """
# locked is the part that slips coseismically on the upper plate interface
self.x1_lock = self.D_lock / np.tan(self.theta)
""" Theoretical surface location [m] of end of locked interface """
ix_locked = self.mid_x1 <= self.x1_lock - self.halflen
ix_locked[self.n_upper:] = False
self.ix_locked = ix_locked
""" Mask of fault patches that are locked interseismically """
self.n_locked = (self.ix_locked).sum()
""" Number [-] of locked patches """
# assert self.n_locked == n_lock
self.n_creeping = (~self.ix_locked).sum()
""" Number [-] of creeping patches """
self.n_creeping_upper = (~self.ix_locked[:self.n_upper]).sum()
""" Number [-] of creeping patches in the upper fault interface """
# assert self.n_creeping_upper == n_creep_up
self.n_creeping_lower = self.n_creeping - self.n_creeping_upper
""" Number [-] of creeping patches in the lower fault interface """
assert self.n_creeping_lower == n_lower_left + n_lower_right
self.mid_x1_locked = self.mid_x1[self.ix_locked]
""" :math:`x_1` coordinates of locked fault patch midpoints [m] """
self.mid_x2_locked = self.mid_x2[self.ix_locked]
""" :math:`x_2` coordinates of locked fault patch midpoints [m] """
self.mid_x1_creeping = self.mid_x1[~self.ix_locked]
""" :math:`x_1` coordinates of creeping fault patch midpoints [m] """
self.mid_x2_creeping = self.mid_x2[~self.ix_locked]
""" :math:`x_2` coordinates of creeping fault patch midpoints [m] """
# for later calculations, need theta and unit vectors in vector form
theta_vec = np.ones_like(self.mid_x1) * self.theta
theta_vec[self.n_upper:self.n_upper + self.n_lower_left] = np.pi
theta_vec[self.n_upper + self.n_lower_left:] += np.pi
self.theta_vec = theta_vec
""" Plate dip angle [rad] for all fault patches """
self.e_f = np.stack([np.sin(self.theta_vec), np.cos(self.theta_vec)], axis=0)
""" Unit vectors [-] normal to fault patches"""
self.e_s = np.stack([-np.cos(self.theta_vec), np.sin(self.theta_vec)], axis=0)
""" Unit vectors [-] in fault patch slip direction """
# get external (from the locked to the creeping patches) stress kernel
K = Klinedisp(self.mid_x1_creeping, self.mid_x2_creeping,
self.mid_x1_locked, self.mid_x2_locked,
self.halflen_vec[self.ix_locked],
self.theta_vec[self.ix_locked], self.nu, self.E
)[:, :self.n_locked]
Kx1x1 = K[:self.n_creeping, :]
Kx2x2 = K[self.n_creeping:2*self.n_creeping, :]
Kx1x2 = K[2*self.n_creeping:3*self.n_creeping, :]
K = np.stack([Kx1x1.ravel(), Kx1x2.ravel(), Kx1x2.ravel(), Kx2x2.ravel()]
).reshape(2, 2, self.n_creeping, self.n_locked).transpose(2, 3, 0, 1)
self.K_ext = np.einsum("ki,ijkl,li->ij", self.e_s[:, ~self.ix_locked],
K, self.e_f[:, ~self.ix_locked], optimize=True)
""" External stress kernel [Pa/m] """
# get internal (within creeping patches) stress kernel
K = Klinedisp(self.mid_x1_creeping, self.mid_x2_creeping,
self.mid_x1_creeping, self.mid_x2_creeping,
self.halflen_vec[~self.ix_locked],
self.theta_vec[~self.ix_locked], self.nu, self.E
)[:, :self.n_creeping]
Kx1x1 = K[:self.n_creeping, :]
Kx2x2 = K[self.n_creeping:2*self.n_creeping, :]
Kx1x2 = K[2*self.n_creeping:3*self.n_creeping, :]
K = np.stack([Kx1x1.ravel(), Kx1x2.ravel(), Kx1x2.ravel(), Kx2x2.ravel()]
).reshape(2, 2, self.n_creeping, self.n_creeping).transpose(2, 3, 0, 1)
self.K_int = np.einsum("ki,ijkl,li->ij", self.e_s[:, ~self.ix_locked],
K, self.e_f[:, ~self.ix_locked], optimize=True)
""" Internal stress kernel [Pa/m] """
self.n_state_upper = self.upper_rheo.n_vars * self.n_creeping_upper
""" Size [-] of upper plate interface state variable """
self.n_state_lower = (self.lower_rheo.n_vars * self.n_creeping_lower
if self.lower_rheo is not None
else 2 * self.n_creeping_lower)
""" Size [-] of lower plate interface state variable """
if (self.n_creeping_upper == 0) or (self.n_creeping_lower == 0):
raise ValueError("Defined geometry results in zero creeping patches in "
"either the upper or lower plate interface.")
# # if upper rheology is Burgers, tell it our specific shear modulus
# if isinstance(self.upper_rheo, rheologies.LinearBurgers):
# self.upper_rheo.set_G(self.K_int[:self.n_creeping_upper, :self.n_creeping_upper])
# discretized locking depth
self.D_lock_disc = -self.end_upper[1, self.n_locked]
""" Discretized locking depth [m] of the upper plate interface """
self.x1_lock_disc = self.D_lock_disc / np.tan(self.theta)
""" Discretized surface location [m] of end of locked interface """
class SubductionSimulation():
"""
Subduction simulation container class.
"""
def __init__(self, v_plate, n_cycles_max, n_samples_per_eq, delta_tau_max, v_max,
fault, Ds_0, Ds_0_logsigma, T_rec, T_rec_logsigma, D_asp_min,
D_asp_max, T_anchor, T_last, enforce_v_plate, largehalflen,
t_obs, pts_surf):
"""
Create a subduction simulation.
Parameters
----------
v_plate : float
Nominal far-field plate velocity [m/s]
n_cycles_max : int
Maximum number of cycles to simulate [-]
n_samples_per_eq : int
Number of internal evaluation timesteps between earthquakes [-]
delta_tau_max : float
Maximum shear stress change [Pa] from coseismic slip on locked patches
v_max : float
Maximum slip velocity [m/s] on creeping patches
fault : Fault2D
Fault object
Ds_0 : numpy.ndarray
Nominal coseismic left-lateral shearing [m] of the locked fault patch(es)
Ds_0_logsigma : numpy.ndarray
Standard deviation of the fault slip in logarithmic space
T_rec : numpy.ndarray
Nominal recurrence time [a] for each earthquake
T_rec_logsigma : numpy.ndarray
Standard deviation of the recurrence time in logarithmic space
D_asp_min : numpy.ndarray
Minimum depth [m] for the asperities of each earthquake
D_asp_max : numpy.ndarray
Maximum depth [m] for the asperities of each earthquake
T_anchor : str
Anchor date where observations end
T_last : list
Dates of the last occurrence of each earthquake (list of strings)
enforce_v_plate : bool
Flag whether to allow v_plate to vary or not
largehalflen : float
Fault patch half-length of the deep creep patches [m]
t_obs : numpy.ndarray, pandas.DatetimeIndex
Observation timesteps, either as decimal years relative to the cycle start,
or as Timestamps
pts_surf : numpy.ndarray
Horizontal landward observation coordinates [m] relative to the trench
"""
# save general sequence & fault parameters
self.v_plate = float(v_plate)
""" Nominal far-field plate velocity, in the dimensions of the rheology """
self.n_cycles_max = int(n_cycles_max)
""" Maximum number of cycles to simulate [-] """
self.n_samples_per_eq = int(n_samples_per_eq)
""" Number of internal evaluation timesteps between earthquakes [-] """
self.delta_tau_max = float(delta_tau_max)
""" Maximum shear stress change [Pa] from coseismic slip on locked patches """
self.v_max = float(v_max)
""" Maximum slip velocity [m/s] on creeping patches """
# define fault
assert isinstance(fault, Fault2D)
if not (isinstance(fault.upper_rheo, NonlinearViscous) or
isinstance(fault.upper_rheo, RateStateSteadyLogarithmic)) or \
not (isinstance(fault.lower_rheo, NonlinearViscous) or
(fault.lower_rheo is None)):
raise NotImplementedError("SubductionSimulation is only implemented for "
"NonlinearViscous or RateStateSteadyLogarithmic "
"rheologies in the upper interface, and NonlinearViscous "
"rheology in the lower interface.")
self.fault = fault
""" Fault object """
# cast earthquake slips as NumPy array
self.Ds_0 = np.atleast_1d(Ds_0)
""" Nominal coseismic left-lateral shearing [m] of the locked fault patch(es) """
self.Ds_0_logsigma = np.atleast_1d(Ds_0_logsigma)
""" Standard deviation of the fault slip in logarithmic space """
# load recurrence times
self.T_rec = np.atleast_1d(T_rec)
""" Nominal recurrence time [a] for each earthquake """
self.T_rec_logsigma = np.atleast_1d(T_rec_logsigma)
""" Standard deviation of the recurrence time in logarithmic space """
# load the minimum and maximum depths of the earthquakes
self.D_asp_min = np.atleast_1d(D_asp_min)
""" Minimum depth [m] for the asperities of each earthquake """
self.D_asp_max = np.atleast_1d(D_asp_max)
""" Maximum depth [m] for the asperities of each earthquake """
assert all([D <= self.fault.D_lock for D in self.D_asp_max]), \
f"Asperity depths {self.D_asp_max/1e3} km are deeper than the " \
f"locking depth {self.fault.D_lock/1e3}."
self.T_anchor = str(T_anchor)
""" Anchor date where observations end """
assert isinstance(T_last, list) and all([isinstance(tl, str) for tl in T_last])
self.T_last = T_last
""" Dates of the last occurence for each earthquake """
# create a NumPy array that for each locked asperity has the slip per earthquake
self.slip_mask = np.logical_and(self.fault.mid_x2_locked.reshape(-1, 1)
< -self.D_asp_min.reshape(1, -1),
self.fault.mid_x2_locked.reshape(-1, 1)
> -self.D_asp_max.reshape(1, -1))
""" Mask that matches each earthquake to a fault patch """
self.T_fullcycle = np.lcm.reduce(self.T_rec)
""" Nominal recurrence time [a] for an entire joint earthquake cycle """
self.n_eq = self.Ds_0.size
""" Number of distinct earthquakes in sequence """
self.n_eq_per_asp = (self.T_fullcycle / self.T_rec).astype(int)
""" Number of earthquakes per asperity and full cycle """
# create realization of the slip amount and earthquake timings
rng = np.random.default_rng()
# first, create realizations of occurence times
# note that this will result in a varying plate velocity rate
# (ignore zero-slip earthquakes)
self.T_rec_per_asp = [rng.lognormal(np.log(t), s, n) for t, s, n in
zip(self.T_rec, self.T_rec_logsigma, self.n_eq_per_asp)]
""" Recurrence time [a] realization """
self.Ds_0_per_asp = [rng.lognormal(np.log(d), s, n) if d > 0
else np.array([d] * n) for d, s, n in
zip(self.Ds_0, self.Ds_0_logsigma, self.n_eq_per_asp)]
""" Fault slip [m] realization """
# sanity check that in each asperity, the nominal plate rate is recovered
self.slip_asperities = self.slip_mask.astype(int) * self.Ds_0.reshape(1, -1)
""" Slip [m] for each earthquake in each asperity """
v_eff_in_asp = (self.slip_asperities / self.T_rec.reshape(1, -1)).sum(axis=1)
assert np.allclose(v_eff_in_asp, self.v_plate * 86400 * 365.25), \
"The nominal plate rate is not recovered in all asperities.\n" \
f"Plate velocity = {self.v_plate * 86400 * 365.25}\n" \
f"Effective velocity in each asperity:\n{v_eff_in_asp}"
# second, we need to shift the random realization for each earthquake
# individually such that they all yield the same v_plate (enforced or not)
# get the effective recurrence time as implied by the T_rec realizations
T_fullcycle_per_asp_eff = np.array([sum(t) for t in self.T_rec_per_asp])
# same for the effective cumulative slip
Ds_0_fullcycle_per_asp_eff = np.array([sum(d) for d in self.Ds_0_per_asp])
# we need to scale each individual sequence such that it implies the same
# recurrence time and cumulative slip in each asperity
# (again ignoring zero-slip earthquakes)
T_fullcycle_eff_mean = np.mean(T_fullcycle_per_asp_eff)
Ds_0_fullcycle_mean = np.ma.masked_equal(Ds_0_fullcycle_per_asp_eff, 0).mean()
T_rec_per_asp_adj = [np.array(self.T_rec_per_asp[i]) * T_fullcycle_eff_mean
/ T_fullcycle_per_asp_eff[i] for i in range(self.n_eq)]
Ds_0_per_asp_adj = [np.array(self.Ds_0_per_asp[i]) * Ds_0_fullcycle_mean
/ Ds_0_fullcycle_per_asp_eff[i] if self.Ds_0[i] > 0
else np.array(self.Ds_0_per_asp[i]) for i in range(self.n_eq)]
# now each asperity has the same effective plate velocity, which can be different
# from the nominal one - if we want to enforce the nominal plate velocity,
# we can rescale the recurrence times again
self.enforce_v_plate = bool(enforce_v_plate)
""" Flag whether to allow v_plate to vary or not """
ix_nonzero_slip = np.argmax(self.Ds_0 > 0)
v_plate_eff = (sum(Ds_0_per_asp_adj[ix_nonzero_slip])
/ sum(T_rec_per_asp_adj[ix_nonzero_slip]) / 86400 / 365.25)
if self.enforce_v_plate:
v_plate_factor = self.v_plate / v_plate_eff
for i in range(self.n_eq):
T_rec_per_asp_adj[i] /= v_plate_factor
v_plate_eff = self.v_plate
self.v_plate_eff = v_plate_eff
""" Effective far-field plate velocity [m/s] """
self.T_eff = sum(T_rec_per_asp_adj[0])
""" Effective length [a] of entire earthquake sequence """
# third, we need to create a list of earthquake dates and associated slips
temp_slips = np.vstack([self.slip_mask[:, i].reshape(1, -1)
* Ds_0_per_asp_adj[i].reshape(-1, 1)
for i in range(self.n_eq)])
year_offsets = [(pd.Period(self.T_anchor, "D") - pd.Period(self.T_last[i], "D")
).n / 365.25 for i in range(self.n_eq)]
eq_df_index = np.concatenate(
[self.T_eff -
(np.cumsum(T_rec_per_asp_adj[i]) - T_rec_per_asp_adj[i] + year_offsets[i])
for i in range(self.n_eq)])
# round the dates to the closest day and combine earthquakes
eq_df_index_rounded = np.around(eq_df_index * 365.25) / 365.25
# build a DataFrame with exact and rounded times
eq_df = pd.DataFrame(data=temp_slips)
eq_df["time"] = eq_df_index
eq_df["rounded"] = eq_df_index_rounded
# now aggregate by rounded time, keeping the minimum exact time, and summing slip
agg_dict = {"time": "min"}
agg_dict.update({c: "sum" for c in range(self.fault.n_locked)})
eq_df = eq_df.groupby("rounded").agg(agg_dict)
# convert time column to index and sort
eq_df.set_index("time", inplace=True)
eq_df.sort_index(inplace=True)
assert np.allclose(eq_df.sum(axis=0), eq_df.sum(axis=0)[0])
self.eq_df = eq_df
"""
DataFrame with the dates [decimal year from cycle start] and slips [m]
for each asperity
"""
# fourth, we need to create a list of dates to use internally when evaluating
# the earthquake cycle - this is independent of the observation dates
i_frac_cumsum = np.concatenate([[self.eq_df.index[-1] - self.T_eff],
self.eq_df.index.values])
T_frac = np.diff(i_frac_cumsum)
t_eval = np.concatenate(
[np.logspace(0, np.log10(1 + T_frac[i]), self.n_samples_per_eq, endpoint=False)
- 1 + i_frac_cumsum[i] + j*self.T_eff
for j in range(self.n_cycles_max) for i, t in enumerate(T_frac)])
num_neg = (t_eval < 0).sum()
t_eval = np.roll(t_eval, -num_neg)
if num_neg > 0:
    # guard: with num_neg == 0, t_eval[-0:] would address the entire array
    t_eval[-num_neg:] += self.n_cycles_max * self.T_eff
self.t_eval = np.sort(np.concatenate(
[t_eval, np.arange(self.n_cycles_max + 1) * self.T_eff]))
""" Internal evaluation timesteps [decimal years since cycle start] """
self.n_eval = self.t_eval.size
""" Number of internal evaluation timesteps [-] """
# fifth, for the integration, we need the indices of the timesteps that mark either
# an earthquake or the start of a new cycle
self.n_slips = self.eq_df.shape[0]
""" Number of slips in a sequence [-] """
self.ix_break = [i*(self.n_slips * self.n_samples_per_eq + 1)
for i in range(self.n_cycles_max + 1)]
""" Indices of breaks between cycles """
self.ix_eq = [self.ix_break[i] + j * self.n_samples_per_eq - num_neg + 1
for i in range(self.n_cycles_max) for j in range(1, 1 + self.n_slips)]
""" Indices of earthquakes """
# sixth and last, for the final loop, we need a joint timesteps array between internal
# and external (observation) timestamps, such that we can debug, check early stopping,
# and restrict the output to the requested timeseries
if isinstance(t_obs, pd.DatetimeIndex):
t_obs = self.T_eff + (t_obs - pd.Timestamp(self.T_anchor)
).total_seconds().values / 86400 / 365.25
elif isinstance(t_obs, np.ndarray):
if np.all(t_obs < 0):
# this format is relative to T_anchor and more stable when T_eff varies
t_obs = self.T_eff + t_obs
assert np.all(t_obs >= 0) and np.all(t_obs < self.T_eff), \
f"Range of 't_obs' ({t_obs.min()}-{t_obs.max():} years) outside of " \
f"the earthquake cycle period ({self.T_eff:} years)."
else:
raise ValueError("Unknown 't_obs' data type.")
self.t_obs = t_obs
""" Observation timesteps [decimal years since cycle start] """
# combine all possible timesteps
t_obs_shifted = self.t_obs + (self.n_cycles_max - 1) * self.T_eff
self.t_eval_joint = np.unique(np.concatenate((self.t_eval, t_obs_shifted)))
"""
Joint internal evaluation and external observation timesteps
[decimal years since cycle start]
"""
# get indices of each individual subset in the new timesteps array
self.ix_break_joint = \
np.flatnonzero(np.isin(self.t_eval_joint, self.t_eval[self.ix_break]))
""" Indices of breaks between cycles in joint timesteps """
self.ix_eq_joint = \
np.flatnonzero(np.isin(self.t_eval_joint, self.t_eval[self.ix_eq]))
""" Indices of earthquakes in joint timesteps """
self.ix_obs_joint = \
np.flatnonzero(np.isin(self.t_eval_joint, t_obs_shifted))
""" Indices of observation timestamps in joint timesteps """
# get vectors of upper plate rheology parameters
if isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic):
# alpha_h
self.alpha_h_vec = \
self.fault.upper_rheo.get_param_vectors(
-self.fault.mid_x2_creeping[:self.fault.n_creeping_upper])
r""" Depth-variable :math:`(a - b) * \sigma_E` [Pa] of upper plate interface """
elif isinstance(self.fault.upper_rheo, NonlinearViscous):
# A, alpha_n, and n
alpha_n_vec, n_vec, A_vec = \
self.fault.upper_rheo.get_param_vectors(
-self.fault.mid_x2_creeping[:self.fault.n_creeping_upper], self.v_plate)
self.alpha_n_vec = alpha_n_vec
r""" Depth-variable :math:`\alpha_n` [Pa^n * s/m] of upper plate interface """
self.n_vec = n_vec
r""" Depth-variable :math:`n` [-] of upper plate interface """
self.A_vec = A_vec
r""" Depth-variable :math:`A ` [Pa * (s/m)^(1/n)] of upper plate interface """
else:
raise NotImplementedError
# get unbounded delta_tau
self.delta_tau_unbounded = self.fault.K_ext @ self.eq_df.values.T
""" Unbounded coseismic stress change [Pa] """
# get pseudoinverse of K_int for tapered slip
self.K_int_inv_upper = np.linalg.pinv(
self.fault.K_int[:self.fault.n_creeping_upper, :self.fault.n_creeping_upper])
""" Inverse of K_int [m/Pa] """
self.delta_tau_max_from_v_max_lower = \
((self.fault.lower_rheo.alpha_n * self.v_max)**(1 / self.fault.lower_rheo.n) -
(self.fault.lower_rheo.alpha_n * self.v_plate)**(1 / self.fault.lower_rheo.n)
if self.fault.lower_rheo is not None else np.inf)
""" Maximum shear stress change [Pa] in lower plate from capped velocity """
if isinstance(self.fault.upper_rheo, NonlinearViscous):
delta_tau_max_from_v_max_upper = \
(self.alpha_n_vec * self.v_max)**(1 / self.n_vec) - \
(self.alpha_n_vec * self.v_plate)**(1 / self.n_vec)
elif isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic):
delta_tau_max_from_v_max_upper = self.alpha_h_vec * \
(np.log(self.v_max / self.fault.upper_rheo.v_0) -
np.log(self.v_plate / self.fault.upper_rheo.v_0))
self.delta_tau_max_from_v_max_upper = delta_tau_max_from_v_max_upper
""" Maximum shear stress change [Pa] in upper plate from capped velocity """
self.delta_tau_max_joint_upper = np.fmin(self.delta_tau_max,
self.delta_tau_max_from_v_max_upper)
""" Joint maximum shear stress change [Pa] allowed in upper plate """
self.delta_tau_max_joint_lower = \
(min(self.delta_tau_max, self.delta_tau_max_from_v_max_lower)
if self.fault.lower_rheo is not None else np.inf)
""" Joint maximum shear stress change [Pa] allowed in lower plate """
# create tapered slip by making delta_tau linearly increase until delta_tau_max
delta_tau_bounded = self.delta_tau_unbounded.copy()
delta_tau_bounded[:self.fault.n_creeping_upper, :] = \
np.fmin(self.delta_tau_max_joint_upper.reshape(-1, 1),
self.delta_tau_unbounded[:self.fault.n_creeping_upper, :])
self.delta_tau_bounded = delta_tau_bounded
""" Bounded coseismic stress change [Pa] """
# get the additional slip
self.slip_taper = (self.K_int_inv_upper @
(self.delta_tau_bounded - self.delta_tau_unbounded
)[:self.fault.n_creeping_upper, :])
# check if the lower plate should have been bounded as well
if self.fault.lower_rheo is not None:
assert not np.any(np.abs(self.delta_tau_bounded[self.fault.n_creeping_upper:, :])
> self.delta_tau_max_joint_lower), \
("Maximum stress change delta_tau_bounded "
f"{np.max(np.abs(self.delta_tau_bounded)):.2e} Pa in lower interface "
f"above delta_tau_max = {self.delta_tau_max_joint_lower:.2e} Pa")
self.slip_taper_ts = \
pd.DataFrame(index=self.eq_df.index, data=self.slip_taper.T) \
.cumsum(axis=0).reindex(index=self.t_obs, method="ffill", fill_value=0)
""" Timeseries of tapered slip [m] on the upper creeping fault patches """
# need the imagined location and orientation of the deep creep patches
self.largehalflen = float(largehalflen)
""" Fault patch half-length of the deep crreep patches [m] """
self.mid_deep_x1 = \
np.array([self.fault.mid_x1[self.fault.n_upper - 1]
+ np.cos(self.fault.theta_vec[self.fault.n_upper - 1])
* self.fault.halflen_vec[self.fault.n_upper - 1]
+ np.cos(self.fault.theta_vec[self.fault.n_upper - 1])
* self.largehalflen,
self.fault.mid_x1[self.fault.n_upper + self.fault.n_lower_left - 1]
- self.fault.halflen_vec[self.fault.n_upper + self.fault.n_lower_left - 1]
- self.largehalflen,
self.fault.mid_x1[-1]
+ np.cos(self.fault.theta_vec[-1] - np.pi)
* self.fault.halflen_vec[-1]
+ np.cos(self.fault.theta_vec[-1] - np.pi)
* self.largehalflen])
""" :math:`x_1` coordinates of deep creep fault patch midpoints [m] """
self.mid_deep_x2 = \
np.array([self.fault.mid_x2[self.fault.n_upper - 1]
- np.sin(self.fault.theta_vec[self.fault.n_upper - 1])
* self.fault.halflen_vec[self.fault.n_upper - 1]
- np.sin(self.fault.theta_vec[self.fault.n_upper - 1])
* self.largehalflen,
self.fault.mid_x2[self.fault.n_upper + self.fault.n_lower_left - 1],
self.fault.mid_x2[-1]
- np.sin(self.fault.theta_vec[-1] - np.pi)
* self.fault.halflen_vec[-1]
- np.sin(self.fault.theta_vec[-1] - np.pi)
* self.largehalflen])
""" :math:`x_2` coordinates of deep creep fault patch midpoints [m] """
self.theta_vec_deep = \
np.array([self.fault.theta_vec[self.fault.n_upper - 1],
np.pi,
self.fault.theta_vec[-1]])
""" Plate dip angle [rad] for deep creep fault patches """
# create the Green's matrices
self.pts_surf = pts_surf
""" :math:`x_1` coordinates of surface observation points [m] """
self.n_stations = self.pts_surf.size
""" Number of surface observing stations """
self.G_surf_fault = Glinedisp(
self.pts_surf, 0, self.fault.mid_x1, self.fault.mid_x2,
self.fault.halflen_vec, self.fault.theta_vec, self.fault.nu
)[:, :self.fault.mid_x1.size]
""" Green's matrix [-] relating slip on the main fault patches to surface motion """
self.G_surf_deep = Glinedisp(
self.pts_surf, 0, self.mid_deep_x1, self.mid_deep_x2,
self.largehalflen, self.theta_vec_deep, self.fault.nu)[:, :3]
""" Green's matrix [-] relating slip on the deep creep patches to surface motion """
self.G_surf = np.hstack([self.G_surf_fault, self.G_surf_deep])
""" Joint Green's matrix [-] relating slip on the entire ESPM to surface motion """
# calculate the best initial velocity state from the steady state ODE
v_plate_vec = np.ones(self.fault.n_creeping) * self.v_plate
v_plate_vec[self.fault.n_creeping_upper:] *= -1
self.v_plate_vec = v_plate_vec
""" Vector with the plate velocity for each creeping patch [m/s] """
# get the initial velocity, taking advantage of the option that there could be a
# deep transition zone
v_init = v_plate_vec.copy()
if self.fault.upper_rheo.deep_transition is not None:
ix_deep = np.argmin(np.abs(-self.fault.mid_x2_creeping[:self.fault.n_creeping_upper]
- self.fault.upper_rheo.deep_transition
- self.fault.upper_rheo.deep_transition_width))
if isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic):
v_init[:ix_deep] = np.linspace(self.v_plate * 1e-6, self.v_plate,
num=ix_deep, endpoint=False)
elif isinstance(self.fault.upper_rheo, NonlinearViscous):
v_init[:ix_deep] = np.linspace(0, self.v_plate, num=ix_deep, endpoint=False)
self.v_init = v_init
""" Initial velocity in all creeping patches [m/s] """
@property
def locked_slip(self):
""" Timeseries of slip [m] on the locked patches for observation timespan """
return self.eq_df.cumsum(axis=0) \
.reindex(index=self.t_obs, method="ffill", fill_value=0).values.T
@property
def deep_creep_slip(self):
""" Timeseries of slip [m] on the deep creep patches for observation timestamps """
return (np.tile(self.t_obs.reshape(1, -1), (3, 1))
* np.array([1, -1, -1]).reshape(3, 1)
* self.v_plate_eff * 86400 * 365.25)
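    # note: the deep creep patches slide at the effective plate rate for the
    # entire observation span; the signs [1, -1, -1] encode the slip sense of
    # the single upper versus the two lower semi-infinite patches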
@staticmethod
def read_config_file(config_file):
"""
Read a configuration file and return it as a parsed dictionary.
Parameters
----------
config_file : str
Path to INI configuration file.
Returns
-------
cfg_dict : dict
Parsed configuration file.
"""
# load configuration file
cfg = configparser.ConfigParser()
cfg.optionxform = str
with open(config_file, mode="rt") as f:
cfg.read_file(f)
cfg_seq, cfg_fault, cfg_mesh = cfg["sequence"], cfg["fault"], cfg["mesh"]
# parse rheologies
upper_rheo_dict = dict(cfg["upper_rheo"])
upper_rheo_type = upper_rheo_dict.pop("type")
upper_rheo_kw_args = {k: float(v) for k, v in upper_rheo_dict.items()}
try:
lower_rheo_dict = dict(cfg["lower_rheo"])
except KeyError:
lower_rheo_type = None
lower_rheo_kw_args = None
else:
lower_rheo_type = lower_rheo_dict.pop("type")
lower_rheo_kw_args = {k: float(v) for k, v in lower_rheo_dict.items()}
# parse everything else
cfg_dict = {
"theta": np.deg2rad(cfg_fault.getfloat("theta_deg")),
"D_lock": cfg_fault.getfloat("D_lock"),
"H": cfg_fault.getfloat("H"),
"nu": cfg_fault.getfloat("nu"),
"E": cfg_fault.getfloat("E"),
"v_s": cfg_fault.getfloat("v_s"),
"halflen": cfg_mesh.getfloat("halflen"),
"n_upper": cfg_mesh.getint("n_up"),
"n_lower_left": cfg_mesh.getint("n_low_l"),
"n_lower_right": cfg_mesh.getint("n_low_r"),
"halflen_factor_lower": cfg_mesh.getfloat("halflen_factor_lower"),
"D_max": cfg_mesh.getfloat("D_max", fallback=None),
"x1_pretrench": cfg_mesh.getfloat("x1_pretrench", fallback=None),
"v_plate": cfg_seq.getfloat("v_plate"),
"n_cycles_max": cfg_seq.getint("n_cycles_max"),
"n_samples_per_eq": cfg_seq.getint("n_samples_per_eq"),
"delta_tau_max": cfg_fault.getfloat("delta_tau_max", fallback=np.inf),
"v_max": cfg_fault.getfloat("v_max", fallback=np.inf),
"Ds_0": np.atleast_1d(json.loads(cfg_seq["Ds_0"])),
"Ds_0_logsigma": np.atleast_1d(json.loads(cfg_seq["Ds_0_logsigma"])),
"T_rec": np.atleast_1d(json.loads(cfg_seq["T_rec"])),
"T_rec_logsigma": np.atleast_1d(json.loads(cfg_seq["T_rec_logsigma"])),
"D_asp_min": np.atleast_1d(json.loads(cfg_seq["D_asp_min"])),
"D_asp_max": np.atleast_1d(json.loads(cfg_seq["D_asp_max"])),
"T_anchor": cfg_seq.get("T_anchor"),
"T_last": json.loads(cfg_seq["T_last"]),
"enforce_v_plate": cfg_seq.getboolean("enforce_v_plate"),
"largehalflen": cfg_mesh.getfloat("largehalflen"),
"upper_rheo_type": upper_rheo_type,
"lower_rheo_type": lower_rheo_type,
"upper_rheo_kw_args": upper_rheo_kw_args,
"lower_rheo_kw_args": lower_rheo_kw_args
}
return cfg_dict
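    # Example usage (a sketch; "config.ini" is a hypothetical file containing
    # the [sequence], [fault], [mesh], and [upper_rheo] sections parsed above):
    #
    #     cfg = SubductionSimulation.read_config_file("config.ini")
    #     print(cfg["v_plate"], cfg["upper_rheo_type"])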
@classmethod
def from_config_dict(cls, cfg, t_obs, pts_surf):
"""
Create a SubductionSimulation object from a configuration dictionary.
Parameters
----------
cfg : dict
Dictionary containing all parsed elements from the configuration file
t_obs : numpy.ndarray, pandas.DatetimeIndex
Observation timesteps, either as decimal years relative to the cycle start,
or as Timestamps
pts_surf : numpy.ndarray
Horizontal landward observation coordinates [m] relative to the trench
See Also
--------
read_config_file : To load a configuration file into a dictionary.
"""
# create rheology objects
upper_rheo = globals()[cfg["upper_rheo_type"]](**cfg["upper_rheo_kw_args"])
if cfg["lower_rheo_type"] is None:
lower_rheo = None
else:
lower_rheo = globals()[cfg["lower_rheo_type"]](**cfg["lower_rheo_kw_args"])
# create fault object
fault = Fault2D(theta=cfg["theta"],
D_lock=cfg["D_lock"],
H=cfg["H"],
nu=cfg["nu"],
E=cfg["E"],
v_s=cfg["v_s"],
halflen=cfg["halflen"],
upper_rheo=upper_rheo,
n_upper=cfg["n_upper"],
lower_rheo=lower_rheo,
n_lower_left=cfg["n_lower_left"],
n_lower_right=cfg["n_lower_right"],
halflen_factor_lower=cfg["halflen_factor_lower"],
D_max=cfg["D_max"],
x1_pretrench=cfg["x1_pretrench"])
# create simulation object
return cls(v_plate=cfg["v_plate"],
n_cycles_max=cfg["n_cycles_max"],
n_samples_per_eq=cfg["n_samples_per_eq"],
delta_tau_max=cfg["delta_tau_max"],
v_max=cfg["v_max"],
fault=fault,
Ds_0=cfg["Ds_0"],
Ds_0_logsigma=cfg["Ds_0_logsigma"],
T_rec=cfg["T_rec"],
T_rec_logsigma=cfg["T_rec_logsigma"],
D_asp_min=cfg["D_asp_min"],
D_asp_max=cfg["D_asp_max"],
T_anchor=cfg["T_anchor"],
T_last=cfg["T_last"],
enforce_v_plate=cfg["enforce_v_plate"],
largehalflen=cfg["largehalflen"],
t_obs=t_obs,
pts_surf=pts_surf)
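    # Example usage (a sketch; the configuration file name, observation times,
    # and station coordinates are hypothetical placeholders):
    #
    #     cfg = SubductionSimulation.read_config_file("config.ini")
    #     t_obs = np.linspace(0, 100, 1001)        # decimal years
    #     pts_surf = np.linspace(50e3, 400e3, 10)  # [m] landward of the trench
    #     sim = SubductionSimulation.from_config_dict(cfg, t_obs, pts_surf)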
@staticmethod
def get_n(alpha_n, alpha_eff, v_eff):
r"""
        Calculate the power-law exponent :math:`n` from the real and effective
        viscous strength constants.
Parameters
----------
alpha_n : float
Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m]
alpha_eff : float
Effective linear viscous strength constant [Pa * s/m]
v_eff : float
Effective velocity [m/s] used for ``alpha_eff`` conversions
Returns
-------
n : float
Power-law exponent :math:`n` [-]
"""
return (np.log(alpha_n) + np.log(v_eff)) / (np.log(alpha_eff) + np.log(v_eff))
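    # consistency sketch: taking logarithms of
    # alpha_eff = alpha_n**(1/n) * v_eff**((1-n)/n) and solving for n yields
    # n = (ln(alpha_n) + ln(v_eff)) / (ln(alpha_eff) + ln(v_eff)), which is the
    # expression above; e.g., the (hypothetical) values alpha_n = 1e18,
    # alpha_eff = 1e12, and v_eff = 1e-9 give n = (18 - 9) / (12 - 9) = 3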
@staticmethod
def get_alpha_n(alpha_eff, n, v_eff):
r"""
        Calculate the real nonlinear viscous strength constant from the effective linear one.
Parameters
----------
alpha_eff : float
Effective linear viscous strength constant [Pa * s/m]
n : float
Power-law exponent :math:`n` [-]
v_eff : float
Effective velocity [m/s] used for ``alpha_eff`` conversions
Returns
-------
alpha_n : float
Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m]
"""
alpha_n = alpha_eff**n * v_eff**(n-1)
return alpha_n
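    # sketch (hypothetical values): alpha_eff = 1e12 Pa * s/m, n = 3, and
    # v_eff = 1e-9 m/s give alpha_n = alpha_eff**3 * v_eff**2 = 1e18 Pa^3 * s/m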
@staticmethod
def get_alpha_eff(alpha_n, n, v_eff):
r"""
        Calculate the effective linear viscous strength constant from the real nonlinear one.
Parameters
----------
alpha_n : float
Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m]
n : float
Power-law exponent :math:`n` [-]
v_eff : float
Effective velocity [m/s] used for ``alpha_eff`` conversions
Returns
-------
alpha_eff : float
Effective linear viscous strength constant [Pa * s/m]
"""
if isinstance(v_eff, np.ndarray):
temp = v_eff.copy()
temp[temp == 0] = np.NaN
else:
temp = v_eff
alpha_eff = alpha_n**(1/n) * temp**((1-n)/n)
return alpha_eff
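    # round-trip sketch (hypothetical values):
    #
    #     alpha_n, n, v_eff = 1e18, 3.0, 1e-9
    #     alpha_eff = SubductionSimulation.get_alpha_eff(alpha_n, n, v_eff)
    #     # alpha_eff == 1e12 Pa * s/m, and the conversion inverts exactly:
    #     assert np.isclose(
    #         SubductionSimulation.get_alpha_n(alpha_eff, n, v_eff), alpha_n)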
@staticmethod
def get_alpha_eff_from_alpha_h(alpha_h, v_eff):
r"""
Calculate the effective viscosity from the rate-dependent friction.
Parameters
----------
alpha_h : float
Rate-and-state parameter :math:`(a - b) * \sigma_E`,
where :math:`a` and :math:`b` [-] are the rate-and-state frictional properties,
and :math:`\sigma_E` [Pa] is effective fault normal stress.
v_eff : float
Effective velocity [m/s] used for ``alpha_eff`` conversions
Returns
-------
alpha_eff : float
Effective linear viscous strength constant [Pa * s/m]
"""
if isinstance(v_eff, np.ndarray):
temp = v_eff.copy()
temp[temp == 0] = np.NaN
else:
temp = v_eff
alpha_eff = alpha_h / temp
return alpha_eff
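    # sketch (hypothetical values): alpha_h = (a - b) * sigma_E = 1e6 Pa and
    # v_eff = 1e-9 m/s give alpha_eff = alpha_h / v_eff = 1e15 Pa * s/m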
def run(self, simple_rk4=False):
"""
Run a full simulation.
"""
# run forward integration
if self.fault.lower_rheo is None:
if isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic):
full_state = flat_run_rdlog(
self.t_eval_joint * 86400 * 365.25, self.ix_break_joint, self.ix_eq_joint,
self.fault.n_creeping_upper, self.fault.n_creeping_lower, self.fault.K_int,
self.fault.K_ext, self.v_plate_vec, self.v_init, self.slip_taper,
self.delta_tau_bounded, self.fault.upper_rheo.v_0, self.alpha_h_vec,
self.fault.mu_over_2vs)
elif isinstance(self.fault.upper_rheo, NonlinearViscous):
full_state = flat_run_plvis(
self.t_eval_joint * 86400 * 365.25, self.ix_break_joint, self.ix_eq_joint,
self.fault.n_creeping_upper, self.fault.n_creeping_lower, self.fault.K_int,
self.fault.K_ext, self.v_plate_vec, self.v_init, self.slip_taper,
self.delta_tau_bounded, self.alpha_n_vec, self.n_vec, self.A_vec,
self.fault.mu_over_2vs)
else:
raise NotImplementedError
elif isinstance(self.fault.lower_rheo, NonlinearViscous):
if isinstance(self.fault.upper_rheo, NonlinearViscous):
full_state = flat_run_plvis_plvis(
self.t_eval_joint * 86400 * 365.25, self.ix_break_joint, self.ix_eq_joint,
self.fault.n_creeping_upper, self.fault.n_creeping_lower, self.fault.K_int,
self.fault.K_ext, self.v_plate_vec, self.v_init, self.slip_taper,
self.delta_tau_bounded, self.fault.upper_rheo.alpha_n,
self.fault.upper_rheo.n, self.fault.lower_rheo.alpha_n,
self.fault.lower_rheo.n, simple_rk4)
elif isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic):
full_state = flat_run_rdlog_plvis(
self.t_eval_joint * 86400 * 365.25, self.ix_break_joint, self.ix_eq_joint,
self.fault.n_creeping_upper, self.fault.n_creeping_lower, self.fault.K_int,
self.fault.K_ext, self.v_plate_vec, self.v_init, self.slip_taper,
self.delta_tau_bounded, self.fault.upper_rheo.v_0,
self.fault.upper_rheo.alpha_h, self.fault.lower_rheo.alpha_n,
self.fault.lower_rheo.n, simple_rk4)
else:
raise NotImplementedError
else:
raise NotImplementedError
# extract the observations that were actually requested
obs_state = full_state[:, self.ix_obs_joint].copy()
# since we're only calculating transient surface displacements, need to
# remove the tapered slip due to bounded stresses
obs_state[:self.fault.n_creeping_upper, :] -= self.slip_taper_ts.values.T
# convert to surface displacements
surf_disps = get_surface_displacements_plvis_plvis(
obs_state, self.fault.n_creeping_upper, self.fault.n_creeping_lower,
np.ascontiguousarray(self.G_surf[:, self.fault.n_locked:]),
self.deep_creep_slip)
return full_state, obs_state, surf_disps
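    # typical call sequence (a sketch; ``sim`` is an already-constructed
    # SubductionSimulation instance):
    #
    #     full_state, obs_state, surf_disps = sim.run()
    #     obs_zeroed = sim.zero_obs_at_eq(surf_disps)
    #     fig, ax = sim.plot_surface_displacements(obs_zeroed)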
def zero_obs_at_eq(self, surf_disps):
"""
Reset to zero the surface displacement timeseries every time an earthquake happens.
"""
obs_zeroed = surf_disps.copy()
slips_obs = np.logical_and(self.t_obs.min() <= self.eq_df.index,
self.t_obs.max() > self.eq_df.index)
n_slips_obs = slips_obs.sum()
if n_slips_obs == 0:
obs_zeroed -= obs_zeroed[:, 0].reshape(-1, 1)
else:
i_slips_obs = [np.argmax(self.t_obs >= t_eq) for t_eq
in self.eq_df.index.values[slips_obs]]
obs_zeroed[:, :i_slips_obs[0]] -= obs_zeroed[:, i_slips_obs[0] - 1].reshape(-1, 1)
obs_zeroed[:, i_slips_obs[0]:] -= obs_zeroed[:, i_slips_obs[0]].reshape(-1, 1)
for i in range(1, n_slips_obs):
obs_zeroed[:, i_slips_obs[i]:] -= obs_zeroed[:, i_slips_obs[i]].reshape(-1, 1)
return obs_zeroed
def _reduce_full_state(self, data):
# get all NaN columns
cols_all_nan = np.all(np.isnan(data), axis=0)
# check if there was early stopping
if cols_all_nan.sum() > 0:
# get the border indices where integrations have been skipped
ix_last, ix_first = np.flatnonzero(cols_all_nan)[[0, -1]]
ix_last -= 1
ix_first += 1
# get indices before and after the NaN period
ix_valid = np.r_[0:ix_last, ix_first:self.t_eval_joint.size]
# subset data
data = data[:, ix_valid]
t_sub = self.t_eval_joint[ix_valid].copy()
t_sub[ix_last:] -= self.t_eval_joint[ix_first] - self.t_eval_joint[ix_last]
n_cyc_completed = int(np.round(self.t_eval_joint[ix_last] / self.T_eff)) + 1
else:
t_sub = self.t_eval_joint.copy()
n_cyc_completed = self.n_cycles_max + 1
# done
return data, t_sub, n_cyc_completed
def plot_surface_displacements(self, obs_zeroed, obs_noisy=None):
"""
Plot the observers' surface displacement timeseries.
Parameters
----------
obs_zeroed : numpy.ndarray
Surface displacements as output by :meth:`~zero_obs_at_eq`.
obs_noisy : numpy.ndarray, optional
Noisy surface observations.
Returns
-------
matplotlib.figure.Figure
matplotlib.axes.Axes
"""
import matplotlib.pyplot as plt
# some helper variables
isort = np.argsort(self.pts_surf)
i_off = 3 * np.std(obs_zeroed.ravel())
# get float dates of observed earthquakes
slips_obs = np.logical_and(self.t_obs.min() <= self.eq_df.index,
self.t_obs.max() > self.eq_df.index)
n_slips_obs = slips_obs.sum()
if n_slips_obs > 0:
i_slips_obs = [np.argmax(self.t_obs >= t_eq) for t_eq
in self.eq_df.index.values[slips_obs]]
t_last_slips = [self.t_obs[islip] for islip in i_slips_obs]
else:
t_last_slips = []
# start plot
fig, ax = plt.subplots(nrows=2, sharex=True, layout="constrained")
for tslip in t_last_slips:
ax[0].axvline(tslip, c="0.7", zorder=-1)
ax[1].axvline(tslip, c="0.7", zorder=-1)
for i, ix in enumerate(isort):
if obs_noisy is not None:
ax[0].plot(self.t_obs, obs_noisy[ix, :] + i*i_off,
".", c="k", rasterized=True)
ax[1].plot(self.t_obs, obs_noisy[ix + self.n_stations, :] + i*i_off,
".", c="k", rasterized=True)
ax[0].plot(self.t_obs, obs_zeroed[ix, :] + i*i_off, c=f"C{i}")
ax[1].plot(self.t_obs, obs_zeroed[ix + self.n_stations, :] + i*i_off, c=f"C{i}")
ax[1].set_xlabel("Time")
ax[0].set_ylabel("Horizontal [m]")
ax[1].set_ylabel("Vertical [m]")
fig.suptitle("Surface Displacement")
return fig, ax
def plot_fault_velocities(self, full_state):
"""
Plot the velocities on all creeping fault patches.
Parameters
----------
full_state : numpy.ndarray
State matrix as output from :meth:`~run`.
Returns
-------
matplotlib.figure.Figure
matplotlib.axes.Axes
"""
import matplotlib.pyplot as plt
from matplotlib.colors import SymLogNorm
from cmcrameri import cm
# extract velocities
vels = full_state[np.r_[self.fault.n_creeping_upper:self.fault.n_state_upper,
self.fault.n_state_upper + self.fault.n_creeping_lower:
self.fault.n_state_upper + self.fault.n_state_lower],
:] / self.v_plate
# check whether the simulation spun up, and NaN data needs to be skipped
vels, t_sub, n_cyc_completed = self._reduce_full_state(vels)
# normalize time
t_sub /= self.T_eff
# prepare plot
norm = SymLogNorm(linthresh=1, vmin=-1, vmax=100)
if self.fault.lower_rheo is None:
fig, ax = plt.subplots(figsize=(10, 5), layout="constrained")
ax = [ax]
else:
fig, ax = plt.subplots(nrows=2, sharex=True, figsize=(10, 5), layout="constrained")
# plot velocities
c = ax[0].pcolormesh(t_sub,
self.fault.end_upper[0, self.fault.n_locked:] / 1e3,
vels[:self.fault.n_creeping_upper, :-1],
norm=norm, cmap=cm.vik, shading="flat")
ax[0].set_yticks(self.fault.end_upper[0, [self.fault.n_locked, -1]] / 1e3)
# add vertical lines for cycle breaks
for n in range(1, n_cyc_completed):
ax[0].axvline(n, c="k", lw=1)
# make the y-axis increasing downwards to mimic depth even though we're plotting x1
ax[0].invert_yaxis()
# repeat for lower interface, if simulated
if self.fault.lower_rheo is not None:
c = ax[1].pcolormesh(t_sub,
self.fault.end_lower[0, :] / 1e3,
-vels[self.fault.n_creeping_upper:, :-1],
norm=norm, cmap=cm.vik, shading="flat")
ax[1].set_yticks(self.fault.end_lower[0, [0, -1]] / 1e3)
# add horizontal lines to show where the lower interface is below the locked zone
ax[1].axhline(0, c="k", lw=1)
ax[1].axhline(self.fault.x1_lock / 1e3, c="k", lw=1)
for n in range(1, n_cyc_completed):
ax[1].axvline(n, c="k", lw=1)
ax[1].invert_yaxis()
# finish figure
if self.fault.lower_rheo is None:
ax[0].set_ylabel("Upper Interface\n$x_1$ [km]")
ax[0].set_xlabel("Normalized Time $t/T$")
else:
ax[0].set_ylabel("Upper Interface\n$x_1$ [km]")
ax[1].set_ylabel("Lower Interface\n$x_1$ [km]")
ax[1].set_xlabel("Normalized Time $t/T$")
fig.colorbar(c, ax=ax, location="right", orientation="vertical", fraction=0.05,
label="$v/v_{plate}$")
fig.suptitle("Normalized Fault Patch Velocities")
return fig, ax
def plot_fault_slip(self, full_state, deficit=True, include_locked=True, include_deep=True):
"""
Plot the cumulative slip (deficit) for the fault patches.
Parameters
----------
full_state : numpy.ndarray
State matrix as output from :meth:`~run`.
deficit : bool, optional
If ``True`` (default), remove the plate velocity to plot slip deficit,
otherwise keep it included.
include_locked : bool, optional
If ``True`` (default), also plot the slip on the locked patches.
include_deep : bool, optional
If ``True`` (default), also plot the slip on the semi-infinite patches
at the end of the interfaces.
Returns
-------
matplotlib.figure.Figure
matplotlib.axes.Axes
"""
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize, SymLogNorm
from cmcrameri import cm
# extract slip
slip = full_state[np.r_[:self.fault.n_creeping_upper,
self.fault.n_state_upper:
self.fault.n_state_upper + self.fault.n_creeping_lower], :]
# check whether the simulation spun up, and NaN data needs to be skipped
slip, t_sub, n_cyc_completed = self._reduce_full_state(slip)
# normalize to slip per full cycle
cum_slip_per_cycle = self.v_plate_eff * self.T_eff * 86400 * 365.25
slip /= cum_slip_per_cycle
# add optional slip histories, if desired
if include_locked:
eq_df_joint = pd.DataFrame(
index=(self.eq_df.index.values.reshape(1, -1)
+ self.T_eff * np.arange(n_cyc_completed).reshape(-1, 1)
).ravel(),
data=np.tile(self.eq_df.values, (n_cyc_completed, 1)))
locked_slip = eq_df_joint.cumsum(axis=0) \
.reindex(index=t_sub, method="ffill", fill_value=0).values.T
locked_slip /= cum_slip_per_cycle
if include_deep:
deep_creep_slip = (np.tile(t_sub.reshape(1, -1), (3, 1))
* np.array([1, -1, -1]).reshape(3, 1)
* self.v_plate_eff * 86400 * 365.25)
deep_creep_slip /= cum_slip_per_cycle
# remove plate velocity to get slip deficit, if desired
if deficit:
cmap = cm.vik
norm = SymLogNorm(linthresh=1e-2, vmin=-1, vmax=1)
slip[:self.fault.n_creeping_upper] -= t_sub.reshape(1, -1) / self.T_eff
slip[self.fault.n_creeping_upper:] += t_sub.reshape(1, -1) / self.T_eff
slip -= slip[:, -2].reshape(-1, 1)
if include_locked:
locked_slip -= t_sub.reshape(1, -1) / self.T_eff
if include_deep:
deep_creep_slip -= (t_sub.reshape(1, -1)
* np.array([1, -1, -1]).reshape(3, 1)) / self.T_eff
else:
norm = Normalize(vmin=0, vmax=n_cyc_completed)
cmap = cm.batlow
# normalize time
t_sub /= self.T_eff
# prepare figure
nrows = (1 + int(self.fault.lower_rheo is not None)
+ int(include_locked) + int(include_deep) * 3)
hr_locked = ((self.fault.end_upper[0, self.fault.n_locked] - self.fault.end_upper[0, 0])
/ (self.fault.end_lower[0, -1] - self.fault.end_lower[0, 0]))
hr_lower = ((self.fault.end_lower[0, -1] - self.fault.end_lower[0, 0])
/ (self.fault.end_upper[0, -1] - self.fault.end_upper[0, self.fault.n_locked]))
hr = ([hr_locked] * int(include_locked) + [1]
+ [hr_locked, hr_locked] * int(include_deep)
+ [hr_lower] * int(self.fault.lower_rheo is not None)
+ [hr_locked] * int(include_deep))
fig, ax = plt.subplots(nrows=nrows, sharex=True, gridspec_kw={"height_ratios": hr},
figsize=(10, 5), layout="constrained")
iax = 0
# plot locked
if include_locked:
c = ax[iax].pcolormesh(t_sub,
self.fault.end_upper[0, :self.fault.n_locked + 1] / 1e3,
locked_slip[:, :-1],
norm=norm, cmap=cmap, shading="flat")
ax[iax].set_ylabel("Locked\n$x_1$ [km]")
temp_x1 = self.fault.end_upper[0, [0, self.fault.n_locked]] / 1e3
ax[iax].set_yticks(temp_x1, [f"{x:.0f}" for x in temp_x1])
iax += 1
# plot upper creeping
c = ax[iax].pcolormesh(t_sub,
self.fault.end_upper[0, self.fault.n_locked:] / 1e3,
slip[:self.fault.n_creeping_upper, :-1],
norm=norm, cmap=cmap, shading="flat")
ax[iax].set_ylabel("Creeping\n$x_1$ [km]")
temp_x1 = self.fault.end_upper[0, [self.fault.n_locked, -1]] / 1e3
ax[iax].set_yticks(temp_x1, [f"{x:.0f}" for x in temp_x1])
iax += 1
# plot end patch on upper interface
if include_deep:
temp_x1 = np.array([self.fault.end_upper[0, -1],
self.mid_deep_x1[0]]) / 1e3
c = ax[iax].pcolormesh(t_sub,
temp_x1,
deep_creep_slip[0, :-1].reshape(1, -1),
norm=norm, cmap=cmap, shading="flat")
ax[iax].set_ylabel("Deep Creep\n$x_1$ [km]")
ax[iax].set_yticks(temp_x1, [f"{temp_x1[0]:.0f}", "$-\\infty$"])
iax += 1
# plot left end patch on lower interface
if include_deep:
temp_x1 = np.array([self.mid_deep_x1[1],
self.fault.end_lower[0, 0]]) / 1e3
c = ax[iax].pcolormesh(t_sub,
temp_x1,
-deep_creep_slip[1, :-1].reshape(1, -1),
norm=norm, cmap=cmap, shading="flat")
ax[iax].set_ylabel("Deep Creep\n$x_1$ [km]")
ax[iax].set_yticks(temp_x1, ["$-\\infty$", f"{temp_x1[1]:.0f}"])
iax += 1
# plot lower creeping
if self.fault.lower_rheo is not None:
c = ax[iax].pcolormesh(t_sub,
self.fault.end_lower[0, :] / 1e3,
-slip[self.fault.n_creeping_upper:, :-1],
norm=norm, cmap=cmap, shading="flat")
ax[iax].axhline(0, c="k", lw=1)
ax[iax].axhline(self.fault.x1_lock / 1e3, c="k", lw=1)
ax[iax].set_ylabel("Creeping\n$x_1$ [km]")
temp_x1 = self.fault.end_lower[0, [0, -1]] / 1e3
ax[iax].set_yticks(temp_x1, [f"{x:.0f}" for x in temp_x1])
iax += 1
# plot right end patch on lower interface
if include_deep:
temp_x1 = np.array([self.fault.end_lower[0, -1],
self.mid_deep_x1[2]]) / 1e3
c = ax[iax].pcolormesh(t_sub,
temp_x1,
-deep_creep_slip[2, :-1].reshape(1, -1),
norm=norm, cmap=cmap, shading="flat")
ax[iax].set_ylabel("Deep Creep\n$x_1$ [km]")
ax[iax].set_yticks(temp_x1, [f"{temp_x1[0]:.0f}", "$-\\infty$"])
iax += 1
# finish figure
for iax in range(len(ax)):
for n in range(1, n_cyc_completed):
ax[iax].axvline(n, c="k", lw=1)
ax[iax].invert_yaxis()
ax[-1].set_xlabel("Normalized Time $t/T$")
fig.colorbar(c, ax=ax, location="right", orientation="vertical", fraction=0.05,
label="$(s - t*v_{plate})/s_{full}$" if deficit else "$s/s_{full}$")
suptitle = "Normalized Fault Patch Slip"
if deficit:
suptitle += " Deficit"
fig.suptitle(suptitle)
return fig, ax
def plot_eq_velocities(self, full_state):
"""
Plot the before and after velocities on all creeping fault patches
for each distinct earthquake.
Parameters
----------
full_state : numpy.ndarray
State matrix as output from :meth:`~run`.
Returns
-------
matplotlib.figure.Figure
matplotlib.axes.Axes
"""
import matplotlib.pyplot as plt
# get indices of each last earthquake in last cycle
temp = self.eq_df.astype(bool).drop_duplicates(keep="last")
time_eq_last = temp.index.values + (self.n_cycles_max - 1) * self.T_eff
tdiff = np.array([np.min(np.abs(self.t_eval_joint - tlast)) for tlast in time_eq_last])
if np.any(tdiff > 0):
warn("Couldn't find exact indices, using time differences of "
f"{tdiff * 365.25 * 86400} seconds.")
ix_eq_last = [np.argmin(np.abs(self.t_eval_joint - tlast)) for tlast in time_eq_last]
n_eq_found = len(ix_eq_last)
assert n_eq_found == (self.Ds_0 > 0).sum(), \
"Couldn't find indices of each last non-zero earthquake in the " \
"last cycle, check for rounding errors."
# calculate average slip for plotted earthquakes
slip_last = self.eq_df.loc[temp.index, :]
slip_avg = [slip_last.iloc[ieq, np.flatnonzero(temp.iloc[ieq, :])].mean()
for ieq in range(n_eq_found)]
# extract velocities
vels = full_state[np.r_[self.fault.n_creeping_upper:self.fault.n_state_upper,
self.fault.n_state_upper + self.fault.n_creeping_lower:
self.fault.n_state_upper + self.fault.n_state_lower],
:] / self.v_plate
# prepare plot
fig, ax = plt.subplots(nrows=n_eq_found, ncols=1 if self.fault.lower_rheo is None else 2,
sharey=True, layout="constrained")
ax = np.asarray(ax).reshape(n_eq_found, -1)
# loop over earthquakes
for irow, ieq in enumerate(ix_eq_last):
# repeat plot for before and after
for ioff, label in enumerate(["before", "after"]):
ax[irow, 0].set_yscale("symlog", linthresh=1)
ax[irow, 0].plot(self.fault.mid_x1_creeping[:self.fault.n_creeping_upper] / 1e3,
vels[:self.fault.n_creeping_upper, ieq - 1 + ioff],
c=f"C{ioff}", label=label)
if self.fault.lower_rheo is not None:
ax[irow, 1].set_yscale("symlog", linthresh=1)
ax[irow, 1].plot(
self.fault.mid_x1_creeping[self.fault.n_creeping_upper:] / 1e3,
-vels[self.fault.n_creeping_upper:, ieq - 1 + ioff],
c=f"C{ioff}", label=label)
# finish plot
for irow in range(n_eq_found):
ax[irow, 0].set_title(f"Upper Interface: $s={slip_avg[irow]:.2g}$ m")
ax[irow, 0].legend()
ax[irow, 0].set_xlabel("$x_1$ [km]")
ax[irow, 0].set_ylabel("$v/v_{plate}$")
if self.fault.lower_rheo is not None:
ax[irow, 1].set_title(f"Lower Interface: $s={slip_avg[irow]:.2g}$ m")
ax[irow, 1].axvline(0, c="k", lw=1)
ax[irow, 1].axvline(self.fault.x1_lock / 1e3, c="k", lw=1)
ax[irow, 1].tick_params(labelleft=True)
ax[irow, 1].legend()
ax[irow, 1].set_xlabel("$x_1$ [km]")
ax[irow, 1].set_ylabel("$v/v_{plate}$")
fig.suptitle("Normalized Earthquake Velocity Changes")
return fig, ax
def plot_fault(self):
"""
Plot the fault.
Returns
-------
matplotlib.figure.Figure
matplotlib.axes.Axes
"""
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(10, 3), layout="constrained")
ax.plot(self.fault.end_upper[0, :self.fault.n_locked + 1]/1e3,
self.fault.end_upper[1, :self.fault.n_locked + 1]/1e3,
marker="|", markeredgecolor="k",
label="Locked")
ax.plot(self.fault.end_upper[0, self.fault.n_locked:]/1e3,
self.fault.end_upper[1, self.fault.n_locked:]/1e3,
marker="|", markeredgecolor="k",
label="Upper Creeping")
ax.plot(self.fault.end_lower[0, :]/1e3,
self.fault.end_lower[1, :]/1e3,
marker="|", markeredgecolor="k",
label="Lower Creeping")
ax.plot(self.pts_surf / 1e3, np.zeros_like(self.pts_surf),
"^", markeredgecolor="none", markerfacecolor="k",
label="Observers")
ax.axhline(0, lw=1, c="0.5", zorder=-1)
ax.legend()
ax.set_xlabel("$x_1$ [km]")
ax.set_ylabel("$x_2$ [km]")
ax.set_title("Fault Mesh and Observer Locations")
ax.set_aspect("equal")
return fig, ax
def plot_slip_phases(self, full_state, post_inter_transition=0.01, normalize=True):
"""
Plot the cumulative slip on the fault for the three different
phases (coseismic, early postseismic, and interseismic).
Only works if there is a single earthquake in the sequence.
Parameters
----------
full_state : numpy.ndarray
State matrix as output from :meth:`~run`.
post_inter_transition : float, optional
Fraction of the recurrence time that should be considered
            early postseismic and not interseismic.
        normalize : bool, optional
            If ``True`` (default), normalize the cumulative slip by the total
            expected cumulative slip over the entire cycle.
Returns
-------
matplotlib.figure.Figure
matplotlib.axes.Axes
"""
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
# check that the sequence only has one earthquake
if not self.n_eq == 1:
raise NotImplementedError("Don't know how to plot slip phases if "
"multiple earthquakes are present in the sequence.")
# get coseismic slip
co = np.concatenate([self.eq_df.values.ravel(),
self.slip_taper.ravel()])
# get index of last earthquake in last cycle
time_eq_last = self.eq_df.index[0] + (self.n_cycles_max - 1) * self.T_eff
ix_eq_last = (np.flatnonzero(np.isin(self.t_eval_joint, time_eq_last))[0]
- self.ix_break_joint[-2])
# reorganize interseismic slip
slip = full_state[:self.fault.n_creeping_upper, self.ix_break_joint[-2]:]
slip_pre = slip[:, :ix_eq_last]
slip_post = slip[:, ix_eq_last:]
slip_pre += (slip_post[:, -1] - slip_pre[:, 0]).reshape(-1, 1)
slip_joint = np.hstack([slip_post, slip_pre])
slip_joint -= slip_joint[:, 0].reshape(-1, 1)
# same for time
t_last = self.t_eval_joint[self.ix_break_joint[-2]:].copy()
t_last_pre = t_last[:ix_eq_last]
t_last_post = t_last[ix_eq_last:]
t_last_pre += t_last_post[-1] - t_last_pre[0]
t_last_joint = np.concatenate([t_last_post, t_last_pre])
t_last_joint -= t_last_joint[0]
# since slip_joint is now already cumulative slip since the earthquake,
# with the tapered slip removed, we can just read out the early
# postseismic and rest interseismic cumulative slip distributions
post = interp1d(t_last_joint, slip_joint)(post_inter_transition * self.T_eff)
inter = slip_joint[:, -1] - post
post = np.concatenate([np.zeros(self.fault.n_locked), post])
inter = np.concatenate([np.zeros(self.fault.n_locked), inter])
# optionally, normalize by total expected cumulative slip over the entire cycle
if normalize:
total_slip = self.T_eff * self.v_plate * 86400 * 365.25
co /= total_slip
post /= total_slip
inter /= total_slip
# make figure
fig, ax = plt.subplots(layout="constrained")
ax.plot(self.fault.mid_x1[:self.fault.n_upper] / 1e3, co, label="Coseismic")
ax.plot(self.fault.mid_x1[:self.fault.n_upper] / 1e3, post, label="Postseismic")
ax.plot(self.fault.mid_x1[:self.fault.n_upper] / 1e3, inter, label="Interseismic")
ax.legend()
ax.set_xlabel("$x_1$ [km]")
ax.set_ylabel("Normalized cumulative slip [-]" if normalize
else "Cumulative Slip [m]")
ax.set_title("Slip Phases (Post-/Interseismic cutoff at "
f"{post_inter_transition:.1%} " "$T_{rec}$)")
return fig, ax
def plot_viscosity(self, full_state, return_viscosities=False):
"""
Plot the viscosity structure with depth for the steady state, as well as
        for the immediate pre- and postseismic velocities.
For multiple earthquakes, it will use the minimum preseismic and maximum
postseismic velocities.
Parameters
----------
full_state : numpy.ndarray
State matrix as output from :meth:`~run`.
return_viscosities : bool, optional
Also return the preseismic, steady-state, and postseismic viscosities.
Returns
-------
matplotlib.figure.Figure
matplotlib.axes.Axes
"""
import matplotlib.pyplot as plt
# get indices of each last earthquake in last cycle
temp = self.eq_df.astype(bool).drop_duplicates(keep="last")
time_eq_last = temp.index.values + (self.n_cycles_max - 1) * self.T_eff
tdiff = np.array([np.min(np.abs(self.t_eval_joint - tlast)) for tlast in time_eq_last])
if np.any(tdiff > 0):
warn("Couldn't find exact indices, using time differences of "
f"{tdiff * 365.25 * 86400} seconds.")
ix_eq_last = [np.argmin(np.abs(self.t_eval_joint - tlast)) for tlast in time_eq_last]
n_eq_found = len(ix_eq_last)
assert n_eq_found == (self.Ds_0 > 0).sum(), \
"Couldn't find indices of each last non-zero earthquake in the " \
"last cycle, check for rounding errors."
# calculate average slip for plotted earthquakes
slip_last = self.eq_df.loc[temp.index, :]
slip_avg = [slip_last.iloc[ieq, np.flatnonzero(temp.iloc[ieq, :])].mean()
for ieq in range(n_eq_found)]
# extract preseismic velocities
vels_pre = np.array([full_state[self.fault.n_creeping_upper:self.fault.n_state_upper,
ix - 1] for ix in ix_eq_last]).T
vels_post = np.array([full_state[self.fault.n_creeping_upper:self.fault.n_state_upper,
ix] for ix in ix_eq_last]).T
if isinstance(self.fault.upper_rheo, NonlinearViscous):
# calculate viscosity profiles
vis_pre = SubductionSimulation.get_alpha_eff(self.alpha_n_vec.reshape(-1, 1),
self.n_vec.reshape(-1, 1),
vels_pre)
vis_ss = SubductionSimulation.get_alpha_eff(self.alpha_n_vec,
self.n_vec,
self.v_plate_eff)
vis_post = SubductionSimulation.get_alpha_eff(self.alpha_n_vec.reshape(-1, 1),
self.n_vec.reshape(-1, 1),
vels_post)
elif isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic):
vis_pre = SubductionSimulation.get_alpha_eff_from_alpha_h(
self.alpha_h_vec.reshape(-1, 1), vels_pre)
vis_ss = SubductionSimulation.get_alpha_eff_from_alpha_h(
self.alpha_h_vec.reshape(-1, 1), self.v_plate_eff)
vis_post = SubductionSimulation.get_alpha_eff_from_alpha_h(
self.alpha_h_vec.reshape(-1, 1), vels_post)
else:
raise NotImplementedError()
vis_mins = 10**np.floor(np.log10(np.ma.masked_invalid(vis_post*0.999).min(axis=0)))
vis_maxs = 10**np.ceil(np.log10(np.ma.masked_invalid(vis_pre*1.001).max(axis=0)))
# make plot
fig, ax = plt.subplots(ncols=n_eq_found, sharey=True, layout="constrained")
ax = np.atleast_1d(ax)
ax[0].set_ylabel("$x_2$ [km]")
for i in range(n_eq_found):
ax[i].fill_betweenx([0, self.fault.mid_x2_creeping[1] / 1e3],
vis_mins[i], vis_maxs[i], facecolor="0.8", label="Locked")
ax[i].fill_betweenx(self.fault.mid_x2_creeping[:self.fault.n_creeping_upper] / 1e3,
vis_pre[:, i], vis_post[:, i], alpha=0.5, label="Simulated")
ax[i].plot(vis_ss,
self.fault.mid_x2_creeping[:self.fault.n_creeping_upper] / 1e3,
label="Plate Rate")
ax[i].set_xscale("log")
ax[i].legend(loc="lower left")
ax[i].set_ylim(self.fault.mid_x2_creeping[self.fault.n_creeping_upper - 1] / 1e3,
0)
ax[i].set_xlim(vis_mins[i], vis_maxs[i])
ax[i].set_title(f"$s={slip_avg[i]:.2g}$ m")
ax[i].set_xlabel(r"$\alpha_{eff}$ [Pa * s/m]")
# finish
if return_viscosities:
return fig, ax, vis_pre, vis_ss, vis_post
else:
return fig, ax
def plot_viscosity_timeseries(self, full_state, return_viscosities=False):
"""
Plot the viscosity timeseries with depth for the entire last cycle.
Parameters
----------
full_state : numpy.ndarray
State matrix as output from :meth:`~run`.
return_viscosities : bool, optional
Also return the viscosity timeseries.
Returns
-------
matplotlib.figure.Figure
matplotlib.axes.Axes
"""
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from cmcrameri import cm
# check that the sequence only has one earthquake
if not self.n_eq == 1:
raise NotImplementedError("Don't know how to plot viscosity timeseries if "
"multiple earthquakes are present in the sequence.")
# get index of last earthquake in last cycle
time_eq_last = self.eq_df.index[0] + (self.n_cycles_max - 1) * self.T_eff
ix_eq_last = (np.flatnonzero(np.isin(self.t_eval_joint, time_eq_last))[0]
- self.ix_break_joint[-2])
# reorganize interseismic velocities
vels = full_state[self.fault.n_creeping_upper:2*self.fault.n_creeping_upper,
self.ix_break_joint[-2]:]
vels_pre = vels[:, :ix_eq_last]
vels_post = vels[:, ix_eq_last:]
vels = np.hstack([vels_post, vels_pre])
# same for time
t_last = self.t_eval_joint[self.ix_break_joint[-2]:].copy()
t_last_pre = t_last[:ix_eq_last]
t_last_post = t_last[ix_eq_last:]
t_last_pre += t_last_post[-1] - t_last_pre[0]
t_last_joint = np.concatenate([t_last_post, t_last_pre])
t_last_joint -= t_last_joint[0]
# convert velocities to effective viscosity
if isinstance(self.fault.upper_rheo, NonlinearViscous):
vis_ts = SubductionSimulation.get_alpha_eff(self.alpha_n_vec.reshape(-1, 1),
self.n_vec.reshape(-1, 1),
vels)
elif isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic):
vis_ts = SubductionSimulation.get_alpha_eff_from_alpha_h(
self.alpha_h_vec.reshape(-1, 1), vels)
else:
raise NotImplementedError()
# get index of deep transition
patch_depths = -self.fault.mid_x2_creeping[:self.fault.n_creeping_upper]
ix_deep = np.argmin(np.abs(patch_depths - self.fault.upper_rheo.deep_transition))
        # subset viscosities to skip the zero-velocity uppermost patch
vis_ts = vis_ts[1:, :]
# get percentage of final viscosity
rel_vis = vis_ts / vis_ts[:, -1][:, None]
rel_vis_masked = np.ma.MaskedArray(rel_vis, np.diff(rel_vis, axis=1,
prepend=rel_vis[:, 0][:, None]
) <= 0).filled(np.NaN)
levels = [0.2, 0.4, 0.6, 0.8]
rel_vis_iquant = np.concatenate([np.nanargmax(rel_vis_masked > lvl, axis=1, keepdims=True)
for lvl in levels], axis=1)
# normalize time
t_sub = t_last_joint / self.T_eff
# prepare plot
fig, ax = plt.subplots(figsize=(10, 5), layout="constrained")
# plot velocities
c = ax.pcolormesh(
t_sub,
np.abs(self.fault.end_upper[1, self.fault.n_locked+1:self.fault.n_locked+ix_deep+1]
/ 1e3),
vis_ts[:ix_deep-1, :-1],
norm=LogNorm(vmin=10**np.floor(np.log10(np.median(vis_ts[:ix_deep-1, 0]))),
vmax=10**np.ceil(np.log10(np.max(vis_ts[:ix_deep-1, -1])))),
cmap=cm.batlow, shading="flat")
for i in range(len(levels)):
ax.plot(t_sub[rel_vis_iquant[:ix_deep-1, i]],
patch_depths[1:ix_deep] / 1e3,
color="w")
ax.set_xscale("symlog", linthresh=1e-3)
ax.set_xlim([0, 1])
# make the y-axis increasing downwards to mimic depth even though we're plotting x1
ax.invert_yaxis()
# finish figure
ax.set_ylabel("Depth $x_2$ [km]")
ax.set_xlabel("Normalized Time $t/T$")
fig.colorbar(c, ax=ax, location="right", orientation="vertical", fraction=0.05,
label=r"$\alpha_{eff}$")
fig.suptitle("Effective Viscosity Timeseries")
# finish
if return_viscosities:
return fig, ax, t_sub, vis_ts
else:
return fig, ax
|
tobiscode/seqeas-public
|
seqeas/subduction2d.py
|
subduction2d.py
|
py
| 145,621 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "abc.ABC",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "numpy.logical_xor",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "scipy.interpolate.interp1d",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "scipy.interpolate.interp1d",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "numpy.logical_xor",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "scipy.interpolate.interp1d",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "numpy.sign",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "numba.njit",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "numba.float64",
"line_number": 284,
"usage_type": "name"
},
{
"api_name": "numba.njit",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "numba.float64",
"line_number": 321,
"usage_type": "name"
},
{
"api_name": "numpy.sign",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "numba.njit",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "numba.float64",
"line_number": 354,
"usage_type": "name"
},
{
"api_name": "numba.njit",
"line_number": 393,
"usage_type": "call"
},
{
"api_name": "numba.float64",
"line_number": 393,
"usage_type": "name"
},
{
"api_name": "numpy.sign",
"line_number": 471,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 473,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 476,
"usage_type": "call"
},
{
"api_name": "numba.njit",
"line_number": 428,
"usage_type": "call"
},
{
"api_name": "numba.float64",
"line_number": 428,
"usage_type": "argument"
},
{
"api_name": "numba.float64",
"line_number": 429,
"usage_type": "argument"
},
{
"api_name": "numpy.exp",
"line_number": 523,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 526,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 529,
"usage_type": "call"
},
{
"api_name": "numba.njit",
"line_number": 481,
"usage_type": "call"
},
{
"api_name": "numba.float64",
"line_number": 481,
"usage_type": "argument"
},
{
"api_name": "numba.float64",
"line_number": 482,
"usage_type": "argument"
},
{
"api_name": "numpy.concatenate",
"line_number": 591,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 592,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 597,
"usage_type": "call"
},
{
"api_name": "numpy.ones_like",
"line_number": 598,
"usage_type": "call"
},
{
"api_name": "numpy.ones_like",
"line_number": 599,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 601,
"usage_type": "call"
},
{
"api_name": "numpy.ones_like",
"line_number": 602,
"usage_type": "call"
},
{
"api_name": "numpy.ones_like",
"line_number": 603,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 605,
"usage_type": "call"
},
{
"api_name": "numba.njit",
"line_number": 534,
"usage_type": "call"
},
{
"api_name": "numba.float64",
"line_number": 534,
"usage_type": "argument"
},
{
"api_name": "numba.int64",
"line_number": 534,
"usage_type": "argument"
},
{
"api_name": "numba.float64",
"line_number": 535,
"usage_type": "argument"
},
{
"api_name": "numpy.exp",
"line_number": 663,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 666,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 667,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 672,
"usage_type": "call"
},
{
"api_name": "numpy.ones_like",
"line_number": 673,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 675,
"usage_type": "call"
},
{
"api_name": "numpy.ones_like",
"line_number": 676,
"usage_type": "call"
},
{
"api_name": "numpy.ones_like",
"line_number": 677,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 679,
"usage_type": "call"
},
{
"api_name": "numba.njit",
"line_number": 608,
"usage_type": "call"
},
{
"api_name": "numba.float64",
"line_number": 608,
"usage_type": "argument"
},
{
"api_name": "numba.int64",
"line_number": 608,
"usage_type": "argument"
},
{
"api_name": "numba.float64",
"line_number": 609,
"usage_type": "argument"
},
{
"api_name": "numpy.zeros",
"line_number": 691,
"usage_type": "call"
},
{
"api_name": "numba.njit",
"line_number": 683,
"usage_type": "call"
},
{
"api_name": "numba.float64",
"line_number": 683,
"usage_type": "argument"
},
{
"api_name": "numba.int64",
"line_number": 683,
"usage_type": "argument"
},
{
"api_name": "numba.float64",
"line_number": 684,
"usage_type": "argument"
},
{
"api_name": "numpy.zeros",
"line_number": 769,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 770,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 773,
"usage_type": "call"
},
{
"api_name": "numpy.NaN",
"line_number": 774,
"usage_type": "attribute"
},
{
"api_name": "numpy.concatenate",
"line_number": 775,
"usage_type": "call"
},
{
"api_name": "numpy.sort",
"line_number": 784,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 784,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 786,
"usage_type": "call"
},
{
"api_name": "numba.objmode",
"line_number": 794,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.solve_ivp",
"line_number": 795,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 804,
"usage_type": "call"
},
{
"api_name": "numpy.ascontiguousarray",
"line_number": 811,
"usage_type": "call"
},
{
"api_name": "numpy.ascontiguousarray",
"line_number": 812,
"usage_type": "call"
},
{
"api_name": "numpy.ascontiguousarray",
"line_number": 814,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 826,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 827,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 828,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 849,
"usage_type": "call"
},
{
"api_name": "numba.njit",
"line_number": 710,
"usage_type": "call"
},
{
"api_name": "numba.int64",
"line_number": 710,
"usage_type": "argument"
},
{
"api_name": "numba.float64",
"line_number": 712,
"usage_type": "argument"
},
{
"api_name": "numba.float64",
"line_number": 710,
"usage_type": "name"
},
{
"api_name": "numba.float64",
"line_number": 711,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 920,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 921,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 922,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 923,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 925,
"usage_type": "call"
},
{
"api_name": "numpy.NaN",
"line_number": 926,
"usage_type": "attribute"
},
{
"api_name": "numpy.concatenate",
"line_number": 927,
"usage_type": "call"
},
{
"api_name": "numpy.sort",
"line_number": 937,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 937,
"usage_type": "call"
},
{
"api_name": "numba.objmode",
"line_number": 945,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.solve_ivp",
"line_number": 946,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 955,
"usage_type": "call"
},
{
"api_name": "numpy.ascontiguousarray",
"line_number": 962,
"usage_type": "call"
},
{
"api_name": "numpy.ascontiguousarray",
"line_number": 963,
"usage_type": "call"
},
{
"api_name": "numpy.ascontiguousarray",
"line_number": 965,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 975,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 976,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 977,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 978,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 979,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 1000,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 1013,
"usage_type": "call"
},
{
"api_name": "numba.njit",
"line_number": 865,
"usage_type": "call"
},
{
"api_name": "numba.int64",
"line_number": 865,
"usage_type": "argument"
},
{
"api_name": "numba.float64",
"line_number": 867,
"usage_type": "argument"
},
{
"api_name": "numba.float64",
"line_number": 865,
"usage_type": "name"
},
{
"api_name": "numba.float64",
"line_number": 866,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 1080,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 1081,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 1086,
"usage_type": "call"
},
{
"api_name": "numpy.NaN",
"line_number": 1087,
"usage_type": "attribute"
},
{
"api_name": "numpy.concatenate",
"line_number": 1088,
"usage_type": "call"
},
{
"api_name": "numpy.sort",
"line_number": 1097,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 1097,
"usage_type": "call"
},
{
"api_name": "numba.objmode",
"line_number": 1107,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.solve_ivp",
"line_number": 1108,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 1137,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 1138,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 1139,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 1140,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 1141,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 1160,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 1161,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 1162,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 1165,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 1166,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 1167,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 1168,
"usage_type": "call"
},
{
"api_name": "numba.njit",
"line_number": 1019,
"usage_type": "call"
},
{
"api_name": "numba.int64",
"line_number": 1019,
"usage_type": "argument"
},
{
"api_name": "numba.float64",
"line_number": 1021,
"usage_type": "argument"
},
{
"api_name": "numba.boolean",
"line_number": 1021,
"usage_type": "argument"
},
{
"api_name": "numba.float64",
"line_number": 1019,
"usage_type": "name"
},
{
"api_name": "numba.float64",
"line_number": 1020,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 1247,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 1248,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 1249,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 1250,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 1254,
"usage_type": "call"
},
{
"api_name": "numpy.NaN",
"line_number": 1255,
"usage_type": "attribute"
},
{
"api_name": "numpy.concatenate",
"line_number": 1256,
"usage_type": "call"
},
{
"api_name": "numpy.sort",
"line_number": 1265,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 1265,
"usage_type": "call"
},
{
"api_name": "numba.objmode",
"line_number": 1275,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.solve_ivp",
"line_number": 1276,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 1298,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 1300,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 1305,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 1306,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 1307,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 1308,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 1309,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 1328,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 1331,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 1332,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 1333,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 1334,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 1347,
"usage_type": "call"
},
{
"api_name": "numba.njit",
"line_number": 1187,
"usage_type": "call"
},
{
"api_name": "numba.int64",
"line_number": 1187,
"usage_type": "argument"
},
{
"api_name": "numba.float64",
"line_number": 1189,
"usage_type": "argument"
},
{
"api_name": "numba.boolean",
"line_number": 1189,
"usage_type": "argument"
},
{
"api_name": "numba.float64",
"line_number": 1187,
"usage_type": "name"
},
{
"api_name": "numba.float64",
"line_number": 1188,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 1388,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 1393,
"usage_type": "call"
},
{
"api_name": "numba.njit",
"line_number": 1355,
"usage_type": "call"
},
{
"api_name": "numba.int64",
"line_number": 1355,
"usage_type": "argument"
},
{
"api_name": "numba.float64",
"line_number": 1355,
"usage_type": "name"
},
{
"api_name": "numpy.pi",
"line_number": 1464,
"usage_type": "attribute"
},
{
"api_name": "numpy.sin",
"line_number": 1527,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 1533,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 1553,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 1554,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 1555,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 1556,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 1557,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 1560,
"usage_type": "call"
},
{
"api_name": "numpy.tan",
"line_number": 1565,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 1566,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 1566,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 1566,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 1567,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 1567,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 1569,
"usage_type": "call"
},
{
"api_name": "numpy.cumsum",
"line_number": 1569,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 1570,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 1571,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 1574,
"usage_type": "call"
},
{
"api_name": "numpy.cumsum",
"line_number": 1574,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 1575,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 1576,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 1578,
"usage_type": "call"
},
{
"api_name": "numpy.cumsum",
"line_number": 1579,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 1580,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 1581,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 1582,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 1586,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 1588,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 1590,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 1600,
"usage_type": "call"
},
{
"api_name": "numpy.tan",
"line_number": 1605,
"usage_type": "call"
},
{
"api_name": "numpy.ones_like",
"line_number": 1631,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 1632,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 1633,
"usage_type": "attribute"
},
{
"api_name": "numpy.stack",
"line_number": 1636,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 1636,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 1636,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 1638,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 1638,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 1638,
"usage_type": "call"
},
{
"api_name": "kernels2d.Klinedisp",
"line_number": 1641,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 1649,
"usage_type": "call"
},
{
"api_name": "numpy.einsum",
"line_number": 1651,
"usage_type": "call"
},
{
"api_name": "kernels2d.Klinedisp",
"line_number": 1655,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 1663,
"usage_type": "call"
},
{
"api_name": "numpy.einsum",
"line_number": 1665,
"usage_type": "call"
},
{
"api_name": "numpy.tan",
"line_number": 1683,
"usage_type": "call"
},
{
"api_name": "numpy.atleast_1d",
"line_number": 1766,
"usage_type": "call"
},
{
"api_name": "numpy.atleast_1d",
"line_number": 1768,
"usage_type": "call"
},
{
"api_name": "numpy.atleast_1d",
"line_number": 1771,
"usage_type": "call"
},
{
"api_name": "numpy.atleast_1d",
"line_number": 1773,
"usage_type": "call"
},
{
"api_name": "numpy.atleast_1d",
"line_number": 1776,
"usage_type": "call"
},
{
"api_name": "numpy.atleast_1d",
"line_number": 1778,
"usage_type": "call"
},
{
"api_name": "numpy.logical_and",
"line_number": 1789,
"usage_type": "call"
},
{
"api_name": "numpy.lcm.reduce",
"line_number": 1794,
"usage_type": "call"
},
{
"api_name": "numpy.lcm",
"line_number": 1794,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.default_rng",
"line_number": 1802,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 1802,
"usage_type": "attribute"
},
{
"api_name": "numpy.log",
"line_number": 1806,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 1809,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 1810,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_number": 1818,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 1826,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 1828,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 1832,
"usage_type": "call"
},
{
"api_name": "numpy.ma.masked_equal",
"line_number": 1833,
"usage_type": "call"
},
{
"api_name": "numpy.ma",
"line_number": 1833,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 1834,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 1836,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 1838,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 1844,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 1858,
"usage_type": "call"
},
{
"api_name": "pandas.Period",
"line_number": 1861,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 1863,
"usage_type": "call"
},
{
"api_name": "numpy.cumsum",
"line_number": 1865,
"usage_type": "call"
},
{
"api_name": "numpy.around",
"line_number": 1868,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 1870,
"usage_type": "call"
},
{
"api_name": "numpy.allclose",
"line_number": 1880,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 1889,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 1891,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 1892,
"usage_type": "call"
},
{
"api_name": "numpy.logspace",
"line_number": 1893,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 1893,
"usage_type": "call"
},
{
"api_name": "numpy.roll",
"line_number": 1897,
"usage_type": "call"
},
{
"api_name": "numpy.sort",
"line_number": 1899,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 1899,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 1900,
"usage_type": "call"
},
{
"api_name": "pandas.DatetimeIndex",
"line_number": 1919,
"usage_type": "attribute"
},
{
"api_name": "pandas.Timestamp",
"line_number": 1920,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 1922,
"usage_type": "attribute"
},
{
"api_name": "numpy.all",
"line_number": 1923,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 1926,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 1935,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 1935,
"usage_type": "call"
},
{
"api_name": "numpy.flatnonzero",
"line_number": 1942,
"usage_type": "call"
},
{
"api_name": "numpy.isin",
"line_number": 1942,
"usage_type": "call"
},
{
"api_name": "numpy.flatnonzero",
"line_number": 1945,
"usage_type": "call"
},
{
"api_name": "numpy.isin",
"line_number": 1945,
"usage_type": "call"
},
{
"api_name": "numpy.flatnonzero",
"line_number": 1948,
"usage_type": "call"
},
{
"api_name": "numpy.isin",
"line_number": 1948,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.pinv",
"line_number": 1976,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 1976,
"usage_type": "attribute"
},
{
"api_name": "numpy.inf",
"line_number": 1982,
"usage_type": "attribute"
},
{
"api_name": "numpy.log",
"line_number": 1990,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 1991,
"usage_type": "call"
},
{
"api_name": "numpy.fmin",
"line_number": 1994,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 1999,
"usage_type": "attribute"
},
{
"api_name": "numpy.fmin",
"line_number": 2004,
"usage_type": "call"
},
{
"api_name": "numpy.any",
"line_number": 2014,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 2014,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 2017,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 2017,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 2020,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 2028,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 2029,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 2031,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 2037,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 2037,
"usage_type": "attribute"
},
{
"api_name": "numpy.cos",
"line_number": 2039,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 2039,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 2043,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 2044,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 2046,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 2050,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 2050,
"usage_type": "attribute"
},
{
"api_name": "numpy.sin",
"line_number": 2052,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 2052,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 2056,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 2057,
"usage_type": "attribute"
},
{
"api_name": "kernels2d.Glinedisp",
"line_number": 2066,
"usage_type": "call"
},
{
"api_name": "kernels2d.Glinedisp",
"line_number": 2071,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 2075,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 2079,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 2087,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 2087,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 2091,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 2094,
"usage_type": "call"
},
{
"api_name": "numpy.tile",
"line_number": 2107,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 2108,
"usage_type": "call"
},
{
"api_name": "configparser.ConfigParser",
"line_number": 2128,
"usage_type": "call"
},
{
"api_name": "numpy.deg2rad",
"line_number": 2149,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 2165,
"usage_type": "attribute"
},
{
"api_name": "numpy.inf",
"line_number": 2166,
"usage_type": "attribute"
},
{
"api_name": "numpy.atleast_1d",
"line_number": 2167,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 2167,
"usage_type": "call"
},
{
"api_name": "numpy.atleast_1d",
"line_number": 2168,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 2168,
"usage_type": "call"
},
{
"api_name": "numpy.atleast_1d",
"line_number": 2169,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 2169,
"usage_type": "call"
},
{
"api_name": "numpy.atleast_1d",
"line_number": 2170,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 2170,
"usage_type": "call"
},
{
"api_name": "numpy.atleast_1d",
"line_number": 2171,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 2171,
"usage_type": "call"
},
{
"api_name": "numpy.atleast_1d",
"line_number": 2172,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 2172,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 2174,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 2267,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 2310,
"usage_type": "attribute"
},
{
"api_name": "numpy.NaN",
"line_number": 2312,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 2337,
"usage_type": "attribute"
},
{
"api_name": "numpy.NaN",
"line_number": 2339,
"usage_type": "attribute"
},
{
"api_name": "numpy.ascontiguousarray",
"line_number": 2396,
"usage_type": "call"
},
{
"api_name": "numpy.logical_and",
"line_number": 2405,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 2411,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 2421,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 2421,
"usage_type": "call"
},
{
"api_name": "numpy.flatnonzero",
"line_number": 2425,
"usage_type": "call"
},
{
"api_name": "numpy.r_",
"line_number": 2429,
"usage_type": "attribute"
},
{
"api_name": "numpy.round",
"line_number": 2434,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 2459,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 2460,
"usage_type": "call"
},
{
"api_name": "numpy.logical_and",
"line_number": 2462,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 2466,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 2472,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 2472,
"usage_type": "name"
},
{
"api_name": "numpy.r_",
"line_number": 2508,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.colors.SymLogNorm",
"line_number": 2517,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 2519,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 2519,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 2522,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 2522,
"usage_type": "name"
},
{
"api_name": "cmcrameri.cm.vik",
"line_number": 2527,
"usage_type": "attribute"
},
{
"api_name": "cmcrameri.cm",
"line_number": 2527,
"usage_type": "name"
},
{
"api_name": "cmcrameri.cm.vik",
"line_number": 2539,
"usage_type": "attribute"
},
{
"api_name": "cmcrameri.cm",
"line_number": 2539,
"usage_type": "name"
},
{
"api_name": "numpy.r_",
"line_number": 2586,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 2596,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 2598,
"usage_type": "call"
},
{
"api_name": "numpy.tile",
"line_number": 2600,
"usage_type": "call"
},
{
"api_name": "numpy.tile",
"line_number": 2605,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 2606,
"usage_type": "call"
},
{
"api_name": "cmcrameri.cm.vik",
"line_number": 2611,
"usage_type": "attribute"
},
{
"api_name": "cmcrameri.cm",
"line_number": 2611,
"usage_type": "name"
},
{
"api_name": "matplotlib.colors.SymLogNorm",
"line_number": 2612,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 2620,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors.Normalize",
"line_number": 2622,
"usage_type": "call"
},
{
"api_name": "cmcrameri.cm.batlow",
"line_number": 2623,
"usage_type": "attribute"
},
{
"api_name": "cmcrameri.cm",
"line_number": 2623,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 2637,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 2637,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 2661,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 2672,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 2695,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 2737,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 2737,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 2737,
"usage_type": "call"
},
{
"api_name": "numpy.any",
"line_number": 2738,
"usage_type": "call"
},
{
"api_name": "warnings.warn",
"line_number": 2739,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 2741,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 2741,
"usage_type": "call"
},
{
"api_name": "numpy.flatnonzero",
"line_number": 2748,
"usage_type": "call"
},
{
"api_name": "numpy.r_",
"line_number": 2751,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 2756,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 2756,
"usage_type": "name"
},
{
"api_name": "numpy.asarray",
"line_number": 2758,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 2800,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 2800,
"usage_type": "name"
},
{
"api_name": "numpy.zeros_like",
"line_number": 2813,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 2851,
"usage_type": "call"
},
{
"api_name": "numpy.flatnonzero",
"line_number": 2855,
"usage_type": "call"
},
{
"api_name": "numpy.isin",
"line_number": 2855,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 2862,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 2869,
"usage_type": "call"
},
{
"api_name": "scipy.interpolate.interp1d",
"line_number": 2874,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 2876,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 2876,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 2877,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 2877,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 2885,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 2885,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 2921,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 2921,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 2921,
"usage_type": "call"
},
{
"api_name": "numpy.any",
"line_number": 2922,
"usage_type": "call"
},
{
"api_name": "warnings.warn",
"line_number": 2923,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 2925,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 2925,
"usage_type": "call"
},
{
"api_name": "numpy.flatnonzero",
"line_number": 2932,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 2935,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 2937,
"usage_type": "call"
},
{
"api_name": "{'plt': 'matplotlib.pyplot', 'SymLogNorm': 'matplotlib.colors.SymLogNorm', 'cm': 'cmcrameri.cm', 'Normalize': 'matplotlib.colors.Normalize', 'interp1d': 'scipy.interpolate.interp1d'}.get_alpha_eff",
"line_number": 2941,
"usage_type": "call"
},
{
"api_name": "{'plt': 'matplotlib.pyplot', 'SymLogNorm': 'matplotlib.colors.SymLogNorm', 'cm': 'cmcrameri.cm', 'Normalize': 'matplotlib.colors.Normalize', 'interp1d': 'scipy.interpolate.interp1d'}.get_alpha_eff",
"line_number": 2944,
"usage_type": "call"
},
{
"api_name": "{'plt': 'matplotlib.pyplot', 'SymLogNorm': 'matplotlib.colors.SymLogNorm', 'cm': 'cmcrameri.cm', 'Normalize': 'matplotlib.colors.Normalize', 'interp1d': 'scipy.interpolate.interp1d'}.get_alpha_eff",
"line_number": 2947,
"usage_type": "call"
},
{
"api_name": "{'plt': 'matplotlib.pyplot', 'SymLogNorm': 'matplotlib.colors.SymLogNorm', 'cm': 'cmcrameri.cm', 'Normalize': 'matplotlib.colors.Normalize', 'interp1d': 'scipy.interpolate.interp1d'}.get_alpha_eff_from_alpha_h",
"line_number": 2951,
"usage_type": "call"
},
{
"api_name": "{'plt': 'matplotlib.pyplot', 'SymLogNorm': 'matplotlib.colors.SymLogNorm', 'cm': 'cmcrameri.cm', 'Normalize': 'matplotlib.colors.Normalize', 'interp1d': 'scipy.interpolate.interp1d'}.get_alpha_eff_from_alpha_h",
"line_number": 2953,
"usage_type": "call"
},
{
"api_name": "{'plt': 'matplotlib.pyplot', 'SymLogNorm': 'matplotlib.colors.SymLogNorm', 'cm': 'cmcrameri.cm', 'Normalize': 'matplotlib.colors.Normalize', 'interp1d': 'scipy.interpolate.interp1d'}.get_alpha_eff_from_alpha_h",
"line_number": 2955,
"usage_type": "call"
},
{
"api_name": "numpy.floor",
"line_number": 2959,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 2959,
"usage_type": "call"
},
{
"api_name": "numpy.ma.masked_invalid",
"line_number": 2959,
"usage_type": "call"
},
{
"api_name": "numpy.ma",
"line_number": 2959,
"usage_type": "attribute"
},
{
"api_name": "numpy.ceil",
"line_number": 2960,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 2960,
"usage_type": "call"
},
{
"api_name": "numpy.ma.masked_invalid",
"line_number": 2960,
"usage_type": "call"
},
{
"api_name": "numpy.ma",
"line_number": 2960,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 2962,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 2962,
"usage_type": "name"
},
{
"api_name": "numpy.atleast_1d",
"line_number": 2963,
"usage_type": "call"
},
{
"api_name": "numpy.flatnonzero",
"line_number": 3011,
"usage_type": "call"
},
{
"api_name": "numpy.isin",
"line_number": 3011,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 3018,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 3024,
"usage_type": "call"
},
{
"api_name": "{'plt': 'matplotlib.pyplot', 'SymLogNorm': 'matplotlib.colors.SymLogNorm', 'cm': 'cmcrameri.cm', 'Normalize': 'matplotlib.colors.Normalize', 'interp1d': 'scipy.interpolate.interp1d', 'LogNorm': 'matplotlib.colors.LogNorm'}.get_alpha_eff",
"line_number": 3028,
"usage_type": "call"
},
{
"api_name": "{'plt': 'matplotlib.pyplot', 'SymLogNorm': 'matplotlib.colors.SymLogNorm', 'cm': 'cmcrameri.cm', 'Normalize': 'matplotlib.colors.Normalize', 'interp1d': 'scipy.interpolate.interp1d', 'LogNorm': 'matplotlib.colors.LogNorm'}.get_alpha_eff_from_alpha_h",
"line_number": 3032,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 3038,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 3038,
"usage_type": "call"
},
{
"api_name": "numpy.ma.MaskedArray",
"line_number": 3043,
"usage_type": "call"
},
{
"api_name": "numpy.ma",
"line_number": 3043,
"usage_type": "attribute"
},
{
"api_name": "numpy.diff",
"line_number": 3043,
"usage_type": "call"
},
{
"api_name": "numpy.NaN",
"line_number": 3045,
"usage_type": "attribute"
},
{
"api_name": "numpy.concatenate",
"line_number": 3047,
"usage_type": "call"
},
{
"api_name": "numpy.nanargmax",
"line_number": 3047,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 3052,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 3052,
"usage_type": "name"
},
{
"api_name": "numpy.abs",
"line_number": 3056,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors.LogNorm",
"line_number": 3059,
"usage_type": "call"
},
{
"api_name": "numpy.floor",
"line_number": 3059,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 3059,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 3059,
"usage_type": "call"
},
{
"api_name": "numpy.ceil",
"line_number": 3060,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 3060,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 3060,
"usage_type": "call"
},
{
"api_name": "cmcrameri.cm.batlow",
"line_number": 3061,
"usage_type": "attribute"
},
{
"api_name": "cmcrameri.cm",
"line_number": 3061,
"usage_type": "name"
}
] |
72908983868
|
import os
import testinfra.utils.ansible_runner
import pytest
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']
).get_hosts('all')
@pytest.mark.parametrize("name", [
("apt-transport-https"),
("software-properties-common"),
("unattended-upgrades"),
("mailutils"),
("bsd-mailx"),
])
def test_default_packages(host, name):
pkg = host.package(name)
assert pkg.is_installed
def test_default_config_files_present(host):
f = host.file("/etc/apt/apt.conf.d/50unattended-upgrades")
assert f.exists
assert f.is_file
|
ddrugeon/ansible-pi-bootstrap
|
roles/unattended-upgrades/molecule/default/tests/test_install_mandatory_tools.py
|
test_install_mandatory_tools.py
|
py
| 607 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "testinfra.utils.ansible_runner.utils.ansible_runner.AnsibleRunner",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "testinfra.utils.ansible_runner.utils",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "testinfra.utils.ansible_runner",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "os.environ",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 10,
"usage_type": "attribute"
}
] |
41373296282
|
#!/usr/bin/env python
from graph import DiGraph, before_after_calculations
import os
import json
import threading
import random
from datetime import date
def read_graphml_files():
with open('tmp/interesting_graphs.txt') as fh:
lines = [line.rstrip() for line in fh]
return [(line, DiGraph.from_graphml(f"graphml/{line}")) for line in lines]
def process_graph(name, G, results):
print(f"-- Processing graph {name}")
rand_ports = [random.randrange(1, 65535) for i in range(50)]
G_simulation, G_attack, stats = G.simulate_traffic(amount_of_rules=50, rand_ports=rand_ports)
print("Finished processing")
results.append(
(
before_after_calculations(
f"{name} simulated",
f"{name} simulated + attack",
G_simulation,
G_attack
),
stats
)
)
def main():
graphs = read_graphml_files()
thread_list = []
results = []
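    # One worker thread per graph; list.append is atomic under CPython's GIL,
    # so the shared results list needs no extra locking.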
for name, graph in graphs:
thread = threading.Thread(target=process_graph, args=(name, graph, results))
thread_list.append(thread)
for thread in thread_list:
thread.start()
for thread in thread_list:
thread.join()
filename = f"networks_simulation_results_{date.today()}.json"
    # Check whether the file already exists
i = 0
while os.path.isfile(filename):
i += 1
filename = f"networks_simulation_results_{date.today()}.{i}.json"
# results = [process_graph(name, graph) for name, graph in graphs]
with open(f"tmp/{filename}", 'a') as fh:
fh.write(json.dumps(results))
if __name__ == "__main__":
main()
|
mkapra/graph_measurements_segmentation
|
simulate_networks.py
|
simulate_networks.py
|
py
| 1,686 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "graph.DiGraph.from_graphml",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "graph.DiGraph",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "random.randrange",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "graph.before_after_calculations",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "datetime.date.today",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 62,
"usage_type": "call"
}
] |
27813014463
|
import numpy as np
import gym
import cv2
class StackedEnv(gym.Wrapper):
def __init__(self, env, width, height, n_img_stack, n_action_repeats):
super(StackedEnv, self).__init__(env)
self.width = width
self.height = height
self.n_img_stack = n_img_stack
self.n_action_repeats = n_action_repeats
self.stack = []
def reset(self):
img_rgb = super(StackedEnv, self).reset()
img_gray = self.preprocess(img_rgb)
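        # Seed the frame history with n_img_stack copies of the first frame so
        # the stacked observation has a fixed shape from the very first step.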
self.stack = [img_gray] * self.n_img_stack
return np.rollaxis(np.stack(self.stack, axis=2), 2, 0)
def step(self, action):
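        # Repeat the chosen action n_action_repeats times, accumulating the
        # reward, then push only the final frame onto the stack.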
total_reward = 0
done = False
img_rgb = None
info = None
for i in range(self.n_action_repeats):
img_rgb, reward, done, info = super(StackedEnv, self).step(action)
total_reward += reward
if done:
break
img_gray = self.preprocess(img_rgb)
self.stack.pop(0)
self.stack.append(img_gray)
assert len(self.stack) == self.n_img_stack
return np.rollaxis(np.stack(self.stack, axis=2), 2, 0), total_reward, done, info
def preprocess(self, rgb_img):
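        # Grayscale via the standard BT.601 luma weights, rescaled from
        # [0, 255] to roughly [-1, 1], then resized with bicubic interpolation.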
gray = np.dot(rgb_img[..., :], [0.299, 0.587, 0.114])
gray = gray / 128. - 1.
res = cv2.resize(gray, dsize=(self.height, self.width), interpolation=cv2.INTER_CUBIC)
return res
|
yiliu77/deep_rl_proj
|
environments/stacked.py
|
stacked.py
|
py
| 1,405 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "gym.Wrapper",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "numpy.rollaxis",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.rollaxis",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_CUBIC",
"line_number": 40,
"usage_type": "attribute"
}
] |
28985728032
|
import sys,re, os, io, codecs
from _collections import defaultdict
def loadModelfile():
modelfile = sys.argv[1]
features = defaultdict()
#load model file into features
with open(modelfile,"r", encoding = "ISO-8859-1") as modelhandler:
model = modelhandler.readlines()
for cldata in model:
cldata = cldata.strip()
cldata = cldata.split(" ")
clname = cldata[0]
cldata = cldata[1:]
weights = defaultdict(float)
for ft in cldata:
ft = ft.split("=|")
weights[ft[0]] = float(ft[1])
features[clname] = weights
input_stream = io.TextIOWrapper(sys.stdin.buffer,encoding = "ISO-8859-1")
calculate_accuracy(input_stream,features)
def calculate_accuracy(lines,features):
for line in lines:
output=""
line = line.strip()
line = " ".join(line.split())
data = line.split(" ")
length = len(data)
bos = "*BOS*"
eos = "*EOS*"
prevpos = bos
prev2pos= bos
prevclass= bos
prev2class=bos
i=0
while(i<length):
word = data[i]
pos = word.rfind("/")
postag = word[pos+1:]
crnt = word[:pos]
if(i==length-1):
next =eos
nextpos=eos
else:
nextdata = data[i+1]
rpos = nextdata.rfind("/")
nextpos = nextdata[rpos+1:]
next = nextdata[:rpos]
wshape = wordshape(crnt)
ftarray = ["crnt|"+crnt, "crntpos|"+postag, "prevpos|"+prevpos,
"prevcls|"+prevclass, "prev2pos|"+prev2pos,"prev2cls|"+prev2class,
"next|"+next, "nextpos|"+nextpos, "wshape|"+wshape]
scores = defaultdict()
for key in features.keys():
score = calculateScore(ftarray,features[key])
scores[key]=score
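            # predict the class whose weight vector scores highest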
predClass = max(scores.keys(), key=(lambda key: scores[key] ))
if(next == eos):
prev2pos = bos
prevpos= bos
prevclass = bos
prev2class = bos
else:
prev2pos = prevpos
prev2class = prevclass
prevpos = postag
prevclass = predClass
output+=word+"/"+predClass+" "
#calculate Fscore and accuracy
i+=1
output=output.strip()+ os.linesep
sys.stdout.write(output)
sys.stdout.flush()
def wordshape(word):
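    # Collapse character runs into a shape signature, e.g.
    # "McDonald-99" -> "AaAa_0" (uppercase runs -> A, lowercase runs -> a,
    # digit runs -> 0, anything else -> _).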
wshape = word
wshape = re.sub("[A-Z]+","A",wshape)
wshape = re.sub("[a-z]+","a",wshape)
wshape = re.sub("[0-9]+","0",wshape)
wshape = re.sub("[^A-Za-z0-9]+","_",wshape)
return wshape
def calculateScore(data,weightVector):
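    # linear score: sum the weights of the features present in this instance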
score = 0.0
for ft in data:
if(ft in weightVector.keys()):
score+=weightVector[ft]
return score
loadModelfile()
|
chandrashekar-cv/POS-Tagging
|
ner/netag.py
|
netag.py
|
py
| 3,029 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "_collections.defaultdict",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "_collections.defaultdict",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "io.TextIOWrapper",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "_collections.defaultdict",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "os.linesep",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.write",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "re.sub",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 107,
"usage_type": "call"
}
] |
19124516217
|
from components import decorators
from components import endpoints_webapp2
import webapp2
import api
import config
import notifications
import service
import swarming
README_MD = (
'https://chromium.googlesource.com/infra/infra/+/master/'
'appengine/cr-buildbucket/README.md')
class MainHandler(webapp2.RequestHandler): # pragma: no cover
"""Redirects to README.md."""
def get(self):
return self.redirect(README_MD)
class CronResetExpiredBuilds(webapp2.RequestHandler):
"""Resets expired builds."""
@decorators.require_cronjob
def get(self):
service.reset_expired_builds()
class CronUpdateBuckets(webapp2.RequestHandler): # pragma: no cover
"""Updates buckets from configs."""
@decorators.require_cronjob
def get(self):
config.cron_update_buckets()
class BuildHandler(webapp2.RequestHandler): # pragma: no cover
"""Redirects to API explorer to see the build."""
def get(self, build_id):
api_path = '/_ah/api/buildbucket/v1/builds/%s' % build_id
return self.redirect(api_path)
def get_frontend_routes(): # pragma: no cover
routes = [
webapp2.Route(r'/', MainHandler),
webapp2.Route(r'/b/<build_id:\d+>', BuildHandler),
endpoints_webapp2.discovery_service_route(),
]
routes += endpoints_webapp2.api_routes(api.BuildBucketApi)
routes += endpoints_webapp2.api_routes(swarming.SwarmbucketApi)
return routes
def get_backend_routes():
return [
webapp2.Route(
r'/internal/cron/buildbucket/reset_expired_builds',
CronResetExpiredBuilds),
webapp2.Route(
r'/internal/cron/buildbucket/update_buckets',
CronUpdateBuckets),
webapp2.Route(
r'/internal/task/buildbucket/notify/<build_id:\d+>',
notifications.TaskPublishNotification),
]
|
mithro/chromium-infra
|
appengine/cr-buildbucket/handlers.py
|
handlers.py
|
py
| 1,762 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "webapp2.RequestHandler",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "webapp2.RequestHandler",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "service.reset_expired_builds",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "components.decorators.require_cronjob",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "components.decorators",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "webapp2.RequestHandler",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "config.cron_update_buckets",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "components.decorators.require_cronjob",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "components.decorators",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "webapp2.RequestHandler",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "webapp2.Route",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "webapp2.Route",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "components.endpoints_webapp2.discovery_service_route",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "components.endpoints_webapp2",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "components.endpoints_webapp2.api_routes",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "components.endpoints_webapp2",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "api.BuildBucketApi",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "components.endpoints_webapp2.api_routes",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "components.endpoints_webapp2",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "swarming.SwarmbucketApi",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "webapp2.Route",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "webapp2.Route",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "webapp2.Route",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "notifications.TaskPublishNotification",
"line_number": 70,
"usage_type": "attribute"
}
] |
37877845832
|
import datetime
from PySide2.QtWidgets import QDialog
from ui_add_update import Ui_Dialog
class addDialog(QDialog):
def __init__(self, *args, **kvargs):
super().__init__(*args, **kvargs)
self.ui = Ui_Dialog()
self.ui.setupUi(self)
self.ui.OkButton.clicked.connect(self.accept)
self.ui.CancelButton.clicked.connect(self.reject)
self.ui.dateEdit.setDate(datetime.date.today())
self.ui.OkButton.clicked.connect(self.get_data)
self.ui.sc1000.valueChanged.connect(self.set_label)
self.ui.sc500.valueChanged.connect(self.set_label)
self.ui.sc200.valueChanged.connect(self.set_label)
self.ui.sc100.valueChanged.connect(self.set_label)
self.ui.sc50.valueChanged.connect(self.set_label)
def set_label(self):
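        # Refresh the per-denomination subtotals (count x note value) and the
        # grand total across the 1000/500/200/100/50 denominations.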
self.ui.lsc1000.setNum(1000*self.ui.sc1000.value())
self.ui.lsc500.setNum(500*self.ui.sc500.value())
self.ui.lsc200.setNum(200*self.ui.sc200.value())
self.ui.lsc100.setNum(100*self.ui.sc100.value())
self.ui.lsc50.setNum(50*self.ui.sc50.value())
self.ui.label.setNum(1000*self.ui.sc1000.value()+500*self.ui.sc500.value()+200*self.ui.sc200.value()+100*self.ui.sc100.value()+50*self.ui.sc50.value())
def get_data(self):
return {
"c_date": self.ui.dateEdit.date().toPython(),
"c_1000": self.ui.sc1000.value(),
"c_500": self.ui.sc500.value(),
"c_200": self.ui.sc200.value(),
"c_100": self.ui.sc100.value(),
"c_50": self.ui.sc50.value()
}
|
randrust/cashflow_pyside2
|
dialogs/add_dialog.py
|
add_dialog.py
|
py
| 1,602 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "PySide2.QtWidgets.QDialog",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "ui_add_update.Ui_Dialog",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 16,
"usage_type": "attribute"
}
] |
5114407056
|
import numpy as np
import pandas as pd
from scipy import ndimage
from scipy.interpolate import interp1d
import astropy.units as u
import astropy.coordinates as coord
from astropy.cosmology import FlatLambdaCDM
from astropy.constants import M_sun
import tensorflow as tf
from flowpm import utils as ut
import copy
import time
from sys import argv,exit,byteorder
from field_util import Field
class FGPA(object):
def __init__(self, fn, box_size = 512*u.Mpc, box_res = 256, origin = [3550*u.Mpc, -256*u.Mpc, -256*u.Mpc],
tau_0 = 1, T_0 = 1, beta = 1.6, gamma = 1.6, from_npy = True):
if from_npy:
self.dark = self.read_from_npy(fn)
else:
self.header, self.gas, self.dark, self.star = self.tipsy_read(fn)
self.auxilinary(box_size, box_res)
self.set_origin(origin)
self.set_FGPA_param(tau_0, T_0, beta, gamma)
def set_FGPA_param(self, tau_0 = None, T_0 = None, beta = None, gamma = None):
if tau_0 is not None:
self.tau_0 = tau_0 # TBD
if T_0 is not None:
self.T_0 = T_0 # TBD
if beta is not None:
self.beta = beta
if gamma is not None:
self.gamma = gamma
return
def set_origin(self, orig_pos):
'''
        Set an offset value (move the origin point to the given [x0, y0, z0]).
        Remember to attach units!
'''
self.x0, self.y0, self.z0 =orig_pos
return
def auxilinary(self, box_size, box_res, N_particles = None):
# settings of the simulation
self.box_size = box_size # unit: Mpc
self.box_res = box_res # unit: none
self.res = box_size/box_res # unit: Mpc
if N_particles is None:
self.n_den = self.header['N'] / self.box_size**3
else:
self.n_den = N_particles / self.box_size**3
#Do general cosmo things
self.cosmo = FlatLambdaCDM(H0=100, Om0=0.315)
self.mass_res = self.cosmo.critical_density0.to(u.M_sun/u.Mpc**3)*(self.box_size)**3/self.box_res**3*self.cosmo.Om(0.)
#get speed of light in km/s
self.ckms = 299792
self.velkms = (self.box_size/8677.2079486362706)*self.ckms
def read_from_npy(self, fn, costco_style = True):
'''
        Use this function to read reduced data and recover it to tipsy-style format.
'''
dark = np.load(fn)
self.auxilinary(box_size = 512*u.Mpc, box_res = 256, N_particles = len(dark))
dark = pd.DataFrame(dark,columns=dark.dtype.names)
# basically what we do in the rev function
dark['x'] = dark['x'] / self.box_size - 0.5
dark['y'] = dark['y'] / self.box_size - 0.5
dark['z'] = dark['z'] / self.box_size - 0.5
dark['x'].units = None
dark['y'].units = None
dark['z'].units = None
dark['vx'] = dark['vx'] / self.velkms
dark['vy'] = dark['vy'] / self.velkms
dark['vz'] = dark['vz'] / self.velkms
dark['vx'].units = None
dark['vy'].units = None
dark['vz'].units = None
if costco_style:
dark['x'], dark['y'], dark['z'] = dark['y'], dark['x'], dark['z'] # ?
dark['vx'], dark['vy'], dark['vz'] = dark['vy'], dark['vx'], dark['vz'] # dont forget the vel data!
return dark
def tipsy_read(self, fn, costco_style = True):
tipsy = open(fn, 'rb')
header_type = np.dtype([('time', '>f8'),('N', '>i4'), ('Dims', '>i4'), ('Ngas', '>i4'), ('Ndark', '>i4'), ('Nstar', '>i4'), ('pad', '>i4')])
gas_type = np.dtype([('mass','>f4'), ('x', '>f4'),('y', '>f4'),('z', '>f4'), ('vx', '>f4'),('vy', '>f4'),('vz', '>f4'),
('rho','>f4'), ('temp','>f4'), ('hsmooth','>f4'), ('metals','>f4'), ('phi','>f4')])
dark_type = np.dtype([('mass','>f4'), ('x', '>f4'),('y', '>f4'),('z', '>f4'), ('vx', '>f4'),('vy', '>f4'),('vz', '>f4'),
('eps','>f4'), ('phi','>f4')])
star_type = np.dtype([('mass','>f4'), ('x', '>f4'),('y', '>f4'),('z', '>f4'), ('vx', '>f4'),('vy', '>f4'),('vz', '>f4'),
('metals','>f4'), ('tform','>f4'), ('eps','>f4'), ('phi','>f4')])
header = np.fromfile(tipsy,dtype=header_type,count=1)
header = dict(zip(header_type.names,header[0]))
gas = np.fromfile(tipsy,dtype=gas_type,count=header['Ngas'])
dark = np.fromfile(tipsy,dtype=dark_type,count=header['Ndark'])
star = np.fromfile(tipsy,dtype=star_type,count=header['Nstar'])
if byteorder == 'little':
gas = gas.byteswap().newbyteorder('=')
dark = dark.byteswap().newbyteorder('=')
star = star.byteswap().newbyteorder('=')
gas = pd.DataFrame(gas,columns=gas.dtype.names)
dark = pd.DataFrame(dark,columns=dark.dtype.names) # here is the raw data
# in raw_data:
# x - RA
# y - DEC
# z - red
# what we want:
# x - red
# y - RA
# z - DEC
if costco_style:
dark['x'], dark['y'], dark['z'] = dark['y'], dark['x'], dark['z'] # ?
dark['vx'], dark['vy'], dark['vz'] = dark['vy'], dark['vx'], dark['vz'] # dont forget the vel data!
star = pd.DataFrame(star,columns=star.dtype.names)
tipsy.close()
return header, gas, dark, star
def process_dark(self, dark = None):
# for painting, keep the particles in [0, 512]^3 box
# this function write in-situ results, so be careful
# only use if you want to ensure the particles are in reasonable positions
if dark is None:
dark = copy.deepcopy(self.dark)
dark['x'] = (dark['x']+0.5) * self.box_size + self.x0
dark['y'] = (dark['y']+0.5) * self.box_size + self.y0
dark['z'] = (dark['z']+0.5) * self.box_size + self.z0
dark['x'].units = u.Mpc
dark['y'].units = u.Mpc
dark['z'].units = u.Mpc
dark['vx'] = dark['vx'] * self.velkms
dark['vy'] = dark['vy'] * self.velkms
dark['vz'] = dark['vz'] * self.velkms
dark['vx'].units = u.km * u.s**-1
dark['vy'].units = u.km * u.s**-1
dark['vz'].units = u.km * u.s**-1
dark['mass'] = self.mass_res.value
dark['mass'].units= M_sun
return dark
def process_dark_rev(self, dark_processed):
'''
recover the input field (to raw format)
'''
dark_processed['x'] = (dark_processed['x']-self.x0) / self.box_size - 0.5
dark_processed['y'] = (dark_processed['y']-self.y0) / self.box_size - 0.5
dark_processed['z'] = (dark_processed['z']-self.z0) / self.box_size - 0.5
dark_processed['x'].units = None
dark_processed['y'].units = None
dark_processed['z'].units = None
dark_processed['vx'] = dark_processed['vx'] / self.velkms
dark_processed['vy'] = dark_processed['vy'] / self.velkms
dark_processed['vz'] = dark_processed['vz'] / self.velkms
dark_processed['vx'].units = None
dark_processed['vy'].units = None
dark_processed['vz'].units = None
return dark_processed
def particle_paint(self, nc, weight_col = None):
'''
nc: # of cells along any direction
    raw_part_data: *raw* particle data from the simulation (x, y, z, \\in [-0.5, 0.5])
weight_col: pick one col in raw_data as weight; default value is 1 for all the particles
'''
mesh = tf.zeros([1, nc, nc, nc], dtype = float)
dark_raw = self.dark
dark_pos = tf.convert_to_tensor([dark_raw['x'], dark_raw['y'], dark_raw['z']], dtype = float)
dark_pos = tf.transpose((dark_pos + 0.5) * nc)
dark_pos = tf.expand_dims(dark_pos, axis = 0) # [1, partN, 3]
partN = dark_pos.shape[1]
n_den = partN / nc**3
if weight_col is None:
weight = tf.ones([1, partN])
else:
weight = tf.convert_to_tensor(dark_raw[weight_col])
weight = tf.expand_dims(weight, axis = 0)
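        # Cloud-in-cell deposit of the weights onto the mesh; dividing by the
        # mean particle count per cell turns unit weights into an overdensity.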
return ut.cic_paint(mesh, dark_pos, weight = weight) / n_den
def particle_paint_clip_with_real_coord(self, real_coord_start, real_coord_end, dl, dark_raw = None, weight_col = None, smooth = False):
'''
translate real space box to box in [-0.5, 0.5]^3 then run particle_paint_clip
        please add units to all the parameters
Issue: get things wrong here
'''
if dark_raw is None:
dark_raw = self.dark
rcs_x, rcs_y, rcs_z = real_coord_start
rce_x, rce_y, rce_z = real_coord_end
box_scale = round((self.box_size/dl).value)
nc = np.round([((rce_x-rcs_x)/dl).value,
((rce_y-rcs_y)/dl).value,
((rce_z-rcs_z)/dl).value]).astype(int)
offset = np.round([((rcs_x-self.x0)/dl).value,
((rcs_y-self.y0)/dl).value,
((rcs_z-self.z0)/dl).value]).astype(int)
n_den = self.n_den * dl**3 # global density per grid
field_data = self.particle_paint_clip(nc, offset, box_scale, n_den, dark_raw = dark_raw, weight_col = weight_col)[0] # [nx, ny, nz] tensor
field = Field(rcs_x, rcs_y, rcs_z, dl, field_data)
if smooth:
field.smooth(dl)
return field
def particle_paint_clip(self, nc, offset, box_scale, n_den, dark_raw = None, weight_col = None):
'''
nc: [nx, ny, nz]
the shape/physical-scale of mesh.
offset: [ox, oy, oz]
[0,0,0] position of the mesh; default value ox=0, oy=0, oz=0
box_scale: the full scale of the simulation - box coord: [0, box_scale]^3
        # raw_part_data: *raw* particle data from the simulation (x, y, z, \\in [-0.5, 0.5])
weight_col: pick one col in raw_data as weight; default value is 1 for all the particles
        this function cuts out a box [ox, ox+nx] * [oy, oy+ny] * [oz, oz+nz] with field values.
        automatic periodic boundary condition(?)
'''
if dark_raw is None:
dark_raw = self.dark
nx, ny, nz = nc
ox, oy, oz = offset
mesh = tf.zeros([1, nx, ny, nz], dtype = float)
# remove particles out of the boundary
dark_clip = dark_raw[(dark_raw['x']<=(ox+nx)/box_scale-0.5) & (dark_raw['x']>= ox/box_scale-0.5) &
(dark_raw['y']<=(oy+ny)/box_scale-0.5) & (dark_raw['y']>= oy/box_scale-0.5) &
(dark_raw['z']<=(oz+nz)/box_scale-0.5) & (dark_raw['z']>= oz/box_scale-0.5)]
dark_pos = tf.convert_to_tensor([(dark_clip['x']+ 0.5)*box_scale - ox,
(dark_clip['y']+ 0.5)*box_scale - oy,
(dark_clip['z']+ 0.5)*box_scale - oz], dtype = float)
assert (np.max(dark_pos[0]) <= nx) & (np.max(dark_pos[1]) <= ny) & (np.max(dark_pos[2]) <= nz), print(np.max(dark_pos[0]), np.max(dark_pos[1]), np.max(dark_pos[2]))
assert (np.min(dark_pos[0]) >= 0 ) & (np.min(dark_pos[1]) >= 0 ) & (np.min(dark_pos[2]) >= 0 )
dark_pos = tf.transpose(dark_pos)
dark_pos = tf.expand_dims(dark_pos, axis = 0) # [1, partN, 3]
partN = dark_pos.shape[1]
if weight_col is None:
weight = tf.ones([1, partN])
else:
weight = tf.convert_to_tensor(dark_clip[weight_col])
weight = tf.expand_dims(weight, axis = 0)
paint = ut.cic_paint(mesh, dark_pos, weight = weight) / n_den
return paint
def RSD_catalog(self, real_coord_start, real_coord_end, dl):
'''
return a particle catalog with RSDed pos
'''
opd_clip = self.particle_paint_clip_with_real_coord(real_coord_start, real_coord_end, dl)
fvx_clip = self.particle_paint_clip_with_real_coord(real_coord_start, real_coord_end, dl, weight_col = 'vx')
fvy_clip = self.particle_paint_clip_with_real_coord(real_coord_start, real_coord_end, dl, weight_col = 'vy')
fvz_clip = self.particle_paint_clip_with_real_coord(real_coord_start, real_coord_end, dl, weight_col = 'vz')
part_new_pos = self.field_to_part_pos(opd_clip)
# generate a new particle catalog
part_new = pd.DataFrame(part_new_pos[0], columns = ['x', 'y', 'z'])
opd_tensor = tf.expand_dims(opd_clip.field_data, axis = 0)
fvx_tensor = tf.expand_dims(fvx_clip.field_data, axis = 0)
fvy_tensor = tf.expand_dims(fvy_clip.field_data, axis = 0)
fvz_tensor = tf.expand_dims(fvz_clip.field_data, axis = 0)
# flowpm.cic_readout requires a compatible mesh and coord ([nx, ny, nz] grid - [0, n*]^3 coord)
part_pos_grid = self.mesh_to_part_pos(tf.expand_dims(opd_clip.field_data, axis = 0))
part_new['opd'] = ut.cic_readout(opd_tensor, part_pos_grid)[0]
part_new['vx'] = ut.cic_readout(fvx_tensor, part_pos_grid)[0]
part_new['vy'] = ut.cic_readout(fvy_tensor, part_pos_grid)[0]
part_new['vz'] = ut.cic_readout(fvz_tensor, part_pos_grid)[0]
# convert to real space
part_new = self.process_dark(part_new)
# RSD
# TODO: consider the effect of yz distance
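        # Doppler shift along the line of sight: z_rs = z_real + v_x / c,
        # with v_x in km/s and c = 299792 km/s.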
part_new['red_real'] = self.z_from_dist(part_new['x'] * u.Mpc)
part_new['red_rs'] = part_new['red_real'] + part_new['vx'] / self.ckms
part_new['x_red'] = self.z_to_dist(part_new['red_rs'])
return part_new
def FGPA_eval(self, mesh_catalog):
# calculate tau and T for each particle
# TODO: find out characterized tau_0 & T_0
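        # Fluctuating Gunn-Peterson approximation: optical depth and
        # temperature are power laws of the local overdensity.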
mesh_catalog['tau'] = self.tau_0 * mesh_catalog['opd']**self.beta
mesh_catalog['T'] = self.T_0 * mesh_catalog['opd']**self.gamma
return mesh_catalog
def raw_tau_map(self, real_coord_start, real_coord_end, dl):
# generate the particle catalog
part_new = self.RSD_catalog(real_coord_start, real_coord_end, dl)
# add tau / T to the catalog
part_new = self.FGPA_eval(part_new)
# this one in the RS space
part_new_new = pd.DataFrame(part_new[['y', 'z', 'vx', 'vy', 'vz', 'tau', 'T']], columns = ['y', 'z', 'vx', 'vy', 'vz', 'tau', 'T'])
part_new_new['x'] = part_new['x_red'] # not sure why this doesn't work
# recover the raw_field
part_new_new = self.process_dark_rev(part_new_new)
# paint the final result
tau_field = self.particle_paint_clip_with_real_coord(real_coord_start, real_coord_end, dl,
dark_raw = part_new_new, weight_col = 'tau', smooth = False)
return tau_field
def tau_map(self, real_coord_start, real_coord_end, dl, z_comp = 2.30, F_obs = 0.8447):
# derive the A_norm by comparing the desired value in obs with calculation
self.set_FGPA_param(tau_0=1)
raw_tau_field = self.raw_tau_map(real_coord_start, real_coord_end, dl)
# construct the clip coord
# hardcode bad, but works here
clip_start = copy.deepcopy(real_coord_start)
clip_start[0] = self.cosmo.comoving_distance(z_comp - 0.05)
clip_end = copy.deepcopy(real_coord_end)
clip_end[0] = self.cosmo.comoving_distance(z_comp + 0.05)
test_tau_field = raw_tau_field.clip_with_coord(clip_start, clip_end)
# no newton, just interp
tau_list = test_tau_field.field_data
tau_list = tau_list[~np.isnan(tau_list)]
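        # Scan amplitudes A and invert <exp(-A * tau)> via interpolation to
        # find the A whose mean transmitted flux matches the observed F_obs.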
l = []
for A in np.arange(0, 0.5, 0.001):
l.append(np.mean(np.exp(-tau_list * A)))
A_func = interp1d(l, np.arange(0, 0.5, 0.001))
A_norm = A_func(F_obs)
tau_field = copy.deepcopy(raw_tau_field)
tau_field.field_data = tau_field.field_data * A_norm
return tau_field
def trans_map(self, real_coord_start, real_coord_end, dl):
tau_field = self.tau_map(real_coord_start, real_coord_end, dl)
trans_field = copy.deepcopy(tau_field)
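        # transmission F = exp(-tau), renormalized to unit mean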
trans_field.field_data = np.exp(-trans_field.field_data)
trans_field.field_data = trans_field.field_data / np.mean(trans_field.field_data)
return trans_field
def field_to_part_pos(self, field, tensor = True):
        '''
        Convert the grid points of `field` into flowpm particle positions in the
        raw simulation box coordinates ([-0.5, 0.5]^3).
        '''
nx, ny, nz = field.field_data.shape
part_mesh = np.meshgrid(np.arange(nx), np.arange(ny), np.arange(nz))
part_mesh = np.reshape(part_mesh, [3, nx*ny*nz]).T.astype(float)
part_mesh[:, 0] = (part_mesh[:, 0]*field.dl + field.x0 - self.x0) / self.box_size - 0.5
part_mesh[:, 1] = (part_mesh[:, 1]*field.dl + field.y0 - self.y0) / self.box_size - 0.5
part_mesh[:, 2] = (part_mesh[:, 2]*field.dl + field.z0 - self.z0) / self.box_size - 0.5
if tensor:
part_mesh = tf.convert_to_tensor(part_mesh, dtype = float)
part_mesh = tf.expand_dims(part_mesh, 0)
return part_mesh
def mesh_to_part_pos(self, mesh):
'''
        Note this function is not complete: it converts the mesh into positions [0, 0, 0] ... [nx, ny, nz] regardless of the mesh's actual position.
        Obsolete
'''
_, nx, ny, nz = mesh.shape
part_mesh = np.meshgrid(np.arange(nx), np.arange(ny), np.arange(nz))
part_mesh = np.reshape(part_mesh, [3, nx*ny*nz]).T.astype(float)
part_mesh = tf.convert_to_tensor(part_mesh, dtype = float)
part_mesh = tf.expand_dims(part_mesh, 0)
return part_mesh
def z_from_dist(self, distance):
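        # Invert the comoving-distance relation numerically: tabulate D(z) on
        # a dense redshift grid and interpolate back from distance to z.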
dummyred = np.linspace(0.,10.,10000)
dummydist = self.cosmo.comoving_distance(dummyred)
res = np.interp(distance,dummydist,dummyred)
return (res)
def z_to_dist(self, red):
return self.cosmo.comoving_distance(red).value
|
pointeee/preheat2022_public
|
FGPA.py
|
FGPA.py
|
py
| 18,340 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "astropy.units.Mpc",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "astropy.cosmology.FlatLambdaCDM",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "astropy.units.M_sun",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "astropy.units.Mpc",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "numpy.load",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "astropy.units.Mpc",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.dtype",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "numpy.dtype",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "numpy.dtype",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "numpy.dtype",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "numpy.fromfile",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "numpy.fromfile",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "numpy.fromfile",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "numpy.fromfile",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "sys.byteorder",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "astropy.units.Mpc",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "astropy.units.Mpc",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "astropy.units.Mpc",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "astropy.units.km",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "astropy.units.s",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "astropy.units.km",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "astropy.units.s",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "astropy.units.km",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "astropy.units.s",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "astropy.constants.M_sun",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "tensorflow.zeros",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "tensorflow.convert_to_tensor",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "tensorflow.transpose",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "tensorflow.expand_dims",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "tensorflow.ones",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "tensorflow.convert_to_tensor",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "tensorflow.expand_dims",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "flowpm.utils.cic_paint",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "flowpm.utils",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "numpy.round",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "field_util.Field",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "tensorflow.zeros",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "tensorflow.convert_to_tensor",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "tensorflow.transpose",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "tensorflow.expand_dims",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "tensorflow.ones",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "tensorflow.convert_to_tensor",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "tensorflow.expand_dims",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "flowpm.utils.cic_paint",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "flowpm.utils",
"line_number": 287,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "tensorflow.expand_dims",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "tensorflow.expand_dims",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "tensorflow.expand_dims",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "tensorflow.expand_dims",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "tensorflow.expand_dims",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "flowpm.utils.cic_readout",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "flowpm.utils",
"line_number": 313,
"usage_type": "name"
},
{
"api_name": "flowpm.utils.cic_readout",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "flowpm.utils",
"line_number": 314,
"usage_type": "name"
},
{
"api_name": "flowpm.utils.cic_readout",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "flowpm.utils",
"line_number": 315,
"usage_type": "name"
},
{
"api_name": "flowpm.utils.cic_readout",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "flowpm.utils",
"line_number": 316,
"usage_type": "name"
},
{
"api_name": "astropy.units.Mpc",
"line_number": 323,
"usage_type": "attribute"
},
{
"api_name": "astropy.units",
"line_number": 323,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "scipy.interpolate.interp1d",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 393,
"usage_type": "call"
},
{
"api_name": "tensorflow.convert_to_tensor",
"line_number": 399,
"usage_type": "call"
},
{
"api_name": "tensorflow.expand_dims",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "tensorflow.convert_to_tensor",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "tensorflow.expand_dims",
"line_number": 414,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "numpy.interp",
"line_number": 420,
"usage_type": "call"
}
] |
28156199284
|
from functools import partial
from pathlib import Path
from typing import Dict, Any, Callable, Tuple, Optional, Sequence
import PIL
import imageio
import numpy as np
import torch
from PIL import Image
from torch import Tensor
from torch.nn import Module, Tanh, Parameter
from torch.nn.functional import grid_sample, l1_loss, mse_loss
from torch.utils.tensorboard import SummaryWriter
from thre3d_atom.networks.dense_nets import SkipMLP, SkipMLPConfig
from thre3d_atom.networks.network_interface import Network
from thre3d_atom.networks.shared.layers import (
PositionalEncodingsEmbedder,
PixelwiseNorm,
)
from thre3d_atom.utils.constants import NUM_COLOUR_CHANNELS
from thre3d_atom.utils.imaging_utils import (
adjust_dynamic_range,
to8b,
mse2psnr,
get_2d_coordinates,
)
from thre3d_atom.utils.logging import log
class FeatureGrid2D(Module):
def __init__(
self,
height: int,
width: int,
feature_dims: int,
tunable: bool = True,
device: torch.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu"
),
) -> None:
super().__init__()
# state of the object:
self._height = height
self._width = width
self._feature_dims = feature_dims
self._tunable = tunable
self.features = torch.empty(
(1, feature_dims, height, width), device=device, requires_grad=True
)
torch.nn.init.xavier_uniform_(self.features)
if self._tunable:
self.features = Parameter(self.features)
@classmethod
def from_feature_tensor(cls, feature_tensor: Tensor) -> Any:
_, feature_dims, height, width = feature_tensor.shape
# initialize a random feature_grid
feature_grid = cls(
height=height,
width=width,
feature_dims=feature_dims,
tunable=False,
device=feature_tensor.device,
)
        # use the given feature_tensor as its features:
feature_grid.features = feature_tensor
return feature_grid
def extra_repr(self) -> str:
return (
f"grid_dims: {self.features.shape[2:]}, "
f"feature_dims: {self.features.shape[1]}, "
f"tunable: {self._tunable}"
)
def get_save_info(self) -> Dict[str, Any]:
return {
"conf": {
"height": self._height,
"width": self._width,
"feature_dims": self._feature_dims,
"tunable": self._tunable,
},
"state_dict": self.state_dict(),
}
def forward(self, coords: Tensor) -> Tensor:
"""coords should be of shape => [N x 2], and be in the range [-1, 1]"""
sam_vals = grid_sample(
# note the convention difference between the image and sample coordinates
self.features.permute(0, 1, 3, 2),
coords[None, None, ...],
mode="bilinear",
align_corners=False,
)
return sam_vals.permute(0, 2, 3, 1)[0, 0, ...]
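    # Illustrative shape check (assumed values, not from the original file):
    # a 16x16 grid with 32-dim features queried at 100 normalized coords.
    #   fg = FeatureGrid2D(height=16, width=16, feature_dims=32)
    #   coords = torch.rand(100, 2) * 2.0 - 1.0  # [100, 2] in [-1, 1]
    #   feats = fg(coords)                       # -> [100, 32]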
class ImageDecoderMLP(Network):
# noinspection PyUnresolvedReferences
def __init__(
self,
mlp: SkipMLP,
feature_dims: int = 32,
feature_embedding_dims: int = 0,
use_local_coords: bool = False,
local_coords_embedding_dims: int = 0,
normalize_features: bool = False,
) -> None:
super().__init__()
self._mlp = mlp
self._feature_dims = feature_dims
self._feature_embedding_dims = feature_embedding_dims
self._use_local_coords = use_local_coords
self._local_coords_embedding_dims = local_coords_embedding_dims
self._normalize_features = normalize_features
# objects of modification:
self._normalizer = PixelwiseNorm()
self._feature_embedder = PositionalEncodingsEmbedder(
input_dims=self._feature_dims, emb_dims=self._feature_embedding_dims
)
self._local_coords_embedder = PositionalEncodingsEmbedder(
input_dims=2, emb_dims=self._local_coords_embedding_dims
)
@property
def input_shape(self) -> Sequence[Tuple[int, ...]]:
return self._mlp.input_shape
@property
def output_shape(self) -> Sequence[Tuple[int, ...]]:
return self._mlp.output_shape
@property
def feature_dims(self) -> int:
return self._feature_dims
@property
def use_local_coords(self) -> bool:
return self._use_local_coords
def get_save_info(self) -> Dict[str, Any]:
return {
"conf": {
"feature_dims": self._feature_dims,
"feature_embedding_dims": self._feature_embedding_dims,
"use_local_coords": self._use_local_coords,
"local_coords_embedding_dims": self._local_coords_embedding_dims,
"normalize_features": self._normalize_features,
},
"mlp": self._mlp.get_save_info(),
"state_dict": self.state_dict(),
}
def load_weights(self, weights: Dict[str, Any]) -> None:
self._mlp.load_state_dict(weights["mlp"]["state_dict"])
def forward(self, x: Tensor) -> Tensor:
if self._use_local_coords:
features, local_coords = (
x[..., : self._feature_dims],
x[..., self._feature_dims :],
)
else:
features, local_coords = x, torch.zeros(size=(x.shape[0], 0))
embedded_features = self._feature_embedder(features)
embedded_local_coords = self._local_coords_embedder(local_coords)
normalized_features = self._normalizer(features)
if self._use_local_coords:
feats = (
normalized_features if self._normalize_features else embedded_features
)
mlp_input = torch.cat([feats, embedded_local_coords], dim=-1)
else:
mlp_input = (
normalized_features if self._normalize_features else embedded_features
)
return self._mlp(mlp_input)
def get_default_image_decoder_mlp(
feature_dims: int = 32,
feature_embedding_dims: int = 0,
use_local_coords: bool = False,
local_coords_embedding_dims: int = 0,
normalize_features: bool = False,
) -> ImageDecoderMLP:
feat_inp_dims = feature_dims + (2 * feature_dims * feature_embedding_dims)
lc_inp_dims = 2 + (2 * 2 * local_coords_embedding_dims)
if use_local_coords:
mlp_input_dims = feat_inp_dims + lc_inp_dims
elif normalize_features:
mlp_input_dims = feature_dims
else:
mlp_input_dims = feat_inp_dims
mlp_config = SkipMLPConfig(
input_dims=mlp_input_dims,
layer_depths=[256],
output_dims=NUM_COLOUR_CHANNELS,
skips=[False],
use_equalized_learning_rate=True,
out_activation_fn=Tanh(),
)
return ImageDecoderMLP(
SkipMLP(mlp_config),
feature_dims=feature_dims,
feature_embedding_dims=feature_embedding_dims,
use_local_coords=use_local_coords,
local_coords_embedding_dims=local_coords_embedding_dims,
normalize_features=normalize_features,
)
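# Worked example of the input-dim arithmetic above (illustrative values):
# with feature_dims=32 and feature_embedding_dims=4, the positional encoding
# adds 2 * 32 * 4 = 256 dims, so feat_inp_dims = 32 + 256 = 288; with
# use_local_coords=True and local_coords_embedding_dims=4, lc_inp_dims is
# 2 + 2 * 2 * 4 = 18, giving mlp_input_dims = 288 + 18 = 306.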
def decode_coords_with_fg_and_mlp(
coords: Tensor,
feature_grid: FeatureGrid2D,
decoder_mlp: ImageDecoderMLP,
image_resolution: Tuple[int, int],
) -> Tensor:
"""decodes the coords tensor into RGB pixel values"""
orig_shape = coords.shape
coords = coords.reshape(-1, orig_shape[-1])
image_height, image_width = image_resolution
local_coords = adjust_dynamic_range(coords, drange_in=(-1, 1), drange_out=(0, 1))
local_coords[..., 0] *= image_height
local_coords[..., 1] *= image_width
local_coords = local_coords - torch.floor(local_coords)
decoded_features = feature_grid(coords)
decoder_input = (
torch.cat([decoded_features, local_coords], dim=-1)
if decoder_mlp.use_local_coords
else decoded_features
)
return decoder_mlp(decoder_input).reshape(*orig_shape[:-1], -1)
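# Minimal CPU-only usage sketch for the decoder pipeline above (assumed
# sizes; the training loop below drives this with batches of coordinates):
#   fg = FeatureGrid2D(16, 16, feature_dims=32, device=torch.device("cpu"))
#   mlp = get_default_image_decoder_mlp(feature_dims=32)  # CPU by default
#   coords = get_2d_coordinates(8, 8)              # [8, 8, 2] in [-1, 1]
#   rgb = decode_coords_with_fg_and_mlp(coords, fg, mlp, (8, 8))  # [8, 8, 3]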
class ImageModel:
def __init__(
self,
image_height: int,
image_width: int,
feature_dims: int = 32,
decoder_mlp_maker: Callable[[], Network] = get_default_image_decoder_mlp,
device: torch.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu"
),
verbose_creation: bool = True,
) -> None:
self._image_height = image_height
self._image_width = image_width
self._feature_dims = feature_dims
self._device = device
        # compute the height and width of the feature-grid so as to keep the number of
        # parameters in the image and the parametric model the same:
self._setup_feature_dims()
# create a feature grid object (note that these are kept public):
self.feature_grid = FeatureGrid2D(
self._feature_height, self._feature_width, feature_dims, device=device
)
self.decoder_mlp = decoder_mlp_maker().to(self._device)
# print info related to the Feature Grid and the Decoder MLP:
if verbose_creation:
log.info(f"Created Feature grid: {self.feature_grid}")
log.info(f"Created Decoder MLP: {self.decoder_mlp}")
@property
def image_resolution(self) -> Tuple[int, int]:
return self._image_height, self._image_width
@staticmethod
def compute_feature_grid_dims(
image_resolution: Tuple[int, int], feature_dims: int
) -> Tuple[int, int]:
image_height, image_width = image_resolution
aspect_ratio = image_width / image_height
total_image_params = image_width * image_height * NUM_COLOUR_CHANNELS
needed_params = total_image_params / feature_dims
feature_grid_height = int(np.ceil(np.sqrt(needed_params / aspect_ratio)))
feature_grid_width = int(aspect_ratio * feature_grid_height)
return feature_grid_height, feature_grid_width
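    # Worked example (illustrative): a 512x512 RGB image with feature_dims=32
    # has 512 * 512 * 3 = 786432 pixel parameters, so the grid needs
    # 786432 / 32 = 24576 cells; with aspect ratio 1.0 this gives
    # ceil(sqrt(24576)) = 157, i.e. a 157x157 feature grid.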
def _setup_feature_dims(self) -> None:
self._feature_height, self._feature_width = self.compute_feature_grid_dims(
image_resolution=(self._image_height, self._image_width),
feature_dims=self._feature_dims,
)
@staticmethod
def _shuffle_tensor_2d(tensor_2d: Tensor) -> Tensor:
""" shuffles a 2D Tensor of shape [N x C]"""
return tensor_2d[torch.randperm(len(tensor_2d))]
def _infinite_data_loader(self, data: Tensor, batch_size: int) -> Tensor:
while True:
data = self._shuffle_tensor_2d(data)
for batch_index in range(0, len(data), batch_size):
data_batch = data[batch_index : batch_index + batch_size]
if data_batch.shape[0] == batch_size:
yield data_batch
else:
break
@staticmethod
def _check_log_condition(
current_step: int, frequency_step: int, start_step: int, end_step: int
) -> bool:
return (
current_step % frequency_step == 0
or current_step == start_step
or current_step == end_step
)
def get_save_info(self) -> Dict[str, Any]:
return {
"conf": {
"image_height": self._image_height,
"image_width": self._image_width,
"feature_dims": self._feature_dims,
},
"feature_grid": self.feature_grid.get_save_info(),
"decoder_mlp": self.decoder_mlp.get_save_info(),
}
def render(
self,
render_resolution: Optional[Tuple[int, int]] = None,
chunk_size: int = 64 * 1024,
) -> Tensor:
height, width = (
(self._image_height, self._image_width)
if render_resolution is None
else render_resolution
)
# create a coordinates mesh-grid:
coords = get_2d_coordinates(height, width)
        # flatten the coordinates and move them to the device:
flat_coords = coords.reshape(-1, coords.shape[-1]).to(self._device)
# decode all the coordinates into pixel values chunk by chunk:
decoded_image = []
with torch.no_grad():
for chunk_index in range(0, len(flat_coords), chunk_size):
coord_chunk = flat_coords[chunk_index : chunk_index + chunk_size]
decoded_image.append(
decode_coords_with_fg_and_mlp(
coord_chunk,
self.feature_grid,
self.decoder_mlp,
self.image_resolution,
)
)
decoded_image = torch.cat(decoded_image, dim=0)
decoded_image = decoded_image.reshape(height, width, -1)
decoded_image = adjust_dynamic_range(
decoded_image.cpu(),
drange_in=(-1, 1),
drange_out=(0, 1),
slack=True,
)
return decoded_image
def train(
self,
training_image: PIL.Image.Image,
num_iterations: int = 10000,
batch_size: int = 8192,
learning_rate: float = 0.003,
lr_decay_steps: int = 5000,
feedback_frequency: int = 1000,
loss_feedback_frequency: int = 10,
testing_frequency: int = 1000,
save_frequency: int = 2000,
output_dir: Path = Path(__file__).parent.absolute() / "logs",
) -> None:
# load the training image and create a dataset of pixel_coordinates -> pixel RGB values:
image_np = np.array(training_image).astype(np.float32) / 255
if len(image_np.shape) < 3:
image_np = np.tile(image_np[..., None], (1, 1, 3))
image_np = image_np[..., :3] # in case of > 3 channel images
real_feedback_image = image_np
# bring the pixel range to (-1, 1) for training
image_np = adjust_dynamic_range(image_np, drange_in=(0, 1), drange_out=(-1, 1))
# make sure the training image is compatible with the ImageModel
assert (
self._image_height == image_np.shape[0]
and self._image_width == image_np.shape[1]
), (
f"The provided training image with size ({image_np.shape[:-1]}) is incompatible with the Image-Model's"
f"image size ({self._image_height, self._image_width})"
)
image_coords = get_2d_coordinates(self._image_height, self._image_width)
coord_rgb_image = torch.cat(
[
image_coords.to(self._device),
torch.from_numpy(image_np).to(self._device),
],
dim=-1,
)
training_data = coord_rgb_image.reshape(-1, coord_rgb_image.shape[-1])
training_data_loader = iter(
self._infinite_data_loader(training_data, batch_size=batch_size)
)
# setup optimizer:
optimizer = torch.optim.Adam(
params=[
{"params": self.feature_grid.parameters(), "lr": learning_rate},
{"params": self.decoder_mlp.parameters(), "lr": learning_rate},
],
betas=(0, 0.99),
)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.1)
# setup output directories
# fmt: off
model_dir = output_dir / "saved_models"
logs_dir = output_dir / "training_logs"
tensorboard_dir = logs_dir / "tensorboard"
render_dir = logs_dir / "rendered_output"
for directory in (model_dir, logs_dir, tensorboard_dir,
render_dir):
directory.mkdir(exist_ok=True, parents=True)
# fmt: on
# create the tensorboard directory:
tensorboard_writer = SummaryWriter(tensorboard_dir)
# log the real image for feedback:
log.info(f"Logging real feedback image")
imageio.imwrite(
render_dir / f"1__real_log.png",
to8b(real_feedback_image),
)
log.info(f"!! Beginning Training !!")
for num_iter in range(1, num_iterations + 1):
# load the next batch of data:
data_batch = next(training_data_loader)
coords, gt_rgb = (
data_batch[..., :-NUM_COLOUR_CHANNELS],
data_batch[..., -NUM_COLOUR_CHANNELS:],
)
# forward pass and compute the loss
pred_rgb = decode_coords_with_fg_and_mlp(
coords,
self.feature_grid,
self.decoder_mlp,
self.image_resolution,
)
loss = l1_loss(pred_rgb, gt_rgb)
# perform single step of optimization
optimizer.zero_grad()
loss.backward()
optimizer.step()
# verbose logging per iteration:
loss_value = loss.item()
psnr_value = mse2psnr(mse_loss(pred_rgb, gt_rgb).item())
# tensorboard summaries feedback (logged every iteration)
for summary_name, summary_value in (
("loss", loss_value),
("psnr", psnr_value),
):
if summary_value is not None:
tensorboard_writer.add_scalar(
summary_name, summary_value, global_step=num_iter
)
# console loss feedback log
if self._check_log_condition(
num_iter, loss_feedback_frequency, 1, num_iterations
):
loss_info_string = (
f"Global Iteration: {num_iter} "
f"Loss: {loss_value: .5f} "
f"PSNR: {psnr_value: .5f} "
)
log.info(loss_info_string)
# step the learning rate schedulers
if num_iter % lr_decay_steps == 0:
lr_scheduler.step()
new_lrs = [param_group["lr"] for param_group in optimizer.param_groups]
log_string = f"Adjusted learning rate | learning rate: {new_lrs} "
log.info(log_string)
# save the rendered feedback
if self._check_log_condition(
num_iter, feedback_frequency, 1, num_iterations
):
imageio.imwrite(
render_dir / f"render_log_{num_iter}.png",
to8b(self.render().numpy()),
)
# obtain and log test metrics
if self._check_log_condition(
num_iter, testing_frequency, 1, num_iterations
):
log.info(f"Computing test score ...")
test_psnr = mse2psnr(
mse_loss(
self.render(),
torch.from_numpy(real_feedback_image),
).item()
)
log.info(f"Full image PSNR: {test_psnr: .5f}")
tensorboard_writer.add_scalar(
"full_image_psnr", test_psnr, global_step=num_iter
)
# save the model
if self._check_log_condition(num_iter, save_frequency, 1, num_iterations):
torch.save(
self.get_save_info(),
model_dir / f"model_iter_{num_iter}.pth",
)
# save the final model
torch.save(self.get_save_info(), model_dir / f"model_final.pth")
log.info("!! Training complete !!")
def load_trained_image_model(
model_path: Path, device: torch.device, verbose_creation: bool = True
) -> ImageModel:
loaded_model = torch.load(model_path)
if verbose_creation:
log.info(f"loaded trained model from: {model_path}")
img_mod = ImageModel(
**loaded_model["conf"],
device=device,
verbose_creation=verbose_creation,
decoder_mlp_maker=partial(
get_default_image_decoder_mlp,
**loaded_model["decoder_mlp"]["conf"],
),
)
img_mod.feature_grid.load_state_dict(loaded_model["feature_grid"]["state_dict"])
img_mod.decoder_mlp.load_weights(loaded_model["decoder_mlp"])
return img_mod
|
akanimax/3inGAN
|
projects/thre3ingan/singans/image_model.py
|
image_model.py
|
py
| 20,218 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "torch.device",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "torch.empty",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "torch.nn.init.xavier_uniform_",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.grid_sample",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.networks.network_interface.Network",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "thre3d_atom.networks.dense_nets.SkipMLP",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "thre3d_atom.networks.shared.layers.PixelwiseNorm",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.networks.shared.layers.PositionalEncodingsEmbedder",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.networks.shared.layers.PositionalEncodingsEmbedder",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "typing.Sequence",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "typing.Sequence",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "torch.zeros",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.networks.dense_nets.SkipMLPConfig",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.utils.constants.NUM_COLOUR_CHANNELS",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "torch.nn.Tanh",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.networks.dense_nets.SkipMLP",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "thre3d_atom.utils.imaging_utils.adjust_dynamic_range",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "torch.floor",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 255,
"usage_type": "name"
},
{
"api_name": "thre3d_atom.networks.network_interface.Network",
"line_number": 255,
"usage_type": "name"
},
{
"api_name": "torch.device",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 257,
"usage_type": "attribute"
},
{
"api_name": "thre3d_atom.utils.logging.log.info",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.utils.logging.log",
"line_number": 278,
"usage_type": "name"
},
{
"api_name": "thre3d_atom.utils.logging.log.info",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.utils.logging.log",
"line_number": 279,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 282,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 287,
"usage_type": "name"
},
{
"api_name": "thre3d_atom.utils.constants.NUM_COLOUR_CHANNELS",
"line_number": 291,
"usage_type": "name"
},
{
"api_name": "numpy.ceil",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "typing.Tuple",
"line_number": 288,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 304,
"usage_type": "name"
},
{
"api_name": "torch.randperm",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 308,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 328,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 328,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 341,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 341,
"usage_type": "name"
},
{
"api_name": "thre3d_atom.utils.imaging_utils.get_2d_coordinates",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.utils.imaging_utils.adjust_dynamic_range",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 343,
"usage_type": "name"
},
{
"api_name": "PIL.Image",
"line_number": 381,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 390,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 393,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 393,
"usage_type": "attribute"
},
{
"api_name": "numpy.tile",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.utils.imaging_utils.adjust_dynamic_range",
"line_number": 399,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.utils.imaging_utils.get_2d_coordinates",
"line_number": 410,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 414,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 424,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 424,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.lr_scheduler.ExponentialLR",
"line_number": 431,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 431,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.tensorboard.SummaryWriter",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.utils.logging.log.info",
"line_number": 448,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.utils.logging.log",
"line_number": 448,
"usage_type": "name"
},
{
"api_name": "imageio.imwrite",
"line_number": 449,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.utils.imaging_utils.to8b",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.utils.logging.log.info",
"line_number": 454,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.utils.logging.log",
"line_number": 454,
"usage_type": "name"
},
{
"api_name": "thre3d_atom.utils.constants.NUM_COLOUR_CHANNELS",
"line_number": 459,
"usage_type": "name"
},
{
"api_name": "thre3d_atom.utils.constants.NUM_COLOUR_CHANNELS",
"line_number": 460,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.l1_loss",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.utils.imaging_utils.mse2psnr",
"line_number": 479,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.mse_loss",
"line_number": 479,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.utils.logging.log.info",
"line_number": 500,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.utils.logging.log",
"line_number": 500,
"usage_type": "name"
},
{
"api_name": "thre3d_atom.utils.logging.log.info",
"line_number": 507,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.utils.logging.log",
"line_number": 507,
"usage_type": "name"
},
{
"api_name": "imageio.imwrite",
"line_number": 513,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.utils.imaging_utils.to8b",
"line_number": 515,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.utils.logging.log.info",
"line_number": 522,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.utils.logging.log",
"line_number": 522,
"usage_type": "name"
},
{
"api_name": "thre3d_atom.utils.imaging_utils.mse2psnr",
"line_number": 523,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.mse_loss",
"line_number": 524,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 526,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.utils.logging.log.info",
"line_number": 529,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.utils.logging.log",
"line_number": 529,
"usage_type": "name"
},
{
"api_name": "torch.save",
"line_number": 536,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 542,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.utils.logging.log.info",
"line_number": 543,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.utils.logging.log",
"line_number": 543,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 547,
"usage_type": "name"
},
{
"api_name": "torch.device",
"line_number": 547,
"usage_type": "attribute"
},
{
"api_name": "torch.load",
"line_number": 549,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.utils.logging.log.info",
"line_number": 551,
"usage_type": "call"
},
{
"api_name": "thre3d_atom.utils.logging.log",
"line_number": 551,
"usage_type": "name"
},
{
"api_name": "functools.partial",
"line_number": 556,
"usage_type": "call"
}
] |
32599097431
|
from flask import Flask
from flask_restful import Api, Resource
app = Flask(__name__)
api = Api(app)
class Hellocall(Resource):
    def get(self, name, number):
        return {'Name': name, 'Age': number}
api.add_resource(Hellocall,"/Helloworld/<string:name>/<int:number>")
if __name__ == "__main__":
app.run(debug=True)
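# Example request against the route above (illustrative):
#   GET http://127.0.0.1:5000/Helloworld/Alice/30
#   -> {"Name": "Alice", "Age": 30}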
|
somasundaram1702/Flask-basics
|
main.py
|
main.py
|
py
| 330 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask_restful.Api",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask_restful.Resource",
"line_number": 7,
"usage_type": "name"
}
] |
3167046119
|
#!/usr/bin/env python3
import base64
from functions.aes import (
gen_random_bytes,
get_blocks,
pkcs7_unpad,
pkcs7_pad,
PKCS7Error,
AESCipher,
)
_STRINGS = [
base64.b64decode(s)
for s in [
"MDAwMDAwTm93IHRoYXQgdGhlIHBhcnR5IGlzIGp1bXBpbmc=",
"MDAwMDAxV2l0aCB0aGUgYmFzcyBraWNrZWQgaW4gYW5kIHRoZSBWZWdhJ3MgYXJlIHB1bXBpbic=",
"MDAwMDAyUXVpY2sgdG8gdGhlIHBvaW50LCB0byB0aGUgcG9pbnQsIG5vIGZha2luZw==",
"MDAwMDAzQ29va2luZyBNQydzIGxpa2UgYSBwb3VuZCBvZiBiYWNvbg==",
"MDAwMDA0QnVybmluZyAnZW0sIGlmIHlvdSBhaW4ndCBxdWljayBhbmQgbmltYmxl",
"MDAwMDA1SSBnbyBjcmF6eSB3aGVuIEkgaGVhciBhIGN5bWJhbA==",
"MDAwMDA2QW5kIGEgaGlnaCBoYXQgd2l0aCBhIHNvdXBlZCB1cCB0ZW1wbw==",
"MDAwMDA3SSdtIG9uIGEgcm9sbCwgaXQncyB0aW1lIHRvIGdvIHNvbG8=",
"MDAwMDA4b2xsaW4nIGluIG15IGZpdmUgcG9pbnQgb2g=",
"MDAwMDA5aXRoIG15IHJhZy10b3AgZG93biBzbyBteSBoYWlyIGNhbiBibG93",
]
]
_KEY = gen_random_bytes(16)
def _encrypt(pt: bytes) -> (bytes, bytes):
iv = gen_random_bytes(16)
cbc = AESCipher(AESCipher.MODE_CBC, _KEY, iv=iv)
ct = cbc.encrypt(pkcs7_pad(pt))
return iv, ct
def _oracle(iv: bytes, ct: bytes) -> bool:
cbc = AESCipher(AESCipher.MODE_CBC, _KEY, iv=iv)
try:
pkcs7_unpad(cbc.decrypt(ct))
except PKCS7Error:
return False
return True
def _attack(iv: bytes, ct: bytes) -> bytes:
cipher_blocks = [iv] + get_blocks(ct)
pt = b""
for i in reversed(range(1, len(cipher_blocks))):
ct_block_previous = cipher_blocks[i - 1]
ct_block_current = cipher_blocks[i]
intermediate_block = b""
for j in reversed(range(16)):
ctb_prefix = gen_random_bytes(j)
ctb_suffix = b""
for k in range(len(intermediate_block)):
ctb_suffix += bytes([(16 - j) ^ intermediate_block[k]])
n = 0
for m in range(256):
ctb = ctb_prefix + bytes([m]) + ctb_suffix
if _oracle(ctb, ct_block_current):
n = m
break
intermediate_block = bytes([n ^ (16 - j)]) + intermediate_block
pt = bytes([ct_block_previous[j] ^ int(intermediate_block[0])]) + pt
return pkcs7_unpad(pt)
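# Why the byte recovery in _attack works (standard CBC padding-oracle math):
# in CBC, P_i = D(C_i) XOR C_{i-1}. Writing I = D(C_i), the oracle accepts a
# forged previous block B iff I[j] XOR B[j] forms valid PKCS#7 padding, so
# B[j] = (16 - j) XOR I[j] pins down I[j] byte by byte; the plaintext byte is
# then P_i[j] = C_{i-1}[j] XOR I[j] -- exactly the two XORs in the loops above.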
def challenge17() -> bool:
for msg in _STRINGS:
ret = _attack(*_encrypt(msg))
if ret not in _STRINGS:
return False
return True
if __name__ == "__main__":
assert challenge17(), "The result does not match the expected value"
print("Ok")
|
svkirillov/cryptopals-python3
|
cryptopals/set3/challenge17.py
|
challenge17.py
|
py
| 2,562 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "base64.b64decode",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "functions.aes.gen_random_bytes",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "functions.aes.gen_random_bytes",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "functions.aes.AESCipher",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "functions.aes.AESCipher.MODE_CBC",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "functions.aes.pkcs7_pad",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "functions.aes.AESCipher",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "functions.aes.AESCipher.MODE_CBC",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "functions.aes.pkcs7_unpad",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "functions.aes.PKCS7Error",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "functions.aes.get_blocks",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "functions.aes.gen_random_bytes",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "functions.aes.pkcs7_unpad",
"line_number": 81,
"usage_type": "call"
}
] |
29792276111
|
#!/usr/bin/env python3
import pandas as pd
import os
import rospy
import rospkg
from std_msgs.msg import Bool
from robo_demo_msgs.srv import RunPlanningTest
rospack = rospkg.RosPack()
EE_CONTROL_PATH = rospack.get_path('end_effector_control')
PLANNING_DATA_PATH = os.path.join(EE_CONTROL_PATH, 'data', 'planning')
COMMON_DATA_PATH = os.path.join(PLANNING_DATA_PATH, 'planning_data.csv')
DEFAULT_TEST_DICT = {'change_goal': 10, 'add_obstacle': 10, 'add_obstacle_change_goal': 10}
class TestAutomator():
def __init__(self, test_dict=DEFAULT_TEST_DICT):
self.test_dict = test_dict
self.update_test_counts()
self.planning_test_srv = rospy.ServiceProxy('/test_interface_service', RunPlanningTest)
rospy.sleep(0.5)
rospy.loginfo("TestAutomator initialized, running tests")
self.run_tests()
def update_test_counts(self):
if os.path.exists(COMMON_DATA_PATH):
df = pd.read_csv(COMMON_DATA_PATH)
(t1, t2, t3) = ("change_goal", "add_obstacle", "add_obstacle_change_goal")
t1_count = len(df[df["Scenario"]==t1])
t2_count = len(df[df["Scenario"]==t2])
t3_count = len(df[df["Scenario"]==t3])
self.test_dict[t1] -= t1_count
self.test_dict[t2] -= t2_count
self.test_dict[t3] -= t3_count
rospy.loginfo(f"counts: {t1_count}, {t2_count}, {t3_count}")
def run_tests(self):
for (test_type, iterations) in self.test_dict.items():
for i in range(iterations):
rospy.loginfo(f"Running {test_type}, iteration: {i+1}/{iterations}")
response = self.planning_test_srv(test_type)
rospy.loginfo(f"Received response: {response}")
if __name__ == "__main__":
rospy.init_node("automate_testing_node")
test_dict = {'add_obstacle': 2}
ta = TestAutomator(test_dict)
# ta = TestAutomator()
|
dwya222/end_effector_control
|
scripts/automate_testing_v2.py
|
automate_testing_v2.py
|
py
| 1,922 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "rospkg.RosPack",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "rospy.ServiceProxy",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "robo_demo_msgs.srv.RunPlanningTest",
"line_number": 24,
"usage_type": "argument"
},
{
"api_name": "rospy.sleep",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "rospy.loginfo",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "rospy.loginfo",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "rospy.loginfo",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "rospy.loginfo",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "rospy.init_node",
"line_number": 50,
"usage_type": "call"
}
] |
8568584883
|
# Create class
import datetime
from datetime import date
class Person:
def __init__(self, name,year,month,day):
self.__name=name
self.__year=year
self.__month=month
self.__day=day
def getage(self):
now = datetime.datetime.now()
        c_year = now.year  # bug fix: was now.hour, which is not a valid year
c_month = now.month
c_day = now.day
current_date = date(c_year, c_month, c_day)
print(current_date)
dob = date(self.__year, self.__month, self.__day)
age = current_date - dob
        print(age)
irene = Person("Irene",1988,8,19)
irene.getage()
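# Example output with the year fix above (illustrative, run on 2024-01-01):
#   2024-01-01
#   12918 days, 0:00:00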
|
iWanjugu/Personal-Development-II
|
Python/getAge.py
|
getAge.py
|
py
| 673 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "datetime.date",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 23,
"usage_type": "call"
}
] |
6288317611
|
#! /usr/bin/env python
import math
import rospy
from sensor_msgs.msg import Imu
from tf.transformations import euler_from_quaternion
def imuCallback(imu):
    quat = [imu.orientation.x, imu.orientation.y, imu.orientation.z, imu.orientation.w]  # euler_from_quaternion expects (x, y, z, w) order
roll, pitch, yaw = euler_from_quaternion(quat)
rospy.loginfo('{} {} {}'.format(roll*180.0/math.pi, pitch*180.0/math.pi, yaw*180.0/math.pi))
def imuToYaw():
rospy.init_node('imu_to_yaw')
imu_sub = rospy.Subscriber('/android/imu', Imu, imuCallback)
rospy.spin()
if __name__ == '__main__':
imuToYaw()
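# Sanity check for the (x, y, z, w) ordering used above (illustrative):
#   euler_from_quaternion([0.0, 0.0, 0.0, 1.0]) -> (0.0, 0.0, 0.0)
# i.e. the identity quaternion yields zero roll/pitch/yaw.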
|
vigneshrajap/UNSW-work
|
imu_to_yaw/src/imu_to_yaw_node.py
|
imu_to_yaw_node.py
|
py
| 558 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "tf.transformations.euler_from_quaternion",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "rospy.loginfo",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "rospy.init_node",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "rospy.Subscriber",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sensor_msgs.msg.Imu",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "rospy.spin",
"line_number": 18,
"usage_type": "call"
}
] |
26113020545
|
__authors__ = ["T. Vincent"]
__license__ = "MIT"
__date__ = "03/04/2017"
# TODO
# keep aspect ratio managed here?
# smarter dirty flag handling?
import datetime as dt
import math
import weakref
import logging
import numbers
from typing import Optional, Union
from collections import namedtuple
import numpy
from .... import qt
from ...._glutils import gl, Program
from ..._utils import checkAxisLimits, FLOAT32_MINPOS
from .GLSupport import mat4Ortho
from .GLText import Text2D, CENTER, BOTTOM, TOP, LEFT, RIGHT, ROTATE_270
from ..._utils.ticklayout import niceNumbersAdaptative, niceNumbersForLog10
from ..._utils.dtime_ticklayout import calcTicksAdaptive, bestFormatString
from ..._utils.dtime_ticklayout import timestamp
_logger = logging.getLogger(__name__)
# PlotAxis ####################################################################
class PlotAxis(object):
"""Represents a 1D axis of the plot.
This class is intended to be used with :class:`GLPlotFrame`.
"""
def __init__(self, plotFrame,
tickLength=(0., 0.),
foregroundColor=(0., 0., 0., 1.0),
labelAlign=CENTER, labelVAlign=CENTER,
titleAlign=CENTER, titleVAlign=CENTER,
titleRotate=0, titleOffset=(0., 0.)):
self._ticks = None
self._plotFrameRef = weakref.ref(plotFrame)
self._isDateTime = False
self._timeZone = None
self._isLog = False
self._dataRange = 1., 100.
self._displayCoords = (0., 0.), (1., 0.)
self._title = ''
self._tickLength = tickLength
self._foregroundColor = foregroundColor
self._labelAlign = labelAlign
self._labelVAlign = labelVAlign
self._titleAlign = titleAlign
self._titleVAlign = titleVAlign
self._titleRotate = titleRotate
self._titleOffset = titleOffset
@property
def dataRange(self):
"""The range of the data represented on the axis as a tuple
of 2 floats: (min, max)."""
return self._dataRange
@dataRange.setter
def dataRange(self, dataRange):
assert len(dataRange) == 2
assert dataRange[0] <= dataRange[1]
dataRange = float(dataRange[0]), float(dataRange[1])
if dataRange != self._dataRange:
self._dataRange = dataRange
self._dirtyTicks()
@property
def isLog(self):
"""Whether the axis is using a log10 scale or not as a bool."""
return self._isLog
@isLog.setter
def isLog(self, isLog):
isLog = bool(isLog)
if isLog != self._isLog:
self._isLog = isLog
self._dirtyTicks()
@property
def timeZone(self):
"""Returnss datetime.tzinfo that is used if this axis plots date times."""
return self._timeZone
@timeZone.setter
def timeZone(self, tz):
"""Sets dateetime.tzinfo that is used if this axis plots date times."""
self._timeZone = tz
self._dirtyTicks()
@property
def isTimeSeries(self):
"""Whether the axis is showing floats as datetime objects"""
return self._isDateTime
@isTimeSeries.setter
def isTimeSeries(self, isTimeSeries):
isTimeSeries = bool(isTimeSeries)
if isTimeSeries != self._isDateTime:
self._isDateTime = isTimeSeries
self._dirtyTicks()
@property
def displayCoords(self):
"""The coordinates of the start and end points of the axis
in display space (i.e., in pixels) as a tuple of 2 tuples of
2 floats: ((x0, y0), (x1, y1)).
"""
return self._displayCoords
@displayCoords.setter
def displayCoords(self, displayCoords):
assert len(displayCoords) == 2
assert len(displayCoords[0]) == 2
assert len(displayCoords[1]) == 2
displayCoords = tuple(displayCoords[0]), tuple(displayCoords[1])
if displayCoords != self._displayCoords:
self._displayCoords = displayCoords
self._dirtyTicks()
@property
def devicePixelRatio(self):
"""Returns the ratio between qt pixels and device pixels."""
plotFrame = self._plotFrameRef()
return plotFrame.devicePixelRatio if plotFrame is not None else 1.
@property
def title(self):
"""The text label associated with this axis as a str in latin-1."""
return self._title
@title.setter
def title(self, title):
if title != self._title:
self._title = title
self._dirtyPlotFrame()
@property
def titleOffset(self):
"""Title offset in pixels (x: int, y: int)"""
return self._titleOffset
@titleOffset.setter
def titleOffset(self, offset):
if offset != self._titleOffset:
self._titleOffset = offset
self._dirtyTicks()
@property
def foregroundColor(self):
"""Color used for frame and labels"""
return self._foregroundColor
@foregroundColor.setter
def foregroundColor(self, color):
"""Color used for frame and labels"""
        assert len(color) == 4, \
            "foregroundColor must have length 4, got {}".format(len(color))
if self._foregroundColor != color:
self._foregroundColor = color
self._dirtyTicks()
@property
def ticks(self):
"""Ticks as tuples: ((x, y) in display, dataPos, textLabel)."""
if self._ticks is None:
self._ticks = tuple(self._ticksGenerator())
return self._ticks
def getVerticesAndLabels(self):
"""Create the list of vertices for axis and associated text labels.
:returns: A tuple: List of 2D line vertices, List of Text2D labels.
"""
vertices = list(self.displayCoords) # Add start and end points
labels = []
tickLabelsSize = [0., 0.]
font = qt.QApplication.instance().font()
xTickLength, yTickLength = self._tickLength
xTickLength *= self.devicePixelRatio
yTickLength *= self.devicePixelRatio
for (xPixel, yPixel), dataPos, text in self.ticks:
if text is None:
tickScale = 0.5
else:
tickScale = 1.
label = Text2D(text=text,
font=font,
color=self._foregroundColor,
x=xPixel - xTickLength,
y=yPixel - yTickLength,
align=self._labelAlign,
valign=self._labelVAlign,
devicePixelRatio=self.devicePixelRatio)
width, height = label.size
if width > tickLabelsSize[0]:
tickLabelsSize[0] = width
if height > tickLabelsSize[1]:
tickLabelsSize[1] = height
labels.append(label)
vertices.append((xPixel, yPixel))
vertices.append((xPixel + tickScale * xTickLength,
yPixel + tickScale * yTickLength))
(x0, y0), (x1, y1) = self.displayCoords
xAxisCenter = 0.5 * (x0 + x1)
yAxisCenter = 0.5 * (y0 + y1)
xOffset, yOffset = self.titleOffset
        # Adaptive title positioning:
# tickNorm = math.sqrt(xTickLength ** 2 + yTickLength ** 2)
# xOffset = -tickLabelsSize[0] * xTickLength / tickNorm
# xOffset -= 3 * xTickLength
# yOffset = -tickLabelsSize[1] * yTickLength / tickNorm
# yOffset -= 3 * yTickLength
axisTitle = Text2D(text=self.title,
font=font,
color=self._foregroundColor,
x=xAxisCenter + xOffset,
y=yAxisCenter + yOffset,
align=self._titleAlign,
valign=self._titleVAlign,
rotate=self._titleRotate,
devicePixelRatio=self.devicePixelRatio)
labels.append(axisTitle)
return vertices, labels
def _dirtyPlotFrame(self):
"""Dirty parent GLPlotFrame"""
plotFrame = self._plotFrameRef()
if plotFrame is not None:
plotFrame._dirty()
def _dirtyTicks(self):
"""Mark ticks as dirty and notify listener (i.e., background)."""
self._ticks = None
self._dirtyPlotFrame()
@staticmethod
def _frange(start, stop, step):
"""range for float (including stop)."""
while start <= stop:
yield start
start += step
def _ticksGenerator(self):
"""Generator of ticks as tuples:
((x, y) in display, dataPos, textLabel).
"""
dataMin, dataMax = self.dataRange
if self.isLog and dataMin <= 0.:
_logger.warning(
'Getting ticks while isLog=True and dataRange[0]<=0.')
dataMin = 1.
if dataMax < dataMin:
dataMax = 1.
if dataMin != dataMax: # data range is not null
(x0, y0), (x1, y1) = self.displayCoords
if self.isLog:
if self.isTimeSeries:
_logger.warning("Time series not implemented for log-scale")
logMin, logMax = math.log10(dataMin), math.log10(dataMax)
tickMin, tickMax, step, _ = niceNumbersForLog10(logMin, logMax)
xScale = (x1 - x0) / (logMax - logMin)
yScale = (y1 - y0) / (logMax - logMin)
for logPos in self._frange(tickMin, tickMax, step):
if logMin <= logPos <= logMax:
dataPos = 10 ** logPos
xPixel = x0 + (logPos - logMin) * xScale
yPixel = y0 + (logPos - logMin) * yScale
text = '1e%+03d' % logPos
yield ((xPixel, yPixel), dataPos, text)
if step == 1:
ticks = list(self._frange(tickMin, tickMax, step))[:-1]
for logPos in ticks:
dataOrigPos = 10 ** logPos
for index in range(2, 10):
dataPos = dataOrigPos * index
if dataMin <= dataPos <= dataMax:
logSubPos = math.log10(dataPos)
xPixel = x0 + (logSubPos - logMin) * xScale
yPixel = y0 + (logSubPos - logMin) * yScale
yield ((xPixel, yPixel), dataPos, None)
else:
xScale = (x1 - x0) / (dataMax - dataMin)
yScale = (y1 - y0) / (dataMax - dataMin)
nbPixels = math.sqrt(pow(x1 - x0, 2) + pow(y1 - y0, 2)) / self.devicePixelRatio
# Density of 1.3 label per 92 pixels
# i.e., 1.3 label per inch on a 92 dpi screen
tickDensity = 1.3 / 92
if not self.isTimeSeries:
tickMin, tickMax, step, nbFrac = niceNumbersAdaptative(
dataMin, dataMax, nbPixels, tickDensity)
for dataPos in self._frange(tickMin, tickMax, step):
if dataMin <= dataPos <= dataMax:
xPixel = x0 + (dataPos - dataMin) * xScale
yPixel = y0 + (dataPos - dataMin) * yScale
if nbFrac == 0:
text = '%g' % dataPos
else:
text = ('%.' + str(nbFrac) + 'f') % dataPos
yield ((xPixel, yPixel), dataPos, text)
else:
# Time series
try:
dtMin = dt.datetime.fromtimestamp(dataMin, tz=self.timeZone)
dtMax = dt.datetime.fromtimestamp(dataMax, tz=self.timeZone)
except ValueError:
_logger.warning("Data range cannot be displayed with time axis")
return # Range is out of bound of the datetime
tickDateTimes, spacing, unit = calcTicksAdaptive(
dtMin, dtMax, nbPixels, tickDensity)
for tickDateTime in tickDateTimes:
if dtMin <= tickDateTime <= dtMax:
dataPos = timestamp(tickDateTime)
xPixel = x0 + (dataPos - dataMin) * xScale
yPixel = y0 + (dataPos - dataMin) * yScale
fmtStr = bestFormatString(spacing, unit)
text = tickDateTime.strftime(fmtStr)
yield ((xPixel, yPixel), dataPos, text)
# GLPlotFrame #################################################################
class GLPlotFrame(object):
"""Base class for rendering a 2D frame surrounded by axes."""
_TICK_LENGTH_IN_PIXELS = 5
_LINE_WIDTH = 1
_SHADERS = {
'vertex': """
attribute vec2 position;
uniform mat4 matrix;
void main(void) {
gl_Position = matrix * vec4(position, 0.0, 1.0);
}
""",
'fragment': """
uniform vec4 color;
uniform float tickFactor; /* = 1./tickLength or 0. for solid line */
void main(void) {
if (mod(tickFactor * (gl_FragCoord.x + gl_FragCoord.y), 2.) < 1.) {
gl_FragColor = color;
} else {
discard;
}
}
"""
}
_Margins = namedtuple('Margins', ('left', 'right', 'top', 'bottom'))
# Margins used when plot frame is not displayed
_NoDisplayMargins = _Margins(0, 0, 0, 0)
def __init__(self, marginRatios, foregroundColor, gridColor):
"""
:param List[float] marginRatios:
The ratios of margins around plot area for axis and labels.
(left, top, right, bottom) as float in [0., 1.]
:param foregroundColor: color used for the frame and labels.
:type foregroundColor: tuple with RGBA values ranging from 0.0 to 1.0
:param gridColor: color used for grid lines.
:type gridColor: tuple RGBA with RGBA values ranging from 0.0 to 1.0
"""
self._renderResources = None
self.__marginRatios = marginRatios
self.__marginsCache = None
self._foregroundColor = foregroundColor
self._gridColor = gridColor
self.axes = [] # List of PlotAxis to be updated by subclasses
self._grid = False
self._size = 0., 0.
self._title = ''
self._devicePixelRatio = 1.
@property
def isDirty(self):
"""True if it need to refresh graphic rendering, False otherwise."""
return self._renderResources is None
GRID_NONE = 0
GRID_MAIN_TICKS = 1
GRID_SUB_TICKS = 2
GRID_ALL_TICKS = (GRID_MAIN_TICKS + GRID_SUB_TICKS)
@property
def foregroundColor(self):
"""Color used for frame and labels"""
return self._foregroundColor
@foregroundColor.setter
def foregroundColor(self, color):
"""Color used for frame and labels"""
        assert len(color) == 4, \
            "foregroundColor must have length 4, got {}".format(len(color))
if self._foregroundColor != color:
self._foregroundColor = color
for axis in self.axes:
axis.foregroundColor = color
self._dirty()
@property
def gridColor(self):
"""Color used for frame and labels"""
return self._gridColor
@gridColor.setter
def gridColor(self, color):
"""Color used for frame and labels"""
        assert len(color) == 4, \
            "gridColor must have length 4, got {}".format(len(color))
if self._gridColor != color:
self._gridColor = color
self._dirty()
@property
def marginRatios(self):
"""Plot margin ratios: (left, top, right, bottom) as 4 float in [0, 1].
"""
return self.__marginRatios
@marginRatios.setter
def marginRatios(self, ratios):
ratios = tuple(float(v) for v in ratios)
assert len(ratios) == 4
for value in ratios:
assert 0. <= value <= 1.
assert ratios[0] + ratios[2] < 1.
assert ratios[1] + ratios[3] < 1.
if self.__marginRatios != ratios:
self.__marginRatios = ratios
self.__marginsCache = None # Clear cached margins
self._dirty()
@property
def margins(self):
"""Margins in pixels around the plot."""
if self.__marginsCache is None:
width, height = self.size
left, top, right, bottom = self.marginRatios
self.__marginsCache = self._Margins(
left=int(left*width),
right=int(right*width),
top=int(top*height),
bottom=int(bottom*height))
return self.__marginsCache
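    # Worked example (illustrative): with size == (1000, 800) device pixels
    # and marginRatios == (0.15, 0.05, 0.10, 0.15), the cached margins are
    # left=150, right=100, top=40, bottom=120, so plotSize == (750, 640).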
@property
def devicePixelRatio(self):
return self._devicePixelRatio
@devicePixelRatio.setter
def devicePixelRatio(self, ratio):
if ratio != self._devicePixelRatio:
self._devicePixelRatio = ratio
self._dirty()
@property
def grid(self):
"""Grid display mode:
- 0: No grid.
- 1: Grid on main ticks.
- 2: Grid on sub-ticks for log scale axes.
- 3: Grid on main and sub ticks."""
return self._grid
@grid.setter
def grid(self, grid):
assert grid in (self.GRID_NONE, self.GRID_MAIN_TICKS,
self.GRID_SUB_TICKS, self.GRID_ALL_TICKS)
if grid != self._grid:
self._grid = grid
self._dirty()
@property
def size(self):
"""Size in device pixels of the plot area including margins."""
return self._size
@size.setter
def size(self, size):
assert len(size) == 2
size = tuple(size)
if size != self._size:
self._size = size
self.__marginsCache = None # Clear cached margins
self._dirty()
@property
def plotOrigin(self):
"""Plot area origin (left, top) in widget coordinates in pixels."""
return self.margins.left, self.margins.top
@property
def plotSize(self):
"""Plot area size (width, height) in pixels."""
w, h = self.size
w -= self.margins.left + self.margins.right
h -= self.margins.top + self.margins.bottom
return w, h
@property
def title(self):
"""Main title as a str in latin-1."""
return self._title
@title.setter
def title(self, title):
if title != self._title:
self._title = title
self._dirty()
# In-place update
# if self._renderResources is not None:
# self._renderResources[-1][-1].text = title
def _dirty(self):
        # When Text2D requires discard, we need to handle it
self._renderResources = None
def _buildGridVertices(self):
if self._grid == self.GRID_NONE:
return []
elif self._grid == self.GRID_MAIN_TICKS:
def test(text):
return text is not None
elif self._grid == self.GRID_SUB_TICKS:
def test(text):
return text is None
elif self._grid == self.GRID_ALL_TICKS:
def test(_):
return True
else:
logging.warning('Wrong grid mode: %d' % self._grid)
return []
return self._buildGridVerticesWithTest(test)
def _buildGridVerticesWithTest(self, test):
"""Override in subclass to generate grid vertices"""
return []
def _buildVerticesAndLabels(self):
# To fill with copy of axes lists
vertices = []
labels = []
for axis in self.axes:
axisVertices, axisLabels = axis.getVerticesAndLabels()
vertices += axisVertices
labels += axisLabels
vertices = numpy.array(vertices, dtype=numpy.float32)
# Add main title
xTitle = (self.size[0] + self.margins.left -
self.margins.right) // 2
yTitle = self.margins.top - self._TICK_LENGTH_IN_PIXELS
labels.append(Text2D(text=self.title,
font=qt.QApplication.instance().font(),
color=self._foregroundColor,
x=xTitle,
y=yTitle,
align=CENTER,
valign=BOTTOM,
devicePixelRatio=self.devicePixelRatio))
# grid
gridVertices = numpy.array(self._buildGridVertices(),
dtype=numpy.float32)
self._renderResources = (vertices, gridVertices, labels)
_program = Program(
_SHADERS['vertex'], _SHADERS['fragment'], attrib0='position')
def render(self):
if self.margins == self._NoDisplayMargins:
return
if self._renderResources is None:
self._buildVerticesAndLabels()
vertices, gridVertices, labels = self._renderResources
width, height = self.size
matProj = mat4Ortho(0, width, height, 0, 1, -1)
gl.glViewport(0, 0, width, height)
prog = self._program
prog.use()
gl.glLineWidth(self._LINE_WIDTH)
gl.glUniformMatrix4fv(prog.uniforms['matrix'], 1, gl.GL_TRUE,
matProj.astype(numpy.float32))
gl.glUniform4f(prog.uniforms['color'], *self._foregroundColor)
gl.glUniform1f(prog.uniforms['tickFactor'], 0.)
gl.glEnableVertexAttribArray(prog.attributes['position'])
gl.glVertexAttribPointer(prog.attributes['position'],
2,
gl.GL_FLOAT,
gl.GL_FALSE,
0, vertices)
gl.glDrawArrays(gl.GL_LINES, 0, len(vertices))
for label in labels:
label.render(matProj)
def renderGrid(self):
if self._grid == self.GRID_NONE:
return
if self._renderResources is None:
self._buildVerticesAndLabels()
vertices, gridVertices, labels = self._renderResources
width, height = self.size
matProj = mat4Ortho(0, width, height, 0, 1, -1)
gl.glViewport(0, 0, width, height)
prog = self._program
prog.use()
gl.glLineWidth(self._LINE_WIDTH)
gl.glUniformMatrix4fv(prog.uniforms['matrix'], 1, gl.GL_TRUE,
matProj.astype(numpy.float32))
gl.glUniform4f(prog.uniforms['color'], *self._gridColor)
gl.glUniform1f(prog.uniforms['tickFactor'], 0.) # 1/2.) # 1/tickLen
gl.glEnableVertexAttribArray(prog.attributes['position'])
gl.glVertexAttribPointer(prog.attributes['position'],
2,
gl.GL_FLOAT,
gl.GL_FALSE,
0, gridVertices)
gl.glDrawArrays(gl.GL_LINES, 0, len(gridVertices))
# GLPlotFrame2D ###############################################################
class GLPlotFrame2D(GLPlotFrame):
def __init__(self, marginRatios, foregroundColor, gridColor):
"""
:param List[float] marginRatios:
The ratios of margins around plot area for axis and labels.
(left, top, right, bottom) as float in [0., 1.]
:param foregroundColor: color used for the frame and labels.
:type foregroundColor: tuple with RGBA values ranging from 0.0 to 1.0
:param gridColor: color used for grid lines.
:type gridColor: tuple RGBA with RGBA values ranging from 0.0 to 1.0
"""
super(GLPlotFrame2D, self).__init__(marginRatios, foregroundColor, gridColor)
self.axes.append(PlotAxis(self,
tickLength=(0., -5.),
foregroundColor=self._foregroundColor,
labelAlign=CENTER, labelVAlign=TOP,
titleAlign=CENTER, titleVAlign=TOP,
titleRotate=0))
self._x2AxisCoords = ()
self.axes.append(PlotAxis(self,
tickLength=(5., 0.),
foregroundColor=self._foregroundColor,
labelAlign=RIGHT, labelVAlign=CENTER,
titleAlign=CENTER, titleVAlign=BOTTOM,
titleRotate=ROTATE_270))
self._y2Axis = PlotAxis(self,
tickLength=(-5., 0.),
foregroundColor=self._foregroundColor,
labelAlign=LEFT, labelVAlign=CENTER,
titleAlign=CENTER, titleVAlign=TOP,
titleRotate=ROTATE_270)
self._isYAxisInverted = False
self._dataRanges = {
'x': (1., 100.), 'y': (1., 100.), 'y2': (1., 100.)}
self._baseVectors = (1., 0.), (0., 1.)
self._transformedDataRanges = None
self._transformedDataProjMat = None
self._transformedDataY2ProjMat = None
def _dirty(self):
super(GLPlotFrame2D, self)._dirty()
self._transformedDataRanges = None
self._transformedDataProjMat = None
self._transformedDataY2ProjMat = None
@property
def isDirty(self):
"""True if it need to refresh graphic rendering, False otherwise."""
return (super(GLPlotFrame2D, self).isDirty or
self._transformedDataRanges is None or
self._transformedDataProjMat is None or
self._transformedDataY2ProjMat is None)
@property
def xAxis(self):
return self.axes[0]
@property
def yAxis(self):
return self.axes[1]
@property
def y2Axis(self):
return self._y2Axis
@property
def isY2Axis(self):
"""Whether to display the left Y axis or not."""
return len(self.axes) == 3
@isY2Axis.setter
def isY2Axis(self, isY2Axis):
if isY2Axis != self.isY2Axis:
if isY2Axis:
self.axes.append(self._y2Axis)
else:
self.axes = self.axes[:2]
self._dirty()
@property
def isYAxisInverted(self):
"""Whether Y axes are inverted or not as a bool."""
return self._isYAxisInverted
@isYAxisInverted.setter
def isYAxisInverted(self, value):
value = bool(value)
if value != self._isYAxisInverted:
self._isYAxisInverted = value
self._dirty()
DEFAULT_BASE_VECTORS = (1., 0.), (0., 1.)
"""Values of baseVectors for orthogonal axes."""
@property
def baseVectors(self):
"""Coordinates of the X and Y axes in the orthogonal plot coords.
Raises ValueError if corresponding matrix is singular.
2 tuples of 2 floats: (xx, xy), (yx, yy)
"""
return self._baseVectors
@baseVectors.setter
def baseVectors(self, baseVectors):
self._dirty()
(xx, xy), (yx, yy) = baseVectors
vectors = (float(xx), float(xy)), (float(yx), float(yy))
det = (vectors[0][0] * vectors[1][1] - vectors[1][0] * vectors[0][1])
if det == 0.:
raise ValueError("Singular matrix for base vectors: " +
str(vectors))
if vectors != self._baseVectors:
self._baseVectors = vectors
self._dirty()
def _updateTitleOffset(self):
"""Update axes title offset according to margins"""
margins = self.margins
self.xAxis.titleOffset = 0, margins.bottom // 2
self.yAxis.titleOffset = -3 * margins.left // 4, 0
self.y2Axis.titleOffset = 3 * margins.right // 4, 0
# Override size and marginRatios setters to update titleOffsets
@GLPlotFrame.size.setter
def size(self, size):
GLPlotFrame.size.fset(self, size)
self._updateTitleOffset()
@GLPlotFrame.marginRatios.setter
def marginRatios(self, ratios):
GLPlotFrame.marginRatios.fset(self, ratios)
self._updateTitleOffset()
@property
def dataRanges(self):
"""Ranges of data visible in the plot on x, y and y2 axes.
        This is different from the axes range when axes are not orthogonal.
Type: ((xMin, xMax), (yMin, yMax), (y2Min, y2Max))
"""
return self._DataRanges(self._dataRanges['x'],
self._dataRanges['y'],
self._dataRanges['y2'])
def setDataRanges(self, x=None, y=None, y2=None):
"""Set data range over each axes.
The provided ranges are clipped to possible values
        (i.e., float32 range + positive range for log scale).
:param x: (min, max) data range over X axis
:param y: (min, max) data range over Y axis
:param y2: (min, max) data range over Y2 axis
"""
if x is not None:
self._dataRanges['x'] = checkAxisLimits(
x[0], x[1], self.xAxis.isLog, name='x')
if y is not None:
self._dataRanges['y'] = checkAxisLimits(
y[0], y[1], self.yAxis.isLog, name='y')
if y2 is not None:
self._dataRanges['y2'] = checkAxisLimits(
y2[0], y2[1], self.y2Axis.isLog, name='y2')
self.xAxis.dataRange = self._dataRanges['x']
self.yAxis.dataRange = self._dataRanges['y']
self.y2Axis.dataRange = self._dataRanges['y2']
_DataRanges = namedtuple('dataRanges', ('x', 'y', 'y2'))
@property
def transformedDataRanges(self):
"""Bounds of the displayed area in transformed data coordinates
(i.e., log scale applied if any as well as skew)
3-tuple of 2-tuple (min, max) for each axis: x, y, y2.
"""
if self._transformedDataRanges is None:
(xMin, xMax), (yMin, yMax), (y2Min, y2Max) = self.dataRanges
if self.xAxis.isLog:
try:
xMin = math.log10(xMin)
except ValueError:
_logger.info('xMin: warning log10(%f)', xMin)
xMin = 0.
try:
xMax = math.log10(xMax)
except ValueError:
_logger.info('xMax: warning log10(%f)', xMax)
xMax = 0.
if self.yAxis.isLog:
try:
yMin = math.log10(yMin)
except ValueError:
_logger.info('yMin: warning log10(%f)', yMin)
yMin = 0.
try:
yMax = math.log10(yMax)
except ValueError:
_logger.info('yMax: warning log10(%f)', yMax)
yMax = 0.
            if self.y2Axis.isLog:
                try:
                    y2Min = math.log10(y2Min)
                except ValueError:
                    _logger.info('y2Min: warning log10(%f)', y2Min)
                    y2Min = 0.
                try:
                    y2Max = math.log10(y2Max)
                except ValueError:
                    _logger.info('y2Max: warning log10(%f)', y2Max)
                    y2Max = 0.
self._transformedDataRanges = self._DataRanges(
(xMin, xMax), (yMin, yMax), (y2Min, y2Max))
return self._transformedDataRanges
@property
def transformedDataProjMat(self):
"""Orthographic projection matrix for rendering transformed data
:type: numpy.matrix
"""
if self._transformedDataProjMat is None:
xMin, xMax = self.transformedDataRanges.x
yMin, yMax = self.transformedDataRanges.y
if self.isYAxisInverted:
mat = mat4Ortho(xMin, xMax, yMax, yMin, 1, -1)
else:
mat = mat4Ortho(xMin, xMax, yMin, yMax, 1, -1)
self._transformedDataProjMat = mat
return self._transformedDataProjMat
@property
def transformedDataY2ProjMat(self):
"""Orthographic projection matrix for rendering transformed data
for the 2nd Y axis
:type: numpy.matrix
"""
if self._transformedDataY2ProjMat is None:
xMin, xMax = self.transformedDataRanges.x
y2Min, y2Max = self.transformedDataRanges.y2
if self.isYAxisInverted:
mat = mat4Ortho(xMin, xMax, y2Max, y2Min, 1, -1)
else:
mat = mat4Ortho(xMin, xMax, y2Min, y2Max, 1, -1)
self._transformedDataY2ProjMat = mat
return self._transformedDataY2ProjMat
@staticmethod
def __applyLog(
data: Union[float, numpy.ndarray],
isLog: bool
) -> Optional[Union[float, numpy.ndarray]]:
"""Apply log to data filtering out """
if not isLog:
return data
if isinstance(data, numbers.Real):
return None if data < FLOAT32_MINPOS else math.log10(data)
isBelowMin = data < FLOAT32_MINPOS
if numpy.any(isBelowMin):
data = numpy.array(data, copy=True, dtype=numpy.float64)
data[isBelowMin] = numpy.nan
with numpy.errstate(divide='ignore'):
return numpy.log10(data)
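    # Behavior sketch (illustrative values, not from the original file):
    #   __applyLog(100., True)                    -> 2.0
    #   __applyLog(-1., True)                     -> None  (below FLOAT32_MINPOS)
    #   __applyLog(numpy.array([10., -1.]), True) -> array([1., nan])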
def dataToPixel(self, x, y, axis='left'):
"""Convert data coordinate to widget pixel coordinate.
"""
assert axis in ('left', 'right')
trBounds = self.transformedDataRanges
xDataTr = self.__applyLog(x, self.xAxis.isLog)
if xDataTr is None:
return None
yDataTr = self.__applyLog(y, self.yAxis.isLog)
if yDataTr is None:
return None
# Non-orthogonal axes
if self.baseVectors != self.DEFAULT_BASE_VECTORS:
(xx, xy), (yx, yy) = self.baseVectors
skew_mat = numpy.array(((xx, yx), (xy, yy)))
coords = numpy.dot(skew_mat, numpy.array((xDataTr, yDataTr)))
xDataTr, yDataTr = coords
plotWidth, plotHeight = self.plotSize
xPixel = (self.margins.left +
plotWidth * (xDataTr - trBounds.x[0]) /
(trBounds.x[1] - trBounds.x[0]))
usedAxis = trBounds.y if axis == "left" else trBounds.y2
yOffset = (plotHeight * (yDataTr - usedAxis[0]) /
(usedAxis[1] - usedAxis[0]))
if self.isYAxisInverted:
yPixel = self.margins.top + yOffset
else:
yPixel = self.size[1] - self.margins.bottom - yOffset
return (
int(xPixel) if isinstance(xPixel, numbers.Real) else xPixel.astype(numpy.int64),
int(yPixel) if isinstance(yPixel, numbers.Real) else yPixel.astype(numpy.int64),
)
def pixelToData(self, x, y, axis="left"):
"""Convert pixel position to data coordinates.
:param float x: X coord
:param float y: Y coord
:param str axis: Y axis to use in ('left', 'right')
:return: (x, y) position in data coords
"""
assert axis in ("left", "right")
plotWidth, plotHeight = self.plotSize
trBounds = self.transformedDataRanges
xData = (x - self.margins.left + 0.5) / float(plotWidth)
xData = trBounds.x[0] + xData * (trBounds.x[1] - trBounds.x[0])
usedAxis = trBounds.y if axis == "left" else trBounds.y2
if self.isYAxisInverted:
yData = (y - self.margins.top + 0.5) / float(plotHeight)
yData = usedAxis[0] + yData * (usedAxis[1] - usedAxis[0])
else:
yData = self.size[1] - self.margins.bottom - y - 0.5
yData /= float(plotHeight)
yData = usedAxis[0] + yData * (usedAxis[1] - usedAxis[0])
# non-orthogonal axis
if self.baseVectors != self.DEFAULT_BASE_VECTORS:
(xx, xy), (yx, yy) = self.baseVectors
skew_mat = numpy.array(((xx, yx), (xy, yy)))
skew_mat = numpy.linalg.inv(skew_mat)
coords = numpy.dot(skew_mat, numpy.array((xData, yData)))
xData, yData = coords
if self.xAxis.isLog:
xData = pow(10, xData)
if self.yAxis.isLog:
yData = pow(10, yData)
return xData, yData
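    # Round-trip sketch (illustrative; 'frame' is a hypothetical GLPlotFrame2D):
    #   px, py = frame.dataToPixel(x, y, axis='left')
    #   xBack, yBack = frame.pixelToData(px, py, axis='left')
    # recovers (x, y) up to the integer pixel rounding applied in dataToPixel.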
def _buildGridVerticesWithTest(self, test):
vertices = []
if self.baseVectors == self.DEFAULT_BASE_VECTORS:
for axis in self.axes:
for (xPixel, yPixel), data, text in axis.ticks:
if test(text):
vertices.append((xPixel, yPixel))
if axis == self.xAxis:
vertices.append((xPixel, self.margins.top))
elif axis == self.yAxis:
vertices.append((self.size[0] - self.margins.right,
yPixel))
else: # axis == self.y2Axis
vertices.append((self.margins.left, yPixel))
else:
# Get plot corners in data coords
plotLeft, plotTop = self.plotOrigin
plotWidth, plotHeight = self.plotSize
corners = [(plotLeft, plotTop),
(plotLeft, plotTop + plotHeight),
(plotLeft + plotWidth, plotTop + plotHeight),
(plotLeft + plotWidth, plotTop)]
for axis in self.axes:
if axis == self.xAxis:
cornersInData = numpy.array([
self.pixelToData(x, y) for (x, y) in corners])
borders = ((cornersInData[0], cornersInData[3]), # top
(cornersInData[1], cornersInData[0]), # left
(cornersInData[3], cornersInData[2])) # right
for (xPixel, yPixel), data, text in axis.ticks:
if test(text):
for (x0, y0), (x1, y1) in borders:
if min(x0, x1) <= data < max(x0, x1):
yIntersect = (data - x0) * \
(y1 - y0) / (x1 - x0) + y0
pixelPos = self.dataToPixel(
data, yIntersect)
if pixelPos is not None:
vertices.append((xPixel, yPixel))
vertices.append(pixelPos)
break # Stop at first intersection
else: # y or y2 axes
if axis == self.yAxis:
axis_name = 'left'
cornersInData = numpy.array([
self.pixelToData(x, y) for (x, y) in corners])
borders = (
(cornersInData[3], cornersInData[2]), # right
(cornersInData[0], cornersInData[3]), # top
(cornersInData[2], cornersInData[1])) # bottom
else: # axis == self.y2Axis
axis_name = 'right'
                        cornersInData = numpy.array([self.pixelToData(
                            x, y, axis='right') for (x, y) in corners])
borders = (
(cornersInData[1], cornersInData[0]), # left
(cornersInData[0], cornersInData[3]), # top
(cornersInData[2], cornersInData[1])) # bottom
for (xPixel, yPixel), data, text in axis.ticks:
if test(text):
for (x0, y0), (x1, y1) in borders:
if min(y0, y1) <= data < max(y0, y1):
xIntersect = (data - y0) * \
(x1 - x0) / (y1 - y0) + x0
pixelPos = self.dataToPixel(
xIntersect, data, axis=axis_name)
if pixelPos is not None:
vertices.append((xPixel, yPixel))
vertices.append(pixelPos)
break # Stop at first intersection
return vertices
def _buildVerticesAndLabels(self):
width, height = self.size
xCoords = (self.margins.left - 0.5,
width - self.margins.right + 0.5)
yCoords = (height - self.margins.bottom + 0.5,
self.margins.top - 0.5)
self.axes[0].displayCoords = ((xCoords[0], yCoords[0]),
(xCoords[1], yCoords[0]))
self._x2AxisCoords = ((xCoords[0], yCoords[1]),
(xCoords[1], yCoords[1]))
if self.isYAxisInverted:
# Y axes are inverted, axes coordinates are inverted
yCoords = yCoords[1], yCoords[0]
self.axes[1].displayCoords = ((xCoords[0], yCoords[0]),
(xCoords[0], yCoords[1]))
self._y2Axis.displayCoords = ((xCoords[1], yCoords[0]),
(xCoords[1], yCoords[1]))
super(GLPlotFrame2D, self)._buildVerticesAndLabels()
vertices, gridVertices, labels = self._renderResources
        # Add vertices for borders that have no axis attached
extraVertices = []
extraVertices += self._x2AxisCoords
if not self.isY2Axis:
extraVertices += self._y2Axis.displayCoords
extraVertices = numpy.array(
extraVertices, copy=False, dtype=numpy.float32)
vertices = numpy.append(vertices, extraVertices, axis=0)
self._renderResources = (vertices, gridVertices, labels)
@property
def foregroundColor(self):
"""Color used for frame and labels"""
return self._foregroundColor
@foregroundColor.setter
def foregroundColor(self, color):
"""Color used for frame and labels"""
        assert len(color) == 4, \
            "foregroundColor must have length 4, got {}".format(len(color))
if self._foregroundColor != color:
self._y2Axis.foregroundColor = color
GLPlotFrame.foregroundColor.fset(self, color) # call parent property
|
silx-kit/silx
|
src/silx/gui/plot/backends/glutils/GLPlotFrame.py
|
GLPlotFrame.py
|
py
| 42,835 |
python
|
en
|
code
| 106 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "GLText.CENTER",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "GLText.CENTER",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "weakref.ref",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "GLText.Text2D",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "GLText.Text2D",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "math.log10",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "_utils.ticklayout.niceNumbersForLog10",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "math.log10",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "_utils.ticklayout.niceNumbersAdaptative",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 341,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 342,
"usage_type": "attribute"
},
{
"api_name": "_utils.dtime_ticklayout.calcTicksAdaptive",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "_utils.dtime_ticklayout.timestamp",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "_utils.dtime_ticklayout.bestFormatString",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 585,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 604,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 604,
"usage_type": "attribute"
},
{
"api_name": "GLText.Text2D",
"line_number": 610,
"usage_type": "call"
},
{
"api_name": "GLText.CENTER",
"line_number": 615,
"usage_type": "name"
},
{
"api_name": "GLText.BOTTOM",
"line_number": 616,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 620,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 621,
"usage_type": "attribute"
},
{
"api_name": "_glutils.Program",
"line_number": 625,
"usage_type": "call"
},
{
"api_name": "GLSupport.mat4Ortho",
"line_number": 637,
"usage_type": "call"
},
{
"api_name": "_glutils.gl.glViewport",
"line_number": 639,
"usage_type": "call"
},
{
"api_name": "_glutils.gl",
"line_number": 639,
"usage_type": "name"
},
{
"api_name": "_glutils.gl.glLineWidth",
"line_number": 644,
"usage_type": "call"
},
{
"api_name": "_glutils.gl",
"line_number": 644,
"usage_type": "name"
},
{
"api_name": "_glutils.gl.glUniformMatrix4fv",
"line_number": 646,
"usage_type": "call"
},
{
"api_name": "_glutils.gl",
"line_number": 646,
"usage_type": "name"
},
{
"api_name": "_glutils.gl.GL_TRUE",
"line_number": 646,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 647,
"usage_type": "attribute"
},
{
"api_name": "_glutils.gl.glUniform4f",
"line_number": 648,
"usage_type": "call"
},
{
"api_name": "_glutils.gl",
"line_number": 648,
"usage_type": "name"
},
{
"api_name": "_glutils.gl.glUniform1f",
"line_number": 649,
"usage_type": "call"
},
{
"api_name": "_glutils.gl",
"line_number": 649,
"usage_type": "name"
},
{
"api_name": "_glutils.gl.glEnableVertexAttribArray",
"line_number": 651,
"usage_type": "call"
},
{
"api_name": "_glutils.gl",
"line_number": 651,
"usage_type": "name"
},
{
"api_name": "_glutils.gl.glVertexAttribPointer",
"line_number": 652,
"usage_type": "call"
},
{
"api_name": "_glutils.gl",
"line_number": 652,
"usage_type": "name"
},
{
"api_name": "_glutils.gl.GL_FLOAT",
"line_number": 654,
"usage_type": "attribute"
},
{
"api_name": "_glutils.gl",
"line_number": 654,
"usage_type": "name"
},
{
"api_name": "_glutils.gl.GL_FALSE",
"line_number": 655,
"usage_type": "attribute"
},
{
"api_name": "_glutils.gl",
"line_number": 655,
"usage_type": "name"
},
{
"api_name": "_glutils.gl.glDrawArrays",
"line_number": 658,
"usage_type": "call"
},
{
"api_name": "_glutils.gl",
"line_number": 658,
"usage_type": "name"
},
{
"api_name": "_glutils.gl.GL_LINES",
"line_number": 658,
"usage_type": "attribute"
},
{
"api_name": "GLSupport.mat4Ortho",
"line_number": 672,
"usage_type": "call"
},
{
"api_name": "_glutils.gl.glViewport",
"line_number": 674,
"usage_type": "call"
},
{
"api_name": "_glutils.gl",
"line_number": 674,
"usage_type": "name"
},
{
"api_name": "_glutils.gl.glLineWidth",
"line_number": 679,
"usage_type": "call"
},
{
"api_name": "_glutils.gl",
"line_number": 679,
"usage_type": "name"
},
{
"api_name": "_glutils.gl.glUniformMatrix4fv",
"line_number": 680,
"usage_type": "call"
},
{
"api_name": "_glutils.gl",
"line_number": 680,
"usage_type": "name"
},
{
"api_name": "_glutils.gl.GL_TRUE",
"line_number": 680,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 681,
"usage_type": "attribute"
},
{
"api_name": "_glutils.gl.glUniform4f",
"line_number": 682,
"usage_type": "call"
},
{
"api_name": "_glutils.gl",
"line_number": 682,
"usage_type": "name"
},
{
"api_name": "_glutils.gl.glUniform1f",
"line_number": 683,
"usage_type": "call"
},
{
"api_name": "_glutils.gl",
"line_number": 683,
"usage_type": "name"
},
{
"api_name": "_glutils.gl.glEnableVertexAttribArray",
"line_number": 685,
"usage_type": "call"
},
{
"api_name": "_glutils.gl",
"line_number": 685,
"usage_type": "name"
},
{
"api_name": "_glutils.gl.glVertexAttribPointer",
"line_number": 686,
"usage_type": "call"
},
{
"api_name": "_glutils.gl",
"line_number": 686,
"usage_type": "name"
},
{
"api_name": "_glutils.gl.GL_FLOAT",
"line_number": 688,
"usage_type": "attribute"
},
{
"api_name": "_glutils.gl",
"line_number": 688,
"usage_type": "name"
},
{
"api_name": "_glutils.gl.GL_FALSE",
"line_number": 689,
"usage_type": "attribute"
},
{
"api_name": "_glutils.gl",
"line_number": 689,
"usage_type": "name"
},
{
"api_name": "_glutils.gl.glDrawArrays",
"line_number": 692,
"usage_type": "call"
},
{
"api_name": "_glutils.gl",
"line_number": 692,
"usage_type": "name"
},
{
"api_name": "_glutils.gl.GL_LINES",
"line_number": 692,
"usage_type": "attribute"
},
{
"api_name": "GLText.CENTER",
"line_number": 713,
"usage_type": "name"
},
{
"api_name": "GLText.TOP",
"line_number": 713,
"usage_type": "name"
},
{
"api_name": "GLText.CENTER",
"line_number": 714,
"usage_type": "name"
},
{
"api_name": "GLText.TOP",
"line_number": 714,
"usage_type": "name"
},
{
"api_name": "GLText.RIGHT",
"line_number": 722,
"usage_type": "name"
},
{
"api_name": "GLText.CENTER",
"line_number": 722,
"usage_type": "name"
},
{
"api_name": "GLText.CENTER",
"line_number": 723,
"usage_type": "name"
},
{
"api_name": "GLText.BOTTOM",
"line_number": 723,
"usage_type": "name"
},
{
"api_name": "GLText.ROTATE_270",
"line_number": 724,
"usage_type": "name"
},
{
"api_name": "GLText.LEFT",
"line_number": 729,
"usage_type": "name"
},
{
"api_name": "GLText.CENTER",
"line_number": 729,
"usage_type": "name"
},
{
"api_name": "GLText.CENTER",
"line_number": 730,
"usage_type": "name"
},
{
"api_name": "GLText.TOP",
"line_number": 730,
"usage_type": "name"
},
{
"api_name": "GLText.ROTATE_270",
"line_number": 731,
"usage_type": "name"
},
{
"api_name": "_utils.checkAxisLimits",
"line_number": 867,
"usage_type": "call"
},
{
"api_name": "_utils.checkAxisLimits",
"line_number": 871,
"usage_type": "call"
},
{
"api_name": "_utils.checkAxisLimits",
"line_number": 875,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 882,
"usage_type": "call"
},
{
"api_name": "math.log10",
"line_number": 896,
"usage_type": "call"
},
{
"api_name": "math.log10",
"line_number": 901,
"usage_type": "call"
},
{
"api_name": "math.log10",
"line_number": 908,
"usage_type": "call"
},
{
"api_name": "math.log10",
"line_number": 913,
"usage_type": "call"
},
{
"api_name": "math.log10",
"line_number": 919,
"usage_type": "call"
},
{
"api_name": "math.log10",
"line_number": 924,
"usage_type": "call"
},
{
"api_name": "GLSupport.mat4Ortho",
"line_number": 945,
"usage_type": "call"
},
{
"api_name": "GLSupport.mat4Ortho",
"line_number": 947,
"usage_type": "call"
},
{
"api_name": "GLSupport.mat4Ortho",
"line_number": 964,
"usage_type": "call"
},
{
"api_name": "GLSupport.mat4Ortho",
"line_number": 966,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 973,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 973,
"usage_type": "attribute"
},
{
"api_name": "numbers.Real",
"line_number": 980,
"usage_type": "attribute"
},
{
"api_name": "_utils.FLOAT32_MINPOS",
"line_number": 981,
"usage_type": "name"
},
{
"api_name": "math.log10",
"line_number": 981,
"usage_type": "call"
},
{
"api_name": "_utils.FLOAT32_MINPOS",
"line_number": 983,
"usage_type": "name"
},
{
"api_name": "numpy.any",
"line_number": 984,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 985,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 985,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 986,
"usage_type": "attribute"
},
{
"api_name": "numpy.errstate",
"line_number": 988,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 989,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 975,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 975,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 975,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 1009,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 1011,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 1011,
"usage_type": "call"
},
{
"api_name": "numbers.Real",
"line_number": 1030,
"usage_type": "attribute"
},
{
"api_name": "numpy.int64",
"line_number": 1030,
"usage_type": "attribute"
},
{
"api_name": "numbers.Real",
"line_number": 1031,
"usage_type": "attribute"
},
{
"api_name": "numpy.int64",
"line_number": 1031,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 1063,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.inv",
"line_number": 1064,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 1064,
"usage_type": "attribute"
},
{
"api_name": "numpy.dot",
"line_number": 1066,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 1066,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 1104,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 1127,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 1136,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 1193,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 1194,
"usage_type": "attribute"
},
{
"api_name": "numpy.append",
"line_number": 1195,
"usage_type": "call"
}
] |
28747708713
|
from django.shortcuts import render
# Create your views here.
import json
from urllib.request import urlopen
from urllib.parse import quote
from django.http import HttpResponse
from django.template import loader
def index(request):
    omi = "omprakash"
    resp = urlopen('https://api.coursera.org/api/courses.v1?q=search&query=malware+underground').read()
    content = json.loads(resp)
template = loader.get_template("myapp/index.html")
context = {
'omi': omi,
'content' : content,
}
return HttpResponse(template.render(context, request))
def search(request):
    form = request.GET.get('squery')
    # URL-encode the user-supplied query before building request URLs
    url = "https://api.coursera.org/api/courses.v1?q=search&query=" + quote(form)
    omi = "omprakash"
    vari = quote(form)
    resp = urlopen('https://api.coursera.org/api/courses.v1?start=1&limit=3&q=search&query=' + vari).read()
    content = json.loads(resp)
template = loader.get_template("myapp/search.html")
context = {
'omi': omi,
'content' : content,
'form' : form,
}
return HttpResponse(template.render(context, request))
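# Illustrative request flow (example value only, derived from the code above):
# GET /search/?squery=python builds
# https://api.coursera.org/api/courses.v1?start=1&limit=3&q=search&query=python
# and renders the decoded JSON with myapp/search.html.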
|
aumiom/Educational-Website-Template-with-Coursera-Search-API-Integration
|
myproject/myapp/views.py
|
views.py
|
py
| 1,090 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "urllib.urlopen",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.template.loader.get_template",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.template.loader",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "urllib.urlopen",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.template.loader.get_template",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.template.loader",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 38,
"usage_type": "call"
}
] |
40413480181
|
import re
import requests
from bs4 import BeautifulSoup
from time import time as timer
__author__ = "Allen Roberts"
__credits__ = ["Allen Roberts"]
__version__ = "1.0.0"
__maintainer__ = "Allen Roberts"
def readfile():
with open('KY.txt') as file:
lines = file.readlines()
print(lines)
    return lines
def writetofile(emails):
    # 'emails' avoids shadowing the built-in list; 'with' guarantees the file closes
    filename = "emails.txt"
    with open(filename, "w") as f:
        for email in emails:
            f.write(email + "\n")
def google_parse(search_string, start):
print("Test")
temp = []
url = 'http://www.google.com/search'
payload = {'q': search_string, 'start': start}
user_agent = {'User-agent': 'Mozilla/11.0'}
request_response = requests.get(url, params=payload, headers=user_agent)
soup = BeautifulSoup(request_response.text, 'html.parser')
aTags = soup.find_all('a')
print(aTags)
    for a in aTags:
        try:
            # Google wraps result links as /url?q=<target>&sa=...; pull out <target>
            temp.append(re.search(r'url\?q=(.+?)&sa', a['href']).group(1))
        except (AttributeError, KeyError):
            continue
return temp
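# Illustrative note (not from the original file): Google result anchors carry
# hrefs such as "/url?q=https://example.com/page&sa=U&ved=...", from which the
# regex above extracts "https://example.com/page".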
def main(search, pages):
start = timer()
result = []
for page in range( 0, int(pages) ):
result.extend(google_parse( search, str(page*10) ) )
result = list( set( result ) )
result = removefalselinks(result)
print( *result, sep = '\n' )
print( '\nTotal URLs Scraped : %s ' % str( len( result ) ) )
print( 'Script Execution Time : %s ' % ( timer() - start, ) )
return result
def removefalselinks(pagelist):
    # Build a filtered copy: calling remove() while iterating skips elements
    return [page for page in pagelist if 'http' in page]
def scrapepages( pagelist ):
emails = []
for page in pagelist:
print(page + ":")
foundemails = finddata(page)
if foundemails is None:
continue
for email in foundemails:
emails.append(email)
print(emails)
return emails
def finddata( pageurl ):
headers = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'GET',
'Access-Control-Allow-Headers': 'Content-Type',
'Access-Control-Max-Age': '3600',
'User-Agent': 'Mozilla/11.0'
}
try:
print("Making Request")
        req = requests.get(pageurl, headers=headers, timeout=5)  # pass headers by keyword; positionally they would be sent as query params
print("Grabbing HTML")
soup = BeautifulSoup(req.content, 'html.parser')
print("Prettying up data")
pagedata = soup.prettify()
regex = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b'
emails = re.findall(regex, pagedata)
print(emails)
return emails
    except requests.RequestException:
        print("Timeout")
def removeduplicates(emails):
print(emails)
return list(set(emails))
def validatelist(emails):
newlist = removeduplicates(emails)
emailfinal = []
for email in newlist:
if '.png' in email:
print(email)
elif '.jpeg' in email:
print(email)
else:
emailfinal.append(email)
return emailfinal
if __name__ == '__main__':
businesses = readfile()
scrapedEmails = []
i = 0
while i < 3000:
i += 1
for business in businesses:
urls = main(business, 1)
emails = scrapepages(urls)
validatedlist = validatelist(emails)
for email in validatedlist:
scrapedEmails.append(email)
print(scrapedEmails)
writetofile(scrapedEmails)
print("Done")
|
AllenRoberts/EmailScraper
|
main.py
|
main.py
|
py
| 3,463 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 95,
"usage_type": "call"
}
] |
15060558322
|
import csv
import json
with open('./data/union_list.json', 'r') as species_file:
    species_list = json.loads(species_file.read())
country_codes = [
"BG",
"HR",
"CZ",
"DK",
"NL",
"UK",
"EE",
"FI",
"FR",
"DE",
"GR",
"HU",
"IE",
"IT",
"LV",
"LT",
"MT",
"PL",
"PT",
"RO",
"SK",
"SI",
"ES",
"SE"
]
result = []
delimiter = ";"
for iso in country_codes:
    with open('./data/common_names_inputs/'+iso+'_common_names.csv', 'r') as iso_input:
        # materialize all rows before the file is closed
        common_names = list(csv.DictReader(iso_input))
for row in species_list:
tmp_names = set()
for item in common_names:
if item['eu_name'].startswith(row['speciesName']):
tmp_names.add(item['common_name'])
row[iso+'_CommonName'] = delimiter.join(tmp_names)
result.append(row)
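# Illustrative result row (hypothetical values; keys follow the code above):
#   {"speciesName": "Procyon lotor", ..., "DE_CommonName": "Waschbär"}
# with multiple common names per country joined by the ";" delimiter.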
print(result)
with open('./data/species_list_a.json', 'w', encoding='utf-8') as species_list_section_A:
    json.dump(species_list, species_list_section_A, ensure_ascii=False)
#for item in country_codes:
# common_names_lang_file = open('./data/common_names_outputs/'+item+'_common_names.json', 'w', encoding='utf-8')
# json.dump(result[item], nuts_regions_file, ensure_ascii=False)
|
eea/ias-dataflow
|
scripts/parse_common_names.py
|
parse_common_names.py
|
py
| 1,330 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "json.loads",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "csv.DictReader",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 56,
"usage_type": "call"
}
] |
9004329912
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'siscoer.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^grappelli/', include('grappelli.urls')),
# Apps urls
url(r'^$', 'estoque.views.start',),
url(r'^estoque/', include('estoque.urls')),
url(r'^cadastro/$', 'estoque.views.cadastro', name='cadastro'),
url(r'^request_user_pass/$', 'estoque.views.request_user_pass', name='request_user_pass'),
    # Authentication
url(r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}, name='login'),
url(r'^logout/$', 'django.contrib.auth.views.logout', {"next_page": "/"}, name='logout'),
)
if settings.DEBUG:
urlpatterns += patterns(
'',
(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
)
urlpatterns += staticfiles_urlpatterns()
|
clebersa/2014-1-gps-siscoer
|
src/siscoer/siscoer/urls.py
|
urls.py
|
py
| 1,206 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.contrib.admin.autodiscover",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.patterns",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEBUG",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.patterns",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.MEDIA_ROOT",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "django.contrib.staticfiles.urls.staticfiles_urlpatterns",
"line_number": 36,
"usage_type": "call"
}
] |
29478342356
|
import sys
import networkx as nx
from assignNetwork import assignNetwork
conflicts = []
messageTypes = []
incomingMessages = []
outgoingMessages = []
stableStates = []
graph = {}
G = nx.MultiDiGraph()
assert len(sys.argv[1:]) <= 2, "Too many arguments"
file = sys.argv[1]
if(len(sys.argv[1:]) == 2):
    constraintFile = sys.argv[2]
with open(file) as f:
lines = f.readlines()
line_idx = 0
while(not "RevMurphi.MurphiModular.Types.Enums.SubEnums.GenMessageTypes" in lines[line_idx]):
line_idx += 1
line_idx += 2
# Get all messages
while(not ";" in lines[line_idx]):
msg = lines[line_idx].strip()
if msg[-1] == ',':
msg = msg[:-1]
messageTypes.append(msg)
line_idx += 1
print(messageTypes)
while(not "RevMurphi.MurphiModular.Types.Enums.SubEnums.GenArchEnums" in lines[line_idx]):
line_idx += 1
line_idx += 2
#get stable states
cacheStates = []
while not "directory" in lines[line_idx]:
state = lines[line_idx].strip().replace("cache", "")
if len(state) == 0:
line_idx += 1
continue
if state[-1] == ',':
state = state[:-1]
cacheStates.append(state)
line_idx += 1
line_idx += 1
while not ";" in lines[line_idx]:
state = lines[line_idx].strip().replace("directory", "")
if state[-1] == ',':
state = state[:-1]
if state in cacheStates:
stableStates.append("cache" + state)
line_idx += 1
#find and parse state machines
while(not "----RevMurphi.MurphiModular.StateMachines.GenMessageStateMachines" in lines[line_idx]):
# print(i, lines[i])
line_idx += 1
for i in range(line_idx, len(lines)):
#find switch cbe.State then find inmsg.mtype
if ("case cache" in lines[i]): #don't need to check directory or "case directory" in lines[i]
inState = lines[i].strip()[5:-1]
outState = ""
edge = ""
print("state: " + inState)
i += 1
incoming = False
prev_message = ""
msg_types = {}
for msgType in messageTypes:
msg_types[msgType] = "stall"
while(not "endswitch;" in lines[i]):
if("case" in lines[i]):
incoming_msg = lines[i].strip()
incoming_msg = incoming_msg[5:-1]
edge = incoming_msg
print("incoming message: " + incoming_msg)
if incoming_msg not in incomingMessages:
incomingMessages.append(incoming_msg)
msg_types[incoming_msg] = "nonstall"
incoming = True
prev_message = incoming_msg
if("msg := " in lines[i]):
outgoing_msg = lines[i]
outgoing_msg = outgoing_msg.split(',')[1]
print("outgoing message: " + outgoing_msg)
if outgoing_msg not in outgoingMessages:
outgoingMessages.append(outgoing_msg)
if("Send" in lines[i]):
incoming = False
if "cbe.State :=" in lines[i]:
outState = lines[i].strip()[13:-1]
key = G.add_edge(inState, outState, edge)
G.edges[inState, outState, edge]["message"] = edge
print(outState)
i += 1
print("finished messages for this state")
#check for conflicts in this state
print("all messages:")
print(msg_types)
keys = list(msg_types.keys())
conflictNum = 0
if len(keys) > 1:
for j in range(len(keys)-1):
m1 = keys[j]
m1_type = msg_types[m1]
for k in range(j+1, len(keys)):
m2 = keys[k]
m2_type = msg_types[m2]
if(m1_type != m2_type):
if(not (m1,m2) in conflicts and not (m2,m1) in conflicts):
print("appending {} {}".format(m1, m2))
conflicts.append((m1, m2))
conflictNum += 1
print("Number of new conflict in state: " + str(conflictNum))
for msg in messageTypes:
if (msg not in incomingMessages and msg not in outgoingMessages):
outgoingMessages.append(msg)
print("original conflicts: " + str(len(conflicts)))
assignNetwork(messageTypes, conflicts)
print("")
print("INCOMING {}".format(incomingMessages))
print("OUTGOING {}".format(outgoingMessages))
print("BOTH")
for msg in messageTypes:
if (msg in incomingMessages and msg in outgoingMessages):
print(msg)
print("\n")
newConflicts = []
outOnly = []
for msg in messageTypes:
if msg in outgoingMessages and msg not in incomingMessages:
outOnly.append(msg)
for (m1, m2) in conflicts:
conflicting = True
if m1 in outOnly or m2 in outOnly:
conflicting = False
if conflicting:
newConflicts.append((m1, m2))
print("omitting incoming/outgoing non-conflicts: {} conflicts left".format(str(len(newConflicts))))
print("newConflicts Length {}".format(len(newConflicts)))
falseConflict = {}
for con in newConflicts:
falseConflict[con] = True
#get gray from trace here
def enumNode(node):
posMsg = set()
print(node)
if node in stableStates:
print("base case")
out_edges = G.out_edges(node)
for out_edge in out_edges:
outMsg = G.get_edge_data(out_edge[0], out_edge[1]).keys()
print(outMsg)
for msg in outMsg:
posMsg.add(msg)
print("returning {}".format(posMsg))
return posMsg
else:
out_edges = G.out_edges(node)
# print(node, out_edges)
for out_edge in out_edges:
curOutMsg = G.get_edge_data(out_edge[0], out_edge[1]).keys()
# for msg in curOutMsg:
# posMsg.append(msg)
if not out_edge[0] == out_edge[1]:
nxt_out_edges = G.out_edges(out_edge[1])
for oEdge in nxt_out_edges:
nxt_Msgs = G.get_edge_data(oEdge[0], oEdge[1]).keys()
for msg in nxt_Msgs:
posMsg.add(msg)
# for msg in curOutMsg:
# posMsg.add(msg)
print("NEXT state {}".format(out_edge[1]))
nextOutMsg = enumNode(out_edge[1])
for msg in nextOutMsg:
print("ADDING {}".format(msg))
posMsg.add(msg)
else:
print("ADDING {}".format(curOutMsg))
for msg in curOutMsg:
posMsg.add(msg)
# else:
# posMsg.append(next(iter(curOutMsg)))
# print(out_edge, outMsg)
# print()
print("returning {}".format(posMsg))
return posMsg
for n1 in G.nodes:
print("start node: {}".format(n1))
# if n1 in stableStates:
# continue
possibleMsgs = enumNode(n1)
print("POSSIBLE for {}: {}".format(n1, possibleMsgs))
currentMsgs = set()
out_edges = G.out_edges(n1)
# print(len(out_edges))
for out_edge in out_edges:
curOutMsg = G.get_edge_data(out_edge[0], out_edge[1]).keys()
for msg in curOutMsg:
currentMsgs.add(msg)
# print(possibleMsgs)
print("current msgs for {}: {}".format(n1, currentMsgs))
# if n1 not in stableStates:
nxtN = G.successors(n1)
for n in nxtN:
# if n == n1:
# continue
print("next node: {}".format(n))
nxtOut = G.out_edges(n)
for nxtOutEdge in nxtOut:
nxtOutMsg = G.get_edge_data(nxtOutEdge[0], nxtOutEdge[1]).keys()
print(nxtOutMsg)
for msg in nxtOutMsg:
if msg in currentMsgs:
if msg in possibleMsgs:
print("REMOVE {}".format(msg))
possibleMsgs.remove(msg)
# greyMsgs = []
# for msg in currentMsgs:
# if msg in possibleMsgs:
# possibleMsgs.remove(msg)
# print("{} can be received now".format(msg))
# for msg in incomingMessages:
# if msg not in possibleMsgs:
# greyMsgs.append(msg)
print("nonstall for state {}: {}".format(n1, currentMsgs))
print("stall for state {}: {}".format(n1, possibleMsgs))
for pm1 in currentMsgs:
for pm2 in possibleMsgs:
if (pm1, pm2) in newConflicts:
falseConflict[(pm1, pm2)] = False
print("True conflict {} {}".format(pm1, pm2))
elif (pm2, pm1) in newConflicts:
falseConflict[(pm2, pm1)] = False
print("True conflict {} {}".format(pm2, pm1))
# print("Grey messages: {}".format(greyMsgs))
print(falseConflict)
for k in falseConflict.keys():
    if falseConflict[k]:
newConflicts.remove(k)
# print("removed conflict {}".format(k))
else:
pass
# print("true conflict {}".format(k))
netConstraint = []
if len(sys.argv[1:]) == 2:
print("=========Applying constraints...===========")
    with open(constraintFile) as f:
lines = f.readlines()
for line in lines:
if line[0] == '[':
sameNet = line[1:-1].replace(" ", "").split(",")
for m1 in sameNet:
for m2 in sameNet:
rmvConflict = (m1, m2)
if rmvConflict in newConflicts:
newConflicts.remove(rmvConflict)
netConstraint.append(sameNet)
else:
pair = line.strip().replace(" ","").split(",")
if (pair[0], pair[1]) in newConflicts:
rmvConflict = (pair[0], pair[1])
newConflicts.remove(rmvConflict)
elif ((pair[1], pair[0]) in newConflicts):
rmvConflict = (pair[1], pair[0])
newConflicts.remove(rmvConflict)
print("Final number of conflicts: {}".format(len(newConflicts)))
assignNetwork(incomingMessages, newConflicts, netConstraint)
print("Outgoing Network")
print(outgoingMessages)
|
ChingLingYeung/honoursProject
|
simple_conflict.py
|
simple_conflict.py
|
py
| 10,499 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "networkx.MultiDiGraph",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "assignNetwork.assignNetwork",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 288,
"usage_type": "attribute"
},
{
"api_name": "assignNetwork.assignNetwork",
"line_number": 310,
"usage_type": "call"
}
] |
17347679642
|
from django.urls import path, include
from .views import LoginView, RegisterView, \
LogoutView, DepartView, JobView, SetPwd, DepartEditView, JobEditView, \
JobRelateUserView, DepartRelateUserView, AppointWorkflowAdmin, IndexView, AppointCommon, AddDepartView, \
DepartRelateUserEditView, AddJobView, JobRelateUserEditView
from django.views import View
urlpatterns = [
    path('index/<str:job_id>', IndexView.as_view()),  # personal home page
    path('login/', LoginView.as_view()),  # initial user login
    path('login/set_pwd/', SetPwd.as_view()),  # registered user changes password after logging in
    path('logout/', LogoutView.as_view()),  # log out
    path('register/', RegisterView.as_view()),  # admin registers new users
    path('appoint_workflow_admin/', AppointWorkflowAdmin.as_view()),  # admin appoints one or more users as workflow admins (GET with query string ?job_id=1&job_id=2 fetches users, POST with a job_id list sets them)
    path('appoint_common/', AppointCommon.as_view()),  # admin demotes one or more workflow admins to common users (GET with query string ?job_id=1&job_id=2 fetches users, POST with a job_id list sets them)
    path('depart/', DepartView.as_view()),  # admin: view department info (paginated, URL query string)
    path('add_depart/', AddDepartView.as_view()),  # admin: add a department
    path('depart/edit/<int:depart_id>', DepartEditView.as_view()),  # admin: edit or delete the given department
    path('depart/view/depart<int:depart_id>/relate_user/', DepartRelateUserView.as_view()),  # admin: view department members
    path('depart/edit/depart<int:depart_id>/relate_user/', DepartRelateUserEditView.as_view()),  # admin: add, update, or delete department members
    path('job/', JobView.as_view()),  # admin: view job titles (paginated, URL query string)
    path('add_job/', AddJobView.as_view()),  # admin: add a job title
    path('job/edit/<int:jobtitle_id>', JobEditView.as_view()),  # admin: edit or delete the given job title
    path('job/view/job<int:jobtitle_id>/relate_user/', JobRelateUserView.as_view()),  # admin: view job-title members
    path('job/edit/job<int:jobtitle_id>/relate_user/', JobRelateUserEditView.as_view()),  # admin: add, update, or delete job-title members
]
|
cuifeihe/django_WorkFlowProject
|
app_account/urls.py
|
urls.py
|
py
| 2,353 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "views.IndexView.as_view",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "views.IndexView",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "views.LoginView.as_view",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "views.LoginView",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "views.SetPwd.as_view",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "views.SetPwd",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "views.LogoutView.as_view",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "views.LogoutView",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "views.RegisterView.as_view",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "views.RegisterView",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "views.AppointWorkflowAdmin.as_view",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "views.AppointWorkflowAdmin",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "views.AppointCommon.as_view",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "views.AppointCommon",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "views.DepartView.as_view",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "views.DepartView",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "views.AddDepartView.as_view",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "views.AddDepartView",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "views.DepartEditView.as_view",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "views.DepartEditView",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "views.DepartRelateUserView.as_view",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "views.DepartRelateUserView",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "views.DepartRelateUserEditView.as_view",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "views.DepartRelateUserEditView",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "views.JobView.as_view",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "views.JobView",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "views.AddJobView.as_view",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "views.AddJobView",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "views.JobEditView.as_view",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "views.JobEditView",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "views.JobRelateUserView.as_view",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "views.JobRelateUserView",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "views.JobRelateUserEditView.as_view",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "views.JobRelateUserEditView",
"line_number": 35,
"usage_type": "name"
}
] |
21480310570
|
import bpy
import re
def get_group_name_from_data_path(data_path):
m = re.match(r'^pose\.bones\[\"([^\"]+)"\]', data_path)
if m:
return m[1]
# For pose blender. Should probably not be hardcoded
m = re.match(r'^\[\"([^\"]+)"\]$', data_path)
if m and m[1].endswith("_pose"):
return "Poses"
return None
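# Illustrative examples (not from the original file):
#   get_group_name_from_data_path('pose.bones["spine"].rotation_quaternion') -> "spine"
#   get_group_name_from_data_path('["happy_pose"]')                          -> "Poses"
#   get_group_name_from_data_path('location')                                -> None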
class GRET_OT_channels_auto_group(bpy.types.Operator):
"""Group animation channels by their bone name"""
bl_idname = 'gret.channels_auto_group'
bl_label = "Auto-Group Channels"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
return context.space_data and context.space_data.type in {'DOPESHEET_EDITOR', 'GRAPH_EDITOR'}
def execute(self, context):
obj = context.active_object
action = obj.animation_data.action if (obj and obj.animation_data) else None
if not action:
return {'CANCELLED'}
fcurves = []
# Create the necessary groups first THEN assign them to prevent the following error
# https://github.com/blender/blender/blob/v3.4.1/source/blender/makesrna/intern/rna_fcurve.c#L527
for fc in action.fcurves:
group_name = get_group_name_from_data_path(fc.data_path)
if group_name and (not fc.group or fc.group.name != group_name):
fcurves.append((fc, group_name))
if group_name not in action.groups:
action.groups.new(name=group_name)
for fc, group_name in fcurves:
old_group, fc.group = fc.group, action.groups.get(group_name)
if fc.group:
fc.group.show_expanded = True
fc.group.show_expanded_graph = True
if old_group and not old_group.channels:
action.groups.remove(old_group)
return {'FINISHED'}
def draw_menu(self, context):
self.layout.operator(GRET_OT_channels_auto_group.bl_idname)
def register(settings, prefs):
if not prefs.animation__enable_channels_auto_group:
return False
# Would be nice to have this menu item next to the other group operators
bpy.utils.register_class(GRET_OT_channels_auto_group)
bpy.types.GRAPH_MT_channel.append(draw_menu)
bpy.types.DOPESHEET_MT_channel.append(draw_menu)
def unregister():
bpy.types.GRAPH_MT_channel.remove(draw_menu)
bpy.types.DOPESHEET_MT_channel.remove(draw_menu)
bpy.utils.unregister_class(GRET_OT_channels_auto_group)
|
greisane/gret
|
anim/channels_auto_group.py
|
channels_auto_group.py
|
py
| 2,546 |
python
|
en
|
code
| 298 |
github-code
|
6
|
[
{
"api_name": "re.match",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "bpy.types",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "bpy.utils.register_class",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "bpy.utils",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "bpy.types.GRAPH_MT_channel.append",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "bpy.types",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "bpy.types.DOPESHEET_MT_channel.append",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "bpy.types",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "bpy.types.GRAPH_MT_channel.remove",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "bpy.types",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "bpy.types.DOPESHEET_MT_channel.remove",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "bpy.types",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "bpy.utils.unregister_class",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "bpy.utils",
"line_number": 69,
"usage_type": "attribute"
}
] |
27251363866
|
"""
文件名: Code/Chapter07/C02_RNNImgCla/FashionMNISTRNN.py
创建时间: 2023/4/27 8:08 下午
作 者: @空字符
公众号: @月来客栈
知 乎: @月来客栈 https://www.zhihu.com/people/the_lastest
"""
import torch
import torch.nn as nn
import sys
sys.path.append('../../')
from Chapter06.C04_LN.layer_normalization import LayerNormalization
class FashionMNISTRNN(nn.Module):
def __init__(self, input_size=28, hidden_size=128,
num_layers=2, num_classes=10):
super(FashionMNISTRNN, self).__init__()
        self.rnn = nn.RNN(input_size, hidden_size, nonlinearity='relu',
num_layers=num_layers, batch_first=True)
self.classifier = nn.Sequential(LayerNormalization(hidden_size),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(inplace=True),
nn.Linear(hidden_size, num_classes))
def forward(self, x, labels=None):
x = x.squeeze(1) # [batch_size,1,28,28] --> [batch_size,28,28]
x, _ = self.rnn(x) # input: [batch_size, time_steps, input_size]
# x: [batch_size, time_steps, hidden_size]
        logits = self.classifier(x[:, -1])
        # classify on the last time step: x[:, -1] already has shape [batch_size, hidden_size]
        # logits: [batch_size, num_classes]
if labels is not None:
loss_fct = nn.CrossEntropyLoss(reduction='mean')
loss = loss_fct(logits, labels)
return loss, logits
else:
return logits
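# Note: each 28x28 image is fed to the RNN as 28 time steps of 28 features
# (one image row per step); only the final step's hidden state reaches the
# classifier above.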
if __name__ == '__main__':
model = FashionMNISTRNN()
x = torch.rand([32, 1, 28, 28])
y = model(x)
print(y.shape)
|
moon-hotel/DeepLearningWithMe
|
Code/Chapter07/C02_RNNImgCla/FashionMNISTRNN.py
|
FashionMNISTRNN.py
|
py
| 1,754 |
python
|
en
|
code
| 116 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.nn.RNN",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "Chapter06.C04_LN.layer_normalization.LayerNormalization",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.nn.Linear",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "torch.rand",
"line_number": 45,
"usage_type": "call"
}
] |
9830880946
|
# -*- coding: utf-8 -*-
##
# @file __init__.py
# @brief Contain paths to information files
# @author Gabriel H Riqueti
# @email [email protected]
# @date 06/05/2021
#
import os
from pathlib import Path
PATH_NERNST_EQUATION_INFO = Path(os.path.abspath(__file__)).parent / 'nernst_equation.txt'
PATH_GOLDMAN_EQUATION_INFO = Path(os.path.abspath(__file__)).parent / 'goldman_equation.txt'
if not PATH_NERNST_EQUATION_INFO.exists():
raise FileNotFoundError(PATH_NERNST_EQUATION_INFO.as_posix() + ' not found!')
if not PATH_GOLDMAN_EQUATION_INFO.exists():
raise FileNotFoundError(PATH_GOLDMAN_EQUATION_INFO.as_posix() + ' not found!')
|
gabrielriqu3ti/biomedical_signal_processing
|
biomedical_signal_processing/info/__init__.py
|
__init__.py
|
py
| 670 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pathlib.Path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
}
] |
29325722402
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
To read the data sequentially from the hard disk, it is converted to the TFRecord format.
"""
import re
from pathlib import Path
import pandas as pd
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from sklearn.utils import shuffle
__date__ = '2021/04/06'
def load_data(npy_path, csv_path):
images = np.load(npy_path)
df = pd.read_csv(csv_path, index_col=None, header=0)
d = {'artifact': 0, 'galx_artificial_real': 1, 'rand_artificial_real': 1}
labels = df['object_type'].map(d).values
return images, labels
def _bytes_feature(value):
"""Returns byte_list type from string / byte type."""
if isinstance(value, type(tf.constant(0))):
# BytesList won't unpack a string from an EagerTensor.
value = value.numpy()
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def _float_feature(value):
"""Returns float_list type from float / double type."""
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _int64_feature(value):
"""Return Int64_list type from bool / enum / int / uint type."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def make_example(image, label, detector_id, sample_index, unique_index):
"""Convert data formats."""
feature = {
'image': _float_feature(image.reshape(-1)),
'label': _int64_feature([label]),
'detector_id': _int64_feature([detector_id]),
'sample_index': _int64_feature([sample_index]),
'unique_index': _int64_feature([unique_index])
}
return tf.train.Example(features=tf.train.Features(feature=feature))
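# Reading-side sketch (not part of the original file): parses examples written
# by make_example. The image shape is not stored in the record, so IMAGE_SHAPE
# is an assumption that must match the arrays saved in the .npy files.
IMAGE_SHAPE = (64, 64)  # hypothetical; replace with the real image shape


def parse_example(serialized):
    """Decode one serialized Example back into (image, label)."""
    features = {
        'image': tf.io.FixedLenFeature([IMAGE_SHAPE[0] * IMAGE_SHAPE[1]],
                                       tf.float32),
        'label': tf.io.FixedLenFeature([1], tf.int64),
        'detector_id': tf.io.FixedLenFeature([1], tf.int64),
        'sample_index': tf.io.FixedLenFeature([1], tf.int64),
        'unique_index': tf.io.FixedLenFeature([1], tf.int64),
    }
    parsed = tf.io.parse_single_example(serialized, features)
    image = tf.reshape(parsed['image'], IMAGE_SHAPE)
    return image, parsed['label']

# Usage: tf.data.TFRecordDataset(paths, compression_type='GZIP').map(parse_example)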
def main():
data_dir = Path('../../data/raw/real_bogus1')
npy_list = list(data_dir.glob('images*.npy'))
npy_list.sort()
output_dir = Path('../../data/processed/real_bogus1')
if not output_dir.exists():
output_dir.mkdir(parents=True)
r = re.compile(r'images(\d+)')
unique_id = 0
# Size and start of unique index for each detector.
data_info = {'detector_id': [], 'size': [], 'start_index': []}
for npy_path in npy_list:
m = r.search(npy_path.stem)
detector_id = int(m.group(1))
csv_path = data_dir / 'params{}.csv'.format(detector_id)
images, labels = load_data(npy_path=npy_path, csv_path=csv_path)
n = len(images)
indices = np.arange(n)
# Unique index across the entire data set.
unique_indices = indices + unique_id
data_info['detector_id'].append(detector_id)
data_info['size'].append(n)
data_info['start_index'].append(unique_id)
unique_id += n
images, labels, indices, unique_indices = shuffle(
images, labels, indices, unique_indices
)
# Write TFRecord.
record_path = str(output_dir / 'data{}.tfrecord'.format(detector_id))
with tf.io.TFRecordWriter(
record_path,
tf.io.TFRecordOptions(compression_type='GZIP')) as writer:
for image, label, index, unique_index in zip(
tqdm(images, desc=str(detector_id)), labels, indices,
unique_indices):
example = make_example(
image=image, label=label, detector_id=detector_id,
sample_index=index, unique_index=unique_index
)
writer.write(example.SerializeToString())
# Save the information of each file.
df = pd.DataFrame(data_info)
df.to_csv(output_dir / 'data_info.csv')
if __name__ == '__main__':
main()
|
ichiro-takahashi/tomoe-realbogus
|
src/data/make_record.py
|
make_record.py
|
py
| 3,756 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "numpy.load",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "tensorflow.constant",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.Feature",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.BytesList",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.Feature",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.FloatList",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.Feature",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.Int64List",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.Example",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.Features",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "sklearn.utils.shuffle",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "tensorflow.io.TFRecordWriter",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "tensorflow.io",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.io.TFRecordOptions",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "tensorflow.io",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "tqdm.tqdm",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 109,
"usage_type": "call"
}
] |
34199270716
|
import time
import json
import requests
import datetime
import math
import sys
from pprint import pprint
s_time = time.time()
path = sys.argv[0].replace('c_timer.py', '')
# the well pumps 4 m3/h (4000 l/h) over 10 "sotka" (1000 m2), i.e. 4 l/m2 or 4 mm per hour
with open(path+'json/const_zones.json') as f:
rf = json.load(f)
pprint(rf)
zones = rf['zones']
start_h_watering = rf['start_h_watering']
'''
{
"zones": [
{"name": "z1", # номер/имя зоны полива
"gpio": "14", # gpio esp zone
"norm": 3, # норма суточного полива мм/м2
"http_esp": "http://192.168.0.54:84"}, # http_esp, возможно подлл. нескольких esp
{"name": "z2",
"gpio": "12",
"norm": 3,
"http_esp": "http://192.168.0.54:84"}],
"start_h_watering": 19 # час начала полива
}
'''
city_openweather = 'Kiev'
api_openweather = 'your_api_key'
# current weather: http://api.openweathermap.org/data/2.5/weather?q=Kiev&APPID=your_api_key&units=metric
# weather forecast: http://api.openweathermap.org/data/2.5/forecast?q=Kiev&APPID=your_api_key&units=metric
data_weather = requests.get('http://api.openweathermap.org/data/2.5/forecast?q='+city_openweather+'&APPID='+api_openweather+'&units=metric')
weather = data_weather.json()['list']
today = datetime.datetime.today().strftime('%Y-%m-%d %H:%M')
print(today)
cur_time = round(time.time())
print(cur_time)
# sum the forecast precipitation for the next 24 hours
day_rain = 0
for d in weather:
if d['dt'] <= (cur_time+24*60*60) and 'rain' in d:
print(d['rain'])
if '3h' in d['rain']: day_rain += d['rain']['3h']
print("day_rain = ", day_rain)
rf.update({"day_rain":day_rain})
temp_time = 0
for z in rf['zones']:
watering = z["norm"] - day_rain
if watering > 0:
        h = round(watering/4-watering/4*100%25/100, 2) # watering time in hours, floored to the nearest quarter hour
start_time = round(start_h_watering+temp_time, 2)
end_time = round(start_h_watering+temp_time+h, 2)
temp_time += h
z.update({"watering": [start_time, end_time]})
rf.update({"date":today})
pprint(rf)
with open(path+'json/timer.json', 'w') as outfile:
json.dump(rf, outfile)
print("--- %s seconds ---" % (time.time() - s_time))
|
sdfim/watering
|
c_timer.py
|
c_timer.py
|
py
| 2,563 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "time.time",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.today",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 78,
"usage_type": "call"
}
] |
27321029603
|
"""
Kela Kanta data preprocessing
Reads Kela Kanta data, applies the preprocessing steps below and writes the result to files.
- remove extra linebreaks
- remove empty lines
- transform base16 ints to base10 ints
- parse dates
- replace "," with "." as a decimal point
Note: running this script on ePouta takes several hours.
Speed could be improved with e.g. multiprocessing if needed.
Input files:
- 107_522_2021_LM_<YYYY>.csv.finreg_IDs (11 files)
- 107_522_2021_LT_<YYYY>.csv.finreg_IDs (11 files)
Output files:
- prescriptions_<YYYY>_<YYYY-MM-DD>.csv (11 files)
- deliveries_<YYYY>_<YYYY-MM-DD>.csv (11 files)
"""
import os
import re
import pandas as pd
from datetime import datetime
from functools import partial
from finregistry_data.config import KELA_KANTA_INPUT_DIR, KELA_KANTA_OUTPUT_DIR
def read_prescription_data(filepath):
"""Read drug prescriptions data in chunks"""
hash_to_int = partial(int, base=16)
hash_cols = [
"CDA_ID_MD5HASH",
"CDA_SET_ID_MD5HASH",
"DOC_GROUP_MD5HASH",
"CDA_ADDENDUM_REF_MD5HASH",
"CDA_RPLC_ID_MD5HASH",
"PRO_PERSON_REG_MD5HASH",
"ORGANIZATION_OID_MD5HASH",
]
date_cols = ["CREATION_DATE"]
dtypes = {
"PATIENT_ID": str,
"DOC_TYPE_CODE": float,
"DOC_VERSION": float,
"DRUG_NAME_C": str,
"DOSE_QUANTITY_TEXT": str,
"ATC_CODE": str,
"PURPOSE_OF_USE": str,
"DOSAGE_INSTRUCTIONS": str,
"ITERATION_CODE": float,
"TYPE_1_AMOUNT": str,
"TYPE_1_SIZE": str,
"TYPE_2_AMOUNT": str,
"TYPE_2_SIZE_UNIT": str,
"TYPE_3_TIME": str,
"TYPE_3_UNIT": str,
"PRODUCT_CODE": float,
"DOSE_DISTRIBUTION": str,
"PREPARATION_TYPE_CODE": float,
"RESEPTISTATUS": str,
"LAAKEMUOTOKOODI": float,
"ERIKOISALA_CODE": str,
"MED_EXCHANGE_BAN": str,
"RENEWAL_BAN": str,
}
chunks = pd.read_csv(
filepath,
sep=";",
engine="python",
encoding="utf-8",
encoding_errors="ignore",
on_bad_lines="warn",
converters=dict.fromkeys(hash_cols, hash_to_int),
parse_dates=date_cols,
dtype=dtypes,
chunksize=10000,
)
return chunks
def read_delivery_data(filepath):
"""Read drug delivery data in chunks"""
hash_to_int = partial(int, base=16)
hash_cols = [
"CDA_ID_MD5HASH",
"DOC_GROUP_MD5HASH",
"CDA_ADDENDUM_REF_MD5HASH",
"CDA_RPLC_ID_MD5HASH",
]
date_cols = ["CREATION_DATE"]
dtypes = {
"PATIENT_ID": str,
"DRUG_NAME_C": str,
"DOSE_QUANTITY_TEXT": str,
"ATC_CODE": str,
"MED_EXCHANGED": str,
"DIS_AMOUNT_CALC_TXT": str,
"DIS_AMT_VALUE": str,
"DIS_AMOUNT_TXT": str,
"DIS_AMT_UNIT": str,
"PRODUCT_CODE1": str,
"DOSE_DISTRIBUTION": str,
"PREPARATION_TYPE_CODE": float,
"RESEPTISTATUS": str,
"LAAKEMUOTOKOODI": float,
"DELIVERY_FEE": float,
}
chunks = pd.read_csv(
filepath,
sep=";",
engine="python",
encoding="utf-8",
encoding_errors="ignore",
on_bad_lines="warn",
decimal=",",
converters=dict.fromkeys(hash_cols, hash_to_int),
parse_dates=date_cols,
dtype=dtypes,
chunksize=10000,
)
return chunks
def get_output_filepath(input_filepath):
"""Get output filepath from input filepath."""
input_filename = os.path.basename(input_filepath)
today = datetime.today().strftime("%Y-%m-%d")
pattern = r"^107_522_2021_(.{2})_(\d{4})\.csv\.finreg_IDs"
filetype, year = re.findall(pattern, input_filename, re.IGNORECASE)[0]
filetype = "prescriptions" if filetype == "LM" else "deliveries"
output_filename = filetype + "_" + year + "_" + today + ".csv"
output_path = KELA_KANTA_OUTPUT_DIR / output_filename
return output_path
def write_chunk_to_csv(chunk, output_filepath):
"""Writes chunk to csv"""
chunk.to_csv(
output_filepath,
mode="a",
header=not os.path.exists(output_filepath),
index=False,
sep=";",
)
if __name__ == "__main__":
# Preprocess drug prescriptions
prescription_files = KELA_KANTA_INPUT_DIR.glob("107_522_2021_LM_*")
for prescription_file in prescription_files:
print(prescription_file)
output_filepath = get_output_filepath(prescription_file)
chunks = read_prescription_data(prescription_file)
for chunk in chunks:
chunk = chunk.replace("\n", " ", regex=True)
write_chunk_to_csv(chunk, output_filepath)
# Preprocess drug deliveries
delivery_files = KELA_KANTA_INPUT_DIR.glob("107_522_2021_LT_*")
for delivery_file in delivery_files:
print(delivery_file)
output_filepath = get_output_filepath(delivery_file)
chunks = read_delivery_data(delivery_file)
for chunk in chunks:
chunk = chunk.replace("\n", " ", regex=True)
write_chunk_to_csv(chunk, output_filepath)
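
# --- Hedged sketch (not part of the pipeline) ---
# Minimal example of reading one preprocessed output file back, matching the
# ";"-separated format written above; the filename below is hypothetical.
# df = pd.read_csv(
#     KELA_KANTA_OUTPUT_DIR / "prescriptions_2020_2023-01-01.csv",
#     sep=";", parse_dates=["CREATION_DATE"],
# )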
|
dsgelab/finregistry-data
|
finregistry_data/registries/kela_kanta.py
|
kela_kanta.py
|
py
| 5,134 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "functools.partial",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.today",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "re.findall",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "re.IGNORECASE",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "finregistry_data.config.KELA_KANTA_OUTPUT_DIR",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "finregistry_data.config.KELA_KANTA_INPUT_DIR.glob",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "finregistry_data.config.KELA_KANTA_INPUT_DIR",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "finregistry_data.config.KELA_KANTA_INPUT_DIR.glob",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "finregistry_data.config.KELA_KANTA_INPUT_DIR",
"line_number": 163,
"usage_type": "name"
}
] |
7298829560
|
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.visualization import make_lupton_rgb
from matplotlib.colors import LogNorm
from astropy.wcs import WCS
import numpy as np
db_open = [fits.open('frame-g-006793-1-0130.fits'),
fits.open('frame-i-006793-1-0130.fits'),
fits.open('frame-r-006793-1-0130.fits'),
fits.open('frame-u-006793-1-0130.fits'),
fits.open('frame-z-006793-1-0130.fits')]
class Glx(object):
def __init__(self, d):
self.g = d[0]
self.i = d[1]
self.r = d[2]
self.u = d[3]
self.z = d[4]
def img_rgb(self, nome='Galáxia'):
## rgb = make_lupton_rgb(self.i[0].data[8:1396,::], self.r[0].data[0:1388,::], self.g[0].data[12:1400,::], stretch=1, Q=10)
rgb = make_lupton_rgb(self.i[0].data, self.g[0].data, self.u[0].data, stretch=1, Q=10)
plt.imshow(rgb, origin='lower')
plt.title(nome)
plt.show()
def Log_Norm(self):
plt.imshow(self.r[0].data, cmap='gray', origin='lower', norm=LogNorm())
plt.show()
def Img_1_cor(self):
fig, ((ax0, ax1, ax2), (ax3, ax4, ax5)) = plt.subplots(nrows=2, ncols=3, sharex=True, figsize=(18, 8))
ax0.imshow(self.i[0].data, origin='lower', vmin=0.0001, vmax=0.6, cmap='RdBu')
ax0.set_title('Filtro I')
ax1.imshow(self.g[0].data, origin='lower', vmin=0.0001, vmax=0.6, cmap='RdBu')
ax1.set_title('Filtro G')
ax3.imshow(self.r[0].data, origin='lower', vmin=0.0001, vmax=0.6, cmap='RdBu')
ax3.set_title('Filtro R')
ax4.imshow(self.z[0].data, origin='lower', vmin=0.0001, vmax=0.6, cmap='RdBu')
ax4.set_title('Filtro Z')
ax5.imshow(self.u[0].data, origin='lower', vmin=0.0001, vmax=0.6, cmap='RdBu')
ax5.set_title('Filtro U')
fig.delaxes(ax=ax2)
plt.show()
def pl(self):
g = self.g[0].data
print(g.shape)
print(g.min())
print(g.max())
print(g.mean())
print(np.percentile(g.flatten(),3))
print(np.percentile(g.flatten(), 97))
fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2, sharex=True, figsize=(18, 8))
ax0.imshow(g, vmin=0.1, vmax=6, origin='lower', cmap='viridis')
ax1.imshow(g, vmin=np.percentile(g.flatten(),5), vmax=np.percentile(g.flatten(), 95), origin='lower', cmap='viridis')
plt.show()
def main(db):
galaxia = Glx(db)
galaxia.pl()
if __name__ == '__main__':
main(db=db_open)
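
# --- Hedged sketch (illustration only) ---
# pl() clips the display range to the 5th-95th percentiles to tame outliers.
# The same idea as a standalone helper (assumes a 2D numpy array; degenerate
# images where vmin == vmax are not handled):
def percentile_stretch(img, lo=5, hi=95):
    vmin, vmax = np.percentile(img, lo), np.percentile(img, hi)
    return np.clip((img - vmin) / (vmax - vmin), 0, 1)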
|
ViniBilck/Astro-Vinicius
|
Cubos/Codes/Galaxy - 1/Galaxy1.py
|
Galaxy1.py
|
py
| 2,530 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "astropy.io.fits.open",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "astropy.io.fits.open",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "astropy.io.fits.open",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "astropy.io.fits.open",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "astropy.io.fits.open",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "astropy.visualization.make_lupton_rgb",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "matplotlib.colors.LogNorm",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "numpy.percentile",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "numpy.percentile",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 68,
"usage_type": "name"
}
] |
14205447601
|
#!/usr/bin/env python
# coding: utf-8
# ## Single Dimension Array
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
# In[2]:
data = [10, 23, 34, 35, 45, 59]
df = pd.DataFrame(data, columns=['Score'])
df
# In[3]:
#plt.pie(df)
plt.pie(df, labels=df['Score'])
plt.title("Students Score")
plt.show()
# In[4]:
label_name = ['John', 'Tim', 'Kenny', 'AK', 'Helvetica', 'Bryan']
# In[5]:
plt.pie(df, labels=label_name)
plt.title("Students Score")
plt.show()
# In[6]:
plt.pie(df, labels=label_name, autopct='%1.1f%%')
plt.title("Students Score")
plt.show()
# In[7]:
plt.pie(df, labels=label_name, autopct='%1.2f%%')
plt.title("Students Score")
plt.show()
# In[8]:
plt.pie(df, labels=label_name, autopct='%1.3f%%')
plt.title("Students Score")
plt.show()
# ## Two Dimension Array
# In[9]:
new_data = [['John', 10], ['Tim', 24], ['AK', 34]]
# In[10]:
new_data
# In[11]:
newdf = pd.DataFrame(new_data)
# In[12]:
newdf
# In[13]:
newdf = pd.DataFrame(new_data, columns=['Name', 'Score'])
# In[14]:
newdf
# In[15]:
newdf['Score']
# In[16]:
newdf['Name']
# In[17]:
plt.pie(newdf['Score'], labels=newdf['Name'], autopct='%1.1f%%')
plt.show()
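
# --- Hedged note (not from the original notebook) ---
# autopct also accepts a callable, e.g. to print absolute counts instead of
# percentages (matplotlib passes the wedge's percentage to the function):
# plt.pie(newdf['Score'], labels=newdf['Name'],
#         autopct=lambda p: '{:.0f}'.format(p * sum(newdf['Score']) / 100))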
|
AileshC/PythonLearning
|
python_notebooks/MatPlotLib_Pie_Demo.py
|
MatPlotLib_Pie_Demo.py
|
py
| 1,233 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "pandas.DataFrame",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pie",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.pie",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.pie",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.pie",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.pie",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pie",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 122,
"usage_type": "name"
}
] |
70264789629
|
import os
import django
from django.core.management import call_command
from dotenv import load_dotenv
def init_db():
"""Method to initialize the database with sample data"""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'FriendsLessonsSystem.settings')
load_dotenv()
django.setup()
from FriendsLessonsAPI.models import User, Course, Enrollment
call_command('flush')
call_command('makemigrations')
call_command('migrate')
joe = User.objects.create(first_name='Joe', last_name='Smith', username='joe123', birth_date='2000-01-01')
mark = User.objects.create(first_name='Mark', last_name='Johnson', username='mark456', birth_date='1999-12-31')
jody = User.objects.create(first_name='Jody', last_name='Williams', username='jody789', birth_date='1998-12-30')
rachel = User.objects.create(first_name='Rachel', last_name='Smith', username='rachel246', birth_date='1997-12-29')
jane = User.objects.create(first_name='Jane', last_name='Doe', username='jane512', birth_date='1995-05-01')
joe.friends.add(mark, jody, rachel)
jane.friends.add(joe, rachel)
math = Course.objects.create(name='Math', description='Math course')
spanish = Course.objects.create(name='Spanish', description='Spanish course')
history = Course.objects.create(name='History', description='History course')
Enrollment.objects.create(user=rachel, course=math, lessons_taken=3)
Enrollment.objects.create(user=rachel, course=spanish, lessons_taken=2)
Enrollment.objects.create(user=jane, course=history, lessons_taken=10)
Enrollment.objects.create(user=jane, course=math, lessons_taken=1)
Enrollment.objects.create(user=jane, course=spanish, lessons_taken=5)
Enrollment.objects.create(user=joe, course=spanish, lessons_taken=1)
if __name__ == '__main__':
init_db()
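
# --- Hedged sketch (illustration only) ---
# Quick checks of the seeded data from a Django shell (names as created above):
# joe = User.objects.get(username='joe123')
# joe.friends.all()                                         # mark, jody, rachel
# Enrollment.objects.filter(user__username='jane').count()  # 3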
|
ValentinGiorgetti/Desafio-Backend
|
FriendsLessonsSystem/init_db.py
|
init_db.py
|
py
| 1,830 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.environ.setdefault",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "dotenv.load_dotenv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.setup",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.core.management.call_command",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.core.management.call_command",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.core.management.call_command",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "FriendsLessonsAPI.models.User.objects.create",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "FriendsLessonsAPI.models.User.objects",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "FriendsLessonsAPI.models.User",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "FriendsLessonsAPI.models.User.objects.create",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "FriendsLessonsAPI.models.User.objects",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "FriendsLessonsAPI.models.User",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "FriendsLessonsAPI.models.User.objects.create",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "FriendsLessonsAPI.models.User.objects",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "FriendsLessonsAPI.models.User",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "FriendsLessonsAPI.models.User.objects.create",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "FriendsLessonsAPI.models.User.objects",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "FriendsLessonsAPI.models.User",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "FriendsLessonsAPI.models.User.objects.create",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "FriendsLessonsAPI.models.User.objects",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "FriendsLessonsAPI.models.User",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "FriendsLessonsAPI.models.Course.objects.create",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "FriendsLessonsAPI.models.Course.objects",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "FriendsLessonsAPI.models.Course",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "FriendsLessonsAPI.models.Course.objects.create",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "FriendsLessonsAPI.models.Course.objects",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "FriendsLessonsAPI.models.Course",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "FriendsLessonsAPI.models.Course.objects.create",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "FriendsLessonsAPI.models.Course.objects",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "FriendsLessonsAPI.models.Course",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "FriendsLessonsAPI.models.Enrollment.objects.create",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "FriendsLessonsAPI.models.Enrollment.objects",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "FriendsLessonsAPI.models.Enrollment",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "FriendsLessonsAPI.models.Enrollment.objects.create",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "FriendsLessonsAPI.models.Enrollment.objects",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "FriendsLessonsAPI.models.Enrollment",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "FriendsLessonsAPI.models.Enrollment.objects.create",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "FriendsLessonsAPI.models.Enrollment.objects",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "FriendsLessonsAPI.models.Enrollment",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "FriendsLessonsAPI.models.Enrollment.objects.create",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "FriendsLessonsAPI.models.Enrollment.objects",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "FriendsLessonsAPI.models.Enrollment",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "FriendsLessonsAPI.models.Enrollment.objects.create",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "FriendsLessonsAPI.models.Enrollment.objects",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "FriendsLessonsAPI.models.Enrollment",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "FriendsLessonsAPI.models.Enrollment.objects.create",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "FriendsLessonsAPI.models.Enrollment.objects",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "FriendsLessonsAPI.models.Enrollment",
"line_number": 38,
"usage_type": "name"
}
] |
22610043366
|
from django import forms
from django.forms.models import inlineformset_factory
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Field, Fieldset, Div, HTML, Submit, Button
from hybridjango.custom_layout_object import *
from hybridjango.mixins import BootstrapFormMixin
from .models import *
class EventCommentForm(forms.ModelForm):
class Meta:
model = EventComment
fields = ['event', 'text']
class EventForm(forms.ModelForm):
class Meta:
model = Event
fields = [
'title',
'type',
'ingress',
'text', # TODO: Make this field a HTMLField in form
'image',
'event_start',
'event_end',
'weight',
'location',
'hidden',
'news',
'public',
'signoff_close_on_signup_close',
'signoff_close',
]
widgets = {
'ingress': forms.Textarea(attrs={'rows': 3}),
}
def __init__(self, *args, **kwargs):
super(EventForm, self).__init__(*args, **kwargs)
        for name, field in self.fields.items():
            if name in ('event_start', 'event_end'):
                field.widget.attrs.update({'class': 'form_datetime', 'autocomplete': 'off'})
            else:
                field.widget.attrs.update({'class': 'form-control'})
class MarkPunishmentForm(forms.ModelForm, BootstrapFormMixin):
class Meta:
model = MarkPunishment
exclude = [
'rules',
'delays',
'duration',
]
def __init__(self, *args, **kwargs):
super(MarkPunishmentForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = True
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-md-3 create-label'
self.helper.field_class = 'col-md-9'
self.helper.layout = Layout(
Div(
Field('goes_on_secondary'),
Field('too_many_marks'),
Field('signoff_close'),
HTML("<br>"),
Field('mark_on_late_signoff'),
HTML("<br>"),
Field('remove_on_too_many_marks'),
HTML("<br>"),
HTML("<br>"),
Fieldset('Add delays',
Formset('delays')),
HTML("<br>"),
Fieldset('Add rules',
Formset('rules')),
HTML("<br>"),
Submit('submit', 'Lagre'),
Button('back', "Tilbake", css_class='btn btn-default pull-right', onclick="goBack()"),
)
)
class RuleForm(forms.ModelForm, BootstrapFormMixin):
class Meta:
model = Rule
exclude = [
'punishment',
]
RuleFormSet = inlineformset_factory(
MarkPunishment, Rule, form=RuleForm,
fields=['rule'], extra=1, can_delete=True
)
class DelayForm(forms.ModelForm, BootstrapFormMixin):
class Meta:
model = Delay
exclude = [
'punishment',
]
DelayFormSet = inlineformset_factory(
MarkPunishment, Delay, form=DelayForm,
fields=['marks', 'minutes'], extra=1, can_delete=True
)
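
# --- Hedged sketch (illustration only, not part of this module) ---
# Typical use of the inline formsets above in a view; names are assumed:
#
# punishment = MarkPunishment.objects.get(pk=pk)
# formset = DelayFormSet(request.POST or None, instance=punishment)
# if formset.is_valid():
#     formset.save()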
|
hybrida/hybridjango
|
apps/events/forms.py
|
forms.py
|
py
| 3,382 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "django.forms.ModelForm",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.forms.Textarea",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "hybridjango.mixins.BootstrapFormMixin",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "crispy_forms.helper.FormHelper",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "crispy_forms.layout.Layout",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "crispy_forms.layout.Div",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "crispy_forms.layout.Field",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "crispy_forms.layout.Field",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "crispy_forms.layout.Field",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "crispy_forms.layout.HTML",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "crispy_forms.layout.Field",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "crispy_forms.layout.HTML",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "crispy_forms.layout.Field",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "crispy_forms.layout.HTML",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "crispy_forms.layout.HTML",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "crispy_forms.layout.Fieldset",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "crispy_forms.layout.HTML",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "crispy_forms.layout.Fieldset",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "crispy_forms.layout.HTML",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "crispy_forms.layout.Submit",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "crispy_forms.layout.Button",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "hybridjango.mixins.BootstrapFormMixin",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "django.forms.models.inlineformset_factory",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "hybridjango.mixins.BootstrapFormMixin",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "django.forms.models.inlineformset_factory",
"line_number": 109,
"usage_type": "call"
}
] |
73811427708
|
from torch.utils.data import DataLoader
from sklearn.model_selection import KFold
from .datasets import get_cifar10_datasets, get_cifar100_datasets, get_mnist_datasets, get_image_net_dataset, TruncatedDataset, MergedDataset
from .partition import partition_by_class, partition_with_dirichlet_distribution
data_path = '/home/hansel/developer/embedding/data/'
def get_datasets(data_dir, dataset, use_hdf5=False):
if dataset == 'cifar10':
trn_dataset, val_dataset = get_cifar10_datasets(data_dir=data_dir, use_hdf5=use_hdf5)
elif dataset == 'cifar100':
trn_dataset, val_dataset = get_cifar100_datasets(data_dir=data_dir)
elif dataset == 'mnist':
trn_dataset, val_dataset = get_mnist_datasets(data_dir=data_dir, use_hdf5=use_hdf5)
    elif dataset == 'imagenet':
        trn_dataset, val_dataset = get_image_net_dataset(data_dir=data_dir)
    else:
        # avoid an UnboundLocalError on unknown dataset names
        raise ValueError('unknown dataset: {}'.format(dataset))
    return trn_dataset, val_dataset
def get_dl_lists(dataset, batch_size, partition=None, n_site=None, alpha=None, net_dataidx_map_train=None, net_dataidx_map_test=None, shuffle=True, k_fold_val_id=None, seed=None, site_indices=None, use_hdf5=True):
trn_dataset, val_dataset = get_datasets(data_dir=data_path, dataset=dataset, use_hdf5=use_hdf5)
if partition == 'regular':
trn_ds_list = [TruncatedDataset(trn_dataset, dataset) for _ in range(n_site)]
val_ds_list = [TruncatedDataset(val_dataset, dataset) for _ in range(n_site)]
elif partition == 'by_class':
(net_dataidx_map_train, net_dataidx_map_test) = partition_by_class(data_dir=data_path, dataset=dataset, n_sites=n_site, seed=seed)
trn_ds_list = [TruncatedDataset(trn_dataset, dataset, idx_map) for idx_map in net_dataidx_map_train.values()]
val_ds_list = [TruncatedDataset(val_dataset, dataset, idx_map) for idx_map in net_dataidx_map_test.values()]
elif partition == 'dirichlet':
(net_dataidx_map_train, net_dataidx_map_test) = partition_with_dirichlet_distribution(data_dir=data_path, dataset=dataset, n_sites=n_site, alpha=alpha, seed=seed)
trn_ds_list = [TruncatedDataset(trn_dataset, dataset, idx_map) for idx_map in net_dataidx_map_train.values()]
val_ds_list = [TruncatedDataset(val_dataset, dataset, idx_map) for idx_map in net_dataidx_map_test.values()]
elif partition == 'given':
trn_ds_list = [TruncatedDataset(trn_dataset, dataset, idx_map) for idx_map in net_dataidx_map_train.values()]
val_ds_list = [TruncatedDataset(val_dataset, dataset, idx_map) for idx_map in net_dataidx_map_test.values()]
elif partition == '5foldval':
trn_ds_list = [TruncatedDataset(trn_dataset, dataset, idx_map) for idx_map in net_dataidx_map_train.values()]
val_ds_list = [TruncatedDataset(val_dataset, dataset, idx_map) for idx_map in net_dataidx_map_test.values()]
merged_ds_list = [MergedDataset(trn_ds_list[i], val_ds_list[i], dataset) for i in range(len(trn_ds_list))]
kfold = KFold(n_splits=5, shuffle=True, random_state=1)
splits = [list(kfold.split(range(len(merged_ds)))) for merged_ds in merged_ds_list]
indices = [split[k_fold_val_id] for split in splits]
trn_ds_list = [TruncatedDataset(merged_ds_list[i], dataset, idx_map[0]) for i, idx_map in enumerate(indices)]
val_ds_list = [TruncatedDataset(merged_ds_list[i], dataset, idx_map[1]) for i, idx_map in enumerate(indices)]
trn_dl_list = [DataLoader(dataset=trn_ds, batch_size=batch_size, shuffle=shuffle, pin_memory=True, num_workers=0) for trn_ds in trn_ds_list]
val_dl_list = [DataLoader(dataset=val_ds, batch_size=batch_size, shuffle=False, pin_memory=True, num_workers=0) for val_ds in val_ds_list]
if site_indices is not None:
trn_dl_list = [trn_dl_list[i] for i in site_indices]
val_dl_list = [val_dl_list[i] for i in site_indices]
return trn_dl_list, val_dl_list
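
# --- Hedged sketch (illustration only) ---
# Example call, assuming CIFAR-10 split across 4 sites with a Dirichlet
# partition; smaller alpha means a more heterogeneous label distribution:
# trn_dls, val_dls = get_dl_lists('cifar10', batch_size=64,
#                                 partition='dirichlet', n_site=4,
#                                 alpha=0.5, seed=0)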
|
somcogo/embedding
|
utils/data_loader.py
|
data_loader.py
|
py
| 3,860 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datasets.get_cifar10_datasets",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datasets.get_cifar100_datasets",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datasets.get_mnist_datasets",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datasets.get_image_net_dataset",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datasets.TruncatedDataset",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "datasets.TruncatedDataset",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "partition.partition_by_class",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "datasets.TruncatedDataset",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "datasets.TruncatedDataset",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "partition.partition_with_dirichlet_distribution",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "datasets.TruncatedDataset",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "datasets.TruncatedDataset",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "datasets.TruncatedDataset",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "datasets.TruncatedDataset",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "datasets.TruncatedDataset",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "datasets.TruncatedDataset",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "datasets.MergedDataset",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.KFold",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "datasets.TruncatedDataset",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "datasets.TruncatedDataset",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 48,
"usage_type": "call"
}
] |
4992423632
|
from __future__ import annotations
import typing
from homeassistant.components.switch import (
DOMAIN as PLATFORM_SWITCH,
SwitchEntity,
)
try:
from homeassistant.components.switch import SwitchDeviceClass
DEVICE_CLASS_OUTLET = SwitchDeviceClass.OUTLET
DEVICE_CLASS_SWITCH = SwitchDeviceClass.SWITCH
except ImportError:  # older Home Assistant cores lack SwitchDeviceClass
from homeassistant.components.switch import DEVICE_CLASS_OUTLET, DEVICE_CLASS_SWITCH
from .merossclient import const as mc # mEROSS cONST
from . import meross_entity as me
if typing.TYPE_CHECKING:
from homeassistant.core import HomeAssistant
from homeassistant.config_entries import ConfigEntry
async def async_setup_entry(
hass: HomeAssistant, config_entry: ConfigEntry, async_add_devices
):
me.platform_setup_entry(hass, config_entry, async_add_devices, PLATFORM_SWITCH)
class MLSwitch(me.MerossToggle, SwitchEntity):
"""
    generic plugs (single/multi outlet and so on)
"""
PLATFORM = PLATFORM_SWITCH
@staticmethod
def build_for_device(device: me.MerossDevice, channel: object, namespace: str):
return MLSwitch(device, channel, None, DEVICE_CLASS_OUTLET, None, namespace)
class ToggleXMixin(
me.MerossDevice if typing.TYPE_CHECKING else object
):
def __init__(self, api, descriptor, entry):
super().__init__(api, descriptor, entry)
        # we build switches here after everything else has been
        # set up, since the togglex verb might refer to a more
        # specialized entity than a switch
togglex = descriptor.digest.get(mc.KEY_TOGGLEX)
if isinstance(togglex, list):
for t in togglex:
channel = t.get(mc.KEY_CHANNEL)
if channel not in self.entities:
MLSwitch.build_for_device(
self, channel, mc.NS_APPLIANCE_CONTROL_TOGGLEX
)
elif isinstance(togglex, dict):
channel = togglex.get(mc.KEY_CHANNEL)
if channel not in self.entities:
MLSwitch.build_for_device(
self, channel, mc.NS_APPLIANCE_CONTROL_TOGGLEX
)
        # This is a heuristic for legacy firmwares: when we cannot
        # init any entity from system.all.digest we guess we should
        # have at least a switch
        # edit: I guess ToggleX firmwares and on already support
        # system.all.digest status broadcast
if not self.entities:
MLSwitch.build_for_device(self, 0, mc.NS_APPLIANCE_CONTROL_TOGGLEX)
def _handle_Appliance_Control_ToggleX(self, header: dict, payload: dict):
self._parse__generic(mc.KEY_TOGGLEX, payload.get(mc.KEY_TOGGLEX))
def _parse_togglex(self, payload: dict):
self._parse__generic(mc.KEY_TOGGLEX, payload)
class ToggleMixin(
me.MerossDevice if typing.TYPE_CHECKING else object
):
def __init__(self, api, descriptor, entry):
super().__init__(api, descriptor, entry)
        # older firmwares (MSS110 with 1.1.28) don't really seem to have 'digest'
        # but have 'control', and the toggle payload doesn't seem to carry 'channel'
p_control = descriptor.all.get(mc.KEY_CONTROL)
if p_control:
p_toggle = p_control.get(mc.KEY_TOGGLE)
if isinstance(p_toggle, dict):
MLSwitch.build_for_device(
self,
p_toggle.get(mc.KEY_CHANNEL, 0),
mc.NS_APPLIANCE_CONTROL_TOGGLE,
)
if not self.entities:
MLSwitch.build_for_device(self, 0, mc.NS_APPLIANCE_CONTROL_TOGGLE)
def _handle_Appliance_Control_Toggle(self, header: dict, payload: dict):
self._parse_toggle(payload.get(mc.KEY_TOGGLE))
def _parse_toggle(self, payload):
"""
toggle doesn't have channel (#172)
"""
if isinstance(payload, dict):
entity: MLSwitch = self.entities[payload.get(mc.KEY_CHANNEL, 0)] # type: ignore
entity._parse_toggle(payload)
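
# --- Hedged sketch (illustration only) ---
# Digest shape assumed by the mixins above (keys come from mc.* consts; the
# "onoff" field is an assumption, it does not appear in this file):
# {"togglex": [{"channel": 0, "onoff": 1}, {"channel": 1, "onoff": 0}]}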
|
ZioTitanok/HomeAssistant-Configuration
|
custom_components/meross_lan/switch.py
|
switch.py
|
py
| 4,016 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "homeassistant.components.switch.SwitchDeviceClass.OUTLET",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.components.switch.SwitchDeviceClass",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.switch.SwitchDeviceClass.SWITCH",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.components.switch.SwitchDeviceClass",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.core.HomeAssistant",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "homeassistant.config_entries.ConfigEntry",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.switch.DOMAIN",
"line_number": 29,
"usage_type": "argument"
},
{
"api_name": "homeassistant.components.switch.SwitchEntity",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.switch.DOMAIN",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.switch.DEVICE_CLASS_OUTLET",
"line_number": 41,
"usage_type": "argument"
},
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "merossclient.const.KEY_TOGGLEX",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "merossclient.const",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "merossclient.const.KEY_CHANNEL",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "merossclient.const",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "merossclient.const.NS_APPLIANCE_CONTROL_TOGGLEX",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "merossclient.const",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "merossclient.const.KEY_CHANNEL",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "merossclient.const",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "merossclient.const.NS_APPLIANCE_CONTROL_TOGGLEX",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "merossclient.const",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "merossclient.const.NS_APPLIANCE_CONTROL_TOGGLEX",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "merossclient.const",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "merossclient.const.KEY_TOGGLEX",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "merossclient.const",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "merossclient.const.KEY_TOGGLEX",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "merossclient.const",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "merossclient.const.KEY_CONTROL",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "merossclient.const",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "merossclient.const.KEY_TOGGLE",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "merossclient.const",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "merossclient.const.KEY_CHANNEL",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "merossclient.const",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "merossclient.const.NS_APPLIANCE_CONTROL_TOGGLE",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "merossclient.const",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "merossclient.const.NS_APPLIANCE_CONTROL_TOGGLE",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "merossclient.const",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "merossclient.const.KEY_TOGGLE",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "merossclient.const",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "merossclient.const.KEY_CHANNEL",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "merossclient.const",
"line_number": 109,
"usage_type": "name"
}
] |
6043134873
|
import logging
import certifi
import random, string
from elasticsearch import Elasticsearch
from flask import Flask, render_template, request, redirect, url_for, flash
from datetime import datetime
from quiz import quiz
app = Flask(__name__)
app.secret_key = 'dfuy48yerhfjdbsklueio'
es = Elasticsearch(
['https://host:port'],
http_auth=('user', 'pass'),
send_get_body_as='POST', # needed for GAE
use_ssl=True,
ca_certs=certifi.where()
)
@app.route('/')
def index():
return render_template('index.html', quiz=quiz)
@app.route('/submit', methods=['POST'])
def submit():
form = request.form.to_dict()
doc = {
'timestamp': datetime.utcnow(),
'email' : form['email'],
'remote_addr' : request.remote_addr,
'user_agent' : request.headers.get('User-Agent'),
'correct': True
}
for q in quiz:
doc[q['name']] = {
'question' : q['question'],
'answer' : form[q['name']]
}
if form[q['name']] != [i for i in q['options'] if i['correct']][0]['answer']:
doc['correct'] = False
es.index(index='esquiz', doc_type='answer', pipeline='esquiz', body=doc)
flash('Thanks for your response')
return redirect(url_for('index'))
@app.route('/draw', methods=['GET'])
def draw():
    seed = ''.join(random.choice(string.ascii_lowercase) for i in range(20))  # string.lowercase is Python 2 only
query = {
"query": {
"function_score": {
"query": { "term" : { "correct" : True } },
"functions": [{
"random_score": { "seed": seed }
}]
}
}
}
email = None
res = es.search(index='esquiz', body=query, size=1, _source_include="email")
if res['hits']['total'] > 0:
email = res['hits']['hits'][0]['_source']['email']
return render_template('draw.html', winner=email)
@app.errorhandler(500)
def server_error(e):
# For Google App Engine
logging.exception('An error occurred during a request.')
return 'An internal error occurred.', 500
if __name__ == '__main__':
app.run()
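
# --- Hedged note (illustration only) ---
# /draw relies on function_score with random_score: the same seed always
# yields the same ordering, so each call draws a fresh random seed. The
# equivalent standalone query body, with a fixed seed for reproducibility:
# {"query": {"function_score": {
#     "query": {"term": {"correct": True}},
#     "functions": [{"random_score": {"seed": "abc123"}}]}}}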
|
mcascallares/esquiz
|
main.py
|
main.py
|
py
| 2,109 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "elasticsearch.Elasticsearch",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "certifi.where",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "quiz.quiz",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "flask.request.form.to_dict",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "flask.request.remote_addr",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "flask.request.headers.get",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "flask.request.headers",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "quiz.quiz",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "string.lowercase",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "logging.exception",
"line_number": 72,
"usage_type": "call"
}
] |
43095956138
|
import plugins
import sys
import data
import model
plugins.load_all('config.json')
target = sys.argv[1]
start_node = model.Node('person', target)
d = data.Storage(target)
d.add_node(start_node)
def handle(tokens):
if tokens[0].lower() == 'list':
# show list of nodes
if len(tokens) > 1:
if tokens[1].lower() == 'nodes':
nodes = d.get_nodes()
print('\n'.join(map(lambda x: str(x), nodes)))
elif tokens[1].lower() == 'actions':
if len(tokens) > 2:
actions = plugins.fetch_actions(tokens[2])
print('\n'.join(map(lambda x: str(x), actions)))
else:
print('USAGE: list actions NODE_TYPE')
else:
print('USAGE: list (nodes | actions NODE_TYPE)')
elif tokens[0].lower() == 'get':
if len(tokens) > 1:
id = int(tokens[1])
result = d.get_node(id)
print(result)
print(result.data_json)
else:
print('USAGE: get NODE_ID')
elif tokens[0].lower() == 'run':
# run plugin
if len(tokens) > 2:
action_name = tokens[1]
action = plugins.fetch(action_name)
target_id = int(tokens[2])
target = d.get_node(target_id)
result = action['func'](target)
for n in result:
d.add_node(n)
connection = model.Connection(target.id, n.id, action_name, 'concrete', '')
d.add_connection(connection)
d.add_node(target)
print(result)
else:
print('USAGE: run ACTION NODE_ID')
else:
        print('< Unknown command: ' + ' '.join(tokens))
while True:
    command = input('> ')  # raw_input is Python 2 only; input works on Python 3
tokens = command.split(' ')
if tokens[0].lower() == 'quit':
break
else:
try:
handle(tokens)
except Exception as e:
print(e)
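
# --- Hedged sketch (illustration only) ---
# The REPL assumes plugins.fetch(name) returns a dict whose 'func' entry
# takes a model.Node and returns an iterable of new nodes, e.g. (hypothetical):
# def lookup(node):
#     return [model.Node('domain', 'example.com')]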
|
tracer-sec/osint
|
console.py
|
console.py
|
py
| 2,022 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "plugins.load_all",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "model.Node",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "data.Storage",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "plugins.fetch_actions",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "plugins.fetch",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "model.Connection",
"line_number": 50,
"usage_type": "call"
}
] |
71780096828
|
from colorfield.fields import ColorField
from django.core.validators import MinValueValidator, RegexValidator
from django.db import models
from users.models import User
class Ingredient(models.Model):
"""Класс интредиент"""
name = models.CharField(
verbose_name='Наименование ингредиента',
max_length=150,
help_text='Наименование ингредиента',
)
measurement_unit = models.CharField(
verbose_name='Единица измерения',
max_length=150,
help_text='Единица измерения',
)
class Meta:
ordering = ('name',)
verbose_name = 'Ингредиент'
verbose_name_plural = 'Ингредиенты'
def __str__(self):
return self.name
class Tag(models.Model):
"""Класс тег"""
name = models.CharField(
max_length=50,
verbose_name='Hазвание',
unique=True,
db_index=True
)
color = ColorField(
default='#17A400',
max_length=7,
verbose_name='Цвет',
unique=True,
help_text='Цвет в формате HEX кода',
)
slug = models.SlugField(
max_length=50,
verbose_name='slug',
unique=True,
validators=[RegexValidator(
regex=r'^[-a-zA-Z0-9_]+$',
message='Использован недопустимый символ'
)]
)
class Meta:
verbose_name = 'Тег'
verbose_name_plural = 'Теги'
ordering = ('name', )
def __str__(self):
return self.name
class Recipe(models.Model):
"""Класс рецепт"""
author = models.ForeignKey(
User,
on_delete=models.CASCADE,
verbose_name='Автор',
related_name='recipes',
help_text='Автор рецепта',
)
name = models.CharField(
verbose_name='Название рецепта',
max_length=150,
help_text='Название рецепта',
)
image = models.ImageField(
verbose_name='Картинка',
upload_to='recipes/images',
help_text='Картинка',
)
text = models.TextField(
verbose_name='Описание',
help_text='Описание рецепта',
)
ingredients = models.ManyToManyField(
Ingredient,
verbose_name='Ингредиенты рецепта',
through='RecipeIngredient',
related_name='recipes',
help_text='Ингредиенты в составе рецепта',
)
tags = models.ManyToManyField(
Tag,
verbose_name='Тег рецепта',
related_name='recipes',
help_text='Тег рецепта',
)
cooking_time = models.IntegerField(
verbose_name='Время приготовления',
validators=[
MinValueValidator(
1,
message='Минимальное время приготовления 1 мин.'
)
],
help_text='Время приготовления',
)
pub_date = models.DateTimeField(
verbose_name='Дата публикации',
auto_now_add=True,
help_text='Дата публикации',
)
class Meta:
ordering = ('-pub_date', )
verbose_name = 'Рецепт'
verbose_name_plural = 'Рецепты'
def __str__(self):
return self.name
class RecipeIngredient(models.Model):
"""Класс рецепт-интредиент"""
recipe = models.ForeignKey(
Recipe,
on_delete=models.CASCADE,
verbose_name='Рецепт',
related_name='ingredient',
help_text='Рецепт',
)
ingredient = models.ForeignKey(
Ingredient,
on_delete=models.CASCADE,
verbose_name='Ингредиент',
related_name='ingredient',
help_text='Ингредиент',
)
amount = models.IntegerField(
verbose_name='Количество',
validators=[
MinValueValidator(
1,
message='Минимальное количество 1'
)
],
help_text='Количество',
)
class Meta:
ordering = ('recipe',)
verbose_name = 'Количество ингредиента'
verbose_name_plural = 'Количество ингредиентов'
constraints = [
models.UniqueConstraint(
fields=('recipe', 'ingredient', ),
name='unique_ingredient',
),
]
def __str__(self):
return f'{self.ingredient} в {self.ingredient.measurement_unit}'
class Follow(models.Model):
"""Класс подписки"""
follower = models.ForeignKey(
User,
on_delete=models.CASCADE,
verbose_name='Подписчик',
related_name='follower',
help_text='Подписчик',
)
author = models.ForeignKey(
User,
on_delete=models.CASCADE,
verbose_name='Автор',
related_name='author',
help_text='Автор',
)
class Meta:
ordering = ('-id',)
verbose_name = 'Подписка'
verbose_name_plural = 'Подписки'
constraints = [
models.UniqueConstraint(
fields=('follower', 'author', ),
name='unique_follow',
),
]
def __str__(self):
return f'{self.follower} подписался на: {self.author}'
class FavoriteRecipe(models.Model):
"""Класс избранное"""
user = models.ForeignKey(
User,
on_delete=models.CASCADE,
verbose_name='Пользователь',
related_name='favorite',
help_text='Пользователь добавивший рецепт',
)
recipe = models.ForeignKey(
Recipe,
verbose_name='Избранное',
on_delete=models.CASCADE,
related_name='favorite',
help_text='Избранный рецепт',
)
class Meta:
ordering = ('id',)
verbose_name = 'Избранное'
verbose_name_plural = 'Избранные рецепты'
constraints = [
models.UniqueConstraint(
fields=('user', 'recipe', ),
name='unique_favorite',
),
]
def __str__(self):
return f'{self.recipe} добавлен в избранное'
class ShoppingCart(models.Model):
"""Класс покупок"""
user = models.ForeignKey(
User,
on_delete=models.CASCADE,
verbose_name='Пользователь',
related_name='shopping',
help_text='Пользователь добавивший покупки',
)
recipe = models.ForeignKey(
Recipe,
on_delete=models.CASCADE,
verbose_name='Покупки',
related_name='shopping',
help_text='Рецепт для покупок',
)
class Meta:
ordering = ('id',)
verbose_name = 'Покупка'
verbose_name_plural = 'Покупки'
constraints = [
models.UniqueConstraint(
fields=('user', 'recipe', ),
name='unique_shopping',
),
]
def __str__(self):
return f'{self.recipe} добавлен в покупки.'
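
# --- Hedged sketch (illustration only, not part of this module) ---
# Because Recipe.ingredients goes through RecipeIngredient, rows are created
# explicitly; the objects below (user, beet) are assumed to exist:
#
# recipe = Recipe.objects.create(author=user, name='Soup', text='...',
#                                cooking_time=60, image='recipes/images/x.jpg')
# RecipeIngredient.objects.create(recipe=recipe, ingredient=beet, amount=2)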
|
GirzhuNikolay/foodgram-project-react
|
backend/recipes/models.py
|
models.py
|
py
| 7,583 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.db.models.Model",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "colorfield.fields.ColorField",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "django.db.models.SlugField",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "django.core.validators.RegexValidator",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "django.db.models.Model",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "users.models.User",
"line_number": 71,
"usage_type": "argument"
},
{
"api_name": "django.db.models",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "django.db.models.ImageField",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "django.db.models.ManyToManyField",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "django.db.models.ManyToManyField",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "django.core.validators.MinValueValidator",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "django.core.validators.MinValueValidator",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "django.db.models.UniqueConstraint",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 172,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "users.models.User",
"line_number": 176,
"usage_type": "argument"
},
{
"api_name": "django.db.models",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "users.models.User",
"line_number": 183,
"usage_type": "argument"
},
{
"api_name": "django.db.models",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "django.db.models.UniqueConstraint",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 195,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 205,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 205,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "users.models.User",
"line_number": 209,
"usage_type": "argument"
},
{
"api_name": "django.db.models",
"line_number": 208,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 210,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "django.db.models.UniqueConstraint",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 238,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 238,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "users.models.User",
"line_number": 242,
"usage_type": "argument"
},
{
"api_name": "django.db.models",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 243,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 243,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 248,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 250,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 250,
"usage_type": "name"
},
{
"api_name": "django.db.models.UniqueConstraint",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 261,
"usage_type": "name"
}
] |
31331693062
|
#! /usr/bin/python3
import os
import sys
import glob
import time
import shutil
import logging
import argparse
import subprocess
import pandas as pd
from pathlib import Path
from itertools import repeat
from multiprocessing import Pool
from nest.bbduk import QualCheck
from nest.alignment import Bwa
from nest.alignment import Bowtie
from nest.alignment import BBMap
from nest.alignment import Snap
from nest.samtools import Samtools
from nest.gatk import GenAnTK
from nest.gatk import Picard
from nest.gatk import FreeBayes
from nest.kestrel import KestrelVar
#from nest.annotater import Annotate
from nest.kestrel import kes_runner
from nest.summarize import Summary
from nest.prepinputs import Prepper
from nest.parsers.vcfReader import Reader
from nest.parsers.vcfmerge import Merge
from nest.parsers.vcfannotate import Annotate
from nest.parsers.vcfwriter import Writer
def main(arguments):
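    # 'arguments' is the 18-element tuple built in marsBatch() via zip(repeat(...), ...);
    # it is unpacked positionally below, one element per pipeline parameter.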
bbduk_path = arguments[0]
    aligner_path = arguments[1]
smt_path = arguments[2]
bft_path = arguments[3]
gatk_path = arguments[4]
sam_name = arguments[5]
file_list = arguments[6]
ref_path = arguments[7]
adp_path = arguments[8]
bed_path = arguments[9]
out_dir = arguments[10]
aligner = arguments[11]
pic_path = arguments[12]
voi_path = arguments[13]
java_path = arguments[14]
sra_path = arguments[15]
purge = arguments[16]
sra_list = arguments[17]
#Setup logging
#Get logger for main method
main_logger = logging.getLogger('NeST.{0}'.format(sam_name))
main_logger.debug('Starting analysis for {0}'.format(sam_name))
#Check if files are present
out_path = '{0}/{1}'.format(os.path.abspath(out_dir), sam_name)
if not os.path.exists(out_path):
os.mkdir(out_path)
fastq_path = '{0}/RawFastq'.format(out_path)
if not os.path.exists(fastq_path):
os.mkdir(fastq_path)
#Get FASTQs
prepper = Prepper(fastq_path, out_dir, sra_path)
fastq_path = prepper.sra(sam_name, sra_list, file_list)
##Note: Generalize this, right now it will only work with SRA. This is a fix for NEJM
rone_path = file_list[0]
rtwo_path = file_list[1]
    if not os.path.exists(rone_path):
        raise FileNotFoundError('Forward read not found; Exiting MARs')
        sys.exit()
    if not os.path.exists(rtwo_path):
        raise FileNotFoundError('Reverse read not found; Exiting MARs')
        sys.exit()
    if not os.path.exists(ref_path):
        raise FileNotFoundError('Reference fasta file not found; Exiting MARs')
        sys.exit()
    if not os.path.exists(adp_path):
        raise FileNotFoundError('Adapter sequence not found; Exiting MARs')
        sys.exit()
if not os.path.exists(out_path):
os.mkdir(out_path)
#Create completion folder
completion_path = '{0}/completion'.format(out_path)
if not os.path.exists(completion_path):
os.mkdir(completion_path)
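    # Each pipeline stage touches an empty '<stage>.rt' marker file in this folder
    # on success, so reruns can detect completed steps and skip them.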
#Call Bbduk
main_logger.debug('Running BBDuk')
if os.path.exists('{0}/bbduk.rt'.format(completion_path)):
brone = os.path.splitext(os.path.basename(rone_path))[0]
brtwo = os.path.splitext(os.path.basename(rtwo_path))[0]
rone_path = '{0}/{1}/{2}_cleaned.fq'.format(out_path, 'CleanedFastq', brone)
rtwo_path = '{0}/{1}/{2}_cleaned.fq'.format(out_path, 'CleanedFastq', brtwo)
main_logger.debug('Skipping BBDuk')
bret = 0
else:
bbduk = QualCheck(bbduk_path, adp_path, out_path, java_path)
rone_path, rtwo_path, bret = bbduk.bbduk(rone_path, rtwo_path)
if bret == 0:
Path('{0}/bbduk.rt'.format(completion_path)).touch()
if bret != 0:
raise RuntimeError('BBDuk failed to complete; Exiting MARs')
else:
main_logger.debug('BBDuk completed')
if aligner == 'bwa':
#Call BWA
main_logger.debug('Running BWA')
if os.path.exists('{0}/align.rt'.format(completion_path)):
sam_path = '{0}/alignments/output.sam'.format(out_path)
mret = 0
main_logger.debug('Skipping BWA')
else:
            bwa = Bwa(aligner_path, out_path, ref_path)
sam_path, mret = bwa.bwamem(rone_path, rtwo_path)
if mret == 0:
Path('{0}/align.rt'.format(completion_path)).touch()
if mret != 0:
raise RuntimeError('Bwa mem failed to complete; Exiting MARs')
else:
main_logger.debug('BWA completed')
elif aligner == 'bowtie2':
#Call Bowtie2
main_logger.debug('Running Bowtie2')
        if os.path.exists('{0}/align.rt'.format(completion_path)):
sam_path = '{0}/alignments/output.sam'.format(out_path)
mret = 0
main_logger.debug('Skipping Bowtie2')
else:
            bowtie = Bowtie(aligner_path, out_path, ref_path)
sam_path, mret = bowtie.bowtie(rone_path, rtwo_path)
if mret == 0:
Path('{0}/align.rt'.format(completion_path)).touch()
if mret != 0:
raise RuntimeError('Bowtie2 failed to complete; Exiting MARs')
else:
main_logger.debug('Bowtie2 completed')
elif aligner == 'snap':
#Call Snap
main_logger.debug('Running Snap')
        snap = Snap(aligner_path, out_path, ref_path)
sam_path, mret = snap.snap(rone_path, rtwo_path)
if mret != 0:
raise RuntimeError('Snap failed to complete; Exiting MARs')
else:
main_logger.debug('Snap completed')
elif aligner == 'bbmap':
#Call Bbmap
main_logger.debug('Running BBMap')
        if os.path.exists('{0}/align.rt'.format(completion_path)):
sam_path = '{0}/alignments/output.sam'.format(out_path)
mret = 0
else:
            bbmap = BBMap(aligner_path, out_path, ref_path)
sam_path, mret = bbmap.bbmap(rone_path, rtwo_path)
if mret == 0:
Path('{0}/align.rt'.format(completion_path)).touch()
if mret != 0:
            raise RuntimeError('BBMap failed to complete; Exiting MARs')
else:
main_logger.debug('BBMap completed')
#Fix mate information, sort files and add read groups
varengine = Samtools(smt_path, bft_path, out_path)
if os.path.exists('{0}/fixmate.rt'.format(completion_path)):
base = os.path.splitext(os.path.basename(sam_path))[0]
bam_path = '{0}/alignments/{1}_FM.bam'.format(out_path, base)
fret = 0
main_logger.debug('Skipping fixmate')
else:
bam_path, fret = varengine.fixmate(sam_path)
if fret == 0:
Path('{0}/fixmate.rt'.format(completion_path)).touch()
main_logger.debug('Running Samtools fixmate')
if fret != 0:
raise RuntimeError('Samtools fixmate failed to complete; Exiting MARs')
else:
main_logger.debug('Samtools fixmate completed')
if os.path.exists('{0}/sort.rt'.format(completion_path)):
base = os.path.splitext(os.path.basename(bam_path))[0]
bam_path = '{0}/alignments/{1}_SR.bam'.format(out_path, base)
sret = 0
main_logger.debug('Skipping sort')
else:
bam_path, sret = varengine.sort(bam_path)
if sret == 0:
Path('{0}/sort.rt'.format(completion_path)).touch()
main_logger.debug('Running Samtools sort')
if sret != 0:
raise RuntimeError('Samtools sort failed to complete; Exiting MARs')
else:
main_logger.debug('Samtools sort completed')
if os.path.exists('{0}/dedup.rt'.format(completion_path)):
base = os.path.splitext(os.path.basename(bam_path))[0]
bam_path = '{0}/alignments/{1}_DD.bam'.format(out_path, base)
dret = 0
main_logger.debug('Skipping Dedup')
else:
bam_path, dret = varengine.dedup(bam_path)
if dret == 0:
Path('{0}/dedup.rt'.format(completion_path)).touch()
main_logger.debug('Running Samtools dedup')
    if dret != 0:
        raise RuntimeError('Samtools dedup failed to complete; Exiting MARs')
else:
main_logger.debug('Samtools dedup completed')
rgadder = Picard(java_path, pic_path, out_path)
if os.path.exists('{0}/readgroup.rt'.format(completion_path)):
base = os.path.splitext(os.path.basename(bam_path))[0]
bam_path = '{0}/alignments/{1}_RG.bam'.format(out_path, base)
aret = 0
main_logger.debug('Skipping add read group')
else:
bam_path, aret = rgadder.picard(bam_path, sam_name)
main_logger.debug('Running Picard AddOrReplaceReadGroups')
if aret == 0:
Path('{0}/readgroup.rt'.format(completion_path)).touch()
if aret != 0:
raise RuntimeError('Picard AddOrReplaceReadGroups failed to complete; Exiting MARs')
else:
main_logger.debug('Picard AddOrReplaceReadGroups completed')
#Run samtools mpileup, bcftools index, call and stats to generate VCF files
if os.path.exists('{0}/pileup.rt'.format(completion_path)):
bcf_path = '{0}/{1}_variants.bcf'.format(out_path, sam_name)
pret = 0
main_logger.debug('Skipping Pileup')
else:
bcf_path, pret = varengine.pileup(ref_path, bam_path, sam_name)
main_logger.debug('Running Samtools mpileup')
if pret == 0:
Path('{0}/pileup.rt'.format(completion_path)).touch()
if pret != 0:
raise RuntimeError('Samtools mpileup failed to complete; Exiting MARs')
else:
main_logger.debug('Samtools mpileup completed')
if os.path.exists('{0}/bcfindex.rt'.format(completion_path)):
bret = 0
main_logger.debug('Skipping Bcfindex')
else:
bret = varengine.bcfindex(bcf_path)
main_logger.debug('Running Bcftools index')
        if bret == 0:
Path('{0}/bcfindex.rt'.format(completion_path)).touch()
if bret != 0:
raise RuntimeError('Bcftools index failed to complete; Exiting MARs')
else:
main_logger.debug('Bcftools index completed')
if os.path.exists('{0}/bcfcall.rt'.format(completion_path)):
vcf_path = '{0}/{1}_variants_samtools.vcf'.format(out_path, sam_name)
bret = 0
main_logger.debug('Skipping bcfcall')
else:
vcf_path, bret = varengine.bcftools(bcf_path, sam_name)
main_logger.debug('Running Bcftools call')
if bret == 0:
Path('{0}/bcfcall.rt'.format(completion_path)).touch()
if bret != 0:
raise RuntimeError('Bcftools call failed to complete; Exiting MARs')
else:
main_logger.debug('Bcftools call completed')
#Call GATK HaplotypeCaller to generate VCF files
varcaller = GenAnTK(gatk_path, out_path, java_path, pic_path)
main_logger.debug('Running GATK HaplotypeCaller')
if os.path.exists('{0}/gatk.rt'.format(completion_path)):
gvcf_path = '{0}/{1}_variants_gatk.vcf'.format(out_path, sam_name)
gret = 0
main_logger.debug('Skipping GATK')
else:
gvcf_path, gret = varcaller.hapCaller(bam_path, ref_path, sam_name)
if gret == 0:
Path('{0}/gatk.rt'.format(completion_path)).touch()
if gret != 0:
raise RuntimeError('GATK HaplotypeCaller failed to complete; Exiting MARs')
else:
        main_logger.debug('GATK HaplotypeCaller completed')
#Call Freebayes to generate VCF files
varcaller = FreeBayes('freebayes', out_path)
main_logger.debug('Running Freebayes')
if os.path.exists('{0}/freebayes.rt'.format(completion_path)):
fvcf_path = '{0}/{1}_variants_freebayes.vcf'.format(out_path, sam_name)
fret = 0
main_logger.debug('Skipping Freebayes')
else:
fvcf_path, fret = varcaller.freeBayes(bam_path, ref_path, sam_name)
if fret == 0:
Path('{0}/freebayes.rt'.format(completion_path)).touch()
if fret != 0:
raise RuntimeError('Freebayes failed to complete; Exiting MARs')
else:
        main_logger.debug('Freebayes completed')
    #Filter and annotate variant calls
main_logger.debug('Annotating variants')
annotate = Annotate()
gvcf_path = annotate.getAnnotation(bed_path, gvcf_path, ref_path, out_path, bam_path)
vcf_path = annotate.getAnnotation(bed_path, vcf_path, ref_path, out_path, bam_path)
fvcf_path = annotate.getAnnotation(bed_path, fvcf_path, ref_path, out_path, bam_path)
vcf_dict = {gvcf_path: 'GATK', vcf_path: 'Samtools', fvcf_path: 'Freebayes'}
merger = Merge(out_path, vcf_dict, ref_path)
merged_vcf = merger.splitter(list(vcf_dict.keys()))[0]
    final_vcf = '{0}/{1}_variants_merged_annotated.vcf'.format(out_path, sam_name)
os.rename(merged_vcf, final_vcf)
#final_path = annotate.getAnnotation(bed_path, final_vcf, ref_path, out_path, bam_path)
    main_logger.debug('Filtering low-quality variants and merging GATK and Samtools calls')
#merged_vcf = Vcf.Merge(gvcf_file, svcf_file, out_path).merge()
summary = Summary(ref_path, bed_path, voi_path, out_dir)
var_sum = summary.getVarStats(final_vcf)
main_logger.info('Total variants : {0}; Verified calls : {1}; Exonic : {2}; Intronic : {3}; Synonymous : {4}; Non Synonymous : {5}; Transition : {6}; Transversion : {7}'.format(
var_sum[0], var_sum[1], var_sum[2], var_sum[3], var_sum[4], var_sum[5], var_sum[6], var_sum[7]))
if purge:
shutil.rmtree('{0}/RawFastq'.format(out_path))
shutil.rmtree('{0}/CleanedFastq'.format(out_path))
alignments = glob.glob('{0}/alignments/*'.format(out_path))
for files in alignments:
if 'output_FM_SR_DD_RG.ba' in files:
continue
else:
os.remove(files)
vcffiles = glob.glob('{0}/*.bcf*'.format(out_path))
for files in vcffiles:
os.remove(files)
return(final_vcf, 0)
def marsBatch(bbduk_path, aligner_path, smt_path, bft_path, gatk_path,
inp_path, ref_path, adp_path, bed_path, out_dir, aligner,
pic_path, voi_path, java_path, sra_path, verbose, threads, purge):
#Creating logger for nest
logger = logging.getLogger('NeST')
logger.setLevel(logging.DEBUG)
#Create output paths for the run
if not os.path.exists(os.path.abspath(out_dir)):
os.mkdir(os.path.abspath(out_dir))
# Creating a file handler which logs even debug messages
fh = logging.FileHandler('{0}/nest.log'.format(os.path.abspath(out_dir)))
if verbose:
fh.setLevel(logging.DEBUG)
else:
fh.setLevel(logging.INFO)
# Creating a console handler to log info messages
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# Create formatter and add it to the handlers
formatter = logging.Formatter('{asctime} - {name} - {levelname} - {message}', style="{")
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# Add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
#Create file and console handlers for MaRS
logger.info('Gathering input information from input path.')
prep = Prepper(inp_path, out_dir, sra_path).prepInputs()
samples, sra_list, files = list(), list(), list()
logger.info('Running MaRS on {0} experiments'.format(len(prep)))
#summary = Summary(ref_path, bed_path, voi_path, out_dir)
#samples = config.keys()
pools = Pool(threads)
for sample in prep:
samples.append(prep[sample].sample)
files.append(prep[sample].files)
sra_list.append(prep[sample].sra)
#rone_list = list()
#rtwo_list = list()
#name_list = list()
#for samples in config:
# name_list.append(config[samples].sample)
# rone_list.append(config[samples].files[0])
# rtwo_list.append(config[samples].files[1])
#sra_list = files
vcf_list = pools.map(main, zip(repeat(bbduk_path), repeat(aligner_path),
repeat(smt_path), repeat(bft_path), repeat(gatk_path),
samples, files, repeat(ref_path), repeat(adp_path),
repeat(bed_path), repeat(out_dir), repeat(aligner),
repeat(pic_path), repeat(voi_path),
repeat(java_path), repeat(sra_path), repeat(purge), sra_list))
logger.info('Summarizing variant calls from all {0} experiments'.format(len(prep)))
summary = Summary(ref_path, bed_path, voi_path, out_dir)
    #Summarize variants of interest
summary.getSummary()
return(0)
if __name__ == '__main__':
    #Define default paths and aligner information
def_path = "{0}/lib".format(os.path.abspath(os.path.dirname(os.path.realpath(__file__))))
ref_def_path = "{0}/ref".format(os.path.abspath(os.path.dirname(os.path.realpath(__file__))))
bbduk_def = 'bbduk.sh' #"{0}/bbmap/bbduk.sh".format(def_path)
bbmap_def = 'bbmap.sh' #"{0}/bbmap/bbmap.sh".format(def_path)
bwa_def = 'bwa' #"{0}/bwa/bwa".format(def_path)
bowtie_def = 'bowtie2' #"{0}/bowtie2/bowtie2".format(def_path)
    snap_def = 'snap-aligner' #"{0}/snap/snap-aligner".format(def_path)
smt_def = 'samtools' #"{0}/samtools/samtools".format(def_path)
bft_def = 'bcftools' #"{0}/bcftools/bcftools".format(def_path)
gatk_def = 'gatk' #"{0}/GenomeAnalysisTK.jar".format(def_path)
pic_def = 'picard' #"{0}/picard.jar".format(def_path)
sra_def = 'fastq-dump' #'{0}/sratoolkit/bin/fastq-dump'.format(def_path)
voi_def = None #'{0}/Reportable_SNPs.csv'.format(ref_def_path)
#if 'java version "1.8.' in str(subprocess.check_output(["java", "-version"], stderr=subprocess.STDOUT).decode('UTF-8').split('\n')[0]):
java_def = 'java'
#else:
# java_def = "{0}/jdk/bin/java".format(def_path)
aligner_def = {'bwa' : bwa_def, 'snap' : snap_def, 'bowtie2': bowtie_def, 'bbmap': bbmap_def}
#Get arguments
parser = argparse.ArgumentParser(prog='NeST')
parser.add_argument('-i', '--inp_path', type=str,
help='Path to input directory (Specify only for batch mode)')
parser.add_argument('-1', '--fwd', dest='rone_path', type=str,
help='Path to forward reads fastq', )
parser.add_argument('-2', '--rev', dest='rtwo_path', type=str,
help='Path to reverse reads fastq')
parser.add_argument('-r', '--ref', dest='ref_path', type=str,
help='Path to Reference fasta file', required=True)
parser.add_argument('-a', '--adapter', dest='adp_path', type=str,
                        help='Path to Adapter fasta file', required=True)
parser.add_argument('-b', '--bed', dest='bed_path', type=str,
help='Path to Bed file for MDR regions', required=True)
parser.add_argument('-o', '--outpath', dest='out_path', type=str,
help='Path where all outputs will be stored', required=True)
parser.add_argument('-n', '--sam_name', dest='sam_name', type=str,
help='Sample name', default=None)
parser.add_argument('-m', '--mapper', dest='aligner', type=str,
choices=['bowtie2', 'bwa', 'bbmap', 'snap'],
                        default='bwa', help='The aligner to be used by MARs')
parser.add_argument('--bbduk', dest='bbduk_path', type=str, default=bbduk_def,
help='Path to BBduk executable')
parser.add_argument('--aligner', dest='aligner_path', type=str, default=None,
help='Path to aligner executable')
parser.add_argument('--samtools', dest='smt_path', type=str, default=smt_def,
help='Path to Samtools executable')
parser.add_argument('--gatk', dest='gatk_path', type=str, default=gatk_def,
help='Path to GATK executable')
parser.add_argument('--bcftools', dest='bft_path', type=str, default=bft_def,
help='Path to Bcftools executable')
parser.add_argument('--picard', dest='pic_path', type=str, default=pic_def,
                        help='Path to Picard executable')
parser.add_argument('--varofint', dest='voi_path', type=str, default=voi_def,
help='Path to variant of interest')
parser.add_argument('--threads', dest='threads', type=int, default=5,
help='Number of threads')
parser.add_argument('--verbose', action='store_true',
help='Increase verbosity of log file')
parser.add_argument('--purge', action='store_true',
                        help='Remove intermediate Fastq and alignment files')
args = parser.parse_args()
#Validate parsed arguments
if args.aligner_path is None:
args.aligner_path = aligner_def[args.aligner]
if not os.path.exists(args.out_path):
os.mkdir(args.out_path)
    #Check if the run command is for batch mode analysis or single sample
    #analysis.
    #If inp_path is empty and rone_path is not, then the experiment is a
    #single sample experiment.
status = marsBatch(args.bbduk_path, args.aligner_path, args.smt_path,
args.bft_path, args.gatk_path, args.inp_path, args.ref_path,
args.adp_path, args.bed_path, args.out_path, args.aligner,
args.pic_path, args.voi_path, java_def, sra_def, args.verbose,
args.threads, args.purge)
|
ohjeyy93/NFNeST
|
nest.py
|
nest.py
|
py
| 21,367 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "nest.prepinputs.Prepper",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "nest.bbduk.QualCheck",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "nest.alignment.Bwa",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "nest.alignment.Bowtie",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "nest.alignment.Snap",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "nest.alignment.BBMap",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "nest.samtools.Samtools",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 207,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 208,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "nest.gatk.Picard",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 224,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 225,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 240,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 254,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 267,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "nest.gatk.GenAnTK",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 286,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "nest.gatk.FreeBayes",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 302,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "nest.parsers.vcfannotate.Annotate",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "nest.parsers.vcfmerge.Merge",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "os.rename",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "nest.summarize.Summary",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 353,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 355,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 356,
"usage_type": "attribute"
},
{
"api_name": "logging.FileHandler",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 358,
"usage_type": "attribute"
},
{
"api_name": "logging.DEBUG",
"line_number": 360,
"usage_type": "attribute"
},
{
"api_name": "logging.INFO",
"line_number": 362,
"usage_type": "attribute"
},
{
"api_name": "logging.StreamHandler",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 365,
"usage_type": "attribute"
},
{
"api_name": "logging.Formatter",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "nest.prepinputs.Prepper",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "itertools.repeat",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "itertools.repeat",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "itertools.repeat",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "itertools.repeat",
"line_number": 397,
"usage_type": "call"
},
{
"api_name": "itertools.repeat",
"line_number": 398,
"usage_type": "call"
},
{
"api_name": "itertools.repeat",
"line_number": 399,
"usage_type": "call"
},
{
"api_name": "nest.summarize.Summary",
"line_number": 401,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 408,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 409,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 473,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 473,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 474,
"usage_type": "call"
}
] |
6629686746
|
import tensorflow as tf
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
#Configure GPU
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
for gpu in tf.config.experimental.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(gpu, True)
print(tf.config.experimental.get_memory_growth(gpu))
from tensorflow.keras import (models, layers, datasets, callbacks, optimizers,
initializers, regularizers)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
import os
import time
import numpy as np
from six import iteritems
from time import perf_counter
import ml_genn as mlg
from ml_genn import Model
from ml_genn.utils import parse_arguments, raster_plot
#Separable convolutional components MobilenetV1
def SeparableConv( x , num_filters , strides , alpha=1.0 ):
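    # Appends a separable-style block: a 1x1 pointwise conv, a 3x3 grouped conv
    # (depthwise-like, since groups equals the channel count here), then another
    # 1x1 pointwise conv. 'alpha' is the MobileNet width multiplier; 'initializer'
    # is the module-level kernel initializer set in the __main__ block below.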
planes = int(num_filters*alpha)
x.append(layers.Conv2D(planes, kernel_size=1, strides=1, padding="same", activation='relu', use_bias=False, kernel_initializer=initializer))
x.append(layers.Conv2D(planes, kernel_size=3, strides=strides, padding="same", groups=planes, activation='relu', use_bias=False, kernel_initializer=initializer))
x.append(layers.Conv2D(planes, kernel_size=1, strides=1, padding="same", activation='relu', use_bias=False, kernel_initializer=initializer))
return x
#Convolutional components MobilenetV1
def Conv( x , num_filters , kernel_size , strides=1 , alpha=1.0 ):
x.append(layers.Conv2D( int(num_filters * alpha ) , kernel_size=kernel_size , strides=strides , activation='relu', use_bias=False , padding='same', kernel_initializer=initializer))
return x
if __name__ == '__main__':
    args = parse_arguments('MobileNetV1 classifier model')
print('arguments: ' + str(vars(args)))
#check if tensorflow is running on GPU
print(tf.test.is_gpu_available())
print(tf.test.is_built_with_cuda())
n_norm_samples=1000
#Load Dataset
(x_train, y_train), (x_test, y_test) = datasets.cifar10.load_data()
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))
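    # NOTE: train_ds is built but never used below; training runs off the
    # ImageDataGenerator iterator (iter_train) instead.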
#Normalize data
x_train = x_train / 255.0
x_test = x_test / 255.0
encoder = OneHotEncoder()
encoder.fit(y_train)
y_train = encoder.transform(y_train).toarray()
y_test = encoder.transform(y_test).toarray()
index_norm=np.random.choice(x_train.shape[0], n_norm_samples, replace=False)
x_norm = x_train[index_norm]
y_norm = y_train[index_norm]
# Create L2 regularizer
regularizer = regularizers.l2(0.01)
# Create image data generator
data_gen = ImageDataGenerator(width_shift_range=0.3,height_shift_range=0.8,rotation_range=30,zoom_range=0.1,
shear_range=0.01)
# Get training iterator
iter_train = data_gen.flow(x_train, y_train, batch_size=256)
initializer="he_uniform"
    #Create the model
layers_mobilenetv1 =[
layers.Conv2D(32,3, strides=1, activation='relu', padding="same", use_bias=False, input_shape=x_train.shape[1:])
]
layers_mobilenetv1 = Conv(layers_mobilenetv1,num_filters=32 , kernel_size=3 , strides=1 )
layers_mobilenetv1 = SeparableConv( layers_mobilenetv1, num_filters=32 , strides=1 )
layers_mobilenetv1 = Conv( layers_mobilenetv1, num_filters=64 , kernel_size=1 )
layers_mobilenetv1 = SeparableConv( layers_mobilenetv1, num_filters=64 , strides=1 )
layers_mobilenetv1 = Conv( layers_mobilenetv1, num_filters=128 , kernel_size=1 )
layers_mobilenetv1 = SeparableConv( layers_mobilenetv1, num_filters=128 , strides=1 )
layers_mobilenetv1 = Conv(layers_mobilenetv1, num_filters=128 , kernel_size=1 )
layers_mobilenetv1 = SeparableConv( layers_mobilenetv1, num_filters=128 , strides=2 )
layers_mobilenetv1 = Conv( layers_mobilenetv1, num_filters=256 , kernel_size=1 )
layers_mobilenetv1 = SeparableConv( layers_mobilenetv1, num_filters=256 , strides=1 )
layers_mobilenetv1 = Conv( layers_mobilenetv1, num_filters=256 , kernel_size=1 )
layers_mobilenetv1 = SeparableConv( layers_mobilenetv1, num_filters=256 , strides=2 )
layers_mobilenetv1 = Conv( layers_mobilenetv1, num_filters=512 , kernel_size=1 )
for i in range( 5 ):
layers_mobilenetv1 = SeparableConv( layers_mobilenetv1, num_filters=256 , strides=1 )
layers_mobilenetv1 = Conv( layers_mobilenetv1, num_filters=512 , kernel_size=1 )
layers_mobilenetv1 = SeparableConv(layers_mobilenetv1, num_filters=512 , strides=2 )
layers_mobilenetv1 = Conv(layers_mobilenetv1, num_filters=1024 , kernel_size=1 )
layers_mobilenetv1 = SeparableConv(layers_mobilenetv1, num_filters=1024 , strides=2 )
layers_mobilenetv1 = Conv(layers_mobilenetv1, num_filters=1024 , kernel_size=1 )
layers_mobilenetv1.append(layers.GlobalAveragePooling2D())
layers_mobilenetv1.append(layers.Dense(10,activation='softmax', use_bias=False))
tf_model = models.Sequential(layers_mobilenetv1,name="mobilenetv1")
tf_model.summary()
if args.reuse_tf_model:
tf_model = models.load_model('mobilenetv1')
else:
        optimizer = optimizers.SGD(learning_rate=0.05, momentum=0.9)
tf_model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
checkpoint_path = "training_1/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
save_weights_only=True,
verbose=1,
save_best_only=True,
monitor='val_accuracy')
#train TensorFlow model
steps_per_epoch = x_train.shape[0] // 256
        tf_model.fit(iter_train, steps_per_epoch=steps_per_epoch, epochs=200, callbacks=[cp_callback], validation_data=(x_test,y_test))
#Save Mobilenetv1_tf_model
models.save_model(tf_model, 'mobilenetv1', save_format='h5')
#Evaluate TensorFlow model
tf_model.evaluate(x_test, y_test)
tf_eval_start_time = perf_counter()
tf_model.evaluate(x_test, y_test)
print("TF evaluation:%f" % (perf_counter() - tf_eval_start_time))
    # Build the input-to-spike converter for ML GeNN from the normalisation samples
converter = args.build_converter(x_norm, K=10, norm_time=500)
# Convert and compile ML GeNN model
mlg_model = Model.convert_tf_model(
tf_model, converter=converter, connectivity_type=args.connectivity_type,
input_type=args.input_type, dt=args.dt, batch_size=args.batch_size, rng_seed=args.rng_seed,
kernel_profiling=args.kernel_profiling)
    sim_time = 10 if args.converter == 'few-spike' else 500  # renamed to avoid shadowing the imported time module
mlg_eval_start_time = perf_counter()
    acc, spk_i, spk_t = mlg_model.evaluate([x_test], [y_test], sim_time, save_samples=args.save_samples)
print("MLG evaluation:%f" % (perf_counter() - mlg_eval_start_time))
if args.kernel_profiling:
print("Kernel profiling:")
for n, t in iteritems(mlg_model.get_kernel_times()):
print("\t%s: %fs" % (n, t))
# Report ML GeNN model results
print('Accuracy of MobileNetv1 GeNN model: {}%'.format(acc[0]))
if args.plot:
neurons = [l.neurons.nrn for l in mlg_model.layers]
        raster_plot(spk_i, spk_t, neurons, time=sim_time)
|
jfgf11/ml_genn_examples_ssh
|
Sequential API/mobilenetv1.py
|
mobilenetv1.py
|
py
| 7,524 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "tensorflow.compat.v1.ConfigProto",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1.InteractiveSession",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "tensorflow.config.experimental.list_physical_devices",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tensorflow.config",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.config.experimental.set_memory_growth",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tensorflow.config",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.config.experimental.get_memory_growth",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tensorflow.config",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Conv2D",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras.layers.Conv2D",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras.layers.Conv2D",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras.layers.Conv2D",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "ml_genn.utils.parse_arguments",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "tensorflow.test.is_gpu_available",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "tensorflow.test",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.test.is_built_with_cuda",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "tensorflow.test",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.datasets.cifar10.load_data",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.datasets.cifar10",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.datasets",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "tensorflow.data.Dataset.from_tensor_slices",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "tensorflow.data",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "sklearn.preprocessing.OneHotEncoder",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.regularizers.l2",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.regularizers",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras.preprocessing.image.ImageDataGenerator",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers.Conv2D",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras.layers.GlobalAveragePooling2D",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras.models.Sequential",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.models",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras.models.load_model",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.models",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras.optimizers.SGD",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.optimizers",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.callbacks.ModelCheckpoint",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.models.save_model",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.models",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "time.perf_counter",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "ml_genn.Model.convert_tf_model",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "ml_genn.Model",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "time.perf_counter",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "six.iteritems",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "ml_genn.utils.raster_plot",
"line_number": 166,
"usage_type": "call"
}
] |
70005437309
|
from __future__ import with_statement
from fabric.api import *
import os, glob, socket
import fabric.contrib.project as project
PROD = 'spreadwebm.org'
PROD_PATH = 'domains/spreadwebm.com/web/public/'
ROOT_PATH = os.path.abspath(os.path.dirname(__file__))
DEPLOY_PATH = os.path.join(ROOT_PATH, 'deploy')
def clean():
local('rm -rf ./deploy/*')
def regen():
clean()
local('hyde.py -g -s .')
def pushcss():
"""
For pushing CSS-only changes to the local docroot during testing.
"""
local('cp -r media/css/* deploy/media/css/')
def serve():
## Kill any heel process
local('heel --kill')
## Start webserver on local hostname
#local('heel --daemonize --address ' + socket.gethostbyaddr(socket.gethostname())[0] + ' --root ./deploy')
## Start webserver on development hostname
local('heel --daemonize --address localhost --root ./deploy')
def reserve():
regen()
local('heel --kill')
serve()
@hosts(PROD)
def publish():
regen()
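    # rsync the freshly generated site; the trailing slash on local_dir makes
    # rsync copy the directory contents rather than the directory itself.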
project.rsync_project(
remote_dir=PROD_PATH,
local_dir=DEPLOY_PATH.rstrip('/') + '/',
delete=True
)
|
louquillio/spreadwebm.com
|
fabfile.py
|
fabfile.py
|
py
| 1,127 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "os.path.abspath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "fabric.contrib.project.rsync_project",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "fabric.contrib.project",
"line_number": 40,
"usage_type": "name"
}
] |
1293789301
|
import numpy as np
import onnx
from tests.tools import expect
class Sqrt:
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Sqrt',
inputs=['x'],
outputs=['y'],
)
x = np.array([1, 4, 9]).astype(np.float32)
y = np.sqrt(x) # expected output [1., 2., 3.]
expect(node, inputs=[x], outputs=[y], name='test_sqrt_example')
x = np.abs(np.random.randn(3, 4, 5).astype(np.float32))
y = np.sqrt(x)
expect(node, inputs=[x], outputs=[y], name='test_sqrt')
if __name__ == '__main__':
Sqrt.export()
|
gglin001/onnx-jax
|
tests/node/test_sqrt.py
|
test_sqrt.py
|
py
| 632 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "onnx.helper.make_node",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "onnx.helper",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tests.tools.expect",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "tests.tools.expect",
"line_number": 22,
"usage_type": "call"
}
] |
28031383283
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import rospy
import cv2
import cv_bridge
import sensor_msgs.msg
import argparse
import numpy as np
class image_converter:
def __init__(self, input_topic, output_topic):
self.image_pub = rospy.Publisher(
output_topic, sensor_msgs.msg.Image, queue_size=10)
self.bridge = cv_bridge.CvBridge()
self.image_sub = rospy.Subscriber(
input_topic, sensor_msgs.msg.Image,
self.callback)
def callback(self, data):
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
#
# DO SOMETHING
#
cv_image = self.canny_edge(cv_image)
        try:
            ros_img = self.bridge.cv2_to_imgmsg(cv_image, "mono8") # canny: mono8(8UC1)
        except cv_bridge.CvBridgeError as e:
            print(e)
            return  # ros_img would be unbound here; skip publishing on conversion failure
        self.image_pub.publish(ros_img)
def canny_edge(self, img):
"""
return canny edge image
"""
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
canny = cv2.Canny(gray, 100, 150)
return canny
# Usage:
# rosrun PACKAGE image_converter.py input:=/camera/image_raw_throttle output:=/test
#
def main(args):
rospy.init_node('image_converter', anonymous=True)
input_topic = rospy.resolve_name("input")
output_topic = rospy.resolve_name("output")
print("input_topic: %s" % (input_topic,))
print("output_topic: %s" % (output_topic,))
sys.stdout.flush()
ic = image_converter(input_topic, output_topic)
try:
print("Invoke rospy.spin().")
sys.stdout.flush()
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
cv2.destroyAllWindows()
if __name__ == '__main__':
main(sys.argv)
|
kargenk/image_converter
|
image_converter.py
|
image_converter.py
|
py
| 1,762 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "rospy.Publisher",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sensor_msgs.msg.msg",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "sensor_msgs.msg",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "cv_bridge.CvBridge",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "rospy.Subscriber",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sensor_msgs.msg.msg",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "sensor_msgs.msg",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "cv_bridge.CvBridgeError",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "cv2.Canny",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "rospy.init_node",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "rospy.resolve_name",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "rospy.resolve_name",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "sys.stdout.flush",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "rospy.spin",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 65,
"usage_type": "attribute"
}
] |
44713652316
|
import sys
import xmltodict
color_names = {
'Foreground Color': 'ForegroundColour',
'Background Color': 'BackgroundColour',
'Cursor Text Color': 'CursorColour',
'Ansi 0 Color': 'Black',
'Ansi 1 Color': 'Red',
'Ansi 2 Color': 'Green',
'Ansi 3 Color': 'Yellow',
'Ansi 4 Color': 'Blue',
'Ansi 5 Color': 'Magenta',
'Ansi 6 Color': 'Cyan',
'Ansi 7 Color': 'White',
'Ansi 8 Color': 'BoldBlack',
'Ansi 9 Color': 'BoldRed',
'Ansi 10 Color': 'BoldGreen',
'Ansi 11 Color': 'BoldYellow',
'Ansi 12 Color': 'BoldBlue',
'Ansi 13 Color': 'BoldMagenta',
'Ansi 14 Color': 'BoldCyan',
'Ansi 15 Color': 'BoldWhite'
}
def get_color(data, name):
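    # xmltodict parses the plist <dict> into parallel 'key' and 'dict' lists,
    # so each colour dict sits at the same index as its name in the key list.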
color_data = data['dict'][data['key'].index(name)]
red = get_component(color_data, 'Red Component')
green = get_component(color_data, 'Green Component')
blue = get_component(color_data, 'Blue Component')
return (red, green, blue)
def get_component(color_data, component_name):
component_index = color_data['key'].index(component_name)
component_value = color_data['real'][component_index]
    return round(float(component_value) * 255)  # map the 0.0-1.0 plist value onto 0-255
input_filename = sys.argv[1]
with open(input_filename) as fd:
iterm = xmltodict.parse(fd.read())['plist']['dict']
fg_data = get_color(iterm, 'Foreground Color')
for iterm_color in color_names.keys():
mintty_color = color_names[iterm_color]
color = get_color(iterm, iterm_color)
print("{} = {}, {}, {}".format(mintty_color, color[0], color[1], color[2]))
|
arcadecoffee/iterm-to-mintty
|
iterm-to-mintty.py
|
iterm-to-mintty.py
|
py
| 1,706 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.argv",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "xmltodict.parse",
"line_number": 44,
"usage_type": "call"
}
] |
70501561789
|
from django.conf.urls import patterns, include, url
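# Legacy Django URLconf style: patterns() with dotted-string view paths
# (deprecated in Django 1.8 and removed in 1.10).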
urlpatterns = patterns('django_sprinkler',
url(r"^get_context/?", "views.get_context", name="get_context"),
url(r"^logs/?", "views.watering_logs", name="watering_logs"),
url(r"^toggle_valve/(\d+)?/?", "views.toggle_valve", name="toggle_valve"),
url(r"^activate_program/(\d+)?/?", "views.activate_program", name="activate_program"),
url(r"^set_state/(\w+)?", "views.set_state", name="set_state"),
url(r'^$', "views.home", name="home"),
)
|
jpardobl/django_sprinkler
|
django_sprinkler/urls.py
|
urls.py
|
py
| 517 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.conf.urls.patterns",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 9,
"usage_type": "call"
}
] |
33967529914
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time :2020/7/7 15:39
# @Author :Sheng Chen
# @Email :[email protected]
import sys
sys.path.append(r'/home/chensheng/likou')
from typing import List, Tuple
class Solution:
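    # NOTE: these are class-level attributes, shared by every Solution instance;
    # fine for the single-use driver below, but state would leak across instances.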
rows = [{} for i in range(9)]
columns = [{} for i in range(9)]
boxes = [{} for i in range(9)]
fillIndex = []
isValid = False
ini = False
def isValidSudoku(self, board: List[List[str]]):
# global rows,columns,boxes,fillIndex,isValid
for i in range(9):
for j in range(9):
num = board[i][j]
if num != '.':
self.rows[i][num] = self.rows[i].get(num, 0) + 1
self.columns[j][num] = self.columns[j].get(num, 0) + 1
boxIndex = (i // 3) * 3 + j // 3
self.boxes[boxIndex][num] = self.boxes[boxIndex].get(num, 0) + 1
if self.rows[i][num] > 1 or self.columns[j][num] > 1 or self.boxes[boxIndex][num] > 1:
return
else:
self.fillIndex.append((i, j))
self.isValid = True
self.ini = True
def solveSudoku(self, board: List[List[str]]) -> None:
if not self.ini:
self.isValidSudoku(board)
if not self.isValid:
return
if len(self.fillIndex) == 0:
return True
i, j = self.fillIndex.pop(0)
row = self.rows[i]
column = self.columns[j]
box = self.boxes[(i // 3) * 3 + j // 3]
dic = {**row, **column, **box}
candidate_num = [str(num) for num in range(1, 10) if str(num) not in dic]
if len(candidate_num) == 0:
self.fillIndex.insert(0, (i, j))
return False
else:
for num in candidate_num:
board[i][j] = num
self.rows[i][num] = 1
self.columns[j][num] = 1
self.boxes[(i // 3) * 3 + j // 3][num] = 1
a = self.solveSudoku(board)
if not a:
board[i][j] = '.'
del self.rows[i][num]
del self.columns[j][num]
del self.boxes[(i // 3) * 3 + j // 3][num]
else:
return True
self.fillIndex.insert(0, (i, j))
return False
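# Note (added): this is plain depth-first backtracking — take the first empty
# cell, try each digit not yet used in its row, column or 3x3 box, recurse,
# and undo the move (and its bookkeeping) when the recursion fails.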
if __name__ == '__main__':
board = [[".", ".", "9", "7", "4", "8", ".", ".", "."], ["7", ".", ".", ".", ".", ".", ".", ".", "."],
[".", "2", ".", "1", ".", "9", ".", ".", "."], [".", ".", "7", ".", ".", ".", "2", "4", "."],
[".", "6", "4", ".", "1", ".", "5", "9", "."], [".", "9", "8", ".", ".", ".", "3", ".", "."],
[".", ".", ".", "8", ".", "3", ".", "2", "."], [".", ".", ".", ".", ".", ".", ".", ".", "6"],
[".", ".", ".", "2", "7", "5", "9", ".", "."]]
obj = Solution()
obj.solveSudoku(board)
print(board)
print(4//2*2)
|
fqlovetu/likou_python
|
37解数独/solution1.py
|
solution1.py
|
py
| 2,979 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 37,
"usage_type": "name"
}
] |
28765664515
|
from async_scrape import Scrape
import requests
import json
from selenium.webdriver import Edge
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
url = "https://order.marstons.co.uk/"
base_dir = "C:/Users/robert.franklin/Desktop/local_projects/random/marstons"
# GET ALL RESTAURANT DATA - selenium
browser = Edge()
browser.get(url)
wait = WebDriverWait(browser, 100).until(
EC.presence_of_all_elements_located((By.CLASS_NAME, "venues-list"))
)
elements = browser.find_elements(By.CLASS_NAME, "venue-card")
hrefs = [e.get_dom_attribute("href") for e in elements]
browser.close()
print(f"Fetched {len(hrefs)} hrefs from {url}")
def post_process_func(html, resp, *args, **kwargs):
# Save to file
fn = resp.url.split("/")[-1]
content = json.loads(resp.content)
with open(f"{base_dir}/data/raw/{fn}.json", "w") as f:
json.dump(content, f, indent=4)
base_url = "https://api-cdn.orderbee.co.uk/venues"
urls = [base_url + href for href in hrefs]
scrape = Scrape(post_process_func=post_process_func)
print(f"Begin scrape of {len(urls)} - Example: {urls[0]}")
scrape.scrape_all(urls)
|
cia05rf/marstons
|
webscrape/scrape.py
|
scrape.py
|
py
| 1,223 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.Edge",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.wait.WebDriverWait",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.presence_of_all_elements_located",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CLASS_NAME",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CLASS_NAME",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "async_scrape.Scrape",
"line_number": 33,
"usage_type": "call"
}
] |
27214868635
|
from enum import IntEnum, auto
from typing import List, Mapping, Union, Tuple, Optional
from .aetg import AETGGenerator
from .matrix import MatrixGenerator
from ...model import int_enum_loads
from ...reflection import progressive_for
__all__ = ['tmatrix']
@int_enum_loads(enable_int=False, name_preprocess=str.upper)
class MatrixMode(IntEnum):
AETG = auto()
MATRIX = auto()
def tmatrix(ranges: Mapping[Union[str, Tuple[str, ...]], List],
mode='aetg', seed: Optional[int] = 0, level: int = 2) -> Tuple[List[str], List[Tuple]]:
"""
Overview:
Test matrix generator, which can be used in ``pytest.mark.parameterize``.
:param ranges: Ranges of the values
:param mode: Mode of the matrix, should be one of the ``aetg`` or ``matrix``. Default is ``aetg``.
:param seed: Random seed, default is ``0`` which means the result is fixed (recommended).
    :param level: Level of the AETG generating algorithm, default is ``2``.
:returns: A tuple - ``(names, values)``.
Examples::
>>> from hbutils.testing import tmatrix
>>> names, values = tmatrix(
... {
... 'a': [2, 3],
... 'e': ['a', 'b', 'c'],
... ('b', 'c'): [(1, 7), (4, 6), (9, 12)],
... }
... )
>>> print(names)
['a', 'e', 'b', 'c']
>>> for i, v in enumerate(values):
... print(i, v)
0 (2, 'c', 9, 12)
1 (3, 'c', 4, 6)
2 (2, 'c', 1, 7)
3 (3, 'b', 9, 12)
4 (2, 'b', 4, 6)
5 (3, 'b', 1, 7)
6 (3, 'a', 9, 12)
7 (2, 'a', 4, 6)
8 (3, 'a', 1, 7)
.. note::
This can be directly used in ``pytest.mark.parametrize`` function.
>>> @pytest.mark.unittest
... class TestTestingGeneratorFunc:
... @pytest.mark.parametrize(*tmatrix({
... 'a': [2, 3],
... 'e': ['a', 'b', 'c'],
... ('b', 'c'): [(1, 7), (4, 6), (9, 12)],
... }))
... def test_tmatrix_usage(self, a, e, b, c):
... print(a, e, b, c)
"""
mode = MatrixMode.loads(mode)
key_map = {}
final_names = []
final_values = {}
for ki, (key, value) in enumerate(ranges.items()):
kname = f'key-{ki}'
key_map[kname] = key
final_names.append(kname)
final_values[kname] = value
names = []
for key in ranges.keys():
if isinstance(key, str):
names.append(key)
elif isinstance(key, tuple):
for k in key:
names.append(k)
if mode == MatrixMode.MATRIX:
generator = MatrixGenerator(final_values, final_names)
elif mode == MatrixMode.AETG:
generator = AETGGenerator(
final_values, final_names, rnd=seed,
pairs=list(progressive_for(final_names, min(level, len(names)))),
)
else:
raise ValueError(f'Invalid mode - {mode!r}.') # pragma: no cover
pairs = []
for case in generator.cases():
_v_case = {}
for name in final_names:
key = key_map[name]
if isinstance(key, str):
_v_case[key] = case[name]
elif isinstance(key, tuple):
for ikey, ivalue in zip(key, case[name]):
_v_case[ikey] = ivalue
pairs.append(tuple(_v_case[name] for name in names))
return names, pairs
|
HansBug/hbutils
|
hbutils/testing/generator/func.py
|
func.py
|
py
| 3,442 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "enum.IntEnum",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "enum.auto",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "enum.auto",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "model.int_enum_loads",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "typing.Mapping",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matrix.MatrixGenerator",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "aetg.AETGGenerator",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "reflection.progressive_for",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "typing.Tuple",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 19,
"usage_type": "name"
}
] |
28382656931
|
import cgi
import sys
import io
import genshin.database.operation as gdo
form = cgi.FieldStorage()
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
template = """
<html>
<head>
<meta charset="utf-8">
<script type="text/javascript">
location.replace('/cgi-bin/characters.py?dname={name}');
</script>
</head>
<body>
<p>Deleting...</p>
</body>
</html>
"""
def delete_character_data(name):
gdo.delete_character(name)
def main():
name = form.getvalue("del")
delete_character_data(name)
print("Content-type: text/html\n")
print(template.format(name=name))
main()
|
waigoma/genshin-charatraining-supporter
|
src/cgi-bin/character_delete.py
|
character_delete.py
|
py
| 628 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cgi.FieldStorage",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "io.TextIOWrapper",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "genshin.database.operation.delete_character",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "genshin.database.operation",
"line_number": 25,
"usage_type": "name"
}
] |
72777565307
|
# Plotting solution of x''(t) + x(t) = 0 equation
import numpy as np
import matplotlib.pyplot as plt
import os
from io import StringIO
import pandas as pd
from find_solution import find_solution
from plot_utils import create_dir
def plot_solution(plot_dir, t_end, delta_t):
data = find_solution(t_end=t_end, delta_t=delta_t, print_last=False)
create_dir(plot_dir)
if data is None:
return
df = pd.read_csv(StringIO(data), skipinitialspace=True)
plt.plot(df['t'], df['x'], label='Approximation')
exact_t = np.arange(0.0, t_end, 0.01)
exact_x = np.cos(exact_t)
    plt.plot(exact_t, exact_x, label=r'Exact $x=\cos(t)$', linestyle='--')  # raw string so \c is not treated as an escape
plt.title(r'Solution of $\ddot{x} + x = 0, x(0)=1, \dot{x}(0)=0$ for dt=' + f'{delta_t}')
plt.xlabel('t')
plt.ylabel(r'x')
plt.legend()
plt.grid()
plt.tight_layout()
plotfile = os.path.join(plot_dir, f"approx_vs_exact_dt_{delta_t}.pdf")
plt.savefig(plotfile)
plt.show()
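# Why cos(t) is the exact solution (added note): x'' + x = 0 has characteristic
# equation r^2 + 1 = 0, so x(t) = A*cos(t) + B*sin(t); the initial conditions
# x(0) = 1 and x'(0) = 0 give A = 1, B = 0.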
if __name__ == '__main__':
plot_solution(plot_dir="plots", t_end=6.28, delta_t=1)
plot_solution(plot_dir="plots", t_end=6.28, delta_t=0.1)
|
evgenyneu/ASP3162
|
03_second_order_ode/plotting/plot_solution.py
|
plot_solution.py
|
py
| 1,130 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "find_solution.find_solution",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "plot_utils.create_dir",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "io.StringIO",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
}
] |
9842073923
|
import redneuronal
import random
import time
import statistics
class RedNeuronalGA:
def __init__(self, size, config:list, inputsize, mut_rate = 0.01, lastbest_rate = 0.5, tour_size = 10):
'''
:param size:
:param n_genes:
        :param config: list giving [number of layers, [number of neurons per layer]]
:param mut_rate:
:param lastbest_rate:
:param tour_size:
'''
self.totalfitness = []
self.inputsize = inputsize
self.lastbest_rate = lastbest_rate
self.pop_size = size
self.mutation_rate = mut_rate
self.tournament_size = tour_size
        self.current_generation = [] # list of networks
        self.current_fitness = [] # fitness values found after playing
self.final_ind = None
self.config = config
def set_tournamentsize(self, size):
self.tournament_size = size
def set_mutationrate(self, rate):
self.mutation_rate = rate
def set_survivalrate(self, rate):
self.lastbest_rate = rate
def initialize(self):
for i in range(self.pop_size):
red = []
n_layers = self.config[0]
            for j in range(n_layers): # number of layers
layer = []
n_neuronasj = self.config[1][j]
for k in range(n_neuronasj):
neuron = []
if j == 0:
for p in range(self.inputsize):
neuron.append(random.random()*2)
else:
for p in range(self.config[1][j-1]):
neuron.append(random.random()*2)
neuron.append(0.8)
layer.append(neuron)
red.append(layer)
self.current_generation.append(redneuronal.RedN(red, i))
def tournament_selection(self, population: list, k):
''' Randomly select the best individual after k iterations'''
N = len(population)
best = None
for i in range(k):
ind = population[random.randint(0, N - 1)]
if best == None or self.fitness(ind) > self.fitness(best):
best = ind
return best
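    # Note (added): larger k raises selection pressure — sampling k times with
    # replacement makes the tournament winner increasingly likely to be the
    # best individual of the whole population.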
def reproduce(self, red1:redneuronal.RedN, red2:redneuronal.RedN, index):
nlayers = len(red1.red)
new = []
for i in range(nlayers):
layer1 = red1.red[i]
layer2 = red2.red[i]
l = len(layer1)
r = random.randint(1, l - 1)
rep = layer1[0:r] + layer2[r:l]
baby = []
for i in range(l):
if random.random() < self.mutation_rate:
baby.append(self.mutneuron(rep[i]))
else:
baby.append(rep[i])
new.append(baby)
return redneuronal.RedN(new, index)
def find(self):
# best individuals of last generation
best = []
size = self.pop_size
        # select the candidate best individuals
while (len(best) < size*self.lastbest_rate):
sel = self.tournament_selection(self.current_generation, self.tournament_size)
if sel not in best:
best.append(sel)
self.current_generation.remove(sel)
        # create a new generation from the previous best
gen = []
count = 0
while (len(gen) < size):
ind1, ind2 = random.sample(best, 2)
baby = self.reproduce(ind1, ind2,count)
count+=1
gen.append(baby)
self.current_generation = gen
self.savefitness()
self.current_fitness = []
def fitness(self, ind:redneuronal.RedN):
return self.current_fitness[ind.index]
def savefitness(self):
self.totalfitness.append(statistics.mean(self.current_fitness))
def mutneuron(self, neuron:list):
        print('mutant!')
new = []
for i in range(len(neuron)-1):
if random.random()<0.5:
new.append(random.random() * 2)
else:
new.append(neuron[i])
new.append((neuron[-1]+random.random())%2)
return new
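# A minimal usage sketch (added, not in the original); population size and
# network config are hypothetical, and redneuronal.RedN is assumed to accept
# (weights, index) as in the calls above:
# ga = RedNeuronalGA(size=50, config=[2, [4, 1]], inputsize=3)
# ga.initialize()
# # ... evaluate every network, fill ga.current_fitness, then:
# # ga.find()  # builds the next generation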
|
plt1994/cc5114ne
|
genalg.py
|
genalg.py
|
py
| 4,234 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "random.random",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "redneuronal.RedN",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "redneuronal.RedN",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "redneuronal.RedN",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "redneuronal.RedN",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "statistics.mean",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 126,
"usage_type": "call"
}
] |
39839377690
|
#Library
import datetime
import time
from tkinter import *
import tkinter.ttk as ttk
from urllib.request import urlretrieve
import serial
import os
import RPi.GPIO as GPIO
#End Library
#Firmwares
ultra1 = serial.Serial("/dev/ttyUSB0",baudrate=9600, timeout=1)
gsm = serial.Serial("/dev/ttyAMA0",baudrate=9600, timeout=1)
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(31,GPIO.OUT) #Relay 1
GPIO.setup(33,GPIO.OUT) #Relay 2
GPIO.setup(35,GPIO.OUT) #Relay 3
GPIO.setup(37,GPIO.OUT) #Relay 4
GPIO.setup(12,GPIO.OUT) #Sensor1 Enable
#End Firmwares
root=Tk()
root.geometry("%dx%d+%d+%d"%(800,480,100,50)) #x,y,horizental,vertical
root.title('SAJAB')
root.configure(background='lightblue')
f1=open("relay1_source.txt","r")
f2=open("relay2_source.txt","r")
f3=open("relay3_source.txt","r")
f4=open("relay4_source.txt","r")
v1 = IntVar()
v1.set(int(f1.read())) # initializing the choice, i.e. Python
v2 = IntVar()
v2.set(int(f2.read())) # initializing the choice, i.e. Python
v3 = IntVar()
v3.set(int(f3.read())) # initializing the choice, i.e. Python
v4 = IntVar()
v4.set(int(f4.read())) # initializing the choice, i.e. Python
GPIO.output(31,v1.get())
GPIO.output(33,v2.get())
GPIO.output(35,v3.get())
GPIO.output(37,v4.get())
f1.close()
f2.close()
f3.close()
f4.close()
#Variables
station_name = "Golestan Uni"
pass_main='1120'
time_xloc= 20
time_yloc= 5
date_xloc=10
date_yloc=30
table_x=20
table_y=150
# 'global' at module level is a no-op, and these defaults were declared but
# never assigned, which made the Setting window crash; the placeholder values
# below are assumptions — adjust them to the station's real calibration.
default_sampling_rate = 2
default_bias_value = 0
default_coefficent = 1
default_max_level = 1000
default_hysteresis_level = 100
default_mobile_phone1 = ""
default_mobile_phone2 = ""
default_mobile_phone3 = ""
step = 20
stepx = 40
global sampling_rate
sampling_rate=2
#End Variables
#Functions
def relay1():
relay1_source = open("relay1_source.txt","w+")
GPIO.setup(31,GPIO.OUT) #Relay 1
if v1.get()==0:
print("is off")
GPIO.output(31,0)
relay1_source.write("0")
else:
print("in on")
GPIO.output(31,1)
relay1_source.write("1")
relay1_source.close()
def relay2():
relay2_source = open("relay2_source.txt","w+")
    GPIO.setup(33,GPIO.OUT) #Relay 2
if v2.get()==0:
print("is off")
GPIO.output(33,0)
relay2_source.write("0")
else:
print("in on")
GPIO.output(33,1)
relay2_source.write("1")
relay2_source.close()
def relay3():
relay3_source = open("relay3_source.txt","w+")
    GPIO.setup(35,GPIO.OUT) #Relay 3
if v3.get()==0:
print("is off")
GPIO.output(35,0)
relay3_source.write("0")
else:
print("in on")
GPIO.output(35,1)
relay3_source.write("1")
relay3_source.close()
def relay4():
relay4_source = open("relay4_source.txt","w+")
    GPIO.setup(37,GPIO.OUT) #Relay 4
if v4.get()==0:
print("is off")
GPIO.output(37,0)
relay4_source.write("0")
else:
print("in on")
GPIO.output(37,1)
relay4_source.write("1")
relay4_source.close()
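# A possible refactor sketch (added, not in the original): the four relay
# handlers differ only in pin, Tk variable and state file, so a single helper
# could replace them:
# def set_relay(pin, var, path):
#     GPIO.setup(pin, GPIO.OUT)
#     state = 0 if var.get() == 0 else 1
#     GPIO.output(pin, state)
#     with open(path, "w+") as f:
#         f.write(str(state))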
#Send Data
def send_data():
    gsm.write("AT\r\n".encode('ascii'))  # basic handshake with the modem
    rcv = gsm.read(10)
    print(rcv)
    gsm.write("AT+CSQ\r\n".encode('ascii'))  # query signal quality
    time.sleep(1)
    gsm.write("AT+CGATT?\r\n".encode('ascii'))  # check GPRS attach status
    time.sleep(1)
    gsm.write("AT+SAPBR=3,1,\"CONTYPE\",\"GPRS\"\r\n".encode('ascii'))  # bearer type: GPRS
    time.sleep(1)
    gsm.write("AT+SAPBR=3,1,\"APN\",\"mtnirancell\"\r\n".encode('ascii'))  # set the APN
    time.sleep(4)
    gsm.write("AT+SAPBR=1,1\r\n".encode('ascii'))  # open the bearer
    time.sleep(2)
    gsm.write("AT+HTTPINIT\r\n".encode('ascii'))  # initialize the HTTP service
    time.sleep(2)
    data_link = "AT+HTTPPARA=\"URL\",\"http://sajab.sazabgolestan.com/server.php?action=save&station_index=3&ha=%d&hb=3&imei=9359374362\"\r\n" %(sensor1_read(sen1))
    gsm.write(data_link.encode('ascii'))  # set the request URL with the current sensor reading
    time.sleep(1)
    gsm.write("AT+HTTPACTION=0\r\n".encode('ascii'))  # 0 = HTTP GET
    time.sleep(10)
#End Send Data
#Sensor Read
def sensor1_read(sen):
def count():
GPIO.output(12,0)
global u1
u1 = ultra1.read(12)
u1 = str(u1)
loc = u1.find('R')
u1 = u1[loc+1:loc+6]
GPIO.output(12,1)
sen.config(text=str(u1))
sen.after(sampling_rate*500,count)
count()
return int(u1)
#End Sensor read
#Samle Rate read
#def sr_read(sr_label):
# def count5():
# sr_label.config(text=str(sampling_rate))
# sr_label.after(sampling_rate*500,count5)
# count5()
#End Samle Rate read
def pass_check():
global passcheck
passcheck = Toplevel()
passcheck.geometry("%dx%d+%d+%d"%(200,140,100,50)) #x,y,horizental,vertical
passcheck.title('Setting')
passcheck.configure(background='lightblue')
Label(passcheck,text="Enter Password:",fg=top_bg_color,bg=color,width=0).place(x=40,y=20)
global pass_in
pass_in=Entry(passcheck,width=18)
pass_in.place(x=25,y=50)
Button(passcheck,text="OK",command=pass_check2).place(x=80,y=80)
def pass_check2():
if pass_in.get()==pass_main:
passcheck.destroy()
setting()
else:
Label(passcheck,text="Password is wrong!",fg=top_bg_color,bg=color,width=0).place(x=40,y=110)
def setting():
global setting_frame
setting_frame = Toplevel()
setting_frame.geometry("%dx%d+%d+%d"%(700,420,100,50)) #x,y,horizental,vertical
setting_frame.title('Setting')
setting_frame.configure(background='lightblue')
Label(setting_frame,text="Station name:",fg=top_bg_color,bg=color,width=0).grid(row=0,column=0,ipadx=30,pady=8)
Label(setting_frame,text=station_name,fg=top_bg_color,bg=color,width=0).grid(row=0,column=1,ipadx=30,pady=8)
Label(setting_frame,text="Sampling rate:",fg=top_bg_color,bg=color,width=0).grid(row=1,column=0,ipadx=30,pady=8)
global samp_rate
samp_rate=Entry(setting_frame,width=8)
samp_rate.place(x=140,y=45)
samp_rate.insert(10,str(sampling_rate))
Label(setting_frame,text="Sec.",fg=top_bg_color,bg=color,width=0).place(x=220,y=46)
Label(setting_frame,text="Calibration:",fg=top_bg_color,bg=color,width=0).grid(row=2,column=0,ipadx=30,pady=8)
Label(setting_frame,text="Bias Value:",fg=top_bg_color,bg=color,width=0).grid(row=3,column=0,ipadx=30,pady=8)
global bs_value
bs_value=Entry(setting_frame,width=8)
bs_value.place(x=140,y=118)
bs_value.insert(10,str(default_bias_value))
Label(setting_frame,text="Coefficent:",fg=top_bg_color,bg=color,width=0).grid(row=4,column=0,ipadx=30,pady=8)
global coef
coef=Entry(setting_frame,width=8)
coef.place(x=140,y=154)
coef.insert(10,str(default_coefficent))
Label(setting_frame,text="Alert:",fg=top_bg_color,bg=color,width=0).grid(row=0,column=3,ipadx=30,pady=8)
Label(setting_frame,text="Max level:",fg=top_bg_color,bg=color,width=0).grid(row=1,column=3,ipadx=30,pady=8)
global mx_level
mx_level=Entry(setting_frame,width=8)
mx_level.place(x=465,y=45)
mx_level.insert(10,str(default_max_level))
Label(setting_frame,text="m.m.",fg=top_bg_color,bg=color,width=0).place(x=545,y=46)
Label(setting_frame,text="Hysteresis level:",fg=top_bg_color,bg=color,width=0).grid(row=2,column=3,ipadx=30,pady=8)
global hys_level
hys_level=Entry(setting_frame,width=8)
hys_level.place(x=465,y=81)
hys_level.insert(10,str(default_hysteresis_level))
Label(setting_frame,text="m.m.",fg=top_bg_color,bg=color,width=0).place(x=545,y=82)
Label(setting_frame,text="Mobile Phone 1:",fg=top_bg_color,bg=color,width=0).grid(row=3,column=3,ipadx=30,pady=8)
global mob_phone1
mob_phone1=Entry(setting_frame,width=15)
mob_phone1.place(x=465,y=117)
mob_phone1.insert(10,str(default_mobile_phone1))
Label(setting_frame,text="Mobile Phone 2:",fg=top_bg_color,bg=color,width=0).grid(row=4,column=3,ipadx=30,pady=8)
global mob_phone2
mob_phone2=Entry(setting_frame,width=15)
mob_phone2.place(x=465,y=153)
mob_phone2.insert(10,str(default_mobile_phone2))
Label(setting_frame,text="Mobile Phone 3:",fg=top_bg_color,bg=color,width=0).grid(row=5,column=3,ipadx=30,pady=8)
global mob_phone3
mob_phone3=Entry(setting_frame,width=15)
mob_phone3.place(x=465,y=189)
mob_phone3.insert(10,str(default_mobile_phone3))
Button(setting_frame,text="OK",command=ok).place(x=30,y=220)
Button(setting_frame,text="Set as default",command=setasdefault).place(x=120,y=220)
Button(setting_frame,text="Default values",command=defaultvals).place(x=300,y=220)
Button(setting_frame,text="Cancel",command=cncl).place(x=500,y=220)
def ok():
global sampling_rate
sampling_rate = int(samp_rate.get())
print(int(samp_rate.get()))
print(sampling_rate)
bias_value = int(bs_value.get())
coefficent = int(coef.get())
max_level = int(mx_level.get())
hysteresis_value = int(hys_level.get())
mobile_phone1 = mob_phone1.get()
mobile_phone2 = mob_phone2.get()
mobile_phone3 = mob_phone3.get()
conf_str = "http://sajab.sazabgolestan.com/server.php?action=station&imei=9359374362&station_index=3&status=1&sampling_rate=%d&bias_value=%d&coefficent=%d&max_level=%d&hysteresis_value=%d&mobile_phone1=%s&mobile_phone2=%s&mobile_phone3=%s" %(sampling_rate,bias_value,coefficent,max_level,hysteresis_value,mobile_phone1,mobile_phone2,mobile_phone3)
conf_file = open("conf.txt","w+")
conf_file.write(conf_str)
conf_file.close()
setting_frame.destroy()
def cncl():
setting_frame.destroy()
def defaultvals():
samp_rate.delete(0,END)
samp_rate.insert(10,str(default_sampling_rate))
bs_value.delete(0,END)
bs_value.insert(10,str(default_bias_value))
coef.delete(0,END)
coef.insert(10,str(default_coefficent))
mx_level.delete(0,END)
mx_level.insert(10,str(default_max_level))
hys_level.delete(0,END)
hys_level.insert(10,str(default_hysteresis_level))
mob_phone1.delete(0,END)
mob_phone1.insert(10,str(default_mobile_phone1))
mob_phone2.delete(0,END)
mob_phone2.insert(10,str(default_mobile_phone2))
mob_phone3.delete(0,END)
mob_phone3.insert(10,str(default_mobile_phone3))
def setasdefault():
    default_conf_str = "http://sajab.sazabgolestan.com/server.php?action=station&imei=9359374362&station_index=3&status=1&sampling_rate=%d&bias_value=%d&coefficent=%d&max_level=%d&hysteresis_value=%d&mobile_phone1=%s&mobile_phone2=%s&mobile_phone3=%s" %(default_sampling_rate,default_bias_value,default_coefficent,default_max_level,default_hysteresis_level,default_mobile_phone1,default_mobile_phone2,default_mobile_phone3)  # fixed: default_hysteresis_value was never defined
default_conf_file = open("default_conf.txt","w+")
default_conf_file.write(default_conf_str)
default_conf_file.close()
#Functions for splitting the different components of date and time
def nowYear():
now = datetime.datetime.now()
year = now.year
return str(year)
def nowMonth():
now = datetime.datetime.now()
month = now.month
return str(month)
def nowDay():
now = datetime.datetime.now()
day = now.day
return str(day)
def nowHour():
now = datetime.datetime.now()
hour = now.hour
return str(hour)
def nowMinute():
now = datetime.datetime.now()
minute = now.minute
return str(minute)
def nowSecond():
now = datetime.datetime.now()
second = now.second
return str(second)
def year_label(label):
def count1():
label.config(text=nowYear())
label.after(1000, count1)
count1()
def month_label(label):
def count2():
label.config(text=nowMonth())
label.after(1000, count2)
count2()
def day_label(label):
def count3():
label.config(text=nowDay())
label.after(1000, count3)
count3()
def hour_label(label):
def count4():
label.config(text=nowHour())
label.after(1000, count4)
count4()
def minute_label(label):
def count5():
label.config(text=nowMinute())
label.after(1000, count5)
count5()
def second_label(label):
def count6():
label.config(text=nowSecond())
label.after(1000, count6)
count6()
def about():
filewin = Toplevel(root)
tx ="""
Development by: Sina Meshkini
+98 911 380 6028
[email protected]
@SinaMeshkini
"""
message = Message(filewin, text=tx, relief = RIDGE , width = 400)
message.pack(fill="both", expand="yes")
#End Functions
#Desigen Param
color = 'lightblue'
top_fg_color = 'lightblue'
top_bg_color = '#111131'
#End Desigen Param
#Header
w = Canvas(root,width= 800,height= 100)
w.pack()
w.create_rectangle(0,0,800,100,fill=top_bg_color)
Label(root,text='SAJAB Management System',fg=top_fg_color,bg=top_bg_color,font="tahoma 24 bold",pady=10).place(x=150,y=5)
#Time
hourLabel = Label(root,text=nowHour(),fg=top_fg_color,bg=top_bg_color,font=("Ravie", 10))
hourLabel.place(x=time_xloc,y=time_yloc)
hour_label(hourLabel)
colon = Label(root, text = ":",fg=top_fg_color,bg=top_bg_color,font=("Ravie",14))
colon.place(x=time_xloc+step,y=time_yloc-5)
minuteLabel = Label(root, text = nowMinute(),fg=top_fg_color,bg=top_bg_color,font=("Ravie",10))
minuteLabel.place(x=time_xloc+2*step,y=time_yloc)
minute_label(minuteLabel)
colon = Label(root, text = ":",fg=top_fg_color,bg=top_bg_color,font=("Ravie",14))
colon.place(x=time_xloc+3*step,y=time_yloc-5)
secondLabel = Label(root, text = nowSecond(),fg=top_fg_color,bg=top_bg_color,font=("Ravie",10))
secondLabel.place(x=time_xloc+4*step,y=time_yloc)
second_label(secondLabel)
#End Time
#Date
yearLabel = Label(root,text=nowYear(),fg=top_fg_color,bg=top_bg_color,font=("Ravie", 10))
yearLabel.place(x=date_xloc,y=date_yloc)
year_label(yearLabel)
colon = Label(root, text = "/",fg=top_fg_color,bg=top_bg_color)
colon.place(x=date_xloc+36,y=date_yloc)
monthLabel = Label(root,text=nowMonth(),fg=top_fg_color,bg=top_bg_color,font=("Ravie", 10))
monthLabel.place(x=date_xloc+45,y=date_yloc)
month_label(monthLabel)
colon = Label(root, text = "/",fg=top_fg_color,bg=top_bg_color)
colon.place(x=date_xloc+60,y=date_yloc)
dayLabel = Label(root,text=nowDay(),fg=top_fg_color,bg=top_bg_color,font=("Ravie", 10))
dayLabel.place(x=date_xloc+68,y=date_yloc)
day_label(dayLabel)
#End Date
#Temp
temp_label = Label(root,text="Temp: ",fg=top_fg_color,bg=top_bg_color,font=("Ravie", 10))
temp_label.place(x=date_xloc,y=date_yloc+20)
#End Temp
#End Header
#Body
sensors = ['Sensor 1:','Sensor 2:','Sensor 3:','Sensor 4:']
relays = ['Relay 1','Relay 2','Relay 3','Relay 4']
r=0
for c in sensors:
Label(root,text=c,fg=top_bg_color,bg=color,width=0).place(x=table_x,y=table_y+r*stepx)
r=r+1
r=0
for c in relays:
Label(root,text=c,fg=top_bg_color,bg=color,width=0).place(x=table_x+300,y=table_y+r*stepx)
r=r+1
#Sensors Display
global sen1
sen1 = Label(root,fg=top_bg_color,bg=color)
sen1.place(x= table_x+130,y=table_y)
sensor1_read(sen1)
#End Sensors Display
#Relay control
Radiobutton(root,text="OFF",variable=v1,command=relay1,value=1).place(x= table_x+400,y=table_y)
Radiobutton(root,text="ON",variable=v1,command=relay1,value=0).place(x= table_x+500,y=table_y)
Radiobutton(root,text="OFF",variable=v2,command=relay2,value=1).place(x= table_x+400,y=table_y+stepx)
Radiobutton(root,text="ON",variable=v2,command=relay2,value=0).place(x= table_x+500,y=table_y+stepx)
Radiobutton(root,text="OFF",variable=v3,command=relay3,value=1).place(x= table_x+400,y=table_y+2*stepx)
Radiobutton(root,text="ON",variable=v3,command=relay3,value=0).place(x= table_x+500,y=table_y+2*stepx)
Radiobutton(root,text="OFF",variable=v4,command=relay4,value=1).place(x= table_x+400,y=table_y+3*stepx)
Radiobutton(root,text="ON",variable=v4,command=relay4,value=0).place(x= table_x+500,y=table_y+3*stepx)
#End Relay control
Label(root,text="Sampling rate:",fg=top_bg_color,bg=color,width=0).place(x=table_x+50,y=350)
global sr_label
sr_label = Label(root,fg=top_bg_color,bg=color)
sr_label.place(x=table_x+150,y=350)
#sr_read(sr_label)
Label(root,text="Sec.",fg=top_bg_color,bg=color,width=0).place(x=table_x+230,y=351)
Button(root,text="Setting",command=pass_check).place(x=700,y=410)
Button(root,text="Send Data",command=send_data).place(x=200,y=410)
#End Body
root.mainloop()
|
sinameshkini/python_samples
|
sajab4.py
|
sajab4.py
|
py
| 16,045 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "serial.Serial",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "serial.Serial",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO.setwarnings",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.setmode",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.BOARD",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.setup",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.OUT",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.setup",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.OUT",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.setup",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.OUT",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.setup",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.OUT",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.setup",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.OUT",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.output",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.output",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.output",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.output",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.setup",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.OUT",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.output",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.output",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.setup",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.OUT",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.output",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.output",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.setup",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.OUT",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.output",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.output",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.setup",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.OUT",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.output",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.output",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO.output",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 169,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.output",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 322,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 327,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 332,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 337,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 342,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 347,
"usage_type": "attribute"
}
] |
25004993355
|
from typing import cast, Any
from aea.skills.behaviours import TickerBehaviour
from aea.helpers.search.models import Constraint, ConstraintType, Query
from packages.fetchai.protocols.oef_search.message import OefSearchMessage
from packages.fetchai.skills.tac_control.dialogues import (
OefSearchDialogues,
)
from packages.gdp8.skills.agent_action_each_turn.strategy import BasicStrategy
DEFAULT_REGISTER_AND_SEARCH_INTERVAL = 5.0
environmentFound = False
DEFAULT_SEARCH_QUERY = {
"search_key": "env",## is that the key of the environment ?
"search_value": "v1",
"constraint_type": "==",
}
class EnvSearchBehaviour(TickerBehaviour):
"""This class scaffolds a behaviour."""
def setup(self) -> None:
"""
Implement the setup.
:return: None
"""
def act(self) -> None:
"""
Implement the act.
:return: None
"""
if not environmentFound:
self._search_for_environment()
def teardown(self) -> None:
"""
Implement the task teardown.
:return: None
"""
def _search_for_environment(self) -> None:
"""
Search for active environment (simulation controller).
We assume that the environment is registered as a service
(and with an attribute version = expected_version_id ## ??? do we really need to have that attribute ?)
:return: None
"""
## can add a filter: close to my service if there are too many results
service_key_filter = Constraint(
DEFAULT_SEARCH_QUERY["search_key"],
ConstraintType(
DEFAULT_SEARCH_QUERY["constraint_type"],
DEFAULT_SEARCH_QUERY["search_value"],
),
)
query = Query([service_key_filter],)
oef_search_dialogues = cast(
OefSearchDialogues, self.context.oef_search_dialogues
)
oef_search_msg, _ = oef_search_dialogues.create(
counterparty=self.context.search_service_address,
performative=OefSearchMessage.Performative.SEARCH_SERVICES,
query=query,
)
self.context.outbox.put_message(message=oef_search_msg)
self.context.logger.info(
"searching for environment, search_id={}".format(oef_search_msg.dialogue_reference)
)
class AgentLogicBehaviour(TickerBehaviour):
"""Behaviour looks at if actions required in each tick:
is there agent asking for water info? if so, tell them
is the round done (on my end)? if so, stop
is there enough info for making a decision? if so, do so,
if not, might have to send message to ask for info"""
def setup(self) -> None:
"""
Implement the setup.
:return: None
"""
pass
def act(self) -> None:
strategy = cast(BasicStrategy, self.context.strategy)
there_is_agent_asking_for_water_info = True
while there_is_agent_asking_for_water_info:
there_is_agent_asking_for_water_info = strategy.deal_with_an_agent_asking_for_water_info
if not strategy.is_round_done:
info_is_enough = strategy.enough_info_to_make_decision
if info_is_enough:
strategy.make_decision_send_to_env()
else:
asking_for_info = True
while asking_for_info:
asking_for_info = strategy.potentially_ask_for_info
def teardown(self) -> None:
"""
Implement the task teardown.
:return: None
"""
pass
|
DENE-dev/dene-dev
|
RQ1-data/exp2/1010-OCzarnecki@gdp8-e6988c211a76ac3a2736d49d00f0a6de8b44c3b0/agent_aea/skills/agent_action_each_turn/behaviours.py
|
behaviours.py
|
py
| 3,601 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "aea.skills.behaviours.TickerBehaviour",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "aea.helpers.search.models.Constraint",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "aea.helpers.search.models.ConstraintType",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "aea.helpers.search.models.Query",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "typing.cast",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "packages.fetchai.skills.tac_control.dialogues.OefSearchDialogues",
"line_number": 65,
"usage_type": "argument"
},
{
"api_name": "packages.fetchai.protocols.oef_search.message.OefSearchMessage.Performative",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "packages.fetchai.protocols.oef_search.message.OefSearchMessage",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "aea.skills.behaviours.TickerBehaviour",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "typing.cast",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "packages.gdp8.skills.agent_action_each_turn.strategy.BasicStrategy",
"line_number": 94,
"usage_type": "argument"
}
] |
7930998505
|
import numpy as np
import matplotlib.pyplot as plt
# only two states
def epidemie_1(temps = 50, population = 10**6):
    propagation = np.array( [[0.9 , 0.1], [0.3, 0.7]]) # 0 -> infected, 1 -> healthy
popu = np.array([0, 1])
X_temps = np.linspace(0, temps, temps)
Y_infectes = []
for t in range(temps):
Y_infectes.append(popu[0]*population)
popu = np.dot(popu, propagation)
plt.plot(X_temps, Y_infectes)
# illustration of a two-state Markov chain
# unrealistic model because of the slope; see worldometers.info just to get a sense of what the curve should look like
#epidemie_1()
#plt.show()
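# A minimal sketch (added, not in the original): the stationary distribution of
# the two-state chain above is the left eigenvector of the transition matrix
# for eigenvalue 1; here it is pi = (0.75, 0.25).
def stationary_distribution(P):
    """Left eigenvector of P for eigenvalue 1, normalized to sum to 1."""
    vals, vecs = np.linalg.eig(P.T)
    v = np.real(vecs[:, np.argmax(np.real(vals))])
    return v / v.sum()
# Example: stationary_distribution(np.array([[0.9, 0.1], [0.3, 0.7]]))
# -> array([0.75, 0.25])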
# six states this time:
def epidemie_2(temps = 100, population = 10**6):
    propagation = np.array( [
        [0.7, 0.2, 0, 0, 0.0001, 0.0999], # 0 -> infected, vaccinated
        [0.2, 0.8, 0, 0, 0, 0], # 1 -> healthy, vaccinated
        [0 , 0.2, 0.1, 0.7, 0, 0], # 2 -> healthy, unvaccinated
        [0, 0.2, 0, 0.7, 0.001, 0.099], # 3 -> infected, unvaccinated
        [0, 0, 0, 0, 1, 0 ], # 4 -> dead
        [0, 0, 0, 0, 0, 1 ] # 5 -> immune
        ])
popu = np.array([0, 0, 1, 0, 0, 0])
X_temps = np.linspace(0, temps, temps)
Y_infectes = []
for t in range(temps):
Y_infectes.append(popu[0]*population)
popu = np.dot(popu, propagation)
plt.plot(X_temps, Y_infectes)
#epidemie_2()
#plt.show()
# a much more complicated model because of the many states, but
# much more satisfying results
# we can tune how the epidemic spreads,
# taking into account people's reactions, governments, lockdowns (notably the infection probability going down and the vaccination probability going up, but also random mutation)
# Hidden Markov chains
# change of domain
def max_arg(liste):
    """Return the max of the list and the smallest index where it is reached."""
m = liste[0]
i_max = 0
for n in range(len(liste)):
if liste[n] > m:
m = liste[n]
i_max = n
return m, i_max
def Viterbi(A, B, Obs):
    # A: transition matrix
    # B: observation probabilities such that b_j(o_t) = B[j][Obs[t]]
    # We work with logarithms to avoid numerical underflow
    logA = np.log(A)
    logB = np.log(B)
    N = len(A)
    T = len(Obs)
    pointeurs = np.zeros((N, T), dtype=int) # used to backtrack the path at the end
    # best log-probability of a path ending in each state at time 0
    # (uniform initial distribution assumed)
    alpha = logB[:, Obs[0]] - np.log(N)
    for t in range(1, T):
        nouv_alpha = np.zeros(N)
        for j in range(N):
            # log is increasing, so it preserves the max;
            # the pointer stores the state i achieving the maximum: the previous state
            nouv_alpha[j], pointeurs[j][t] = max_arg([alpha[i] + logA[i][j] + logB[j][Obs[t]] for i in range(N)])
        alpha = nouv_alpha
    pmax, i_final = max_arg(list(alpha))
    pmax = np.exp(pmax)
    etats_successifs = np.zeros(T, dtype=int)
    i = i_final
    for t in range(T - 1, -1, -1):  # the original range(1, T+1, -1) was an empty loop
        etats_successifs[t] = i
        i = pointeurs[i][t]
    return pmax, etats_successifs
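# A minimal usage sketch (added, not in the original); the toy HMM below is
# hypothetical — two hidden states, two observation symbols:
# A_toy = np.array([[0.7, 0.3], [0.4, 0.6]])
# B_toy = np.array([[0.9, 0.1], [0.2, 0.8]])  # B_toy[j][o] = P(symbol o | state j)
# print(Viterbi(A_toy, B_toy, [0, 0, 1, 1]))  # most likely hidden state path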
def forward(A, B, Obs):
    # computes P(Obs) with the forward algorithm (uniform initial distribution assumed)
    N = len(A)
    T = len(Obs)  # the original len(B) only worked when T happened to equal N
    alpha = np.array([B[j][Obs[0]] / N for j in range(N)])
    for t in range(1, T):
        nouv_alpha = np.zeros(N)
        for j in range(N):
            nouv_alpha[j] = B[j][Obs[t]] * sum(alpha[i] * A[i][j] for i in range(N))
        alpha = nouv_alpha
    return sum(alpha)
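# Sketch (added): with the toy HMM sketched after Viterbi above,
# forward(A_toy, B_toy, [0, 0, 1, 1]) would return P(Obs) under that model.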
# def baum-welch(A,B, Obs):
#     if # convergence condition
#     else:
#         N = len(A)
#         T = len(Obs)
#         alphas = np.reshape(np.zeros(N*T), (N, T))
#         betas = np.reshape(np.zeros(N*T), (N, T))
#         # to be initialized properly with a for loop HERE
#         C = sum(alphas)
#         D = sum(beta)
#         alphas = alphas/C
#         beta = beta/D
#         for t in range(1, T): # build the tables
#             for i in range(N):
#                 alphas[i][t] = B[i][Obs[t]] *
#                 betas[i][t] =
# TEMPORARILY ABANDONED
def baum_welch_naif(A, B, Obs):
    N = len(A)
    T = len(Obs)
    A = np.array(A, dtype=float)
    B = np.array(B, dtype=float)
    alphas = np.zeros((T, N))
    betas = np.zeros((T, N))
    # compute all the alpha and beta values
    alphas[0] = B[:, Obs[0]]
    betas[T-1] = np.ones(N)
    for t in range(1, T):  # the original range(1, T-2) left the last rows empty
        for j in range(N):
            alphas[t][j] = B[j][Obs[t]] * sum(alphas[t-1][i] * A[i][j] for i in range(N))
            betas[T-1-t][j] = sum(A[j][i] * B[i][Obs[T-t]] * betas[T-t][i] for i in range(N))
    Pobs = sum(alphas[T-1])
    # E-step: expected transition counts (zeta) and state occupancies (gamma)
    zeta = np.zeros((T, N, N))
    gamma = np.zeros((T, N))
    for t in range(T-1):
        for i in range(N):
            for j in range(N):
                zeta[t][i][j] = alphas[t][i] * A[i][j] * B[j][Obs[t+1]] * betas[t+1][j] / Pobs
    for t in range(T):
        for i in range(N):
            gamma[t][i] = (alphas[t][i] * betas[t][i]) / Pobs
    # M-step: re-estimate A and B from the expected counts
    nouvA = np.zeros((N, N))
    nouvB = np.zeros((N, len(B[0])))
    for i in range(N):
        denom = sum(sum(zeta[t][i][k] for k in range(N)) for t in range(T))
        for j in range(N):
            nouvA[i][j] = sum(zeta[t][i][j] for t in range(T)) / denom
    for j in range(N):
        denom = sum(gamma[t][j] for t in range(T))
        for k in range(len(B[0])):
            for t in range(T):
                if Obs[t] == k:
                    nouvB[j][k] = nouvB[j][k] + gamma[t][j] / denom
    return nouvA, nouvB
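# Note (added): each Baum-Welch update is an EM step, so the likelihood
# P(Obs | A, B) is guaranteed not to decrease from one iteration to the next.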
def traite_fichier_adn():
nucleotide = open("adn_pur.txt", "r")
nombres = open("adn_traite", "a")
lignes = nucleotide.readlines()
N = ['a', 'c', 't', 'g']
for l in lignes:
for carac in l:
if carac == 'a':
nombres.write("0 ")
if carac == 'c':
nombres.write("1 ")
if carac == 't':
nombres.write("2 ")
if carac == 'g':
nombres.write("3 ")
nucleotide.close()
nombres.close()
adn = open("adn_traite", "r")
sequence = adn.readlines()
Ob = []
for ligne in sequence:
for nclt in ligne:
if nclt in ['0', '1', '2', '3']:
Ob.append(int(nclt))
adn.close()
def sequencageADN(Obs):
precision = 0.1
A = 0.25 * np.reshape(np.ones(16), (4, 4))
B = 0.25 * np.reshape(np.ones(16), (4, 4))
Ap, Bp = baum_welch_naif(A, B, Obs)
    while np.linalg.norm(A - Ap) > precision or np.linalg.norm(B - Bp) > precision:  # iterate until converged (the original '<' stopped immediately)
A = Ap
B = Bp
Ap, Bp = baum_welch_naif(A, B, Obs)
return A, B
#print(sequencageADN(Ob))
def ruine_du_joueur(N, p, T = 100):
X_t = np.zeros(2*N+1)
X_t[N] = 1.0
T = list(range(1, T))
A = np.reshape(np.zeros((2*N+1)**2), ((2*N+1),(2*N+1)))
A[0][0] = 1
A[-1][-1] = 1
for i in range(1, 2*N):
A[i][i-1] = 1-p
A[i][i+1] = p
print(A)
Argent = []
for t in T:
m = max(X_t)
for k in range(2*N+1):
if X_t[k] == m:
Argent.append(k)
break
X_t = np.dot(X_t, A)
plt.plot(T, Argent)
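# A hedged check (added, not in the original): for the gambler's ruin with
# absorbing barriers at 0 and 2N and win probability p != 1/2, the probability
# of reaching 2N before 0 when starting from N is the classical formula:
def prob_reach_top(N, p):
    r = (1 - p) / p
    return (1 - r**N) / (1 - r**(2 * N))
# Example: prob_reach_top(20, 0.55) is about 0.982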
import random as rd
def vol_du_joueur(N, p):
for _ in range(3):
X = [0]
Y = [N]
temps = 0
A = N
while A > 0 and A < 2*N:
temps += 1
if rd.random()< p:
A += 1
else:
A -= 1
X.append(temps)
Y.append(A)
plt.plot(X, Y)
plt.xlabel("temps")
plt.ylabel("Pieces")
plt.show()
#vol_du_joueur(20, 0.5)
def temps_de_vol(N,p):
Y = []
nb_essais = 100000
for k in range(nb_essais):
temps = 0
A = N
while A > 0 and A < 2*N:
temps += 1
if rd.random()< p:
A += 1
else:
A -= 1
Y.append(temps)
Yp = [0]*(max(Y)+1)
for y in Y:
Yp[y] += 1
plt.bar(list(range(max(Y)+1)), Yp, width=1.0, edgecolor = "#981FFA")
plt.show()
#temps_de_vol(200, 0.7)
import cmath
def mouvement_brownien(N):
position = 0 + 0j
X = [position]
t = 1
i = 1j
direction = 1
while t < N:
dir = rd.random()
dist = rd.random()
if dir < 0.05 or 0.50> dir > 0.45:
if dist <0.01:
direction *= cmath.exp(dir*2*np.pi*1j)
position = position + dist * direction
X.append(position)
t += 1
plt.plot( [ z.real for z in X], [z.imag for z in X])
plt.show()
mouvement_brownien(10000)
def baum_welch(A, B, Obs):
    # variant of baum_welch_naif that also computes scaling constants;
    # note: the constants are computed here but not yet used in the re-estimation
    N = len(A)
    T = len(Obs)
    A = np.array(A, dtype=float)
    B = np.array(B, dtype=float)
    alphas = np.zeros((T, N))
    betas = np.zeros((T, N))
    # compute all the alpha and beta values
    alphas[0] = B[:, Obs[0]]
    betas[T-1] = np.ones(N)
    for t in range(1, T):
        for j in range(N):
            alphas[t][j] = B[j][Obs[t]] * sum(alphas[t-1][i] * A[i][j] for i in range(N))
            betas[T-1-t][j] = sum(A[j][i] * B[i][Obs[T-t]] * betas[T-t][i] for i in range(N))
    Pobs = sum(alphas[T-1])
    constantesC = []
    for t in range(T):
        C = sum(alphas[t]) ** (-1)
        constantesC.append(C)  # the original appended to the undefined name 'constantes'
        # actually rescaling the alphas here would invalidate the E-step below,
        # so the constants are kept as diagnostics only
    constantesc = []  # fixed typo: was 'constancesc'
    for t in range(1, T):
        c = 0
        for y in range(N):
            c += np.dot(alphas[t-1], A[:, y]) * B[y][Obs[t]]
        constantesc.append(1 / c)
    # E-step: expected transition counts (zeta) and state occupancies (gamma)
    zeta = np.zeros((T, N, N))
    gamma = np.zeros((T, N))
    for t in range(T-1):
        for i in range(N):
            for j in range(N):
                zeta[t][i][j] = alphas[t][i] * A[i][j] * B[j][Obs[t+1]] * betas[t+1][j] / Pobs
    for t in range(T):
        for i in range(N):
            gamma[t][i] = (alphas[t][i] * betas[t][i]) / Pobs
    # M-step: re-estimate A and B from the expected counts
    nouvA = np.zeros((N, N))
    nouvB = np.zeros((N, len(B[0])))
    for i in range(N):
        denom = sum(sum(zeta[t][i][k] for k in range(N)) for t in range(T))
        for j in range(N):
            nouvA[i][j] = sum(zeta[t][i][j] for t in range(T)) / denom
    for j in range(N):
        denom = sum(gamma[t][j] for t in range(T))
        for k in range(len(B[0])):
            for t in range(T):
                if Obs[t] == k:
                    nouvB[j][k] = nouvB[j][k] + gamma[t][j] / denom
    return nouvA, nouvB
|
kmlst/TIPE-Coding-regions-in-DNA-with-Hidden-Markov-Model
|
tipe_code.py
|
tipe_code.py
|
py
| 10,617 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.array",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "numpy.log",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 273,
"usage_type": "name"
},
{
"api_name": "random.random",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 308,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 310,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 311,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 312,
"usage_type": "name"
},
{
"api_name": "random.random",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.bar",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 335,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 336,
"usage_type": "name"
},
{
"api_name": "random.random",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "cmath.exp",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 355,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 360,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 361,
"usage_type": "name"
},
{
"api_name": "numpy.reshape",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 407,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 407,
"usage_type": "call"
}
] |
2282915747
|
import torch
from torch import Tensor
from kornia.utils import one_hot
import torch.nn.functional as F
import numpy as np
from matplotlib import pyplot as plt
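# Regression loss for frame ordering: an MSE term against a synthetic
# monotone label plus a temporal-consistency penalty (ltemp). ED and ES are
# assumed to be end-diastole/end-systole frame indices, inferred from the
# surrounding cardiac-MR regression code.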
def reg_loss(prediction, ED, ES, device):
# print(prediction)
prediction_toSyn = prediction.squeeze().detach().cpu().numpy()
y_k = synthetic_label(prediction_toSyn, ED.numpy(), ES.numpy())
# print(prediction.squeeze())
# print(y_k)
# print(ED)
# print(ES)
# print('-----')
mse_loss = F.mse_loss(prediction.squeeze(), y_k.to(device))
temp_loss = ltemp(y_k, prediction_toSyn)
loss = mse_loss + temp_loss
return loss
def synthetic_label(prediction, ED, ES):
y_k = []
for k in range(len(prediction)):
if (int(ED) < k) and (k <= int(ES)):
y_k.append((abs((k-ES)/(ES-ED)))**3)
# print(1)
else:
y_k.append((abs((k-ES)/(ES-ED)))**(1/3))
# print(y_k)
# plt.plot(y_k)
# plt.savefig('y_k.png')
return torch.from_numpy(np.array(y_k, dtype= "float32"))
def ltemp(y_k, prediction):
Linc = linc(y_k, prediction)
Ldec = ldec(y_k, prediction)
ltemp = (Linc+Ldec)/2
# print(ltemp)
return torch.from_numpy(np.array(ltemp, dtype= "float32"))
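# linc/ldec implement the temporal penalty: a hinge is charged wherever the
# prediction decreases while the synthetic label y_k increases (linc), or
# increases while y_k decreases (ldec).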
def linc(y_k, prediction):
Linc = 0
for k in range(len(prediction)-1):
if y_k[k+1] > y_k[k]:
Linc = Linc + max(0,prediction[k]-prediction[k+1])
# print('linc')
return Linc/len(prediction)
def ldec(y_k, prediction):
Ldec = 0
for k in range(len(prediction)-1):
if y_k[k+1] < y_k[k]:
Ldec = Ldec + max(0,prediction[k+1]-prediction[k])
# print('ldec')
return Ldec/len(prediction)
|
carlesgarciac/regression
|
regression-cmr/utils/reg_loss.py
|
reg_loss.py
|
py
| 1,707 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.nn.functional.mse_loss",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "torch.from_numpy",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 44,
"usage_type": "call"
}
] |
41843852670
|
# Code courtesy: https://towardsdatascience.com/support-vector-machine-python-example-d67d9b63f1c8
# Theory: https://www.youtube.com/watch?v=_PwhiWxHK8o
import numpy as np
import cvxopt
from sklearn.datasets.samples_generator import make_blobs
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
class SVM:
"""
"""
def __init__(self):
self.alpha = None
self.w = None
self.b = None
self.support_vectors = None
self.support_vector_y = None
def fit(self, X_train, y_train):
n_samples, n_features = X_train.shape
        # P[i, j] = y_i * y_j * K(x_i, x_j), where K is the linear kernel x.z
K = np.zeros((n_samples, n_samples))
for i in range(n_samples):
for j in range(n_samples):
K[i, j] = np.dot(X_train[i], X_train[j])
P = cvxopt.matrix(np.outer(y_train, y_train) * K)
# q = -1 (1xN)
q = cvxopt.matrix(np.ones(n_samples) * -1)
# A = y_train^T
A = cvxopt.matrix(y_train, (1, n_samples))
# b = 0
b = cvxopt.matrix(0.0)
# -1 (NxN)
G = cvxopt.matrix(np.diag(np.ones(n_samples) * -1))
# 0 (1xN)
h = cvxopt.matrix(np.zeros(n_samples))
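        # Solve the dual QP: minimize (1/2) a^T P a + q^T a
        # subject to G a <= h (i.e. a_i >= 0) and A a = b (i.e. sum_i a_i y_i = 0).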
solution = cvxopt.solvers.qp(P, q, G, h, A, b)
# Lagrange multipliers
a = np.ravel(solution['x'])
        # keep only support vectors: samples with non-zero Lagrange multipliers
sv = a > 1e-5
ind = np.arange(len(a))[sv]
self.alpha = a[sv]
self.support_vectors = X_train[sv]
self.support_vector_y = y_train[sv]
# Intercept
self.b = 0
for n in range(len(self.alpha)):
self.b += self.support_vector_y[n]
self.b -= np.sum(self.alpha * self.support_vector_y * K[ind[n], sv])
self.b /= len(self.alpha)
# Weights
self.w = np.zeros(n_features)
for n in range(len(self.alpha)):
self.w += self.alpha[n] * self.support_vector_y[n] * self.support_vectors[n]
def predict(self, X_test):
        return np.sign(np.dot(X_test, self.w) + self.b)
def f1_score(self, X_test, y_test):
pass
if __name__ == '__main__':
X, y = make_blobs(n_samples=250, centers=2, random_state=100, cluster_std=1)
y[y == 0] = -1
tmp = np.ones(len(X))
y = tmp * y
plt.scatter(X[:, 0], X[:, 1], c=y, cmap='winter')
plt.show()
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
svm = SVM()
svm.fit(X_train, y_train)
def f(x, w, b, c=0):
return (-w[0] * x - b + c) / w[1]
plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap='winter')
# w.x + b = 0
a0 = -4
a1 = f(a0, svm.w, svm.b)
    b0 = 4
    b1 = f(b0, svm.w, svm.b)
    plt.plot([a0, b0], [a1, b1], 'k')
    # w.x + b = 1
    a0 = -4
    a1 = f(a0, svm.w, svm.b, 1)
    b0 = 4
    b1 = f(b0, svm.w, svm.b, 1)
    plt.plot([a0, b0], [a1, b1], 'k--')
    # w.x + b = -1
    a0 = -4
    a1 = f(a0, svm.w, svm.b, -1)
    b0 = 4
b1 = f(b0, svm.w, svm.b, -1)
plt.plot([a0, b0], [a1, b1], 'k--')
plt.show()
|
gmortuza/machine-learning-scratch
|
machine_learning/instance_based/svm/svm.py
|
svm.py
|
py
| 3,218 |
python
|
en
|
code
| 6 |
github-code
|
6
|
[
{
"api_name": "numpy.zeros",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "cvxopt.matrix",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.outer",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "cvxopt.matrix",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "cvxopt.matrix",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cvxopt.matrix",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "cvxopt.matrix",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.diag",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "cvxopt.matrix",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "cvxopt.solvers.qp",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "cvxopt.solvers",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "numpy.ravel",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets.samples_generator.make_blobs",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 109,
"usage_type": "name"
}
] |
39540715020
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 15 2021
@author: sagrana
"""
from rest_framework import status
from rest_framework.generics import CreateAPIView, RetrieveAPIView
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from .models import User
from .serializers import UserRegistrationSerializer
from .serializers import UserLoginSerializer
class UserRegistrationView(CreateAPIView):
""""UserRegistrationView
"""
serializer_class = UserRegistrationSerializer
permission_classes = (AllowAny,)
def post(self, request, *args, **kwargs):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
response = {
'success': 'True',
'status code': status.HTTP_200_OK,
'message': 'User registered successfully',
}
status_code = status.HTTP_200_OK
return Response(response, status=status_code)
class UserLoginView(RetrieveAPIView):
"""UserLoginView
"""
permission_classes = (AllowAny,)
serializer_class = UserLoginSerializer
queryset = User.objects.all()
def post(self, request):
"""post
:param request:
:return:
"""
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
response = {
'success': 'True',
'status code': status.HTTP_200_OK,
'message': 'User logged in successfully',
'token': serializer.data['token'],
}
status_code = status.HTTP_200_OK
return Response(response, status=status_code)
|
theRuthless/stark_ly3000_web_app
|
backend/users/views.py
|
views.py
|
py
| 1,740 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "rest_framework.generics.CreateAPIView",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "serializers.UserRegistrationSerializer",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "rest_framework.permissions.AllowAny",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "rest_framework.generics.RetrieveAPIView",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "rest_framework.permissions.AllowAny",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "serializers.UserLoginSerializer",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "models.User.objects.all",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "rest_framework.status.HTTP_200_OK",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.status",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 59,
"usage_type": "call"
}
] |
27643482594
|
from rest_framework.decorators import api_view
from rest_framework.response import Response
from base.serializers import ProductSerializer, UserSerializer, UserSerializerWithToken
from base.models import Product
@api_view(['GET'])
def getProducts(request):
query = request.query_params.get('keyword')
    if query is None:
query = ""
products = Product.objects.filter(name__icontains=query)
serializer = ProductSerializer(products, many=True)
return Response(serializer.data)
@api_view(['GET'])
def getTopProducts(request):
products = Product.objects.filter(rating__gte=4).order_by('-rating')[0:5]
serializer = ProductSerializer(products, many=True)
return Response(serializer.data)
@api_view(['GET'])
def getProduct(request, pk):
product = Product.objects.get(_id=pk)
serializer = ProductSerializer(product, many=False)
return Response(serializer.data)
|
hitrocs-polito/smart-bozor
|
base/views/product_views.py
|
product_views.py
|
py
| 917 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "base.models.Product.objects.filter",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "base.models.Product.objects",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "base.models.Product",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "base.serializers.ProductSerializer",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "rest_framework.decorators.api_view",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "base.models.Product.objects.filter",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "base.models.Product.objects",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "base.models.Product",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "base.serializers.ProductSerializer",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "rest_framework.decorators.api_view",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "base.models.Product.objects.get",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "base.models.Product.objects",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "base.models.Product",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "base.serializers.ProductSerializer",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "rest_framework.decorators.api_view",
"line_number": 27,
"usage_type": "call"
}
] |
38169404173
|
from . import parse as parser
from modules import YaraRules, GeoIP, ProjectHoneyPot, LangDetect
class Scanner:
def __init__(self):
self.yara_manager = YaraRules.YaraManager()
def parse_email(self, email_content: str):
return parser.parse_email(email_content)
def scan(self, email: str):
# parse it
parsed_email = self.parse_email(email)
# Use LangDetect on the body of the email, check if it's HTML or not
# If it's HTML, use BeautifulSoup to parse it
# If it's not HTML, just continue like usual
content = parsed_email.get_payload()
#lang = LangDetect.detect_language(content)
potentialLanguage = []
# Loop around the content if it has multiple parts
# Then use the LangDetect to detect the language of each part
# Append the result to the potentialLanguage list
if parsed_email.is_multipart():
for part in parsed_email.walk():
content_type = part.get_content_type()
content_disposition = str(part.get("Content-Disposition"))
# Extract text/plain content
if "attachment" not in content_disposition and "text/plain" in content_type:
content = part.get_payload(decode=True)
print("Content -> ", content)
                    # decode the bytes to str, dropping undecodable characters and whitespace noise
content = content.decode('utf-8', 'ignore')
content = content.replace("\r\n", "")
content = content.replace("\n", "")
content = content.replace("\t", "")
lang = LangDetect.detect_language(content)
potentialLanguage.append(lang)
else:
continue
print("Language -> ", potentialLanguage)
# get ip and geoip
ip = parsed_email.get("Received-SPF").split("client-ip=")[1].split(";")[0]
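        # Assumes a Received-SPF header with a "client-ip=..." clause is
        # present; messages without one will raise an AttributeError here.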
print("IP Address -> " + str(ip))
#geoip = GeoIP.GeoIP(ip)
#print("GeoIP -> ", geoip)
# check if ip is in honeypot
honeypot = ProjectHoneyPot.ProjectHoneyPot(ip)
print("Honeypot -> " + str(honeypot))
# analyze it
analysis_result = self.yara_manager.analyze_email(email)
# return the result
return {
"analysis_result": analysis_result,
"parsed_email": parsed_email,
#"geoip": geoip,
"honeypot": honeypot,
}
|
lukasolsen/EmailAnalyser
|
server/base/service/scan.py
|
scan.py
|
py
| 2,296 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "modules.YaraRules.YaraManager",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "modules.YaraRules",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "modules.LangDetect.detect_language",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "modules.LangDetect",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "modules.ProjectHoneyPot.ProjectHoneyPot",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "modules.ProjectHoneyPot",
"line_number": 57,
"usage_type": "name"
}
] |
34913449577
|
import numpy as np
import scipy.integrate
import scipy.optimize
import bokeh.plotting
from bokeh.plotting import figure, output_file, show
import bokeh.io
from bokeh.models import Span
def dilute(molecule_diluted,molecules_0,DR=0.2): #input Object want to dilute and where the parameters is stored
molecules_0[molecule_diluted.idx] *= (1-DR)
def replenish(molecule_replenished, molecules_0, DR=0.2) : #input Object want to replenish and where the parameters is stored
molecules_0[molecule_replenished.idx] += molecule_replenished.lc * DR
def dilute_species(molecules_diluted,molecules_0,DR=0.2): #dilute a list of molecules
for molecule in (molecules_diluted):
dilute(molecule,molecules_0,DR)
def replenish_species(molecules_replenished, molecules_0, DR=0.2) : #replenish a list of molecules
for molecule in (molecules_replenished):
replenish(molecule,molecules_0,DR)
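# One chemostat cycle: integrate the ODE model over a 15-unit window, then
# remove a fraction DR of every diluted species and top each replenished
# species back up by lc*DR, mimicking continuous-culture outflow and inflow.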
def run_model(model,t,parameters_list,molecules_0,dilute_list,replenish_list,result_all,DR=0.2):
start_cycle,end_cycle = np.array(t)*4
for n in range (start_cycle,end_cycle):
#define time
t_start= n*15
t_end = (n+1)*15
t= np.linspace(t_start,t_end,2)
#solve equation and save result
result = scipy.integrate.odeint(model, molecules_0, t, args=parameters_list)
result_all = np.append(result_all,result[1])
#update parameter
molecules_0 = result.transpose()[:,-1]
#dilution
###diute out
dilute_species((dilute_list),molecules_0,DR)
###replenish
replenish_species((replenish_list),molecules_0,DR)
return result_all,molecules_0
def plot_result(molecule):
t = np.linspace(0, 15*(len(molecule)-1), len(molecule))
p = bokeh.plotting.figure(
plot_width=800,
plot_height=400,
x_axis_label="t",
y_axis_type="linear",
)
colors = bokeh.palettes.d3["Category10"][3]
# Populate glyphs
p.line(
t/60, molecule, line_width=2, color=colors[0]
)
vline1 = Span(location=4, dimension='height', line_color='black', line_width=1,line_dash='dashed')
vline2 = Span(location=16, dimension='height', line_color='black', line_width=1,line_dash='dashed')
p.add_layout(vline1)
p.add_layout(vline2)
show(p)
def plot_result_two_state(molecule):
t = np.linspace(0, 15*(len(molecule)-1), len(molecule))
p = bokeh.plotting.figure(
plot_width=800,
plot_height=400,
x_axis_label="t",
y_axis_type="linear",
)
colors = bokeh.palettes.d3["Category10"][3]
# Populate glyphs
p.line(
t/60, molecule, line_width=2, color=colors[0]
)
vline1 = Span(location=4, dimension='height', line_color='black', line_width=1,line_dash='dashed')
#vline2 = Span(location=16, dimension='height', line_color='black', line_width=1,line_dash='dashed')
p.add_layout(vline1)
#p.add_layout(vline2)
show(p)
|
william831015/GRN-in-chemostat
|
scripts/functions.py
|
functions.py
|
py
| 3,004 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.array",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.integrate.odeint",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.integrate",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "scipy.integrate",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "numpy.append",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "bokeh.plotting.plotting.figure",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "bokeh.plotting.plotting",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "bokeh.plotting",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "bokeh.plotting.palettes",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "bokeh.plotting",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "bokeh.models.Span",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "bokeh.models.Span",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "bokeh.plotting.show",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "bokeh.plotting.plotting.figure",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "bokeh.plotting.plotting",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "bokeh.plotting",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "bokeh.plotting.palettes",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "bokeh.plotting",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "bokeh.models.Span",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "bokeh.plotting.show",
"line_number": 95,
"usage_type": "call"
}
] |
35574468725
|
from collections import defaultdict
def createGraph():
g=defaultdict(list)
return g
def topoSort(g,indeg,q,cnt,n,res):
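    # Kahn's algorithm (BFS): repeatedly dequeue zero-indegree vertices and
    # lower the indegree of their neighbours; if fewer than n vertices are
    # processed, the graph contains a cycle and no topological order exists.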
for i in range(n):
        if indeg[i] == 0:
q.append(i)
while(q):
cur=q.pop(0)
for i in g[cur]:
indeg[i]-=1
            if indeg[i] == 0:
q.append(i)
res.append(cur)
cnt+=1
        if cnt == n:
return True
return False
def kahnsAlgo(g,indeg,n):
q,res=[],[]
    if topoSort(g, indeg, q, 0, n, res):
return res
return []
if __name__ == "__main__":
# prequisites=[[1,0],[2,0],[3,1],[3,2]]
prequisites=[[1,0],[2,1],[3,2],[1,3]]
g=createGraph()
n=4
indeg=[0]*n
for i,j in prequisites:
g[j].append(i)
indeg[i]+=1
ans=kahnsAlgo(g,indeg,n)
print(ans)
|
goyalgaurav64/Graph
|
topological-sort-kahns-algo-bfs.py
|
topological-sort-kahns-algo-bfs.py
|
py
| 868 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "collections.defaultdict",
"line_number": 3,
"usage_type": "call"
}
] |
1149669859
|
from lib.contents_reader import ContentsReader
import asyncio
CLEAR_SCREEN = "\u001b[2J"
NEW_LINE = "\r\n"
class ZineFunctions:
def __init__(self, reader, writer, index_file_path):
self.reader = reader
self.writer = writer
self.contents_reader = ContentsReader(index_file_path)
async def run_index(self):
for welcome_line in self.contents_reader.read_hello_file():
self.writer.write(welcome_line)
await self.writer.drain()
# Read one byte (any key)
await self.reader.read(1)
running = True
while (running):
for index_line in self.contents_reader.read_index_lines():
self.writer.write(index_line)
item_choice = await self.reader.read(1)
item_choice_int = -1
if item_choice.upper() == 'X':
running = False
continue
item_choice_int = self.contents_reader.map_input_to_numerical_index(item_choice)
if item_choice_int == -1:
self.writer.write(f"{NEW_LINE}{NEW_LINE}Pick a story, or X to quit.{NEW_LINE}")
continue
self.writer.write(f"{NEW_LINE}{NEW_LINE}...you picked: %s" % (item_choice))
self.writer.write(f"{NEW_LINE}{NEW_LINE}...press RETURN to start reading, and to continue after each page")
await self.reader.read(1)
self.writer.write(NEW_LINE + CLEAR_SCREEN)
await asyncio.sleep(1)
await self.run_story(item_choice_int)
self.disconnect()
async def run_story(self, story_number):
page_number = 1
story_lines = self.contents_reader.read_story(story_number, page_number)
while len(story_lines) > 0:
self.writer.write(CLEAR_SCREEN)
for story_line in story_lines:
self.writer.write(story_line)
await self.writer.drain()
char_read = await self.reader.readline()
page_number += 1
story_lines = self.contents_reader.read_story(story_number, page_number)
def disconnect(self):
self.writer.close()
|
caraesten/dial_a_zine
|
dialazine/lib/zine_functions.py
|
zine_functions.py
|
py
| 2,161 |
python
|
en
|
code
| 58 |
github-code
|
6
|
[
{
"api_name": "lib.contents_reader.ContentsReader",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "asyncio.sleep",
"line_number": 36,
"usage_type": "call"
}
] |
19107028474
|
"""Extract data on near-Earth objects and close approaches from CSV and JSON files.
The `load_neos` function extracts NEO data from a CSV file, formatted as
described in the project instructions, into a collection of `NearEarthObject`s.
The `load_approaches` function extracts close approach data from a JSON file,
formatted as described in the project instructions, into a collection of
`CloseApproach` objects.
The main module calls these functions with the arguments provided at the command
line, and uses the resulting collections to build an `NEODatabase`.
You'll edit this file in Task 2.
"""
import csv
import json
from models import NearEarthObject, CloseApproach
def load_neos(neo_csv_path):
"""Read near-Earth object information from a CSV file.
:param neo_csv_path: A path to a CSV file containing data about near-Earth objects.
:return: A collection of `NearEarthObject`s.
"""
neo_list = []
neo_collection = []
with open(neo_csv_path) as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
neo_list.append((dict([('designation',row['pdes']), ('name', row['name']),('diameter', row['diameter']),('hazardous', row['pha'])])))
for neo_dict in neo_list:
neo_collection.append(NearEarthObject(**neo_dict))
return neo_collection
def load_approaches(cad_json_path):
"""Read close approach data from a JSON file.
:param cad_json_path: A path to a JSON file containing data about close approaches.
:return: A collection of `CloseApproach`es.
"""
cap_list = []
cap_collection = []
with open(cad_json_path, 'r') as json_file:
json_reader = json.load(json_file)
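        # Indices 0, 3, 4 and 7 of each data row are assumed to hold the
        # designation, close-approach time, distance and relative velocity,
        # matching the column layout of the NASA/JPL CAD JSON feed.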
for i in range(len(json_reader['data'])):
cap_list += [dict(zip(['_designation', 'time', 'distance', 'velocity'], [json_reader['data'][i][0], json_reader['data'][i][3], json_reader['data'][i][4], json_reader['data'][i][7]]))]
for cap_dict in cap_list:
cap_collection.append(CloseApproach(**cap_dict))
return cap_collection
|
rcmadden/Near-Earth-Objects
|
extract.py
|
extract.py
|
py
| 2,061 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "csv.DictReader",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "models.NearEarthObject",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "models.CloseApproach",
"line_number": 56,
"usage_type": "call"
}
] |
14988584675
|
from setuptools import setup
package_name = 'leg_controller'
setup(
name=package_name,
version='0.0.0',
packages=[package_name],
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
],
install_requires=['setuptools'],
zip_safe=True,
maintainer='pi',
maintainer_email='[email protected]',
description='TODO: Package description',
license='TODO: License declaration',
tests_require=['pytest'],
entry_points={
'console_scripts': [
"servo_node = leg_controller.servoController:main",
"kin_node = leg_controller.pointToAngle:main",
"animation_node = leg_controller.simpleCommands:main"
],
},
)
|
PetriJF/Hexapod
|
src/leg_controller/setup.py
|
setup.py
|
py
| 813 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "setuptools.setup",
"line_number": 5,
"usage_type": "call"
}
] |
13303958661
|
from fastapi import APIRouter, Depends
from app.dependencies import verify_api_key
from app.libraries.libpoller import Poller
from app.schemas.poller import PollerModel, PollerUpdateModel, PollerCreateModel
router = APIRouter(tags=["poller"])
oPoller = Poller()
# Poller Requests ( API_KEY required )
@router.get("/poller/devices")
async def get_poller_devices(poller = Depends(verify_api_key)):
return await oPoller.get_poller_devices(poller)
# CRUID Poller Requests ( JWT required )
@router.get("/poller/schema")
async def get_poller_schema(joined: bool = False):
return await oPoller.get_poller_schema(joined=joined)
@router.get("/poller")
async def get_poller_list(joined: bool = False, limit: int = 100, offset: int = 0, sortField: str = None, sortOrder: str = "asc", search: str = ""):
return await oPoller.get_poller_list(joined=joined, limit=limit, offset=offset, sortField=sortField, sortOrder=sortOrder, search=search)
@router.get("/poller/{pollerid}")
async def get_poller(pollerid: int, joined: bool = False):
return await oPoller.get_poller(pollerid, joined=joined)
@router.post("/poller")
async def create_poller(poller: PollerCreateModel):
return await oPoller.create_poller(poller)
@router.put("/poller/{pollerid}")
async def update_poller(pollerid: int, poller: PollerUpdateModel):
return await oPoller.update_poller(pollerid, poller)
@router.delete("/poller/{pollerid}")
async def delete_poller(pollerid: int):
return await oPoller.delete_poller(pollerid)
|
treytose/Pyonet-API
|
pyonet-api/app/routers/poller.py
|
poller.py
|
py
| 1,534 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "fastapi.APIRouter",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "app.libraries.libpoller.Poller",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "fastapi.Depends",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "app.dependencies.verify_api_key",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "app.schemas.poller.PollerCreateModel",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "app.schemas.poller.PollerUpdateModel",
"line_number": 35,
"usage_type": "name"
}
] |
39253810380
|
from mangaki.models import Artist, Manga, Genre
from django.db.utils import IntegrityError, DataError
import re
from collections import Counter
def run():
with open('../data/manga-news/manga.csv') as f:
next(f)
artists = {}
hipsters = Counter()
for i, line in enumerate(f):
# print(len(line.split(';;')))
title, vo_title, writer, mangaka, editor, origin, genre1, genre2, manga_type, synopsis, poster = line.split(';;')
for artist in [writer, mangaka]:
if artist in artists:
continue
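                # Author strings look like "LASTNAME Firstname": the last name
                # is expected in upper case (accented capitals included).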
                m = re.match(r"^([A-ZÔÛÏ'-]+) (.*)$", artist)
if m:
last_name, first_name = m.groups()
last_name = last_name.lower().capitalize()
if not m:
first_name = ''
last_name = artist
if Artist.objects.filter(first_name=first_name, last_name=last_name).count() == 0:
a = Artist(first_name=first_name, last_name=last_name)
a.save()
else:
a = Artist.objects.get(first_name=first_name, last_name=last_name)
artists[artist] = a
with open('../data/manga-news/manga.csv') as f:
next(f)
for i, line in enumerate(f):
title, vo_title, writer, mangaka, editor, origin, genre1, genre2, manga_type, synopsis, poster = line.split(';;')
try:
if Manga.objects.filter(title=title, vo_title=vo_title).count() == 0:
manga = Manga(title=title, vo_title=vo_title, mangaka=artists[mangaka], writer=artists[writer], editor=editor, origin=origin.lower().replace('hong kong', 'hong-kong').replace('international', 'intl'), manga_type=manga_type.lower(), source='', poster=poster, synopsis=synopsis)
manga.save()
else:
manga = Manga.objects.get(title=title, vo_title=vo_title)
if genre1:
manga.genre.add(Genre.objects.get(title=genre1))
if genre2:
manga.genre.add(Genre.objects.get(title=genre2))
except IntegrityError as err:
print(line)
print(writer)
print(err)
break
except DataError as err:
print(line)
print(origin)
print(err)
break
except Genre.DoesNotExist as err:
print(line)
print('Genres: [%s] [%s]' % (genre1, genre2))
print(err)
break
run()
|
mangaki/mangaki
|
mangaki/tools/add_manga.py
|
add_manga.py
|
py
| 2,689 |
python
|
en
|
code
| 137 |
github-code
|
6
|
[
{
"api_name": "collections.Counter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "mangaki.models.Artist.objects.filter",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "mangaki.models.Artist.objects",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "mangaki.models.Artist",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "mangaki.models.Artist",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "mangaki.models.Artist.objects.get",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "mangaki.models.Artist.objects",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "mangaki.models.Artist",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "mangaki.models.Manga.objects.filter",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "mangaki.models.Manga.objects",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "mangaki.models.Manga",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "mangaki.models.Manga",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "mangaki.models.Manga.objects.get",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "mangaki.models.Manga.objects",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "mangaki.models.Manga",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "mangaki.models.Genre.objects.get",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "mangaki.models.Genre.objects",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "mangaki.models.Genre",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "mangaki.models.Genre.objects.get",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "mangaki.models.Genre.objects",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "mangaki.models.Genre",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "django.db.utils.IntegrityError",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "django.db.utils.DataError",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "mangaki.models.Genre.DoesNotExist",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "mangaki.models.Genre",
"line_number": 55,
"usage_type": "name"
}
] |
16421467025
|
# Test the models with LG_chem stock
# If the prediction is success, Expand the number of stock
import math
import os
import pdb
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
from sklearn.preprocessing import MinMaxScaler
base = os.path.abspath(__file__)
base = base.split('/')
def save_stock_plot(rawdata, stock_name="LG_chem"):
global base
try:
plt.figure(figsize=(20,5))
plt.plot(range(len(rawdata)), rawdata['Close'])
path = "/".join(base[:-2]+["models"])
file_name = f"/{stock_name}.jpg"
path += file_name
plt.savefig(path)
print("Save Success!!")
except Exception as e:
print(f"Save Stock plot Failed!!: {e}")
class windowDataset(Dataset):
def __init__(self, y, input_window=80, output_window=20, stride=5, n_attr=1):
        # total number of data points
L = y.shape[0]
        # total number of samples produced when sliding by stride
num_samples = (L - input_window - output_window) // stride + 1
if n_attr == 1:
            # input and output windows
X = np.zeros([input_window, num_samples])
Y = np.zeros([output_window, num_samples])
for i in np.arange(num_samples):
start_x = stride*i
end_x = start_x + input_window
X[:,i] = y[start_x:end_x]
start_y = stride*i + input_window
end_y = start_y + output_window
Y[:,i] = y[start_y:end_y]
X = X.reshape(X.shape[0], X.shape[1], n_attr)
Y = Y.reshape(Y.shape[0], Y.shape[1], n_attr)
X = X.transpose((1,0,2))
Y = Y.transpose((1,0,2))
self.x = X
self.y = Y
else:
            # input and output windows
X = np.zeros([input_window, n_attr, num_samples])
Y = np.zeros([output_window, n_attr, num_samples])
for i in np.arange(num_samples):
start_x = stride*i
end_x = start_x + input_window
X[:,:,i] = y[start_x:end_x]
start_y = stride*i + input_window
end_y = start_y + output_window
Y[:,:,i] = y[start_y:end_y]
X = X.reshape(X.shape[2], X.shape[0], X.shape[1])
Y = Y.reshape(Y.shape[2], Y.shape[0], Y.shape[1])
self.x = X
self.y = Y
self.len = len(X)
def __getitem__(self, i):
return self.x[i], self.y[i]
#return self.x[i], self.y[i, :-1], self.y[i,1:]
def __len__(self):
return self.len
class TFModel(nn.Module):
def __init__(self,iw, ow, d_model, nhead, nlayers, dropout=0.5, n_attr=1):
super(TFModel, self).__init__()
self.encoder_layer = nn.TransformerEncoderLayer(d_model=d_model, nhead=nhead, dropout=dropout)
self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=nlayers)
self.pos_encoder = PositionalEncoding(d_model, dropout)
self.encoder = nn.Sequential(
nn.Linear(n_attr, d_model//2),
nn.ReLU(),
nn.Linear(d_model//2, d_model)
)
self.linear = nn.Sequential(
nn.Linear(d_model, d_model//2),
nn.ReLU(),
nn.Linear(d_model//2, n_attr)
)
self.linear2 = nn.Sequential(
nn.Linear(iw, (iw+ow)//2),
nn.ReLU(),
nn.Linear((iw+ow)//2, ow)
)
def generate_square_subsequent_mask(self, sz):
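        # Causal (upper-triangular) mask: position i may attend only to
        # positions <= i; blocked entries are -inf so softmax zeroes them out.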
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
def forward(self, src, srcmask):
src = self.encoder(src)
src = self.pos_encoder(src)
output = self.transformer_encoder(src.transpose(0,1), srcmask).transpose(0,1)
output = self.linear(output)[:,:,0]
output = self.linear2(output)
return output
class TFModel2(nn.Module):
def __init__(self,d_model, nhead, nhid, nlayers, dropout=0.5, n_attr=7):
super(TFModel2, self).__init__()
self.transformer = nn.Transformer(d_model=d_model, nhead=nhead, dim_feedforward=nhid, num_encoder_layers=nlayers, num_decoder_layers=nlayers,dropout=dropout)
self.pos_encoder = PositionalEncoding(d_model, dropout)
self.pos_encoder_d = PositionalEncoding(d_model, dropout)
self.linear = nn.Linear(d_model, n_attr)
self.encoder = nn.Linear(n_attr, d_model)
self.encoder_d = nn.Linear(n_attr, d_model)
def generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
def forward(self, src, tgt, srcmask, tgtmask):
src = self.encoder(src)
src = self.pos_encoder(src)
tgt = self.encoder_d(tgt)
tgt = self.pos_encoder_d(tgt)
output = self.transformer(src.transpose(0,1), tgt.transpose(0,1), srcmask, tgtmask)
output = self.linear(output)
return output
class PositionalEncoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
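        # Standard sinusoidal positional encoding (Vaswani et al., 2017):
        # pe[pos, 2i] = sin(pos / 10000^(2i/d_model)), pe[pos, 2i+1] = cos(...)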
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
def gen_attention_mask(x):
mask = torch.eq(x, 0)
return mask
def evaluate(data_train, device, model, iw, n_attr, length):
    # Feed the last iw days as input and obtain the prediction for the following window.
input = torch.tensor(data_train[-iw:]).reshape(1,-1,n_attr).to(device).float().to(device)
model.eval()
src_mask = model.generate_square_subsequent_mask(input.shape[1]).to(device)
predictions = model(input, src_mask)
return predictions.detach().cpu().numpy()
"""
input = torch.tensor(data_train[-iw:]).reshape(1,-1,n_attr).to(device).float().to(device)
output = torch.tensor(data_train[-1].reshape(1,-1,n_attr)).float().to(device)
model.eval()
for i in range(length):
src_mask = model.generate_square_subsequent_mask(input.shape[1]).to(device)
tgt_mask = model.generate_square_subsequent_mask(output.shape[1]).to(device)
predictions = model(input, output, src_mask, tgt_mask).transpose(0,1)
predictions = predictions[:, -1:, :]
output = torch.cat([output, predictions.to(device)], axis=1)
return torch.squeeze(output, axis=0).detach().cpu().numpy()[1:]
"""
def predict(stock, period):
global base
print(f"Notice: Since it is in the initial stage of the service, \
we predict only the stock price of LG Chem, not the stock price \
of the designated company.\n\n")
    # Instead of this code, rawdata should be the data that Jisu fetched via SPL.
    # Without the additional features there are 1729 days of data.
print(f"Loading Stock Data ...")
n_attr = 1
path = "/".join(base[:-3]+["data","lg_chem_closing_prices.csv"])
model_path = "/".join(base[:-2]+["Prediction", f"{stock}_{datetime.now().date()}.pth"])
rawdata = pd.read_csv(path)
print(f"Saving Stock data as .png ...")
save_stock_plot(rawdata, stock)
#pdb.set_trace()
print(f"Preprocessing Data with MinMaxScaling ...")
min_max_scaler = MinMaxScaler()
rawdata["Close"] = min_max_scaler.fit_transform(rawdata["Close"].to_numpy().reshape(-1,n_attr))
print(f"Spliting Data ...")
iw = 30*7
ow = 10
train = rawdata[:-iw]
data_train = train["Close"].to_numpy()
test = rawdata[-iw:]
data_test = test["Close"].to_numpy()
print(f"Preparing Dataset ...")
train_dataset = windowDataset(data_train, input_window=iw, output_window=ow, stride=1, n_attr=n_attr)
train_loader = DataLoader(train_dataset, batch_size=64)
#test_dataset = windowDataset(data_test, input_window=iw, output_window=ow, stride=1, n_attr=n_attr)
#test_loader = DataLoader(test_dataset)
"""
    # To improve performance, feed features other than the closing price as well.
    # There are 1720 days of data in total.
print(f"Loading Stock Data ...")
n_attr = 7
path = "/".join(base[:-3]+["data","lg_chem_prices.csv"])
rawdata = pd.read_csv(path)
#print(f"Saving Stock data as .png ...")
#save_stock_plot(rawdata, stock)
print(f"Preprocessing Data with MinMaxScaling ...")
min_max_scaler = MinMaxScaler()
rawdata.loc[:,rawdata.columns] = min_max_scaler.fit_transform(rawdata.to_numpy())
print(f"Spliting Data ...")
iw = 60
ow = 5
#pdb.set_trace()
train = rawdata[:-(iw)]
data_train = train.to_numpy()
test = rawdata[-(iw):]
data_test = test.to_numpy()
print(f"Preparing Dataset ...")
train_dataset = windowDataset(data_train, input_window=iw, output_window=ow, stride=1, n_attr=n_attr)
train_loader = DataLoader(train_dataset, batch_size=64)
#test_dataset = windowDataset(data_test, input_window=iw, output_window=ow, stride=1, n_attr=n_attr)
#test_loader = DataLoader(test_dataset)
"""
print(f"Model Constructing ...")
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
lr = 1e-4
#model = TFModel2(256, 8, 256, 2, 0.1, n_attr).to(device)
model = TFModel(iw, ow, 512, 8, 4, 0.4, n_attr).to(device)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
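    # NOTE: if a checkpoint for today already exists, training is skipped,
    # but the saved weights are never loaded back, so the freshly constructed
    # (untrained) model would be evaluated below.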
if not os.path.exists(model_path):
print("Trainig ...")
epoch = 10
model.train()
for i in range(epoch):
batchloss = 0.0
for (inputs, outputs) in tqdm(train_loader):
optimizer.zero_grad()
src_mask = model.generate_square_subsequent_mask(inputs.shape[1]).to(device)
result = model(inputs.float().to(device), src_mask)
loss = criterion(result, outputs[:,:,0].float().to(device))
loss.backward()
optimizer.step()
batchloss += loss
print(f"{i+1}th epoch MSEloss:" + "{:0.6f}".format(batchloss.cpu().item() / len(train_loader)))
torch.save(model, model_path)
"""
model.train()
progress = tqdm(range(epoch))
for i in progress:
batchloss = 0.0
for (inputs, dec_inputs, outputs) in train_loader:
optimizer.zero_grad()
src_mask = model.generate_square_subsequent_mask(inputs.shape[1]).to(device)
tgt_mask = model.generate_square_subsequent_mask(dec_inputs.shape[1]).to(device)
result = model(inputs.float().to(device), dec_inputs.float().to(device), src_mask, tgt_mask)
loss = criterion(result.permute(1,0,2), outputs.float().to(device))
loss.backward()
optimizer.step()
batchloss += loss
progress.set_description("{:0.5f}".format(batchloss.cpu().item() / len(train_loader)))
"""
torch.save(model.state_dict(), model_path)
print("Predicting ...")
result = evaluate(data_test, device, model, iw, n_attr, ow)
result = min_max_scaler.inverse_transform(result)[0]
real = rawdata["Close"].to_numpy()
real = min_max_scaler.inverse_transform(real.reshape(-1,1))[:,0]
#pdb.set_trace()
"""
tmp = np.zeros((10,7))
tmp[:,:] = result.reshape(10,-1)
result = tmp
result = min_max_scaler.inverse_transform(result).reshape(-1,10)[3]
real = rawdata.to_numpy()
real = min_max_scaler.inverse_transform(real)[:,3]
"""
plt.figure(figsize=(20,5))
#plt.plot(range(1419,1719),real[1420:], label="real")
plt.plot(range(1419,1719),real[1418:],label="real")
plt.plot(range(1719-ow,1719),result, label="predict")
plt.legend()
path = "/".join(base[:-2]+["models","prediction2.jpg"])
plt.savefig(path)
print(f"Complete!!")
    # Compare the mean of the predicted prices with the latest real value: if the mean is higher, recommend buying; otherwise, don't.
mean_pred = np.mean(result)
if mean_pred >= real[-1]:
answer = f"""You should buy the stock you want to know the price, because we predict the price will rise.
Maybe it will be {mean_pred}won."""
else:
answer = f"""You shouldn't buy the stock you want to know the price, because we predict the price will go down.
Maybe it will be {mean_pred}won."""
return answer
if __name__=="__main__":
print(predict("", ""))
|
groundwater98/Miraeasset_Bigdata_Festival
|
ML/Prediction/predict.py
|
predict.py
|
py
| 13,225 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "os.path.abspath",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "torch.nn.TransformerEncoderLayer",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "torch.nn.TransformerEncoder",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "torch.triu",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "torch.nn.Transformer",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "torch.triu",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "torch.zeros",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "torch.arange",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "torch.float",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "torch.exp",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "torch.arange",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "torch.sin",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "torch.cos",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "torch.eq",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.MinMaxScaler",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 263,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 267,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 270,
"usage_type": "attribute"
},
{
"api_name": "tqdm.tqdm",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 326,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 328,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 329,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 330,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 332,
"usage_type": "name"
},
{
"api_name": "numpy.mean",
"line_number": 336,
"usage_type": "call"
}
] |
29157516812
|
#!/usr/bin/env python3
import asyncio
from mavsdk import System
from mavsdk.gimbal import GimbalMode, ControlMode
async def run():
# Init the drone
drone = System()
await drone.connect(system_address="udp://:14540")
# Start printing gimbal position updates
print_gimbal_position_task = \
asyncio.ensure_future(print_gimbal_position(drone))
print("Taking control of gimbal")
await drone.gimbal.take_control(ControlMode.PRIMARY)
# Set the gimbal to YAW_LOCK (= 1) mode (see docs for the difference)
# Other valid values: YAW_FOLLOW (= 0)
# YAW_LOCK will fix the gimbal pointing to an absolute direction,
# whereas YAW_FOLLOW will point relative to vehicle heading.
print("Setting gimbal mode")
await drone.gimbal.set_mode(GimbalMode.YAW_FOLLOW)
print("Look forward first")
await drone.gimbal.set_pitch_and_yaw(0, 0)
await asyncio.sleep(1)
print("Look down")
await drone.gimbal.set_pitch_and_yaw(-90, 0)
await asyncio.sleep(2)
print("Back to horizontal")
await drone.gimbal.set_pitch_and_yaw(0, 0)
await asyncio.sleep(2)
print("Slowly look up")
await drone.gimbal.set_pitch_rate_and_yaw_rate(10, 0)
await asyncio.sleep(3)
print("Back to horizontal")
await drone.gimbal.set_pitch_and_yaw(0, 0)
await asyncio.sleep(2)
print("Look right")
await drone.gimbal.set_pitch_and_yaw(0, 90)
await asyncio.sleep(2)
print("Look forward again")
await drone.gimbal.set_pitch_and_yaw(0, 0)
await asyncio.sleep(2)
print("Slowly look to the left")
await drone.gimbal.set_pitch_rate_and_yaw_rate(0, -20)
await asyncio.sleep(3)
print("Look forward again")
await drone.gimbal.set_pitch_and_yaw(0, 0)
await asyncio.sleep(2)
# Set the gimbal to track a region of interest (lat, lon, altitude)
# Units are degrees and meters MSL respectively
print("Look at a ROI (region of interest)")
await drone.gimbal.set_roi_location(47.39743832, 8.5463316, 488)
await asyncio.sleep(3)
print("Look forward again")
await drone.gimbal.set_pitch_and_yaw(0, 0)
await asyncio.sleep(2)
print("Release control of gimbal again")
await drone.gimbal.release_control()
print_gimbal_position_task.cancel()
async def print_gimbal_position(drone):
# Report gimbal position updates asynchronously
# Note that we are getting gimbal position updates in
# euler angles; we can also get them as quaternions
async for angle in drone.telemetry.camera_attitude_euler():
print(f"Gimbal pitch: {angle.pitch_deg}, yaw: {angle.yaw_deg}")
if __name__ == "__main__":
# Run the asyncio loop
asyncio.run(run())
|
mavlink/MAVSDK-Python
|
examples/gimbal.py
|
gimbal.py
|
py
| 2,709 |
python
|
en
|
code
| 246 |
github-code
|
6
|
[
{
"api_name": "mavsdk.System",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "asyncio.ensure_future",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "mavsdk.gimbal.ControlMode.PRIMARY",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "mavsdk.gimbal.ControlMode",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "mavsdk.gimbal.GimbalMode.YAW_FOLLOW",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "mavsdk.gimbal.GimbalMode",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "asyncio.sleep",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "asyncio.sleep",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "asyncio.sleep",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "asyncio.sleep",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "asyncio.sleep",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "asyncio.sleep",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "asyncio.sleep",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "asyncio.sleep",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "asyncio.sleep",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "asyncio.sleep",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "asyncio.sleep",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "asyncio.run",
"line_number": 89,
"usage_type": "call"
}
] |
26023685530
|
import numpy as np
import matplotlib.pyplot as plt
from fealpy.mesh.uniform_mesh_2d import UniformMesh2d
from scipy.sparse.linalg import spsolve
#from ..decorator import cartesian
class MembraneOscillationPDEData:  # see the corresponding code in FEALPy
    def __init__(self, D=[0, 1, 0, 1], T=[0, 5]):
        """
        @brief Model initialization
        @param[in] D spatial domain of the model
        @param[in] T time domain of the model
        """
        self._domain = D
        self._duration = T

    def domain(self):
        """
        @brief Spatial domain
        """
        return self._domain

    def duration(self):
        """
        @brief Time interval
        """
        return self._duration

    def source(self, p, t):
        """
        @brief Right-hand side term of the equation
        @param[in] p numpy.ndarray, spatial points
        @param[in] t float, time point
        @return 0
        """
        return np.zeros_like(p[..., 0])

    def init_solution(self, p):
        """
        @brief Initial condition
        @param[in] p numpy.ndarray, spatial points
        @return val
        """
        x, y = p[..., 0], p[..., 1]
        val = x**2*(x+y)
        return val

    def init_solution_diff_t(self, p):
        """
        @brief Time derivative of the initial condition
        @param[in] p numpy.ndarray, spatial points
        """
        return np.zeros_like(p[..., 0])

    #@cartesian
    def dirichlet(self, p, t):
        """
        @brief Dirichlet boundary condition
        @param[in] p numpy.ndarray, spatial points
        @param[in] t float, time point
        @return boundary values
        """
        return np.zeros_like(p[..., 0])
pde = MembraneOscillationPDEData()
# Spatial discretization
domain = pde.domain()
nx = 100
ny = 100
hx = (domain[1] - domain[0])/nx
hy = (domain[3] - domain[2])/ny
mesh = UniformMesh2d([0, nx, 0, ny], h=(hx, hy), origin=(domain[0], domain[2]))
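# Editor's note: the uniform mesh carries (nx+1) x (ny+1) nodes, so the
# node-based interpolations below all return arrays of that shape.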
# Temporal discretization
duration = pde.duration()
nt = 1000
tau = (duration[1] - duration[0])/nt
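# Editor's check (assuming unit wave speed, as in this model):
# rx = tau/hx = 0.005/0.01 = 0.5 = ry, so rx**2 + ry**2 = 0.5 <= 1 and the
# explicit scheme below satisfies its CFL stability condition.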
# Prepare the initial values
uh0 = mesh.interpolate(pde.init_solution, 'node') # (nx+1, ny+1)
vh0 = mesh.interpolate(pde.init_solution_diff_t, 'node') # (nx+1, ny+1)
uh1 = mesh.function('node') # (nx+1, ny+1)
def advance_explicit(n, *frags):
    """
    @brief Advance one time step with the explicit scheme
    @param[in] n int, the index of the time step
    """
    t = duration[0] + n*tau
    if n == 0:
        return uh0, t
    elif n == 1:
        rx = tau/hx
        ry = tau/hy
        uh1[1:-1, 1:-1] = 0.5*rx**2*(uh0[0:-2, 1:-1] + uh0[2:, 1:-1]) + \
                          0.5*ry**2*(uh0[1:-1, 0:-2] + uh0[1:-1, 2:]) + \
                          (1 - rx**2 - ry**2)*uh0[1:-1, 1:-1] + tau*vh0[1:-1, 1:-1]
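        # Editor's note: this first step comes from a Taylor expansion. With
        # u_tt = u_xx + u_yy, u^1 ≈ u^0 + tau*v^0 + (tau**2/2)*Δ_h u^0, which
        # expands to exactly the weighted average above.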
        gD = lambda p: pde.dirichlet(p, t)
        mesh.update_dirichlet_bc(gD, uh1)
        return uh1, t
    else:
        A = mesh.wave_operator_explicit(tau)
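        # Editor's note (inferred from the n == 1 scheme above): A encodes the
        # standard two-level update uh2 = A@uh1 - uh0 + tau**2*f, i.e.
        # A ≈ 2I + tau**2 * Δ_h on the interior nodes.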
        source = lambda p: pde.source(p, t + tau)
        f = mesh.interpolate(source, intertype='node')
        f *= tau**2
        uh2 = A@uh1.flat - uh0.flat + f.flat
        uh0[:] = uh1[:]
        uh1.flat = uh2
        gD = lambda p: pde.dirichlet(p, t + tau)
        mesh.update_dirichlet_bc(gD, uh1)
        #solution = lambda p: pde.solution(p, t + tau)
        #e = mesh.error(solution, uh1, errortype='max')
        #print(f"the max error is {e}")
        return uh1, t
def advance_implicit(n, *frags):
    """
    @brief Advance one time step with the implicit scheme
    @param[in] n int, the index of the time step
    """
    t = duration[0] + n*tau
    if n == 0:
        return uh0, t
    elif n == 1:
        rx = tau/hx
        ry = tau/hy
        uh1[1:-1, 1:-1] = 0.5*rx**2*(uh0[0:-2, 1:-1] + uh0[2:, 1:-1]) + \
                          0.5*ry**2*(uh0[1:-1, 0:-2] + uh0[1:-1, 2:]) + \
                          (1 - rx**2 - ry**2)*uh0[1:-1, 1:-1] + tau*vh0[1:-1, 1:-1]
        gD = lambda p: pde.dirichlet(p, t)
        mesh.update_dirichlet_bc(gD, uh1)
        return uh1, t
    else:
        A0, A1, A2 = mesh.wave_operator_implicit(tau)
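        # Editor's note (inferred from the solve below): A0 acts on the
        # unknown new level while A1 and A2 act on the two known levels,
        # giving the linear system A0 @ uh2 = A1 @ uh1 + A2 @ uh0 + tau**2 * f.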
        source = lambda p: pde.source(p, t + tau)
        f = mesh.interpolate(source, intertype='node')
        f *= tau**2
        f.flat += A1@uh1.flat + A2@uh0.flat
        uh0[:] = uh1[:]
        gD = lambda p: pde.dirichlet(p, t + tau)
        A0, f = mesh.apply_dirichlet_bc(gD, A0, f)
        uh1.flat = spsolve(A0, f)
        #solution = lambda p: pde.solution(p, t + tau)
        #e = mesh.error(solution, uh1, errortype='max')
        #print(f"the max error is {e}")
        return uh1, t
"""
box = [0, 1, 0, 1, 0, 5]
fig, axes = plt.subplots()
mesh.show_animation(fig, axes, box, advance_explicit,
                    fname='explicit.mp4', plot_type='imshow', frames=nt+1)
plt.show()
"""
box = [0, 1, 0, 1, -2, 2]
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
axes = fig.add_subplot(111, projection='3d')
mesh.show_animation(fig, axes, box, advance_explicit,
                    fname='explicit.mp4', plot_type='surface', frames=nt+1)
plt.show()
"""
box = [0, 1, 0, 1, -1, 1]
fig, axes = plt.subplots()
mesh.show_animation(fig, axes, box, advance_implicit,fname='implicit.mp4', plot_type='imshow', frames=nt+1)
plt.show()
box = [0, 1, 0, 1, -2.0, 2.0]
fig = plt.figure()
axes = fig.add_subplot(111, projection='3d')
mesh.show_animation(fig, axes, box, advance_implicit,fname='implicit.mp4', plot_type='surface', frames=nt+1)
plt.show()
"""
|
suanhaitech/pythonstudy2023
|
Mia_wave/wace_2.py
|
wace_2.py
|
py
| 5,381 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "numpy.zeros_like",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "fealpy.mesh.uniform_mesh_2d.UniformMesh2d",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.linalg.spsolve",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 186,
"usage_type": "name"
}
] |