hexsha (stringlengths 40) | size (int64, 5 to 2.06M) | ext (stringclasses, 10 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 3 to 248) | max_stars_repo_name (stringlengths 5 to 125) | max_stars_repo_head_hexsha (stringlengths 40 to 78) | max_stars_repo_licenses (listlengths 1 to 10) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (stringlengths 24, nullable) | max_stars_repo_stars_event_max_datetime (stringlengths 24, nullable) | max_issues_repo_path (stringlengths 3 to 248) | max_issues_repo_name (stringlengths 5 to 125) | max_issues_repo_head_hexsha (stringlengths 40 to 78) | max_issues_repo_licenses (listlengths 1 to 10) | max_issues_count (int64, 1 to 67k, nullable) | max_issues_repo_issues_event_min_datetime (stringlengths 24, nullable) | max_issues_repo_issues_event_max_datetime (stringlengths 24, nullable) | max_forks_repo_path (stringlengths 3 to 248) | max_forks_repo_name (stringlengths 5 to 125) | max_forks_repo_head_hexsha (stringlengths 40 to 78) | max_forks_repo_licenses (listlengths 1 to 10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (stringlengths 24, nullable) | max_forks_repo_forks_event_max_datetime (stringlengths 24, nullable) | content (stringlengths 5 to 2.06M) | avg_line_length (float64, 1 to 1.02M) | max_line_length (int64, 3 to 1.03M) | alphanum_fraction (float64, 0 to 1) | count_classes (int64, 0 to 1.6M) | score_classes (float64, 0 to 1) | count_generators (int64, 0 to 651k) | score_generators (float64, 0 to 1) | count_decorators (int64, 0 to 990k) | score_decorators (float64, 0 to 1) | count_async_functions (int64, 0 to 235k) | score_async_functions (float64, 0 to 1) | count_documentation (int64, 0 to 1.04M) | score_documentation (float64, 0 to 1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1904779111ad03b3fcf85f5ea88241c74cfe55ac | 200 | py | Python | tushare/bond/bonds.py | li-yong/tushare | 26da8129fb770e26128b9c2cebc7ef72c9491243 | [
"BSD-3-Clause"
]
| 12,490 | 2015-01-11T09:49:07.000Z | 2022-03-31T15:03:16.000Z | tushare/bond/bonds.py | li-yong/tushare | 26da8129fb770e26128b9c2cebc7ef72c9491243 | [
"BSD-3-Clause"
]
| 1,532 | 2015-02-05T11:20:59.000Z | 2022-03-29T13:30:26.000Z | tushare/bond/bonds.py | li-yong/tushare | 26da8129fb770e26128b9c2cebc7ef72c9491243 | [
"BSD-3-Clause"
]
| 4,867 | 2015-01-07T08:18:09.000Z | 2022-03-31T07:03:53.000Z | # -*- coding:utf-8 -*-
"""
Investment reference data interface
Created on 2017/10/01
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
def get_bond_info(code):
pass
if __name__ == '__main__':
pass | 11.764706 | 26 | 0.635 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 150 | 0.694444 |
1905b552f6b906092520144e21a33c6cfbd7fe0b | 883 | py | Python | src/save_docs.py | j-c-m-code/gutenbergsearch | b08f69d1d35fcca57e8ad0fcceaab614b9104abc | [
"MIT"
]
| null | null | null | src/save_docs.py | j-c-m-code/gutenbergsearch | b08f69d1d35fcca57e8ad0fcceaab614b9104abc | [
"MIT"
]
| null | null | null | src/save_docs.py | j-c-m-code/gutenbergsearch | b08f69d1d35fcca57e8ad0fcceaab614b9104abc | [
"MIT"
]
| null | null | null | """
Processes a folder of .txt files into spaCy docs, then saves the docs
"""
# first import standard modules
import glob
import os
from pathlib import Path
# then import third-party modules
import spacy
# finally import my own code (PEP-8 convention)
from askdir import whichdir
nlp = spacy.load("en_core_web_lg")
source_directory = whichdir()
os.chdir(source_directory)
filelist = glob.glob("*.txt")
output_directory = whichdir()
for filename in filelist:
with open(filename, "r", encoding="utf-8") as f:
novel = f.read()
# the novel is too long for the default, so increase allocated memory
nlp.max_length = len(novel) + 100
# Process a text
doc = nlp(novel)
short_name = Path(filename).stem
# r for raw string--no escape characters
# f for format string--allow me to pass in variable
doc.to_disk(rf"{output_directory}\{short_name}")
| 23.864865 | 73 | 0.711212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 425 | 0.481314 |
190623703fe56b71a26a9d008afda8919d9e105d | 253 | py | Python | output/models/nist_data/list_pkg/nmtoken/schema_instance/nistschema_sv_iv_list_nmtoken_max_length_2_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
]
| 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/list_pkg/nmtoken/schema_instance/nistschema_sv_iv_list_nmtoken_max_length_2_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
]
| 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/list_pkg/nmtoken/schema_instance/nistschema_sv_iv_list_nmtoken_max_length_2_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
]
| null | null | null | from output.models.nist_data.list_pkg.nmtoken.schema_instance.nistschema_sv_iv_list_nmtoken_max_length_2_xsd.nistschema_sv_iv_list_nmtoken_max_length_2 import NistschemaSvIvListNmtokenMaxLength2
__all__ = [
"NistschemaSvIvListNmtokenMaxLength2",
]
| 42.166667 | 194 | 0.893281 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.146245 |
190750f0b978b05cd4e96bab0a727296c6a7e5d0 | 462 | py | Python | jython/src/sample_src.py | adrianpothuaud/Sikuli-WS | 6210a949768fb4eb2b80693818ae3eb31ec9c406 | [
"MIT"
]
| 1 | 2018-02-20T16:28:45.000Z | 2018-02-20T16:28:45.000Z | jython/src/sample_src.py | adrianpothuaud/Sikuli-WS | 6210a949768fb4eb2b80693818ae3eb31ec9c406 | [
"MIT"
]
| null | null | null | jython/src/sample_src.py | adrianpothuaud/Sikuli-WS | 6210a949768fb4eb2b80693818ae3eb31ec9c406 | [
"MIT"
]
| null | null | null | # -*- coding:utf-8 -*-
"""
file: src/sample.py
Sample Source file
==================
Description
-----------
Sample description ...
Content
-------
- say_hello_sikuli
Status
------
Test with: tests/sample.py
last verification date: xx/xx/xxxx
last verification status: XX
"""
from sikuli import *
def say_hello_sikuli():
"""
    Display a "Hello World" popup via Sikuli.
    :return: None
"""
popup("Hello World !", title="Sikuli")
| 12.486486 | 42 | 0.515152 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 386 | 0.835498 |
190774ef04a1a93a7a9832bb8db9d5fd37d72396 | 533 | py | Python | k8s/images/codalab/apps/chahub/provider.py | abdulari/codalab-competitions | fdfbb77ac62d56c6b4b9439935037f97ffcd1423 | [
"Apache-2.0"
]
| 333 | 2015-12-29T22:49:40.000Z | 2022-03-27T12:01:57.000Z | k8s/images/codalab/apps/chahub/provider.py | abdulari/codalab-competitions | fdfbb77ac62d56c6b4b9439935037f97ffcd1423 | [
"Apache-2.0"
]
| 1,572 | 2015-12-28T21:54:00.000Z | 2022-03-31T13:00:32.000Z | k8s/images/codalab/apps/chahub/provider.py | abdulari/codalab-competitions | fdfbb77ac62d56c6b4b9439935037f97ffcd1423 | [
"Apache-2.0"
]
| 107 | 2016-01-08T03:46:07.000Z | 2022-03-16T08:43:57.000Z | from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class ChaHubAccount(ProviderAccount):
pass
class ChaHubProvider(OAuth2Provider):
"""Chahub OAuth authentication backend"""
id = 'chahub'
name = 'ChaHub'
account_class = ChaHubAccount
package = 'apps.chahub'
def get_default_scope(self):
return ['read', 'write']
providers.registry.register(ChaHubProvider)
| 23.173913 | 74 | 0.75985 | 297 | 0.557223 | 0 | 0 | 0 | 0 | 0 | 0 | 83 | 0.155722 |
1907beae999c84a846e911c9160f122031a33418 | 3,046 | py | Python | tools/perf/contrib/cluster_telemetry/screenshot_unittest.py | zipated/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
]
| 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | tools/perf/contrib/cluster_telemetry/screenshot_unittest.py | cangulcan/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
]
| 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | tools/perf/contrib/cluster_telemetry/screenshot_unittest.py | cangulcan/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
]
| 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import shutil
import tempfile
from telemetry import decorators
from telemetry.testing import options_for_unittests
from telemetry.testing import page_test_test_case
from telemetry.util import image_util
from contrib.cluster_telemetry import screenshot
class ScreenshotUnitTest(page_test_test_case.PageTestTestCase):
def setUp(self):
self._options = options_for_unittests.GetCopy()
self._png_outdir = tempfile.mkdtemp('_png_test')
def tearDown(self):
shutil.rmtree(self._png_outdir)
@decorators.Enabled('linux')
def testScreenshot(self):
# Screenshots for Cluster Telemetry purposes currently only supported on
# Linux platform.
page_set = self.CreateStorySetFromFileInUnittestDataDir(
'screenshot_test.html')
measurement = screenshot.Screenshot(self._png_outdir)
self.RunMeasurement(measurement, page_set, options=self._options)
path = self._png_outdir + '/' + page_set.stories[0].file_safe_name + '.png'
self.assertTrue(os.path.exists(path))
self.assertTrue(os.path.isfile(path))
self.assertTrue(os.access(path, os.R_OK))
image = image_util.FromPngFile(path)
screenshot_pixels = image_util.Pixels(image)
special_colored_pixel = bytearray([217, 115, 43])
self.assertTrue(special_colored_pixel in screenshot_pixels)
@decorators.Enabled('linux')
def testIsScreenshotWithinDynamicContentThreshold(self):
# TODO(lchoi): This unit test fails on Windows due to an apparent platform
# dependent image decoding behavior that will need to be investigated in the
# future if Cluster Telemetry ever becomes compatible with Windows.
width = 2
height = 1
num_total_pixels = width * height
content_pixels = bytearray([0, 0, 0, 128, 128, 128])
base_screenshot = image_util.FromRGBPixels(width, height, content_pixels)
next_pixels = bytearray([1, 1, 1, 128, 128, 128])
next_screenshot = image_util.FromRGBPixels(width, height, next_pixels)
expected_pixels = bytearray([0, 255, 255, 128, 128, 128])
self.assertTrue(screenshot.IsScreenshotWithinDynamicContentThreshold(
base_screenshot, next_screenshot, content_pixels,
num_total_pixels, 0.51))
self.assertTrue(expected_pixels == content_pixels)
next_pixels = bytearray([0, 0, 0, 1, 1, 1])
next_screenshot = image_util.FromRGBPixels(2, 1, next_pixels)
expected_pixels = bytearray([0, 255, 255, 0, 255, 255])
self.assertTrue(screenshot.IsScreenshotWithinDynamicContentThreshold(
base_screenshot, next_screenshot, content_pixels,
num_total_pixels, 0.51))
self.assertTrue(expected_pixels == content_pixels)
self.assertFalse(screenshot.IsScreenshotWithinDynamicContentThreshold(
base_screenshot, next_screenshot, content_pixels,
num_total_pixels, 0.49))
| 41.726027 | 80 | 0.738345 | 2,618 | 0.859488 | 0 | 0 | 2,363 | 0.775772 | 0 | 0 | 521 | 0.171044 |
1908a3c6547cb1830569167b36fc11ceff479110 | 652 | py | Python | bosm2015/pcradmin_old/urls.py | dvm-bitspilani/BITS-BOSM-2015 | df3e69ee6ee9b179a2d6cd6cad61423c177dbe0a | [
"MIT"
]
| 1 | 2015-09-15T17:19:30.000Z | 2015-09-15T17:19:30.000Z | bosm2015/pcradmin_old/urls.py | DVM-BITS-Pilani/BITS-BOSM-2015 | df3e69ee6ee9b179a2d6cd6cad61423c177dbe0a | [
"MIT"
]
| null | null | null | bosm2015/pcradmin_old/urls.py | DVM-BITS-Pilani/BITS-BOSM-2015 | df3e69ee6ee9b179a2d6cd6cad61423c177dbe0a | [
"MIT"
]
| 1 | 2016-03-28T19:44:41.000Z | 2016-03-28T19:44:41.000Z | from pcradmin import views
from django.conf.urls import url, include
urlpatterns = [
url(r'^(?P<pagename>\w+)/', views.index),
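    # Note: patterns are matched top-down, so this catch-all intercepts any
    # "<word>/" URL (with a trailing slash) before the more specific
    # patterns below are tried.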
#url(r'^sendmail$', views.sendmail),
#url(r'^sentmail$', views.sentmail),
url(r'^changelimit$', views.change_team_limits),
url(r'^change_team_limit$', views.change_team_limit_list),
url(r'^limit_changed$', views.change_limits),
url(r'^changesportslimit$', views.change_sports_limits),
url(r'^sports_limits_changed$', views.save_sports_limits),
url(r'^setstatus', views.set_status),
url(r'^showstatus', views.save_status),
url(r'^emailsend', views.send_mail),
url(r'^compose', views.compose),
]
| 38.352941 | 62 | 0.71319 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 249 | 0.381902 |
1908ece0bbcf8875b565e097e59669305dfcf236 | 354 | py | Python | _aulas/ex004.py | CarlosJunn/Aprendendo_Python | cddb29b5ee2058c3fb612574eb4af414770b7422 | [
"MIT"
]
| null | null | null | _aulas/ex004.py | CarlosJunn/Aprendendo_Python | cddb29b5ee2058c3fb612574eb4af414770b7422 | [
"MIT"
]
| null | null | null | _aulas/ex004.py | CarlosJunn/Aprendendo_Python | cddb29b5ee2058c3fb612574eb4af414770b7422 | [
"MIT"
]
| null | null | null | a = input('Type something: ')
print('The primitive type of this value is', type(a))
print('Is it only spaces?', a.isspace())
print('Is it numeric?', a.isnumeric())
print('Is it alphabetic?', a.isalpha())
print('Is it alphanumeric?', a.isalnum())
print('Is it uppercase?', a.isupper())
print('Is it lowercase?', a.islower())
print('Is it title-cased?', a.istitle()) | 35.4 | 50 | 0.675141 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 185 | 0.509642 |
190ab0b7b7eed8792f426c4ad62cea8612750811 | 3,966 | py | Python | authentication/login.py | ICTKevinWong/webservices-samples | 35a8b8571d88276ff12ad60959192ce20ef5bf19 | [
"BSD-3-Clause"
]
| 6 | 2018-01-03T14:13:57.000Z | 2021-07-28T21:12:35.000Z | authentication/login.py | ICTKevinWong/webservices-samples | 35a8b8571d88276ff12ad60959192ce20ef5bf19 | [
"BSD-3-Clause"
]
| 5 | 2018-01-03T15:28:47.000Z | 2020-08-28T08:25:07.000Z | authentication/login.py | ICTKevinWong/webservices-samples | 35a8b8571d88276ff12ad60959192ce20ef5bf19 | [
"BSD-3-Clause"
]
| 6 | 2017-10-17T19:37:44.000Z | 2021-08-19T13:10:16.000Z | """
Examples of authenticating to the API.
Usage:
login <username> <password> <server>
login -h
Arguments:
username ID to provide for authentication
password Password corresponding to specified userid.
server API endpoint.
Options:
-h --help Show this screen.
--version Show version.
Description:
There are two ways that you can authenticate to the Web Services API. Both options are viable and are demonstrated
below with examples.
Basic-Authentication is probably the most popular option, especially for shorter/simpler usages of the API, mostly
because of its simplicity. The credentials are simply provided with each request.
There is a login endpoint (POST /devmgr/utils/login), that will allow you to explicitly authenticate with the API.
Upon authenticating, a JSESSIONID will be provided in the Response headers and as a Cookie that can be utilized
to create a persistent session (that will eventually timeout).
"""
import logging
import docopt
import requests
LOG = logging.getLogger(__name__)
def login(server, username, password):
# Define a re-usable Session object and set some standard headers
con = requests.Session()
con.headers.update({'Accept': 'application/json', 'Content-Type': 'application/json'})
# Here we do a login that will define a persistent session on the server-side upon successful authentication
result = con.post(server + "/devmgr/utils/login", json={'userId': username, 'password': password})
# You'll notice the JSESSIONID as a part of the Response headers
LOG.info("Headers: %s", result.headers)
# Notice how the JSESSIONID is now set as a cookie on the Session object?
LOG.info("Cookie Set: JSESSIONID: %s", con.cookies.get('JSESSIONID'))
    # Now we make a subsequent request to a different Resource. Notice how the JSESSIONID is persisted on the connection?
    # Requests is intelligent enough to persist the cookie that is sent back in the Response on the requests.Session()!
result = con.get(server + "/devmgr/v2/storage-systems")
assert result.cookies.get('JSESSIONID') == con.cookies.get('JSESSIONID')
# Now let's avoid using a persistent session with the login
result1 = requests.post(server + "/devmgr/utils/login", json={'userId': username, 'password': password})
# Okay, now we have a different JSESSIONID, that's okay, that's what we expected.
assert result1.cookies.get('JSESSIONID') != con.cookies.get('JSESSIONID')
    result2 = requests.get(server + "/devmgr/v2/storage-systems")
    # Uh oh, we got an authentication error!?! That's because the JSESSIONID wasn't set on a persistent session,
    # and we didn't use Basic-Auth to authenticate directly!
    LOG.warning("Request without a session or auth: %s", result2.status_code)
# This time we'll provide credentials using Basic-Authentication
result2 = requests.get(server + "/devmgr/v2/storage-systems", auth=(username, password))
# It works, but we got a new session.
assert result1.cookies.get('JSESSIONID') != result2.cookies.get('JSESSIONID')
# We can do something similar to what requests does for us by manually persisting the cookie. This may be necessary
# for less full-featured clients.
result1 = requests.post(server + "/devmgr/utils/login", json={'userId': username, 'password': password})
result2 = requests.get(server + "/devmgr/v2/storage-systems", cookies=result1.cookies)
# See, they match, and we don't have to provide authentication for this request!
assert result1.cookies.get('JSESSIONID') == result2.cookies.get('JSESSIONID')
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG,
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
args = docopt.docopt(__doc__)
login(args.get('<server>'), args.get('<username>'), args.get('<password>'))
| 47.783133 | 120 | 0.72113 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,766 | 0.697428 |
190b6799d93e741a949b082cb1fde511c62a4b57 | 487 | py | Python | Chapter08/qt08_winBkground03.py | csy1993/PythonQt | c100cd9e1327fc7731bf04c7754cafb8dd578fa5 | [
"Apache-2.0"
]
| null | null | null | Chapter08/qt08_winBkground03.py | csy1993/PythonQt | c100cd9e1327fc7731bf04c7754cafb8dd578fa5 | [
"Apache-2.0"
]
| null | null | null | Chapter08/qt08_winBkground03.py | csy1993/PythonQt | c100cd9e1327fc7731bf04c7754cafb8dd578fa5 | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
'''
[Overview]
Setting the window background color
'''
from PyQt5.QtWidgets import QApplication, QLabel ,QWidget, QVBoxLayout , QPushButton, QMainWindow
from PyQt5.QtGui import QPalette , QBrush , QPixmap
from PyQt5.QtCore import Qt
import sys
app = QApplication(sys.argv)
win = QMainWindow()
win.setWindowTitle("Window background color")
win.resize(350, 250)
palette = QPalette()
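# QPalette.Background is the window-background color role (an alias of
# QPalette.Window in Qt 5).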
palette.setColor(QPalette.Background , Qt.red )
win.setPalette(palette)
win.show()
sys.exit(app.exec_())
| 20.291667 | 99 | 0.710472 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.197343 |
190c0898a136d9b08e445150bbf358f595547ad3 | 8,349 | py | Python | tcsv.py | eadanfahey/transform-csv | 40d3aaf34b286fe9d6262fe69c7245e3a44a5b41 | [
"MIT"
]
| null | null | null | tcsv.py | eadanfahey/transform-csv | 40d3aaf34b286fe9d6262fe69c7245e3a44a5b41 | [
"MIT"
]
| null | null | null | tcsv.py | eadanfahey/transform-csv | 40d3aaf34b286fe9d6262fe69c7245e3a44a5b41 | [
"MIT"
]
| null | null | null | import csv
class ConstraintError(Exception):
def __init__(self, column, value, fn_name, rownumber):
self.column = column
self.value = value
self.fn_name = fn_name
self.rown = rownumber
def __str__(self):
message = "{} value {} does not satisfy the constraint {} on row {}"
return message.format(self.column, self.value, self.fn_name, self.rown)
class TransformError(Exception):
def __init__(self, row, err):
self.row = row
self.err = err
def __str__(self):
return "on csv row {} with error: {}".format(self.row, self.err)
class TransformCSV(object):
def __init__(self, input_file, skiprows=0):
self.rownumber = 1
self.ifile = open(input_file)
self.names = None
self.reader = None
self.idx = None
self._create_reader(skiprows)
self.mutation_fns = []
self.constraint_fns = []
self.select_fn = lambda row: row
def __iter__(self):
return self
def __next__(self):
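        # Pipeline: apply each registered mutation in order, validate the
        # mutated row against every constraint, then return only the
        # selected columns.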
row = next(self.reader)
self.rownumber += 1
try:
mutated = row[:]
for fn in self.mutation_fns:
                mutated = fn(mutated)
mutated_cp = mutated[:]
for cfn in self.constraint_fns:
cfn(mutated_cp)
return self.select_fn(mutated)
except Exception as e:
raise TransformError(self.rownumber, e)
def close(self):
self.ifile.close()
def _create_reader(self, skiprows):
"""
Create a csv reader object from the input csv file.
"""
# with open(self.input_file) as f:
reader = csv.reader(self.ifile)
for _ in range(skiprows):
next(reader)
self.rownumber += 1
names = next(reader)
self.reader = reader
self.names = names
self.idx = dict(zip(names, range(len(names))))
def rename(self, name_map):
"""
Change the column names.
Args:
name_map: A dictionary mapping the current names to new names.
Returns:
None
"""
new_names = []
for name in self.names:
new_name = name_map.get(name)
if new_name is None:
new_names.append(name)
else:
new_names.append(new_name)
self.names = new_names
self.idx = dict(zip(self.names, range(len(self.names))))
def add(self, name, val):
"""
Add a column to the csv containing a constant value.
TODO: replace this method with add_column
Args:
name: The name of the new column.
val: The value to place in each row of the new column.
Returns:
None
"""
def f(row):
row.append(val)
return row
self.mutation_fns.append(f)
self.names.append(name)
self.idx[name] = len(self.names) - 1
def add_column(self, name, fn, col):
"""
Add a column to the csv with the new value produced by a
user defined function that can access all entries on the same row.
TODO: Perhaps I use inspect.signature to verify that the number of
arguments that `fn` takes is the same as the number of columns
passed. But, this doesn't work for some builting functions e.g. int.
Args:
name: The name of the new column.
fn: The function to apply to the row.
col: The columns that are arguments to the function.
Returns:
None
"""
if isinstance(col, str):
columns = [col]
elif isinstance(col, list) or isinstance(col, tuple):
columns = col
else:
raise TypeError('The parameter col must be of type str, list or tuple')
# check that the column names are valid.
for c in columns:
try:
self.idx[c]
except KeyError:
raise KeyError("The column '{}' does not exist".format(c))
def add_column_fn(row):
vals = [row[self.idx[c]] for c in columns]
new_val = fn(*vals)
row.append(new_val)
return row
self.mutation_fns.append(add_column_fn)
self.names.append(name)
self.idx[name] = len(self.names) - 1
def mutate(self, fn, col=None):
"""
Mutate a column by applying a function to it.
Args:
fn: The function to apply. Takes a string or numeric argument and
returns a string or numeric argument.
col: The name of the column to be mutated. Can be of three forms:
1) None (default): the function is applied to all columns.
2) list/tuple of column names to apply the function to.
3) A single column name to apply the function to.
Returns:
None
Raises:
TypeError: The parameter `col` is the wrong type.
KeyError: When trying to mutate a column that doesn't exist.
"""
if col is None:
columns = self.names
elif isinstance(col, str):
columns = [col]
elif isinstance(col, list) or isinstance(col, tuple):
columns = col
else:
raise TypeError("col must be of type None, str, list or tuple")
# check that the column names are valid.
for c in columns:
try:
self.idx[c]
except KeyError:
raise KeyError("The column '{}' does not exist".format(c))
def mutate_fn(row):
for c in columns:
row[self.idx[c]] = fn(row[self.idx[c]])
return row
self.mutation_fns.append(mutate_fn)
def constraint(self, fn, col):
"""
Check that a column satisfies a constraint.
Args:
fn: A function of a single argument that returns True if the
column value satisfies the constraint, or False otherwise.
col: The name of the column to check.
Returns:
None
Raises:
ConstraintError: If fn returns False.
TypeError: If col is not the correct type.
KeyError: If a column name does not exist.
"""
if col is None:
columns = self.names
elif isinstance(col, str):
columns = [col]
elif isinstance(col, list) or isinstance(col, tuple):
columns = col
else:
raise TypeError("col must be of type None, str, list or tuple")
# check that the column names are valid.
for c in columns:
try:
self.idx[c]
except KeyError:
raise KeyError("The column '{}' does not exist".format(c))
def constraint_fn(row):
for c in columns:
val = row[self.idx[c]]
if not fn(val):
raise ConstraintError(c, val, fn.__name__, self.rownumber)
self.constraint_fns.append(constraint_fn)
def select(self, columns):
"""
Select only the supplied columns.
columns:
columns: A list of column names to select.
Returns:
None
Raises:
KeyError: If a column does not exist.
"""
for c in columns:
try:
self.idx[c]
except KeyError:
raise KeyError("The column '{}' does not exist".format(c))
def select_fn(row):
return [row[self.idx[col]] for col in columns]
self.names = columns
self.select_fn = select_fn
def write(self, filename):
"""
Write the csv to file. This will exhaust the iterator.
Args:
filename: the name of the csv file.
Returns:
None
Raises:
FileNotFoundError: the file could not be created.
"""
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow(self.names)
while True:
try:
writer.writerow(self.__next__())
except StopIteration:
break
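# A minimal usage sketch (column names and file paths are hypothetical):
#     t = TransformCSV('input.csv')
#     t.rename({'price': 'price_usd'})
#     t.mutate(float, 'price_usd')
#     t.constraint(lambda v: v >= 0, 'price_usd')
#     t.select(['price_usd'])
#     t.write('output.csv')
#     t.close()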
| 31.387218 | 83 | 0.542819 | 8,329 | 0.997605 | 0 | 0 | 0 | 0 | 0 | 0 | 3,300 | 0.395257 |
190dc436e49d1655496d4e4796285c2ff4464f81 | 13,074 | py | Python | selim/datasets/lidc.py | tilacyn/dsb2018_topcoders | e0f95ef70bc062d4dea321d2aa73231a9538cd63 | [
"MIT"
]
| null | null | null | selim/datasets/lidc.py | tilacyn/dsb2018_topcoders | e0f95ef70bc062d4dea321d2aa73231a9538cd63 | [
"MIT"
]
| null | null | null | selim/datasets/lidc.py | tilacyn/dsb2018_topcoders | e0f95ef70bc062d4dea321d2aa73231a9538cd63 | [
"MIT"
]
| null | null | null | import numpy as np
from tensorflow.keras.preprocessing.image import Iterator
import time
import os
import xml.etree.ElementTree as ET
import cv2
import pydicom as dicom
from os.path import join as opjoin
import json
from tqdm import tqdm
def make_mask(image, image_id, nodules):
height, width = image.shape
# print(image.shape)
filled_mask = np.full((height, width), 0, np.uint8)
contoured_mask = np.full((height, width), 0, np.uint8)
# todo OR for all masks
for nodule in nodules:
for roi in nodule['roi']:
if roi['sop_uid'] == image_id:
edge_map = roi['xy']
cv2.fillPoly(filled_mask, np.int32([np.array(edge_map)]), 255)
# cv2.polylines(contoured_mask, np.int32([np.array(edge_map)]), color=255, isClosed=False)
# mask = np.swapaxes(np.array([contoured_mask, filled_mask]), 0, 2)
# cv2.imwrite('kek0.jpg', image)
# cv2.imwrite('kek1.jpg', filled_mask)
return np.reshape(filled_mask, (height, width, 1)) / 255
def get_files_with_nodules(nodules, root):
files = os.listdir(root)
image_ids_with_nodules = set()
for nodule in nodules:
for roi in nodule['roi']:
image_ids_with_nodules.add(roi['sop_uid'])
result = []
for file in files:
if not file.endswith('dcm'):
continue
_, ds = imread(opjoin(root, file))
if ds.SOPInstanceUID in image_ids_with_nodules:
result.append(opjoin(root, file))
return result
def test(a, b):
root = '/Users/mkryuchkov/lung-ds/3000566-03192'
nodules = parseXML('/Users/mkryuchkov/lung-ds/3000566-03192')
image = cv2.imread('/Users/mkryuchkov/lung-ds/000001.jpg')
for im_name in os.listdir(root):
if not im_name.endswith('dcm'):
continue
image, dcm_ds = imread(root + '/' + im_name)
print(dcm_ds.SliceLocation)
if dcm_ds.SliceLocation == a:
print(im_name)
            return make_mask(image, dcm_ds.SOPInstanceUID, nodules)
# break
# print(dcm_ds.get('UID'))
# return make_mask(image, image_id, nodules)
def imread(image_path):
ds = dicom.dcmread(image_path)
img = ds.pixel_array
img_2d = img.astype(float)
img_2d_scaled = (np.maximum(img_2d, 0) / img_2d.max()) * 255.0
img_2d_scaled = np.uint8(img_2d_scaled)
image = img_2d_scaled
return image, ds
def parseXML(scan_path):
'''
parse xml file
args:
xml file path
output:
nodule list
[{nodule_id, roi:[{z, sop_uid, xy:[[x1,y1],[x2,y2],...]}]}]
'''
file_list = os.listdir(scan_path)
xml_file = None
for file in file_list:
if '.' in file and file.split('.')[1] == 'xml':
xml_file = file
break
prefix = "{http://www.nih.gov}"
    if xml_file is None:
        raise IOError('no annotation XML file found in scan path: {}'.format(scan_path))
tree = ET.parse(scan_path + '/' + xml_file)
root = tree.getroot()
readingSession_list = root.findall(prefix + "readingSession")
nodules = []
for session in readingSession_list:
# print(session)
unblinded_list = session.findall(prefix + "unblindedReadNodule")
for unblinded in unblinded_list:
nodule_id = unblinded.find(prefix + "noduleID").text
edgeMap_num = len(unblinded.findall(prefix + "roi/" + prefix + "edgeMap"))
if edgeMap_num >= 1:
# it's segmentation label
nodule_info = {}
nodule_info['nodule_id'] = nodule_id
nodule_info['roi'] = []
roi_list = unblinded.findall(prefix + "roi")
for roi in roi_list:
roi_info = {}
# roi_info['z'] = float(roi.find(prefix + "imageZposition").text)
roi_info['sop_uid'] = roi.find(prefix + "imageSOP_UID").text
roi_info['xy'] = []
edgeMap_list = roi.findall(prefix + "edgeMap")
for edgeMap in edgeMap_list:
x = float(edgeMap.find(prefix + "xCoord").text)
y = float(edgeMap.find(prefix + "yCoord").text)
xy = [x, y]
roi_info['xy'].append(xy)
nodule_info['roi'].append(roi_info)
nodules.append(nodule_info)
return nodules
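# A minimal usage sketch (the scan and file paths are hypothetical):
#     nodules = parseXML('/data/LIDC/3000566-03192')
#     image, ds = imread('/data/LIDC/3000566-03192/000001.dcm')
#     mask = make_mask(image, ds.SOPInstanceUID, nodules)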
class LIDCDatasetIterator(Iterator):
def __init__(self, image_dir, batch_size, val_len, test_len=0, data_shape=(64, 64), grid_size=1):
seed = np.uint32(time.time() * 1000)
self.image_dir = image_dir
self.image_ids = self.create_image_ids()
n = len(self.image_ids)
self.val_len = val_len
self.train_index_list = np.arange(n)
np.random.shuffle(self.train_index_list)
self.val_index_list = self.train_index_list[:val_len]
self.test_index_list = self.train_index_list[val_len:(val_len + test_len)]
self.train_index_list = self.train_index_list[val_len + test_len:]
self.val_i = 0
self.train_i = 0
self.grid_size = grid_size
self.data_shape = data_shape
print("total len: {}".format(n))
print("train index array: {}".format(len(self.train_index_list)))
print("val index array: {}".format(len(self.val_index_list)))
super().__init__(n, batch_size, False, seed)
def train_generator(self):
def index_inc_function():
prev = self.train_i
self.train_i += self.batch_size // 2
if self.train_i >= len(self.train_index_list):
np.random.shuffle(self.train_index_list)
prev = 0
self.train_i = self.batch_size // 2
return prev, self.train_i
return self.generator(index_inc_function, self.train_index_list)
def val_generator(self):
def index_inc_function():
prev = self.val_i
self.val_i += self.batch_size // 2
if self.val_i >= len(self.val_index_list):
np.random.shuffle(self.val_index_list)
prev = 0
self.val_i = self.batch_size // 2
return prev, self.val_i
return self.generator(index_inc_function, self.val_index_list)
def generator(self, index_inc_function, index_list):
def gen():
while 1:
batch_x = []
batch_y = []
index, next_index = index_inc_function()
index_array = index_list[index: next_index]
for image_index in index_array:
file_name, parent_name = self.image_ids[image_index]
image, dcm_ds = imread(file_name)
image = self.pad_if_need(image)
nodules = parseXML(parent_name)
mask = make_mask(image, dcm_ds.SOPInstanceUID, nodules)
image_parts, mask_parts = self.split(image, mask)
for i in range(2):
image = image_parts[i]
image = self.preprocess_x(image)
mask = mask_parts[i]
mask = self.preprocess_y(mask)
batch_x.append(image)
batch_y.append(mask)
batch_x = np.array(batch_x, dtype=np.uint8)
batch_y = np.array(batch_y, dtype=np.uint8)
yield batch_x, batch_y
return gen
def pad_if_need(self, image):
h, w = image.shape
if 2022 == h or 2022 == w:
hpad = (2048 - h) // 2
wpad = (2048 - w) // 2
image = np.pad(image, ((hpad, hpad), (wpad, wpad)), constant_values=0)
return image
def preprocess_x(self, image):
image = np.reshape(image, (image.shape[0], image.shape[1], 1))
image = np.repeat(image, 3, axis=2)
image = cv2.resize(image, self.data_shape)
return image
def preprocess_y(self, mask):
mask = cv2.resize(mask, self.data_shape)
mask = np.reshape(mask, (self.data_shape[0], self.data_shape[1], 1))
return mask
def split(self, image, mask):
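        # Tile the image into a grid_size x grid_size grid and return two
        # (image, mask) tile pairs: the tile that contains the most mask
        # pixels plus one tile picked at random (with grid_size == 1 the
        # whole image is simply returned twice).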
if self.grid_size == 1:
return [image, image], [mask, mask]
h, w = image.shape
gs = h // self.grid_size
image_parts = image.reshape(h // gs, gs, -1, gs).swapaxes(1, 2).reshape(-1, gs, gs)
mask_parts = mask.reshape(h // gs, gs, -1, gs).swapaxes(1, 2).reshape(-1, gs, gs)
max_part_idx = np.argmax([np.count_nonzero(part > 0) for part in mask_parts])
max_mask = mask_parts[max_part_idx]
random_idx = np.random.randint(self.grid_size * self.grid_size)
        return [image_parts[max_part_idx], image_parts[random_idx]], [max_mask, mask_parts[random_idx]]
def create_image_ids(self):
with open("index.json", "r") as read_file:
dcms = json.load(read_file)
image_ids = {}
# print('total training ds len: {}'.format(len(dcms)))
for i, dcm in enumerate(dcms):
image_ids[i] = dcm, '/'.join(dcm.split('/')[:-1])
return image_ids
def create_index(image_dir):
dcms = []
for root, folders, files in tqdm(os.walk(image_dir)):
xml_file = None
for file in files:
if 'xml' in file:
xml_file = file
break
if xml_file is None:
continue
print('extending with {}'.format(root))
dcms.extend(get_files_with_nodules(parseXML(root), root))
print('total training ds len: {}'.format(len(dcms)))
with open("index.json", "w") as write_file:
json.dump(dcms, write_file)
class LIDCTestDatasetIterator(LIDCDatasetIterator):
def __init__(self, image_dir, batch_size, test_index_list, val_len, data_shape=(64, 64), grid_size=1):
super().__init__(image_dir, batch_size, 0, data_shape=data_shape, grid_size=grid_size)
self.test_index_list = test_index_list
self.test_i = 0
self.all_images = []
self.create_negative()
self.batch_size //= 2
def create_negative(self):
for root, folders, files in tqdm(os.walk(self.image_dir)):
xml_file = None
for file in files:
if 'xml' in file:
xml_file = file
break
if xml_file is None:
continue
else:
extension = [(dcm, root) for dcm in files if dcm.endswith('dcm')]
self.all_images.extend(extension)
if len(self.all_images) > 10000:
break
def split_for_test(self, image, mask):
if self.grid_size == 1:
return [image, image], [mask, mask]
h, w = image.shape
gs = h // self.grid_size
image_parts = image.reshape(h // gs, gs, -1, gs).swapaxes(1, 2).reshape(-1, gs, gs)
mask_parts = mask.reshape(h // gs, gs, -1, gs).swapaxes(1, 2).reshape(-1, gs, gs)
return image_parts, mask_parts
def test_generator(self):
def index_inc_function():
prev = self.test_i
self.test_i += self.batch_size
if self.test_i >= len(self.test_index_list):
np.random.shuffle(self.test_index_list)
prev = 0
self.test_i = self.batch_size
return prev, self.test_i
index_list = self.test_index_list
def gen():
while 1:
batch_x = []
batch_y = []
index, next_index = index_inc_function()
index_array = index_list[index: next_index]
print(index_array)
new_index_array = []
for i in index_array:
new_index_array.append(i)
new_index_array.append(-1)
index_array = new_index_array
for image_index in index_array:
if image_index == -1:
ii = np.random.randint(1e4)
file_name, parent_name = self.all_images[ii]
file_name = opjoin(parent_name, file_name)
else:
file_name, parent_name = self.image_ids[image_index]
image, dcm_ds = imread(file_name)
image = self.pad_if_need(image)
nodules = parseXML(parent_name)
mask = make_mask(image, dcm_ds.SOPInstanceUID, nodules)
image_parts, mask_parts = self.split_for_test(image, mask)
image_parts = [self.preprocess_x(image_part) for image_part in image_parts]
mask_parts = [self.preprocess_y(mask_part) for mask_part in mask_parts]
image = self.preprocess_x(image)
mask = self.preprocess_y(mask)
batch_x.append((image, image_parts))
batch_y.append((mask, mask_parts))
yield batch_x, batch_y
return gen
| 38.795252 | 106 | 0.567998 | 8,140 | 0.62261 | 3,222 | 0.246443 | 0 | 0 | 0 | 0 | 1,173 | 0.08972 |
190de2ec2acd9e5640757238ffbce83a69af9dc2 | 2,058 | py | Python | hexagon/__main__.py | redbeestudios/hexagon | dc906ae31a14eb750a3f9bde8dd0633d8e1af486 | [
"Apache-2.0"
]
| 8 | 2021-06-27T21:46:04.000Z | 2022-02-26T18:03:10.000Z | hexagon/__main__.py | redbeestudios/hexagon | dc906ae31a14eb750a3f9bde8dd0633d8e1af486 | [
"Apache-2.0"
]
| 31 | 2021-06-24T14:35:38.000Z | 2022-02-17T03:01:23.000Z | hexagon/__main__.py | redbeestudios/hexagon | dc906ae31a14eb750a3f9bde8dd0633d8e1af486 | [
"Apache-2.0"
]
| 1 | 2021-08-16T16:15:16.000Z | 2021-08-16T16:15:16.000Z | from hexagon.support.hooks import HexagonHooks
from hexagon.support.execute.tool import select_and_execute_tool
from hexagon.support.update.cli import check_for_cli_updates
import sys
from hexagon.support.args import fill_args
from hexagon.domain import cli, tools, envs
from hexagon.support.help import print_help
from hexagon.support.tracer import tracer
from hexagon.support.printer import log
from hexagon.support.update.hexagon import check_for_hexagon_updates
from hexagon.support.storage import (
HexagonStorageKeys,
store_user_data,
)
from hexagon.plugins import collect_plugins
def main():
_, _tool, _env = fill_args(sys.argv, 3)
if _tool == "-h" or _tool == "--help":
return print_help(cli, tools, envs)
collect_plugins()
HexagonHooks.start.run()
log.start(f"[bold]{cli.name}")
log.gap()
check_for_hexagon_updates()
if cli.name == "Hexagon":
log.info(
"This looks like your first time running Hexagon.",
'You should probably run "Install CLI".',
gap_end=1,
)
else:
check_for_cli_updates()
try:
result = select_and_execute_tool(tools, _tool, _env, sys.argv[3:])
log.gap()
if result:
for item in result:
log.info(item)
log.finish()
if tracer.has_traced():
log.extra(
"[cyan dim]To run again do:[/cyan dim]",
f"[cyan] {cli.command} {tracer.command()}[/cyan]",
)
command_as_aliases = tracer.command_as_aliases(tools, envs)
if command_as_aliases:
log.extra(
"[cyan dim] or:[/cyan dim]",
f"[cyan] {cli.command} {command_as_aliases}[/cyan]",
)
store_user_data(
HexagonStorageKeys.last_command.value,
f"{cli.command} {tracer.command()}",
)
except KeyboardInterrupt:
sys.exit(1)
HexagonHooks.end.run()
if __name__ == "__main__":
main()
| 27.078947 | 76 | 0.614189 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 350 | 0.170068 |
190e10d1d867b6f965986f63ffa52b804353b9e8 | 18,322 | py | Python | ligpy/ligpy_utils.py | LigninTools/UWCAP_10 | 0f665d3a2895657d9dda8ea9cc395583f3437dcc | [
"BSD-2-Clause"
]
| 7 | 2016-06-30T18:14:14.000Z | 2020-04-20T22:18:47.000Z | ligpy/ligpy_utils.py | LigninTools/UWCAP_10 | 0f665d3a2895657d9dda8ea9cc395583f3437dcc | [
"BSD-2-Clause"
]
| null | null | null | ligpy/ligpy_utils.py | LigninTools/UWCAP_10 | 0f665d3a2895657d9dda8ea9cc395583f3437dcc | [
"BSD-2-Clause"
]
| 5 | 2016-07-30T04:05:29.000Z | 2021-08-14T13:58:11.000Z | """
Misc utility functions required by several modules in the ligpy program.
"""
import os
import numpy as np
from constants import GAS_CONST, MW
def set_paths():
"""
Set the absolute path to required files on the current machine.
Returns
-------
reactionlist_path : str
path to the file `complete_reactionlist.dat`
rateconstantlist_path : str
path to the file `complete_rateconstantlist.dat`
compositionlist_path : str
path to the file `compositionlist.dat`
"""
module_dir = os.path.abspath(__file__).split('ligpy_utils')[0]
reactionlist_path = module_dir + 'data/complete_reaction_list.dat'
rateconstantlist_path = module_dir + 'data/complete_rateconstant_list.dat'
compositionlist_path = module_dir + 'data/compositionlist.dat'
return reactionlist_path, rateconstantlist_path, compositionlist_path
def get_specieslist(completereactionlist):
"""
Make a list of all the molecular species involved in the kinetic scheme.
Parameters
----------
completereactionlist : str
the path to the `complete_reaction_list.dat` file
Returns
-------
specieslist : list
a list of all the species in the kinetic scheme
"""
specieslist = []
for line in open(completereactionlist, 'r').readlines():
for spec in line.split(','):
# If the species has already been added to the list then move on.
if spec.split('_')[1].split()[0] in specieslist:
continue
else:
specieslist.append(spec.split('_')[1].split()[0])
specieslist.sort()
return specieslist
def get_speciesindices(specieslist):
"""
Create a dictionary to assign an arbitrary index to each of the species in
the kinetic scheme.
Parameters
----------
specieslist : list
a list of all the species in the model
Returns
-------
speciesindices : dict
a dictionary of arbitrary indices with the species
from specieslist as keys
indices_to_species : dict
the reverse of speciesindices (keys are the indices
and values are the species)
"""
speciesindices = {}
index = 0
for x in specieslist:
speciesindices[x] = index
index += 1
indices_to_species = dict(zip(speciesindices.values(),
speciesindices.keys()))
return speciesindices, indices_to_species
def define_initial_composition(compositionlist, species):
"""
Read the plant ID specified and define the initial composition of the
lignin polymer in terms of the three model components (PLIGC, PLIGH,
PLIGO).
Parameters
----------
compositionlist : str
the path of the `compositionlist.dat` file
species : str
the name of a lignin species that exists in the
`compositionlist.dat` file
Returns
-------
pligc_0 : float
The initial composition (mol/L) of PLIGC
pligh_0 : float
The initial composition (mol/L) of PLIGH
pligo_0 : float
The initial composition (mol/L) of PLIGO
"""
for line in open(compositionlist, 'rb').readlines():
if line.split(',')[0] == species:
# Initial compositions [mole fraction]
pligc_mol = float(line.split(',')[1])
pligh_mol = float(line.split(',')[2])
pligo_mol = float(line.split(',')[3])
# The weighted average molar mass of mixture [kg/mol]
weighted_m = (301*pligc_mol + 423*pligh_mol + 437*pligo_mol)/1000
# the density of the condensed phase [kg/L]
density = 0.75
# Initial compositions [mol/L]
pligc_0 = density/weighted_m * pligc_mol
pligh_0 = density/weighted_m * pligh_mol
pligo_0 = density/weighted_m * pligo_mol
break
return pligc_0, pligh_0, pligo_0
def build_k_matrix(rateconsts):
"""
Build a matrix of all the rate constant parameters (A, n, E).
Parameters
----------
rateconsts : str
the path to the file `complete_rateconstant_list.dat`
Returns
-------
kmatrix : list
a list of lists that defines a matrix. Each entry in the list
is A, n, E for a given reaction
"""
num_lines = sum(1 for line in open(rateconsts))
kmatrix = [None]*num_lines
for i, line in enumerate(open(rateconsts, 'r').readlines()):
kmatrix[i] = [line.split(' ')[0], line.split(' ')[1],
line.split(' ')[2].split()[0]]
return kmatrix
def get_k_value(T, reaction_index, kmatrix):
"""
Returns the value of the rate constant for a particular reaction index.
Parameters
----------
T : float
temperature in Kelvin
reaction_index : int
the index of the reaction for which you want the rate
kmatrix : list
the kmatrix generated by build_k_matrix()
Returns
-------
k : float
the value of the rate constant for the given reaction at the given
temperature.
"""
k = (eval(kmatrix[reaction_index][0]) *
T**eval(kmatrix[reaction_index][1]) *
np.exp(-1 * eval(kmatrix[reaction_index][2]) /(GAS_CONST * T)))
return k
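# Worked example with hypothetical Arrhenius parameters (A=1e13, n=0,
# E=1e5 J/mol, stored as strings because build_k_matrix reads them from file):
#     get_k_value(773.0, 0, [['1e13', '0', '1e5']])
# evaluates 1e13 * 773**0 * exp(-1e5 / (GAS_CONST * 773)).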
def get_k_value_list(T, kmatrix):
"""
Returns a list of all the k-values for a given temperature.
Parameters
----------
T : float
temperature in Kelvin
kmatrix : list
the kmatrix generated by build_k_matrix()
Returns
-------
kvaluelist : list
a list of all the rate constant values for a given temperature
"""
kvaluelist = []
for index, row in enumerate(kmatrix):
kvaluelist.append(get_k_value(T, index, kmatrix))
return kvaluelist
def build_reactant_dict(completereactionlist, speciesindices):
"""
Build a dictionary of the reactants involved in each reaction,
along with their stoichiometric coefficients. The keys of the
dictionary are the reaction numbers, the values are lists of lists
[[reactant1index, -1*coeff1],...]
Parameters
----------
completereactionlist : str
path to the file `complete_reaction_list.dat`
speciesindices : dict
the dictionary speciesindices from
get_speciesindices()
Returns
-------
reactant_dict : dict
a dictionary where keys are reaction numbers and values
are lists of lists with the reactants and their
stoichiometric coefficients for each reaction
"""
reactant_dict = {}
for rxnindex, reaction in enumerate(open(completereactionlist, 'rb')
.readlines()):
reactants = []
# x is each coefficient_species set
for x in reaction.split(','):
# if the species is a reactant
if float(x.split('_')[0]) < 0:
reactants.append([speciesindices[x.split('_')[1].split()[0]],
-1*float(x.split('_')[0])])
# in preceding line: *-1 because I want the |stoich coeff|
reactant_dict[rxnindex] = reactants
return reactant_dict
def build_species_rxns_dict(completereactionlist):
"""
Build a dictionary where keys are species and values are lists with the
reactions that species is involved in, that reaction's sign in the net
rate equation, and the stoichiometric coefficient of the species in that
reaction.
Parameters
----------
completereactionlist : str
path to the file `complete_reaction_list.dat`
Returns
-------
species_rxns : dict
keys are the species in the model; values are lists of
[reaction that species is involved in,
sign of that species in the net rate equation,
stoichiometric coefficient]
"""
specieslist = get_specieslist(set_paths()[0])
species_rxns = {}
for species in specieslist:
# This loop makes a list of which reactions "species" takes part in
# and what sign that term in the net rate eqn has
# and what the stoichiometric coefficient is
reactions_involved = []
for rxnindex, line in enumerate(open(completereactionlist, 'rb')
.readlines()):
# example of x = '-1_ADIO'
for x in line.split(','):
# If the species being iterated over is part of this reaction
if species == x.split('_')[1].split()[0]:
# if the species is a reactant
if float(x.split('_')[0]) < 0:
reactions_involved.append(
[rxnindex, -1, x.split('_')[0]])
# if the species is a product
if float(x.split('_')[0]) > 0:
reactions_involved.append(
[rxnindex, 1, '+' + x.split('_')[0]])
species_rxns[species] = reactions_involved
return species_rxns
def build_rates_list(rateconstlist, reactionlist, speciesindices,
indices_to_species, human='no'):
""" This function writes the list of rate expressions for each reaction.
Parameters
----------
rateconstlist : str
the path to the file `complete_rateconstant_list.dat`
reactionlist : str
the path to the file `complete_reaction_list.dat`
speciesindices : dict
a dictionary of arbitrary indices with the species
from specieslist as keys
indices_to_species : dict
the reverse of speciesindices (keys are the indices
and values are the species)
human : str, optional
indicate whether the output of this function should
be formatted for a human to read ('yes'). Default
is 'no'
Returns
-------
rates_list : list
a list of the rate expressions for all the reactions in the
model
"""
kmatrix = build_k_matrix(rateconstlist)
reactant_dict = build_reactant_dict(reactionlist, speciesindices)
rates_list = []
for i, line in enumerate(kmatrix):
rate = 'rate[%s] = kvalue(T,%s) ' % (i, i)
concentrations = ''
for entry in reactant_dict[i]:
if entry == 'n': # if there is no reaction
concentrations = '* 0'
break
else:
if human == 'no':
concentrations += '* y[%s]**%s ' % (entry[0], entry[1])
elif human == 'yes':
concentrations += '* [%s]**%s ' % \
(indices_to_species[entry[0]], entry[1])
else:
raise ValueError('human must be a string: yes or no')
rate += concentrations
rates_list.append(rate)
return rates_list
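# Each returned entry is a string such as (indices illustrative):
#     'rate[0] = kvalue(T,0) * y[12]**1.0 '
# or, with human='yes':
#     'rate[0] = kvalue(T,0) * [ADIO]**1.0 '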
def build_dydt_list(rates_list, specieslist, species_rxns, human='no'):
"""This function returns the list of dydt expressions generated for all
the reactions from rates_list.
Parameters
----------
rates_list : list
the output of build_rates_list()
specieslist : list
a list of all the species in the kinetic scheme
species_rxns : dict
dictionary where keys that are the model species and
values are the reactions they are involved in
human : str, optional
indicate whether the output of this function should
be formatted for a human to read ('yes'). Default
is 'no'
Returns
-------
dydt_expressions : list
expressions for the ODEs expressing the concentration
of each species with time
"""
dydt_expressions = []
for species in specieslist:
rate_formation = 'd[%s]/dt = ' % (species)
# "entry" is [reaction#, sign of that reaction, coefficient]
for entry in species_rxns[species]:
if human == 'no':
rate_formation += '%s*%s ' % \
(entry[2], rates_list[entry[0]].split(' = ')[1])
elif human == 'yes':
rate_formation += '%s*rate[%s] ' % (entry[2], entry[0])
else:
raise ValueError('human must be a string: yes or no')
dydt_expressions.append(rate_formation)
return dydt_expressions
def write_rates_and_odes(filename, rates, odes):
"""
Writes a file that contains the model equations to be solved (a list of
rate expressions, followed by a list of ODEs for each species). This
file is just for reference for humans to be able to look at the specific
reactions that are modeled, it is not actually used by the program. Users
should only need to generate this file if they've changed anything about
the kinetic scheme (it already exists in the data folder).
Parameters
----------
filename : str
the filename (including relative path if appropriate) of the
ratesandodes file to write
rates : list
the output of build_rates_list() with human='yes'
odes : list
the output of build_dydt_list() with human='yes'
Returns
-------
None
"""
with open(filename, 'wb') as initialize:
initialize.write('Reaction Rates:\n')
with open(filename, 'ab') as writer:
for line in rates:
writer.write(line+'\n')
writer.write('\n\nODE''s:\n')
for line in odes:
writer.write(line+'\n')
# These are some functions for checking the integrity of some model
# components, but they are not used except for exploratory or verification
# purposes
def check_species_in_MW(specieslist=None):
"""
Check to make sure that everything in the specieslist is in the MW
dictionary from `constants.py`.
Parameters
----------
specieslist : list, optional
a list of species to check against. If no list is
specified then the function get_specieslist() will be used
to generate the default list
Returns
-------
None
"""
if specieslist == None:
specieslist = get_specieslist(set_paths()[0])
for item in MW.keys():
if item in specieslist:
print '%s is in specieslist' % ('{: <20}'.format(item))
else:
print '********'+item
for item in specieslist:
if item in MW.keys():
print '%s is in MW dictionary' % ('{: <20}'.format(item))
else:
print '********'+item
print '\n%s should equal %s' % (len(MW.keys()), len(specieslist))
def check_mass_balance():
"""
Check for conservation of mass, and if mass is not conserved, see which
reactions are creating or losing mass.
Note that mass will not be wholly conserved in this model because
protons are not accounted for when radicals are involved in
non-Hydrogen-abstraction reactions, but all other reactions should
conserve mass.
Parameters
----------
None
Returns
-------
total_mass_balance : numpy array
an array with the amount of mass gained or lost
in each reaction
"""
specieslist = get_specieslist(set_paths()[0])
speciesindices = get_speciesindices(specieslist)[0]
kmatrix = build_k_matrix(set_paths()[1])
species_rxns = build_species_rxns_dict(set_paths()[0])
# Make vector of the MW's of each species, in the order from speciesindices
mw_vector = np.zeros((len(MW), 1))
for species in MW:
mw_vector[speciesindices[species]] = MW[species][0]
mw_vector = mw_vector.transpose()
# In this stoichiometric matrix, rows are species, columns are reactions
stoicmatrix = np.zeros((len(speciesindices), len(kmatrix)), dtype='float')
for species in species_rxns:
i = speciesindices[species]
for reaction in species_rxns[species]:
j = reaction[0]
stoicmatrix[i, j] += float(reaction[2])
# The result of this dot product should be a vector full of zeros.
# This will not be the case because protons are not accounted for when
# radicals are involved in non-H-abstraction rxns,
# but all other reactions should be 0
total_mass_balance = np.dot(mw_vector, stoicmatrix[:, :])
# Use this to look at which reactions are creating or losing mass
# (from missing Hydrogen)
h_sum = 0
for i, value in enumerate(total_mass_balance[0, :]):
if value != 0:
print i, value
h_sum += value
print '\nNet mass change = %s' % h_sum
return total_mass_balance
def check_species_fate():
"""
Check to see which species (if any) are only produced, but never
consumed in the model reactions (assuming that all reactions occur).
Parameters
----------
None
Returns
-------
fate_dict : dictionary
a dictionary with the fate of model species
"""
specieslist = get_specieslist(set_paths()[0])
species_rxns = build_species_rxns_dict(set_paths()[0])
fate_dict = {}
for species in specieslist:
fate_dict[species] = 'produced only'
for entry in species_rxns[species]:
if entry[1] < 0:
fate_dict[species] = 'consumed'
for species in specieslist:
if fate_dict[species] == 'consumed':
del fate_dict[species]
return fate_dict
| 35.370656 | 79 | 0.581705 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10,813 | 0.590165 |
ef6ac86677970e875c525f92de89d605e9c5d009 | 7,836 | py | Python | data_prep_helper.py | mayofcumtb/PaperWrite | 4a2154d68fa00e1912a3d4ce7b514364314c55e3 | [
"Apache-2.0"
]
| null | null | null | data_prep_helper.py | mayofcumtb/PaperWrite | 4a2154d68fa00e1912a3d4ce7b514364314c55e3 | [
"Apache-2.0"
]
| null | null | null | data_prep_helper.py | mayofcumtb/PaperWrite | 4a2154d68fa00e1912a3d4ce7b514364314c55e3 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import random
import tempfile
import shutil
mayan_debug = 1
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.dirname(BASE_DIR))
from global_variables import *
from caffe_utils import *
'''
@brief:
extract labels from rendered images
@input:
xxx/03790512_13a245da7b0567509c6d15186da929c5_a035_e009_t-01_d004.png
@output:
(35,9,359)
'''
def path2label(path):
parts = os.path.basename(path).split('_')
azimuth = int(parts[2][1:]) % 360
elevation = int(parts[3][1:]) % 360
tilt = int(parts[4][1:]) % 360
return (azimuth, elevation, tilt)
def outspath2label(path):
'''
    :param path: a crop filename such as
        bookshelf_16_a007_e023_t359_d002px_216.00_py_499.00_bbwidth_280.00_bbheight_485.00.jpg
        (underscore-split indices: 0=class, 1=cad index, 2=azimuth, 3=elevation,
        4=tilt, 5=distance, 6=px, 8=py, 10=bbox width, 12=bbox height)
    :return: (class_name, cad_index, azimuth, elevation, tilt, distance,
        px, py, bbox_width, bbox_height)
'''
parts = os.path.basename(path).split('_')
class_name = str(parts[0])
cad_index = str(parts[1])
azimuth = int(parts[2][1:])
elevation = int(parts[3][1:])
tilt = -int(parts[4][1:])
distance = float(parts[5][1:-2])
px = float(parts[6])
py = float(parts[8])
bbox_width = float(parts[10])
bbox_height = float(parts[12][:-4])
return (class_name, cad_index, azimuth, elevation, tilt, distance, px, py, bbox_width, bbox_height)
'''
@brief:
get rendered image filenames and annotations, save to specified files.
@input:
shape_synset - like '02958343' for car
[train,test]_image_label_file - output file list filenames
train_ratio - ratio of training images vs. all images
@output:
save "<image_filepath> <class_idx> <azimuth> <elevation> <tilt>" to files.
'''
def get_one_category_image_label_file(shape_synset, train_image_label_file, test_image_label_file, train_ratio = 0.9):
if mayan_debug:
train_ratio = 1
class_idx = g_shape_synsets.index(shape_synset)
image_folder = os.path.join(g_syn_images_bkg_overlaid_folder, shape_synset)
all_md5s = os.listdir(image_folder)
train_test_split = int(len(all_md5s)*train_ratio)
train_md5s = all_md5s[0:train_test_split]
test_md5s = all_md5s[train_test_split:]
for md5s_list, image_label_file in [(train_md5s, train_image_label_file), (test_md5s, test_image_label_file)]:
image_filenames = []
for k,md5 in enumerate(md5s_list):
if k%(1+len(md5s_list)/20)==0:
print('shape: %s clsidx: %d, %d/%d: %s' % (shape_synset, class_idx, k,len(md5s_list),md5))
shape_folder = os.path.join(image_folder, md5)
shape_images = [os.path.join(shape_folder, x) for x in os.listdir(shape_folder)]
image_filenames += shape_images
image_filename_label_pairs = [(fpath,path2label(fpath)) for fpath in image_filenames]
random.shuffle(image_filename_label_pairs)
fout = open(image_label_file, 'w')
for filename_label in image_filename_label_pairs:
label = filename_label[1]
fout.write('%s %d %d %d %d\n' % (filename_label[0], class_idx, label[0], label[1], label[2]));
fout.close()
no_bkg = 0
def ours_get_one_category_image_label_file(shape_synset, train_image_label_file, test_image_label_file, train_ratio = 0.9):
class_idx = g_shape_synsets.index(shape_synset)
#image_source_file = "/data/zairan.wang/YanMA/RenderForCNN/data_ours/syn_images_cropped_bkg_overlaid_real/"
image_folder = os.path.join(g_syn_images_bkg_overlaid_folder, shape_synset)
all_md5s = os.listdir(image_folder)
train_test_split = int(len(all_md5s)*train_ratio)
train_md5s = all_md5s[0:train_test_split]
test_md5s = all_md5s[train_test_split:]
#############path control##########################################################################
all_file_paths = "/data/zairan.wang/YanMA/RenderForCNN/data_ours/VOC_format/SUN_bkg/all_info_cropped_bkg.txt"
images_file = "/data/zairan.wang/YanMA/RenderForCNN/data_ours/VOC_format/SUN_bkg/Images_uniform_bkg_cropped/"
###########################################mayan########################################
if os.path.exists(all_file_paths):
f_all = open(all_file_paths, 'a')
else:
f_all = open(all_file_paths, 'w')
for md5s_list, image_label_file in [(train_md5s, train_image_label_file), (test_md5s, test_image_label_file)]:
image_filenames = []
for k,md5 in enumerate(md5s_list):
            if k % (1 + len(md5s_list) // 20) == 0:
print('shape: %s clsidx: %d, %d/%d: %s' % (shape_synset, class_idx, k,len(md5s_list),md5))
shape_folder = os.path.join(image_folder, md5)
shape_images = [os.path.join(shape_folder, x) for x in os.listdir(shape_folder)]
image_filenames += shape_images
image_filename_label_pairs = [(fpath,outspath2label(fpath)) for fpath in image_filenames]
random.shuffle(image_filename_label_pairs)
#fout = open(image_label_file, 'w')
for filename_label in image_filename_label_pairs:
label = filename_label[1]
shutil.copy(filename_label[0], images_file)
            f_all.write('%s %s %s %d %d %d %d %f %f %f %f\n' % (filename_label[0], label[0], label[1], label[2], label[3], label[4], label[5], label[6], label[7], label[8], label[9]))
#fout.write('%s %s %s %d %d %d %d %f %f %f %f\n' % (filename_label[0], label[0], label[1], label[2], label[3], label[4], label[5], label[6], label[7], label[8], label[9]));
#fout.close()
f_all.close()
'''
@brief:
combine lines from input files and save the shuffled version to output file.
@input:
input_file_list - a list of input file names
output_file - output filename
'''
def combine_files(input_file_list, output_file, shuffle=1):
all_lines = []
for filelist in input_file_list:
lines = [x.rstrip() for x in open(filelist,'r')]
all_lines += lines
if shuffle: random.shuffle(all_lines)
fout = open(output_file,'w')
for line in all_lines:
fout.write('%s\n' % (line))
fout.close()
'''
@brief:
convert 360 view degree to view estimation label
e.g. for bicycle with class_idx 1, label will be 360~719
'''
def view2label(degree, class_index):
return int(degree)%360 + class_index*360
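# Example (illustrative): view2label(370, 1) == 370 % 360 + 1 * 360 == 370,
# so azimuth labels for class_idx 1 span 360..719 as stated above.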
'''
@brief:
generate LMDB from files containing image filenames and labels
@input:
    image_label_file - each line is <image_filepath> <class_idx> <azimuth> <elevation> <tilt>
output_lmdb: LMDB pathname-prefix like xxx/xxxx_lmdb
image_resize_dim (D): resize image to DxD square
@output:
write TWO LMDB corresponding to images and labels,
i.e. xxx/xxxx_lmdb_label (each item is class_idx, azimuth, elevation, tilt) and xxx/xxxx_lmdb_image
'''
def generate_image_view_lmdb(image_label_file, output_lmdb):
lines = [line.rstrip() for line in open(image_label_file,'r')]
    tmp_label_fout = tempfile.NamedTemporaryFile(mode='w', dir=g_syn_images_lmdb_folder, delete=False)
for line in lines:
ll = line.split(' ')
class_idx, azimuth, elevation, tilt = [int(x) for x in ll[1:]]
tmp_label_fout.write('%d %d %d %d\n' % (class_idx, view2label(azimuth, class_idx), view2label(elevation, class_idx), view2label(tilt, class_idx)))
tmp_label_fout.close()
print("Tmp label file generated: %s" % tmp_label_fout.name)
if not os.path.exists(output_lmdb+'_label'):
write_vector_lmdb(tmp_label_fout.name, output_lmdb+'_label')
print "Label DB done ..."
if not os.path.exists(output_lmdb+'_image'):
write_image_lmdb(image_label_file, output_lmdb+'_image')
print "Image DB done ..."
# clean up
    os.remove(tmp_label_fout.name)
| 41.026178 | 185 | 0.661179 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,588 | 0.330271 |
ef6c9cef1dd0ae3c36a242179a531b49d4c57a72 | 486 | py | Python | pyvisio/__init__.py | i-wan/pyvisio | 6beed5a18644793e5c6769c5a4fa5f64f9dc436b | [
"MIT"
]
| 1 | 2018-06-05T13:15:35.000Z | 2018-06-05T13:15:35.000Z | pyvisio/__init__.py | i-wan/pyvisio | 6beed5a18644793e5c6769c5a4fa5f64f9dc436b | [
"MIT"
]
| 1 | 2017-06-05T18:17:16.000Z | 2017-06-05T18:17:16.000Z | pyvisio/__init__.py | i-wan/pyvisio | 6beed5a18644793e5c6769c5a4fa5f64f9dc436b | [
"MIT"
]
| 1 | 2019-06-30T17:36:35.000Z | 2019-06-30T17:36:35.000Z | # -*- coding: utf-8 -*-
"""
PyVisio visDocuments - Visio Document manipulation library
See docstring for class VisDocument for usage
"""
#TODO docstring
__author__ = 'Ivo Velcovsky'
__email__ = '[email protected]'
__copyright__ = "Copyright (c) 2015"
__license__ = "MIT"
__status__ = "Development"
from .visCOM import *
from .documents import *
from .stencils import *
from .shapes import *
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21.130435 | 59 | 0.691358 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 240 | 0.493827 |
ef6ed1166f6e406d8fb8cc64a8cdbbcd50db4769 | 7,103 | py | Python | store/main.py | Soemonewho2/pi-ware | 86d2cd84ca85e36cbcdbc7511f6a4565b18e81d9 | [
"MIT"
]
| null | null | null | store/main.py | Soemonewho2/pi-ware | 86d2cd84ca85e36cbcdbc7511f6a4565b18e81d9 | [
"MIT"
]
| null | null | null | store/main.py | Soemonewho2/pi-ware | 86d2cd84ca85e36cbcdbc7511f6a4565b18e81d9 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
# Pi-Ware main UI
from tkinter import *
from tkinter.ttk import *
import tkinter as tk
import os
import webbrowser
from functools import partial
import getpass
#Set global var username
global username
username = getpass.getuser()
#Set global install/uninstall scripts
global install_script
global uninstall_script
#Import custom pi-ware functions
#import function
import classes
window = tk.Tk()
#Functions
def show_desc(app, *args):
mainwinx = str(window.winfo_x())
mainwiny = str(window.winfo_y())
item = tree.selection()[0]
app = tree.item(item,"text")
global install_script, uninstall_script, desc_win
desc_win = tk.Toplevel(window)
p2 = PhotoImage(file = f'/home/{username}/pi-ware/apps/{app}/icon.png')
# Icon set for program window
desc_win.iconphoto(False, p2)
window.resizable(0, 0)
desc_win.title(f"{app}")
print("320x500+" + mainwinx + "+" + mainwiny)
desc_win.geometry("320x500+" + mainwinx + "+" + mainwiny)
window.withdraw()
desc = open(f"/home/{username}/pi-ware/apps/{app}/description.txt", "r")
desc_contents = desc.read()
text_box = Text(desc_win, height=12, width=40)
text_box.pack()
text_box.insert('end', desc_contents)
text_box.config(state='disabled')
#Disabled for now.
#app_desc = tk.Label(desc_win, text=desc_contents, font="Arial 9")
#app_desc.pack()
    #Check if website file exists
    filepath = f"/home/{username}/pi-ware/apps/{app}/website"
    Web = "True" if os.path.isfile(filepath) else "False"
#Add website from file
if Web == "True":
websiteurlfile = open(f'/home/{username}/pi-ware/apps/{app}/website', 'r')
websiteurl = websiteurlfile.readlines()
        # Strip the newline character from each URL line
        for line in websiteurl:
            Website = classes.HyperLink(desc_win, line.strip())
            Website.pack()
install = tk.Button(desc_win, text="INSTALL", font="Arial 11 bold", width=200, bg="darkblue", fg="white", command=install_app)
install.pack()
uninstall = tk.Button(desc_win, text="UNINSTALL", font="Arial 11 bold", width=200, bg="red", fg="white", command=uninstall_app)
uninstall.pack()
ucommand = f"""bash /home/{username}/pi-ware/func/term/uninst '{app}' 'Uninstalling {app}'"""
command = f"""bash /home/{username}/pi-ware/func/term/inst '{app}' 'Installing {app}'"""
install_script = "'%s'" % command
uninstall_script = "'%s'" % ucommand
back_to_menu_button = tk.Button(desc_win, text="BACK", font="Arial 11 bold", width=200, height=2, bg="green", fg="white", command=back_to_menu)
back_to_menu_button.pack(side = "bottom")
desc_win.protocol("WM_DELETE_WINDOW",back_to_menu)
def install_app():
global install_script
if IsDev == "True":
print(f"bash /home/{username}/pi-ware/func/term/term-run {install_script}")
os.system(f"bash /home/{username}/pi-ware/func/term/term-run {install_script}")
def uninstall_app():
global uninstall_script
if IsDev == "True":
print(f"bash /home/{username}/pi-ware/func/term/term-run {uninstall_script}")
os.system(f"bash /home/{username}/pi-ware/func/term/term-run {uninstall_script}")
def back_to_menu():
window.deiconify()
desc_win.destroy()
window.title("Pi-Ware")
#window.eval('tk::PlaceWindow . center')
def quit():
window.destroy()
#Check if dev files exist
filepath = f"/home/{username}/pi-ware/.dev"
IsDev = "True" if os.path.isfile(filepath) else "False"
#Set window icon
p1 = PhotoImage(file = f'/home/{username}/pi-ware/icons/logo.png')
window.iconphoto(False, p1)
#Main
window.resizable(0, 0)
window.geometry("330x500")
window.eval('tk::PlaceWindow . center')
window.title("Pi-Ware")
# Window tabs
tab_control = Notebook(window)
apps_tab = Frame(tab_control)
news_tab = Frame(tab_control)
credits_tab = Frame(tab_control)
DEV_tab = Frame(tab_control)
tab_control.add(apps_tab, text="Apps")
tab_control.add(news_tab, text="News")
tab_control.add(credits_tab, text="Credits")
#Show dev tab if dev files are found
if IsDev == "True":
tab_control.add(DEV_tab, text="Dev")
tab_control.pack(expand=0, fill="both")
#Show DEV stuff
PiWareVersionFile = open(f"/home/{username}/.local/share/pi-ware/version", "r")
PiWareVersioncontent = PiWareVersionFile.read()
files = folders = 0
for _, dirnames, filenames in os.walk(f"/home/{username}/pi-ware/apps"):
files += len(filenames)
folders += len(dirnames)
InstallableApps = "{:,} installable Apps".format(folders)
PiWareVersion = tk.Label(DEV_tab, text=f"Pi-Ware Version:\n{PiWareVersioncontent}", font="Arial 11 bold")
PiWareInstallableApps = tk.Label(DEV_tab, text=f"{InstallableApps}", font="Arial 11 bold")
PiWareVersion.pack()
PiWareInstallableApps.pack()
#Show latest news message
NewsMessagefile = open(f"/home/{username}/pi-ware/func/info/latestnewsmessage", "r")
NewsMessagecontent = NewsMessagefile.read()
NewsMessage = tk.Label(news_tab, text=f"Latest news:\n{NewsMessagecontent}", font="Arial 11 bold")
NewsMessage.pack()
#Show info message
InfoMessagefile = open(f"/home/{username}/pi-ware/func/info/infomessage", "r")
InfoMessagecontent = InfoMessagefile.read()
InfoMessage = tk.Label(credits_tab, text=f"{InfoMessagecontent}", font="Arial 11 bold")
InfoMessage.pack()
#Show commit links
commitmessage = tk.Label(credits_tab, text=f"To see commits, please go to the link below.", font="Arial 11 bold")
commitmessage.pack()
commit = classes.HyperLink(credits_tab, f"""https://github.com/piware14/pi-ware/graphs/contributors""");
commit.pack()
#Add pi-ware website
piwarewebsite = tk.Label(credits_tab, text=f"To visit the pi-ware website, click the link below.", font="Arial 11 bold")
piwarewebsite.pack()
Website = classes.HyperLink(credits_tab, f"""https://pi-ware.ml""");
Website.pack()
tree = Treeview(apps_tab)
tree.pack(expand=YES, fill=BOTH)
tree.column("#0", minwidth=0, width=330, stretch=NO)
s = Style()
s.configure('Treeview', rowheight=35)
ap = next(os.walk(f"/home/{username}/pi-ware/apps"))[1]
applist = sorted(ap)
print("Current apps:\n")
for app in applist:
print(app)
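    # Note: show_desc reads the current selection from the tree itself, so
    # the app value bound via partial below is effectively unused.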
appb = ""
for a in app:
if(a == " "):
appb += "_"
else:
appb += a
tree.bind("<<TreeviewSelect>>", partial(show_desc,app))
exec(appb + """_button = PhotoImage(file=f'/home/{username}/pi-ware/apps/{app}/icon.png')""")
exec("""tree.insert('', 'end', text=f"{app}",image=""" + appb + """_button)""")
ScrollForMore = tk.Label(apps_tab, text="Scroll down for more apps.", font="Arial 11 bold")
ScrollForMore.pack()
quitbutton = tk.Button(window, text="Quit", font="Arial 11 bold", width=200, bg="grey", fg="white", command=quit)
quitbutton.pack(side="bottom")
window.mainloop()
| 33.504717 | 147 | 0.68464 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,632 | 0.370548 |
ef700e08b8631cf4f5d03872e7a2e1c13a5f31f4 | 50,478 | py | Python | shwirl/shaders/render_volume.py | macrocosme/shwirl | 87147ba1e99463e96b7f4295fd24ab57440d9981 | [
"BSD-3-Clause"
]
| 3 | 2018-05-09T17:55:53.000Z | 2019-07-22T09:14:41.000Z | shwirl/shaders/render_volume.py | macrocosme/shwirl | 87147ba1e99463e96b7f4295fd24ab57440d9981 | [
"BSD-3-Clause"
]
| 9 | 2017-04-07T01:44:15.000Z | 2018-12-16T20:47:08.000Z | shwirl/shaders/render_volume.py | macrocosme/shwirl | 87147ba1e99463e96b7f4295fd24ab57440d9981 | [
"BSD-3-Clause"
]
| null | null | null | from __future__ import division
# This file implements a RenderVolumeVisual class. It is derived from the
# VolumeVisual class in vispy.visuals.volume, which is released under a BSD
# license included here:
#
# ===========================================================================
# Vispy is licensed under the terms of the (new) BSD license:
#
# Copyright (c) 2015, authors of Vispy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Vispy Development Team nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ===========================================================================
#
# This modified version is released under the (new) BSD license:
#
# Copyright (c) 2015, Dany Vohl
# All rights reserved.
#
# A copy of the license is available in the root directory of this project.
#
from ..extern.vispy.gloo import Texture3D, TextureEmulated3D, VertexBuffer, IndexBuffer
from ..extern.vispy.visuals import Visual
from ..extern.vispy.visuals.shaders import Function
from ..extern.vispy.color import get_colormap
from ..extern.vispy.scene.visuals import create_visual_node
from ..extern.vispy.io import load_spatial_filters
import numpy as np
# Vertex shader
VERT_SHADER = """
attribute vec3 a_position;
// attribute vec3 a_texcoord;
uniform vec3 u_shape;
// varying vec3 v_texcoord;
varying vec3 v_position;
varying vec4 v_nearpos;
varying vec4 v_farpos;
void main() {
// v_texcoord = a_texcoord;
v_position = a_position;
// Project local vertex coordinate to camera position. Then do a step
// backward (in cam coords) and project back. Voila, we get our ray vector.
vec4 pos_in_cam = $viewtransformf(vec4(v_position, 1));
// intersection of ray and near clipping plane (z = -1 in clip coords)
pos_in_cam.z = -pos_in_cam.w;
v_nearpos = $viewtransformi(pos_in_cam);
// intersection of ray and far clipping plane (z = +1 in clip coords)
pos_in_cam.z = pos_in_cam.w;
v_farpos = $viewtransformi(pos_in_cam);
gl_Position = $transform(vec4(v_position, 1.0));
}
""" # noqa
# Fragment shader
FRAG_SHADER = """
// uniforms
uniform $sampler_type u_volumetex;
uniform vec3 u_shape;
uniform vec3 u_resolution;
uniform float u_threshold;
uniform float u_relative_step_size;
//uniform int u_color_scale;
//uniform float u_data_min;
//uniform float u_data_max;
// Moving box filter variables
uniform int u_filter_size;
uniform float u_filter_coeff;
uniform int u_filter_arm;
uniform int u_filter_type;
uniform int u_use_gaussian_filter;
uniform int u_gaussian_filter_size;
//uniform int u_log_scale;
// Volume Stats
uniform float u_volume_mean;
uniform float u_volume_std;
//uniform float u_volume_madfm;
uniform float u_high_discard_filter_value;
uniform float u_low_discard_filter_value;
uniform float u_density_factor;
uniform int u_color_method;
//varyings
// varying vec3 v_texcoord;
varying vec3 v_position;
varying vec4 v_nearpos;
varying vec4 v_farpos;
// uniforms for lighting. Hard coded until we figure out how to do lights
const vec4 u_ambient = vec4(0.2, 0.4, 0.2, 1.0);
const vec4 u_diffuse = vec4(0.8, 0.2, 0.2, 1.0);
const vec4 u_specular = vec4(1.0, 1.0, 1.0, 1.0);
const float u_shininess = 40.0;
//varying vec3 lightDirs[1];
// global holding view direction in local coordinates
vec3 view_ray;
float rand(vec2 co)
{{
// Create a pseudo-random number between 0 and 1.
// http://stackoverflow.com/questions/4200224
return fract(sin(dot(co.xy ,vec2(12.9898, 78.233))) * 43758.5453);
}}
float colorToVal(vec4 color1)
{{
return color1.g;
}}
vec4 movingAverageFilter_line_of_sight(vec3 loc, vec3 step)
{{
// Initialise variables
vec4 partial_color = vec4(0.0, 0.0, 0.0, 0.0);
for ( int i=1; i<=u_filter_arm; i++ )
{{
partial_color += $sample(u_volumetex, loc-i*step);
partial_color += $sample(u_volumetex, loc+i*step);
}}
partial_color += $sample(u_volumetex, loc);
// Evaluate mean
partial_color *= u_filter_coeff;
return partial_color;
}}
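// Separable Gaussian blur helpers below: each pass samples along one axis
// using linear-sampling offsets and weights; main() chains x, y and z
// passes with 5, 9 or 13 taps.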
vec4 Gaussian_5(vec4 color_original, vec3 loc, vec3 direction) {{
vec4 color = vec4(0.0);
vec3 off1 = 1.3333333333333333 * direction;
color += color_original * 0.29411764705882354;
color += $sample(u_volumetex, loc + (off1 * u_resolution)) * 0.35294117647058826;
color += $sample(u_volumetex, loc - (off1 * u_resolution)) * 0.35294117647058826;
return color;
}}
vec4 Gaussian_9(vec4 color_original, vec3 loc, vec3 direction)
{{
vec4 color = vec4(0.0);
vec3 off1 = 1.3846153846 * direction;
vec3 off2 = 3.2307692308 * direction;
color += color_original * 0.2270270270;
color += $sample(u_volumetex, loc + (off1 * u_resolution)) * 0.3162162162;
color += $sample(u_volumetex, loc - (off1 * u_resolution)) * 0.3162162162;
color += $sample(u_volumetex, loc + (off2 * u_resolution)) * 0.0702702703;
color += $sample(u_volumetex, loc - (off2 * u_resolution)) * 0.0702702703;
return color;
}}
vec4 Gaussian_13(vec4 color_original, vec3 loc, vec3 direction) {{
vec4 color = vec4(0.0);
vec3 off1 = 1.411764705882353 * direction;
vec3 off2 = 3.2941176470588234 * direction;
vec3 off3 = 5.176470588235294 * direction;
color += color_original * 0.1964825501511404;
color += $sample(u_volumetex, loc + (off1 * u_resolution)) * 0.2969069646728344;
color += $sample(u_volumetex, loc - (off1 * u_resolution)) * 0.2969069646728344;
color += $sample(u_volumetex, loc + (off2 * u_resolution)) * 0.09447039785044732;
color += $sample(u_volumetex, loc - (off2 * u_resolution)) * 0.09447039785044732;
color += $sample(u_volumetex, loc + (off3 * u_resolution)) * 0.010381362401148057;
color += $sample(u_volumetex, loc - (off3 * u_resolution)) * 0.010381362401148057;
return color;
}}
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// Edge detection Pass
// (adapted from https://www.shadertoy.com/view/MscSzf#)
// ----------------------------------------------------------------
float checkSame(vec4 center, vec4 sample, vec3 resolution) {{
vec2 centerNormal = center.xy;
float centerDepth = center.z;
vec2 sampleNormal = sample.xy;
float sampleDepth = sample.z;
vec2 sensitivity = (vec2(0.3, 1.5) * resolution.y / 50.0);
vec2 diffNormal = abs(centerNormal - sampleNormal) * sensitivity.x;
bool isSameNormal = (diffNormal.x + diffNormal.y) < 0.1;
float diffDepth = abs(centerDepth - sampleDepth) * sensitivity.y;
bool isSameDepth = diffDepth < 0.1;
return (isSameNormal && isSameDepth) ? 1.0 : 0.0;
}}
vec4 edge_detection(vec4 color_original, vec3 loc, vec3 step, vec3 resolution) {{
vec4 sample1 = $sample(u_volumetex, loc + (vec3(1., 1., 0.) / resolution));
vec4 sample2 = $sample(u_volumetex, loc + (vec3(-1., -1., 0.) / resolution));
vec4 sample3 = $sample(u_volumetex, loc + (vec3(-1., 1., 0.) / resolution));
vec4 sample4 = $sample(u_volumetex, loc + (vec3(1., -1., 0.) / resolution));
float edge = checkSame(sample1, sample2, resolution) *
checkSame(sample3, sample4, resolution);
return vec4(color_original.rgb, 1-edge);
}}
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// Used with iso surface
vec4 calculateColor(vec4 betterColor, vec3 loc, vec3 step)
{{
// Calculate color by incorporating lighting
vec4 color1;
vec4 color2;
// View direction
vec3 V = normalize(view_ray);
// calculate normal vector from gradient
vec3 N; // normal
color1 = $sample( u_volumetex, loc+vec3(-step[0],0.0,0.0) );
color2 = $sample( u_volumetex, loc+vec3(step[0],0.0,0.0) );
N[0] = colorToVal(color1) - colorToVal(color2);
betterColor = max(max(color1, color2),betterColor);
color1 = $sample( u_volumetex, loc+vec3(0.0,-step[1],0.0) );
color2 = $sample( u_volumetex, loc+vec3(0.0,step[1],0.0) );
N[1] = colorToVal(color1) - colorToVal(color2);
betterColor = max(max(color1, color2),betterColor);
color1 = $sample( u_volumetex, loc+vec3(0.0,0.0,-step[2]) );
color2 = $sample( u_volumetex, loc+vec3(0.0,0.0,step[2]) );
N[2] = colorToVal(color1) - colorToVal(color2);
betterColor = max(max(color1, color2),betterColor);
float gm = length(N); // gradient magnitude
N = normalize(N);
// Flip normal so it points towards viewer
float Nselect = float(dot(N,V) > 0.0);
N = (2.0*Nselect - 1.0) * N; // == Nselect * N - (1.0-Nselect)*N;
    // Get color of the texture (albedo)
color1 = betterColor;
color2 = color1;
// todo: parametrise color1_to_color2
// Init colors
vec4 ambient_color = vec4(0.0, 0.0, 0.0, 0.0);
vec4 diffuse_color = vec4(0.0, 0.0, 0.0, 0.0);
vec4 specular_color = vec4(0.0, 0.0, 0.0, 0.0);
vec4 final_color;
// todo: allow multiple light, define lights on viewvox or subscene
int nlights = 1;
for (int i=0; i<nlights; i++)
{{
// Get light direction (make sure to prevent zero devision)
vec3 L = normalize(view_ray); //lightDirs[i];
float lightEnabled = float( length(L) > 0.0 );
L = normalize(L+(1.0-lightEnabled));
// Calculate lighting properties
float lambertTerm = clamp( dot(N,L), 0.0, 1.0 );
vec3 H = normalize(L+V); // Halfway vector
float specularTerm = pow( max(dot(H,N),0.0), u_shininess);
// Calculate mask
float mask1 = lightEnabled;
// Calculate colors
ambient_color += mask1 * u_ambient; // * gl_LightSource[i].ambient;
diffuse_color += mask1 * lambertTerm;
specular_color += mask1 * specularTerm * u_specular;
}}
// Calculate final color by componing different components
final_color = color2 * ( ambient_color + diffuse_color) + specular_color;
final_color.a = color2.a;
// Done
return final_color;
}}
// for some reason, this has to be the last function in order for the
// filters to be inserted in the correct place...
void main() {{
vec3 farpos = v_farpos.xyz / v_farpos.w;
vec3 nearpos = v_nearpos.xyz / v_nearpos.w;
// Calculate unit vector pointing in the view direction through this
// fragment.
view_ray = normalize(farpos.xyz - nearpos.xyz);
// Compute the distance to the front surface or near clipping plane
float distance = dot(nearpos-v_position, view_ray);
distance = max(distance, min((-0.5 - v_position.x) / view_ray.x,
(u_shape.x - 0.5 - v_position.x) / view_ray.x));
distance = max(distance, min((-0.5 - v_position.y) / view_ray.y,
(u_shape.y - 0.5 - v_position.y) / view_ray.y));
//distance = max(distance, min((-0.5 - v_position.z) / view_ray.z,
// (u_shape.z - 0.5 - v_position.z) / view_ray.z));
// Now we have the starting position on the front surface
vec3 front = v_position + view_ray * distance;
// Decide how many steps to take
int nsteps = int(-distance / u_relative_step_size + 0.5);
if( nsteps < 1 )
discard;
// Get starting location and step vector in texture coordinates
vec3 step = ((v_position - front) / u_shape) / nsteps;
vec3 start_loc = front / u_shape;
// For testing: show the number of steps. This helps to establish
// whether the rays are correctly oriented
//gl_FragColor = vec4(0.0, nsteps / 3.0 / u_shape.x, 1.0, 1.0);
//return;
{before_loop}
vec3 loc = start_loc;
int iter = 0;
float discard_ratio = 1.0 / (u_high_discard_filter_value - u_low_discard_filter_value);
float low_discard_ratio = 1.0 / u_low_discard_filter_value;
for (iter=0; iter<nsteps; iter++)
{{
// Get sample color
vec4 color;
if (u_filter_size == 1)
color = $sample(u_volumetex, loc);
else {{
color = movingAverageFilter_line_of_sight(loc, step);
}}
if (u_use_gaussian_filter==1) {{
vec4 temp_color;
vec3 direction;
if (u_gaussian_filter_size == 5){{
// horizontal
direction = vec3(1., 0., 0.);
temp_color = Gaussian_5(color, loc, direction);
// vertical
direction = vec3(0., 1., 0.);
temp_color = Gaussian_5(temp_color, loc, direction);
// depth
direction = vec3(0., 0., 1.);
temp_color = Gaussian_5(temp_color, loc, direction);
}}
if (u_gaussian_filter_size == 9){{
// horizontal
direction = vec3(1., 0., 0.);
temp_color = Gaussian_9(color, loc, direction);
// vertical
direction = vec3(0., 1., 0.);
temp_color = Gaussian_9(temp_color, loc, direction);
// depth
direction = vec3(0., 0., 1.);
temp_color = Gaussian_9(temp_color, loc, direction);
}}
if (u_gaussian_filter_size == 13){{
// horizontal
direction = vec3(1., 0., 0.);
temp_color = Gaussian_13(color, loc, direction);
// vertical
direction = vec3(0., 1., 0.);
temp_color = Gaussian_13(temp_color, loc, direction);
// depth
direction = vec3(0., 0., 1.);
temp_color = Gaussian_13(temp_color, loc, direction);
}}
color = temp_color;
}}
float val = color.g;
// To force activating the uniform - this should be done differently
float density_factor = u_density_factor;
if (u_filter_type == 1) {{
// Get rid of very strong signal values
if (val > u_high_discard_filter_value)
{{
val = 0.;
}}
// Don't consider noisy values
//if (val < u_volume_mean - 3*u_volume_std)
if (val < u_low_discard_filter_value)
{{
val = 0.;
}}
if (u_low_discard_filter_value == u_high_discard_filter_value)
{{
if (u_low_discard_filter_value != 0.)
{{
val *= low_discard_ratio;
}}
}}
else {{
val -= u_low_discard_filter_value;
val *= discard_ratio;
}}
}}
else {{
if (val > u_high_discard_filter_value)
{{
val = 0.;
}}
if (val < u_low_discard_filter_value)
{{
val = 0.;
}}
}}
{in_loop}
// Advance location deeper into the volume
loc += step;
}}
{after_loop}
//gl_FragColor = edge_detection(gl_FragColor, loc, step, u_shape);
/* Set depth value - from visvis TODO
int iter_depth = int(maxi);
// Calculate end position in world coordinates
vec4 position2 = vertexPosition;
position2.xyz += ray*shape*float(iter_depth);
// Project to device coordinates and set fragment depth
vec4 iproj = gl_ModelViewProjectionMatrix * position2;
iproj.z /= iproj.w;
gl_FragDepth = (iproj.z+1.0)/2.0;
*/
}}
""" # noqa
MIP_SNIPPETS = dict(
before_loop="""
float maxval = -99999.0; // The maximum encountered value
int maxi = 0; // Where the maximum value was encountered
""",
in_loop="""
if( val > maxval ) {
maxval = val;
maxi = iter;
}
""",
after_loop="""
// Refine search for max value
loc = start_loc + step * (float(maxi) - 0.5);
for (int i=0; i<10; i++) {
maxval = max(maxval, $sample(u_volumetex, loc).g);
loc += step * 0.1;
}
    if (maxval > u_high_discard_filter_value || maxval < u_low_discard_filter_value)
    {
        maxval = 0.;
    }
// Color is associated to voxel intensity
// Moment 0
if (u_color_method == 0) {
gl_FragColor = $cmap(maxval);
}
// Moment 1
else if (u_color_method == 1) {
gl_FragColor = $cmap(loc.y);
gl_FragColor.a = maxval;
}
// Color is associated to RGB cube
else if (u_color_method == 2) {
gl_FragColor.r = loc.y;
gl_FragColor.g = loc.z;
gl_FragColor.b = loc.x;
gl_FragColor.a = maxval;
}
// Color by sigma values
else if (u_color_method == 3) {
if ( (maxval < (u_volume_mean + (3.0 * u_volume_std))) )
{
gl_FragColor = vec4(0., 0., 1., maxval);
}
// < 3 sigmas
if ( (maxval >= (u_volume_mean + (3.0 * u_volume_std))) &&
(maxval < (u_volume_mean + (4.0 * u_volume_std))) )
{
gl_FragColor = vec4(0., 1., 0., maxval);
}
if ( (maxval >= (u_volume_mean + (4.0 * u_volume_std))) &&
(maxval < (u_volume_mean + (5.0 * u_volume_std))) )
{
gl_FragColor = vec4(1., 0., 0., maxval);
}
if ( (maxval >= (u_volume_mean + (5.0 * u_volume_std))) )
{
gl_FragColor = vec4(1., 1., 1., maxval);
}
}
else {
// Moment 2
// TODO: verify implementation of MIP-mom2.
gl_FragColor = $cmap((maxval * ((maxval - loc.y) * (maxval - loc.y))) / maxval);
}
""",
)
MIP_FRAG_SHADER = FRAG_SHADER.format(**MIP_SNIPPETS)
LMIP_SNIPPETS = dict(
before_loop="""
float maxval = -99999.0; // The maximum encountered value
float local_maxval = -99999.0; // The local maximum encountered value
int maxi = 0; // Where the maximum value was encountered
int local_maxi = 0; // Where the local maximum value was encountered
bool local_max_found = false;
""",
in_loop="""
if( val > u_threshold && !local_max_found ) {
local_maxval = val;
local_maxi = iter;
local_max_found = true;
}
if( val > maxval) {
maxval = val;
maxi = iter;
}
""",
after_loop="""
if (!local_max_found) {
local_maxval = maxval;
local_maxi = maxi;
}
// Refine search for max value
loc = start_loc + step * (float(local_maxi) - 0.5);
for (int i=0; i<10; i++) {
local_maxval = max(local_maxval, $sample(u_volumetex, loc).g);
loc += step * 0.1;
}
if (local_maxval > u_high_discard_filter_value) {
local_maxval = 0.;
}
if (local_maxval < u_low_discard_filter_value) {
local_maxval = 0.;
}
// Color is associated to voxel intensity
if (u_color_method == 0) {
gl_FragColor = $cmap(local_maxval);
gl_FragColor.a = local_maxval;
}
// Color is associated to redshift/velocity
else {
gl_FragColor = $cmap(loc.y);
gl_FragColor.a = local_maxval;
}
""",
)
LMIP_FRAG_SHADER = FRAG_SHADER.format(**LMIP_SNIPPETS)
TRANSLUCENT_SNIPPETS = dict(
before_loop="""
vec4 integrated_color = vec4(0., 0., 0., 0.);
float mom0 = 0.;
float mom1 = 0.;
        float ratio = 1.0 / float(nsteps); // final average (currently unused)
float a1 = 0.;
float a2 = 0.;
""",
in_loop="""
float alpha;
// Case 1: Color is associated to voxel intensity
if (u_color_method == 0) {
/*color = $cmap(val);
a1 = integrated_color.a;
a2 = val * density_factor * (1 - a1);
alpha = max(a1 + a2, 0.001);
integrated_color *= a1 / alpha;
integrated_color += color * a2 / alpha;*/
color = $cmap(val);
a1 = integrated_color.a;
a2 = val * density_factor * (1 - a1);
alpha = max(a1 + a2, 0.001);
integrated_color *= a1 / alpha;
integrated_color += color * a2 / alpha;
}
else{
// Case 2: Color is associated to redshift/velocity
if (u_color_method == 1) {
color = $cmap(loc.y);
a1 = integrated_color.a;
a2 = val * density_factor * (1 - a1);
alpha = max(a1 + a2, 0.001);
integrated_color *= a1 / alpha;
integrated_color.rgb += color.rgb * a2 / alpha;
}
// Case 3: Color is associated to RGB cube
else {
if (u_color_method == 2){
color.r = loc.y;
color.g = loc.z;
color.b = loc.x;
a1 = integrated_color.a;
a2 = val * density_factor * (1 - a1);
alpha = max(a1 + a2, 0.001);
integrated_color *= a1 / alpha;
integrated_color.rgb += color.rgb * a2 / alpha;
}
// Case 4: Mom2
// TODO: Finish implementation of mom2 (not correct in its present form).
else {
// mom0
a1 = mom0;
a2 = val * density_factor * (1 - a1);
alpha = max(a1 + a2, 0.001);
mom0 *= a1 / alpha;
mom0 += val * a2 / alpha;
// mom1
a1 = mom1;
a2 = val * density_factor * (1 - a1);
alpha = max(a1 + a2, 0.001);
mom1 *= a1 / alpha;
mom1 += loc.y * a2 / alpha;
}
}
}
integrated_color.a = alpha;
// stop integrating if the fragment becomes opaque
if( alpha > 0.99 ){
iter = nsteps;
}
""",
after_loop="""
if (u_color_method != 3){
gl_FragColor = integrated_color;
}
else {
gl_FragColor = $cmap((mom0 * (mom0-mom1 * mom0-mom1)) / mom0);
}
""",
)
TRANSLUCENT_FRAG_SHADER = FRAG_SHADER.format(**TRANSLUCENT_SNIPPETS)
TRANSLUCENT2_SNIPPETS = dict(
before_loop="""
vec4 integrated_color = vec4(0., 0., 0., 0.);
        float ratio = 1.0 / float(nsteps); // final average (currently unused)
""",
in_loop="""
float alpha;
// Case 1: Color is associated to voxel intensity
if (u_color_method == 0) {
color = $cmap(val);
integrated_color = (val * density_factor + integrated_color.a * (1 - density_factor)) * color;
alpha = integrated_color.a;
//alpha = a1+a2;
// integrated_color *= a1 / alpha;
// integrated_color += color * a2 / alpha;
}
else{
// Case 2: Color is associated to redshift/velocity
if (u_color_method == 1) {
color = $cmap(loc.y);
float a1 = integrated_color.a;
float a2 = val * density_factor * (1 - a1);
alpha = max(a1 + a2, 0.001);
integrated_color *= a1 / alpha;
integrated_color.rgb += color.rgb * a2 / alpha;
}
// Case 3: Color is associated to RGB cube
else {
color.r = loc.x;
color.g = loc.z;
color.b = loc.y;
float a1 = integrated_color.a;
float a2 = val * density_factor * (1 - a1);
alpha = max(a1 + a2, 0.001);
integrated_color *= a1 / alpha;
integrated_color.rgb += color.rgb * a2 / alpha;
}
}
integrated_color.a = alpha;
// stop integrating if the fragment becomes opaque
if( alpha > 0.99 ){
iter = nsteps;
}
""",
after_loop="""
gl_FragColor = integrated_color;
""",
)
TRANSLUCENT2_FRAG_SHADER = FRAG_SHADER.format(**TRANSLUCENT2_SNIPPETS)
ADDITIVE_SNIPPETS = dict(
before_loop="""
vec4 integrated_color = vec4(0., 0., 0., 0.);
""",
in_loop="""
color = $cmap(val);
integrated_color = 1.0 - (1.0 - integrated_color) * (1.0 - color);
""",
after_loop="""
gl_FragColor = integrated_color;
""",
)
ADDITIVE_FRAG_SHADER = FRAG_SHADER.format(**ADDITIVE_SNIPPETS)
ISO_SNIPPETS = dict(
before_loop="""
vec4 color3 = vec4(0.0); // final color
vec3 dstep = 1.5 / u_shape; // step to sample derivative
gl_FragColor = vec4(0.0);
""",
in_loop="""
if (val > u_threshold-0.2) {
// Take the last interval in smaller steps
vec3 iloc = loc - step;
for (int i=0; i<10; i++) {
val = $sample(u_volumetex, iloc).g;
if (val > u_threshold) {
color = $cmap(val);
gl_FragColor = calculateColor(color, iloc, dstep);
iter = nsteps;
break;
}
iloc += step * 0.1;
}
}
""",
after_loop="""
""",
)
ISO_FRAG_SHADER = FRAG_SHADER.format(**ISO_SNIPPETS)
MINIP_SNIPPETS = dict(
before_loop="""
float maxval = -99999.0; // maximum encountered
float minval = 99999.0; // The minimum encountered value
int mini = 0; // Where the minimum value was encountered
""",
in_loop="""
if( val > maxval ) {
maxval = val;
}
if( val < minval ) {
minval = val;
mini = iter;
}
""",
after_loop="""
// Refine search for min value
loc = start_loc + step * (float(mini) - 0.5);
for (int i=0; i<10; i++) {
minval = min(minval, $sample(u_volumetex, loc).g);
loc += step * 0.1;
}
    if (minval > u_high_discard_filter_value || minval < u_low_discard_filter_value)
    {
        minval = 0.;
    }
// Color is associated to voxel intensity
if (u_color_method == 0) {
gl_FragColor = $cmap(minval);
//gl_FragColor.a = minval;
}
else{
// Color is associated to redshift/velocity
if (u_color_method == 1) {
gl_FragColor = $cmap(loc.y);
//if (minval == 0)
gl_FragColor.a = 1-minval;
}
// Color is associated to RGB cube
else {
if (u_color_method == 2) {
gl_FragColor.r = loc.y;
gl_FragColor.g = loc.z;
gl_FragColor.b = loc.x;
gl_FragColor.a = minval;
}
// Color by sigma values
else if (u_color_method == 3) {
if ( (1-minval < (u_volume_mean + (3.0 * u_volume_std))) )
{
gl_FragColor = vec4(0., 0., 1., 1-minval);
}
// < 3 sigmas
if ( (1-minval >= (u_volume_mean + (3.0 * u_volume_std))) &&
(1-minval < (u_volume_mean + (4.0 * u_volume_std))) )
{
gl_FragColor = vec4(0., 1., 0., 1-minval);
}
if ( (1-minval >= (u_volume_mean + (4.0 * u_volume_std))) &&
(1-minval < (u_volume_mean + (5.0 * u_volume_std))) )
{
gl_FragColor = vec4(1., 0., 0., 1-minval);
}
if ( (1-minval >= (u_volume_mean + (5.0 * u_volume_std))) )
{
gl_FragColor = vec4(1., 1., 1., 1-minval);
}
}
// Case 4: Mom2
// TODO: verify implementation of MIP-mom2.
else {
gl_FragColor = $cmap((minval * ((minval - loc.y) * (minval - loc.y))) / minval);
}
}
}
""",
)
MINIP_FRAG_SHADER = FRAG_SHADER.format(**MINIP_SNIPPETS)
frag_dict = {
'mip': MIP_FRAG_SHADER,
'lmip': LMIP_FRAG_SHADER,
'iso': ISO_FRAG_SHADER,
'avip': TRANSLUCENT_FRAG_SHADER,
'minip': MINIP_FRAG_SHADER,
'translucent2': TRANSLUCENT2_FRAG_SHADER,
'additive': ADDITIVE_FRAG_SHADER,
}
# _interpolation_template = """
# #include "misc/spatial-filters.frag"
# vec4 texture_lookup_filtered(vec2 texcoord) {
# if(texcoord.x < 0.0 || texcoord.x > 1.0 ||
# texcoord.y < 0.0 || texcoord.y > 1.0) {
# discard;
# }
# return %s($texture, $shape, texcoord);
# }"""
#
# _texture_lookup = """
# vec4 texture_lookup(vec2 texcoord) {
# if(texcoord.x < 0.0 || texcoord.x > 1.0 ||
# texcoord.y < 0.0 || texcoord.y > 1.0) {
# discard;
# }
# return texture2D($texture, texcoord);
# }"""
class RenderVolumeVisual(Visual):
""" Displays a 3D Volume
Parameters
----------
vol : ndarray
The volume to display. Must be ndim==3.
clim : tuple of two floats | None
The contrast limits. The values in the volume are mapped to
black and white corresponding to these values. Default maps
between min and max.
method : {'mip', 'avip', 'additive', 'iso'}
The render method to use. See corresponding docs for details.
Default 'mip'.
threshold : float
        The threshold to use for the isosurface render method. By default
the mean of the given volume is used.
relative_step_size : float
The relative step size to step through the volume. Default 0.8.
Increase to e.g. 1.5 to increase performance, at the cost of
quality.
cmap : str
Colormap to use.
emulate_texture : bool
Use 2D textures to emulate a 3D texture. OpenGL ES 2.0 compatible,
but has lower performance on desktop platforms.
"""
def __init__(self, vol, clim=None, method='mip', threshold=None,
relative_step_size=0.8, cmap='grays',
emulate_texture=False, color_scale='linear',
filter_type = 0, filter_size = 1,
use_gaussian_filter = False, gaussian_filter_size=9,
density_factor=0.01, color_method='Moment 0', log_scale=0,
interpolation='linear'):
tex_cls = TextureEmulated3D if emulate_texture else Texture3D
# Storage of information of volume
self._vol_shape = ()
self._clim = None
self._need_vertex_update = True
# Set the colormap
self._cmap = get_colormap(cmap)
# Create gloo objects
self._vertices = VertexBuffer()
self._texcoord = VertexBuffer(
np.array([
[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[1, 1, 0],
[0, 0, 1],
[1, 0, 1],
[0, 1, 1],
[1, 1, 1],
], dtype=np.float32))
# # load 'float packed rgba8' interpolation kernel
# # to load float interpolation kernel use
# # `load_spatial_filters(packed=False)`
# kernel, self._interpolation_names = load_spatial_filters()
#
# fun = [Function(_interpolation_template % n)
# for n in self._interpolation_names]
#
# self._interpolation_names = [n.lower()
# for n in self._interpolation_names]
#
# self._interpolation_fun = dict(zip(self._interpolation_names, fun))
# self._interpolation_names.sort()
# self._interpolation_names = tuple(self._interpolation_names)
#
# print self._interpolation_fun
#
# # overwrite "nearest" and "bilinear" spatial-filters
# # with "hardware" interpolation _data_lookup_fn
# self._interpolation_fun['nearest'] = Function(_texture_lookup)
# self._interpolation_fun['bilinear'] = Function(_texture_lookup)
#
# if interpolation not in self._interpolation_names:
# raise ValueError("interpolation must be one of %s" %
# ', '.join(self._interpolation_names))
#
# self._interpolation = interpolation
# check texture interpolation
# if self._interpolation == 'bilinear':
# self._interpolation = 'linear'
# else:
# self._interpolation = 'nearest'
self._tex = tex_cls((10, 10, 10), interpolation=interpolation,
wrapping='clamp_to_edge')
# self._tex = tex_cls((10, 10, 10), interpolation='linear',
# wrapping='clamp_to_edge')
# Create program
Visual.__init__(self, vcode=VERT_SHADER, fcode="")
self.shared_program['u_volumetex'] = self._tex
self.shared_program['a_position'] = self._vertices
self.shared_program['a_texcoord'] = self._texcoord
self._draw_mode = 'triangle_strip'
self._index_buffer = IndexBuffer()
        # Do not cull faces, so the cuboid is still drawn even when the
        # camera is inside the volume (both front and back faces are
        # rendered).
        self.set_gl_state('translucent', cull_face=False)
# Set data
self.set_data(vol, clim)
# Set params
self.method = method
self.relative_step_size = relative_step_size
#self.color_scale = color_scale
# self.data_min = self._clim[0]
# self.data_max = self._clim[1]
# moving_box_filter (=1 means no filter)
self.filter_type = filter_type
self.filter_size = filter_size
# 3D gaussian filter
self.use_gaussian_filter = use_gaussian_filter
self.gaussian_filter_size = gaussian_filter_size
self.log_scale = log_scale
self.density_factor = density_factor
self.color_method = color_method
self.threshold = threshold if (threshold is not None) else vol.mean()
# print ("threshold", self.threshold)
self.freeze()
def set_data(self, vol, clim=None):
""" Set the volume data.
Parameters
----------
vol : ndarray
The 3D volume.
clim : tuple | None
Colormap limits to use. None will use the min and max values.
"""
# Check volume
if not isinstance(vol, np.ndarray):
raise ValueError('Volume visual needs a numpy array.')
if not ((vol.ndim == 3) or (vol.ndim == 4 and vol.shape[-1] <= 4)):
raise ValueError('Volume visual needs a 3D image.')
# Handle clim
if clim is not None:
clim = np.array(clim, float)
if not (clim.ndim == 1 and clim.size == 2):
raise ValueError('clim must be a 2-element array-like')
self._clim = tuple(clim)
if self._clim is None:
self._clim = np.nanmin(vol), np.nanmax(vol)
# Apply clim
vol = np.flipud(np.array(vol, dtype='float32', copy=False))
if self._clim[1] == self._clim[0]:
if self._clim[0] != 0.:
vol *= 1.0 / self._clim[0]
else:
vol -= self._clim[0]
vol /= self._clim[1] - self._clim[0]
# Deal with nan
if np.isnan(vol).any():
vol = np.nan_to_num(vol)
self.high_discard_filter_value = self._clim[1]
self.low_discard_filter_value = self._clim[0]
self.volume_mean = np.mean(vol)
self.volume_std = np.std(vol)
#self.volume_madfm = self.madfm(vol)
# Apply to texture
print ("min:", np.min(vol), "max:", np.max(vol))
self._tex.set_data(vol) # will be efficient if vol is same shape
self.shared_program['u_shape'] = (vol.shape[2], vol.shape[1], vol.shape[0])
self.shared_program['u_resolution'] = (1/vol.shape[2], 1/vol.shape[1], 1/vol.shape[0])
shape = vol.shape[:3]
if self._vol_shape != shape:
self._vol_shape = shape
self._need_vertex_update = True
self._vol_shape = shape
# Get some stats
self._kb_for_texture = np.prod(self._vol_shape) / 1024
@property
def interpolation(self):
""" Current interpolation function.
"""
return self._tex.interpolation
@interpolation.setter
def interpolation(self, interpolation):
# set interpolation technique
self._tex.interpolation = interpolation
@property
def clim(self):
""" The contrast limits that were applied to the volume data.
Settable via set_data().
"""
return self._clim
@property
def cmap(self):
return self._cmap
@cmap.setter
def cmap(self, cmap):
self._cmap = get_colormap(cmap)
self.shared_program.frag['cmap'] = Function(self._cmap.glsl_map)
self.update()
@property
def method(self):
"""The render method to use
Current options are:
* avip: voxel colors are blended along the view ray until
the result is opaque.
        * mip: maximum intensity projection. Cast a ray and display the
maximum value that was encountered.
* additive: voxel colors are added along the view ray until
the result is saturated.
* iso: isosurface. Cast a ray until a certain threshold is
          encountered. At that location, lighting calculations are
performed to give the visual appearance of a surface.
"""
return self._method
@method.setter
def method(self, method):
# Check and save
known_methods = list(frag_dict.keys())
if method not in known_methods:
raise ValueError('Volume render method should be in %r, not %r' %
(known_methods, method))
self._method = method
# Get rid of specific variables - they may become invalid
if 'u_threshold' in self.shared_program:
self.shared_program['u_threshold'] = None
self.shared_program.frag = frag_dict[method]
self.shared_program.frag['sampler_type'] = self._tex.glsl_sampler_type
self.shared_program.frag['sample'] = self._tex.glsl_sample
self.shared_program.frag['cmap'] = Function(self._cmap.glsl_map)
self.update()
@property
def color_method(self):
"""The way color is associated with voxel
Current options are:
* regular: Color is associated to voxel intensity (defined by the VR method)
* velocity/redshit: Color is associated to depth coordinate
and alpha to voxel intensity (defined by the VR method)
"""
return self._color_method
@color_method.setter
def color_method(self, color_method):
if color_method == 'Moment 0':
self._color_method = 0
elif color_method == 'Moment 1':
self._color_method = 1
elif color_method == 'rgb_cube':
self._color_method = 2
elif color_method == 'Sigmas':
self._color_method = 3
else:
self._color_method = 4
# print ("color_method", self._color_method)
self.shared_program['u_color_method'] = int(self._color_method)
self.update()
@property
def threshold(self):
""" The threshold value to apply for the isosurface render method.
Also used for the lmip transfer function.
"""
return self._threshold
@threshold.setter
def threshold(self, value):
self._threshold = float(value)
if 'u_threshold' in self.shared_program:
self.shared_program['u_threshold'] = self._threshold
self.update()
@property
def color_scale(self):
return self._color_scale
@color_scale.setter
def color_scale(self, color_scale):
if (color_scale == 'linear'):
self._color_scale = 0
else:
self._color_scale = 1
self.shared_program['u_color_scale'] = int(self._color_scale)
self.update()
@property
def log_scale(self):
return self._log_scale
@log_scale.setter
def log_scale(self, log_scale):
self._log_scale = int(log_scale)
#self.shared_program['u_log_scale'] = int(self._log_scale)
self.update()
@property
def data_min(self):
return self._data_min
@data_min.setter
def data_min(self, data_min):
self._data_min = 0.
self.shared_program['u_data_min'] = float(self._data_min)
self.update()
@property
def data_max(self):
return self._data_max
@data_max.setter
def data_max(self, data_max):
self._data_max = 0.
self.shared_program['u_data_max'] = float(self._data_max)
self.update()
@property
def moving_box_filter(self):
return self._moving_box_filter
@moving_box_filter.setter
    def moving_box_filter(self, moving_box_filter):
        self._moving_box_filter = int(moving_box_filter)
        self.shared_program['u_moving_box_filter'] = int(self._moving_box_filter)
        self.update()
@property
def volume_mean(self):
return self._volume_mean
@volume_mean.setter
def volume_mean(self, volume_mean):
self._volume_mean = float(volume_mean)
self.shared_program['u_volume_mean'] = self._volume_mean
print ("self._volume_mean", self._volume_mean)
self.update()
@property
def volume_std(self):
return self._volume_std
@volume_std.setter
def volume_std(self, volume_std):
self._volume_std = float(volume_std)
self.shared_program['u_volume_std'] = self._volume_std
print("self._volume_std", self._volume_std)
self.update()
@property
def volume_madfm(self):
return self._volume_madfm
@volume_madfm.setter
def volume_madfm(self, volume_madfm):
self._volume_madfm = float(volume_madfm)
self._volume_madfm -= self._clim[0]
self._volume_madfm /= self._clim[1] - self._clim[0]
self.shared_program['u_volume_madfm'] = self._volume_madfm
self.update()
@property
def filter_size(self):
return self._filter_size
@filter_size.setter
def filter_size(self, filter_size):
self._filter_size = int(filter_size)
self.shared_program['u_filter_size'] = int(self._filter_size)
self.shared_program['u_filter_arm'] = int(np.floor(self._filter_size/2))
self.shared_program['u_filter_coeff'] = float(1/self._filter_size)
self.update()
@property
def filter_type(self):
return self._filter_type
@filter_type.setter
def filter_type(self, filter_type):
if filter_type == 'Rescale':
self._filter_type = 1
else:
self._filter_type = 0
self.shared_program['u_filter_type'] = int(self._filter_type)
self.update()
@property
def use_gaussian_filter(self):
return self._use_gaussian_filter
@use_gaussian_filter.setter
def use_gaussian_filter(self, use_gaussian_filter):
# print ("use_gaussian_filter", use_gaussian_filter)
self._use_gaussian_filter = int(use_gaussian_filter)
self.shared_program['u_use_gaussian_filter'] = int(self._use_gaussian_filter)
self.update()
@property
def gaussian_filter_size(self):
return self._gaussian_filter_size
@gaussian_filter_size.setter
def gaussian_filter_size(self, gaussian_filter_size):
self._gaussian_filter_size = int(gaussian_filter_size)
self.shared_program['u_gaussian_filter_size'] = int(self._gaussian_filter_size)
self.update()
@property
def high_discard_filter_value(self):
return self._high_discard_filter_value
@high_discard_filter_value.setter
def high_discard_filter_value(self, high_discard_filter_value):
self._high_discard_filter_value = float(high_discard_filter_value)
self._high_discard_filter_value -= self._clim[0]
self._high_discard_filter_value /= self._clim[1] - self._clim[0]
self.shared_program['u_high_discard_filter_value'] = self._high_discard_filter_value
self.update()
@property
def low_discard_filter_value(self):
return self._low_discard_filter_value
@low_discard_filter_value.setter
def low_discard_filter_value(self, low_discard_filter_value):
self._low_discard_filter_value = float(low_discard_filter_value)
self._low_discard_filter_value -= self._clim[0]
self._low_discard_filter_value /= self._clim[1] - self._clim[0]
self.shared_program['u_low_discard_filter_value'] = self._low_discard_filter_value
self.update()
@property
def density_factor(self):
return self._density_factor
@density_factor.setter
def density_factor(self, density_factor):
self._density_factor = float(density_factor)
self.shared_program['u_density_factor'] = self._density_factor
self.update()
@property
def relative_step_size(self):
""" The relative step size used during raycasting.
Larger values yield higher performance at reduced quality. If
set > 2.0 the ray skips entire voxels. Recommended values are
between 0.5 and 1.5. The amount of quality degradation depends
on the render method.
"""
return self._relative_step_size
@relative_step_size.setter
def relative_step_size(self, value):
value = float(value)
if value < 0.1:
raise ValueError('relative_step_size cannot be smaller than 0.1')
self._relative_step_size = value
self.shared_program['u_relative_step_size'] = value
    def _create_vertex_data(self):
        """ Create and set positions and texture coords from the given shape
        The eight corner vertices of the cuboid are drawn as a single
        14-index triangle strip covering all six faces.
        """
shape = self._vol_shape
# Get corner coordinates. The -0.5 offset is to center
# pixels/voxels. This works correctly for anisotropic data.
x0, x1 = -0.5, shape[2] - 0.5
y0, y1 = -0.5, shape[1] - 0.5
z0, z1 = -0.5, shape[0] - 0.5
pos = np.array([
[x0, y0, z0],
[x1, y0, z0],
[x0, y1, z0],
[x1, y1, z0],
[x0, y0, z1],
[x1, y0, z1],
[x0, y1, z1],
[x1, y1, z1],
], dtype=np.float32)
"""
6-------7
/| /|
4-------5 |
| | | |
| 2-----|-3
|/ |/
0-------1
"""
# Order is chosen such that normals face outward; front faces will be
# culled.
indices = np.array([2, 6, 0, 4, 5, 6, 7, 2, 3, 0, 1, 5, 3, 7],
dtype=np.uint32)
# Apply
self._vertices.set_data(pos)
self._index_buffer.set_data(indices)
def _compute_bounds(self, axis, view):
return 0, self._vol_shape[axis]
def _prepare_transforms(self, view):
trs = view.transforms
view.view_program.vert['transform'] = trs.get_transform()
view_tr_f = trs.get_transform('visual', 'document')
view_tr_i = view_tr_f.inverse
view.view_program.vert['viewtransformf'] = view_tr_f
view.view_program.vert['viewtransformi'] = view_tr_i
def _prepare_draw(self, view):
if self._need_vertex_update:
self._create_vertex_data()
    def madfm(self, volume):
        # Median absolute deviation from the median, scaled to estimate sigma.
        # As defined in Whiting, M. T. "DUCHAMP: a 3D source finder for spectral-lines data", MNRAS, 2012.
        return np.median(np.abs(volume - np.median(volume))) * 1.4826042
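        # For reference: 1.4826042 is approximately 1 / Phi^-1(0.75), the
        # factor converting a Gaussian MAD into a standard-deviation estimate.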
RenderVolume = create_visual_node(RenderVolumeVisual)
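# Minimal usage sketch (assumes a vispy SceneCanvas and a 3D numpy array
# named `cube`; the names here are illustrative, not part of this module):
#
#   canvas = scene.SceneCanvas(keys='interactive', show=True)
#   view = canvas.central_widget.add_view()
#   vol = RenderVolume(cube, parent=view.scene, method='mip',
#                      cmap='grays', color_method='Moment 0')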
def get_interpolation_fun():
    # Interpolation helpers are currently disabled (see the commented-out
    # spatial-filter code in RenderVolumeVisual.__init__).
    raise NotImplementedError('interpolation helpers are not available')
| 33.252964 | 110 | 0.567297 | 19,046 | 0.377313 | 0 | 0 | 9,387 | 0.185962 | 0 | 0 | 36,163 | 0.716411 |
ef703db82c659a484347e75656e30bf7c5cabb9f | 854 | py | Python | data/transcoder_evaluation_gfg/python/TILING_WITH_DOMINOES.py | mxl1n/CodeGen | e5101dd5c5e9c3720c70c80f78b18f13e118335a | [
"MIT"
]
| 241 | 2021-07-20T08:35:20.000Z | 2022-03-31T02:39:08.000Z | data/transcoder_evaluation_gfg/python/TILING_WITH_DOMINOES.py | mxl1n/CodeGen | e5101dd5c5e9c3720c70c80f78b18f13e118335a | [
"MIT"
]
| 49 | 2021-07-22T23:18:42.000Z | 2022-03-24T09:15:26.000Z | data/transcoder_evaluation_gfg/python/TILING_WITH_DOMINOES.py | mxl1n/CodeGen | e5101dd5c5e9c3720c70c80f78b18f13e118335a | [
"MIT"
]
| 71 | 2021-07-21T05:17:52.000Z | 2022-03-29T23:49:28.000Z | # Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
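# Counts domino tilings of a 3 x n board:
#   A[i] = tilings of a full 3 x i board,
#   B[i] = tilings of a 3 x i board with one corner cell removed,
# giving the recurrences A[i] = A[i-2] + 2*B[i-1] and B[i] = A[i-1] + B[i-2].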
def f_gold ( n ) :
A = [ 0 ] * ( n + 1 )
B = [ 0 ] * ( n + 1 )
A [ 0 ] = 1
A [ 1 ] = 0
B [ 0 ] = 0
B [ 1 ] = 1
for i in range ( 2 , n + 1 ) :
A [ i ] = A [ i - 2 ] + 2 * B [ i - 1 ]
B [ i ] = A [ i - 1 ] + B [ i - 2 ]
return A [ n ]
#TOFILL
if __name__ == '__main__':
param = [
(29,),
(13,),
(25,),
(65,),
(27,),
(42,),
(19,),
(50,),
(59,),
(13,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param))) | 21.897436 | 64 | 0.456674 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 221 | 0.258782 |
ef70dbcac1c09bceb1d774bc2a9dbf1cb0f819da | 3,342 | py | Python | make3d.py | BritishMuseumDH/scaffold3D | 314ee4ca5f52304c89fac71b8f293774341d6278 | [
"CC0-1.0"
]
| 4 | 2017-03-30T09:41:21.000Z | 2021-10-01T09:18:02.000Z | make3d.py | BritishMuseumDH/scaffold3D | 314ee4ca5f52304c89fac71b8f293774341d6278 | [
"CC0-1.0"
]
| null | null | null | make3d.py | BritishMuseumDH/scaffold3D | 314ee4ca5f52304c89fac71b8f293774341d6278 | [
"CC0-1.0"
]
| 3 | 2018-01-30T09:18:34.000Z | 2019-06-16T17:55:24.000Z | import os
import shutil
from textwrap import dedent
import argparse
import subprocess
parser = argparse.ArgumentParser(description='This is a script to create 3D model folder structure')
parser.add_argument('-p', '--project', help='3D project name', required=True)
parser.add_argument('-wd', '--wd', help='Working directory', required=True)
args = parser.parse_args()
os.chdir(args.wd)
root_dir = os.path.join(args.wd, args.project)
if os.path.exists(root_dir) and os.listdir(root_dir):
# If the path already exists and it is not empty, raise an error
err_msg = '''
{directory} already exists and it is not empty.
Please try a different project name or root directory.
'''.format(directory=root_dir)
raise IOError(000, dedent(err_msg))
else:
    os.makedirs(root_dir, exist_ok=True)  # Create the root directory (it may exist but be empty)
dirnames = ('images', 'masks', 'models')
# Create all the other directories
for item in dirnames:
path3D = os.path.join(args.wd, args.project, item)
os.mkdir(path3D)
def write_readme(project, root_dir):
readme_path = os.path.join(root_dir, "README.md")
readme_content = get_readme_text(project)
with open(readme_path, 'w') as readme_file:
readme_file.write(readme_content)
def get_readme_text(project):
readme_text = """
[](http://creativecommons.org/licenses/by-sa/4.0/)
[](http://orcid.org/0000-0002-0246-2335)
# {project}
3D data for recreation of a British Museum object.
# LICENSE
The contents of this repository are licensed under CC-BY-NC-SA
# Credits
Photographs and models by {author} <{author_email}>, Digital Humanities Lead, British Museum
Copyright Trustees of the British Museum
""".format(
project=project,
license=license,
author=get_user_name_from_git() or "My Name",
author_email=get_user_email_from_git() or "My email.")
return dedent(readme_text)
def get_user_name_from_git():
try:
git_process = subprocess.Popen(['git', 'config', 'user.name'], stdout=subprocess.PIPE
, stderr=subprocess.PIPE)
user_name, err = git_process.communicate()
return user_name.rstrip().decode()
except OSError:
return None
def get_user_email_from_git():
try:
git_process = subprocess.Popen(['git', 'config', 'user.email'], stdout=subprocess.PIPE
, stderr=subprocess.PIPE)
user_email, err = git_process.communicate()
return user_email.rstrip().decode()
except OSError:
return None
def write_license(root_dir):
license_path = os.path.join(root_dir, "LICENSE.md")
shutil.copy(os.path.join(os.path.dirname(os.path.realpath(__file__)),'scaffold3D/templates/LICENSE.md'), license_path)
return None
def write_ignore(root_dir):
ignore_path = os.path.join(root_dir, ".gitignore")
shutil.copy(os.path.join(os.path.dirname(os.path.realpath(__file__)),'scaffold3D/templates/.gitignore'), ignore_path)
return None
write_readme(args.project, root_dir)
write_license(root_dir)
write_ignore(root_dir)
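# Example invocation (paths are illustrative):
#   python make3d.py --project my_object --wd /tmp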
| 35.178947 | 153 | 0.680132 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,212 | 0.362657 |
ef724512834ae77b7fe4b2559fc21eb34f4025f5 | 5,476 | py | Python | integration/experiment/common_args.py | avilcheslopez/geopm | 35ad0af3f17f42baa009c97ed45eca24333daf33 | [
"MIT",
"BSD-3-Clause"
]
| null | null | null | integration/experiment/common_args.py | avilcheslopez/geopm | 35ad0af3f17f42baa009c97ed45eca24333daf33 | [
"MIT",
"BSD-3-Clause"
]
| null | null | null | integration/experiment/common_args.py | avilcheslopez/geopm | 35ad0af3f17f42baa009c97ed45eca24333daf33 | [
"MIT",
"BSD-3-Clause"
]
| null | null | null | #
# Copyright (c) 2015 - 2022, Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
'''
Common command line arguments for experiments.
'''
def setup_run_args(parser):
"""Add common arguments for all run scripts:
--output-dir --node-count --trial-count --cool-off-time
"""
add_output_dir(parser)
add_node_count(parser)
add_trial_count(parser)
add_cool_off_time(parser)
add_enable_traces(parser)
add_enable_profile_traces(parser)
def add_output_dir(parser):
parser.add_argument('--output-dir', dest='output_dir',
action='store', default='.',
help='location for reports and other output files')
def add_trial_count(parser):
parser.add_argument('--trial-count', dest='trial_count',
action='store', type=int, default=2,
help='number of experiment trials to launch')
def add_node_count(parser):
parser.add_argument('--node-count', dest='node_count',
default=1, type=int,
help='number of nodes to use for launch')
def add_show_details(parser):
parser.add_argument('--show-details', dest='show_details',
action='store_true', default=False,
help='print additional data analysis details')
def add_min_power(parser):
parser.add_argument('--min-power', dest='min_power',
action='store', type=float, default=None,
help='bottom power limit for the sweep')
def add_max_power(parser):
parser.add_argument('--max-power', dest='max_power',
action='store', type=float, default=None,
help='top power limit for the sweep')
def add_step_power(parser):
parser.add_argument('--step-power', dest='step_power',
action='store', type=float, default=10,
help='increment between power steps for sweep')
def add_label(parser):
parser.add_argument('--label', action='store', default="APP",
help='name of the application to use for plot titles')
def add_min_frequency(parser):
parser.add_argument('--min-frequency', dest='min_frequency',
action='store', type=float, default=None,
help='bottom core frequency limit for the sweep')
def add_max_frequency(parser):
parser.add_argument('--max-frequency', dest='max_frequency',
action='store', type=float, default=None,
help='top core frequency limit for the sweep')
def add_step_frequency(parser):
parser.add_argument('--step-frequency', dest='step_frequency',
action='store', type=float, default=None,
help='increment between core frequency steps for sweep')
def add_run_max_turbo(parser):
parser.add_argument("--run-max-turbo", dest="run_max_turbo",
action='store_true', default=False,
help='add extra run to the experiment at maximum turbo frequency')
def add_use_stdev(parser):
parser.add_argument('--use-stdev', dest='use_stdev',
action='store_true', default=False,
help='use standard deviation instead of min-max spread for error bars')
def add_cool_off_time(parser):
parser.add_argument('--cool-off-time', dest='cool_off_time',
action='store', type=float, default=60,
help='wait time between workload execution for cool down')
def add_agent_list(parser):
parser.add_argument('--agent-list', dest='agent_list',
action='store', type=str, default=None,
help='comma separated list of agents to be compared')
def add_enable_traces(parser):
parser.add_argument('--enable-traces', dest='enable_traces',
action='store_const', const=True,
default=False, help='Enable trace generation')
parser.add_argument('--disable-traces', dest='enable_traces',
action='store_const', const=False,
help='Disable trace generation')
def add_disable_traces(parser):
add_enable_traces(parser)
parser.set_defaults(enable_traces=True)
def add_enable_profile_traces(parser):
parser.add_argument('--enable-profile-traces', dest='enable_profile_traces',
action='store_const', const=True,
default=False, help='Enable profile trace generation')
parser.add_argument('--disable-profile-traces', dest='enable_profile_traces',
action='store_const', const=False,
help='Disable profile trace generation')
def add_disable_profile_traces(parser):
add_enable_profile_traces(parser)
parser.set_defaults(enable_profile_traces=True)
def add_performance_metric(parser):
parser.add_argument('--performance-metric', dest='performance_metric',
action='store', type=str, default='FOM',
help='metric to use for performance (default: figure of merit)')
def add_analysis_dir(parser):
parser.add_argument('--analysis-dir', dest='analysis_dir',
action='store', default='analysis',
help='directory for output analysis files')
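# A minimal usage sketch (hypothetical caller; the import path mirrors this
# file's location under integration/experiment/):
#
#   import argparse
#   from experiment import common_args
#
#   parser = argparse.ArgumentParser()
#   common_args.setup_run_args(parser)      # --output-dir, --node-count, --trial-count, ...
#   common_args.add_show_details(parser)
#   args = parser.parse_args(['--node-count', '4', '--trial-count', '3'])
#   print(args.node_count, args.trial_count, args.show_details)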
| 36.506667 | 95 | 0.611578 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,982 | 0.361943 |
ef7445ff5f0dbb5c8605cf8ad95f6ecbcc7f04a5 | 10,466 | py | Python | Source Code.py | S-AlMazrouai/Chess-king-last-position-finder | 609346ee660655bd7aa2afe486c4ad074e3d33fc | [
"MIT"
]
| 1 | 2022-02-04T11:14:13.000Z | 2022-02-04T11:14:13.000Z | Source Code.py | S-AlMazrouai/Chess-king-last-position-finder | 609346ee660655bd7aa2afe486c4ad074e3d33fc | [
"MIT"
]
| null | null | null | Source Code.py | S-AlMazrouai/Chess-king-last-position-finder | 609346ee660655bd7aa2afe486c4ad074e3d33fc | [
"MIT"
]
| null | null | null | import requests
import json
import chess
import chess.pgn
import io
from collections import Counter
from openpyxl import load_workbook
import numpy
#API link: https://api.chess.com/pub/player/{user}/games/{year}/{month}/pgn
baseUrl='https://api.chess.com/pub/player/'
users=['Mazrouai'] # You can add one or more chess.com profiles; make sure to type each profile name exactly as it is written on chess.com.
for user in users:
years = range(2000,2022) # Add the range of the years you want this code to analyze (from,to).
months = ['01','02','03','04','05','06','07','08','09','10','11','12'] # Keep this as it is.
count=0
winBlackKingPos=[] # Array to collect King position in the games won as black.
lossBlackKingPos=[] # Array to collect King position in the games lost as black.
winWhiteKingPos=[] # Array to collect King position in the games won as white.
lossWhiteKingPos=[] # Array to collect King position in the games lost as white.
    for i in years: # For loop to iterate through the specified range of years.
        for j in months: # For loop to iterate through the months of each year.
extension=str(str(user)+'/games/'+str(i)+'/'+str(j)+'/pgn') # Creates the extension for the baseUrl.
url=baseUrl+extension # Merges baseUrl with the extension.
response = requests.get(url)
pgns = io.StringIO(response.text)
if response.text == '': # Checks if pgn file is empty and if it is, it jumps to the next PGN file.
continue
while True:
games=chess.pgn.read_game(pgns) # Reads PGN file.
if games == None: # Checks if there is a game available to read inside the pgn file, if not it exits this loop to the next PGN file.
break
if games.headers['Black'] == '?': # Checks if game data is missing, if true it jumps to the next game.
continue
if games.headers['White'] == '?': # Checks if game data is missing, if true it jumps to the next game.
continue
board=games.board()
for move in games.mainline_moves(): # Moves to the last position in the game.
board.push(move)
            map=board.piece_map() # Collects the final positions of the pieces on the board.
if games.headers['Black']== str(user): # Checks if the specified user is playing as black
for x,y in map.items():
if str(y) == 'k':
                        kingPos=chess.square_name(x) # Gets the black king position.
if games.headers['Result'] == '0-1': # Collects the king position in the games won as black.
winBlackKingPos.append(kingPos)
if games.headers['Result'] == '1-0': # Collects the king position in the games lost as black.
lossBlackKingPos.append(kingPos)
            else: # If the condition is not satisfied, the specified user is playing as white.
for x,y in map.items():
if str(y) == 'K':
                        kingPos=chess.square_name(x) # Gets the white king position.
if games.headers['Result'] == '0-1': # Collects the king position in the games lost as white.
lossWhiteKingPos.append(kingPos)
if games.headers['Result'] == '1-0': # Collects the king position in the games won as white.
winWhiteKingPos.append(kingPos)
gamesWon=len(winBlackKingPos)+len(winWhiteKingPos) # Counts # of won games.
gamesLost=len(lossBlackKingPos)+len(lossWhiteKingPos) # Counts # of lost games.
gamesPlayed=gamesWon+gamesLost # counts # of analyzed games
print("Player: ",user) # Prints the name of the player.
print("games played: ",gamesPlayed) # Prints # of won games.
print("games won: ",gamesWon) # Prints # of lost games.
print("games lost: ",gamesLost) # Prints # of analyzed games
print("\n")
winWhiteKingPosCount= Counter(winWhiteKingPos) # Creates a list with a position and the number of times the wining white king was in that position.
lossWhiteKingPosCount= Counter(lossWhiteKingPos) # Creates a list with a position and the number of times the losing white king was in that position.
winBlackKingPosCount= Counter(winBlackKingPos) # Creates a list with a position and the number of times the wining black king was in that position.
lossBlackKingPosCount= Counter(lossBlackKingPos) # Creates a list with a position and the number of times the losing black king was in that position.
posCounts=[winWhiteKingPosCount,lossWhiteKingPosCount,winBlackKingPosCount,lossBlackKingPosCount] # Merges the lists into an array.
    Data = load_workbook(filename='Data_Template.xlsx') # Opens the template excel file.
    sheets=Data.sheetnames # Records the sheet names.
cellLetters=[] # Array for the cell letters in the excel file.
cellNum=[] # Array for the cell numbers in the excel file.
    for j in range(8): # Generates the column letters of the excel cells this code will fill.
for i in range(66, 74):
cellLetters.append(chr(i))
    for i in [10,9,8,7,6,5,4,3]: # Generates the row numbers of the excel cells this code will fill.
for j in range(8):
cellNum.append(i)
    c = 0 # This variable will be used as an index to go through the lists that have been merged into an array.
    for sheet in sheets: # For loop to iterate through the excel sheets.
workSheet=Data[sheet]
        posCount=posCounts[c] # Gets the position list.
c=c+1
for i in range(64): # For loop to go through the sheet cells and assign them the king recurrence value.
cell=str(cellLetters[i])+str(cellNum[i]) # Constructs the excel cell name (e.g. A12).
            count=posCount[chess.square_name(i)] # Gets the king position count that corresponds to the cell name.
if count== 0: # If king recurrence equals 0 set the cell to None.
count= None
            workSheet[cell] = count # Sets the cell value to the king recurrence count for that position.
Data.save(filename='Data_'+str(user)+'.xlsx') # Saves the data into a new xlsx file
| 87.94958 | 228 | 0.391458 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,286 | 0.313969 |
ef798894676eb6e1574bdd64f329e5761081b579 | 1,464 | py | Python | src/FileHandler.py | mohitgupta07/tipr-1st-assgn | be2f742de69dbf7c300410c230eaa541d8d0eab8 | [
"MIT"
]
| null | null | null | src/FileHandler.py | mohitgupta07/tipr-1st-assgn | be2f742de69dbf7c300410c230eaa541d8d0eab8 | [
"MIT"
]
| null | null | null | src/FileHandler.py | mohitgupta07/tipr-1st-assgn | be2f742de69dbf7c300410c230eaa541d8d0eab8 | [
"MIT"
]
| 1 | 2019-02-15T16:44:02.000Z | 2019-02-15T16:44:02.000Z | import csv
import numpy as np
def loadData(path=r'dolphins.csv',type='data',hasHeaders=False):
with open(path,'r') as f:
reader=csv.reader(f,delimiter=',')
if hasHeaders:headers=next(reader)#If Headers are provided
data=list(reader)
#print(data[0])
if type=='data':
data=[list(map(float,tmp[0].strip().split())) for tmp in data]
data=np.array(data).astype(float)
else:
data=[list(map(int,tmp[0].strip().split()))[0] for tmp in data]
#print(data)
data=np.array(data).astype(int)
#print(data.shape)
#print(data[:3])
return data
def loadDataX(path=r'dolphins.csv',hasHeaders=None):
import pandas as pd
x=pd.read_csv(path,sep=' ',header=hasHeaders)
return x
def loadDataText(path=r'twitter.txt',type='data',hasHeaders=False):
with open(path,'r') as f:
reader=csv.reader(f,delimiter=',')
if hasHeaders:headers=next(reader)#If Headers are provided
data=list(reader)
#print(data[0])
#return data
if type=='data':
data=[list(map(str,tmp[0].strip().split(' '))) for tmp in data]
#data=np.array(data).astype(str)
else:
data=[list(map(int,tmp[0].strip().split()))[0] for tmp in data]
#print(data)
data=np.array(data).astype(int)
#print(data.shape)
#print(data[:3])
return data
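# A minimal usage sketch (hypothetical file names; any type other than 'data'
# takes the integer-label branch above):
#
#   X = loadData('dolphins.csv', type='data') # float feature matrix
#   y = loadData('dolphins_label.csv', type='label') # int label vector
#   print(X.shape, y.shape)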
| 32.533333 | 75 | 0.57377 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 300 | 0.204918 |
ef7a8845996df8b5695e947565280cd90979fd06 | 1,840 | py | Python | load.py | ontocord/create_pii_dataset | bfd246a8f8b443e238f260f307bd41d86adc3136 | [
"Apache-2.0"
]
| null | null | null | load.py | ontocord/create_pii_dataset | bfd246a8f8b443e238f260f307bd41d86adc3136 | [
"Apache-2.0"
]
| null | null | null | load.py | ontocord/create_pii_dataset | bfd246a8f8b443e238f260f307bd41d86adc3136 | [
"Apache-2.0"
]
| null | null | null | # coding=utf-8
# Copyright, 2021 Ontocord, LLC, All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datasets import load_dataset
import os
import re
import itertools
from re import finditer
import glob
import random
import fsspec
import json
from random import randint, choice
from collections import Counter
import spacy, itertools
import langid
from nltk.corpus import stopwords
import fsspec, os, gzip
from faker import Faker
from faker.providers import person, company, geo, address
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer, MarianMTModel, AutoTokenizer, pipeline
import torch
import sys
from tqdm import tqdm
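# Note: each from_pretrained() call below rebinds `model` and `tokenizer`, so
# the three Marian loads mainly pre-download/cache those checkpoints; only the
# final M2M100 model/tokenizer pair stays bound for later use.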
model_name = 'Helsinki-NLP/opus-mt-en-hi'
model = MarianMTModel.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
model_name = 'Helsinki-NLP/opus-mt-en-ar'
model = MarianMTModel.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
model_name = 'Helsinki-NLP/opus-mt-en-zh'
model = MarianMTModel.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
nlp = spacy.load('en_core_web_lg')
stopwords_en = set(stopwords.words('english'))
| 33.454545 | 112 | 0.807609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 754 | 0.409783 |
ef7b9c110a5e75cb118c0870480aa130248a1ef2 | 1,432 | py | Python | piWriters/graphiteSender.py | shackledtodesk/piWeather | e0b4b4ded7ebd01fe7844807de6949a83aa3913f | [
"Apache-2.0"
]
| null | null | null | piWriters/graphiteSender.py | shackledtodesk/piWeather | e0b4b4ded7ebd01fe7844807de6949a83aa3913f | [
"Apache-2.0"
]
| null | null | null | piWriters/graphiteSender.py | shackledtodesk/piWeather | e0b4b4ded7ebd01fe7844807de6949a83aa3913f | [
"Apache-2.0"
]
| null | null | null | ## Send data to a Graphite/Carbon Server
import traceback
import sys, time, socket, datetime
from datetime import datetime
class piSender:
carbon_server = '127.0.0.1'
carbon_port = 2003
station = "pi2wu"
def __init__(self, config):
if config.has_option('graphite','server'):
self.carbon_server = config.get('graphite','server')
if config.has_option('graphite','port'):
            self.carbon_port = config.getint('graphite','port') # socket.connect needs an int port
if config.has_option('general','station'):
self.station = config.get('general','station')
self.sock = socket.socket()
try:
self.sock.connect( (self.carbon_server, self.carbon_port) )
except socket.error:
raise SystemExit("Could not connect to carbon server.")
def genReq(self, inTime, data):
        # the trailing 'Z' marks UTC, so convert with timegm rather than the local-time mktime
        epoch_time = calendar.timegm(datetime.strptime(inTime, "%Y-%m-%dT%H:%M:%S.%fZ").timetuple())
lines = []
for name, value in data.items():
lines.append("%s.%s %s %d" %
(self.station, name, value, epoch_time))
message = '\n'.join(lines) + '\n'
return message
def sendReq(self, req):
try:
            self.sock.sendall(req.encode('utf-8')) # Python 3 sockets require bytes
except Exception:
traceback.print_exc()
e = sys.exc_info()[0]
return e
else:
return "ok"
| 31.130435 | 96 | 0.561453 | 1,293 | 0.902933 | 0 | 0 | 0 | 0 | 0 | 0 | 247 | 0.172486 |
ef7bcd09324b9928a69882ed98ebf81368b8f074 | 650 | bzl | Python | rules/nunjucks.bzl | MHASgamer/js-samples | 56ec58ca91a48da09b62a390efb8c5095a54d06f | [
"Apache-2.0"
]
| 535 | 2015-08-06T18:30:26.000Z | 2022-03-30T08:34:35.000Z | rules/nunjucks.bzl | HazelAndrews/js-samples | b7e5e7ceb411830fa1bf6b125d938e1c415edc21 | [
"Apache-2.0"
]
| 915 | 2015-08-05T05:36:03.000Z | 2022-03-28T17:08:42.000Z | rules/nunjucks.bzl | HazelAndrews/js-samples | b7e5e7ceb411830fa1bf6b125d938e1c415edc21 | [
"Apache-2.0"
]
| 790 | 2015-08-05T13:51:17.000Z | 2022-03-31T16:56:59.000Z | def nunjucks(name, outs, template, json, data, mode):
# this genrule moves the generated html file to the correct location
# nunjucks-cli does not allow specifying a single output file
# nunjucks-cli converts the .njk to a .html by default
native.genrule(
name = name,
srcs = data,
tools = ["//rules:nunjucks", "//rules:nunjucks-cli", "//rules:json"],
cmd = "$(location //rules:nunjucks) $(location {template}) $(location {json}) $(location //rules:nunjucks-cli) {mode} $@".format(template = template, json = json, mode = mode),
outs = outs,
visibility = ["//visibility:public"],
)
| 50 | 184 | 0.630769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 373 | 0.573846 |
ef7c2ae59e8e02d4a104708b8f76dca033259df6 | 479 | py | Python | src/main/python/bots/b_jira.py | jceaser/gcmd_bot | 2b2ae0631d69d9f95a3a23b04e12a4467a116ffa | [
"MIT"
]
| null | null | null | src/main/python/bots/b_jira.py | jceaser/gcmd_bot | 2b2ae0631d69d9f95a3a23b04e12a4467a116ffa | [
"MIT"
]
| null | null | null | src/main/python/bots/b_jira.py | jceaser/gcmd_bot | 2b2ae0631d69d9f95a3a23b04e12a4467a116ffa | [
"MIT"
]
| null | null | null |
from b_bot import BBot
from rand_str import *
class BJira(BBot):
def __init__(self):
BBot.__init__(self)
self.responses = RandomString(
[
"Looks like you were talking about ticket"
, "You might find that ticket at"
, "Try"
])
def action(self, cmd, id, found):
url = "https://bugs.earthdata.nasa.gov/browse/%s" % found.group(1)
return "%s %s" % (self.responses.pick(), url) | 28.176471 | 74 | 0.553236 | 423 | 0.88309 | 0 | 0 | 0 | 0 | 0 | 0 | 128 | 0.267223 |
ef7c66788463fc4b72dcc5b29d43203643002b12 | 2,295 | py | Python | preprocessors/neg_sample_from_run.py | felipemoraes/pyNeuIR | 5256857387c8fe57d28167e42077ad1dcade1983 | [
"MIT"
]
| 4 | 2019-11-09T19:46:44.000Z | 2022-01-03T07:58:20.000Z | preprocessors/neg_sample_from_run.py | felipemoraes/pyNeuIR | 5256857387c8fe57d28167e42077ad1dcade1983 | [
"MIT"
]
| null | null | null | preprocessors/neg_sample_from_run.py | felipemoraes/pyNeuIR | 5256857387c8fe57d28167e42077ad1dcade1983 | [
"MIT"
]
| 3 | 2019-06-18T12:31:49.000Z | 2020-11-22T08:35:07.000Z | """Samples negative pairs from run."""
import argparse
from utils import load_qrels, load_run
import numpy as np
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-run')
parser.add_argument('-qrel')
parser.add_argument('-p', type=int)
parser.add_argument('-n', type=int)
parser.add_argument('-top', type=int)
parser.add_argument('-o')
args = parser.parse_args()
qrels, _ = load_qrels(args.qrel)
n = args.n
p = args.p
top = args.top
f = open(args.o, "w")
np.random.seed(230)
    # Here documents with the lowest label (e.g., 0) should be ranked lower
previous_qid = "-"
c = 0
for line in open(args.run):
qid, _, doc, _, score, _ = line.strip().split()
if qid not in qrels:
continue
# Get relevants for query
rels = set()
if qid != previous_qid and previous_qid != "-" :
c += 1
for label in qrels[qid]:
if label != "0":
for doc in qrels[qid][label]:
rels.add(doc)
# Get top 100 non rel docs
top_nonrels = [doc for doc in sorted(results, key=results.get, reverse=True) if doc not in rels][:top]
rels = list(rels)
if len(top_nonrels) == 0:
results = {doc: float(score)}
break
if len(rels) > p:
rels = np.random.choice(rels, p, replace=False)
for rel_doc in rels:
if len(top_nonrels) < n:
sample_neg_docs = top_nonrels
r = n - len(sample_neg_docs)
for i in range(r):
sample_neg_docs.append(np.random.choice(top_nonrels, 1)[0])
else:
sample_neg_docs = np.random.choice(top_nonrels, n, replace=False)
sample_neg_docs = " ".join(sample_neg_docs)
f.write("{} {} {}\n".format(qid, rel_doc, sample_neg_docs))
results = {doc: float(score)}
elif previous_qid == "-":
results = {doc: float(score)}
else:
results[doc] = float(score)
previous_qid = qid
print(c)
f.close()
if __name__ == "__main__":
main()
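# Example invocation (hypothetical file paths; flags match the argparse setup above):
#   python neg_sample_from_run.py -run bm25.run -qrel qrels.txt -p 2 -n 4 -top 100 -o train_pairs.txt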
| 28.333333 | 114 | 0.525054 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 230 | 0.100218 |
ef7cc313a84b2a9b9ea62469241644f5f1b9560b | 1,123 | py | Python | Search_Algorithms/testing/python_scripts/GridNet.py | JAOP1/GO | 48c0275fd37bb552c0db4b968391a5a95ed6c860 | [
"MIT"
]
| null | null | null | Search_Algorithms/testing/python_scripts/GridNet.py | JAOP1/GO | 48c0275fd37bb552c0db4b968391a5a95ed6c860 | [
"MIT"
]
| null | null | null | Search_Algorithms/testing/python_scripts/GridNet.py | JAOP1/GO | 48c0275fd37bb552c0db4b968391a5a95ed6c860 | [
"MIT"
]
| 2 | 2019-12-12T18:55:35.000Z | 2019-12-12T19:03:35.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
#Unicamente como esta ahorita funciona para un grafo de 5x5.
class NNGrid(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(in_channels = 3, out_channels= 5, kernel_size= 3, padding= 1)
self.conv2 = nn.Conv2d(in_channels = 5, out_channels= 5, kernel_size= 3, padding= 1)
self.conv3 = nn.Conv2d(in_channels = 5, out_channels= 5, kernel_size= 2, padding= 1)
self.fc1 = nn.Linear(180,90)
self.fc2 = nn.Linear(90,45)
self.fc3 = nn.Linear(45,1)
self.drop1 = nn.Dropout()
self.drop2 = nn.Dropout()
def forward(self, x):
#print(x.shape)
x = F.relu(self.conv1(x))
#print(x.shape)
x = F.relu(self.conv2(x))
#print(x.shape)
x = F.relu(self.conv3(x))
#print(x.shape)
x = x.view(-1,180)
#print(x.shape)
x = self.drop1(F.relu(self.fc1(x)))
#print(x.shape)
x = self.drop2(F.relu(self.fc2(x)))
#print(x.shape)
return torch.tanh(self.fc3(x))
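# A minimal smoke test (an added sketch, not part of the original file): with a
# 3-channel 5x5 input, conv3's padding makes the flattened size 5*6*6 = 180,
# matching fc1 above.
if __name__ == '__main__':
    net = NNGrid()
    x = torch.randn(2, 3, 5, 5)  # batch of two 5x5 board encodings
    out = net(x)
    print(out.shape)  # torch.Size([2, 1]); tanh keeps values in (-1, 1)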
| 31.194444 | 92 | 0.577026 | 989 | 0.880677 | 0 | 0 | 0 | 0 | 0 | 0 | 165 | 0.146928 |
ef7d0ee9d64040c9087075b823e521c746835c31 | 3,436 | py | Python | instances/game_instances.py | Napam/MayhemPacman | cbcb3b4a2c83ed920e32748a8aaadb29b19ab5bf | [
"MIT"
]
| 1 | 2021-04-07T12:54:13.000Z | 2021-04-07T12:54:13.000Z | instances/game_instances.py | Napam/MayhemPacman | cbcb3b4a2c83ed920e32748a8aaadb29b19ab5bf | [
"MIT"
]
| null | null | null | instances/game_instances.py | Napam/MayhemPacman | cbcb3b4a2c83ed920e32748a8aaadb29b19ab5bf | [
"MIT"
]
| null | null | null | '''
Module containing the in-game mayhem instances
such as the ship, planets, asteroid objects etc etc...
Written by Naphat Amundsen
'''
import numpy as np
import pygame as pg
import configparser
import sys
import os
sys.path.insert(0,'..')
from classes import spaceship
from classes import planet
from classes import maps
from classes import interface
import user_settings as user_cng
from instances import instance_config as icng
pg.font.init()
w_shape = user_cng.w_shape
w_norm = np.linalg.norm(w_shape)
COLORS = pg.colordict.THECOLORS
# The initial values of the objects
# are mostly just educated guesses
game_map = maps.game_map(
map_shape=(icng.map_shape)
)
minimap = maps.minimap(
gmap=game_map,
w_shape=w_shape,
w_norm=w_norm)
ship = spaceship.spaceship(
pos=(200,200),
init_dir=icng.RIGHT
)
sun = planet.planet(
pos=game_map.center,
init_vel=None,
init_dir=None,
rforce=None
)
earth = planet.rotating_planet(
pos=(game_map.shape[0]/2, 800),
init_vel=[-3,0],
init_dir=[1,0],
r_force=25000,
omega=0.25
)
venus = planet.rotating_planet(
pos=(game_map.shape[0]/2, 2000),
init_vel=[-5,0],
init_dir=[1,0],
r_force=40000,
omega=0.25
)
asteroids = [
planet.rotating_planet(
pos=(3000, 1000),
init_vel=[-8,2],
init_dir=[1,0],
r_force=150000,
omega=0.25
),
planet.rotating_planet(
pos=(1200, 1000),
init_vel=[10,1],
init_dir=[1,0],
r_force=390000,
omega=0.25
),
planet.rotating_planet(
pos=(500, 2000),
init_vel=[2,10],
init_dir=[1,0],
r_force=540000,
omega=0.25
),
planet.rotating_planet(
pos=(6500, 6000),
init_vel=[5,-15],
init_dir=[1,0],
r_force=1500000,
omega=0.5
),
planet.rotating_planet(
pos=(6000, 6000),
init_vel=[-15,1],
init_dir=[1,0],
r_force=1000000,
omega=0.5
),
planet.rotating_planet(
pos=(6000, 500),
init_vel=[-8,-2],
init_dir=[1,0],
r_force=600000,
omega=0.25
),
planet.rotating_planet(
pos=(5000, 2000),
init_vel=[-2,-8],
init_dir=[1,0],
r_force=200000,
omega=0.25
),
planet.rotating_planet(
pos=(game_map.shape[0]/2, 800),
init_vel=[15,0],
init_dir=[1,0],
r_force=590000,
omega=0.25
),
planet.rotating_planet(
pos=(5000, game_map.shape[1]/2),
init_vel=[0,10],
init_dir=[1,0],
r_force=150000,
omega=0.25
),
]
# For convenience
planets = [earth, venus]
all_celestials = planets + asteroids
minimap_colors = [
COLORS['white'],
COLORS['orange'],
COLORS['blue'],
COLORS['green']
]
minimap_sizes = [
1,
int(500/5000*minimap.shape[0]),
int(250/5000*minimap.shape[0]),
1
]
'''Minimap stuff for LAN-mayhem'''
minimap_colors_online = [
COLORS['white'],
COLORS['orange'],
COLORS['blue'],
COLORS['green'],
COLORS['red'],
]
minimap_sizes_online = [
1,
int(500/5000*minimap.shape[0]),
int(250/5000*minimap.shape[0]),
1,
3
]
| 20.093567 | 55 | 0.556752 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 331 | 0.096333 |
ef7dd46d9034574570b5f449e1ddf8eb84731597 | 286 | py | Python | test.py | RoyLQ/Advanced-_TCGAIntegrator | 4767ab74b14e9d7e65e2c1ffe656619ef414148b | [
"MIT"
]
| 2 | 2021-09-14T05:53:16.000Z | 2021-12-01T23:59:18.000Z | test.py | RoyLQ/Advanced-_TCGAIntegrator | 4767ab74b14e9d7e65e2c1ffe656619ef414148b | [
"MIT"
]
| null | null | null | test.py | RoyLQ/Advanced-_TCGAIntegrator | 4767ab74b14e9d7e65e2c1ffe656619ef414148b | [
"MIT"
]
| null | null | null | import sys
import os
simp_path = 'TCGAIntegrator'
abs_path = os.path.abspath(simp_path)
sys.path.append(abs_path)
from TCGAIntegrator import TCGAData as TCGAData
def main():
df = TCGAData.loadData("LGG",mode="Hybird")
print(df.shape)
if __name__ == '__main__':
main()
| 15.888889 | 47 | 0.716783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 0.136364 |
ef7f8e86f21851da0cc13ef9dc3a597eb38daaa9 | 1,649 | py | Python | synergy/conf/global_context.py | mushkevych/scheduler | 8228cde0f027c0025852cb63a6698cdd320838f1 | [
"BSD-3-Clause"
]
| 15 | 2015-02-01T09:20:23.000Z | 2021-04-27T08:46:45.000Z | synergy/conf/global_context.py | mushkevych/scheduler | 8228cde0f027c0025852cb63a6698cdd320838f1 | [
"BSD-3-Clause"
]
| 26 | 2015-01-12T22:28:40.000Z | 2021-07-05T01:22:17.000Z | synergy/conf/global_context.py | mushkevych/scheduler | 8228cde0f027c0025852cb63a6698cdd320838f1 | [
"BSD-3-Clause"
]
| 2 | 2016-07-21T03:02:46.000Z | 2019-10-03T23:59:23.000Z | from synergy.db.model.queue_context_entry import queue_context_entry
from synergy.scheduler.scheduler_constants import PROCESS_GC, TOKEN_GC, PROCESS_MX, TOKEN_WERKZEUG, EXCHANGE_UTILS, \
PROCESS_SCHEDULER, TOKEN_SCHEDULER, QUEUE_UOW_STATUS, QUEUE_JOB_STATUS, PROCESS_LAUNCH_PY, TOKEN_LAUNCH_PY, \
ROUTING_IRRELEVANT
from synergy.supervisor.supervisor_constants import PROCESS_SUPERVISOR, TOKEN_SUPERVISOR
from synergy.db.model.daemon_process_entry import daemon_context_entry
process_context = {
PROCESS_LAUNCH_PY: daemon_context_entry(
process_name=PROCESS_LAUNCH_PY,
classname='',
token=TOKEN_LAUNCH_PY,
routing=ROUTING_IRRELEVANT,
exchange=EXCHANGE_UTILS),
PROCESS_MX: daemon_context_entry(
process_name=PROCESS_MX,
token=TOKEN_WERKZEUG,
classname=''),
PROCESS_GC: daemon_context_entry(
process_name=PROCESS_GC,
token=TOKEN_GC,
classname=''),
PROCESS_SCHEDULER: daemon_context_entry(
process_name=PROCESS_SCHEDULER,
classname='synergy.scheduler.synergy_scheduler.Scheduler.start',
token=TOKEN_SCHEDULER,
queue='',
routing='',
exchange=''),
PROCESS_SUPERVISOR: daemon_context_entry(
process_name=PROCESS_SUPERVISOR,
classname='synergy.supervisor.synergy_supervisor.Supervisor.start',
token=TOKEN_SUPERVISOR),
}
mq_queue_context = {
QUEUE_UOW_STATUS: queue_context_entry(exchange=EXCHANGE_UTILS, queue_name=QUEUE_UOW_STATUS),
QUEUE_JOB_STATUS: queue_context_entry(exchange=EXCHANGE_UTILS, queue_name=QUEUE_JOB_STATUS),
}
timetable_context = {
}
| 35.085106 | 117 | 0.753184 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 121 | 0.073378 |
ef81335057d05cc62a7c03fc8a45db94b745d375 | 1,890 | py | Python | fylesdk/apis/fyle_v3/fyle_v3.py | fylein/fyle-sdk-py | 826f804ec4d94d5f95fb304254a373679a494238 | [
"MIT"
]
| 4 | 2019-05-07T07:38:27.000Z | 2021-09-14T08:39:12.000Z | fylesdk/apis/fyle_v3/fyle_v3.py | snarayanank2/fyle-sdk-py | 826f804ec4d94d5f95fb304254a373679a494238 | [
"MIT"
]
| 3 | 2019-09-23T11:50:31.000Z | 2020-02-10T12:12:10.000Z | fylesdk/apis/fyle_v3/fyle_v3.py | fylein/fyle-sdk-py | 826f804ec4d94d5f95fb304254a373679a494238 | [
"MIT"
]
| 12 | 2019-05-06T09:48:51.000Z | 2020-11-13T10:00:26.000Z | """
Fyle V3 APIs Base Class
"""
from .expenses import Expenses
from .reports import Reports
from .employees import Employees
from .orgs import Orgs
from .reimbursements import Reimbursements
from .cost_centes import CostCenters
from .categories import Categories
from .projects import Projects
from .refunds import Refunds
from .balance_transfers import BalanceTransfers
from .settlements import Settlements
from .advance_requests import AdvanceRequests
from .advances import Advances
from .bank_transactions import BankTransactions
from .trip_requests import TripRequests
from .expense_custom_properties import ExpenseCustomProperties
from .employee_custom_properties import EmployeeCustomProperties
from .advance_request_custom_properties import AdvanceRequestCustomProperties
from .trip_request_custom_properties import TripRequestCustomProperties
class FyleV3:
def __init__(self):
"""
Constructor to Initialize all APIs
"""
# Initialize V3 API Classes
self.expenses = Expenses()
self.reports = Reports()
self.employees = Employees()
self.orgs = Orgs()
self.reimbursements = Reimbursements()
self.cost_centers = CostCenters()
self.categories = Categories()
self.projects = Projects()
self.refunds = Refunds()
self.balance_transfers = BalanceTransfers()
self.settlements = Settlements()
self.advance_requests = AdvanceRequests()
self.advances = Advances()
self.bank_transactions = BankTransactions()
self.trip_requests = TripRequests()
self.expense_custom_properties = ExpenseCustomProperties()
self.employee_custom_properties = EmployeeCustomProperties()
self.advance_request_custom_properties = AdvanceRequestCustomProperties()
self.trip_request_custom_properties = TripRequestCustomProperties()
| 37.8 | 81 | 0.757672 | 1,036 | 0.548148 | 0 | 0 | 0 | 0 | 0 | 0 | 116 | 0.061376 |
ef8179e868198d6a8e03937bb76a29cb988fcda9 | 6,164 | py | Python | 67-2.py | paqul/ALX | 0f397b53f8208df62ed3bc1f63f27a087799eb32 | [
"MIT"
]
| null | null | null | 67-2.py | paqul/ALX | 0f397b53f8208df62ed3bc1f63f27a087799eb32 | [
"MIT"
]
| null | null | null | 67-2.py | paqul/ALX | 0f397b53f8208df62ed3bc1f63f27a087799eb32 | [
"MIT"
]
| null | null | null | from datetime import date as d
#---------------------------HOTEL---------------------------#
def exit_function():
print("Do zobaczenia!")
exit()
def forumarz_rezerwacji(var1, var2, var3, var4, var5, var6, var7, var8, ile_dni, ile_osob, sniadanie, imie):
print("\n"*3)
print("================================")
print("| FORMULARZ REJESTRACYJNY |")
print("|------------------------------|")
print("| Data przybycia: %i%i-%i%i-%i%i%i%i |" % (var7, var8, var5, var6, var1, var2, var3, var4))
print("| Ilość dni pobytu: %3i |" % ile_dni)
print("| Ilość osób: %3i |" % ile_osob)
print("| Śniadania w każdy dzień: %s |" % sniadanie)
print("| Rezerwujący: %15s |" % imie)
print("|------------------------------|")
print("| ŻYCZYMY MIŁEGO POBYTU |")
print("| W NASZYM HOTELU |")
print("================================")
exit_function()
def termin_przybycia():
print("Podaj date przybycia do hotelu w formacie (rrrr-mm-dd) np. 2018-12-07")
while True:
data_przybycia=input("Data przybycia: ")
tab = []
for var in data_przybycia:
try:
var = int(var)
tab.append(var)
except:
myslnik = var
if len(tab) == 8:
dzien = int(("%i" + "%i") % (tab[6], tab[7]))
miesiac = int(("%i" + "%i") % (tab[4], tab[5]))
if dzien > 31 or miesiac > 12:
print("Popełniłeś błąd przy wpisywaniu daty - proszę sprawź to i wpisz jeszcze raz poprawnie!")
termin_przybycia()
else:
try:
if data_przybycia == ("%i%i%i%i-%i%i-%i%i"% (tab[0], tab[1], tab[2], tab[3], tab[4], tab[5], tab[6], tab[7])):
if data_przybycia > str(d.today()):
print("Podana data: %s - została zaakceptowana" % data_przybycia)
return tab[0], tab[1], tab[2], tab[3], tab[4], tab[5], tab[6], tab[7]
else:
print("Podana data jest z przeszłości lub teraźniejszości, podaj ją jeszcze raz najbliższy możliwy termin rezerwacji to jutro!")
except IndexError:
print("Podałeś datę przybycia w niewłaściwym formacie")
else:
print("Podales date w niewłaściwym formacie - spróboj jeszcze raz")
def ilosc_dni():
while True:
try:
ile_dni = int(input("Podaj liczbę dni, przez jaką zostaniesz w hotelu: "))
if ile_dni > 731:
print("Jeżeli chcesz zostać w hotelu powyżej dwóch lat to powinieneś to osobiście ustalić z włąścicielem bezpośrednio podczas pobytu")
print("Wpisz np 14 dni - przyjedź na 2 tygodnie i resztę swojego pobytu ustal z właścicielem")
elif ile_dni <= 0:
print("Dalsze wypełnianie formularza nie ma sensu, skoro nawet 1 dnia nie zostaniesz w hotelu")
while True:
dec = input("Czy chcesz zacząc wypełniać formularz od początku: (t/n)")
dec = dec.lower()
if dec == "t":
main()
elif dec == "n":
exit_function()
else:
print("Wpisałeś nie poprawnie")
else:
return ile_dni
except ValueError:
print("Podaj prosze ilość dni (nie używaj ułamków)")
def ilosc_osob():
while True:
try:
ile_osob = int(input("Podaj ile osob z tobą przyjedzie?, Jeżeli będziesz sam to wpisz \"0\""))
ile_osob += 1
if ile_osob > 100:
print("Jeżeli chcesz zabrać ze sobą regiment wojska to proszę o bezpośredni kontakt z hotelem")
else:
return ile_osob
except ValueError:
print("Podaj prosze ilość osób które przybędą razem z tobą (nie używaj ułamków)")
def sniadanie():
while True:
sniadanie = input("Czy chcesz zamówić dodatkowo śniadanie w hotelu na każdy dzień pobytu: (T/N)")
sniadanie = sniadanie.lower()
if sniadanie == "t":
return "tak"
elif sniadanie == "n":
return "nie"
else:
print("Wpisałeś nie poprawnie")
def imie_fun():
imie = input("Podaj swoje imie: ")
imie_OK = imie #takie szybkie obejscie :)
imie = list(imie)
counter_error = 0
for sprawdz in imie:
if sprawdz.isdigit() == True or sprawdz in "~!@#$%^&*()_+-={}[]'\/,.<>":
counter_error += 1
if counter_error >= 1:
print("Błąd! - Podaj imie bez cyfr oraz znaków innych niż litery w imieniu!")
return imie_fun()
else:
return imie_OK.capitalize()
def rezerwacja():
print("Prosze o podanie natepujacych informacji w celu rezerwacji w naszym hotelu")
[var1, var2, var3, var4, var5, var6, var7, var8] = termin_przybycia()
days = ilosc_dni()
people = ilosc_osob()
breakfest = sniadanie()
name = imie_fun()
return forumarz_rezerwacji(var1, var2, var3, var4, var5, var6, var7, var8, days, people, breakfest, name)
def opishotelu():
print("Nasz hotel wogole jest super i inny \"marketingowy belkot\", \ntak aby zachecic Cie do rezerwacji i wydania pieniedzy na hotel!")
while True:
decyzja=input("Czy napewno chcesz wyjsc z aplikacji? (t/n)")
if decyzja == "t":
exit_function()
elif decyzja == "n":
main()
else:
print("Wpisz \"t\" lub \"n\"")
def main():
print("Witaj w aplikacji Hotelowej!\nCzy chcesz zarezerwować sobie miejsce w Hotelu? (t/n)", end="")
while True:
decyzja=input()
if decyzja == "t":
rezerwacja()
elif decyzja == "n":
opishotelu()
else:
print("Wpisz \"t\" lub \"n\"", end="")
main()
| 39.512821 | 157 | 0.514114 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,442 | 0.390283 |
ef8277ef3ae9c0ea2b164ecadf77b6f20ca717ce | 598 | py | Python | core/mayaScripts/SimpleCube.py | Bernardrouhi/HandFree | fbb9623cba0b8e7eb18649d29465393f06c2b9ee | [
"MIT"
]
| null | null | null | core/mayaScripts/SimpleCube.py | Bernardrouhi/HandFree | fbb9623cba0b8e7eb18649d29465393f06c2b9ee | [
"MIT"
]
| null | null | null | core/mayaScripts/SimpleCube.py | Bernardrouhi/HandFree | fbb9623cba0b8e7eb18649d29465393f06c2b9ee | [
"MIT"
]
| null | null | null | import sys
import maya.standalone
maya.standalone.initialize(name='python')
from maya import cmds
# try:
def run():
# get scene file
file_path = sys.argv[1]
# Open Scene
cmds.file(file_path, open=True)
#-------------- Script --------------
cmds.polyCube( d=10, h=10 , w=10)
cmds.polyCube( d=10, h=10 , w=10)
cmds.polyCube( d=10, h=10 , w=10)
cmds.polyCube( d=10, h=10 , w=10)
cmds.polyCube( d=10, h=10 , w=10)
#------------------------------------
# Save Scene
cmds.file(save=True, force=True)
run()
maya.standalone.uninitialize()
# except Exception as e:
# sys.stdout.write(1) | 19.933333 | 41 | 0.602007 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 174 | 0.29097 |
ef841a52c1f626cc7c84690f06d3bbb17715d9c8 | 3,733 | py | Python | GreedyGRASP/Solver_Greedy.py | HamidL/AMMM_Project | 7679d1c336578464317b8326311c1ab4b69cbf11 | [
"MIT"
]
| null | null | null | GreedyGRASP/Solver_Greedy.py | HamidL/AMMM_Project | 7679d1c336578464317b8326311c1ab4b69cbf11 | [
"MIT"
]
| null | null | null | GreedyGRASP/Solver_Greedy.py | HamidL/AMMM_Project | 7679d1c336578464317b8326311c1ab4b69cbf11 | [
"MIT"
]
| null | null | null |
from GreedyGRASP.Solver import Solver
from GreedyGRASP.Solution import Solution
from GreedyGRASP.LocalSearch import LocalSearch
# Inherits from a parent abstract solver.
class Solver_Greedy(Solver):
def greedyFunctionCost(self, solution, remainCap, busesAssignments):
for busAssi in busesAssignments:
bus = solution.getBuses()[busAssi.bus]
service = solution.getServices()[busAssi.service]
if (remainCap <= bus.getCapacity()):
cost = busAssi.cost + busAssi.cost*(bus.getCapacity()-remainCap)/bus.getCapacity()
else:
cost = busAssi.cost + (busAssi.cost + service.getMinutes()*solution.inputData.CBM) * remainCap / bus.getCapacity()
busAssi.greedyCost = cost
return busesAssignments
def greedyConstruction(self, config, problem):
# get an empty solution for the problem
solution = Solution.createEmptySolution(config, problem)
        # get services and sort them by passenger demand (and number of overlapping services) in descending order
services = problem.getServices()
sortedServices = sorted(services,
key=lambda service: (service.getPassengers(), service.getNumOverlappingServices()),
reverse=True)
elapsedEvalTime = 0
evaluatedCandidates = 0
# for each task taken in sorted order
for service in sortedServices:
serviceId = service.getId()
busesAssignments, driversAssignments = solution.findFeasibleAssignments(serviceId)
remainCap = service.getPassengers()
selBuses = []
while (remainCap > 0 and len(busesAssignments) > 0):
busesAssignments = self.greedyFunctionCost(solution, remainCap, busesAssignments)
busesAssignments = sorted(busesAssignments, key=lambda busAssi: busAssi.greedyCost)
candidate = busesAssignments[0]
if (candidate is None):
solution.makeInfeasible()
break
selBuses.append(candidate)
busesAssignments.remove(candidate)
remainCap -= problem.getBuses()[candidate.bus].getCapacity()
if (remainCap > 0):
solution.makeInfeasible()
break
sortedDriversAssignments = sorted(driversAssignments, key=lambda driverAssi: driverAssi.cost)
if (len(sortedDriversAssignments) < len(selBuses)):
solution.makeInfeasible()
break
for i in range(0,len(selBuses)):
solution.assign(sortedDriversAssignments[i], selBuses[i])
return(solution, elapsedEvalTime, evaluatedCandidates)
def solve(self, config, problem):
self.startTimeMeasure()
self.writeLogLine(float('infinity'), 0)
solution, elapsedEvalTime, evaluatedCandidates = self.greedyConstruction(config, problem)
self.writeLogLine((solution.cost), 1)
localSearch = LocalSearch(config)
solution = localSearch.run(solution)
self.writeLogLine(solution.cost, 1)
avg_evalTimePerCandidate = 0.0
if (evaluatedCandidates != 0):
avg_evalTimePerCandidate = 1000.0 * elapsedEvalTime / float(evaluatedCandidates)
print ('')
print ('Greedy Candidate Evaluation Performance:')
print (' Num. Candidates Eval.', evaluatedCandidates)
print (' Total Eval. Time ', elapsedEvalTime, 's')
print (' Avg. Time / Candidate', avg_evalTimePerCandidate, 'ms')
localSearch.printPerformance()
return(solution)
| 41.477778 | 130 | 0.625234 | 3,560 | 0.953657 | 0 | 0 | 0 | 0 | 0 | 0 | 332 | 0.088937 |
ef854c6d7447ee5fbf75a72fb0ffd6549ac302f6 | 5,654 | py | Python | statslib/_lib/gmodel.py | ashubertt/statslib | 5a35c0d10c3ca44c2d48f329c4f3790c91c385ac | [
"Apache-2.0"
]
| null | null | null | statslib/_lib/gmodel.py | ashubertt/statslib | 5a35c0d10c3ca44c2d48f329c4f3790c91c385ac | [
"Apache-2.0"
]
| 1 | 2021-04-06T10:55:34.000Z | 2021-04-06T10:55:34.000Z | statslib/_lib/gmodel.py | ashubertt/statslib | 5a35c0d10c3ca44c2d48f329c4f3790c91c385ac | [
"Apache-2.0"
]
| null | null | null | import inspect
import math as _math
from copy import deepcopy
import matplotlib.pyplot as _plt
import numpy as np
import pandas as pd
import statsmodels.api as _sm
from statslib._lib.gcalib import CalibType
class GeneralModel:
def __init__(self, gc, DM):
self.gc = deepcopy(gc)
self.DM = deepcopy(DM)
self.calibrator = None
self.fitted = None
self.v_hat = None
self.y0 = None
self.y_hat = None
self.residuals = None
def exog(self, idx):
return self.DM.gX.iloc[idx] if self.DM.gX is not None else None
def endog(self, idx):
return self.DM.dm.v.iloc[idx]
def fit(self, idx, **kwargs):
if self.gc.calib_type is CalibType.sm:
self.calibrator = self.gc.cf(endog=self.endog(idx),
exog=self.exog(idx),
**self.gc.kwargs)
self.fitted = self.calibrator.fit(**kwargs)
if self.gc.calib_type is CalibType.sk:
self.calibrator = self.gc.cf(**self.gc.kwargs)
self.fitted = self.calibrator.fit(self.exog(idx), self.endog(idx))
self.y0 = self.DM.dm.y.iloc[idx].tail(self.DM.f.n)
def forecast(self, idx):
def sumofsq(x, axis=0):
"""Helper function to calculate sum of squares along first axis"""
return np.sum(x ** 2, axis=axis)
self.forecast_index = idx
if 'start' in inspect.signature(self.fitted.predict).parameters:
self.v_hat = self.fitted.predict(
self.exog(idx).index.min(),
self.exog(idx).index.max(),
exog=self.exog(idx))
else:
if self.gc.calib_type is CalibType.sm:
self.v_hat = self.fitted.predict(exog=self.exog(idx))
if self.gc.calib_type is CalibType.sk:
self.v_hat = self.fitted.predict(self.exog(idx))
self.v_hat = pd.Series(self.v_hat, index=self.exog(idx).index).rename('v_hat')
self.y_hat = self.DM.f.inv(self.v_hat, y0=self.y0, idx=self.v_hat.index)
try:
self.residuals = self.DM.dm.loc[self.v_hat.index]['v'].values - self.v_hat.values
sigma2 = 1.0 / self.fitted.nobs * sumofsq(self.residuals)
self.std_residuals = self.residuals / np.sqrt(sigma2)
self.residuals = pd.Series(self.std_residuals, index=self.v_hat.index)
self.std_residuals = pd.Series(self.std_residuals, index=self.v_hat.index)
except Exception:
pass
def plot_diagnostics(self, figsize=(15, 15), drop_names=None):
import math
if drop_names is None:
drop_names = list()
std_resid = self.std_residuals
if std_resid is not None:
fig, axs = _plt.subplots(3, 2, figsize=figsize)
from statslib.utils.plots import get_standard_colors
clrs = get_standard_colors()
std_resid.plot(ax=axs[0, 0], color=clrs[1])
axs[0, 0].hlines(0, self.v_hat.index.min(), self.v_hat.index.max())
axs[0, 0].set_title('Standardized residuals')
axs[0, 1].hist(std_resid.values, density=True)
from scipy.stats import gaussian_kde, norm
kde = gaussian_kde(std_resid)
xlim = (-1.96 * 2, 1.96 * 2)
x = np.linspace(xlim[0], xlim[1])
axs[0, 1].plot(x, kde(x), label="KernelDensityEstimator")
axs[0, 1].plot(x, norm.pdf(x), label="N(0,1)")
axs[0, 1].set_xlim(xlim)
axs[0, 1].legend()
axs[0, 1].set_title("Histogram plus estimated density")
_sm.graphics.qqplot(std_resid.values, line='q', fit=True, ax=axs[1, 0])
axs[1, 0].set_title('Normal QQ Plot')
_sm.graphics.tsa.plot_acf(std_resid, ax=axs[1, 1])
axs[1, 1].set_title('Correlogram')
axs[2, 0].scatter(self.fitted.fittedvalues, self.residuals.values, color='red')
axs[2, 0].hlines(0, min(self.fitted.fittedvalues), max(self.fitted.fittedvalues), color='blue')
axs[2, 0].set_xlabel('fitted')
axs[2, 0].set_ylabel('resid')
axs[2, 0].set_title('Fitted values vs. Residuals')
axs[2, 1].scatter(range(len(self.std_residuals)), self.std_residuals.values, color='red')
axs[2, 1].hlines(0, 0, len(self.std_residuals.values), color='blue')
axs[2, 1].set_xlabel('index')
axs[2, 1].set_ylabel('std_resid')
axs[2, 1].set_title('Index plot of standardized residuals')
_plt.tight_layout()
_plt.show()
print(" ")
L = 2
K = _math.ceil(len([k for k in self.DM.exog_names if k not in drop_names]) / L)
i = j = 0
fig, axs = _plt.subplots(K, L, figsize=(15, 15))
for curve in self.DM.exog_names:
if curve not in drop_names:
x_vals = self.DM.dm_ext[curve].iloc[self.forecast_index].values.tolist()
axs[i, j].scatter(x_vals, self.std_residuals.values)
min_max_x = [x for x in x_vals if not math.isnan(x)]
axs[i, j].hlines(0, min(min_max_x), max(min_max_x), color='blue')
axs[i, j].set_xlabel(curve)
axs[i, j].set_ylabel('std_res')
j += 1
if j % L == 0:
i += 1
j = 0
_plt.suptitle('Standardized Residuals vs. Explanatory Variable')
_plt.tight_layout(pad=3)
_plt.show()
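# A minimal usage sketch (hypothetical gc/DM objects -- a calibrator config and
# design matrix built elsewhere in statslib):
#
#   gm = GeneralModel(gc, DM)
#   gm.fit(idx=range(0, 100))        # calibrate on the first 100 observations
#   gm.forecast(idx=range(100, 120)) # out-of-sample forecast
#   gm.plot_diagnostics()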
| 40.385714 | 107 | 0.561726 | 5,441 | 0.962328 | 0 | 0 | 0 | 0 | 0 | 0 | 394 | 0.069685 |
ef85c06ba18faf8168c199da975507a6176f5a0a | 174 | py | Python | Richard.py | Jpowell10/firstrepo | c41ac4a0526b6e56449df5adaa448091d930f731 | [
"CC0-1.0"
]
| null | null | null | Richard.py | Jpowell10/firstrepo | c41ac4a0526b6e56449df5adaa448091d930f731 | [
"CC0-1.0"
]
| null | null | null | Richard.py | Jpowell10/firstrepo | c41ac4a0526b6e56449df5adaa448091d930f731 | [
"CC0-1.0"
]
| null | null | null | List1 = [1, 2, 3, 4]
List2 = ['I', 'tripped', 'over', 'and', 'hit', 'the', 'floor']
print(List1 + List2)
List3 = List1 + List2
print(List3)
fibs = (0, 1, 2, 3)
print(fibs[3]) | 24.857143 | 62 | 0.563218 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.229885 |
ef86d428f2e17ef9b526fc491dcb0a17513a95ba | 1,581 | py | Python | app.py | Chen-Junbao/MalwareClassification | a2ef045c1e5f1f57ff183bfc6577275b14bf84d2 | [
"MIT"
]
| 4 | 2020-06-17T03:14:47.000Z | 2022-03-29T12:15:33.000Z | app.py | Chen-Junbao/MalwareClassification | a2ef045c1e5f1f57ff183bfc6577275b14bf84d2 | [
"MIT"
]
| 1 | 2020-12-20T03:14:33.000Z | 2021-02-01T17:13:44.000Z | app.py | Chen-Junbao/MalwareClassification | a2ef045c1e5f1f57ff183bfc6577275b14bf84d2 | [
"MIT"
]
| 1 | 2021-03-07T15:43:20.000Z | 2021-03-07T15:43:20.000Z | import os
from flask import Flask, render_template, request, jsonify
from display.predict import predict_file
app = Flask(__name__, static_folder="./display/static", template_folder="./display/templates")
@app.route('/')
def en():
return render_template('index_EN.html')
@app.route('/chs')
def chs():
return render_template('index_CHS.html')
@app.route('/uploader', methods=['POST', 'GET'])
def predict_image():
if request.method == 'POST':
# get uploaded file
file = request.files['file']
# save uploaded file to "files" directory
file.save("./display/files/" + file.filename)
file_type = file.filename.split('.')[-1]
ans = {}
if file_type == "asm":
probability = predict_file("./display/files/" + file.filename, "asm")
ans = {
'fileName': file.filename,
'probabilityLD': probability
}
elif file_type == "bytes":
probability = predict_file("./display/files/" + file.filename, "bytes")
ans = {
'fileName': file.filename,
'probabilityResNet': probability
}
elif file_type == "bmp":
probability = predict_file("./display/files/" + file.filename, "bmp")
ans = {
'fileName': file.filename,
'probabilityResNet': probability
}
return jsonify(ans)
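# Example request (a sketch; assumes the Flask dev server below on its default port 5000):
#   curl -F "file=@sample.asm" http://127.0.0.1:5000/uploader
# which responds with JSON like {"fileName": "sample.asm", "probabilityLD": ...}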
if __name__ == '__main__':
if not os.path.exists('./display/files'):
os.mkdir('./display/files')
app.run(debug=True)
| 29.277778 | 94 | 0.571157 | 0 | 0 | 0 | 0 | 1,231 | 0.778621 | 0 | 0 | 409 | 0.258697 |
ef873aee93350e545e2097a1a737710da0346193 | 886 | py | Python | test_linprog_curvefit.py | drofp/linprog_curvefit | 96ba704edae7cea42d768d7cc6d4036da2ba313a | [
"Apache-2.0"
]
| null | null | null | test_linprog_curvefit.py | drofp/linprog_curvefit | 96ba704edae7cea42d768d7cc6d4036da2ba313a | [
"Apache-2.0"
]
| 3 | 2019-11-22T08:04:18.000Z | 2019-11-26T06:55:36.000Z | test_linprog_curvefit.py | drofp/linprog_curvefit | 96ba704edae7cea42d768d7cc6d4036da2ba313a | [
"Apache-2.0"
]
| null | null | null | import unittest
from ortools.linear_solver import pywraplp
class TestLinprogCurvefit(unittest.TestCase):
def setUp(self):
linprog_curvefit = __import__('linprog_curvefit')
self.generate_variables = linprog_curvefit._generate_variables
self.ErrorDefinition = linprog_curvefit.ErrorDefinition
def test_generate_variables_2PointsLinearCorrectVarCnt(self):
points = ((0, 0), (1.5, 3))
coeff_ranges = ((-10, 10), (-10, 10))
solver = pywraplp.Solver(
'polynomial_solver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)
err_def = self.ErrorDefinition.SUM_ABS_DEV
expected_num_of_vars = 6
variables = self.generate_variables(
solver, points=points, coeff_ranges=coeff_ranges, err_max=10000,
error_def=err_def)
self.assertEqual(len(variables), expected_num_of_vars)
| 40.272727 | 80 | 0.700903 | 824 | 0.930023 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.041761 |
ef87a851b0ff397ab056489c49ee4d54f1a8b8b0 | 14,278 | py | Python | uno_ct_v3.py | simple-circuit/Component-Curve-Tracer | 3842f1b0054230325f55296cbc88628b3f88fa88 | [
"MIT"
]
| 1 | 2021-08-04T03:08:07.000Z | 2021-08-04T03:08:07.000Z | uno_ct_v3.py | simple-circuit/Component-Curve-Tracer | 3842f1b0054230325f55296cbc88628b3f88fa88 | [
"MIT"
]
| null | null | null | uno_ct_v3.py | simple-circuit/Component-Curve-Tracer | 3842f1b0054230325f55296cbc88628b3f88fa88 | [
"MIT"
]
| 1 | 2021-08-29T14:05:42.000Z | 2021-08-29T14:05:42.000Z | # Uno PWM bipolar curve tracer app by simple-circuit 12-22-19
# rev 3 1-13-20
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
from tkinter import font
import numpy as np
import serial
root = Tk()
default_font = font.nametofont("TkDefaultFont")
default_font.configure(size=9)
canvas = Canvas(root)
root.geometry("720x540")
root.title('Uno Curve Tracer in Python')
canvas.grid(column=0, row=0, sticky=(N,W,E,S))
root.grid_columnconfigure(0, weight=1)
root.grid_rowconfigure(0, weight=1)
xtvar = BooleanVar()
xtvar.set(False)
contvar = BooleanVar()
contvar.set(False)
crampvar = BooleanVar()
crampvar.set(False)
#ser = serial.Serial('/dev/ttyACM0', baudrate=115200, timeout = 1)
ser = serial.Serial('COM10', baudrate=115200, timeout = 1)
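# Serial command protocol, as inferred from the write() calls in this file:
# '@cct0000' / '@mea0000' start a sine / ramp sweep and stream back 256 samples,
# '@dacNNN' sets the step-generator DAC, '@magNNN' the sine amplitude,
# '@posNNN' / '@negNNN' the positive / negative peak levels,
# '@frqNN' the sweep period divider, and '@adcN' reads back ADC channel N.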
def globalVar():
global x
global y
global tms
global mkv,mki,mkt
global dv,di,dt
x = np.zeros((10,256))
y = np.zeros((10,256))
tms = np.zeros((10,256))
mkv = np.zeros((1))
mki = np.zeros((1))
mkt = np.zeros((1))
dv = np.zeros((1))
di = np.zeros((1))
dt = np.zeros((1))
def plotxy(xtra = 0):
if xtra == 0:
canvas.delete('currentline')
canvas.delete('currentcursor')
a = int(curvar.get())
n = int(trcvar.get())
m = int(mtrcvar.get())
if m == 1:
m=n+1
n2 = n
else:
n2 = 0
if xtvar.get() == False:
if xtra == 0:
for j in range(n2,m):
for i in range(255):
canvas.create_line((256+x[j][i]*22.07, 256-y[j][i]*51.2, 256+x[j][i+1]*22.07, 256-y[j][i+1]*51.2), fill='lime', width=2, tags='currentline')
canvas.create_oval(253+x[n][a]*22.07, 253-y[n][a]*51.2, 259+x[n][a]*22.07, 259-y[n][a]*51.2, fill='', outline='white', width=1, tags='currentcursor')
else:
if xtra == 0:
for j in range(n2,m):
for i in range(255):
canvas.create_line((i*2, 256-y[j][i]*51.2, (i+1)*2, 256-y[j][i+1]*51.2), fill='blue', width=2, tags='currentline')
for i in range(255):
canvas.create_line((i*2, 256-x[j][i]*22.07, (i+1)*2, 256-x[j][i+1]*22.07), fill='orange', width=2, tags='currentline')
canvas.create_oval(a*2-3, 253-x[n][a]*22.07, a*2 + 3, 259-x[n][a]*22.07, fill='', outline='white', width=1, tags='currentcursor')
canvas.create_oval(a*2-3, 253-y[n][a]*51.2, a*2 + 3, 259-y[n][a]*51.2, fill='', outline='white', width=1, tags='currentcursor')
label1.config(text = str(format(x[n][a],'0.3f')+'V'))
label2.config(text = str(format(y[n][a],'0.3f')+'mA'))
label3.config(text = str(format(tms[n][a],'3.2f')+'ms'))
runDelta()
def sweep(c = b'cct'):
m = int(trcvar.get())
if c == b'cct':
tms[m][:] = np.linspace(0,30.95,256,endpoint=True)
else:
tms[m][:] = np.linspace(0,240.0,256,endpoint=True)
ser.write(b'@' + c + b'0000')
for i in range(256):
line = ser.readline()
x[m][i] = int(line[line.find(c)+3:line.find(c)+7])
x[m][i] = (x[m][i]-512)*0.02266
y[m][i] = int(line[line.find(c)+7:])
y[m][i] = -(y[m][i]-509.7)*0.00977 + x[m][i]*0.0055
plotxy()
def runSine(event):
sweep(b'cct')
def runCont():
if contvar.get() == True:
sweep(b'cct')
if (crampvar.get() == True) & (contvar.get() == False):
if stepn.get() >= 0:
trcvar.set(stepn.get())
m = float(startvar.get()) + stepn.get() * float(stepvar.get())
if m > 5.0:
m = 5.0
mi = int(m*51)
ms = '@dac' + str(mi) + '\r'
ser.write(bytes(ms, 'utf-8'))
ser.readline()
stepn.set(stepn.get() + 1)
if stepn.get() > 4:
stepn.set(-1)
crampvar.set(False)
sweep(b'mea')
root.after(500, runCont)
def runRamp(event):
sweep(b'mea')
def runMouse(event):
xx = canvas.canvasx(event.x)
xx = int(xx/2)
if xx <= 255:
curvar.set(xx)
cursor.invoke("buttondown")
def runMag(xtra = 0):
m = float(sinmag.get())
if m>11.5:
m = 11.5
sinmag.invoke("buttonup")
if m<2.4:
m=2.3
sinmag.invoke("buttonup")
mi = int(m*127/11.5)
ms = '@mag' + str(mi) + '\r'
ser.write(bytes(ms, 'utf-8'))
magvar.set(m)
def runPos(xtra = 0):
m = float(posmag.get())
if m>11.5:
m = 11.5
posmag.invoke("buttonup")
if m<-11.5:
m=-11.6
posmag.invoke("buttonup")
mi = int(m*127/11.5) + 128
ms = '@pos' + str(mi) + '\r'
ser.write(bytes(ms, 'utf-8'))
posvar.set(m)
def runNeg(xtra = 0):
m = float(negmag.get())
if m>11.5:
m = 11.5
negmag.invoke("buttonup")
if m<-11.5:
m=-11.6
negmag.invoke("buttonup")
mi = int(m*127/11.5) + 128
ms = '@neg' + str(mi) + '\r'
ser.write(bytes(ms, 'utf-8'))
negvar.set(m)
def runFreq(xtra = 0):
mi = int(freq.get())
if mi>50:
mi = 50
if mi<4:
mi=4
ms = '@frq' + str(mi) + '\r'
ser.write(bytes(ms, 'utf-8'))
labelfreq.config(text = str(format(963.234/mi,'3.1f'))+'Hz')
def runStart(xtra = 0):
m = float(startvar.get())
mi = int(m*51)
ms = '@dac' + str(mi) + '\r'
ser.write(bytes(ms, 'utf-8'))
def runAdc(xtra = 0):
c = b'ad'
mi = int(adcvar.get())
ms = '@adc' + str(mi) + '\r'
ser.write(bytes(ms, 'utf-8'))
line = ser.readline()
chval = int(line[line.find(c)+4:])
labeladc.config(text = str(format(chval*5.0/1024,'1.3f'))+' V')
def runSteps(xtra = 0):
stepn.set(0)
trcvar.set(0)
mtrcvar.set(5)
crampvar.set(True)
contvar.set(False)
def runMark(c = b'cct'):
n = int(trcvar.get())
a = int(curvar.get())
mkv[0] = x[n][a]
mki[0] = y[n][a]
mkt[0] = tms[n][a]
labelm1.config(text = str(format(mkv[0],'0.3f'))+'V')
labelm2.config(text = str(format(mki[0],'0.3f'))+'mA')
labelm3.config(text = str(format(mkt[0],'3.2f'))+'ms')
runDelta()
def runDelta():
n = int(trcvar.get())
a = int(curvar.get())
mi = int(freq.get())
dv[0] = x[n][a] - mkv
di[0] = y[n][a] - mki
dt[0] = tms[n][a] - mkt
try:
f = abs(1000/dt[0])
except:
f = 10000
labeld1.config(text = str(format(dv[0],'0.3f'))+'V')
labeld2.config(text = str(format(di[0],'0.3f'))+'mA')
labeld3.config(text = str(format(dt[0],'3.2f'))+'ms ' + str(format(f,'3.1f'))+' Hz')
try:
r = abs(dv[0]/di[0]) * 1000
c = (max(y[n][:]) - min(y[n][:]))/(max(x[n][:]) - min(x[n][:])) / (2 * np.pi * 0.963234/mi)
labelr.config(text = 'R = ' + str(format(r,'6.0f'))+' ohms')
labelc.config(text = 'C = ' + str(format(c,'2.2f'))+' uF')
except:
labelr.config(text = 'R = inf ohms')
def runXt():
plotxy(0)
def runPlot(xtra = 0):
plotxy(1)
def runSave(extra = 0):
with filedialog.asksaveasfile() as f:
for i in range(256):
for j in range(10):
f.write(str(format(x[j][i],'1.3f')+' '))
f.write(str(format(y[j][i],'1.3f')+' '))
f.write(str(format(tms[j][i],'1.6f')+' '))
f.write('\n')
f.close()
def runLoad(extra = 0):
with filedialog.askopenfile() as f:
for j in range(256):
s = f.readline()
fs = s.split(' ')
for k in range(10):
x[k][j] = float(fs[k*3])
y[k][j] = float(fs[k*3+1])
tms[k][j] = float(fs[k*3+2])
f.close()
plotxy(0)
def endserial():
ser.close()
root.destroy()
globalVar()
stepn = IntVar()
stepn.set(-1)
canvas.create_rectangle((0,0,512,512),fill='green')
for i in range(11):
canvas.create_line((51.2*i, 0, 51.2*i, 512), fill='black', width=1)
canvas.create_line((0,51.2*i, 512, 51.2*i), fill='black', width=1)
for i in range(50):
canvas.create_line((10.24*i, 254, 10.24*i, 258), fill='green', width=1)
canvas.create_line((254,10.24*i, 258, 10.24*i), fill='green', width=1)
canvas.create_line((612,10,612,480), fill='grey', width=3)
canvas.create_line((520,10,605,10), fill='grey', width=3)
canvas.create_line((520,115,605,115), fill='grey', width=3)
canvas.create_line((520,220,605,220), fill='grey', width=3)
canvas.create_line((520,247,605,247), fill='grey', width=3)
canvas.create_line((520,335,605,335), fill='grey', width=3)
canvas.create_line((520,430,605,430), fill='grey', width=3)
canvas.create_line((620,10,705,10), fill='grey', width=3)
canvas.create_line((620,75,705,75), fill='grey', width=3)
canvas.create_line((620,170,705,170), fill='grey', width=3)
canvas.create_line((620,275,705,275), fill='grey', width=3)
canvas.create_line((620,370,705,370), fill='grey', width=3)
canvas.create_line((520,515,705,515), fill='grey', width=3)
trcvar = IntVar(value=0) # initial value
trc = Spinbox(canvas, from_= 0, to = 4, increment = 1, width = 1, command = plotxy, textvariable=trcvar)
trc.place(x = 620, y = 20)
trcvar.set(0)
labeltrc = Label(canvas)
labeltrc.place(x = 655, y = 20)
labeltrc.config(text = 'Trace')
mtrcvar = IntVar(value=1) # initial value
mtrc = Spinbox(canvas, from_= 1, to = 5, increment = 1, width = 1, command = plotxy, textvariable=mtrcvar)
mtrc.place(x = 620, y = 45)
mtrcvar.set(1)
labelmtrc = Label(canvas)
labelmtrc.place(x = 655, y = 45)
labelmtrc.config(text = 'Multiple')
trcsave = ttk.Button(canvas, text="Save", command = runSave)
trcsave.place(x = 620, y = 90)
trcload = ttk.Button(canvas, text="Load", command = runLoad)
trcload.place(x = 620, y = 130)
startvar = DoubleVar(value=0.6) # initial value
startval = Spinbox(canvas, from_= 0.0, to = 5.0, increment = 0.02, width = 4, command = runStart, textvariable=startvar)
startval.place(x = 620, y = 185)
startvar.set(0.6)
labelstart = Label(canvas)
labelstart.place(x = 680, y = 185)
labelstart.config(text = 'Start')
stepvar = DoubleVar(value=0.88) # initial value
stepval = Spinbox(canvas, from_= 0.0, to = 5.0, increment = 0.02, width = 4, textvariable=stepvar)
stepval.place(x = 620, y = 205)
stepvar.set(0.88)
labelstep = Label(canvas)
labelstep.place(x = 680, y = 205)
labelstep.config(text = 'Step')
trcload = ttk.Button(canvas, text="Run Steps", command = runSteps)
trcload.place(x = 620, y = 235)
adcvar = IntVar(value=0) # initial value
adcval = Spinbox(canvas, from_= 0, to = 5, increment = 1, width = 2, textvariable=adcvar)
adcval.place(x = 620, y = 325)
adcvar.set(0)
labeladc = Label(canvas)
labeladc.place(x = 660, y = 325)
labeladc.config(text = '0.000V')
adcread = ttk.Button(canvas, text="Read ADC", command = runAdc)
adcread.place(x = 620, y = 295)
cts = ttk.Button(canvas, text="Sine")
cts.place(x = 520, y = 20)
cts.bind("<Button-1>", runSine)
magvar = DoubleVar(value=11.5) # initial value
sinmag = Spinbox(canvas, from_= 2.4, to = 11.5, increment = 0.1, width = 4, command = runMag, textvariable=magvar)
sinmag.place(x = 520, y = 50)
magvar.set(11.5)
sinmag.bind("<Return>", runMag)
labelmag = Label(canvas)
labelmag.place(x = 575, y = 50)
labelmag.config(text = 'Vp')
freqvar = DoubleVar(value=16) # initial value
freq = Spinbox(canvas, from_= 4, to =50, increment = 1.0, width = 2, command = runFreq, textvariable=freqvar)
freq.place(x = 520, y = 75)
freqvar.set(16)
labelfreq = Label(canvas)
labelfreq.place(x = 555, y = 75)
labelfreq.config(text = '60.2 Hz')
cnt = ttk.Checkbutton(canvas, text="Cont. Sine", variable=contvar, onvalue=True)
cnt.place(x = 520, y = 95)
ctr = ttk.Button(canvas, text="Ramp")
ctr.place(x = 520, y = 125)
ctr.bind("<Button-1>", runRamp)
posvar = DoubleVar(value=11.5) # initial value
posmag = Spinbox(canvas, from_= -11.5, to = 11.5, increment = 0.1, width = 4, command = runPos, textvariable=posvar)
posmag.place(x = 520, y = 155)
posvar.set(11.5)
posmag.bind("<Return>", runPos)
labelpos = Label(canvas)
labelpos.place(x = 572, y = 155)
labelpos.config(text = 'Vmax')
negvar = DoubleVar(value=-11.5) # initial value
negmag = Spinbox(canvas, from_= -11.5, to = 11.5, increment = 0.1, width = 4, command = runNeg, textvariable=negvar)
negmag.place(x = 520, y = 180)
negvar.set(-11.5)
negmag.bind("<Return>", runNeg)
labelneg = Label(canvas)
labelneg.place(x = 572, y = 180)
labelneg.config(text = 'Vmin')
cramp = ttk.Checkbutton(canvas, text="Cont. Ramp", variable=crampvar, onvalue=True)
cramp.place(x = 520, y = 200)
xt = ttk.Checkbutton(canvas, text="X-t Plot", variable=xtvar, command=runXt, onvalue=True)
xt.place(x = 520, y = 225)
curvar = IntVar(value= 0)
cursor = Spinbox(canvas, from_= 0, to = 255, width = 3, command = runPlot, textvariable = curvar)
cursor.place(x = 520, y = 255)
cursor.bind("<Return>", runPlot)
labelcur = Label(canvas)
labelcur.place(x = 565, y = 255)
labelcur.config(text = 'Cursor')
label1 = Label(canvas)
label1.place(x = 520, y = 275)
label1.config(text = 'V')
label2 = Label(canvas)
label2.place(x = 520, y = 295)
label2.config(text = 'mA')
label3 = Label(canvas)
label3.place(x = 520, y = 315)
label3.config(text = 'ms')
mrk = ttk.Button(canvas, text="Mark")
mrk.place(x = 520, y = 345, height = 22)
mrk.bind("<Button-1>", runMark)
labelm1 = Label(canvas)
labelm1.place(x = 520, y = 370)
labelm1.config(text = 'V')
labelm2 = Label(canvas)
labelm2.place(x = 520, y = 390)
labelm2.config(text = 'mA')
labelm3 = Label(canvas)
labelm3.place(x = 520, y = 410)
labelm3.config(text = 'ms')
labeldt = Label(canvas)
labeldt.place(x = 540, y = 433)
labeldt.config(text = 'Delta')
labeld1 = Label(canvas)
labeld1.place(x = 520, y = 450)
labeld1.config(text = 'V')
labeld2 = Label(canvas)
labeld2.place(x = 520, y = 470)
labeld2.config(text = 'mA')
labeld3 = Label(canvas)
labeld3.place(x = 520, y = 490)
labeld3.config(text = 'ms')
labelr = Label(canvas)
labelr.place(x = 520, y = 520)
labelr.config(text = 'ohms')
labelc = Label(canvas)
labelc.place(x = 640, y = 520)
labelc.config(text = 'C = uF')
canvas.bind("<Button-1>", runMouse)
plotxy()
root.after(0, runCont)
root.wm_protocol ("WM_DELETE_WINDOW", endserial)
root.mainloop()
| 29.745833 | 172 | 0.583975 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,381 | 0.096722 |
ef8970f817cb168ae688aab739b624cb804e885d | 665 | py | Python | logger/TMP102.py | scsibug/Raspberry-Pi-Sensor-Node | 606cf2a15a72ac1503c7318a39c9f3cc523a9c4a | [
"Unlicense"
]
| 1 | 2015-12-23T04:27:16.000Z | 2015-12-23T04:27:16.000Z | logger/TMP102.py | scsibug/Raspberry-Pi-Sensor-Node | 606cf2a15a72ac1503c7318a39c9f3cc523a9c4a | [
"Unlicense"
]
| null | null | null | logger/TMP102.py | scsibug/Raspberry-Pi-Sensor-Node | 606cf2a15a72ac1503c7318a39c9f3cc523a9c4a | [
"Unlicense"
]
| null | null | null | import time
import smbus
from Adafruit_I2C import Adafruit_I2C
# ===========================================================================
# TMP102 Class
# ===========================================================================
class TMP102:
#i2c = None
# Constructor
def __init__(self, address=0x48, debug=False):
#self.i2c = Adafruit_I2C(address)
#self.address = address
self.debug = debug
# Make sure the specified mode is in the appropriate range
def readTemperature(self):
bus = smbus.SMBus(1)
data = bus.read_i2c_block_data(0x49, 0)
msb = data[0]
lsb = data[1]
return (((msb << 8) | lsb) >> 4) * 0.0625
| 24.62963 | 77 | 0.508271 | 427 | 0.642105 | 0 | 0 | 0 | 0 | 0 | 0 | 306 | 0.46015 |
ef8bac5da5b68c79dd574b7a205be89cb3f23f5d | 178 | py | Python | headlines.py | plamenbelev/headlines | 49e5995042abf31b2f898ca1daaf7ee99005dde9 | [
"MIT"
]
| null | null | null | headlines.py | plamenbelev/headlines | 49e5995042abf31b2f898ca1daaf7ee99005dde9 | [
"MIT"
]
| null | null | null | headlines.py | plamenbelev/headlines | 49e5995042abf31b2f898ca1daaf7ee99005dde9 | [
"MIT"
]
| null | null | null | from flask import Flask
app = Flask(__name__)
@app.route("/")
def get_news():
return 'No news is good news'
if __name__ == '__main__':
app.run(port=5000, debug=True) | 14.833333 | 34 | 0.662921 | 0 | 0 | 0 | 0 | 65 | 0.365169 | 0 | 0 | 35 | 0.196629 |
ef90aefef6921157afac229b23fbddf7cab99743 | 854 | py | Python | help_desk/help_desk/doctype/department_name/department_name.py | shrikant9867/mycfohelpdesk | b285b156aec53ecff5873f4630638687ff5a0e92 | [
"MIT"
]
| null | null | null | help_desk/help_desk/doctype/department_name/department_name.py | shrikant9867/mycfohelpdesk | b285b156aec53ecff5873f4630638687ff5a0e92 | [
"MIT"
]
| null | null | null | help_desk/help_desk/doctype/department_name/department_name.py | shrikant9867/mycfohelpdesk | b285b156aec53ecff5873f4630638687ff5a0e92 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
import string
from frappe.model.document import Document
from frappe.utils import cstr, flt, getdate, comma_and, cint
from frappe import _
from erpnext.controllers.item_variant import get_variant, copy_attributes_to_variant, ItemVariantExistsError
class DepartmentName(Document):
def autoname(self):
self.name = self.department_abbriviation.upper()
def validate(self):
self.validate_name_of_department()
def validate_name_of_department(self):
if(self.name_of_department):
self.name_of_department = self.name_of_department.title()
if(self.department_abbriviation):
self.department_abbriviation = self.department_abbriviation.upper() | 32.846154 | 108 | 0.799766 | 404 | 0.473068 | 0 | 0 | 0 | 0 | 0 | 0 | 121 | 0.141686 |
ef911bdd33ff81cae4898bfd37e8a89b765f201c | 2,565 | py | Python | src/medical_test_service/medical_test.py | phamnam-mta/know-life | f7c226c41e315f21b5d7fe2ccbc9ec4f9961ed1d | [
"MIT"
]
| null | null | null | src/medical_test_service/medical_test.py | phamnam-mta/know-life | f7c226c41e315f21b5d7fe2ccbc9ec4f9961ed1d | [
"MIT"
]
| null | null | null | src/medical_test_service/medical_test.py | phamnam-mta/know-life | f7c226c41e315f21b5d7fe2ccbc9ec4f9961ed1d | [
"MIT"
]
| null | null | null | import logging
from typing import Text, List
from src.utils.io import read_json
from src.utils.fuzzy import is_relevant_string
from src.utils.common import is_float
from src.utils.constants import (
MEDICAL_TEST_PATH,
QUANTITATIVE_PATH,
POSITIVE_TEXT,
TestResult
)
logger = logging.getLogger(__name__)
class MedicalTest():
def __init__(self, medical_test_path=MEDICAL_TEST_PATH, quantitative_path= QUANTITATIVE_PATH) -> None:
self.medical_test = read_json(medical_test_path)
self.quantitative = read_json(quantitative_path)
logger.info("Medical test loaded")
def get_suggestions(self, indicators: List):
suggestions = []
count = 0
for i in indicators:
count += 1
sg = {
"id": count,
"input": i,
}
references = []
for m in self.medical_test:
if is_relevant_string(i["test_name"], m["name"], method=['exact','fuzzy'], score=90):
sg["name"] = m["name"]
sg["overview"] = m["overview"]
if m["references"]:
references.extend(m["references"])
for q in self.quantitative:
if q["medical_test"]["id"] == m["id"]:
if q["test_result"] == TestResult.positive.value and is_relevant_string(str(i["result"]), POSITIVE_TEXT, score=90, remove_accent=True):
sg["note"] = q["note"]
sg["cause"] = q["cause"]
sg["recommend"] = q["recommend"]
if m["references"]:
references.extend(m["references"])
break
elif is_float(str(i["result"])):
test_result = float(str(i["result"]))
if test_result >= q["min_value"] and test_result <= q["max_value"]:
sg["note"] = q["note"]
sg["cause"] = q["cause"]
sg["recommend"] = q["recommend"]
if m["references"]:
references.extend(m["references"])
break
break
sg["references"] = list(dict.fromkeys(references))
suggestions.append(sg)
return suggestions
| 43.474576 | 163 | 0.474854 | 2,234 | 0.870955 | 0 | 0 | 0 | 0 | 0 | 0 | 356 | 0.138791 |
ef933f2244982928a2ce88206760be93146f1a77 | 1,064 | py | Python | scam.py | TheToddLuci0/Tarkov-Scammer | 5fced3952c6cec72fe3eb85384bc11f65ee6af9c | [
"BSD-3-Clause"
]
| 2 | 2021-02-09T19:13:14.000Z | 2021-02-23T08:41:14.000Z | scam.py | TheToddLuci0/Tarkov-Scammer | 5fced3952c6cec72fe3eb85384bc11f65ee6af9c | [
"BSD-3-Clause"
]
| null | null | null | scam.py | TheToddLuci0/Tarkov-Scammer | 5fced3952c6cec72fe3eb85384bc11f65ee6af9c | [
"BSD-3-Clause"
]
| null | null | null | import requests
import sys
from time import sleep
from tabulate import tabulate
def get_scams(api_key):
scams = []
headers = {"x-api-key": api_key}
r = requests.get('https://tarkov-market.com/api/v1/items/all', headers=headers)
for i in r.json():
r2 = requests.get('https://tarkov-market.com/api/v1/item?uid='+i['uid'], headers=headers)
while r2.status_code == 429:
print("Got rate limited, sleeping")
sleep(15)
r2 = requests.get('https://tarkov-market.com/api/v1/item?uid=' + i['uid'], headers=headers)
data = r2.json()[0]
if data['traderPrice'] > data["price"]:
scams.append([i['name'], data['price'], data['traderName'], data['traderPrice']-data['price']])
scams.sort(key=lambda x: x[3])
print(tabulate(scams, headers=["Item", "Market Price", "Trader", "Profit"]))
if __name__=='__main__':
try:
with open('.secret', 'r') as f:
secret = f.read().strip()
except IOException:
secret = sys.argv[1]
get_scams(secret)
| 34.322581 | 107 | 0.599624 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 304 | 0.285714 |
ef955712af3bae4edd3cd451907984d2f75e38d0 | 101 | py | Python | hair_segmentation/model.py | shoman2/mediapipe-models | c588321f3b5056f2239d834c603046de7901d02e | [
"Apache-2.0"
]
| 28 | 2019-10-08T06:07:45.000Z | 2021-06-12T07:01:32.000Z | hair_segmentation/model.py | shoman2/mediapipe-models | c588321f3b5056f2239d834c603046de7901d02e | [
"Apache-2.0"
]
| null | null | null | hair_segmentation/model.py | shoman2/mediapipe-models | c588321f3b5056f2239d834c603046de7901d02e | [
"Apache-2.0"
]
| 8 | 2019-10-10T04:59:02.000Z | 2021-03-28T16:11:09.000Z | # Real-time Hair Segmentation and Recoloring on Mobile GPUs (https://arxiv.org/abs/1907.06740)
# TODO | 50.5 | 94 | 0.772277 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.990099 |
ef978b1341d6c5d5f3129e9244bedb2f75765eb8 | 996 | py | Python | blog/viewmixins.py | pincoin/rakmai | d9daa399aff50712a86b2dec9d94e622237b25b0 | [
"MIT"
]
| 11 | 2018-04-02T16:36:19.000Z | 2019-07-10T05:54:58.000Z | blog/viewmixins.py | pincoin/rakmai | d9daa399aff50712a86b2dec9d94e622237b25b0 | [
"MIT"
]
| 22 | 2019-01-01T20:40:21.000Z | 2022-02-10T08:06:39.000Z | blog/viewmixins.py | pincoin/rakmai | d9daa399aff50712a86b2dec9d94e622237b25b0 | [
"MIT"
]
| 4 | 2019-03-12T14:24:37.000Z | 2022-01-07T16:20:22.000Z | import logging
from .forms import PostSearchForm
from .models import Blog
class BlogContextMixin(object):
logger = logging.getLogger(__name__)
def dispatch(self, *args, **kwargs):
self.blog = Blog.objects.get(slug=self.kwargs['blog'])
self.block_size = self.blog.block_size
self.chunk_size = self.blog.chunk_size
return super(BlogContextMixin, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(BlogContextMixin, self).get_context_data(**kwargs)
context['blog'] = self.blog
return context
class SearchContextMixin(object):
logger = logging.getLogger(__name__)
search_form_class = PostSearchForm
def get_context_data(self, **kwargs):
context = super(SearchContextMixin, self).get_context_data(**kwargs)
context['search_form'] = self.search_form_class(
q=self.request.GET.get('q') if self.request.GET.get('q') else '')
return context
| 27.666667 | 77 | 0.684739 | 915 | 0.918675 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.033133 |
ef978c724ad463ecd7562dae7e149d5ae0ce4282 | 677 | py | Python | mapclientplugins/scaffoldfiniteelementmeshfitterstep/model/imageplanemodel.py | mahyar-osn/mapclientplugins.scaffoldfiniteelementmeshfitterstep | b35f6c0b2e264e2913d0a1c432bf89c7b329bf52 | [
"Apache-2.0"
]
| null | null | null | mapclientplugins/scaffoldfiniteelementmeshfitterstep/model/imageplanemodel.py | mahyar-osn/mapclientplugins.scaffoldfiniteelementmeshfitterstep | b35f6c0b2e264e2913d0a1c432bf89c7b329bf52 | [
"Apache-2.0"
]
| null | null | null | mapclientplugins/scaffoldfiniteelementmeshfitterstep/model/imageplanemodel.py | mahyar-osn/mapclientplugins.scaffoldfiniteelementmeshfitterstep | b35f6c0b2e264e2913d0a1c432bf89c7b329bf52 | [
"Apache-2.0"
]
| null | null | null | from opencmiss.utils.maths.algorithms import calculate_line_plane_intersection
class ImagePlaneModel(object):
def __init__(self, master_model):
self._master_model = master_model
self._region = None
self._frames_per_second = -1
self._images_file_name_listing = []
self._image_dimensions = [-1, -1]
self._duration_field = None
self._image_based_material = None
self._scaled_coordinate_field = None
self._time_sequence = []
def set_image_information(self, frames_per_second, image_dimensions):
self._frames_per_second = frames_per_second
self._image_dimensions = image_dimensions
| 33.85 | 78 | 0.713442 | 595 | 0.878877 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
ef97b640d54812e21c1fdb002e84f00eb0d09eea | 76 | py | Python | antools/shared/data_validation/__init__.py | antonin-drozda/antools | 550310a61aae8d11e50e088731211197b7ee790b | [
"MIT"
]
| 1 | 2021-02-27T07:22:39.000Z | 2021-02-27T07:22:39.000Z | antools/shared/data_validation/__init__.py | antonin-drozda/antools | 550310a61aae8d11e50e088731211197b7ee790b | [
"MIT"
]
| null | null | null | antools/shared/data_validation/__init__.py | antonin-drozda/antools | 550310a61aae8d11e50e088731211197b7ee790b | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
SHARED - DATA VALIDATION
"""
# %% FILE IMPORT
| 9.5 | 24 | 0.526316 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.934211 |
ef9836ec7a3a89d88130ef5b51f413cd84a57435 | 2,152 | py | Python | test/test/host_test_default.py | noralsydmp/mbed-os-tools | 5a14958aa49eb5764afba8e1dc3208cae2955cd7 | [
"Apache-2.0"
]
| 29 | 2018-11-30T19:45:22.000Z | 2022-03-29T17:02:16.000Z | test/test/host_test_default.py | noralsydmp/mbed-os-tools | 5a14958aa49eb5764afba8e1dc3208cae2955cd7 | [
"Apache-2.0"
]
| 160 | 2018-11-30T21:55:52.000Z | 2022-01-18T10:58:09.000Z | test/test/host_test_default.py | noralsydmp/mbed-os-tools | 5a14958aa49eb5764afba8e1dc3208cae2955cd7 | [
"Apache-2.0"
]
| 73 | 2018-11-30T21:34:41.000Z | 2021-10-02T05:51:40.000Z | # Copyright (c) 2018, Arm Limited and affiliates.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from mbed_os_tools.test.host_tests_runner.host_test_default import DefaultTestSelector
class HostTestDefaultTestCase(unittest.TestCase):
def test_os_info(self):
expected = {
"grm_module" : "module_name",
"grm_host" : "10.2.123.43",
"grm_port" : "3334",
}
# Case that includes an IP address but no protocol
arg = [expected["grm_module"], expected["grm_host"], expected["grm_port"]]
result = DefaultTestSelector._parse_grm(":".join(arg))
self.assertEqual(result, expected)
# Case that includes an IP address but no protocol nor a no port
expected["grm_port"] = None
arg = [expected["grm_module"], expected["grm_host"]]
result = DefaultTestSelector._parse_grm(":".join(arg))
self.assertEqual(result, expected)
# Case that includes an IP address and a protocol
expected["grm_host"] = "https://10.2.123.43"
expected["grm_port"] = "443"
arg = [expected["grm_module"], expected["grm_host"], expected["grm_port"]]
result = DefaultTestSelector._parse_grm(":".join(arg))
self.assertEqual(result, expected)
# Case that includes an IP address and a protocol, but no port
expected["grm_port"] = None
arg = [expected["grm_module"], expected["grm_host"]]
result = DefaultTestSelector._parse_grm(":".join(arg))
self.assertEqual(result, expected)
if __name__ == '__main__':
unittest.main()
| 39.127273 | 86 | 0.676115 | 1,363 | 0.633364 | 0 | 0 | 0 | 0 | 0 | 0 | 1,105 | 0.513476 |
ef9847d747aab77361f5e75e1a5b9c126c9e90f9 | 3,359 | py | Python | lib/surface/debug/logpoints/delete.py | bopopescu/SDK | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | [
"Apache-2.0"
]
| null | null | null | lib/surface/debug/logpoints/delete.py | bopopescu/SDK | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | [
"Apache-2.0"
]
| null | null | null | lib/surface/debug/logpoints/delete.py | bopopescu/SDK | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | [
"Apache-2.0"
]
| 1 | 2020-07-25T12:23:41.000Z | 2020-07-25T12:23:41.000Z | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Delete command for gcloud debug logpoints command group."""
from googlecloudsdk.api_lib.debug import debug
from googlecloudsdk.calliope import base
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
class Delete(base.DeleteCommand):
"""Delete debug logpoints.
This command deletes logpoints from a Cloud Debugger debug target.
"""
@staticmethod
def Args(parser):
parser.add_argument(
'id_or_location_regexp', metavar='(ID|LOCATION-REGEXP)', nargs='+',
help="""\
A logpoint ID or a regular expression to match against logpoint
locations. The logpoint with the given ID, or all logpoints whose
locations (file:line) contain the regular expression, will be
deleted.
""")
parser.add_argument(
'--all-users', action='store_true', default=False,
help="""\
If set, any location regexp will match logpoints from all users,
rather than only logpoints created by the current user. This flag is
not required when specifying the exact ID of a logpoint created by
another user.
""")
parser.add_argument(
'--include-inactive', action='store_true', default=False,
help="""\
If set, any location regexp will also match inactive logpoints,
rather than only logpoints which have not expired. This flag is
not required when specifying the exact ID of an inactive logpoint.
""")
def Run(self, args):
"""Run the delete command."""
project_id = properties.VALUES.core.project.Get(required=True)
debugger = debug.Debugger(project_id)
debuggee = debugger.FindDebuggee(args.target)
logpoints = debuggee.ListMatchingBreakpoints(
args.id_or_location_regexp, include_all_users=args.all_users,
include_inactive=args.include_inactive,
restrict_to_type=debugger.LOGPOINT_TYPE)
for s in logpoints:
debuggee.DeleteBreakpoint(s.id)
return logpoints
def Collection(self):
return 'debug.logpoints'
def Format(self, args):
"""Format for printing the results of the Run() method.
Args:
args: The arguments that command was run with.
Returns:
A format string
"""
fields = ['id']
if args.all_users:
fields.append('userEmail:label=USER')
fields.append('location')
fields.append('logLevel:label=LEVEL')
fields.append('short_status():label="STATUS BEFORE DELETION"')
return 'table({0})'.format(','.join(fields))
def Epilog(self, resources_were_displayed):
if resources_were_displayed:
log.status.write('Deleted Logpoints')
else:
log.status.write('No logpoints matched the requested values')
| 36.51087 | 80 | 0.693659 | 2,529 | 0.752903 | 0 | 0 | 1,145 | 0.340875 | 0 | 0 | 2,032 | 0.604942 |
ef99c583c045deb51df0a2fd8b0f81216762f3eb | 3,844 | py | Python | day-07/solution.py | wangjoshuah/Advent-Of-Code-2018 | 6bda7956bb7c6f9a54feffb19147961b56dc5d81 | [
"MIT"
]
| null | null | null | day-07/solution.py | wangjoshuah/Advent-Of-Code-2018 | 6bda7956bb7c6f9a54feffb19147961b56dc5d81 | [
"MIT"
]
| null | null | null | day-07/solution.py | wangjoshuah/Advent-Of-Code-2018 | 6bda7956bb7c6f9a54feffb19147961b56dc5d81 | [
"MIT"
]
| null | null | null | # directed graph problem or breadth first search variant
from collections import defaultdict
import re
input_file = open("input.txt", "r")
input_lines = input_file.readlines()
letter_value = {
'A': 1,
'B': 2,
'C': 3,
'D': 4,
'E': 5,
'F': 6,
'G': 7,
'H': 8,
'I': 9,
'J': 10,
'K': 11,
'L': 12,
'M': 13,
'N': 14,
'O': 15,
'P': 16,
'Q': 17,
'R': 18,
'S': 19,
'T': 20,
'U': 21,
'V': 22,
'W': 23,
'X': 24,
'Y': 25,
'Z': 26
}
# construct graph of nodes and edges
# Read nodes and edges from input with regex
def construct_graph(lines):
# Graph is a Dictionary of String to Set
# { Node : Set of children }
edges = defaultdict(set)
nodes = set()
pattern = r"Step (.?) must be finished before step (.?) can begin\."
for line in lines:
matches = re.search(pattern, line)
edges[matches.group(1)].add(matches.group(2))
nodes.add(matches.group(1))
nodes.add(matches.group(2))
return nodes, edges
# A set of possible nodes to work on (starts with C)
# pick the first alphabetical node and remove it and its edges
def find_all_nodes_to_work(nodes, edges):
possible_nodes_to_work = nodes.copy()
for parent, children in edges.items():
for child in children:
if child in possible_nodes_to_work:
possible_nodes_to_work.remove(child)
sorted_work = list(possible_nodes_to_work)
sorted_work.sort()
return sorted_work
def find_next_node_to_work(nodes, edges):
return find_all_nodes_to_work(nodes, edges)[0]
def work_nodes(nodes, edges):
work_order = ""
while len(nodes) > 0:
node_to_work = find_next_node_to_work(nodes, edges)
nodes.remove(node_to_work)
if node_to_work in edges:
del edges[node_to_work]
work_order += node_to_work
return work_order
class Worker:
def __init__(self, base_time) -> None:
super().__init__()
self.current_node = None
self.seconds_left = 0
self.free = True
self.base_time = base_time
def assign(self, node):
self.current_node = node
self.seconds_left = letter_value[node] + self.base_time
self.free = False
def tick(self):
self.seconds_left -= 1
if self.seconds_left <= 0:
finished_node = self.current_node
self.current_node = None
self.free = True
self.seconds_left = 0
return finished_node
def work_nodes_in_parallel(nodes, edges, workers, base_time):
timer = 0
worker_pool = list()
nodes_worked = set()
for i in range(workers):
worker_pool.append(Worker(base_time))
while len(nodes) > 0:
print(f"Second {timer}")
nodes_to_work = find_all_nodes_to_work(nodes, edges)
for node in nodes_to_work:
if node not in nodes_worked:
for worker in worker_pool:
if worker.free and node not in nodes_worked:
worker.assign(node)
nodes_worked.add(node)
for worker in worker_pool:
print(f"Worker is working on {worker.current_node} with {worker.seconds_left} seconds left.")
finished_node = worker.tick()
if finished_node is not None:
# if a worker finishes a node
nodes.remove(finished_node)
nodes_worked.remove(finished_node)
if finished_node in edges:
del edges[finished_node]
timer += 1
return timer
# Part 1
# print(work_nodes(construct_graph(input_lines)))
# Part 2
nodes, edges = construct_graph(input_lines)
total_time = work_nodes_in_parallel(nodes, edges, 5, 60)
print(f"It took {total_time} seconds")
| 27.070423 | 105 | 0.601977 | 624 | 0.162331 | 0 | 0 | 0 | 0 | 0 | 0 | 698 | 0.181582 |
ef99d022220363214630da6ad916a3a41900d8d7 | 2,862 | py | Python | src/infi/pypi_manager/scripts/compare_pypi_repos.py | Infinidat/infi.pypi_manager | 7b5774b395ef47a23be2957a091b607b35a049f2 | [
"BSD-3-Clause"
]
| null | null | null | src/infi/pypi_manager/scripts/compare_pypi_repos.py | Infinidat/infi.pypi_manager | 7b5774b395ef47a23be2957a091b607b35a049f2 | [
"BSD-3-Clause"
]
| 1 | 2020-11-05T10:04:45.000Z | 2020-11-05T11:03:25.000Z | src/infi/pypi_manager/scripts/compare_pypi_repos.py | Infinidat/infi.pypi_manager | 7b5774b395ef47a23be2957a091b607b35a049f2 | [
"BSD-3-Clause"
]
| null | null | null | from __future__ import print_function
from .. import PyPI, DjangoPyPI, PackageNotFound
from prettytable import PrettyTable
from pkg_resources import parse_version, resource_filename
import requests
import re
try:
from urlparse import unquote
except ImportError:
# Python 3
from urllib.parse import unquote
def get_versions_from_reference(reference_repo):
reference_pypi_html = requests.get("{}/pypi".format(reference_repo.server)).text
search_result = re.findall("""href=["'](?:/pypi/)?([^/]+)/([^/]+)/["']""", reference_pypi_html)
return dict((k, unquote(v)) for k, v in search_result)
def get_skipped_packages():
with open(resource_filename(__name__, "skipped_packages.txt"), "r") as fd:
return [line.split("#")[0].strip() for line in fd.readlines()]
def get_major(version_string):
return int(version_string.split('.')[0])
def compare_pypi_repos(reference_repo, other_repo):
upgrade_packages = []
upgrade_table = PrettyTable(["Package", reference_repo.server, other_repo.server, 'Major'])
downgrade_table = PrettyTable(["Package", reference_repo.server, other_repo.server])
skipped_table = PrettyTable(["Package", reference_repo.server, other_repo.server])
skipped_packages = get_skipped_packages()
reference_repo_versions = get_versions_from_reference(reference_repo)
packages_to_check = list(reference_repo_versions.keys())
for name in sorted(packages_to_check):
try:
reference_repo_version = reference_repo_versions[name]
other_repo_version = other_repo.get_latest_version(name)
except PackageNotFound:
continue
if other_repo_version != reference_repo_version:
if name in skipped_packages or any(x in other_repo_version for x in ['a', 'b', 'dev', 'post', 'rc']):
skipped_table.add_row([name, reference_repo_version, other_repo_version])
elif parse_version(reference_repo_version) < parse_version(other_repo_version):
major_change = get_major(reference_repo_version) != get_major(other_repo_version)
upgrade_table.add_row([name, reference_repo_version, other_repo_version, 'Yes' if major_change else ''])
upgrade_packages.append((name, other_repo_version))
else:
downgrade_table.add_row([name, reference_repo_version, other_repo_version])
print("Upgradable Packages:")
print(upgrade_table)
print()
print("Downgradable Packages:")
print(downgrade_table)
print()
print("Skipped Packages:")
print(skipped_table)
print()
print("Upgrade commands:")
for name, version in upgrade_packages:
print("mirror_package %s %s" % (name, version))
def main():
import sys
local = DjangoPyPI(sys.argv[-1])
pypi = PyPI()
compare_pypi_repos(local, pypi)
| 40.885714 | 120 | 0.70615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 264 | 0.092243 |
ef9afc42c7347b259e757e59b46b756f7ac092fc | 6,954 | py | Python | src/GNLSE_specific.py | Computational-Nonlinear-Optics-ORC/Compare-CNLSE | 9b56cedbca2a06af3baa9f64e46ebfd4263f86c2 | [
"MIT"
]
| null | null | null | src/GNLSE_specific.py | Computational-Nonlinear-Optics-ORC/Compare-CNLSE | 9b56cedbca2a06af3baa9f64e46ebfd4263f86c2 | [
"MIT"
]
| null | null | null | src/GNLSE_specific.py | Computational-Nonlinear-Optics-ORC/Compare-CNLSE | 9b56cedbca2a06af3baa9f64e46ebfd4263f86c2 | [
"MIT"
]
| 3 | 2018-06-04T18:43:03.000Z | 2021-11-24T07:57:03.000Z | import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.fftpack import fft
from combined_functions import check_ft_grid
from scipy.constants import pi, c, hbar
from numpy.fft import fftshift
from scipy.io import loadmat
from time import time
import sys
import matplotlib.pyplot as plt
from scipy.integrate import simps
def fv_creator(fp, df, F, int_fwm):
"""
Cretes frequency grid such that the estimated MI-FWM bands
will be on the grid and extends this such that to avoid
fft boundary problems.
Inputs::
lamp: wavelength of the pump (float)
lamda_c: wavelength of the zero dispersion wavelength(ZDW) (float)
int_fwm: class that holds nt (number of points in each band)
betas: Taylor coeffiencts of beta around the ZDW (Array)
M : The M coefficient (or 1/A_eff) (float)
P_p: pump power
Df_band: band frequency bandwidth in Thz, (float)
Output::
fv: Frequency vector of bands (Array of shape [nt])
"""
f_centrals = [fp + i * F for i in range(-1, 2)]
fv1 = np.linspace(f_centrals[0], f_centrals[1],
int_fwm.nt//4 - 1, endpoint=False)
df = fv1[1] - fv1[0]
fv2 = np.linspace(f_centrals[1], f_centrals[2], int_fwm.nt//4)
try:
assert df == fv2[1] - fv2[0]
except AssertionError:
print(df, fv2[1] - fv2[0])
fv0, fv3 = np.zeros(int_fwm.nt//4 + 1), np.zeros(int_fwm.nt//4)
fv0[-1] = fv1[0] - df
fv3[0] = fv2[-1] + df
for i in range(1, len(fv3)):
fv3[i] = fv3[i - 1] + df
for i in range(len(fv0) - 2, -1, -1):
fv0[i] = fv0[i + 1] - df
assert not(np.any(fv0 == fv1))
assert not(np.any(fv1 == fv2))
assert not(np.any(fv2 == fv3))
fv = np.concatenate((fv0, fv1, fv2, fv3))
for i in range(3):
assert f_centrals[i] in fv
check_ft_grid(fv, df)
p_pos = np.where(np.abs(fv - fp) == np.min(np.abs(fv - fp)))[0]
return fv, p_pos, f_centrals
class raman_object(object):
"""
Warning: hf comes back normalised but ht does not!!!
"""
def __init__(self, b=None):
self.how = b
self.hf = None
self.ht = None
def raman_load(self, t, dt):
if self.how == 'analytic':
t11 = 12.2e-3 # [ps]
t2 = 32e-3 # [ps]
# analytical response
ht = (t11**2 + t2**2)/(t11*t2**2) * \
np.exp(-t/t2*(t >= 0))*np.sin(t/t11)*(t >= 0)
self.ht = ht # * dt
ht_norm = ht / simps(ht, t)
# Fourier transform of the analytic nonlinear response
self.hf = fft(ht_norm)
elif self.how == 'load':
# loads the measured response (Stolen et al. JOSAB 1989)
mat = loadmat('loading_data/silicaRaman.mat')
ht = mat['ht']
t1 = mat['t1']
htmeas_func = InterpolatedUnivariateSpline(t1*1e-3, ht)
ht = htmeas_func(t)
ht *= (t > 0)*(t < 1) # only measured between +/- 1 ps)
self.ht = ht / simps(ht, t)
ht_norm = ht / simps(ht, t)
# Fourier transform of the measured nonlinear response
self.hf = fft(ht_norm)
else:
sys.exit("No raman response on the GNLSE")
return None
class sim_window(object):
def __init__(self, fv, lamda, F, lamda_c, int_fwm, where):
self.fv = fv
self.type = 'GNLSE'
self.lamda = lamda
self.fp = 1e-12*c/self.lamda
self.fmed = 0.5*(fv[-1] + fv[0])*1e12 # [Hz]
self.deltaf = np.max(self.fv) - np.min(self.fv) # [THz]
self.df = self.deltaf/int_fwm.nt # [THz]
self.T = 1 / self.df # Time window (period)[ps]
self.woffset = 2*pi*(self.fmed - c/lamda)*1e-12 # [rad/ps]
self.w0 = 2*pi*self.fmed # central angular frequency [rad/s]
self.tsh = (1/self.w0)*1e12 # shock time [ps]
self.dt = self.T/int_fwm.nt # timestep (dt) [ps]
# time vector [ps]
self.t = (range(int_fwm.nt)-np.ones(int_fwm.nt)*int_fwm.nt/2)*self.dt
self.w = fftshift(2*pi * (self.fv - 1e-12*self.fmed))
self.t_band = self.t
self.lv = 1e-3*c/self.fv
self.zv = int_fwm.dzstep*np.asarray(range(0, 2))
self.p_pos = where
self.F = F
self.f_centrals = np.array(
[1e-12 * c/lamda + i * F for i in range(-1, 2)])
self.w_tiled = fftshift(
2*pi * (self.fv - self.f_centrals[1])) # w of self-step
class Loss(object):
def __init__(self, int_fwm, sim_wind, amax=None, apart_div=8):
"""
Initialise the calss Loss, takes in the general parameters and
the freequenbcy window. From that it determines where
the loss will become freequency dependent. With the default value
being an 8th of the difference of max and min.
"""
self.alpha = int_fwm.alphadB/4.343
if amax is None:
self.amax = self.alpha
else:
self.amax = amax/4.343
self.flims_large = (np.min(sim_wind.fv), np.max(sim_wind.fv))
try:
self.begin = apart_div[0]
self.end = apart_div[1]
except TypeError:
self.apart = np.abs(self.flims_large[1] - self.flims_large[0])
self.apart /= apart_div
self.begin = self.flims_large[0] + self.apart
self.end = self.flims_large[1] - self.apart
def atten_func_full(self, fv):
aten = []
a_s = ((self.amax - self.alpha) / (self.flims_large[0] - self.begin),
(self.amax - self.alpha) / (self.flims_large[1] - self.end))
b_s = (-a_s[0] * self.begin, -a_s[1] * self.end)
for f in fv:
if f <= self.begin:
aten.append(a_s[0] * f + b_s[0])
elif f >= self.end:
aten.append(a_s[1] * f + b_s[1])
else:
aten.append(0)
return np.asanyarray(aten) + self.alpha
def plot(self, fv):
fig = plt.figure()
y = self.atten_func_full(fv)
plt.plot(fv, y)
plt.xlabel("Frequency (Thz)")
plt.ylabel("Attenuation (cm -1 )")
plt.savefig(
"loss_function_fibre.png", bbox_inches='tight')
plt.close(fig)
class Noise(object):
def __init__(self, int_fwm, sim_wind):
self.pquant = np.sum(
hbar*(sim_wind.w*1e12 + sim_wind.w0)/(sim_wind.T*1e-12))
self.pquant = (self.pquant/2)**0.5
return None
def noise_func(self, int_fwm):
seed = np.random.seed(int(time()*np.random.rand()))
noise = self.pquant * (np.random.randn(int_fwm.nt) +
1j*np.random.randn(int_fwm.nt))
return noise
def noise_func_freq(self, int_fwm, sim_wind):
noise = self.noise_func(int_fwm)
noise_freq = fftshift(fft(noise))
return noise_freq
| 33.757282 | 77 | 0.560397 | 4,952 | 0.712108 | 0 | 0 | 0 | 0 | 0 | 0 | 1,551 | 0.223037 |
ef9b8a20ac811d824f51d3976e30e0eeef10150a | 172 | py | Python | recipe/run_test.py | regro-cf-autotick-bot/astropy-healpix-feedstock | c859aa66fbdcc397e82ef3bb1940a99da0deb8fc | [
"BSD-3-Clause"
]
| null | null | null | recipe/run_test.py | regro-cf-autotick-bot/astropy-healpix-feedstock | c859aa66fbdcc397e82ef3bb1940a99da0deb8fc | [
"BSD-3-Clause"
]
| 24 | 2017-10-15T20:52:48.000Z | 2021-11-11T00:45:54.000Z | recipe/run_test.py | regro-cf-autotick-bot/astropy-healpix-feedstock | c859aa66fbdcc397e82ef3bb1940a99da0deb8fc | [
"BSD-3-Clause"
]
| 4 | 2017-10-15T20:37:19.000Z | 2021-08-05T14:42:53.000Z | # The test suite runs in <20 seconds so is worth running here to
# make sure there are no issues with the C/Cython extensions
import astropy_healpix
astropy_healpix.test()
| 34.4 | 64 | 0.796512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.72093 |
ef9bbe1541d0c953af96d087b8ca600f95dd7284 | 45 | py | Python | way2sms/__init__.py | shubhamc183/way2sms | 33d8c9e69ab9b053e50501baf887191c718d2d2a | [
"MIT"
]
| 38 | 2016-12-15T14:03:00.000Z | 2022-03-22T01:28:29.000Z | way2sms/__init__.py | shubhamc183/way2sms | 33d8c9e69ab9b053e50501baf887191c718d2d2a | [
"MIT"
]
| 10 | 2017-11-18T08:13:18.000Z | 2020-09-06T11:18:32.000Z | way2sms/__init__.py | shubhamc183/way2sms | 33d8c9e69ab9b053e50501baf887191c718d2d2a | [
"MIT"
]
| 41 | 2016-12-26T16:52:59.000Z | 2022-03-22T01:31:40.000Z | """
Way2sms
"""
from way2sms.app import Sms
| 7.5 | 27 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.333333 |
ef9be069a058d33204131a55950dcf855daf7d54 | 1,164 | py | Python | example.py | jasonkatz/py-graphql-client | 9f938f3d379a8f4d8810961c87baf25dbe35889d | [
"BSD-3-Clause"
]
| 38 | 2019-03-22T16:27:08.000Z | 2022-03-30T11:07:55.000Z | example.py | anthonyhiga/py-graphql-client | 9c59b32bae5c5c6a12634b2bd6353f76328aa31a | [
"BSD-3-Clause"
]
| 31 | 2019-03-25T20:28:40.000Z | 2022-01-26T21:22:47.000Z | example.py | anthonyhiga/py-graphql-client | 9c59b32bae5c5c6a12634b2bd6353f76328aa31a | [
"BSD-3-Clause"
]
| 11 | 2019-03-25T18:54:32.000Z | 2021-09-11T17:00:27.000Z | import time
from graphql_client import GraphQLClient
# some sample GraphQL server which supports websocket transport and subscription
client = GraphQLClient('ws://localhost:9001')
# Simple Query Example
# query example with GraphQL variables
query = """
query getUser($userId: Int!) {
users (id: $userId) {
id
username
}
}
"""
# This is a blocking call, you receive response in the `res` variable
print('Making a query first')
res = client.query(query, variables={'userId': 2})
print('query result', res)
# Subscription Example
subscription_query = """
subscription getUser {
users (id: 2) {
id
username
}
}
"""
# Our callback function, which will be called and passed data everytime new data is available
def my_callback(op_id, data):
print(f"Got data for Operation ID: {op_id}. Data: {data}")
print('Making a graphql subscription now...')
sub_id = client.subscribe(subscription_query, callback=my_callback)
print('Created subscription and waiting. Callback function is called whenever there is new data')
# do some operation while the subscription is running...
time.sleep(10)
client.stop_subscribe(sub_id)
client.close()
| 23.755102 | 97 | 0.734536 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 787 | 0.676117 |
ef9c140412569fc3198bcf6324071fb38dea2030 | 2,465 | py | Python | Scopus2Histcite.py | hengxyz/Scopus4HistCite | 87395afe5d8a520b9c32a0efeed2288225430244 | [
"Apache-2.0"
]
| 2 | 2020-07-09T13:10:44.000Z | 2020-07-10T13:00:52.000Z | Scopus2Histcite.py | hengxyz/Scopus4HistCite | 87395afe5d8a520b9c32a0efeed2288225430244 | [
"Apache-2.0"
]
| null | null | null | Scopus2Histcite.py | hengxyz/Scopus4HistCite | 87395afe5d8a520b9c32a0efeed2288225430244 | [
"Apache-2.0"
]
| null | null | null | # coding:utf-8
import os
import sys
def Scopus2HistCite():
try:
wrt_lines = []
if len(sys.argv) >= 2 and os.path.isfile(sys.argv[1]):
print("You are going to convert {}".format(sys.argv[1]))
Scopus_file = sys.argv[1]
elif os.path.isfile("./Scopus.ris"):
print("You are going to convert ./Scopus.ris")
Scopus_file = './Scopus.ris'
else:
raise Exception("No file spcified")
auth_started = False
ref_started = False
LT = [
'TI', # title
'T2', # jounal
'AU', # author
'VL', # volumn
'IS', # issue
'SP', # start page
'EP', # end page
'PY', # public year
'DO', # maybe doi? not important
]
wrt_lines.append('FN Thomson Reuters Web of Knowledge™')
wrt_lines.append('VR 1.0')
with open(Scopus_file, 'rb') as Scopus:
for each in Scopus.readlines():
line = each.strip()
line = line.decode().replace(' - ', ' ')
mark = line[:2]
if ref_started:
if mark == 'ER':
wrt_lines.append('ER')
wrt_lines.append('')
auth_started = False
ref_started = False
else:
wrt_lines.append(line)
elif line[:14] == 'N1 References:':
ref_started = True
line = line.replace(line[:14], 'CR')
wrt_lines.append(line)
elif mark in LT:
if mark == 'TI':
wrt_lines.append('PT J')
else:
line = line.replace('T2 ', 'SO ').replace('SP ', 'BP ')
if not auth_started and mark == 'AU':
auth_started = True
else:
line = line.replace('AU ', '')
wrt_lines.append(line)
with open("./savedres.txt", "w", encoding = "utf-8") as f:
for line in wrt_lines:
print(line)
f.write(line)
f.write("\n")
except Exception as e:
raise e
if __name__ == '__main__':
Scopus2HistCite() | 35.724638 | 80 | 0.416633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 445 | 0.180381 |
ef9d373a85947b14498743498aaf4ab814a074db | 2,449 | py | Python | mel_scale.py | zjlww/dsp | d7bcbf49bc8693560f3203c55b73956cc61dcd50 | [
"MIT"
]
| 9 | 2021-07-22T19:59:34.000Z | 2021-12-16T06:37:27.000Z | mel_scale.py | zjlww/dsp | d7bcbf49bc8693560f3203c55b73956cc61dcd50 | [
"MIT"
]
| null | null | null | mel_scale.py | zjlww/dsp | d7bcbf49bc8693560f3203c55b73956cc61dcd50 | [
"MIT"
]
| 2 | 2021-07-26T07:14:58.000Z | 2021-12-16T06:37:30.000Z | """
Mel-scale definition.
"""
import torch
from torch import Tensor
from typing import Union
import numpy as np
from math import log
import librosa
from librosa.filters import mel as mel_fn
def hz_to_mel(
frequencies: Union[float, int, Tensor, np.ndarray],
htk=False) -> Union[float, int, Tensor, np.ndarray]:
"""Convert Hz to Mels.
Extending librosa.hz_to_mel to accepting Tensor.
"""
if not isinstance(frequencies, Tensor):
return librosa.hz_to_mel(frequencies)
if htk:
return 2595.0 * torch.log10(1.0 + frequencies / 700.0)
f_min = 0.0
f_sp = 200.0 / 3
mels = (frequencies - f_min) / f_sp
min_log_hz = 1000.0 # beginning of log region (Hz)
min_log_mel = (min_log_hz - f_min) / f_sp # same (Mels)
logstep = log(6.4) / 27.0 # step size for log region
log_t = frequencies >= min_log_hz
mels[log_t] = min_log_mel + torch.log(frequencies[log_t] / min_log_hz) / \
logstep
return mels
def mel_to_hz(
mels: Union[int, float, Tensor, np.ndarray],
htk=False) -> Union[int, float, Tensor, np.ndarray]:
"""Convert mel bin numbers to frequencies."""
if not isinstance(mels, Tensor):
return librosa.mel_to_hz(mels, htk=htk)
if htk:
return 700.0 * (10.0 ** (mels / 2595.0) - 1.0)
f_min = 0.0
f_sp = 200.0 / 3
freqs = f_min + f_sp * mels
min_log_hz = 1000.0 # beginning of log region (Hz)
min_log_mel = (min_log_hz - f_min) / f_sp # same (Mels)
logstep = log(6.4) / 27.0 # step size for log region
log_t = mels >= min_log_mel
freqs[log_t] = min_log_hz * \
torch.exp(logstep * (mels[log_t] - min_log_mel))
return freqs
def linear_mel_matrix(
sampling_rate: int, fft_size: int, mel_size: int,
mel_min_f0: Union[int, float],
mel_max_f0: Union[int, float],
device: torch.device
) -> Tensor:
"""
Args:
sampling_rate: Sampling rate in Hertz.
fft_size: FFT size, must be an even number.
mel_size: Number of mel-filter banks.
mel_min_f0: Lowest frequency in the mel spectrogram.
mel_max_f0: Highest frequency in the mel spectrogram.
device: Target device of the transformation matrix.
Returns:
basis: [mel_size, fft_size // 2 + 1].
"""
basis = torch.FloatTensor(
mel_fn(sampling_rate, fft_size, mel_size, mel_min_f0, mel_max_f0)
).transpose(-1, -2)
return basis.to(device) | 31.805195 | 78 | 0.642303 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 704 | 0.287464 |
ef9edac80f3106bed3243580dd908ece6900cb29 | 379 | py | Python | order/urls.py | xxcfun/trip-api | a51c8b6033ba2a70cf0e400180f31809f4ce476a | [
"Apache-2.0"
]
| 1 | 2021-06-18T03:03:40.000Z | 2021-06-18T03:03:40.000Z | order/urls.py | xxcfun/trip-api | a51c8b6033ba2a70cf0e400180f31809f4ce476a | [
"Apache-2.0"
]
| null | null | null | order/urls.py | xxcfun/trip-api | a51c8b6033ba2a70cf0e400180f31809f4ce476a | [
"Apache-2.0"
]
| null | null | null | from django.urls import path
from order import views
urlpatterns = [
# 订单提交接口
path('ticket/submit/', views.TicketOrderSubmitView.as_view(), name='ticket_submit'),
# 订单详情(支付、取消、删除)
path('order/detail/<int:sn>/', views.OrderDetail.as_view(), name='order_detail'),
# 订单列表
path('order/list/', views.OrderListView.as_view(), name='order_list')
]
| 29.153846 | 89 | 0.664908 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 175 | 0.409836 |
ef9f39f03563135dc82bcc1a0e27d1ea6a62e525 | 349 | py | Python | api/models.py | yaroshyk/todo | 828d5afc9abd85cd7f8f25e4d01f90c765231357 | [
"MIT"
]
| 3 | 2021-05-30T19:04:37.000Z | 2021-08-30T14:16:57.000Z | api/models.py | yaroshyk/todo | 828d5afc9abd85cd7f8f25e4d01f90c765231357 | [
"MIT"
]
| null | null | null | api/models.py | yaroshyk/todo | 828d5afc9abd85cd7f8f25e4d01f90c765231357 | [
"MIT"
]
| null | null | null | from django.db import models
class Todo(models.Model):
title = models.CharField(max_length=100)
details = models.TextField()
date = models.DateTimeField(auto_now_add=True)
group = models.TextField(default='home')
user_id = models.IntegerField()
objects = models.Manager()
def __str__(self):
return self.title
| 23.266667 | 50 | 0.696275 | 317 | 0.908309 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.017192 |
ef9fbd19157663838a068e78c39ee7e40bade1b6 | 127 | py | Python | delightfulsoup/utils/unminify.py | etpinard/delightfulsoup | 6d8cf976bf216e0e311808ffbd871a5915ba7b09 | [
"MIT"
]
| null | null | null | delightfulsoup/utils/unminify.py | etpinard/delightfulsoup | 6d8cf976bf216e0e311808ffbd871a5915ba7b09 | [
"MIT"
]
| null | null | null | delightfulsoup/utils/unminify.py | etpinard/delightfulsoup | 6d8cf976bf216e0e311808ffbd871a5915ba7b09 | [
"MIT"
]
| null | null | null | """
unminify
========
"""
def unminify(soup, encoding='utf-8'):
"""
"""
return soup.prettify().encode(encoding)
| 10.583333 | 43 | 0.527559 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.346457 |
ef9feaf45807510f4cf448436f428cc436b0de04 | 744 | py | Python | main.py | bhaskar-nair2/Coded-Passwords | 306d01e54bf43c46267ed12c907a49932326b931 | [
"MIT"
]
| null | null | null | main.py | bhaskar-nair2/Coded-Passwords | 306d01e54bf43c46267ed12c907a49932326b931 | [
"MIT"
]
| null | null | null | main.py | bhaskar-nair2/Coded-Passwords | 306d01e54bf43c46267ed12c907a49932326b931 | [
"MIT"
]
| null | null | null | import hashlib
class data:
def __init__(self,username,password):
self.username=username
self.hash=self.get_hash(password)
def get_hash(self,password):
for _ in range(0,9999999):
head=(str(_)+self.username+password).encode()
i=hashlib.sha3_256(head).hexdigest()
if(i[:4]=='0000'):
self.num=_
return i
@staticmethod
def retrive(username,password,hash,num):
head = (str(num) + username + password).encode()
i = hashlib.sha3_256(head).hexdigest()
if(i==hash):
return True
else:
return False
def maker(self):
arr = {"hash": self.hash, "num": self.num}
return arr | 28.615385 | 57 | 0.555108 | 729 | 0.979839 | 0 | 0 | 246 | 0.330645 | 0 | 0 | 17 | 0.022849 |
efa05bae4ae4b077bd16954d59ff3b20aac6edc2 | 17,709 | py | Python | src/upper/utils.py | USArmyResearchLab/ARL-UPPER | 2f79f25338f18655b2a19c8afe3fed267cc0f198 | [
"Apache-2.0"
]
| 4 | 2020-09-14T06:13:04.000Z | 2020-11-21T07:10:36.000Z | src/upper/utils.py | USArmyResearchLab/ARL-UPPER | 2f79f25338f18655b2a19c8afe3fed267cc0f198 | [
"Apache-2.0"
]
| null | null | null | src/upper/utils.py | USArmyResearchLab/ARL-UPPER | 2f79f25338f18655b2a19c8afe3fed267cc0f198 | [
"Apache-2.0"
]
| 2 | 2020-03-15T17:59:26.000Z | 2020-09-14T06:13:05.000Z | from typing import Tuple
from rdkit import Chem
from rdkit.Chem import Draw
import re
import itertools
import numpy as np
import networkx as nx
import logging
import collections
def FindBreakingBonds(cnids: list, bids: list, bts: list, atomic_nums: list) -> list:
"""Returns bond ids to be broken. Check for double/triple bonds;
if exists, check if heteroatom; if heteroatom, bonds of that C atom
are not broken."""
x1s = []
rmflag = None
for (i, bt) in enumerate(bts):
for (j, x) in enumerate(bt):
if x == Chem.rdchem.BondType.DOUBLE or x == Chem.rdchem.BondType.TRIPLE:
if atomic_nums[cnids[i][j]] != 6 and atomic_nums[cnids[i][j]] != 1:
rmflag = True
break
if not rmflag:
x1s.append(i)
rmflag = None
return [bids[x1] for x1 in x1s]
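# A minimal sketch of FindBreakingBonds on hypothetical toy inputs: carbon 0
# has neighbors 1 (single bond, id 0) and 2 (double bond, id 1). With atom 2
# an oxygen (a C=O group), none of carbon 0's bonds are broken:
#   FindBreakingBonds(
#       cnids=[[1, 2]], bids=[[0, 1]],
#       bts=[[Chem.rdchem.BondType.SINGLE, Chem.rdchem.BondType.DOUBLE]],
#       atomic_nums=[6, 6, 8],
#   )  # -> []
# With atom 2 also a carbon, both of carbon 0's bond ids are returned: [[0, 1]].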
def FragNeighborBreakingBondTypes(
neighbor_ids: list, fnids: list, faids: list, bond_type_matrix: list
) -> list:
"""Determine broken bond types between fragments and fragment neighbors."""
# neighbor ids of fragment neighbors
nids_of_fnids = [[neighbor_ids[x] for x in y] for y in fnids]
# atom ids 'bonded' to fragment neighbor
int_ = [
[Intersection(x, faids[i])[0] for x in y] for (i, y) in enumerate(nids_of_fnids)
]
return [
[bond_type_matrix[x[i]][y[i]] for (i, z) in enumerate(x)]
for (x, y) in zip(fnids, int_)
]
def EditFragNeighborIds(fnids: list, bbtps: list) -> list:
"""Remove fragment neighbor ids that are doubly/triply bonded to fragment."""
# not double/triple bonds
n23bonds = [
[
(x != Chem.rdchem.BondType.DOUBLE and x != Chem.rdchem.BondType.TRIPLE)
for x in y
]
for y in bbtps
]
# return new fragment neighbor ids
return [
[x for (j, x) in enumerate(y) if n23bonds[i][j]] for (i, y) in enumerate(fnids)
]
def num_atom_rings_1bond(atom_rings: tuple, bond_rings: tuple, num_atoms: int) -> list:
"""Number of rings each atoms is in. Only rings sharing at most
1 bond with neighboring rings are considered."""
# atom ids of rings that share at most 1 bond with neighboring rings
atom_rings_1bond = [
atom_rings[i]
for (i, y) in enumerate(bond_rings)
if not any(
IntersectionBoolean(x, y, 2)
for x in [z for (j, z) in enumerate(bond_rings) if i != j]
)
]
return [sum(i in x for x in atom_rings_1bond) for i in range(num_atoms)]
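# A hedged sketch on toy ring data (hypothetical ids): two rings sharing a
# single bond (id 9) both qualify; a pair sharing two or more bonds would not.
#   num_atom_rings_1bond(
#       atom_rings=((0, 1, 2, 3), (3, 4, 5, 0)),
#       bond_rings=((0, 1, 2, 9), (9, 4, 5, 6)),
#       num_atoms=6,
#   )  # atoms 0 and 3 lie in both rings -> [2, 1, 1, 2, 1, 1]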
def UniqueElements(x: list) -> list:
"""Returns unique elements of a list (not order preserving)."""
keys = {}
for e in x:
keys[e] = 1
return list(keys.keys())
def NeighborIDs(neighbor_ids: list, atomic_nums: list, y: list) -> list:
"""Find neighbor ids of a list of atoms (Hs not included)."""
# neighbor ids
z = [neighbor_ids[x] for x in y]
# remove Hs
return [[x for x in y if atomic_nums[x] != 1] for y in z]
def GetFragments(
smiles: str,
mol: Chem.rdchem.Mol,
neighbor_ids: list,
atomic_nums: list,
bond_id_matrix: list,
bond_type_matrix: list,
) -> Tuple[list, list, list]:
"""Fragment the molecule with isolated carbons method, see
Lian and Yalkowsky, JOURNAL OF PHARMACEUTICAL SCIENCES 103:2710-2723."""
# carbons
cids = [i for (i, x) in enumerate(atomic_nums) if x == 6]
# carbon neighbor ids
cnids = NeighborIDs(neighbor_ids, atomic_nums, cids)
# bond ids
bids = [
[bond_id_matrix[cid][cnid] for cnid in cnids]
for (cid, cnids) in zip(cids, cnids)
]
# bond types
bts = [
[bond_type_matrix[cid][cnid] for cnid in cnids]
for (cid, cnids) in zip(cids, cnids)
]
# broken bond ids
bbids = FindBreakingBonds(cnids, bids, bts, atomic_nums)
# break bonds, get fragments
try:
fmol = Chem.FragmentOnBonds(
mol, UniqueElements(list(itertools.chain.from_iterable(bbids)))
)
    except Exception:
        fmol = mol
        logging.info("fragmentation exception: %s", smiles)
# draw fragments, debugging only, expensive
# Draw.MolToFile(fmol,'fmol.png')
# fragment atom ids
faids = [list(x) for x in Chem.rdmolops.GetMolFrags(fmol)]
# fragment smiles
fsmiles = [Chem.rdmolfiles.MolFragmentToSmiles(fmol, frag) for frag in faids]
# fragment smarts
fsmarts = [Chem.rdmolfiles.MolFragmentToSmarts(fmol, frag) for frag in faids]
return faids, fsmiles, fsmarts
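# Hedged usage sketch for GetFragments. The adjacency/bond matrices are built
# elsewhere in the package; the construction below is only illustrative:
#   mol = Chem.AddHs(Chem.MolFromSmiles("CCO"))
#   n = mol.GetNumAtoms()
#   atomic_nums = [a.GetAtomicNum() for a in mol.GetAtoms()]
#   neighbor_ids = [[b.GetIdx() for b in a.GetNeighbors()] for a in mol.GetAtoms()]
#   bond_id_matrix = [[-1] * n for _ in range(n)]
#   bond_type_matrix = [[0] * n for _ in range(n)]
#   for b in mol.GetBonds():
#       i, j = b.GetBeginAtomIdx(), b.GetEndAtomIdx()
#       bond_id_matrix[i][j] = bond_id_matrix[j][i] = b.GetIdx()
#       bond_type_matrix[i][j] = bond_type_matrix[j][i] = b.GetBondType()
#   faids, fsmiles, fsmarts = GetFragments(
#       "CCO", mol, neighbor_ids, atomic_nums, bond_id_matrix, bond_type_matrix)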
def FragNeighborID(fsmile: str) -> list:
"""End atoms bonded to a fragment."""
    fnid = re.compile(r"(%s|%s)" % (r"\d+(?=\*)", r"\*[^\]]")).findall(fsmile)
fnid = fnid if fnid else ["-1"]
return [int(x) if "*" not in x else 0 for x in fnid]
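# Sketch of the dummy-atom parsing on hypothetical fragment SMILES:
#   FragNeighborID("[12*]CC")  # -> [12]  numbered dummy -> neighbor atom id 12
#   FragNeighborID("*CC")      # -> [0]   unnumbered dummy -> id 0
#   FragNeighborID("CC")       # -> [-1]  no dummy -> sentinel, dropped later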
def FragNeighborIDs(fsmiles: list) -> list:
"""End atoms bonded to fragments."""
fnids = list(map(FragNeighborID, fsmiles))
return [x if (-1 not in x) else [] for x in fnids]
def BondedFragNeighborIDs(true_faids: list, fnids: list) -> list:
"""Neighbor fragment ids (not atom ids)."""
return [[k for (k, x) in enumerate(true_faids) for j in i if j in x] for i in fnids]
def NumHybridizationType(htype: Chem.rdchem.HybridizationType, fnhybrds: list) -> list:
"""Number of specified hybridization type for each fragment."""
return [sum(x == htype for x in fnhybrd) for fnhybrd in fnhybrds]
def Intersection(x: list, y: list) -> list:
"""Elements that match between two lists."""
return list(set(x) & set(y))
def IntersectionBoolean(x: list, y: list, z: int) -> bool:
"""Returns whether or not two lists overlap with at least z common elements."""
return len(set(x) & set(y)) >= z
def FindIdsWithHtype(
fids: list, fnids: list, fnhybrds: list, htype: Chem.rdchem.HybridizationType
) -> list:
"""Find fragment neighbor ids with htype."""
fnhybrds_in_fids = [fnhybrds[x] for x in fids]
fnids_in_fids = [fnids[x] for x in fids]
    hids = []
    for x1, x in enumerate(fnhybrds_in_fids):
        for x2, y in enumerate(x):
            if y == htype:
                hids.append(fnids_in_fids[x1][x2])
    return hids
def AromaticRings(atom_ids_in_rings: list, bond_type_matrix: list) -> list:
"""Return if bonds in rings are aromatic."""
# atom ids in rings
atom_ids_in_rings = [np.array(x) for x in atom_ids_in_rings]
return [
[
(bond_type_matrix[int(x)][int(y)] == Chem.rdchem.BondType.AROMATIC)
for (x, y) in zip(z, z.take(range(1, len(z) + 1), mode="wrap"))
]
for z in atom_ids_in_rings
]
def TrueFragAtomIDs(num_atoms: int, faids: list) -> list:
"""Remove dummy atom ids from fragments."""
return [[x for x in y if x < num_atoms] for y in faids]
def FindCentralCarbonsOfBiphenyl(
biphenyl_substructs: list,
neighbor_ids: list,
atomic_nums: list,
bond_matrix: list,
bond_type_matrix: list,
) -> list:
"""Find central carbons of biphenyl substructures."""
# find one of the central carbons in biphenyl substructures
cc = []
for z in biphenyl_substructs:
for (x, y) in zip(z, z.take(range(1, len(z) + 1), mode="wrap")):
if not bond_matrix[int(x)][int(y)]:
cc.append(int(y))
break
# find carbon that is singly bonded - other central carbon
ccs = []
for (i, y) in enumerate(NeighborIDs(neighbor_ids, atomic_nums, cc)):
for x in y:
if bond_type_matrix[cc[i]][x] == Chem.rdchem.BondType.SINGLE:
ccs.append([cc[i], x])
break
return ccs
def Flatten(x: list) -> list:
"""Flatten a list."""
return list(itertools.chain.from_iterable(x))
def RemoveElements(x: list, y: list) -> list:
"""Remove elements (y) from a list (x)."""
for e in y:
x.remove(e)
return x
def Graph(x: tuple) -> nx.classes.graph.Graph:
"""Make graph structure from atom ids. Used to find independent ring systems."""
# initialize graph
graph = nx.Graph()
# add nodes and edges
for part in x:
graph.add_nodes_from(part)
graph.add_edges_from(zip(part[:-1], part[1:]))
return graph
def NumIndRings(x: tuple) -> int:
"""Number of independent single, fused, or conjugated rings."""
return len(list(nx.connected_components(Graph(x))))
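# Sketch with toy atom-id tuples: rings sharing atoms form one connected
# component (fused), disjoint rings count separately:
#   NumIndRings(((0, 1, 2, 3, 4, 5), (4, 5, 6, 7, 8, 9)))    # -> 1 (fused)
#   NumIndRings(((0, 1, 2, 3, 4, 5), (6, 7, 8, 9, 10, 11)))  # -> 2 (separate)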
def ReduceFsmarts(fsmarts: list) -> list:
"""Rewrite fragment smarts."""
return [re.sub(r"\d+\#", "#", x) for x in fsmarts]
def EndLabels(fnbbtps: list) -> list:
"""End label of group.
- : bonded to one neighbor and btype = single
= : one neighbor is bonded with btype = double
tri- : one neighbor is bonded with btype = triple
    allenic- : allenic atom, two neighbors are bonded with btype = double"""
l = ["" for x in fnbbtps]
for (i, x) in enumerate(fnbbtps):
if len(x) == 1 and x.count(Chem.rdchem.BondType.SINGLE) == 1:
l[i] = "-"
continue
if x.count(Chem.rdchem.BondType.DOUBLE) == 1:
l[i] = "="
continue
if x.count(Chem.rdchem.BondType.TRIPLE) == 1:
l[i] = "tri-"
continue
if x.count(Chem.rdchem.BondType.DOUBLE) == 2:
l[i] = "allenic-"
return l
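# Hedged example of EndLabels on toy broken-bond-type lists:
#   EndLabels([
#       [Chem.rdchem.BondType.SINGLE],                               # '-'
#       [Chem.rdchem.BondType.DOUBLE, Chem.rdchem.BondType.SINGLE],  # '='
#       [Chem.rdchem.BondType.DOUBLE, Chem.rdchem.BondType.DOUBLE],  # 'allenic-'
#   ])  # -> ['-', '=', 'allenic-']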
def FragAtomBondTypeWithSp2(
fnhybrds: list,
fnids: list,
neighbor_ids: list,
atomic_nums: list,
faids: list,
bond_type_matrix: list,
) -> list:
"""Bond type between fragment atom and neighboring sp2 atom."""
# fragment ids bonded to one sp2 atom
fids = [
i
for i, x in enumerate(
NumHybridizationType(Chem.rdchem.HybridizationType.SP2, fnhybrds)
)
if x == 1
]
# atom id in fragments corresponding to the sp2 atom
sp2ids = FindIdsWithHtype(fids, fnids, fnhybrds, Chem.rdchem.HybridizationType.SP2)
# neighbor atom ids of sp2 atoms
sp2nids = NeighborIDs(neighbor_ids, atomic_nums, sp2ids)
# intersection between sp2nids and atom ids in fragments with one sp2 atom
faid = list(
itertools.chain.from_iterable(
[Intersection(x, y) for (x, y) in zip([faids[x] for x in fids], sp2nids)]
)
)
# bond type fragment atom and sp2 atom
bts = [bond_type_matrix[x][y] for (x, y) in zip(sp2ids, faid)]
# generate list with bond types for each fragment, zero for fragments without one sp2 atom
afbts = [0] * len(fnhybrds)
for (x, y) in zip(fids, bts):
afbts[x] = y
return afbts
symm_rules: dict = {
2: {
1: {
Chem.rdchem.HybridizationType.SP: 2,
Chem.rdchem.HybridizationType.SP2: 2,
Chem.rdchem.HybridizationType.SP3: 2,
},
2: {
Chem.rdchem.HybridizationType.SP: 1,
Chem.rdchem.HybridizationType.SP2: 1,
Chem.rdchem.HybridizationType.SP3: 1,
},
},
3: {
1: {Chem.rdchem.HybridizationType.SP2: 6, Chem.rdchem.HybridizationType.SP3: 3},
2: {Chem.rdchem.HybridizationType.SP2: 2, Chem.rdchem.HybridizationType.SP3: 1},
3: {Chem.rdchem.HybridizationType.SP2: 1, Chem.rdchem.HybridizationType.SP3: 1},
},
4: {
1: {Chem.rdchem.HybridizationType.SP3: 12},
2: {Chem.rdchem.HybridizationType.SP3: 0},
3: {Chem.rdchem.HybridizationType.SP3: 1},
4: {Chem.rdchem.HybridizationType.SP3: 1},
},
}
def Symm(
smiles: str,
num_attached_atoms: int,
num_attached_types: int,
center_hybrid: Chem.rdchem.HybridizationType,
count_rankings: collections.Counter,
) -> int:
"""Molecular symmetry."""
try:
symm = symm_rules[num_attached_atoms][num_attached_types][center_hybrid]
    except KeyError:
logging.warning("symmetry exception: {}".format(smiles))
symm = np.nan
# special case
if symm == 0:
vals = list(count_rankings.values())
symm = 3 if (vals == [1, 3] or vals == [3, 1]) else 2
return symm
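# Hedged usage sketch: for a methane-like center (four attached atoms of one
# type around an sp3 carbon) the lookup table above yields a symmetry of 12;
# the Counter argument only matters for the symm == 0 special case:
# Symm("C", 4, 1, Chem.rdchem.HybridizationType.SP3, collections.Counter())
# -> 12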
def DataReduction(y: dict, group_labels: list) -> None:
"""Remove superfluous data for single molecule."""
for l in group_labels:
y[l] = list(itertools.compress(zip(y["fsmarts"], range(y["num_frags"])), y[l]))
def NFragBadIndices(d: np.ndarray, group_labels: list, smiles: list) -> None:
"""Indices of compounds that do not have consistent number of fragments."""
def NFragCheck(y: dict) -> bool:
"""Check number of fragments and group contributions are consistent."""
num_frags = 0
for l in group_labels:
num_frags += len(y[l])
return num_frags != y["num_frags"]
x = list(map(NFragCheck, d))
indices = list(itertools.compress(range(len(x)), x))
logging.info(
"indices of molecules with inconsistent number of fragments:\n{}".format(
indices
)
)
logging.info("and their smiles:\n{}".format([smiles[x] for x in indices]))
def UniqueGroups(d: np.ndarray, num_mol: int, group_labels: list) -> list:
"""Unique fragments for each environmental group."""
# fragments for each group
groups = [[d[i][j] for i in range(num_mol)] for j in group_labels]
# eliminate fragment ids
groups = [[x[0] for x in Flatten(y)] for y in groups]
return [UniqueElements(x) for x in groups]
def UniqueLabelIndices(flabels: list) -> list:
"""Indices of unique fingerprint labels."""
sort_ = [sorted(x) for x in flabels]
tuple_ = [tuple(x) for x in sort_]
unique_labels = [list(x) for x in sorted(set(tuple_), key=tuple_.index)]
return [[i for (i, x) in enumerate(sort_) if x == y] for y in unique_labels]
def UniqueLabels(flabels: list, indices: list) -> list:
"""Unique fingerprint labels."""
return [flabels[x[0]] for x in indices]
def UniqueFingerprint(indices: list, fingerprint: np.ndarray) -> np.ndarray:
"""Reduce fingerprint according to unique labels."""
fp = np.zeros((fingerprint.shape[0], len(indices)))
for (j, x) in enumerate(indices):
fp[:, j] = np.sum(fingerprint[:, x], axis=1)
return fp
def UniqueLabelsAndFingerprint(
flabels: list, fingerprint: np.ndarray
) -> Tuple[list, np.ndarray]:
"""Reduced labels and fingerprint."""
uli = UniqueLabelIndices(flabels)
ul = UniqueLabels(flabels, uli)
fp = UniqueFingerprint(uli, fingerprint)
return ul, fp
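# Hedged sketch of the reduction above (toy labels/fingerprint): columns whose
# sorted labels match are summed into one column:
# ul, fp = UniqueLabelsAndFingerprint(
#     [["a", "b"], ["b", "a"], ["c"]],
#     np.array([[1.0, 2.0, 3.0]]),
# )
# # ul -> [["a", "b"], ["c"]]; fp -> array([[3., 3.]])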
def CountGroups(fingerprint_groups: list, group_labels: list, d: dict) -> list:
"""Count groups for fingerprint."""
return [
[[x[0] for x in d[y]].count(z) for z in fingerprint_groups[i]]
for (i, y) in enumerate(group_labels)
]
def Concat(x: list, y: list) -> list:
"""Concatenate groups and singles in fingerprint."""
return x + y
def MakeFingerprint(
fingerprint_groups: list, labels: dict, d: np.ndarray, num_mol: int
) -> np.ndarray:
"""Make fingerprint."""
# count groups and make fingerprint
fp_groups = [
Flatten(CountGroups(fingerprint_groups, labels["groups"], d[:, 0][i]))
for i in range(num_mol)
]
# reduce singles to requested
fp_singles = [[d[:, 1][i][j] for j in labels["singles"]] for i in range(num_mol)]
# concat groups and singles
return np.array(list(map(Concat, fp_groups, fp_singles)))
def ReduceMultiCount(d: dict) -> None:
"""Ensure each fragment belongs to one environmental group.
Falsify Y, Z when YZ true
Falsify YY, Z when YYZ true
Falsify YYY, Z when YYYZ true
Falsify RG when AR true
..."""
def TrueIndices(group: str) -> list:
"""Return True indices."""
x = d[group]
return list(itertools.compress(range(len(x)), x))
def ReplaceTrue(replace_group: list, actual_group: list) -> None:
"""Replace True elements with False to avoid overcounting fragment contribution."""
replace_indices = list(map(TrueIndices, replace_group))
actual_indices = list(map(TrueIndices, actual_group))
for actual_index in actual_indices:
for (group, replace_index) in zip(replace_group, replace_indices):
int_ = Intersection(replace_index, actual_index)
for x in int_:
d[group][x] = False
replace_groups = [
["Y", "Z"],
["YY", "Z"],
["YYY", "Z"],
["RG"],
["X", "Y", "YY", "YYY", "YYYY", "YYYYY", "Z", "ZZ", "YZ", "YYZ"],
["RG", "AR"],
["AR", "BR2", "BR3", "FU"],
["RG", "AR"],
]
actual_groups = [
["YZ"],
["YYZ"],
["YYYZ"],
["AR"],
["RG", "AR"],
["BR2", "BR3"],
["BIP"],
["FU"],
]
list(map(ReplaceTrue, replace_groups, actual_groups))
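# Hedged example of the falsification rules above: with a single fragment
# flagged both as "YZ" and as its components (the dict must also contain the
# other groups listed in replace_groups/actual_groups, here all False), the
# component flags are cleared so the fragment is only counted once:
# d = {..., "Y": [True], "Z": [True], "YZ": [True], ...}
# ReduceMultiCount(d)  # afterwards d["Y"] == [False] and d["Z"] == [False]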
def RewriteFsmarts(d: dict) -> None:
"""Rewrite fsmarts to 'fsmiles unique' fsmarts."""
def FsmartsDict(d: dict) -> dict:
"""Dict of original fsmarts to 'fsmiles unique' fsmarts."""
# unique smarts in dataset, mols
fsmarts = UniqueElements(Flatten([x[0]["fsmarts"] for x in d]))
fmols = [Chem.MolFromSmarts(x) for x in fsmarts]
# smiles, not necessarily unique
fsmiles = [Chem.MolToSmiles(x) for x in fmols]
# dict: original fsmarts to 'fsmiles unique' fsmarts
dict_ = collections.defaultdict(lambda: len(dict_))
fsmarts_dict = {}
for (i, x) in enumerate(fsmarts):
fsmarts_dict[x] = fsmarts[dict_[fsmiles[i]]]
return fsmarts_dict
fsmarts_dict = FsmartsDict(d)
# rewrite fsmarts
for (i, y) in enumerate(d):
d[i][0]["fsmarts"] = [fsmarts_dict[x] for x in y[0]["fsmarts"]]
| 28.65534 | 94 | 0.611666 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,391 | 0.247953 |
efa2c84741d3637cb65c8fc32a0abc9a577fb053 | 3,317 | py | Python | 025_reverse-nodes-in-k-group.py | tasselcui/leetcode | 5c32446b8b5bf3711cf28e465f448c6a0980f259 | ["MIT"] | null | null | null | 025_reverse-nodes-in-k-group.py | tasselcui/leetcode | 5c32446b8b5bf3711cf28e465f448c6a0980f259 | ["MIT"] | null | null | null | 025_reverse-nodes-in-k-group.py | tasselcui/leetcode | 5c32446b8b5bf3711cf28e465f448c6a0980f259 | ["MIT"] | null | null | null |
# # -*- coding: utf-8 -*-
# """
# Created on Sun Aug 5 08:07:19 2018
#
# @author: lenovo
# """
# 25. Reverse Nodes in k-Group
# Given a linked list, reverse the nodes of a linked list k at a time and return its modified list.
#
# k is a positive integer and is less than or equal to the length of the linked list. If the number of nodes is not a multiple of k then left-out nodes in the end should remain as it is.
#
# Example:
#
# Given this linked list: 1->2->3->4->5
#
# For k = 2, you should return: 2->1->4->3->5
#
# For k = 3, you should return: 3->2->1->4->5
#
# Note:
#
# Only constant extra memory is allowed.
# You may not alter the values in the list's nodes, only nodes itself may be changed.
# =============================================================================
# =============================================================================
# difficulty: hard
# acceptance: 32.7%
# contributor: LeetCode
# =============================================================================
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def reverseKGroup(self, head, k):
"""
:type head: ListNode
:type k: int
:rtype: ListNode
"""
def reverseList(head, k):
pre = None
cur = head
while cur and k:
temp = cur.next
cur.next = pre
pre = cur
cur = temp
k -= 1
return (cur, pre)
p1 = head
length = 0
while p1:
length += 1
p1 = p1.next
if length < k:
return head
step = length // k
p = head
res = None
pre = None
while p and step:
nextp, newhead = reverseList(p, k)
if not res:
res = newhead
if pre:
pre.next = newhead
pre = p
p = nextp
step -= 1
pre.next = p
return res
# reverseList(head, k)
# =============================================================================
# def reverseList(head, k):
# pre = None
# cur = head
# while cur and k:
# temp = cur.next
# cur.next = pre
# pre = cur
# cur = temp
# k -= 1
# return (cur, pre)
# =============================================================================
#------------------------------------------------------------------------------
# note: below is the test code
a = ListNode(1)
b = ListNode(2)
c = ListNode(3)
d = ListNode(4)
a.next = b
b.next = c
c.next = d
test = a
S = Solution()
result = S.reverseKGroup(test, 3)
#result = a
while result:
print(result.val)
result = result.next
#------------------------------------------------------------------------------
# note: below is the submission detail
# =============================================================================
# Submission Detail
# 81 / 81 test cases passed.
# Status: Accepted
# Runtime: 56 ms
# Submitted: 0 minutes ago
# beats 93.99% python3 submissions
# =============================================================================
| 28.843478 | 186 | 0.403075 | 1,037 | 0.312632 | 0 | 0 | 0 | 0 | 0 | 0 | 2,093 | 0.630992 |
efa5e69113b0347792c870829c3f62690cf050bb | 2,708 | py | Python | perma_web/perma/tests/test_views_common.py | leppert/perma | adb0cec29679c3d161d72330e19114f89f8c42ac | ["MIT", "Unlicense"] | null | null | null | perma_web/perma/tests/test_views_common.py | leppert/perma | adb0cec29679c3d161d72330e19114f89f8c42ac | ["MIT", "Unlicense"] | null | null | null | perma_web/perma/tests/test_views_common.py | leppert/perma | adb0cec29679c3d161d72330e19114f89f8c42ac | ["MIT", "Unlicense"] | null | null | null |
from django.core import mail
from django.core.urlresolvers import reverse
from perma.urls import urlpatterns
from .utils import PermaTestCase
class CommonViewsTestCase(PermaTestCase):
def test_public_views(self):
# test static template views
for urlpattern in urlpatterns:
if urlpattern.callback.func_name == 'DirectTemplateView':
resp = self.get(urlpattern.name)
def test_misformatted_nonexistent_links_404(self):
response = self.client.get(reverse('single_linky', kwargs={'guid': 'JJ99--JJJJ'}))
self.assertEqual(response.status_code, 404)
response = self.client.get(reverse('single_linky', kwargs={'guid': '988-JJJJ=JJJJ'}))
self.assertEqual(response.status_code, 404)
def test_properly_formatted_nonexistent_links_404(self):
response = self.client.get(reverse('single_linky', kwargs={'guid': 'JJ99-JJJJ'}))
self.assertEqual(response.status_code, 404)
# Test the original ID style. We shouldn't get a redirect.
response = self.client.get(reverse('single_linky', kwargs={'guid': '0J6pkzDeQwT'}))
self.assertEqual(response.status_code, 404)
def test_contact(self):
# Does our contact form behave reasonably?
# The form should be fine will all fields
message_body = 'Just some message here'
from_email = '[email protected]'
self.submit_form('contact', data={
'email': from_email,
'message': message_body},
success_url=reverse('contact_thanks'))
# check contents of sent email
message = mail.outbox[0]
self.assertIn(message_body, message.body)
self.assertEqual(message.subject, 'New message from Perma contact form')
self.assertEqual(message.from_email, settings.DEFAULT_FROM_EMAIL)
self.assertEqual(message.recipients(), [settings.DEFAULT_FROM_EMAIL])
self.assertDictEqual(message.extra_headers, {'Reply-To': from_email})
# We should fail if we don't get a from email
response = self.client.post(reverse('contact'), data={
'email': '',
'message': message_body})
self.assertEqual(response.request['PATH_INFO'], reverse('contact'))
# We need at least a message. We should get the contact page back
# instead of the thanks page.
response = self.client.post(reverse('contact'), data={
'email': from_email,
'message': ''})
        self.assertEqual(response.request['PATH_INFO'], reverse('contact'))
| 43.677419 | 93 | 0.642171 | 2,530 | 0.934269 | 0 | 0 | 0 | 0 | 0 | 0 | 716 | 0.264402 |
efa68642041c99f789a40f12b356c9ba93e64adc | 1,708 | py | Python | GeneratorInterface/Pythia8Interface/python/Py8PtLxyGun_4tau_cfi.py | menglu21/cmssw | c3d6cb102c0aaddf652805743370c28044d53da6 | ["Apache-2.0"] | null | null | null | GeneratorInterface/Pythia8Interface/python/Py8PtLxyGun_4tau_cfi.py | menglu21/cmssw | c3d6cb102c0aaddf652805743370c28044d53da6 | ["Apache-2.0"] | null | null | null | GeneratorInterface/Pythia8Interface/python/Py8PtLxyGun_4tau_cfi.py | menglu21/cmssw | c3d6cb102c0aaddf652805743370c28044d53da6 | ["Apache-2.0"] | null | null | null |
#Note: distances in mm instead of in cm usually used in CMS
generator = cms.EDFilter("Pythia8PtAndLxyGun",
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(1),
pythiaHepMCVerbosity = cms.untracked.bool(True),
PGunParameters = cms.PSet(
ParticleID = cms.vint32(-15, -15),
AddAntiParticle = cms.bool(True), # antiparticle has opposite momentum and production point symmetric wrt (0,0,0) compared to corresponding particle
MinPt = cms.double(15.00),
MaxPt = cms.double(300.00),
MinEta = cms.double(-2.5),
MaxEta = cms.double(2.5),
MinPhi = cms.double(-3.14159265359),
MaxPhi = cms.double(3.14159265359),
LxyMin = cms.double(0.0),
LxyMax = cms.double(550.0), # most tau generated within TOB (55cm)
LzMax = cms.double(300.0),
dxyMax = cms.double(30.0),
dzMax = cms.double(120.0),
ConeRadius = cms.double(1000.0),
ConeH = cms.double(3000.0),
DistanceToAPEX = cms.double(850.0),
LxyBackFraction = cms.double(0.0), # fraction of particles going back towards to center at transverse plan; numbers outside the [0,1] range are set to 0 or 1
LzOppositeFraction = cms.double(0.0), # fraction of particles going in opposite direction wrt to center along beam-line than in transverse plane; numbers outside the [0,1] range are set to 0 or 1
),
Verbosity = cms.untracked.int32(0), ## set to 1 (or greater) for printouts
psethack = cms.string('displaced taus'),
firstRun = cms.untracked.uint32(1),
PythiaParameters = cms.PSet(parameterSets = cms.vstring())
)
| 47.444444 | 203 | 0.668618 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 565 | 0.330796 |
efabd851d1c220194dc0597eebe6be9a8b117165 | 5,492 | py | Python | questions/models.py | stkrizh/otus-django-hasker | 9692b8060a789b0b66b4cf3591a78e32c8a10380 | ["MIT"] | null | null | null | questions/models.py | stkrizh/otus-django-hasker | 9692b8060a789b0b66b4cf3591a78e32c8a10380 | ["MIT"] | 10 | 2020-06-05T22:56:30.000Z | 2022-02-10T08:54:18.000Z | questions/models.py | stkrizh/otus-django-hasker | 9692b8060a789b0b66b4cf3591a78e32c8a10380 | ["MIT"] | null | null | null |
from typing import List, Optional
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
VOTE_UP = 1
VOTE_DOWN = -1
VOTE_CHOICES = ((VOTE_UP, "Vote Up"), (VOTE_DOWN, "Vote Down"))
User = get_user_model()
logger = logging.getLogger(__name__)
class AbstractPost(models.Model):
""" Abstract model that defines common fields and methods
for Question / Answer models.
"""
vote_class: Optional[models.Model] = None
author = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name="%(class)ss",
related_query_name="%(class)s",
)
content = models.TextField(blank=False)
posted = models.DateTimeField(auto_now_add=True)
rating = models.IntegerField(default=0)
number_of_votes = models.IntegerField(default=0)
class Meta:
abstract = True
ordering = ["-posted"]
def vote(self, user, value: int) -> int:
""" Add vote from `user` and return new rating.
"""
assert self.vote_class is not None
assert value in (VOTE_DOWN, VOTE_UP), value
try:
current = self.vote_class.objects.get(user=user, to=self)
except ObjectDoesNotExist:
self.vote_class.objects.create(user=user, to=self, value=value)
logger.debug(
f"New vote ({value}) by {user} hase been created for {self}"
)
return self.rating + value
if current.value == value:
return self.rating
self.vote_class.objects.filter(to=self, user=user).delete()
logger.debug(f"Vote by {user} has been deleted for {self}")
return self.rating + value
class AnswerVote(models.Model):
timestamp = models.DateTimeField(auto_now=True)
to = models.ForeignKey(
"Answer",
on_delete=models.CASCADE,
related_name="votes",
related_query_name="votes",
)
user = models.ForeignKey(
to=User,
on_delete=models.CASCADE,
related_name="%(class)ss",
related_query_name="%(class)s",
)
value = models.SmallIntegerField(choices=VOTE_CHOICES)
class Meta:
ordering = ["-timestamp"]
unique_together = ["to", "user"]
def __str__(self):
return f"{self.user.username} {self.value:+d}"
class Answer(AbstractPost):
vote_class = AnswerVote
is_accepted = models.BooleanField(default=False)
question = models.ForeignKey(
"Question",
on_delete=models.CASCADE,
related_name="answers",
related_query_name="answer",
)
def __str__(self):
return f"{self.question.title} - {self.content[:50]} ..."
def mark(self):
""" Mark the answer as accepted.
"""
self.question.answers.update(is_accepted=False)
self.is_accepted = True
self.save(update_fields=["is_accepted"])
logger.debug(
f"Answer ({self.pk}) by {self.author} has been marked "
f"for question ({self.question.pk})."
)
def unmark(self):
""" Unmark acceptance from the answer.
"""
self.is_accepted = False
self.save(update_fields=["is_accepted"])
logger.debug(
f"Answer ({self.pk}) by {self.author} has been unmarked "
f"for question ({self.question.pk})."
)
class QuestionVote(models.Model):
timestamp = models.DateTimeField(auto_now=True)
to = models.ForeignKey(
"Question",
on_delete=models.CASCADE,
related_name="votes",
related_query_name="votes",
)
user = models.ForeignKey(
to=User,
on_delete=models.CASCADE,
related_name="%(class)ss",
related_query_name="%(class)s",
)
value = models.SmallIntegerField(choices=VOTE_CHOICES)
class Meta:
ordering = ["-timestamp"]
unique_together = ["to", "user"]
def __str__(self):
return f"{self.user.username} {self.value:+d}"
class Question(AbstractPost):
vote_class = QuestionVote
number_of_answers = models.IntegerField(default=0)
tags = models.ManyToManyField("Tag")
title = models.CharField(
blank=False, max_length=settings.QUESTIONS_MAX_TITLE_LEN
)
def __str__(self):
return self.title
@classmethod
def trending(cls, count: int = 5) -> models.QuerySet:
""" Returns a query set of trending questions.
"""
return cls.objects.order_by("-number_of_votes")[:count]
def add_tags(self, tags: List[str], user) -> None:
if self.pk is None:
raise ValueError("Instance should be saved.")
for raw_tag in tags:
try:
tag = Tag.objects.get(name=raw_tag)
except ObjectDoesNotExist:
tag = Tag.objects.create(added=user, name=raw_tag)
self.tags.add(tag)
logger.debug(f"Tags ({tags}) have been added to question {self.pk}")
class Tag(models.Model):
added = models.DateTimeField(auto_now_add=True)
added_by = models.ForeignKey(
User,
blank=True,
null=True,
on_delete=models.SET_NULL,
related_name="added_tags",
related_query_name="added_tag",
)
name = models.CharField(
blank=False, max_length=settings.QUESTIONS_MAX_TAG_LEN
)
def __str__(self):
return self.name
| 27.878173 | 76 | 0.619993 | 5,104 | 0.929352 | 0 | 0 | 201 | 0.036599 | 0 | 0 | 1,097 | 0.199745 |
efac998014549cc9e8410daf8e8486e66ec92ef3 | 1,430 | py | Python | backend/routers/bookmarks.py | heshikirihasebe/fastapi-instagram-clone | 7bc265a62160171c5c5c1b2f18b3c86833cb64e7 | ["MIT"] | 1 | 2022-02-08T19:35:22.000Z | 2022-02-08T19:35:22.000Z | backend/routers/bookmarks.py | heshikirihasebe/fastapi-instagram-clone | 7bc265a62160171c5c5c1b2f18b3c86833cb64e7 | ["MIT"] | null | null | null | backend/routers/bookmarks.py | heshikirihasebe/fastapi-instagram-clone | 7bc265a62160171c5c5c1b2f18b3c86833cb64e7 | ["MIT"] | null | null | null |
from fastapi import APIRouter, Request
from ..classes.jwt_authenticator import JWTAuthenticator
from ..repositories import bookmark_repository
from ..schemas.bookmark_schema import RequestSchema, ResponseSchema
router = APIRouter(
prefix='/bookmarks',
tags=['bookmarks'],
)
# Index
@router.get('/')
async def index():
pass
# Store a new bookmark, or update if exists
@router.post('/', response_model=ResponseSchema, status_code=200)
async def store(request: Request, req: RequestSchema):
authenticator = JWTAuthenticator()
# get a user from http headers
auth = await authenticator.get_current_user(request)
# check the record
bookmark = await bookmark_repository.select_one(user_id=auth['id'], post_id=req.post_id)
if bookmark is None:
# insert a new record
await bookmark_repository.insert(user_id=auth['id'], post_id=req.post_id)
is_bookmarked = True
else:
if bookmark.deleted_at is not None:
# bookmark
await bookmark_repository.update(user_id=auth['id'], post_id=req.post_id, deleted_at=None)
is_bookmarked = True
else:
# unbookmark
await bookmark_repository.update(user_id=auth['id'], post_id=req.post_id, deleted_at=datetime.now())
is_bookmarked = False
response = ResponseSchema(is_bookmarked=is_bookmarked)
return response
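# Hedged usage sketch (endpoint shape inferred from the handler above):
#
#   POST /bookmarks/  with JSON body {"post_id": 42} and auth headers that
#   JWTAuthenticator can resolve to a user. Repeated calls for the same post
#   toggle the response between {"is_bookmarked": true} and
#   {"is_bookmarked": false}, since an existing row is soft-deleted and
#   restored via its deleted_at column.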
| 31.086957 | 112 | 0.706294 | 0 | 0 | 0 | 0 | 1,059 | 0.740559 | 976 | 0.682517 | 186 | 0.13007 |
efae6e71bf3ea6317e5681aeac0b15d509089b29 | 2,044 | py | Python | legos/input_type.py | kamongi/legos | 0b4b5b1300af6677ae4e9c642a211ba3c96726a9 | ["MIT"] | null | null | null | legos/input_type.py | kamongi/legos | 0b4b5b1300af6677ae4e9c642a211ba3c96726a9 | ["MIT"] | null | null | null | legos/input_type.py | kamongi/legos | 0b4b5b1300af6677ae4e9c642a211ba3c96726a9 | ["MIT"] | null | null | null |
import getpass
from franz.openrdf.sail.allegrographserver import AllegroGraphServer
from franz.openrdf.repository.repository import Repository
from franz.miniclient import repository
from franz.openrdf.query.query import QueryLanguage
from franz.openrdf.model import URI
from franz.openrdf.vocabulary.rdf import RDF
from franz.openrdf.vocabulary.rdfs import RDFS
from franz.openrdf.vocabulary.owl import OWL
from franz.openrdf.vocabulary.xmlschema import XMLSchema
from franz.openrdf.query.dataset import Dataset
from franz.openrdf.rio.rdfformat import RDFFormat
from franz.openrdf.rio.rdfwriter import NTriplesWriter
from franz.openrdf.rio.rdfxmlwriter import RDFXMLWriter
class Inputs(object):
    """ This class services the legos tool's command-line inputs. """
def __init__(self):
""" Set important default parameters to use throughout this project. """
self.username = ""
self.password = ""
self.agHostIp = ""
self.agPort = 10035 # Default
self.repoName = ""
self.conn = None
def createRepo(self):
"""
Repository.ACCESS opens an existing repository, or
creates a new one if the repository is not found.
"""
print "\n-------\nPrelude\n-------\n"
try:
            print "Defining connection to AllegroGraph server -- host:'%s' port:%s" % (self.agHostIp, self.agPort)
server = AllegroGraphServer(self.agHostIp, self.agPort, self.username, self.password)
catalog = server.openCatalog()
myRepository = catalog.getRepository(self.repoName, Repository.ACCESS)
myRepository.initialize()
connection = myRepository.getConnection()
print "Repository <%s> is up! It contains <%i> statements.\n" % (myRepository.getDatabaseName(), connection.size())
self.conn = connection
return 0 # All is well.
        except Exception:
print "\n***Oops ... :(\n"
return -1 # Oops
def recycleConns(self):
""" Close any open AllegroGraph connection """
self.conn.close()
connRepository = self.conn.repository
connRepository.shutDown()
        return "\nRecycling is done.\n"
| 31.9375 | 119 | 0.741683 | 1,328 | 0.649706 | 0 | 0 | 0 | 0 | 0 | 0 | 528 | 0.258317 |
efaec7b2aeea24ccd064fbf8dcfa28faac52b446 | 1,635 | py | Python | analysis/scripts/project_functions_Tom.py | data301-2020-winter2/course-project-group_1052 | 3733aacac0812811752d77e5f3d822ef5251c17b | ["MIT"] | null | null | null | analysis/scripts/project_functions_Tom.py | data301-2020-winter2/course-project-group_1052 | 3733aacac0812811752d77e5f3d822ef5251c17b | ["MIT"] | 1 | 2021-03-24T17:16:52.000Z | 2021-03-24T17:16:52.000Z | analysis/scripts/project_functions_Tom.py | data301-2020-winter2/course-project-group_1052 | 3733aacac0812811752d77e5f3d822ef5251c17b | ["MIT"] | null | null | null |
def load_and_process(path):
df1 = pd.read_csv(path)
df2 = (
df1.drop(columns=['songName', 'ogLyric', 'kbLyric'])
.rename(columns={'badword':'badWord'})
.sort_values("ogArtist", ascending = True)
.sort_values("year", ascending = True)
)
return df2
def badword_count(dataframe):
df1 = (pd.DataFrame(dataframe['badWord']
.value_counts())
.reset_index()
.rename(columns={'index':'badWord','badWord':'frequency'})
)
return df1
def unique_word_count(dataframe):
df1 = (dataframe.groupby('ogArtist')['badWord']
.nunique()
.sort_values(ascending = False))
return df1
def words_per_year(dataframe):
df1 = dataframe.drop(columns = ['ogArtist', 'songName', 'category', 'ogLyric', 'kbLyric', 'count'])
df2 = df1.loc[(df1['badWord'] == 'fuck') | (df1['badWord'] == 'shit') |(df1['badWord'] == 'damn') |(df1['badWord'] == 'man') |(df1['badWord'] == 'kiss')]
df3 = df2.value_counts()
df4 = df3.reset_index()
df5 = df4.rename(columns={0:'count'})
return df5
def words_per_year_T4(dataframe):
df1 = dataframe.drop(columns = ['ogArtist', 'category', 'isCensored', 'isPresent', 'count'])
df2 = df1.loc[(df1['badWord'] == 'fuck') | (df1['badWord'] == 'shit') |(df1['badWord'] == 'damn') |(df1['badWord'] == 'man') |(df1['badWord'] == 'kiss')]
df3 = df2.value_counts()
df4 = df3.reset_index()
df5 = df4.rename(columns={0:'count'})
    return df5
| 30.277778 | 157 | 0.55107 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 393 | 0.240367 |
efaec9e129260471f4d26372fd487df99a205a00 | 4,887 | py | Python | utils.py | loc-trinh/GrandmasterZero | 58365890fe2b0145344f17be5fb59e08c8f1993a | ["MIT"] | null | null | null | utils.py | loc-trinh/GrandmasterZero | 58365890fe2b0145344f17be5fb59e08c8f1993a | ["MIT"] | null | null | null | utils.py | loc-trinh/GrandmasterZero | 58365890fe2b0145344f17be5fb59e08c8f1993a | ["MIT"] | null | null | null |
import time
import chess.pgn
import IPython.display as display
import ipywidgets as widgets
def who(player):
return 'White' if player == chess.WHITE else 'Black'
def get_last_move_san_from_board(board):
if len(board.move_stack) == 0:
return chess.Move.null()
else:
last_move = board.pop()
move_san = board.san(last_move)
board.push(last_move)
return move_san
def view_game(pgn_file, manual=False, pause=0.1, print_text=False):
pgn_file = open(pgn_file)
game = chess.pgn.read_game(pgn_file)
board = game.board()
    # note: Mainline is a plain iterable, so materialize it before reversing
    mainline_moves = list(game.mainline_moves())[::-1]
def print_game_info(game):
print('Event:', game.headers['Event'])
print('White:', game.headers['White'])
print('Black:', game.headers['Black'])
print('Result:', game.headers['Result'])
def backward_click(event):
if len(board.move_stack) == 0:
return
move = board.pop()
mainline_moves.append(move)
render(with_manual=manual)
def fbackward_click(event):
if len(board.move_stack) == 0:
return
while len(board.move_stack) > 0:
move = board.pop()
mainline_moves.append(move)
render(with_manual=manual)
def foward_click(event):
if len(mainline_moves) == 0:
return
move = mainline_moves.pop()
board.push(move)
render(with_manual=manual)
def ffoward_click(event):
if len(mainline_moves) == 0:
return
while len(mainline_moves) > 0:
move = mainline_moves.pop()
board.push(move)
render(with_manual=manual)
def render(with_manual):
with output:
html = "<b>Move {} {}, Play '{}':</b><br/>{}".format(
len(board.move_stack), who(not board.turn),
get_last_move_san_from_board(board), board._repr_svg_())
display.clear_output(wait=True)
display.display(display.HTML(html))
if with_manual:
layout = widgets.Layout(width='95px')
btn_fbackward = widgets.Button(description='<<', layout=layout)
btn_backward = widgets.Button(description='<', layout=layout)
btn_forward = widgets.Button(description='>', layout=layout)
btn_fforward = widgets.Button(description='>>', layout=layout)
display.display(
widgets.HBox((btn_fbackward, btn_backward, btn_forward,
btn_fforward)))
btn_fbackward.on_click(fbackward_click)
btn_backward.on_click(backward_click)
btn_forward.on_click(foward_click)
btn_fforward.on_click(ffoward_click)
time.sleep(pause)
print_game_info(game)
if print_text:
print(game.mainline_moves())
else:
output = widgets.Output()
display.display(output)
if manual:
render(with_manual=manual)
else:
while len(mainline_moves) > 0:
move = mainline_moves.pop()
board.push(move)
render(with_manual=manual)
time.sleep(pause)
def play_game(white_player,
black_player,
visualize=False,
move_limit=None,
pause=0.1):
board = chess.Board()
try:
while not board.is_game_over(claim_draw=True):
if move_limit is not None and len(board.move_stack) >= move_limit:
return (None, 'draw: reached move limit', board)
if board.turn == chess.WHITE:
move = white_player.move(board)
else:
move = black_player.move(board)
board.push(move)
if visualize:
html = "<b>Move %s %s, Play '%s':</b><br/>%s" % (
len(board.move_stack), who(not board.turn),
get_last_move_san_from_board(board), board._repr_svg_())
display.clear_output(wait=True)
display.display(display.HTML(html))
time.sleep(pause)
except KeyboardInterrupt:
msg = 'Game interrupted!'
return (None, msg, board)
result = None
if board.is_checkmate():
result = who(not board.turn)
msg = 'checkmate: ' + result + ' wins!'
elif board.is_stalemate():
msg = 'draw: stalemate'
elif board.is_fivefold_repetition():
msg = 'draw: 5-fold repetition'
elif board.is_insufficient_material():
msg = 'draw: insufficient material'
elif board.can_claim_draw():
msg = 'draw: claim'
else:
raise Exception(
'error: game ended without reaching correct ending conditions')
return (result, msg, board)
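# Hedged usage sketch: any object with a move(board) -> chess.Move method can
# act as a player here (the class below is illustrative, not part of this
# module):
#
#   import random
#
#   class RandomPlayer:
#       def move(self, board):
#           return random.choice(list(board.legal_moves))
#
#   result, msg, board = play_game(RandomPlayer(), RandomPlayer(),
#                                  move_limit=200)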
| 32.58 | 79 | 0.575609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 384 | 0.078576 |
efaff942ac5c5e8e164b052efc98c4fab3a41b3b | 1,401 | py | Python | falco/svc/version_pb2_grpc.py | jasondellaluce/client-py | 694780796289fdd20f1588d06e66c5a1b52ecb26 | ["Apache-2.0"] | 20 | 2019-10-14T15:01:14.000Z | 2021-08-09T19:13:08.000Z | falco/svc/version_pb2_grpc.py | jasondellaluce/client-py | 694780796289fdd20f1588d06e66c5a1b52ecb26 | ["Apache-2.0"] | 45 | 2019-10-14T14:55:30.000Z | 2022-02-11T03:27:37.000Z | falco/svc/version_pb2_grpc.py | jasondellaluce/client-py | 694780796289fdd20f1588d06e66c5a1b52ecb26 | ["Apache-2.0"] | 11 | 2019-10-14T17:41:06.000Z | 2022-02-21T05:40:44.000Z |
import grpc
import version_pb2 as version__pb2
class serviceStub(object):
"""This service defines a RPC call
to request the Falco version.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.version = channel.unary_unary(
'/falco.version.service/version',
request_serializer=version__pb2.request.SerializeToString,
response_deserializer=version__pb2.response.FromString,
)
class serviceServicer(object):
"""This service defines a RPC call
to request the Falco version.
"""
def version(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_serviceServicer_to_server(servicer, server):
rpc_method_handlers = {
'version': grpc.unary_unary_rpc_method_handler(
servicer.version,
request_deserializer=version__pb2.request.FromString,
response_serializer=version__pb2.response.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'falco.version.service', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| 28.591837 | 70 | 0.730193 | 802 | 0.572448 | 0 | 0 | 0 | 0 | 0 | 0 | 449 | 0.320485 |
efb0224997c2a73db24a06482baa1e76838ea1f0 | 2,904 | py | Python | query.py | urmi-21/COVID-biorxiv | 6dfe713c2634197b6c9983eb2aa3fa6676f7d045 | ["MIT"] | 2 | 2020-06-29T16:55:17.000Z | 2020-09-21T14:00:16.000Z | query.py | urmi-21/COVID-biorxiv | 6dfe713c2634197b6c9983eb2aa3fa6676f7d045 | ["MIT"] | null | null | null | query.py | urmi-21/COVID-biorxiv | 6dfe713c2634197b6c9983eb2aa3fa6676f7d045 | ["MIT"] | 1 | 2020-09-21T14:00:23.000Z | 2020-09-21T14:00:23.000Z |
import json
import requests
import subprocess
from datetime import datetime
#dict storing data
collection={}
def execute_commandRealtime(cmd):
"""Execute shell command and print stdout in realtime.
Function taken from pyrpipe Singh et.al. 2020
usage:
for output in execute_commandRealtime(['curl','-o',outfile,link]):
print (output)
"""
popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
for stdout_line in iter(popen.stdout.readline, ""):
yield stdout_line
popen.stdout.close()
return_code = popen.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, cmd)
def update_collection():
'''
Download bioarxiv and medarxiv collections
'''
link='https://connect.biorxiv.org/relate/collection_json.php?grp=181'
outfile='collection.json'
print('Downloading ...')
for output in execute_commandRealtime(['curl','-o',outfile,link]):
print (output)
def read_collection():
'''
    Load collection.json and return the list of records ('rels')
'''
filename='collection.json'
with open(filename) as f:
data = json.load(f)
for key,value in data.items() :
#print (key,":",value)
if key=='rels':
val=data[key]
print('{} records found'.format(len(val)))
return value
def get_terms():
print('Available terms:')
for key,value in collection[0].items():
print(key)
def searchall(keywords):
result=[]
for k in keywords:
result.extend(search(k))
return result
def search(term):
#search in collection is a list of dicts
print('Searching',term)
result=[]
for d in collection:
#seach in all keys
for key,value in d.items():
if term.lower() in str(value).lower():
#print (d['rel_title'])
result.append(d)
#print('total matches: {}'.format(len(result)))
return result
def get_title(res):
titles=[]
for d in res:
if not d['rel_title'] in titles:
titles.append(d['rel_title'])
#print(d['rel_title'])
return titles
def filter_date(res,startdate):
'''
keep results by date
'''
filtered=[]
for d in res:
if datetime.strptime(d['rel_date'], '%Y-%m-%d')>=startdate:
filtered.append(d)
return filtered
# Step 1: update_collection() downloads roughly 15 MB of JSON data
#update_collection()
#read collection in memory
collection=read_collection()
#see available terms
#get_terms()
#perform search
#res=search(' RNA-seq')
tosearch=[' RNA-seq','transcriptom','express','sequencing']
res=searchall(tosearch)
print(len(res))
print(len(get_title(res)))
fdate=datetime.strptime('2020-06-25', '%Y-%m-%d')
print('filtering results before',fdate)
final_res=get_title(filter_date(res,fdate))
print(len(final_res))
print('\n'.join(final_res))
| 25.034483 | 82 | 0.64084 | 0 | 0 | 564 | 0.194215 | 0 | 0 | 0 | 0 | 1,020 | 0.35124 |
efb3562ab2f0bc0a7a96ac315758b6464fb9c4ea | 1,336 | py | Python | core/server/wx_handler.py | Maru-zhang/FilmHub-Tornado | 870da52cec65920565439d2d5bb1424ae614665d | ["MIT"] | 2 | 2017-07-19T01:24:05.000Z | 2017-07-19T09:12:46.000Z | core/server/wx_handler.py | Maru-zhang/FilmHub-Tornado | 870da52cec65920565439d2d5bb1424ae614665d | ["MIT"] | null | null | null | core/server/wx_handler.py | Maru-zhang/FilmHub-Tornado | 870da52cec65920565439d2d5bb1424ae614665d | ["MIT"] | 1 | 2017-07-28T09:31:42.000Z | 2017-07-28T09:31:42.000Z |
from core.logger_helper import logger
from core.server.wxauthorize import WxConfig
from core.server.wxauthorize import WxAuthorServer
from core.cache.tokencache import TokenCache
class WxHandler(tornado.web.RequestHandler):
"""
微信handler处理类
"""
'''微信配置文件'''
wx_config = WxConfig()
'''微信网页授权server'''
wx_author_server = WxAuthorServer()
'''redis服务'''
wx_token_cache = TokenCache()
def post(self, flag):
if flag == 'wxauthor':
'''微信网页授权'''
code = self.get_argument('code')
state = self.get_argument('state')
# 获取重定向的url
redirect_url = self.wx_config.wx_menu_state_map[state]
logger.debug('【微信网页授权】将要重定向的地址为:redirct_url[' + redirect_url + ']')
logger.debug('【微信网页授权】用户同意授权,获取code>>>>code[' + code + ']state[' + state + ']')
if code:
# 通过code换取网页授权access_token
data = self.wx_author_server.get_auth_access_token(code)
openid = data['openid']
logger.debug('【微信网页授权】openid>>>>openid[' + openid + ']')
if openid:
# 跳到自己的业务界面
self.redirect(redirect_url)
else:
# 获取不到openid
logger.debug('获取不到openid')
| 32.585366 | 91 | 0.569611 | 1,331 | 0.868799 | 0 | 0 | 0 | 0 | 0 | 0 | 491 | 0.320496 |
efb4030a249dafcb2be0137ce898a4f573bed62c | 2,771 | py | Python | Recipes/Convert_Files_Into_JSON_And_CSV/Mapping_JsonToCsvConverter.py | Lotame/DataStream_Cookbook | 3ec7ded6bd1e3a59fa4d06bb76e81be9da9c97a6 | ["MIT"] | 1 | 2022-02-28T10:40:53.000Z | 2022-02-28T10:40:53.000Z | Recipes/Convert_Files_Into_JSON_And_CSV/Mapping_JsonToCsvConverter.py | Lotame/DataStream_Cookbook | 3ec7ded6bd1e3a59fa4d06bb76e81be9da9c97a6 | ["MIT"] | 2 | 2021-01-08T17:51:10.000Z | 2021-03-29T11:36:07.000Z | Recipes/Convert_Files_Into_JSON_And_CSV/Mapping_JsonToCsvConverter.py | Lotame/DataStream_Cookbook | 3ec7ded6bd1e3a59fa4d06bb76e81be9da9c97a6 | ["MIT"] | 3 | 2020-01-26T23:31:23.000Z | 2022-02-18T19:29:30.000Z |
#
# Write in Python3.6
# Filename:
#
# Mapping_JsonToCsvExtractor.py
#
#
# Basic Usage:
#
# python Mapping_JsonToCsvExtractor.py /directory/containing/datastream/mapping/json/files
#
# Utilities
import sys
import os
import json
import argparse
def writeCsvHeader(delimiter, csv_file, *args):
csv_file.write(delimiter.join(args))
csv_file.write("\n")
# write a line to the target file
def writeCsvLine(delimiter, csv_file, *args):
csv_file.write(delimiter.join([str(i) for i in args]))
csv_file.write("\n")
def main():
parser = argparse.ArgumentParser(description='Parse the mapping json file to CSV format')
parser.add_argument('--mapping_path', dest='mapping_path', required=True,
help='the path for the mapping json file')
parser.add_argument('--csv_name', dest='csv_name', required=False, default='mapping.csv',
help='specify the file name to write the csv file')
parser.add_argument('--csv_dir', dest='csv_dir', required=False, default='',
help='specify the dir to write the output file')
parser.add_argument('--delimiter', dest='delimiter', required=False, default='\001',
help='specify the delimiter to write the output file')
args = parser.parse_args()
mapping_path = args.mapping_path
csv_dir = args.csv_dir if args.csv_dir else mapping_path
csv_name = args.csv_name
delimiter = args.delimiter
if not os.path.isdir(mapping_path):
print("The mapping file path does not exist, confirm it again")
sys.exit()
if not os.path.isdir(csv_dir):
print("the specific csv_dir path %s does not exist, create it now" % csv_dir)
os.system("mkdir -p %s" % csv_dir)
output_path = os.path.join(csv_dir, csv_name)
output = open(output_path, 'w')
writeCsvHeader(delimiter, output, "behavior_id", "hierarchy_path", "hierarchy_id")
for file in os.listdir(mapping_path):
if not file.endswith("json"):
print("%s is not a json file, skip it" % file)
continue
file_path = os.path.join(mapping_path, file)
with open(file_path, 'r') as f:
for line in f:
js = json.loads(line.strip())
behid = js.get('behavior_id')
                # a negative behavior id is illegal, skip it
if behid < 0:
continue
# for each hierarchy, write a line
for hierpath in js.get('hierarchy_nodes', []):
                    writeCsvLine(delimiter, output, str(behid), str(hierpath.get("path", "")), str(hierpath.get("id", -1)))
output.close()
if __name__ == '__main__':
sys.exit(main())
| 35.987013 | 125 | 0.631902 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 940 | 0.339228 |
efb4a4c9d9efe6b7461d24bff10a128e9ce9296a | 2,692 | py | Python | shuttl/__init__.py | shuttl-io/shuttl-cms | 50c85db0de42e901c371561270be6425cc65eccc | ["MIT"] | 2 | 2017-06-26T18:06:58.000Z | 2017-10-11T21:45:29.000Z | shuttl/__init__.py | shuttl-io/shuttl-cms | 50c85db0de42e901c371561270be6425cc65eccc | ["MIT"] | null | null | null | shuttl/__init__.py | shuttl-io/shuttl-cms | 50c85db0de42e901c371561270be6425cc65eccc | ["MIT"] | null | null | null |
from flask import Flask, redirect, request, session, url_for
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager, current_user
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from flask_wtf.csrf import CsrfProtect
from .sessions import ShuttlSessionInterface
app = Flask(__name__)
app.config.from_object("shuttl.settings.DevelopmentConfig")
app.session_interface = ShuttlSessionInterface()
csrf = CsrfProtect(app)
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
from shuttl.MiddleWare import OrganizationMiddleware
from .Views import *
from .Models import *
from .misc import shuttl, shuttlOrgs
@login_manager.unauthorized_handler
def unauthorized():
url = redirect(url_for("shuttlOrgs.login", organization=request.organization.sys_name))
return url
@login_manager.user_loader
def load_user(id):
return User.query.get(int(id))
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
from .Commands.FillDB import FillDB
from .Commands.TestSuite import TestSuite
from .Commands.Add import Add
from .Commands.DemoFiller import DemoFiller
from .Commands.ResetPublishers import ResetPublishers
from .Commands.UploadToS3 import UploadS3
from .Templates.Tags import load_tags
# load_tags(app.jinja_env)
manager.add_command('test', TestSuite())
manager.add_command('add', Add())
manager.add_command('filldb', FillDB())
manager.add_command('demofiller', DemoFiller())
manager.add_command("resetQueue", ResetPublishers())
manager.add_command('upload', UploadS3)
app.register_blueprint(shuttl)
app.register_blueprint(shuttlOrgs)
from .Models.Reseller import Reseller, ResellerDoesNotExist
from .Models.organization import Organization, OrganizationDoesNotExistException
@app.before_request
def before_request():
request.organization = None
# hostname = request.headers.get("host")
# try:
# reseller = Reseller.GetNameFromHost(hostname)
# try:
# hostname = request.headers.get("host").split("//", 1)[-1]
# subdomain = hostname.split(".", 1)[0]
# request.organization = Organization.Get(name=subdomain.replace("_", " "), vendor = reseller)
# except OrganizationDoesNotExistException:
# pass
# pass
# except ResellerDoesNotExist:
# pass
pass
@app.teardown_request
def teardown_request(exception):
pass
from .Models.FileTree.FileObjects.FileObject import FileObject
FileObject.LoadMapper()
@app.before_first_request
def beforeFirstRequest():
from .Templates.Tags import load_tags
load_tags(app.jinja_env)
| 28.041667 | 106 | 0.76523 | 0 | 0 | 0 | 0 | 1,003 | 0.372585 | 0 | 0 | 568 | 0.210996 |
efb55216c30cf2837e4576480260417e73138279 | 4,088 | py | Python | main.py | DayvsonAlmeida/Programa-o-Gen-tica | 6edaceab99c61f55f4157e81fcf7cbad580f81d1 | ["MIT"] | null | null | null | main.py | DayvsonAlmeida/Programa-o-Gen-tica | 6edaceab99c61f55f4157e81fcf7cbad580f81d1 | ["MIT"] | null | null | null | main.py | DayvsonAlmeida/Programa-o-Gen-tica | 6edaceab99c61f55f4157e81fcf7cbad580f81d1 | ["MIT"] | null | null | null |
from pandas import DataFrame
from genetic import GA
import numpy as np
import argparse
import random
import time
import sys
sys.setrecursionlimit(2000)
random.seed(time.time())
parser = argparse.ArgumentParser()
parser.add_argument('--mr', help='Mutation Rate')
parser.add_argument('--cr', help='Crossover Rate')
parser.add_argument('--size', help='Population Size')
parser.add_argument('--ngen', help='Number of Generations')
parser.add_argument('--base', help='Base de Teste [Easy, Middle, Hard, Newton, Einstein, Pythagorean]')
args, unknown = parser.parse_known_args()
#cls && python main.py --mr 0.05 --cr 0.8 --size 100 --ngen 5000 --base Easy
#cr:[0.7, 0.75, 0.8] mr:[0.05, 0.1, 0.2] size:[10, 50, 100]
mutation_rate = float(args.mr)
crossover_rate = float(args.cr)
size = int(args.size)
ngen = int(args.ngen)
test = args.base
# f(x) = 2*x
easy = {}
easy['x'] = {'a':np.array(np.arange(100), dtype='float64')}
easy['y'] = easy['x']['a']*2
easy['terminal_symb'] = ['a']
# f(x,y,z) = sqrt(x+y)+z
medium = {}
medium['x'] = {'x':np.array(np.arange(100), dtype='float64'),
'y':np.array(np.random.randint(100)),#, dtype='float64'),
'z':np.array(np.random.randint(100))}#, dtype='float64')}
medium['y'] = (medium['x']['x']+medium['x']['y'])**0.5 + medium['x']['z']
medium['terminal_symb'] = ['x','y','z']
# f(x,y,z) = sin(x)+sqrt(y)-tan(z+x)
hard = {}
hard['x'] = {'x':np.array(np.arange(100), dtype='float64'),
'y':np.array(np.random.randint(100), dtype='float64'),#, dtype='float64'),
'z':np.array(np.random.randint(100), dtype='float64')}#, dtype='float64')}
hard['y'] = np.sin(hard['x']['x']) + hard['x']['y']**0.5 - np.tan(hard['x']['z'] + hard['x']['x'])
hard['terminal_symb'] = ['x','y','z']
#Pythagorean Theorem
# c² = a²+b²
pythagorean_theorem = {}
pythagorean_theorem['x'] = {'a': np.array(np.random.randint(100, size=100), dtype='float64'),
'b': np.array(np.arange(100), dtype='float64')}
pythagorean_theorem['y'] = pythagorean_theorem['x']['a']**2 +pythagorean_theorem['x']['b']**2
pythagorean_theorem['terminal_symb'] = ['a','b']
#Einstein's Theory of Relativity
# E = m*c²
# c = 299.792.458 m/s
einstein_relativity = {}
einstein_relativity['x'] = {'m': np.random.random(100)}
einstein_relativity['y'] = einstein_relativity['x']['m']*(299792458**2) #c²=89875517873681764
einstein_relativity['terminal_symb'] = ['m']
#Newton's Universal Law of Gravitation
# F = G*m1*m2/d²
G = 6.674*10E-11
newton_law = {}
newton_law['x'] = {'m1': 10*np.array(np.random.random(100), dtype='float64'),
'm2': np.array(np.random.randint(100, size=100), dtype='float64'),
'd': np.array(np.random.randint(100, size=100)+np.random.rand(100)+10E-11, dtype='float64')}
newton_law['y'] = (newton_law['x']['m1']*newton_law['x']['m2']*G)/(newton_law['x']['d']**2)
newton_law['terminal_symb'] = ['m1','m2','d']
base = {'Easy': easy, 'Pythagorean':pythagorean_theorem,
'Middle': medium, 'Hard': hard,
'Newton': newton_law,
"Einstein": einstein_relativity}
#cr:[0.7, 0.75, 0.8] mr:[0.05, 0.1, 0.2] size:[10, 50, 100]
results = {}
duration = {}
ngen = 2000
for test in ['Hard']:#,'Hard','Hard']:
for crossover_rate in [0.7, 0.8]:
for mutation_rate in [0.05]:#, 0.1, 0.2]:
for size in [10, 100]:
ga = GA(terminal_symb=base[test]['terminal_symb'], x=base[test]['x'], y=base[test]['y'], size=size,
num_generations=ngen, crossover_rate=crossover_rate, mutation_rate=mutation_rate, early_stop=0.1)
ga.run()
loss = ga.loss_history
loss = np.concatenate((loss, [loss[len(loss)-1] for i in range(ngen - len(loss))] ) )
results[test+'_cr_'+str(crossover_rate)+'_mr_'+str(mutation_rate)+'_size_'+str(size)] = loss
duration[test+'_cr_'+str(crossover_rate)+'_mr_'+str(mutation_rate)+'_size_'+str(size)] = [ga.duration]
df = DataFrame(results)
df.to_csv('Resultados Hard GA.csv', index=False, decimal=',', sep=';')
df = DataFrame(duration)
df.to_csv('Duração Hard GA.csv', index=False, decimal=',', sep=';')
| 40.88 | 107 | 0.634785 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,307 | 0.319092 |
efb70cec858af5a7d681ffd1896f1dd46735a318 | 1,774 | py | Python | Dictionary.py | edisoncast/DES-UOC | cd179e21c03ad780c9ea3876a6219c32b8e34cad | ["MIT"] | null | null | null | Dictionary.py | edisoncast/DES-UOC | cd179e21c03ad780c9ea3876a6219c32b8e34cad | ["MIT"] | null | null | null | Dictionary.py | edisoncast/DES-UOC | cd179e21c03ad780c9ea3876a6219c32b8e34cad | ["MIT"] | null | null | null |
#Diccionario tomado de https://github.com/danielmiessler/SecLists/blob/bb915befb208fd900592bb5a25d0c5e4f869f8ea/Passwords/Leaked-Databases/rockyou.txt.tar.gz
#Se usa para generar el mismo comportamiento de openssl de linux
#https://docs.python.org/2/library/crypt.html
import crypt
#Funcion que me permite generar los hash de las palabras de 8 caracteres
#Se usa el hash dado en la PEC
#La salida es un archivo con un par de valores por linea
#la primera es el hash y al frente el valor del password en texto plano
def generateHash(filename, outputfilename, salt):
with open(outputfilename, "w") as out:
with open(filename,'r')as f:
for line in f:
Hash = crypt.crypt(line, salt)
result = " ".join([Hash, line])
print result
out.write(result + "\n")
#Funcion que recibe el archivo de entrada y el de salida y llama a otra funcion
#que valida el tamaño de los strings
def DESDictionary(filename,outputFile):
plaintext = inputText(filename, outputFile)
#Funcion que verifica cada linea y si es de 8 caracteres me genera un archivo nuevo
#Con esto ya puedo generar los hash y comparar.
def inputText(filename, outputfilename):
with open(outputfilename, "w") as out:
with open(filename,'r')as f:
for line in f:
if len(" ".join(line.split())) == 8:
print line
out.write(line)
#Llamado a las funciones
if __name__ == "__main__":
with open('rockyou.txt','r')as f:
text = f.read()
DESDictionary('rockyou.txt','pec1.txt')
salt1='tl'
salt2='as'
generateHash('pec1.txt','hashed1.txt', salt1)
    generateHash('pec1.txt','hashed2.txt', salt2)
| 39.422222 | 157 | 0.673055 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 929 | 0.523086 |
efb866a60e5d0e5a7b79c81d5acd283c1c39df92 | 227 | py | Python | .test/test/task1/aufgabe4.py | sowinski/testsubtree | d09b72e6b366e8e29e038445a1fa6987b2456625 | ["MIT"] | null | null | null | .test/test/task1/aufgabe4.py | sowinski/testsubtree | d09b72e6b366e8e29e038445a1fa6987b2456625 | ["MIT"] | null | null | null | .test/test/task1/aufgabe4.py | sowinski/testsubtree | d09b72e6b366e8e29e038445a1fa6987b2456625 | ["MIT"] | null | null | null |
def letterFrequ(text):
freq_dist = FreqDist()
for word in text:
for char in word:
freq_dist.inc(char)
return freq_dist
print letterFrequ(text1)
print letterFrequ(text5)
| 15.133333 | 26 | 0.669604 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
efbe9e033668c8068ec57cb141083c350416dc90 | 1,668 | py | Python | src/controllers/userController.py | gioliveirass/fatec-BDNR-MercadoLivre | dd2c407f6728e4f11e8292463cc2ba3ad562de1e | ["MIT"] | null | null | null | src/controllers/userController.py | gioliveirass/fatec-BDNR-MercadoLivre | dd2c407f6728e4f11e8292463cc2ba3ad562de1e | ["MIT"] | null | null | null | src/controllers/userController.py | gioliveirass/fatec-BDNR-MercadoLivre | dd2c407f6728e4f11e8292463cc2ba3ad562de1e | ["MIT"] | null | null | null |
from pprint import pprint
def findSort():
mydb = connectDB.connect()
mycol = mydb.usuario
print("\n===========================")
print("==== TODOS OS USUARIOS ====")
print("===========================\n")
mydoc = mycol.find().sort("nome")
for x in mydoc:
pprint(x)
def insert(name, cpf):
mydb = connectDB.connect()
mycol = mydb.usuario
print("\n=========================")
print("=== USUARIO INSERIDO ===")
print("=========================\n")
mydict = { "nome": name, "cpf": cpf }
x = mycol.insert_one(mydict)
pprint(x.inserted_id)
print("Usuario cadastrado com sucesso.")
def findQuery(name):
mydb = connectDB.connect()
mycol = mydb.usuario
print("\n=========================")
print("==== USUARIO BUSCADO ====")
print("=========================\n")
myquery = { "nome": name }
mydoc = mycol.find(myquery)
for x in mydoc:
pprint(x)
def update(name, newName):
mydb = connectDB.connect()
mycol = mydb.usuario
print("\n============================")
print("==== USUARIO ATUALIZADO ====")
print("============================\n")
myquery = { "nome": name }
newvalues = { "$set": { "nome": newName } }
mycol.update_one(myquery, newvalues)
pprint("Usuario atualizado com sucesso.")
def delete(name):
mydb = connectDB.connect()
mycol = mydb.usuario
print("\n==========================")
print("==== USUARIO DELETADO ====")
print("==========================\n")
myquery = { "nome": name }
mycol.delete_one(myquery)
    pprint("User deleted successfully.")
| 30.327273 | 49 | 0.492206 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 586 | 0.351319 |
efc074386633ac80149d8065fe9a27e3e95d188c | 374 | py | Python | models/sample.py | OttrOne/suivi | 9e53a39b0f50054b89cb960eb9055fd0a28a5ebf | ["MIT"] | null | null | null | models/sample.py | OttrOne/suivi | 9e53a39b0f50054b89cb960eb9055fd0a28a5ebf | ["MIT"] | 2 | 2022-01-11T15:50:04.000Z | 2022-01-13T01:53:53.000Z | models/sample.py | OttrOne/suivi | 9e53a39b0f50054b89cb960eb9055fd0a28a5ebf | ["MIT"] | null | null | null |
from time import time_ns
class Sample:
cpu : float
memory: int
round: int = 5
def __init__(self, cpu: int, mem: int) -> None:
self.cpu = cpu
self.memory = mem
self.timestamp = time_ns()
def __str__(self) -> str:
return f"CPU: {round(self.cpu * 100.0,self.round)}%, MEM: {hrsize(self.memory)}"
| 22 | 88 | 0.59893 | 322 | 0.860963 | 0 | 0 | 0 | 0 | 0 | 0 | 73 | 0.195187 |
efc4891e8e505e8dc24f5447323153c9667f9326 | 1,220 | py | Python | file-convertors/pdf-to-image/pdf_to_image.py | fraserlove/python-productivity-scripts | 4a667446250042b01e307c7e4be53defc905207e | ["MIT"] | null | null | null | file-convertors/pdf-to-image/pdf_to_image.py | fraserlove/python-productivity-scripts | 4a667446250042b01e307c7e4be53defc905207e | ["MIT"] | null | null | null | file-convertors/pdf-to-image/pdf_to_image.py | fraserlove/python-productivity-scripts | 4a667446250042b01e307c7e4be53defc905207e | ["MIT"] | null | null | null |
PDF to Image Converter
Author: Fraser Love, [email protected]
Created: 2020-06-13
Latest Release: v1.0.1, 2020-06-21
Python: v3.6.9
Dependancies: pdf2image
Converts multiple pdf's to images (JPEG format) and stores them in a logical folder structure under the desired image directory.
Usage: Update the pdf_dir and img_dir paths to point to the directory that holds the pdf files and the directory that the
generated images should be placed under.
'''
from pdf2image import convert_from_path
import os
pdf_dir = 'pdfs/' # Include trailing forward slash
img_dir = 'images/'
first_page_only = True # Only convert the first page of the pdf to an image
pdf_names = [pdf_name.split('.')[0] for pdf_name in os.listdir(pdf_dir) if pdf_name[-4:] == ".pdf"]
for pdf_name in pdf_names:
pages = convert_from_path('{}{}.pdf'.format(pdf_dir, pdf_name))
if first_page_only:
pages[0].save('{}/{}.jpg'.format(img_dir, pdf_name), 'JPEG')
else:
directory = '{}{}'.format(img_dir, pdf_name)
if not os.path.exists(directory):
os.makedirs(directory)
for i, page in enumerate(pages):
            page.save('{}{}/{}-{}.jpg'.format(img_dir, pdf_name, pdf_name, i), 'JPEG')
| 36.969697 | 128 | 0.694262 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 625 | 0.512295 |
efc5229f2a8966dc64e04e1c67caf2f4bee4df93 | 4,217 | py | Python | tests/test/search/test_references_searcher_string.py | watermelonwolverine/fvttmv | 8689d47d1f904dd2bf0a083de515fda65713c460 | ["MIT"] | 1 | 2022-03-30T19:12:14.000Z | 2022-03-30T19:12:14.000Z | tests/test/search/test_references_searcher_string.py | watermelonwolverine/fvttmv | 8689d47d1f904dd2bf0a083de515fda65713c460 | ["MIT"] | null | null | null | tests/test/search/test_references_searcher_string.py | watermelonwolverine/fvttmv | 8689d47d1f904dd2bf0a083de515fda65713c460 | ["MIT"] | null | null | null |
from fvttmv.reference_tools import ReferenceTools
from fvttmv.search.__references_searcher_string import ReferencesSearcherString
from test.common import TestCase
class ReferencesSearcherStringTest(TestCase):
json_base_str = "\"img\":\"{0}\""
html_base_str = "<img src=\\\"{0}\\\">"
reference = "this/is/just/a/test"
json_str = json_base_str.format(reference)
html_str = html_base_str.format(reference)
def test_contain_json_references(self):
print("test_contain_json_references")
result = ReferencesSearcherString._does_contain_json_references(self.json_str,
self.reference)
self.assertTrue(result)
def test_contain_json_references2(self):
print("test_contain_json_references2")
result = ReferencesSearcherString._does_contain_json_references(self.json_str,
"this/is/just/a")
self.assertFalse(result)
def test_contain_json_references3(self):
print("test_contain_json_references3")
result = ReferencesSearcherString._does_contain_json_references(self.json_str,
"this/is/a/false/test")
self.assertFalse(result)
def test_contain_json_references4(self):
print("test_contain_json_references4")
result = ReferencesSearcherString._does_contain_json_references(self.html_str,
self.reference)
self.assertFalse(result)
def test_contain_html_references1(self):
print("test_contain_html_references1")
result = ReferencesSearcherString._does_contain_html_references(self.html_str,
self.reference)
self.assertTrue(result)
def test_contain_html_references2(self):
print("test_contain_html_references2")
result = ReferencesSearcherString._does_contain_html_references(self.html_str,
"this/is/just/a")
self.assertFalse(result)
def test_contain_html_references3(self):
print("test_contain_html_references3")
result = ReferencesSearcherString._does_contain_html_references(self.html_str,
"this/is/a/false/test")
self.assertFalse(result)
def test_contain_html_references4(self):
print("test_contain_html_references4")
result = ReferencesSearcherString._does_contain_html_references(self.json_str,
self.reference)
self.assertFalse(result)
def test_contain_references1(self):
print("test_contain_references1")
result = ReferencesSearcherString.does_contain_references(self.html_str + self.json_str,
self.reference)
self.assertTrue(result)
def test_contain_references2(self):
print("test_contain_references2")
result = ReferencesSearcherString.does_contain_references(self.html_str + self.json_str,
"this/is/just/a")
self.assertFalse(result)
def test_contain_references3(self):
print("test_contain_references3")
result = ReferencesSearcherString.does_contain_references(self.html_str + self.json_str,
"this/is/a/false/test")
self.assertFalse(result)
def test_does_references_exceptions(self):
print("test_does_contain_html_references_exceptions")
for char in ReferenceTools.illegal_chars:
try:
ReferencesSearcherString.does_contain_references(self.json_str,
char)
self.fail()
except FvttmvException:
pass
| 40.548077 | 96 | 0.591653 | 4,005 | 0.949727 | 0 | 0 | 0 | 0 | 0 | 0 | 546 | 0.129476 |
efc64b0b3d469f8a4e23675a9039dc1fed37be48 | 4,999 | py | Python | vtk.py | becklabs/geotag-gui | c8b1c3a0c6ca0c3eed09fab69d9dbb8b974b1b03 | [
"MIT"
]
| null | null | null | vtk.py | becklabs/geotag-gui | c8b1c3a0c6ca0c3eed09fab69d9dbb8b974b1b03 | [
"MIT"
]
| null | null | null | vtk.py | becklabs/geotag-gui | c8b1c3a0c6ca0c3eed09fab69d9dbb8b974b1b03 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 19 19:50:43 2020
@author: beck
"""
import cv2
import datetime
import dateparser
import os
import sys
import pandas as pd
import pytz
from hachoir.parser import createParser
from hachoir.metadata import extractMetadata
from PIL import Image
import numpy as np
import pytesseract
import imutils
import time
from GPSPhoto import gpsphoto
from threading import Thread
def firstFrame(video):
if 'timestamp_frame' not in os.listdir(os.getcwd()):
os.mkdir('timestamp_frame/')
video_capture = cv2.VideoCapture(video)
file = 'timestamp_frame/'+video+'_'+ str(0)+'.jpg'
while(True):
ret, frame = video_capture.read()
if not ret:
break
im = frame
break
video_capture.release()
PIL_image = Image.fromarray(im.astype('uint8'), 'RGB')
return PIL_image
def formatFrame(image, LEFT = 50, TOP = 20, RIGHT = 250, BOTTOM = 90):
image = image.crop((LEFT, TOP, RIGHT, BOTTOM))
image = np.array(image.convert('RGB'))[:, :, ::-1].copy()
image = imutils.resize(image, width=500)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
return thresh
def getCreationDate(filename, config):
if config == 'trident':
pytesseract.pytesseract.tesseract_cmd = 'Tesseract-OCR\\tesseract.exe'
image = formatFrame(firstFrame(filename))
data = pytesseract.image_to_string(image, lang='eng',config='--psm 6')
data_str = str(data).split('\n')
metadata = dateparser.parse(data_str[0]+ ' '+data_str[1])
else:
parser = createParser(filename)
metadata = extractMetadata(parser).get('creation_date')
return metadata
def getOffsets(file):
#GET DELTA SECONDS FOR EVERY FRAME
cap = cv2.VideoCapture(file)
totalframes = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
fps = int(cap.get(cv2.CAP_PROP_FPS))
offsets = [0]
for i in range(totalframes-1):
offsets.append(offsets[-1]+1000/fps)
offsets = [datetime.timedelta(milliseconds=i) for i in offsets]
return offsets
def getTimestamps(file, config):
offsets = getOffsets(file)
creationdate = getCreationDate(file, config)
#CALCULATE TIMESTAMPS
timestamps = [(creationdate+offset).replace(tzinfo = pytz.timezone('UTC')) for offset in offsets]
#GENERATE FRAME NAMES
frames = [file.split('/')[-1]+'_'+str(i)+'.jpg' for i in range(len(timestamps))]
#EXPORT DATA AS CSV
df = pd.DataFrame()
df['Frame'] = frames
df['Timestamp'] = timestamps
return df
def getFps(file):
cap = cv2.VideoCapture(file)
return int(cap.get(cv2.CAP_PROP_FPS))
class Writer:
def __init__(self, stream, export_path, taggedDF, parent, controller):
self.taggedDF = taggedDF.reset_index()
self.export_path = export_path
self.taggedList = [self.taggedDF.loc[i,'Frame'] for i in range(len(self.taggedDF['Frame']))]
self.frame_inds = [int(i.split('.')[1].split('_')[1]) for i in self.taggedList]
self.parent = parent
self.controller = controller
self.stream = cv2.VideoCapture(stream)
self.thread = Thread(target=self.write, args=())
        self.thread.daemon = True
def write(self):
i = 0
for frame_ind in self.frame_inds:
self.stream.set(cv2.CAP_PROP_POS_FRAMES, frame_ind)
(grabbed, frame) = self.stream.read()
frame_path = self.export_path+self.taggedList[self.frame_inds.index(frame_ind)]
cv2.imwrite(frame_path, frame)
#ADD METADATA
photo = gpsphoto.GPSPhoto(frame_path)
info = gpsphoto.GPSInfo((self.taggedDF.loc[i, 'Latitude'],
self.taggedDF.loc[i, 'Longitude']),
timeStamp=self.taggedDF.loc[i, 'Timestamp'],
alt=int(self.taggedDF.loc[i, 'Elevation']))
photo.modGPSData(info, frame_path)
self.parent.num+=1
i+=1
self.parent.e_status.set('Writing: '+str(self.parent.num)+'/'+str(self.parent.denom))
self.stream.release()
return
def createFrames(path, export_path, taggedDF, parent, controller):
x = len(taggedDF)
a = int(round(x/3))
b = int(a*2)
writer1 = Writer(path, export_path, taggedDF.iloc[:a], parent, controller)
writer2 = Writer(path, export_path, taggedDF.iloc[a:b], parent, controller)
writer3 = Writer(path, export_path, taggedDF.iloc[b:], parent, controller)
writer1.thread.start()
writer2.thread.start()
writer3.thread.start()
writer1.thread.join()
writer2.thread.join()
writer3.thread.join()
parent.e_status.set('Done')
| 35.707143 | 102 | 0.619324 | 1,555 | 0.311062 | 0 | 0 | 0 | 0 | 0 | 0 | 471 | 0.094219 |
efc6952d49bfc96baa0e1e3a017cc887fba50c18 | 4,237 | py | Python | ROS_fall_detection/src/detector.py | SeanChen0220/Posefall | f27eedc0a624cc2875d14ffa276cf96cdfc1b410 | [
"MIT"
]
| 15 | 2021-08-08T08:41:54.000Z | 2022-03-30T10:12:49.000Z | ROS_fall_detection/src/detector.py | SeanChen0220/Posefall | f27eedc0a624cc2875d14ffa276cf96cdfc1b410 | [
"MIT"
]
| 1 | 2021-11-24T16:51:51.000Z | 2021-12-03T06:20:11.000Z | ROS_fall_detection/src/detector.py | SeanChen0220/Posefall | f27eedc0a624cc2875d14ffa276cf96cdfc1b410 | [
"MIT"
]
| 3 | 2021-08-08T08:41:55.000Z | 2022-03-15T07:28:53.000Z | #! /home/seanchen/anaconda3/bin/python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#import sys
import rospy
from std_msgs.msg import String
import torch
import torch.nn.parallel
import torch.nn.functional as F
import numpy as np
import cv2
from LPN import LPN
from fall_net import Fall_Net
from pose_utils import Cropmyimage
from pose_utils import Drawkeypoints
import plot_sen
from time import *
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
#sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
global cam_image
def callback(data):
try:
global cam_image
cam_image = np.frombuffer(data.data, dtype=np.uint8).reshape((data.height, data.width, -1))
#print(cam_image.shape)
# show_image = bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
if __name__ == '__main__':
rospy.init_node('detector', anonymous=True)
pub = rospy.Publisher('det_result', Image, queue_size=10)
rospy.Subscriber('cam_image', Image, callback)
    rate = rospy.Rate(50)  # 50 Hz
# model
pose_net = LPN(nJoints=17)
pose_net.load_state_dict(torch.load('/home/seanchen/robot_fall_det/pose_net_pred100.pth.tar'))
pose_net.cuda()
fall_net = Fall_Net(64, 48, 17, device=torch.device('cuda'))
fall_net.cuda().double()
fall_net.load_state_dict(torch.load('/home/seanchen/robot_fall_det/fall_net_pred5.pth.tar'))
pose_net.eval()
fall_net.eval()
print('Load successfully!')
bridge = CvBridge()
global cam_image
cam_image = np.array([])
fall_count = []
while not rospy.is_shutdown():
rate.sleep()
if not cam_image.any():
print('waiting!')
continue
start = time()
        # run detection once per incoming frame and refresh the display
# image initialize
#photo_file = '/home/seanchen/robot_fall_det/fall1.jpg'
        #input = cv2.imread(photo_file)  # cv2 returns an np.array of shape (w, h, channel)
input = cam_image
bbox = [0, 0, input.shape[1], input.shape[0]]
input_image, details = Cropmyimage(input, bbox)
input_image = np.array([input_image.numpy()])
#print(input_image.shape)
input_image = torch.from_numpy(input_image)
#input_image.cuda()
# get posedetails
pose_out = pose_net(input_image.cuda())
fall_out, pose_cor = fall_net(pose_out)
        # compute the fall-detection result
        # visualize the pose
neck = (pose_cor[:, 5:6, :] + pose_cor[:, 6:7, :]) / 2
pose_cor = torch.cat((pose_cor, neck), dim=1)
pose_cor = pose_cor * 4 + 2.
scale = torch.Tensor([[256, 192]]).cuda()
pose_cor = pose_cor / scale
scale = torch.Tensor([[details[3]-details[1], details[2]-details[0]]]).cuda()
pose_cor = pose_cor * scale
scale = torch.Tensor([[details[1], details[0]]]).cuda()
pose_cor = pose_cor + scale
#pose_cor_1 = (4*pose_cor[:, :, 0]+2.)/64*(details[3]-details[1])/4+details[1]
#pose_cor_2 = (4*pose_cor[:, :, 1]+2.)/48*(details[2]-details[0])/4+details[0]
pose_cor = torch.flip(pose_cor, dims=[2])
ones = torch.ones(1, 18, 1).cuda()
pose_cor = torch.cat((pose_cor, ones), dim=2).cpu().detach().numpy()
#det_result = torch.zeros(64, 48, 3).numpy()
det_result = plot_sen.plot_poses(input, pose_cor)
#print(det_result.shape)
        # estimate the fall state
#if fall_out.indices == 1:
# print('Down!')
#if fall_out.indices == 0:
# print('Not Down!')
fall_out = torch.max(F.softmax(fall_out, dim=0), dim=0)
fall_count.append(fall_out.indices)
fall_dis = sum(fall_count[len(fall_count)-30 : len(fall_count)])
#print(len(fall_count))
end = time()
run_time = end-start
if fall_dis > 24:
print('Normal!', 1. / run_time)
else:
print('Down!', 1. / run_time)
det_result = bridge.cv2_to_imgmsg(det_result, encoding="passthrough")
pub.publish(det_result)
#print(1. / run_time)
# spin() simply keeps python from exiting until this node is stopped
#rospy.spin()
#while True:
#pass
| 35.605042 | 99 | 0.630399 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,182 | 0.274437 |
efc705c5b7dd44b358486c8f4931ee3c4faede41 | 3,696 | py | Python | tensorflow1.x/sound_conv.py | wikeex/tensorflow-learning | a6ab7c99455711e9f3c015e0abb04fa58342e0cb | [
"MIT"
]
| null | null | null | tensorflow1.x/sound_conv.py | wikeex/tensorflow-learning | a6ab7c99455711e9f3c015e0abb04fa58342e0cb | [
"MIT"
]
| null | null | null | tensorflow1.x/sound_conv.py | wikeex/tensorflow-learning | a6ab7c99455711e9f3c015e0abb04fa58342e0cb | [
"MIT"
]
| null | null | null | import tensorflow as tf
from sound_lstm_test import data
batch_size = 10
x = tf.placeholder(tf.float32, [batch_size, 512, 80])
y_ = tf.placeholder(tf.float32, [batch_size, 59])
w_conv1 = tf.Variable(tf.truncated_normal([16, 2, 1, 64], stddev=0.1), name='conv1_w')
b_conv1 = tf.Variable(tf.constant(0.1, shape=[64]), name='conv1_b')
x_image = tf.reshape(x, [-1, 512, 80, 1])
h_conv1 = tf.nn.relu(tf.nn.conv2d(x_image, w_conv1, strides=[1, 2, 1, 1], padding='VALID') + b_conv1)
h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
w_conv2 = tf.Variable(tf.truncated_normal([2, 16, 64, 128], stddev=0.1), name='conv2_w')
b_conv2 = tf.Variable(tf.constant(0.1, shape=[128]), name='conv2_b')
h_conv2 = tf.nn.relu(tf.nn.conv2d(h_pool1, w_conv2, strides=[1, 1, 1, 1], padding='VALID') + b_conv2)
h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
w_fc1 = tf.Variable(tf.truncated_normal([61 * 12 * 128, 1024], stddev=0.1), name='fc1_w')
b_fc1 = tf.Variable(tf.constant(0.1, shape=[1024]), name='fc1_b')
h_pool2_flat = tf.reshape(h_pool2, [-1, 61 * 12 * 128])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
rate = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, rate=rate)
w_fc2 = tf.Variable(tf.truncated_normal([1024, 59], stddev=0.1), name='fc2_w')
b_fc2 = tf.Variable(tf.constant(0.1, shape=[59]), name='fc2_b')
y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2
variables = tf.trainable_variables()
conv1_variable = [t for t in variables if t.name.startswith('conv1')]
conv2_variable = [t for t in variables if t.name.startswith('conv2')]
fc1_variable = [t for t in variables if t.name.startswith('fc1')]
fc2_variable = [t for t in variables if t.name.startswith('fc2')]
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y_conv))
grads_conv1, _ = tf.clip_by_global_norm(tf.gradients(loss, conv1_variable), clip_norm=5)
grads_conv2, _ = tf.clip_by_global_norm(tf.gradients(loss, conv2_variable), clip_norm=5)
grads_fc1, _ = tf.clip_by_global_norm(tf.gradients(loss, fc1_variable), clip_norm=5)
grads_fc2, _ = tf.clip_by_global_norm(tf.gradients(loss, fc2_variable), clip_norm=5)
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, variables), clip_norm=5)
conv1_optimizer = tf.train.AdamOptimizer(0.001)
conv2_optimizer = tf.train.AdamOptimizer(0.001)
fc1_optimizer = tf.train.AdamOptimizer(0.001)
fc2_optimizer = tf.train.AdamOptimizer(0.001)
optimizer = tf.train.AdamOptimizer(0.001)
conv1_op = conv1_optimizer.apply_gradients(zip(grads_conv1, conv1_variable))
conv2_op = conv2_optimizer.apply_gradients(zip(grads_conv2, conv2_variable))
fc1_op = fc1_optimizer.apply_gradients(zip(grads_fc1, fc1_variable))
fc2_op = fc2_optimizer.apply_gradients(zip(grads_fc2, fc2_variable))
op = optimizer.apply_gradients(zip(grads, variables))
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
train_data = data.np_load(batch_size=10, batch_type='train/')
test_data = data.np_load(batch_size=10, batch_type='test/')
for i in range(1000):
for _ in range(100):
input_, label = next(train_data)
sess.run([conv1_op, conv2_op, fc1_op, fc2_op], feed_dict={x: input_, y_: label, rate: 0})
test_total_accuracy = 0
for i in range(10):
test_input_, test_label = next(test_data)
test_accuracy, _ = sess.run([accuracy, tf.no_op()], feed_dict={x: test_input_, y_: test_label, rate: 0})
test_total_accuracy += test_accuracy
        print('Test set accuracy: %.3f' % (test_total_accuracy / 10))
| 44.53012 | 116 | 0.717532 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 158 | 0.042588 |
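# Shape sketch for the data loader used above: data.np_load is assumed to be
# a generator yielding (inputs, labels) batches matching the placeholders,
# i.e. inputs of shape (10, 512, 80) and one-hot labels of shape (10, 59).
# A stand-in with the same contract, useful for smoke-testing the graph:
import numpy as np

def dummy_batches(batch_size=10):
    while True:
        inputs = np.random.rand(batch_size, 512, 80).astype('float32')
        labels = np.eye(59, dtype='float32')[np.random.randint(0, 59, batch_size)]
        yield inputs, labels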
efc7713fd5edcdf52845e8c0b576613822945b28 | 2,213 | py | Python | interviewPractice/python/02_linkedLists/03_addTwoHugeNumbers.py | netor27/codefights-arcade-solutions | 69701ab06d45902c79ec9221137f90b75969d8c8 | [
"MIT"
]
| null | null | null | interviewPractice/python/02_linkedLists/03_addTwoHugeNumbers.py | netor27/codefights-arcade-solutions | 69701ab06d45902c79ec9221137f90b75969d8c8 | [
"MIT"
]
| null | null | null | interviewPractice/python/02_linkedLists/03_addTwoHugeNumbers.py | netor27/codefights-arcade-solutions | 69701ab06d45902c79ec9221137f90b75969d8c8 | [
"MIT"
]
| null | null | null | ''''
You're given 2 huge integers represented by linked lists. Each linked list element is a number from 0 to 9999 that represents a number with exactly 4 digits. The represented number might have leading zeros. Your task is to add up these huge integers and return the result in the same format.
Example
For a = [9876, 5432, 1999] and b = [1, 8001], the output should be
addTwoHugeNumbers(a, b) = [9876, 5434, 0].
Explanation: 987654321999 + 18001 = 987654340000.
For a = [123, 4, 5] and b = [100, 100, 100], the output should be
addTwoHugeNumbers(a, b) = [223, 104, 105].
Explanation: 12300040005 + 10001000100 = 22301040105.
Input/Output
[execution time limit] 4 seconds (py3)
[input] linkedlist.integer a
The first number, without its leading zeros.
Guaranteed constraints:
0 ≤ a size ≤ 10^4,
0 ≤ element value ≤ 9999.
[input] linkedlist.integer b
The second number, without its leading zeros.
Guaranteed constraints:
0 ≤ b size ≤ 10^4,
0 ≤ element value ≤ 9999.
[output] linkedlist.integer
The result of adding a and b together, returned without leading zeros in the same format.
'''
# Definition for singly-linked list:
class ListNode(object):
def __init__(self, x):
self.value = x
self.next = None
def addTwoHugeNumbers(a, b):
a = reverseList(a)
b = reverseList(b)
helper = ListNode(None)
r = helper
carry = 0
while a != None or b != None or carry > 0:
aValue = 0 if a == None else a.value
bValue = 0 if b == None else b.value
total = aValue + bValue + carry
carry = total // 10000
total = total % 10000
r.next = ListNode(total)
r = r.next
if a != None:
a = a.next
if b != None:
b = b.next
return reverseList(helper.next)
def reverseList(a):
if a == None:
return None
stack = []
while a != None:
stack.append(a.value)
a = a.next
r = ListNode(stack.pop())
head = r
while len(stack) > 0:
r.next = ListNode(stack.pop())
r = r.next
return head
def printList(a):
while a != None:
print (a.value)
a = a.next
| 24.054348 | 291 | 0.615002 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,155 | 0.51817 |
efc7a9d58bb127091a58a8679f3c1f9062aeca6a | 3,123 | py | Python | src/ensae_projects/datainc/data_medical.py | sdpython/ensae_projects | 9647751da053c09fa35402527b294e02a4e6e2ad | [
"MIT"
]
| 1 | 2020-11-22T10:24:54.000Z | 2020-11-22T10:24:54.000Z | src/ensae_projects/datainc/data_medical.py | sdpython/ensae_projects | 9647751da053c09fa35402527b294e02a4e6e2ad | [
"MIT"
]
| 13 | 2017-11-20T00:20:45.000Z | 2021-01-05T14:13:51.000Z | src/ensae_projects/datainc/data_medical.py | sdpython/ensae_projects | 9647751da053c09fa35402527b294e02a4e6e2ad | [
"MIT"
]
| null | null | null | """
@file
@brief Functions to handle data coming from
:epkg:`Cancer Imaging Archive`.
"""
import os
import pydicom
import pandas
import cv2
from pyquickhelper.filehelper.synchelper import explore_folder_iterfile # pylint: disable=C0411
def _recurse_fill(obs, dataset, parent=""):
for data_element in dataset:
if isinstance(data_element.value, bytes):
continue
if data_element.VR == "SQ": # a sequence
name = data_element.name
for i, ds in enumerate(data_element.value):
_recurse_fill(obs, ds,
parent="{parent}.{name}[{i}]".format(
parent=parent, name=name, i=i))
else:
text = str(data_element.value)
name = str(data_element.name)
key = name if parent == '' else parent + "." + name
obs[key] = text
def convert_dcm2png(folder, dest, fLOG=None):
"""
Converts all medical images in a folder from format
:epkg:`dcm` to :epkg:`png`.
@param folder source folder
@param dest destination folder
@param fLOG logging function
@return :epkg:`pandas:DataFrame` with many data
The function uses module :epkg:`pydicom`.
"""
if not os.path.exists(dest):
raise FileNotFoundError("Unable to find folder '{}'.".format(dest))
if fLOG is not None:
fLOG("[convert_dcm2png] convert dcm files from '{}'.".format(folder))
fLOG("[convert_dcm2png] into '{}'.".format(dest))
done = {}
rows = []
for name in explore_folder_iterfile(folder, ".*[.]dcm$"):
relname = os.path.relpath(name, folder)
if fLOG is not None:
fLOG("[convert_dcm2png] read {}: '{}'.".format(
len(rows) + 1, relname))
f1 = relname.replace("\\", "/").split("/")[0]
name_ = "img_%06d.png" % len(done)
if "_" in f1:
sub = f1.split('_')[0]
fsub = os.path.join(dest, sub)
if not os.path.exists(fsub):
if fLOG is not None:
fLOG("[convert_dcm2png] create folder '{}'.".format(sub))
os.mkdir(fsub)
new_name = os.path.join(sub, name_)
else:
new_name = name_
# read
ds = pydicom.dcmread(name)
# data
obs = dict(_src=relname, _dest=new_name, _size=len(ds.pixel_array))
_recurse_fill(obs, ds)
rows.append(obs)
# image
full_name = os.path.join(dest, new_name)
if os.path.exists(full_name):
done[name] = full_name
continue
pixel_array_numpy = ds.pixel_array
cv2.imwrite(full_name, pixel_array_numpy) # pylint: disable=E1101
done[name] = full_name
final = os.path.join(dest, "_summary.csv")
if fLOG is not None:
fLOG("[convert_dcm2png] converted {} images.".format(len(rows)))
fLOG("[convert_dcm2png] write '{}'.".format(final))
df = pandas.DataFrame(rows)
df.to_csv(final, index=False, encoding="utf-8")
return df
| 33.945652 | 96 | 0.565802 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 861 | 0.275696 |
efca70e500dca1e2e95cfa22dacbadf959220409 | 8,743 | py | Python | preprocessing_data.py | sharathrao13/seq2seq | 0768ea0b765ed93617a8e9e5cb907deae042c83d | [
"Apache-2.0"
]
| 1 | 2021-02-12T00:01:45.000Z | 2021-02-12T00:01:45.000Z | preprocessing_data.py | sharathrao13/seq2seq | 0768ea0b765ed93617a8e9e5cb907deae042c83d | [
"Apache-2.0"
]
| null | null | null | preprocessing_data.py | sharathrao13/seq2seq | 0768ea0b765ed93617a8e9e5cb907deae042c83d | [
"Apache-2.0"
]
| null | null | null | import re
import collections
import shutil
from tensorflow.python.platform import gfile
num_movie_scripts = 10
vocabulary_size = 10000
fraction_dev = 50
path_for_x_train = 'X_train.txt'
path_for_y_train = 'y_train.txt'
path_for_x_dev = 'X_dev.txt'
path_for_y_dev = 'y_dev.txt'
_PAD = b"_PAD"
_GO = b"_GO"
_EOS = b"_EOS"
_UNK = b"_UNK"
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3
_WORD_SPLIT = re.compile(b"([.,!?\":;)(])")
_DIGIT_RE = re.compile(br"\d")
#FROM DATA UTILS
# Build the dictionary with word-IDs from self-made dictionary and replace rare words with UNK token.
def build_dataset(words, vocabulary_size):
count = [['_UNK', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count = unk_count + 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
def create_vocabulary(dictionary, vocabulary_path):
f = open(vocabulary_path, 'w')
for key in dictionary:
f.write(dictionary[key] + '\n')
f.close()
def initialize_vocabulary(vocabulary_path):
# finds vocabulary file
if gfile.Exists(vocabulary_path):
rev_vocab = []
with gfile.GFile(vocabulary_path, mode="rb") as f:
rev_vocab.extend(f.readlines())
rev_vocab = [line.strip() for line in rev_vocab]
vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
return vocab, rev_vocab
else:
raise ValueError("Vocabulary file %s not found.", vocabulary_path)
def generate_encoded_files2(x_train_file, y_train_file, x_dev_file, y_dev_file, tokenized_sentences, dictionary):
"""Sentence A is in x_train, Sentence B in y_train"""
encoded_holder = []
unk_id = dictionary['_UNK']
for sentence in tokenized_sentences:
encoded_holder.append(encode_sentence(sentence, dictionary, unk_id))
f1 = open(x_train_file, 'w')
f2 = open(y_train_file, 'w')
fraction = int(len(encoded_holder) / fraction_dev)
if (len(encoded_holder) % 2 == 0):
end = len(encoded_holder)
else:
end = len(encoded_holder)-1
for i in xrange(0,fraction,2):
f1.write(str(encoded_holder[i]) + '\n')
f2.write(str(encoded_holder[i+1]) + '\n')
f1.close()
f2.close()
d1 = open(x_dev_file, 'w')
d2 = open(y_dev_file, 'w')
for i in xrange(fraction, end, 2):
d1.write(str(encoded_holder[i]) + '\n')
d2.write(str(encoded_holder[i+1]) + '\n')
d1.close()
d2.close()
def generate_encoded_files(x_train_file, y_train_file, x_dev_file, y_dev_file, tokenized_sentences, dictionary):
"""Sentence A is in x_train and y_train, Sentence B in X_train and y_train"""
encoded_holder = []
f1 = open(x_train_file, 'w')
last_line = tokenized_sentences.pop()
first_line = tokenized_sentences.pop(0)
dev_counter = int(len(tokenized_sentences) - len(tokenized_sentences)/fraction_dev)
unk_id = dictionary['_UNK']
first_line_encoded = encode_sentence(first_line, dictionary, unk_id)
f1.write(first_line_encoded + '\n')
# Creates data for X_train
for x in xrange(dev_counter):
encoded_sentence = encode_sentence(tokenized_sentences[x], dictionary, unk_id)
encoded_holder.append(encoded_sentence)
f1.write(encoded_sentence + '\n') # Write sentence to file
f1.close()
d1 = open(x_dev_file, 'w')
# Creates data for x_dev_file
for x in xrange(dev_counter, len(tokenized_sentences)):
encoded_sentence = encode_sentence(tokenized_sentences[x], dictionary, unk_id)
encoded_holder.append(encoded_sentence)
d1.write(encoded_sentence + '\n') # Write sentence to file
d1.close()
# Creates data for y_train
f2 = open(y_train_file, 'w')
for x in xrange(dev_counter + 1):
f2.write(encoded_holder[x] + '\n') # Write sentence to file
f2.close()
# Creates data for y_dev
d2 = open(y_dev_file, 'w')
for x in xrange(dev_counter + 1, len(tokenized_sentences)):
d2.write(encoded_holder[x] + '\n') # Write sentence to file
last_line_encoded = encode_sentence(last_line, dictionary, unk_id)
d2.write(last_line_encoded + '\n')
d2.close()
def basic_tokenizer(sentence):
"""Very basic tokenizer: split the sentence into a list of tokens"""
words = []
for space_separated_fragment in sentence.strip().split():
words.extend(re.split(_WORD_SPLIT, space_separated_fragment))
return [w for w in words if w]
def encode_sentence(sentence, dictionary, unk_id):
# Extract first word (and don't add any space)
if not sentence:
return ""
first_word = sentence.pop(0)
if first_word in dictionary:
encoded_sentence = str(dictionary[first_word])
else:
encoded_sentence = str(unk_id)
# Loop rest of the words (and add space in front)
for word in sentence:
if word in dictionary:
encoded_word = dictionary[word]
else:
encoded_word = unk_id
encoded_sentence += " " + str(encoded_word)
return encoded_sentence
def sentence_to_token_ids(sentence, vocabulary):
"""Convert a string to list of integers representing token-ids.
For example, a sentence "I have a dog" may become tokenized into
["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2,
"a": 4, "dog": 7"} this function will return [1, 2, 4, 7].
Returns:
a list of integers, the token-ids for the sentence.
"""
words = basic_tokenizer(sentence)
return [vocabulary.get(w, UNK_ID) for w in words]
def read_data(num_movie_scripts):
data_tokens = []
# Append each line in file to the set
for i in range(0, num_movie_scripts):
path = 'data/'+str(i)+'raw.txt'
print 'Reading ', path, '...'
lines = [line.rstrip('\n') for line in open(path)]
data_tokens_temp = []
for line in lines:
# Tokenize each sentence
data_tokens_temp.extend(re.findall(r'\S+', line))
data_tokens.extend(data_tokens_temp)
return data_tokens
# Reads data and puts every sentence in a TWO DIMENSIONAL array as tokens
# data_tokens[0] = ['This', 'is', 'a', 'sentence']
def read_sentences(num_movie_scripts):
data_tokens = []
# Append each line in file to the set
for i in range(0, num_movie_scripts):
path = 'data/'+str(i)+'raw.txt'
print 'Reading ', path, '...'
lines = [line.rstrip('\n') for line in open(path)]
data_tokens_temp = []
for line in lines:
# Tokenize each sentence
data_tokens_temp.append(re.findall(r'\S+', line))
data_tokens.extend(data_tokens_temp)
return data_tokens
def make_files(num_movie_scripts, vocabulary_size, fraction_dev=50, path_for_x_train = 'X_train.txt', path_for_y_train = 'y_train.txt', path_for_x_dev = 'X_dev.txt', path_for_y_dev = 'y_dev.txt'):
# Generate dictionary for dataset
print '------------------------------------------------'
print ' Generating dictionary based on ', str(num_movie_scripts), ' scripts'
print '------------------------------------------------'
tokenized_data = read_data(num_movie_scripts)
data, count, dictionary, reverse_dictionary = build_dataset(tokenized_data, vocabulary_size)
create_vocabulary(reverse_dictionary, 'vocabulary_for_movies.txt')
    # Generate an encoded file using the created dictionary
print '------------------------------------------------'
print ' Creating encoded file using created dictionary'
print ' (Saved in ', path_for_x_train, ')'
print '------------------------------------------------'
tokenized_sentences = read_sentences(num_movie_scripts)
generate_encoded_files(path_for_x_train, path_for_y_train, path_for_x_dev, path_for_y_dev, tokenized_sentences, dictionary)
#-----------------------Printing methods----------------------------
def print_dic(dic, counter):
c = 0
for x in dic:
print x
c += 1
if(c == counter):
break
def print_info(data, count, dictionary, reverse_dictionary):
print '-------- data'
print data
print '-------- count'
print count
print '-------- dictionary'
print_dic(dictionary, 5)
print dictionary
print '-------- reverse_dictionary'
print_dic(reverse_dictionary, 5)
print reverse_dictionary
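# Usage sketch: with data/0raw.txt .. data/9raw.txt present (one sentence per
# line), this builds the vocabulary file and the encoded train/dev splits
# using the module-level defaults defined at the top of the file:
# make_files(num_movie_scripts, vocabulary_size, fraction_dev)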
| 32.501859 | 196 | 0.649663 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,163 | 0.247398 |
efcb531829013e0d275069585a78eef303453aa5 | 851 | py | Python | dfirtrack_api/serializers.py | 0xflotus/dfirtrack | 632ebe582c2b40a4ac4b9fb12b7a118c2c49ede5 | [
"MIT"
]
| 4 | 2020-03-06T17:37:09.000Z | 2020-03-17T07:50:55.000Z | dfirtrack_api/serializers.py | 0xflotus/dfirtrack | 632ebe582c2b40a4ac4b9fb12b7a118c2c49ede5 | [
"MIT"
]
| null | null | null | dfirtrack_api/serializers.py | 0xflotus/dfirtrack | 632ebe582c2b40a4ac4b9fb12b7a118c2c49ede5 | [
"MIT"
]
| 1 | 2020-03-06T20:54:52.000Z | 2020-03-06T20:54:52.000Z | from rest_framework import serializers
from dfirtrack_main.models import System, Systemtype
class SystemtypeSerializer(serializers.ModelSerializer):
""" create serializer for systemtype (needed because of foreignkey relationsship) """
class Meta:
model = Systemtype
# attributes made available for api
fields = (
'systemtype_name',
)
class SystemSerializer(serializers.ModelSerializer):
""" create serializer for system """
    # get serializer for systemtype (needed because of foreignkey relationship)
systemtype = SystemtypeSerializer(many=False, read_only=True)
class Meta:
model = System
# attributes made available for api
fields = (
'system_id',
'system_uuid',
'system_name',
'systemtype',
)
| 27.451613 | 89 | 0.654524 | 755 | 0.887192 | 0 | 0 | 0 | 0 | 0 | 0 | 333 | 0.391304 |
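# Example of the JSON shape SystemSerializer produces (values hypothetical):
# {
#     "system_id": 1,
#     "system_uuid": "5f0e...",
#     "system_name": "workstation01",
#     "systemtype": {"systemtype_name": "server"}
# }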
efcedab7fe4be160e4d524567ddc2da000250f7a | 185 | py | Python | exasol_advanced_analytics_framework/udf_framework/create_event_handler_udf_call.py | exasol/advanced-analytics-framework | 78cb9c92fa905132c346d289623598d39def480c | [
"MIT"
]
| null | null | null | exasol_advanced_analytics_framework/udf_framework/create_event_handler_udf_call.py | exasol/advanced-analytics-framework | 78cb9c92fa905132c346d289623598d39def480c | [
"MIT"
]
| 12 | 2022-02-21T15:54:47.000Z | 2022-03-30T08:35:52.000Z | exasol_advanced_analytics_framework/udf_framework/create_event_handler_udf_call.py | exasol/advanced-analytics-framework | 78cb9c92fa905132c346d289623598d39def480c | [
"MIT"
]
| null | null | null | from exasol_advanced_analytics_framework.interface.create_event_handler_udf \
import CreateEventHandlerUDF
udf = CreateEventHandlerUDF(exa)
def run(ctx):
return udf.run(ctx)
| 20.555556 | 77 | 0.810811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
efd02e3f34305859967db711ac4399efc0f26e99 | 7,489 | py | Python | corrct/utils_proc.py | cicwi/PyCorrectedEmissionCT | 424449e1879a03cdbb8910c806417962e5b9faff | [
"BSD-3-Clause"
]
| 3 | 2020-12-08T17:09:08.000Z | 2022-01-21T22:46:56.000Z | corrct/utils_proc.py | cicwi/PyCorrectedEmissionCT | 424449e1879a03cdbb8910c806417962e5b9faff | [
"BSD-3-Clause"
]
| 11 | 2021-03-19T11:34:34.000Z | 2022-03-31T13:22:02.000Z | corrct/utils_proc.py | cicwi/PyCorrectedEmissionCT | 424449e1879a03cdbb8910c806417962e5b9faff | [
"BSD-3-Clause"
]
| 1 | 2021-03-11T18:27:48.000Z | 2021-03-11T18:27:48.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 24 15:25:14 2020
@author: Nicola VIGANÒ, Computational Imaging group, CWI, The Netherlands,
and ESRF - The European Synchrotron, Grenoble, France
"""
import numpy as np
from . import operators
from . import solvers
def get_circular_mask(vol_shape, radius_offset=0, coords_ball=None, mask_drop_off="const", data_type=np.float32):
"""Computes a circular mask for the reconstruction volume.
:param vol_shape: The size of the volume.
:type vol_shape: numpy.array_like
:param radius_offset: The offset with respect to the volume edge.
:type radius_offset: float. Optional, default: 0
:param coords_ball: The coordinates to consider for the non-masked region.
:type coords_ball: list of dimensions. Optional, default: None
:param data_type: The mask data type.
:type data_type: numpy.dtype. Optional, default: np.float32
:returns: The circular mask.
:rtype: (numpy.array_like)
"""
vol_shape = np.array(vol_shape, dtype=np.intp)
coords = [np.linspace(-(s - 1) / 2, (s - 1) / 2, s, dtype=data_type) for s in vol_shape]
coords = np.meshgrid(*coords, indexing="ij")
if coords_ball is None:
coords_ball = np.arange(-np.fmin(2, len(vol_shape)), 0, dtype=np.intp)
else:
coords_ball = np.array(coords_ball, dtype=np.intp)
radius = np.min(vol_shape[coords_ball]) / 2 + radius_offset
coords = np.stack(coords, axis=0)
if coords_ball.size == 1:
dists = np.abs(coords[coords_ball, ...])
else:
dists = np.sqrt(np.sum(coords[coords_ball, ...] ** 2, axis=0))
if mask_drop_off.lower() == "const":
return dists <= radius
elif mask_drop_off.lower() == "sinc":
cut_off = np.min(vol_shape[coords_ball]) / np.sqrt(2) - radius
outter_region = 1 - (dists <= radius)
outter_vals = 1 - np.sinc((dists - radius) / cut_off)
return np.fmax(1 - outter_region * outter_vals, 0)
else:
raise ValueError("Unknown drop-off function: %s" % mask_drop_off)
def pad_sinogram(sinogram, width, pad_axis=-1, mode="edge", **kwds):
"""Pads the sinogram.
:param sinogram: The sinogram to pad.
:type sinogram: numpy.array_like
:param width: The width of the padding.
:type width: either an int or tuple(int, int)
:param pad_axis: The axis to pad.
:type pad_axis: int. Optional, default: -1
:param mode: The padding type (from numpy.pad).
:type mode: string. Optional, default: 'edge'.
:param kwds: The numpy.pad arguments.
:returns: The padded sinogram.
:rtype: (numpy.array_like)
"""
pad_size = [(0, 0)] * len(sinogram.shape)
    if np.isscalar(width):
        width = (width, width)
pad_size[pad_axis] = width
return np.pad(sinogram, pad_size, mode=mode, **kwds)
def apply_flat_field(projs, flats, darks=None, crop=None, data_type=np.float32):
"""Apply flat field.
:param projs: Projections
:type projs: numpy.array_like
:param flats: Flat fields
:type flats: numpy.array_like
:param darks: Dark noise, defaults to None
:type darks: numpy.array_like, optional
:param crop: Crop region, defaults to None
:type crop: numpy.array_like, optional
:param data_type: numpy.dtype, defaults to np.float32
:type data_type: Data type of the processed data, optional
:return: Falt-field corrected and linearized projections
:rtype: numpy.array_like
"""
if crop is not None:
projs = projs[..., crop[0] : crop[2], crop[1] : crop[3]]
flats = flats[..., crop[0] : crop[2], crop[1] : crop[3]]
if darks is not None:
darks = darks[..., crop[0] : crop[2], crop[1] : crop[3]]
if darks is not None:
projs -= darks
flats -= darks
flats = np.mean(flats.astype(data_type), axis=0)
return projs.astype(data_type) / flats
def apply_minus_log(projs):
"""Apply -log.
:param projs: Projections
:type projs: numpy.array_like
:return: Falt-field corrected and linearized projections
:rtype: numpy.array_like
"""
return np.fmax(-np.log(projs), 0.0)
def denoise_image(
img, reg_weight=1e-2, stddev=None, error_norm="l2b", iterations=250, axes=(-2, -1), lower_limit=None, verbose=False
):
"""Image denoiser based on (simple, weighted or dead-zone) least-squares and wavelets.
The weighted least-squares requires the local pixel-wise standard deviations.
It can be used to denoise sinograms and projections.
:param img: The image or sinogram to denoise.
:type img: `numpy.array_like`
:param reg_weight: Weight of the regularization term, defaults to 1e-2
:type reg_weight: float, optional
:param stddev: The local standard deviations. If None, it performs a standard least-squares.
:type stddev: `numpy.array_like`, optional
:param error_norm: The error weighting mechanism. When using std_dev, options are: {'l2b'} | 'l1b' | 'hub' | 'wl2' \
(corresponding to: 'l2 dead-zone', 'l1 dead-zone', 'Huber', 'weighted least-squares').
:type error_norm: str, optional
:param iterations: Number of iterations, defaults to 250
:type iterations: int, optional
:param axes: Axes along which the regularization should be done, defaults to (-2, -1)
:type iterations: int or tuple, optional
:param lower_limit: Lower clipping limit of the image, defaults to None
:type iterations: float, optional
:param verbose: Turn verbosity on, defaults to False
:type verbose: boolean, optional
:return: Denoised image or sinogram.
:rtype: `numpy.array_like`
"""
def compute_wls_weights(stddev, At, reg_weights):
stddev_zeros = stddev == 0
stddev_valid = np.invert(stddev_zeros)
min_valid_stddev = np.min(stddev[stddev_valid])
reg_weights = reg_weights * (At(stddev_zeros) == 0) * min_valid_stddev
img_weights = min_valid_stddev / np.fmax(stddev, min_valid_stddev)
return (img_weights, reg_weights)
def compute_lsb_weights(stddev):
stddev_zeros = stddev == 0
stddev_valid = np.invert(stddev_zeros)
min_valid_stddev = np.min(stddev[stddev_valid])
return np.fmax(stddev, min_valid_stddev)
OpI = operators.TransformIdentity(img.shape)
if stddev is not None:
if error_norm.lower() == "l2b":
img_weight = compute_lsb_weights(stddev)
data_term = solvers.DataFidelity_l2b(img_weight)
elif error_norm.lower() == "l1b":
img_weight = compute_lsb_weights(stddev)
data_term = solvers.DataFidelity_l1b(img_weight)
elif error_norm.lower() == "hub":
img_weight = compute_lsb_weights(stddev)
data_term = solvers.DataFidelity_Huber(img_weight)
elif error_norm.lower() == "wl2":
(img_weight, reg_weight) = compute_wls_weights(stddev, OpI.T, reg_weight)
data_term = solvers.DataFidelity_wl2(img_weight)
else:
raise ValueError('Unknown error method: "%s". Options are: {"l2b"} | "l1b" | "hub" | "wl2"' % error_norm)
else:
data_term = error_norm
if isinstance(axes, int):
axes = (axes,)
reg_wl = solvers.Regularizer_l1swl(reg_weight, "bior4.4", 2, axes=axes, normalized=False)
sol_wls_wl = solvers.CP(verbose=verbose, regularizer=reg_wl, data_term=data_term)
(denoised_img, _) = sol_wls_wl(OpI, img, iterations, x0=img, lower_limit=lower_limit)
return denoised_img
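# Usage sketch for the helpers above (a hypothetical 256x256 image):
import numpy as np

img = np.random.rand(256, 256).astype(np.float32)
mask = get_circular_mask(img.shape)         # boolean disc over the volume
padded = pad_sinogram(img, (8, 8))          # pad the detector axis by 8 px per side
denoised = denoise_image(img, reg_weight=1e-2, iterations=50)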
| 37.633166 | 120 | 0.667646 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,521 | 0.470093 |
efd15e7fb718ba74481d809759853c9e66bc24c0 | 80 | py | Python | bcap/__init__.py | keioku/bcap-python | 5f1c912fcac515d8f26bda113f644d55a38e15d6 | [
"MIT"
]
| null | null | null | bcap/__init__.py | keioku/bcap-python | 5f1c912fcac515d8f26bda113f644d55a38e15d6 | [
"MIT"
]
| null | null | null | bcap/__init__.py | keioku/bcap-python | 5f1c912fcac515d8f26bda113f644d55a38e15d6 | [
"MIT"
]
| null | null | null | from .b_cap_client import BCapClient
from .b_cap_exception import BCapException
| 26.666667 | 42 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
efd1c5307f2a5343f619264248d49a40d7ec14ee | 675 | py | Python | 84.py | gdmanandamohon/leetcode | a691a4e37ee1fdad69c710e3710c5faf8b0a7d76 | [
"MIT"
]
| null | null | null | 84.py | gdmanandamohon/leetcode | a691a4e37ee1fdad69c710e3710c5faf8b0a7d76 | [
"MIT"
]
| null | null | null | 84.py | gdmanandamohon/leetcode | a691a4e37ee1fdad69c710e3710c5faf8b0a7d76 | [
"MIT"
]
| null | null | null | '''
@author: l4zyc0d3r
People who are happy makes other happy. I am gonna finish it slowly but definitely.cdt
'''
from typing import List


class Solution:
def largestRectangleArea(self, H: List[int]) -> int:
st, mx, i = [], 0, 0
while i<len(H):
if len(st)==0 or H[st[-1]]<=H[i]:
st.append(i)
i+=1
else:
rb = i
h = H[st.pop()]
lb = st[-1] if len(st) else -1
mx = max(mx, (rb-lb-1)*h)
while len(st):
rb = len(H)
h = H[st.pop()]
lb = st[-1] if len(st) else -1
mx = max(mx, (rb-lb-1)*h)
return mx
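# Classic sanity check: heights [2, 1, 5, 6, 2, 3] give area 10
# (bars 5 and 6 form a 2-wide rectangle of height 5):
# Solution().largestRectangleArea([2, 1, 5, 6, 2, 3]) == 10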
| 29.347826 | 86 | 0.422222 | 559 | 0.828148 | 0 | 0 | 0 | 0 | 0 | 0 | 114 | 0.168889 |
efd28e21b75921adf9dd8a8cb27c1319019eacfc | 402 | py | Python | delete_event.py | garymcwilliams/py-google-calendar | 546b412f0ffc1bdc9a81868bddf4de18a0c20899 | [
"Apache-2.0"
]
| null | null | null | delete_event.py | garymcwilliams/py-google-calendar | 546b412f0ffc1bdc9a81868bddf4de18a0c20899 | [
"Apache-2.0"
]
| 1 | 2021-04-30T20:59:15.000Z | 2021-04-30T20:59:15.000Z | delete_event.py | garymcwilliams/py-google-calendar | 546b412f0ffc1bdc9a81868bddf4de18a0c20899 | [
"Apache-2.0"
]
| null | null | null | from cal_setup import get_calendar_service
def main():
# Delete the event
service = get_calendar_service()
try:
service.events().delete(
calendarId='primary',
eventId='njdev79d574rdmkv0180t7t7lo',
).execute()
except googleapiclient.errors.HttpError:
print("Failed to delete event")
print("Event deleted")
if __name__ == '__main__':
main() | 23.647059 | 48 | 0.659204 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.258706 |
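# To look up an eventId first, a sketch using the same client (the Calendar
# API v3 events().list call; 'primary' and maxResults are example arguments):
# service = get_calendar_service()
# for e in service.events().list(calendarId='primary',
#                                maxResults=10).execute().get('items', []):
#     print(e['id'], e.get('summary'))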
efd293b0fad7a4595a31aa160b88ccb1aa88a456 | 37 | py | Python | rses/src/flask_app/blueprints/client/__init__.py | iScrE4m/RSES | 88299f105ded8838243eab8b25ab1626c97d1179 | [
"MIT"
]
| 1 | 2022-02-16T15:06:22.000Z | 2022-02-16T15:06:22.000Z | rses/src/flask_app/blueprints/client/__init__.py | djetelina/RSES | 88299f105ded8838243eab8b25ab1626c97d1179 | [
"MIT"
]
| null | null | null | rses/src/flask_app/blueprints/client/__init__.py | djetelina/RSES | 88299f105ded8838243eab8b25ab1626c97d1179 | [
"MIT"
]
| null | null | null | # coding=utf-8
"""Client blueprint""" | 18.5 | 22 | 0.675676 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.972973 |
efd30ee41ca03d2e23b35a990fdeba3358b3d6c7 | 15,351 | py | Python | pycdp/asyncio.py | HMaker/python-chrome-devtools-protocol | a9646a1c4e172ce458c15e2fcb3860ca8c9b4599 | [
"MIT"
]
| null | null | null | pycdp/asyncio.py | HMaker/python-chrome-devtools-protocol | a9646a1c4e172ce458c15e2fcb3860ca8c9b4599 | [
"MIT"
]
| null | null | null | pycdp/asyncio.py | HMaker/python-chrome-devtools-protocol | a9646a1c4e172ce458c15e2fcb3860ca8c9b4599 | [
"MIT"
]
| null | null | null | from __future__ import annotations
import json
import asyncio
import itertools
import typing as t
from collections import defaultdict
from contextlib import asynccontextmanager
from aiohttp import ClientSession
from aiohttp.client import ClientWebSocketResponse
from aiohttp.http_websocket import WSMsgType, WSCloseCode
from aiohttp.client_exceptions import (
ClientResponseError, ClientConnectorError, ClientConnectionError, ServerDisconnectedError
)
from pycdp.utils import ContextLoggerMixin, LoggerMixin, SingleTaskWorker, retry_on
from pycdp import cdp
T = t.TypeVar('T')
class CDPError(Exception):
pass
class CDPBrowserError(CDPError):
''' This exception is raised when the browser's response to a command
indicates that an error occurred. '''
def __init__(self, obj):
self.code: int = obj['code']
self.message: str = obj['message']
self.detail = obj.get('data')
def __str__(self):
return 'BrowserError<code={} message={}> {}'.format(self.code,
self.message, self.detail)
class CDPConnectionClosed(CDPError):
''' Raised when a public method is called on a closed CDP connection. '''
def __init__(self, reason):
'''
Constructor.
:param reason:
:type reason: wsproto.frame_protocol.CloseReason
'''
self.reason = reason
def __repr__(self):
''' Return representation. '''
return '{}<{}>'.format(self.__class__.__name__, self.reason)
class CDPSessionClosed(CDPError):
pass
class CDPInternalError(CDPError):
''' This exception is only raised when there is faulty logic in TrioCDP or
the integration with PyCDP. '''
class CDPEventListenerClosed(CDPError):
pass
_CLOSE_SENTINEL = object()
class CDPEventListener:
def __init__(self, queue: asyncio.Queue):
self._queue = queue
self._closed = False
@property
def closed(self):
return self._closed
def put(self, elem: dict):
if self._closed: raise CDPEventListenerClosed
self._queue.put_nowait(elem)
def close(self):
self._closed = True
try:
self._queue.put_nowait(_CLOSE_SENTINEL)
except asyncio.QueueFull:
pass
async def __aiter__(self):
try:
while not self._closed:
elem = await self._queue.get()
if elem is _CLOSE_SENTINEL:
return
yield elem
finally:
self._closed = True
def __str__(self) -> str:
return f'{self.__class__.__name__}(buffer={self._queue.qsize()}/{self._queue.maxsize}, closed={self._closed})'
class CDPBase(LoggerMixin):
'''
Contains shared functionality between the CDP connection and session.
'''
def __init__(self, ws: ClientWebSocketResponse=None, session_id=None, target_id=None):
super().__init__()
self._listeners: t.Dict[type, t.Set[CDPEventListener]] = defaultdict(set)
self._id_iter = itertools.count()
self._inflight_cmd: t.Dict[int, t.Tuple[t.Generator[dict, dict , t.Any], asyncio.Future]] = {}
self._session_id = session_id
self._target_id = target_id
self._ws = ws
@property
def session_id(self) -> cdp.target.SessionID:
return self._session_id
async def execute(self, cmd: t.Generator[dict, dict , T]) -> T:
'''
Execute a command on the server and wait for the result.
:param cmd: any CDP command
:returns: a CDP result
'''
cmd_id = next(self._id_iter)
cmd_response = asyncio.get_running_loop().create_future()
self._inflight_cmd[cmd_id] = cmd, cmd_response
request = next(cmd)
request['id'] = cmd_id
if self._session_id:
request['sessionId'] = self._session_id
self._logger.debug('sending command %r', request)
request_str = json.dumps(request)
try:
try:
await self._ws.send_str(request_str)
except ConnectionResetError as e:
del self._inflight_cmd[cmd_id]
raise CDPConnectionClosed(e.args[0]) from e
return await cmd_response
except asyncio.CancelledError:
if cmd_id in self._inflight_cmd:
del self._inflight_cmd[cmd_id]
raise
def listen(self, *event_types: t.Type[T], buffer_size=100) -> t.AsyncIterator[T]:
'''Return an async iterator that iterates over events matching the
indicated types.'''
receiver = CDPEventListener(asyncio.Queue(buffer_size))
for event_type in event_types:
self._listeners[event_type].add(receiver)
return receiver.__aiter__()
@asynccontextmanager
async def wait_for(self, event_type: t.Type[T], buffer_size=100) -> t.AsyncGenerator[T, None]:
'''
Wait for an event of the given type and return it.
This is an async context manager, so you should open it inside an async
with block. The block will not exit until the indicated event is
received.
'''
async for event in self.listen(event_type, buffer_size):
yield event
return
def close_listeners(self):
for listener in itertools.chain.from_iterable(self._listeners.values()):
listener.close()
self._listeners.clear()
def _handle_data(self, data):
'''
Handle incoming WebSocket data.
:param dict data: a JSON dictionary
'''
if 'id' in data:
self._handle_cmd_response(data)
else:
self._handle_event(data)
def _handle_cmd_response(self, data):
'''
Handle a response to a command. This will set an event flag that will
return control to the task that called the command.
:param dict data: response as a JSON dictionary
'''
cmd_id = data['id']
try:
cmd, event = self._inflight_cmd.pop(cmd_id)
except KeyError:
self._logger.debug('got a message with a command ID that does not exist: %s', data)
return
if 'error' in data:
# If the server reported an error, convert it to an exception and do
# not process the response any further.
event.set_exception(CDPBrowserError(data['error']))
else:
# Otherwise, continue the generator to parse the JSON result
# into a CDP object.
try:
cmd.send(data['result'])
event.set_exception(CDPInternalError("the command's generator function did not exit when expected!"))
except StopIteration as e:
event.set_result(e.value)
def _handle_event(self, data):
'''
Handle an event.
:param dict data: event as a JSON dictionary
'''
event = cdp.util.parse_json_event(data)
self._logger.debug('dispatching event %s', event)
to_remove = set()
for listener in self._listeners[type(event)]:
try:
listener.put(event)
except asyncio.QueueFull:
self._logger.warning('event %s dropped because listener %s queue is full', type(event), listener)
except CDPEventListenerClosed:
to_remove.add(listener)
self._listeners[type(event)] -= to_remove
self._logger.debug('event dispatched')
class CDPConnection(CDPBase, SingleTaskWorker):
'''
Contains the connection state for a Chrome DevTools Protocol server.
CDP can multiplex multiple "sessions" over a single connection. This class
corresponds to the "root" session, i.e. the implicitly created session that
has no session ID. This class is responsible for reading incoming WebSocket
messages and forwarding them to the corresponding session, as well as
handling messages targeted at the root session itself.
You should generally call the :func:`open_cdp()` instead of
instantiating this class directly.
'''
def __init__(self, debugging_url: str, http_client: ClientSession):
super().__init__()
self._debugging_url = debugging_url.rstrip('/')
self._http_client = http_client
self._wsurl: str = None
self._ws_context = None
self._sessions: t.Dict[str, CDPSession] = {}
@property
def closed(self) -> bool:
return self._ws.closed
@property
def had_normal_closure(self) -> bool:
return self._ws.close_code == WSCloseCode.OK
@retry_on(
ClientConnectorError, asyncio.TimeoutError,
retries=10, delay=3.0, delay_growth=1.3, log_errors=True
)
async def connect(self):
if self._ws is not None: raise RuntimeError('already connected')
if self._wsurl is None:
if self._debugging_url.startswith('http://'):
async with self._http_client.get(f'{self._debugging_url}/json/version') as resp:
if resp.status != 200:
raise ClientResponseError(
resp.request_info,
resp.history,
status=resp.status,
message=resp.reason,
headers=resp.headers
)
self._wsurl = (await resp.json())['webSocketDebuggerUrl']
elif self._debugging_url.startswith('ws://'):
self._wsurl = self._debugging_url
else:
raise ValueError('bad debugging URL scheme')
self._ws = await self._http_client.ws_connect(self._wsurl, compress=15, autoping=True, autoclose=True).__aenter__()
def add_session(self, session_id: str, target_id: str) -> CDPSession:
if session_id is self._sessions:
return self._sessions[session_id]
session = CDPSession(self._ws, session_id, target_id)
self._sessions[session_id] = session
return session
def remove_session(self, session_id: str):
if session_id in self._sessions:
self._sessions.pop(session_id).close()
async def connect_session(self, target_id: cdp.target.TargetID) -> 'CDPSession':
'''
Returns a new :class:`CDPSession` connected to the specified target.
'''
session_id = await self.execute(cdp.target.attach_to_target(target_id, True))
session = CDPSession(self._ws, session_id, target_id)
self._sessions[session_id] = session
return session
async def _run(self):
while True:
message = await self._ws.receive()
if message.type == WSMsgType.TEXT:
try:
data = json.loads(message.data)
except json.JSONDecodeError:
raise CDPBrowserError({
'code': -32700,
'message': 'Client received invalid JSON',
'data': message
})
if 'sessionId' in data:
session_id = cdp.target.SessionID(data['sessionId'])
try:
session = self._sessions[session_id]
except KeyError:
self._logger.debug(f'received message for unknown session: {data}')
continue
session._handle_data(data)
else:
self._handle_data(data)
elif message.type == WSMsgType.CLOSE or message.type == WSMsgType.CLOSING or message.type == WSMsgType.CLOSED:
return
elif message.type == WSMsgType.ERROR:
raise message.data
else:
await self._ws.close(code=WSCloseCode.UNSUPPORTED_DATA)
raise CDPConnectionClosed('received non text frame from remote peer')
async def _close(self):
try:
await super()._close()
for session in self._sessions.values():
session.close()
self._sessions.clear()
self.close_listeners()
if self._ws is not None and not self._ws.closed:
await self._ws.close()
finally:
await self._http_client.close()
class CDPSession(CDPBase, ContextLoggerMixin):
'''
Contains the state for a CDP session.
Generally you should not instantiate this object yourself; you should call
    :meth:`CDPConnection.connect_session`.
'''
def __init__(self, ws: ClientWebSocketResponse, session_id: cdp.target.SessionID, target_id: cdp.target.TargetID):
super().__init__(ws, session_id, target_id)
self._dom_enable_count = 0
self._dom_enable_lock = asyncio.Lock()
self._page_enable_count = 0
self._page_enable_lock = asyncio.Lock()
self.set_logger_context(extra_name=session_id)
@asynccontextmanager
async def dom_enable(self):
'''
A context manager that executes ``dom.enable()`` when it enters and then
        calls ``dom.disable()`` when it exits.
This keeps track of concurrent callers and only disables DOM events when
all callers have exited.
'''
async with self._dom_enable_lock:
self._dom_enable_count += 1
if self._dom_enable_count == 1:
await self.execute(cdp.dom.enable())
yield
async with self._dom_enable_lock:
self._dom_enable_count -= 1
if self._dom_enable_count == 0:
await self.execute(cdp.dom.disable())
@asynccontextmanager
async def page_enable(self):
'''
A context manager that executes ``page.enable()`` when it enters and
then calls ``page.disable()`` when it exits.
This keeps track of concurrent callers and only disables page events
when all callers have exited.
'''
async with self._page_enable_lock:
self._page_enable_count += 1
if self._page_enable_count == 1:
await self.execute(cdp.page.enable())
yield
async with self._page_enable_lock:
self._page_enable_count -= 1
if self._page_enable_count == 0:
await self.execute(cdp.page.disable())
def close(self):
if len(self._inflight_cmd) > 0:
exc = CDPSessionClosed()
for (_, event) in self._inflight_cmd.values():
if not event.done():
event.set_exception(exc)
self._inflight_cmd.clear()
self.close_listeners()
@retry_on(ClientConnectionError, ServerDisconnectedError, retries=10, delay=3.0, delay_growth=1.3, log_errors=True)
async def connect_cdp(url: str) -> CDPConnection:
'''
Connect to the browser specified by debugging ``url``.
This connection is not automatically closed! You can either use the connection
object as a context manager (``async with conn:``) or else call ``await
conn.aclose()`` on it when you are done with it.
'''
http = ClientSession()
cdp_conn = CDPConnection(url, http)
try:
await cdp_conn.connect()
cdp_conn.start()
except:
await http.close()
raise
return cdp_conn
| 35.7 | 123 | 0.614162 | 14,051 | 0.915315 | 2,062 | 0.134323 | 4,018 | 0.261742 | 6,806 | 0.443359 | 3,963 | 0.258159 |
efd3aea1c3cf0426d8d1f43ef851162a882e6a5f | 7,680 | py | Python | src/manager/om/script/gspylib/inspection/items/cluster/CheckSpecialFile.py | wotchin/openGauss-server | ebd92e92b0cfd76b121d98e4c57a22d334573159 | [
"MulanPSL-1.0"
]
| 1 | 2020-06-30T15:00:50.000Z | 2020-06-30T15:00:50.000Z | src/manager/om/script/gspylib/inspection/items/cluster/CheckSpecialFile.py | wotchin/openGauss-server | ebd92e92b0cfd76b121d98e4c57a22d334573159 | [
"MulanPSL-1.0"
]
| null | null | null | src/manager/om/script/gspylib/inspection/items/cluster/CheckSpecialFile.py | wotchin/openGauss-server | ebd92e92b0cfd76b121d98e4c57a22d334573159 | [
"MulanPSL-1.0"
]
| null | null | null | # -*- coding:utf-8 -*-
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
import os
import subprocess
from multiprocessing.dummy import Pool as ThreadPool
from gspylib.common.Common import DefaultValue
from gspylib.inspection.common.CheckItem import BaseItem
from gspylib.inspection.common.CheckResult import ResultStatus
from gspylib.os.gsfile import g_file
class CheckSpecialFile(BaseItem):
def __init__(self):
super(CheckSpecialFile, self).__init__(self.__class__.__name__)
def getDiskPath(self):
nodeDirs = []
# get PGHOST Dir
tmpDir = DefaultValue.getEnv("PGHOST")
nodeDirs.append(tmpDir)
# get gphome dir
gphome_path = DefaultValue.getEnv("GPHOME")
nodeDirs.append(gphome_path)
# get log dir
log_path = DefaultValue.getEnv("GAUSSLOG")
nodeDirs.append(log_path)
# get gausshome dir
gausshome_path = DefaultValue.getEnv("GAUSSHOME")
nodeDirs.append(os.path.realpath(gausshome_path))
hostName = DefaultValue.GetHostIpOrName()
dbNode = self.cluster.getDbNodeByName(hostName)
# including dn
for dbInst in dbNode.datanodes:
nodeDirs.append(dbInst.datadir)
return nodeDirs
def checkPathVaild(self, envValue):
"""
        function: check whether the path is valid
input : envValue
output: NA
"""
if (envValue.strip() == ""):
return 0
        # check path validity
for rac in DefaultValue.PATH_CHECK_LIST:
flag = envValue.find(rac)
if flag >= 0:
return 1
return 0
def ignorePath(self, path):
# Part of the root path and file permissions need to be ignored
ignorePathList = []
toolPath = DefaultValue.getEnv("GPHOME")
sudoPath = os.path.join(toolPath, "sudo")
inspectionPath = os.path.join(toolPath, "script/inspection")
ignorePathList.append("%s/script/gs_preinstall" % toolPath)
ignorePathList.append("%s/script/gs_postuninstall" % toolPath)
ignorePathList.append("%s/script/gs_checkos" % toolPath)
scriptPath = os.path.join(toolPath, "script")
scriptDirList = scriptPath.split('/')
inspectionDirList = inspectionPath.split('/')
        # ignore the tool's own special files and anything under the sudo directory
if (path in ignorePathList or os.path.dirname(path) == sudoPath):
return True
else:
(filename, suffix) = os.path.splitext(path)
pathDirList = path.split('/')
# ignore .pyc file in GPHOME/script
if (path.find(scriptPath) == 0 and pathDirList[:len(
scriptDirList)] == scriptDirList and suffix == ".pyc"):
return True
# ignore GPHOME/script/inspection dir
elif (path.find(inspectionPath) == 0 and pathDirList[:len(
inspectionDirList)] == inspectionDirList):
return True
else:
return False
def checkSpecialChar(self):
outputList = []
failList = []
pathList = []
paths = self.getDiskPath()
for path in paths:
if (not path or not os.path.isdir(path)):
continue
else:
pathList.append(path)
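        # Scan each directory in parallel; every worker returns a pair of
        # (paths with special characters, paths that could not be read).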
pool = ThreadPool(DefaultValue.getCpuSet())
results = pool.map(self.checkSingleSpecialChar, pathList)
pool.close()
pool.join()
for outlist, flist in results:
if (outlist):
outputList.extend(outlist)
if (flist):
failList.extend(flist)
if (len(outputList) > 0):
outputList = DefaultValue.Deduplication(outputList)
if (failList):
failList = DefaultValue.Deduplication(failList)
return outputList, failList
def checkSingleSpecialChar(self, path):
# Check a single path
outputList = []
failList = []
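        # Enumerate every entry under the directory; unreadable entries show
        # up as 'Permission denied' lines in the command output.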
cmd = "find '%s' -name '*'" % path
(status, output) = subprocess.getstatusoutput(cmd)
FileList = output.split('\n')
while '' in FileList:
FileList.remove('')
if (status != 0 and output.find("Permission denied") > 0):
for realPath in FileList:
if (realPath.find("Permission denied") > 0):
failList.append(realPath)
elif (self.checkPathVaild(realPath) != 0):
outputList.append(realPath)
else:
for realPath in FileList:
if (self.checkPathVaild(realPath) != 0):
outputList.append(realPath)
return outputList, failList
#########################################################
    # get the files under all relevant directories whose
    # owner is not the current executing user
#########################################################
def checkErrorOwner(self, ownername):
outputList = []
failList = []
path = ""
for path in self.getDiskPath():
if (not path or not os.path.isdir(path)):
continue
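            # List every entry under the directory that is not owned by the
            # expected user.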
cmd = "find '%s' -iname '*' ! -user %s -print" % (path, ownername)
(status, output) = subprocess.getstatusoutput(cmd)
if (status == 0 and output != ""):
pathList = output.split("\n")
for path in pathList:
if (self.ignorePath(path)):
continue
outputList.append(path)
elif (output.find("Permission denied") > 0):
pathList = output.split("\n")
for path in pathList:
if (path.find("Permission denied") > 0):
failList.append(path)
continue
if (self.ignorePath(path)):
continue
outputList.append(path)
if (len(outputList) > 0):
outputList = DefaultValue.Deduplication(outputList)
return outputList, failList
def doCheck(self):
parRes = ""
flag = 0
output = ""
outputList, failList = self.checkSpecialChar()
for output in outputList:
if (output != ""):
flag = 1
parRes += "\nSpecial characters file: \"%s\"" % output
outputList, errorList = self.checkErrorOwner(self.user)
for output in outputList:
if (output != ""):
flag = 1
parRes += "\nFile owner should be %s." \
" Incorrect owner file: \"%s\"" \
% (self.user, output)
failList.extend(errorList)
if (failList):
flag = 1
failList = DefaultValue.Deduplication(failList)
parRes += "\n%s" % ("\n".join(failList))
if (flag == 1):
self.result.rst = ResultStatus.NG
self.result.val = parRes
else:
self.result.rst = ResultStatus.OK
self.result.val = "All files are normal."
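# Hedged usage sketch (illustrative, not part of the original module): check
# items are normally instantiated and wired up by the inspection framework,
# which sets `user`, `cluster` and `result` before calling doCheck(). The
# attribute wiring below is therefore an assumption:
#
#     item = CheckSpecialFile()
#     item.user = 'omm'            # hypothetical cluster owner account
#     item.cluster = clusterInfo   # framework-provided cluster model
#     item.doCheck()
#     print(item.result.rst, item.result.val)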
| 37.101449 | 78 | 0.552344 | 6,757 | 0.879818 | 0 | 0 | 0 | 0 | 0 | 0 | 1,661 | 0.216276 |