hexsha
stringlengths 40
40
| size
int64 5
2.06M
| ext
stringclasses 11
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
251
| max_stars_repo_name
stringlengths 4
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
251
| max_issues_repo_name
stringlengths 4
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
251
| max_forks_repo_name
stringlengths 4
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.05M
| avg_line_length
float64 1
1.02M
| max_line_length
int64 3
1.04M
| alphanum_fraction
float64 0
1
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f87b501fc4a702c459b5a826cf1537ec0638bb2a
| 1,855 |
py
|
Python
|
core/migrations/0001_initial.py
|
SanjaLV/Tenis
|
ea714da10207723c27ff7204b4285ea6a773521b
|
[
"MIT"
] | null | null | null |
core/migrations/0001_initial.py
|
SanjaLV/Tenis
|
ea714da10207723c27ff7204b4285ea6a773521b
|
[
"MIT"
] | null | null | null |
core/migrations/0001_initial.py
|
SanjaLV/Tenis
|
ea714da10207723c27ff7204b4285ea6a773521b
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.7 on 2019-03-31 10:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 37.1 | 132 | 0.581132 |
f87cfb9c6282ebda75b44ea58b3afec144dcbcf4
| 448 |
py
|
Python
|
generator.py
|
iomintz/python-snippets
|
982861c173bf4bcd5d908514a9e8b1914a580a5d
|
[
"CC0-1.0"
] | 2 |
2020-04-10T07:29:56.000Z
|
2020-05-27T03:45:21.000Z
|
generator.py
|
iomintz/python-snippets
|
982861c173bf4bcd5d908514a9e8b1914a580a5d
|
[
"CC0-1.0"
] | null | null | null |
generator.py
|
iomintz/python-snippets
|
982861c173bf4bcd5d908514a9e8b1914a580a5d
|
[
"CC0-1.0"
] | 2 |
2018-11-24T08:16:59.000Z
|
2019-02-24T04:41:30.000Z
|
#!/usr/bin/env python3
# encoding: utf-8
# Douglas Crockford's idea for making generators
# basically "why do you need a `yield` keyword when you can just maintain some state"
# in my view, a class would be a better way to do this, and indeed, in python,
# that's how Iterators are defined.
#
# FIX: the original called `gen()`, but a Python iterator is not callable —
# values are pulled with next(). Supplying a default of None lets the 4th
# pull demonstrate exhaustion instead of raising StopIteration.
gen = iter([1, 2, 3])
for _ in range(4):
    print(next(gen, None))
| 22.4 | 85 | 0.683036 |
f87d14c4254943a1783a77600bab62106f89c898
| 6,336 |
py
|
Python
|
pddlstream/ss/algorithms/fast_downward.py
|
zerongxi/Kitchen2D
|
2cbaa6c8ea8fbf5f5c3a5de34cb11efde4121793
|
[
"MIT"
] | 35 |
2018-03-15T14:26:33.000Z
|
2022-02-09T15:37:59.000Z
|
pddlstream/ss/algorithms/fast_downward.py
|
zerongxi/Kitchen2D
|
2cbaa6c8ea8fbf5f5c3a5de34cb11efde4121793
|
[
"MIT"
] | 1 |
2020-11-03T04:49:43.000Z
|
2020-11-17T16:42:48.000Z
|
pddlstream/ss/algorithms/fast_downward.py
|
zerongxi/Kitchen2D
|
2cbaa6c8ea8fbf5f5c3a5de34cb11efde4121793
|
[
"MIT"
] | 12 |
2018-04-28T20:11:21.000Z
|
2021-09-18T22:24:46.000Z
|
from time import time
from ss.utils import INF
import sys
import os
import shutil
# Scratch directory plus the fixed file names that Fast Downward's
# translate/search pipeline reads and writes.
TEMP_DIR = 'temp/'
DOMAIN_INPUT = 'domain.pddl'
PROBLEM_INPUT = 'problem.pddl'
TRANSLATE_OUTPUT = 'output.sas'
SEARCH_OUTPUT = 'sas_plan'
# Environment variable expected to point at the Fast Downward installation
# root (presumably containing FD_BIN and TRANSLATE_DIR — confirm at call site).
ENV_VAR = 'FD_PATH'
FD_BIN = 'bin'
TRANSLATE_DIR = 'translate/'
# Template: downward --internal-plan-file <plan_file> <search_args> < <sas_file>
SEARCH_COMMAND = 'downward --internal-plan-file %s %s < %s'
# Named search configurations. Each value is a Fast Downward argument string
# with two '%s' slots, filled (in order) with max_time and bound.
SEARCH_OPTIONS = {
    'dijkstra': '--heuristic "h=blind(transform=adapt_costs(cost_type=NORMAL))" '
    '--search "astar(h,cost_type=NORMAL,max_time=%s,bound=%s)"',
    'max-astar': '--heuristic "h=hmax(transform=adapt_costs(cost_type=NORMAL))"'
    ' --search "astar(h,cost_type=NORMAL,max_time=%s,bound=%s)"',
    'ff-astar': '--heuristic "h=ff(transform=adapt_costs(cost_type=NORMAL))" '
    '--search "astar(h,cost_type=NORMAL,max_time=%s,bound=%s)"',
    'ff-wastar1': '--heuristic "h=ff(transform=adapt_costs(cost_type=NORMAL))" '
    '--search "lazy_wastar([h],preferred=[h],reopen_closed=true,boost=100,w=1,'
    'preferred_successors_first=true,cost_type=NORMAL,max_time=%s,bound=%s)"',
    'ff-wastar3': '--heuristic "h=ff(transform=adapt_costs(cost_type=PLUSONE))" '
    '--search "lazy_wastar([h],preferred=[h],reopen_closed=false,boost=100,w=3,'
    'preferred_successors_first=true,cost_type=PLUSONE,max_time=%s,bound=%s)"',
    'ff-wastar5': '--heuristic "h=ff(transform=adapt_costs(cost_type=PLUSONE))" '
    '--search "lazy_wastar([h],preferred=[h],reopen_closed=false,boost=100,w=5,'
    'preferred_successors_first=true,cost_type=PLUSONE,max_time=%s,bound=%s)"',
    'cea-wastar1': '--heuristic "h=cea(transform=adapt_costs(cost_type=PLUSONE))" '
    '--search "lazy_wastar([h],preferred=[h],reopen_closed=false,boost=1000,w=1,'
    'preferred_successors_first=true,cost_type=PLUSONE,max_time=%s,bound=%s)"',
    'cea-wastar3': '--heuristic "h=cea(transform=adapt_costs(cost_type=PLUSONE))" '
    '--search "lazy_wastar([h],preferred=[h],reopen_closed=false,boost=1000,w=3,'
    'preferred_successors_first=true,cost_type=PLUSONE,max_time=%s,bound=%s)"',
    'cea-wastar5': '--heuristic "h=cea(transform=adapt_costs(cost_type=PLUSONE))" '
    '--search "lazy_wastar([h],preferred=[h],reopen_closed=false,boost=1000,w=5,'
    'preferred_successors_first=true,cost_type=PLUSONE,max_time=%s,bound=%s)"',
    'ff-eager': '--heuristic "hff=ff(transform=adapt_costs(cost_type=PLUSONE))" '
    '--search "eager_greedy([hff],max_time=%s,bound=%s)"',
    'ff-eager-pref': '--heuristic "hff=ff(transform=adapt_costs(cost_type=PLUSONE))" '
    '--search "eager_greedy([hff],preferred=[hff],max_time=%s,bound=%s)"',
    'ff-lazy': '--heuristic "hff=ff(transform=adapt_costs(cost_type=PLUSONE))" '
    '--search "lazy_greedy([hff],preferred=[hff],max_time=%s,bound=%s)"',
}
| 34.064516 | 94 | 0.646938 |
f881c0e0b875dfcd895b81b936783f36c735935f
| 564 |
py
|
Python
|
backend/external/docgen/request_token.py
|
bcgov-c/wally
|
264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06
|
[
"Apache-2.0"
] | null | null | null |
backend/external/docgen/request_token.py
|
bcgov-c/wally
|
264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06
|
[
"Apache-2.0"
] | null | null | null |
backend/external/docgen/request_token.py
|
bcgov-c/wally
|
264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06
|
[
"Apache-2.0"
] | null | null | null |
import requests
from api import config
| 21.692308 | 64 | 0.615248 |
f88205db59ac35f6745b81386eb53c57775a1972
| 3,164 |
py
|
Python
|
gcode_gen/gcode.py
|
tulth/gcode_gen
|
d6e276f2074d4fe66755b2ae06c5b4d85583c563
|
[
"BSD-3-Clause"
] | null | null | null |
gcode_gen/gcode.py
|
tulth/gcode_gen
|
d6e276f2074d4fe66755b2ae06c5b4d85583c563
|
[
"BSD-3-Clause"
] | null | null | null |
gcode_gen/gcode.py
|
tulth/gcode_gen
|
d6e276f2074d4fe66755b2ae06c5b4d85583c563
|
[
"BSD-3-Clause"
] | null | null | null |
'''
Library for gcode commands objects that render to strings.
'''
from .number import num2str
from .point import XYZ
| 24.913386 | 71 | 0.60335 |
f8825ad47b75cf630d4ad3f98bb97cd2847d852d
| 619 |
py
|
Python
|
tAPP/2/P3.py
|
ArvinZJC/UofG_PGT_PSD_Python
|
d90e9bb0b53b14c6b1d7e657c3c61e2792e0d9c4
|
[
"MIT"
] | null | null | null |
tAPP/2/P3.py
|
ArvinZJC/UofG_PGT_PSD_Python
|
d90e9bb0b53b14c6b1d7e657c3c61e2792e0d9c4
|
[
"MIT"
] | null | null | null |
tAPP/2/P3.py
|
ArvinZJC/UofG_PGT_PSD_Python
|
d90e9bb0b53b14c6b1d7e657c3c61e2792e0d9c4
|
[
"MIT"
] | null | null | null |
'''
Description: Problem 3 (rearrange the code)
Version: 1.0.1.20210116
Author: Arvin Zhao
Date: 2021-01-14 22:51:16
Last Editors: Arvin Zhao
LastEditTime: 2021-01-16 04:11:18
'''
# NOTE(review): main() is not defined in this excerpt — presumably defined
# elsewhere in the full file; confirm before running, otherwise this raises
# NameError at script startup.
if __name__ == '__main__': # It is strongly recommended to add this line.
    main()
| 20.633333 | 74 | 0.646204 |
f8825cac93ae51da9c9e342930c13e66cd5b1a63
| 1,046 |
py
|
Python
|
tf_trees/demo.py
|
hazimehh/google-research
|
81ff754d88f9ad479448c78d7ab615bef140423d
|
[
"Apache-2.0"
] | null | null | null |
tf_trees/demo.py
|
hazimehh/google-research
|
81ff754d88f9ad479448c78d7ab615bef140423d
|
[
"Apache-2.0"
] | null | null | null |
tf_trees/demo.py
|
hazimehh/google-research
|
81ff754d88f9ad479448c78d7ab615bef140423d
|
[
"Apache-2.0"
] | null | null | null |
# Demo: fit a Tree Ensemble Layer (TEL) on the Boston Housing regression set.
# Requires TensorFlow/Keras and the compiled tf_trees extension on the path.
from tensorflow import keras
# Make sure the tf_trees directory is in the search path.
from tf_trees import TEL
# The documentation of TEL can be accessed as follows
print(TEL.__doc__)
# We will fit TEL on the Boston Housing regression dataset.
# First, load the dataset.
from keras.datasets import boston_housing
(x_train, y_train), (x_test, y_test) = boston_housing.load_data()
# Define the tree layer; here we choose 10 trees, each of depth 3.
# Note output_logits_dim is the dimension of the tree output.
# output_logits_dim = 1 in this case, but should be equal to the
# number of classes if used as an output layer in a classification task.
tree_layer = TEL(output_logits_dim=1, trees_num=10, depth=3)
# Construct a sequential model with batch normalization and TEL.
# NOTE(review): the model has no final Dense layer — presumably TEL's output
# (output_logits_dim=1) is the regression prediction itself; confirm.
model = keras.Sequential()
model.add(keras.layers.BatchNormalization())
model.add(tree_layer)
# Fit a model with mse loss.
model.compile(loss='mse', optimizer='adam', metrics=['mse'])
result = model.fit(x_train, y_train, epochs=100, validation_data=(x_test, y_test))
| 38.740741 | 82 | 0.772467 |
f88367f68dcb96f708907ba780b8dfe0c11ecea5
| 725 |
py
|
Python
|
tests/utils_test.py
|
MartinThoma/nntoolkit
|
1f9eed7b6d6fdacc706060d9cbfefaa9c2d0dbf8
|
[
"MIT"
] | 4 |
2015-01-26T17:56:05.000Z
|
2020-04-01T05:52:00.000Z
|
tests/utils_test.py
|
MartinThoma/nntoolkit
|
1f9eed7b6d6fdacc706060d9cbfefaa9c2d0dbf8
|
[
"MIT"
] | 11 |
2015-01-06T10:34:36.000Z
|
2021-03-22T18:29:45.000Z
|
tests/utils_test.py
|
MartinThoma/nntoolkit
|
1f9eed7b6d6fdacc706060d9cbfefaa9c2d0dbf8
|
[
"MIT"
] | 6 |
2015-01-02T15:02:27.000Z
|
2021-05-12T18:09:35.000Z
|
#!/usr/bin/env python
# Core Library modules
import argparse
import os
# Third party modules
import pytest
# First party modules
import nntoolkit.utils as utils
| 20.714286 | 62 | 0.704828 |
f8837ac94ce790820bfbaf796665ce3cc290523c
| 101 |
py
|
Python
|
ex1.py
|
luismachado/python_project_euler
|
79798ee00c18f4f8cc1b397aa7c92f8175a3ed33
|
[
"MIT"
] | null | null | null |
ex1.py
|
luismachado/python_project_euler
|
79798ee00c18f4f8cc1b397aa7c92f8175a3ed33
|
[
"MIT"
] | null | null | null |
ex1.py
|
luismachado/python_project_euler
|
79798ee00c18f4f8cc1b397aa7c92f8175a3ed33
|
[
"MIT"
] | null | null | null |
# Project Euler problem 1: sum of all natural numbers below 1000 that are
# multiples of 3 or 5.
# FIX: the original accumulated into a variable named `sum`, shadowing the
# builtin; renamed to `total` and replaced the manual loop with the builtin
# sum() over a generator expression. Printed output is unchanged.
total = sum(x for x in range(1, 1000) if x % 3 == 0 or x % 5 == 0)
print("Total is:", total)
| 14.428571 | 26 | 0.485149 |
f885cb85cd328a59b1d3f0d46e987b871f1a5d6d
| 1,977 |
py
|
Python
|
apiser/10-grpc/src/utils/tools/zemail.py
|
hyhlinux/demo_vue
|
cf61d0ba21cce93b04951076c8c23c0fe693bb5b
|
[
"Apache-2.0"
] | null | null | null |
apiser/10-grpc/src/utils/tools/zemail.py
|
hyhlinux/demo_vue
|
cf61d0ba21cce93b04951076c8c23c0fe693bb5b
|
[
"Apache-2.0"
] | 2 |
2022-02-10T12:00:22.000Z
|
2022-03-02T02:31:40.000Z
|
apiser/10-grpc/src/utils/tools/zemail.py
|
hyhlinux/demo_vue
|
cf61d0ba21cce93b04951076c8c23c0fe693bb5b
|
[
"Apache-2.0"
] | null | null | null |
import smtplib
import os
from email.mime.text import MIMEText
from email.utils import formataddr
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.header import Header
# NOTE(review): this fallback looks inverted — it constructs the email client
# only when the config import *fails*, and then references CONFIG, which the
# failed import never bound (guaranteed NameError on that path). SendEmail and
# main() are also not defined in this excerpt (dataset truncation); confirm
# against the full file before trusting this block.
try:
    from src.config import CONFIG
except ImportError:
    Zemail = SendEmail(CONFIG.EMAIL.get("user"), CONFIG.EMAIL.get("passwd"))
if __name__ == '__main__':
    main()
| 29.954545 | 107 | 0.630754 |
f8885de2c1bf956e3ffc0a2b8c32753cd240d5eb
| 2,679 |
py
|
Python
|
can_decoder/Frame.py
|
justinwald99/can_decoder
|
abfdd839856745f88b3fc3a58c8bedbdd05d5616
|
[
"MIT"
] | 17 |
2020-08-18T02:34:57.000Z
|
2022-03-16T16:26:53.000Z
|
can_decoder/Frame.py
|
justinwald99/can_decoder
|
abfdd839856745f88b3fc3a58c8bedbdd05d5616
|
[
"MIT"
] | 4 |
2020-09-09T04:18:28.000Z
|
2022-02-23T10:29:14.000Z
|
can_decoder/Frame.py
|
justinwald99/can_decoder
|
abfdd839856745f88b3fc3a58c8bedbdd05d5616
|
[
"MIT"
] | 3 |
2021-08-18T18:30:43.000Z
|
2022-02-21T07:11:09.000Z
|
from typing import List, Optional
from can_decoder.Signal import Signal
| 29.43956 | 117 | 0.528182 |
f888a9124299142dae94af378de65454815c28dd
| 268 |
py
|
Python
|
Curso_de_Python_ Curso_em_Video/PythonTeste/operadoresAritmeticosEx007.py
|
DanilooSilva/Cursos_de_Python
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
[
"MIT"
] | null | null | null |
Curso_de_Python_ Curso_em_Video/PythonTeste/operadoresAritmeticosEx007.py
|
DanilooSilva/Cursos_de_Python
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
[
"MIT"
] | null | null | null |
Curso_de_Python_ Curso_em_Video/PythonTeste/operadoresAritmeticosEx007.py
|
DanilooSilva/Cursos_de_Python
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
[
"MIT"
] | null | null | null |
# Read the wall's width and height, then report its area and the paint needed,
# at a coverage of one litre of paint per 2 square metres.
largura = float(input('Digite a largura da parede: '))
altura = float(input('Digite a altura da parede: '))
area = largura * altura
tinta = area / 2
# FIX: the original messages had their accented characters stripped by a bad
# encoding round-trip ("A rea ... de", "Ser necessrio"); restored here.
print('A área da parede é de {}'.format(area))
print('Será necessário para pintar {} litros de tinta'.format(tinta))
| 29.777778 | 69 | 0.697761 |
f888dc9c7ee0e666487347bc03cdcb3278274bed
| 174 |
py
|
Python
|
terraform.py
|
kyleslater/254-space-log
|
7496ff4e134b1a916580d8c0f8a0493e4863e9a2
|
[
"MIT"
] | null | null | null |
terraform.py
|
kyleslater/254-space-log
|
7496ff4e134b1a916580d8c0f8a0493e4863e9a2
|
[
"MIT"
] | null | null | null |
terraform.py
|
kyleslater/254-space-log
|
7496ff4e134b1a916580d8c0f8a0493e4863e9a2
|
[
"MIT"
] | null | null | null |
#Kyle Slater
import re
| 24.857143 | 61 | 0.752874 |
f88a9c72050c19a376ad171a7a2391d21f7e3ac6
| 256 |
py
|
Python
|
bugzilla_service/bzservice_flask/app/tests/test_flaskr.py
|
5GEVE/5G-EVE-PORTAL-BACKEND-tsb
|
3fe3140b26d30e7e7ff1a034315183eaed60a599
|
[
"MIT"
] | null | null | null |
bugzilla_service/bzservice_flask/app/tests/test_flaskr.py
|
5GEVE/5G-EVE-PORTAL-BACKEND-tsb
|
3fe3140b26d30e7e7ff1a034315183eaed60a599
|
[
"MIT"
] | 3 |
2021-02-08T20:38:29.000Z
|
2021-06-02T00:55:43.000Z
|
file_storage_service/tests/test_flaskr.py
|
5GEVE/5G-EVE-PORTAL-BACKEND-fs
|
27d5d10fa39e3007cfee2e48e3b95047abf2c144
|
[
"MIT"
] | null | null | null |
import os
import tempfile
import requests
api_url = "http://127.0.0.1:8989"
def test_no_token():
    """A request with no auth token must be rejected with HTTP 401."""
    resp = requests.get(api_url + "/isvalid")
    print(resp.status_code)
    assert resp.status_code == 401
| 23.272727 | 47 | 0.707031 |
f88aa3fcd8cfa698889ea39a72ffe01decd8c2ea
| 6,279 |
py
|
Python
|
translator-v2.py
|
g-h-0-S-t/translator
|
9e55b5b3a7d68b85aa718bc9eef064599b75f914
|
[
"MIT"
] | 1 |
2021-07-22T14:06:08.000Z
|
2021-07-22T14:06:08.000Z
|
translator-v2.py
|
g-h-0-S-t/translator
|
9e55b5b3a7d68b85aa718bc9eef064599b75f914
|
[
"MIT"
] | null | null | null |
translator-v2.py
|
g-h-0-S-t/translator
|
9e55b5b3a7d68b85aa718bc9eef064599b75f914
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# MIT License
#
# Copyright (c) 2021 gh0$t
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
############################################################################################################################
# imports
############################################################################################################################
import sys
import urllib.request
from bs4 import BeautifulSoup
from urllib.request import Request
from selenium import webdriver
import os
import time
from stem import Signal
from stem.control import Controller
############################################################################################################################
# Pass URL, extract text, translate
############################################################################################################################
# Scrape the page given on the command line, save its body text, then drive a
# headless Chrome through Google Translate's document-translation UI once per
# available target language.
URL = str(sys.argv[1])
GTURL = 'https://translate.google.com/'
# this is important, drives the whole translation process.
# if google updates the translate.google.com page selectors, this HORRIBLE selector needs to be updated
GTXpathSel = '//*[@id="yDmH0d"]/c-wiz/div/div[@class="WFnNle"]/c-wiz/div[@class="OlSOob"]/c-wiz/div[@class="hRFt4b"]/c-wiz/div[@class="ykTHSe"]/div/div[@class="dykxn MeCBDd j33Gae"]/div/div[2]/div/div[@class="Llmcnf"]'
print('\nConnecting to ' + URL + ' ...' + '\nExtracting text...')
req = Request(URL)
html = BeautifulSoup(urllib.request.urlopen(req).read(), 'html.parser')
# NOTE(review): looks specific to MediaWiki-style pages — only the element
# with id="bodyContent" is extracted; other layouts will raise AttributeError.
text = html.find('div', {'id': 'bodyContent'}).get_text()
with open('out/English.txt', 'w', encoding='utf-8') as f:
    f.write(text)
print('\nExtracted -> out/English.txt')
print('\nStarting translation job...')
options = webdriver.ChromeOptions()
options.add_argument('--incognito')
options.add_argument('--headless')
# NOTE(review): executable_path and find_element(s)_by_* are Selenium 3 APIs;
# they were removed in Selenium 4 — confirm the pinned selenium version.
driver = webdriver.Chrome(executable_path='driver/chromedriver', options=options)
print('\nConnecting to ' + GTURL + ' ...')
driver.get(GTURL)
time.sleep(1)
try:
    # accept Google's cookies
    driver.find_elements_by_xpath ('//span[contains(text(), "I agree")]')[0].click()
# HACK: bare except deliberately treats "no consent dialog" as success, but it
# also hides every other failure — consider narrowing to IndexError.
except:
    pass
time.sleep(2)
driver.find_element_by_xpath('//*[@aria-label="Document translation"]').click()
driver.find_element_by_name('file').send_keys(os.path.abspath('out/English.txt'))
langEle = driver.find_elements_by_xpath(GTXpathSel)
i = 0
totLang = len(langEle)
print('\nTotal languages = ' + str(totLang) + ' [press CTRL + C once or twice or thrice or any number of times you like to press to quit anytime]')
print('\nTranslating text...')
# NOTE(review): init() is not defined in this excerpt — presumably performs
# one language's translation; confirm against the full file.
while i < totLang:
    init(driver)
    i += 1
print('\nTranslations completed. Check "/out" for the files.')
driver.quit()
exit()
| 34.5 | 218 | 0.645963 |
f88ab7cb09ff4cce53f828728ecd959e4a4ca37a
| 955 |
py
|
Python
|
djangoBackend/payment_module/migrations/0005_auto_20210924_0054.py
|
muhanzi/Django-REST-API
|
08b8b2bbd08a74589cca7b5fd4e1d604d9a6d7eb
|
[
"Apache-2.0"
] | null | null | null |
djangoBackend/payment_module/migrations/0005_auto_20210924_0054.py
|
muhanzi/Django-REST-API
|
08b8b2bbd08a74589cca7b5fd4e1d604d9a6d7eb
|
[
"Apache-2.0"
] | null | null | null |
djangoBackend/payment_module/migrations/0005_auto_20210924_0054.py
|
muhanzi/Django-REST-API
|
08b8b2bbd08a74589cca7b5fd4e1d604d9a6d7eb
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.2 on 2021-09-23 21:54
from django.db import migrations
| 24.487179 | 54 | 0.536126 |
f88e5bdd49e9b79ee78760de491336a0c465e929
| 935 |
py
|
Python
|
general/tfHelper.py
|
jbroot/SHGAN
|
9ed83f8356145adcbda219c0d9673e36109b0cb2
|
[
"MIT"
] | null | null | null |
general/tfHelper.py
|
jbroot/SHGAN
|
9ed83f8356145adcbda219c0d9673e36109b0cb2
|
[
"MIT"
] | null | null | null |
general/tfHelper.py
|
jbroot/SHGAN
|
9ed83f8356145adcbda219c0d9673e36109b0cb2
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import keras
import numpy as np
| 32.241379 | 73 | 0.698396 |
f88f6e13c4185abcf8cceff79dbfda6d0f9a19ba
| 486 |
py
|
Python
|
wsgi/settings.py
|
zhemao/speakeasy
|
793bcca6d30fe31b1579bb8464f1eafacd6eb593
|
[
"BSD-2-Clause"
] | 1 |
2022-02-02T10:40:59.000Z
|
2022-02-02T10:40:59.000Z
|
wsgi/settings.py
|
zhemao/speakeasy
|
793bcca6d30fe31b1579bb8464f1eafacd6eb593
|
[
"BSD-2-Clause"
] | null | null | null |
wsgi/settings.py
|
zhemao/speakeasy
|
793bcca6d30fe31b1579bb8464f1eafacd6eb593
|
[
"BSD-2-Clause"
] | null | null | null |
import os
# MongoDB connection settings, injected via the OpenShift environment.
MONGO_HOST = os.getenv('OPENSHIFT_NOSQL_DB_HOST')
MONGO_PORT = os.getenv('OPENSHIFT_NOSQL_DB_PORT')
MONGO_USERNAME = os.getenv('OPENSHIFT_NOSQL_DB_USERNAME')
MONGO_PASSWORD = os.getenv('OPENSHIFT_NOSQL_DB_PASSWORD')
MONGO_DBNAME = 'speakeasy'
# NOTE(review): os.getenv() returns None when OPENSHIFT_DATA_DIR is unset,
# which makes these concatenations raise TypeError at import time — the
# module presumably only runs inside OpenShift; confirm.
PRIV_KEY_FILE = os.getenv('OPENSHIFT_DATA_DIR') + '/server_private.pem'
PUB_KEY_FILE = os.getenv('OPENSHIFT_DATA_DIR') + '/server_public.pem'
# FIX: read the key material with context managers so the file handles are
# closed; the original `open(...).read()` calls leaked them.
with open(PRIV_KEY_FILE) as _key_file:
    PRIV_KEY = _key_file.read()
with open(PUB_KEY_FILE) as _key_file:
    PUB_KEY = _key_file.read()
DEBUG = True
| 30.375 | 71 | 0.790123 |
f8900e5fac4e08162311478b3ed9cf017f5cb02c
| 10,047 |
py
|
Python
|
perl_io.py
|
hariguchi/perl_io
|
1deb367faa56081b68c4eda99d364f5b533a331e
|
[
"MIT"
] | null | null | null |
perl_io.py
|
hariguchi/perl_io
|
1deb367faa56081b68c4eda99d364f5b533a331e
|
[
"MIT"
] | null | null | null |
perl_io.py
|
hariguchi/perl_io
|
1deb367faa56081b68c4eda99d364f5b533a331e
|
[
"MIT"
] | null | null | null |
r''' perl_io - Opens a file or pipe in the Perl style
Copyright (c) 2016 Yoichi Hariguchi
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Usage:
from perl_io import PerlIO
Example 1:
pio = PerlIO('/proc/meminfo') # open `/proc/meminfo' for input
Example 2:
pio = PerlIO('> /tmp/foo.txt') # open '/tmp/foo.txt' for output
Example 3:
pio = PerlIO('>> /tmp/foo.txt') # open '/tmp/foo.txt' for appending
Example 4:
pio = PerlIO('| cmd arg1 ...') # we pipe output to the command `cmd'
Example 5:
pio = PerlIO('cmd arg1 ... |') # execute `cmd' that pipes output to us
You can access the Python file object as `pio.fo' after
PerlIO object `pio' was successfully created. `pio.fo' is
set to `None' if PerlIO failed to open a file or pipe.
Example6 : Read the output of `strings /usr/bin/python' from a pipe
with PerlIO('strings /usr/bin/python |') as pio:
for line in pio.fo.xreadlines():
#
# do something...
#
Example7 : Write to a file
with PerlIO('>/tmp/.tmpfile-%d' % (os.getpid())) as pio:
print >> pio.fo, 'This is an example'
pio.fo.write('This is another example')
pio.fo.write('\n')
Note: PerlIO parses the parameter as follows in the case it
indicates to input from or output to a pipe.
1. Strips the first or last `|' (which indicates to open a pipe)
2. If the remaining string includes shell special characters
like `|', `>', `;', etc., PerlIO calls Popen() with
"sh -c 'remaining_string'", which means it can be a security
hazard when the remaining string includes the unsanitized input
from an untrusted source.
3. If the remaining string includes no shell special characters,
PerlIO does not invoke shell when it calls Popen().
How to test:
python -m unittest -v perl_io
'''
import os
import platform
import re
import sys
import syslog
import time
import subprocess
import shlex
import unittest
class TestPerlIO(unittest.TestCase):
#
# 1. Open a file to write using PerlIO
# 2. Open a pipe outputting to us with a complex command line
# PerlIO('strings `which ls` | sort | uniq | ')
# so that shell is invoked with Popen().
# 3. Write all the input to the file created in No. 1
# 4. Check the contents
#
#
# 1. Open a pipe to write with a complex command line
# PerlIO('| cat > /tmp/.pio_pipe_rt_test-XXXX')
# so that shell is invoked with Popen().
# The output to the pipe is redirected to a file
# 2. Open the file to read using PerlIO
# 3. Check the contents
#
#
# Read from a pipe with a simple command line
# so that shell is not invoked with Popen().
# Confirm the contents of the file is correct.
# Must be called after file_test().
#
| 33.602007 | 78 | 0.54902 |
f89039eac3e7b46b0d707c6f7b3927ce103b2914
| 919 |
py
|
Python
|
app/controllers/config/system/logs.py
|
grepleria/SnitchDNS
|
24f98b01fd5fca9aa2c660d6ee15742f2e44915c
|
[
"MIT"
] | 152 |
2020-12-07T13:26:53.000Z
|
2022-03-23T02:00:04.000Z
|
app/controllers/config/system/logs.py
|
grepleria/SnitchDNS
|
24f98b01fd5fca9aa2c660d6ee15742f2e44915c
|
[
"MIT"
] | 16 |
2020-12-07T17:04:36.000Z
|
2022-03-10T11:12:52.000Z
|
app/controllers/config/system/logs.py
|
grepleria/SnitchDNS
|
24f98b01fd5fca9aa2c660d6ee15742f2e44915c
|
[
"MIT"
] | 36 |
2020-12-09T13:04:40.000Z
|
2022-03-12T18:14:36.000Z
|
from .. import bp
from flask import request, render_template, flash, redirect, url_for
from flask_login import current_user, login_required
from app.lib.base.provider import Provider
from app.lib.base.decorators import admin_required
| 26.257143 | 68 | 0.688792 |
f8905d54c870fed2c5b2b1831a4130a25651f566
| 5,539 |
py
|
Python
|
MR-OCP/mrcap/utils/downsample_atlas.py
|
justi/m2g
|
09e8b889889ee8d8fb08b9b6fcd726fb3d901644
|
[
"Apache-2.0"
] | 12 |
2015-03-11T22:07:17.000Z
|
2016-01-29T21:24:29.000Z
|
MR-OCP/mrcap/utils/downsample_atlas.py
|
youngmook/m2g
|
09e8b889889ee8d8fb08b9b6fcd726fb3d901644
|
[
"Apache-2.0"
] | 213 |
2015-01-30T16:02:57.000Z
|
2016-01-29T21:45:02.000Z
|
MR-OCP/mrcap/utils/downsample_atlas.py
|
youngmook/m2g
|
09e8b889889ee8d8fb08b9b6fcd726fb3d901644
|
[
"Apache-2.0"
] | 5 |
2015-02-04T13:58:12.000Z
|
2016-01-29T21:24:46.000Z
|
#!/usr/bin/env python
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# create_atlas.py
# Created by Disa Mhembere on 2014-04-10.
# Email: [email protected]
# Copyright (c) 2014. All rights reserved.
# This simply takes a (182, 218, 182) atlas and creates
# a ~30-non-zero k region atlas by relabelling each
# 3x3x3 region with a new label then masking
# using a base atlas
import argparse
import nibabel as nib
import numpy as np
from math import ceil
from copy import copy
import sys, pdb
from time import time
import os
from packages.utils.setup import get_files
def create(roifn=os.path.join(os.environ["M2G_HOME"],"data","Atlas",
        "MNI152_T1_1mm_brain.nii"), start=2):
    """
    Downsamples an atlas from a template brain.
    Create a new atlas given some scaling factor determined by the start index. Can be useful if looking for parcellation of certain scale for graph generation.
    **Positional Arguments**
    roifn: [.nii; nifti image] (default = MNI152)
    - Nifti roi mask file name
    start: [int] (default = 2)
    - The x,y,z start position which determines the scaling.
    **Returns**
    atlas: [.nii; nifti image]
    - Atlas labels in MNI space.
    """
    # NOTE(review): source indentation was mangled by extraction; restored to
    # the conventional 4-space layout without changing any tokens.
    start_time = time()
    atlmap = None
    print "Loading rois as base ..."
    # Fetch the atlas data if the roi file is not present locally.
    if not os.path.exists(roifn):
        get_files()
    img = nib.load(roifn)
    base = img.get_data()
    aff = img.get_affine()
    fm = img.file_map
    true_dim = base.shape
    # Labelling new
    label_used = False
    print "Labeling new ..."
    region_num = 1
    # Each new region is a cube of side `step` = 2*start + 1 voxels.
    step = 1+(start*2)
    mstart = -start
    mend = start+1
    # Align new to scale factor: pad the volume up to a multiple of `step`.
    xdim, ydim, zdim = map(ceil, np.array(base.shape)/float(step))
    if step == 1:
        assert xdim == base.shape[0] and ydim == base.shape[1] and zdim == base.shape[2]
    resized_base = np.zeros((xdim*step, ydim*step, zdim*step), dtype=int)
    resized_base[:base.shape[0], :base.shape[1], :base.shape[2]] = base
    base = resized_base
    del resized_base
    # Create new matrix
    new = np.zeros_like(base, dtype=np.int) # poke my finger in the eye of bjarne
    # TODO: Cythonize
    # Walk the volume cube-by-cube; a region label is consumed only if at
    # least one voxel of the cube fell inside the base mask.
    for z in xrange(start, base.shape[2]-start, step):
        for y in xrange(start, base.shape[1]-start, step):
            for x in xrange(start, base.shape[0]-start, step):
                if label_used:
                    region_num += 1 # only increase counter when a label was used
                    label_used = False
                # set other (step*step)-1 around me to same region
                for zz in xrange(mstart,mend):
                    for yy in xrange(mstart,mend):
                        for xx in xrange(mstart,mend):
                            if (base[x+xx,y+yy,z+zz]): # Masking
                                label_used = True
                                new[x+xx,y+yy,z+zz] = region_num
    new = new[:true_dim[0], :true_dim[1], :true_dim[2]] # shrink new to correct size
    print "Your atlas has %d regions ..." % len(np.unique(new))
    img = nib.Nifti1Image(new, affine=img.get_affine(), header=img.get_header(), file_map=img.file_map)
    del new
    print "Building atlas took %.3f sec ..." % (time()-start_time)
    return img
def validate(atlas_fn, roifn):
    """
    Validate that an atlas you've created is a valid based on the
    masking you have
    @param atlas_fn: the new atlas you've created
    @param roifn: nifti roi file name
    """
    # NOTE(review): source indentation was mangled by extraction; restored to
    # the conventional 4-space layout without changing any tokens.
    base = nib.load(roifn).get_data()
    try:
        new = nib.load(atlas_fn).get_data()
    # NOTE(review): bare except hides the real load error — consider narrowing.
    except:
        sys.stderr.write("[Error]: Loading file %s failed!\n" % atlas_fn);
        exit(-1)
    # This is a mapping from base to new where if we get any conflicting regions we failed to make a valid atlas
    old_to_new = {}
    for i in xrange(new.shape[2]):
        for ii in xrange(new.shape[1]):
            for iii in xrange(new.shape[0]):
                if old_to_new.has_key(base[i,ii,iii]):
                    if old_to_new[base[i,ii,iii]] != new[i,ii,iii]:
                        print "[Error]; Index [%d,%d,%d] Should be: {0}, but is {1}".format(i, ii, iii,
                            old_to_new[base[i,ii,iii]], new[i,ii,iii])
                        exit(911)
                else:
                    # NOTE(review): `start` is undefined inside this function
                    # (NameError if reached), and `new[i,i,iii]` looks like a
                    # typo for `new[i,ii,iii]` — confirm against upstream.
                    if start == 0 and new[i,i,iii] in old_to_new.values(): import pdb; pdb.set_trace()
                    old_to_new[base[i,ii,iii]] = new[i,i,iii]
    print "Success! Validation complete."
# NOTE(review): main() is not defined in this excerpt — presumably stripped by
# the extraction; confirm against the full module before running as a script.
if __name__ == "__main__":
    main()
| 32.582353 | 157 | 0.667268 |
f890b528c3dd1757b9098304393522baa32267a2
| 2,241 |
py
|
Python
|
tensorforce/agents/random_agent.py
|
matthewwilfred/tensorforce
|
0ba3d39ed88fb0a0a0bf4bf03e79150c0fe0d54c
|
[
"Apache-2.0",
"MIT"
] | 1 |
2021-08-23T19:49:03.000Z
|
2021-08-23T19:49:03.000Z
|
tensorforce/agents/random_agent.py
|
matthewwilfred/tensorforce
|
0ba3d39ed88fb0a0a0bf4bf03e79150c0fe0d54c
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
tensorforce/agents/random_agent.py
|
matthewwilfred/tensorforce
|
0ba3d39ed88fb0a0a0bf4bf03e79150c0fe0d54c
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Random agent that always returns a random action.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from random import gauss, random, randrange
from tensorforce.agents import Agent
| 30.283784 | 96 | 0.622936 |
f891f4ca2c23bac0817312243666f8fd196ddfcf
| 9,970 |
py
|
Python
|
selinum_basics.py
|
elithaxxor/craiglist_scraper
|
db35d06004e306229cd10d7678574763cf48c625
|
[
"MIT"
] | null | null | null |
selinum_basics.py
|
elithaxxor/craiglist_scraper
|
db35d06004e306229cd10d7678574763cf48c625
|
[
"MIT"
] | null | null | null |
selinum_basics.py
|
elithaxxor/craiglist_scraper
|
db35d06004e306229cd10d7678574763cf48c625
|
[
"MIT"
] | null | null | null |
import os
import re
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver import Chrome
from selenium.webdriver.support.expected_conditions import presence_of_element_located
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
## ORIGINAL CODE ###
# OS = os.name
# # s.environ['PATH'] += '/Users/macbook/Documents/CS/PROJECT/AutoDownloader/TEST_DOWNLOADS/fileexteniontest.torrenttorrent.torrent'
# driver = webdriver.Chrome(r'/Users/macbook/Documents/CS/PROJECT/AutoDownloader/TEST_DOWNLOADS/fileexteniontest.torrenttorrent.torrent/chromedriver')
# driver.get('https://1337x.to/')
## To Load Extensions::
# --- Browser setup ---------------------------------------------------------
# Launch Chrome with a local ad-blocker extension (.crx) and open the 1337x
# torrent index.  All paths are hard-coded to this developer's machine.
try:
    OS = os.name
    chrome_options = Options()
    chrome_options.add_extension('/Users/macbook/Documents/CS/PROJECT/AutoDownloader/TEST_DOWNLOADS/Selenium_Project/ad_blocker.crx')
    driver = webdriver.Chrome(options=chrome_options, executable_path= r'/Users/macbook/Documents/CS/PROJECT/AutoDownloader/TEST_DOWNLOADS/fileexteniontest.torrenttorrent.torrent/chromedriver')
    time.sleep(2)
    driver.get('https://1337x.to/')
    driver.implicitly_wait(25) ### no need to call more than once
    print(OS)
    print(driver)
    #print(driver.text)
except Exception as e:
    print('ERROR IN PARSING CHROME EXTENSION', str(e))
# --- First search ----------------------------------------------------------
# Type 'chopper' into the site's autocomplete box and submit with ENTER
# (clicking the magnifier icon is left commented out as an alternative).
try:
    search_box = driver.find_element_by_id('autocomplete')
    print(search_box.text)
    search_box.click()
    search_box.send_keys('chopper')
    click_search_box = driver.find_element_by_class_name('flaticon-search')
    #click_seach_box.click()
    #click_search_box.send_keys(Keys.ENTER)
    search_box.send_keys(Keys.ENTER)
    #driver.find_element_by_xpath("html/xxxxx").send_keys('keys.ENTER')
except Exception as e:
    print('Element not found CANNOT FIND SEARCH BOX ', str(e))
# --- Second search ---------------------------------------------------------
# Select-all (CTRL+A) then clear the previous query and search for
# 'the titanic' instead.
try:
    search_box01 = driver.find_element_by_id('autocomplete')
    print(search_box01.text)
    search_box01.click()
    search_box01.send_keys(Keys.CONTROL, "a")
    search_box01.clear()
    search_box01.send_keys('the titanic')
    search_box01.send_keys(Keys.ENTER)
except Exception as e:
    print('Element not found 2nd search', str(e))
### IMPLIMENT EXPLICIT WAIT
## SINCE THE WEBPAGE MAY TAKE LONG TO LOAD, AND TIME TO PARSE, SET UP AN EXPLICIT WAIT--> THIS WILL WAIT UNTIL THE DEFINED THING IS LOADED
## SET UP LOOP TO ITERATE THROUGH LIST OF ELEMENTS
# --- Results table ---------------------------------------------------------
# Explicitly wait (up to 15 s) for the results table wrapper, then read the
# first result cell via an absolute XPath.
try:
    body = WebDriverWait(driver, 15).until(
        EC.presence_of_element_located((By.CLASS_NAME, 'table-list-wrap'))
        #EC.presence_of_all_elements_located((by.CLASS, 'table-list table table-responsive table-striped')) ##
    )
    print(body.text)
    print(),print()
    print('1111111111')
    href_link = body.find_element_by_xpath("/html/body/main/div/div/div/div[2]/div[1]/table/tbody/tr[1]/td[1]")
    print(href_link.text)
except Exception as e:
    print('Element not found body search', str(e))
# --- Open a specific result ------------------------------------------------
# The link text is hard-coded to one known torrent title; this will fail if
# that exact listing is not on the page.
try:
    click_link = driver.find_element_by_link_text('The Titanic Secret by Clive Cussler, Jack Du Brul EPUB')
    print(click_link.text)
    click_link.click()
except Exception as e:
    print('Element not found click test', str(e))
# --- Magnet link -----------------------------------------------------------
# Wait for the magnet anchor (matched by its obfuscated class names, which
# the site appears to rotate -- brittle), then click it via absolute XPath.
try:
    # magnet = driver.find_element
    magnet_pull =WebDriverWait(driver, 15).until(
        EC.presence_of_element_located((By.CLASS_NAME, "l4702248fa49fbaf25efd33c5904b4b3175b29571 l0e850ee5d16878d261dd01e2486970eda4fb2b0c l8680f3a1872d2d50e0908459a4bfa4dc04f0e610"))
    )
    print('magnetpull info')
    print(magnet_pull.text)
    magnet_link = driver.find_element_by_xpath("/html/body/main/div/div/div/div[2]/div[1]/ul[1]/li[1]/a")
    print(magnet_link.text)
    magnet_link.click()
except Exception as e:
    print('MAGNET PULL ERROR', str(e))
# Close the browser and end the WebDriver session.
driver.quit()
###### GOOOD CODE ######
##### TO LOOP THROUGH A LIST WHILE IN IMPLICIT WAIT
# sm_table = body.find_element_by_class_name('"table-list table table-responsive table-striped"')
# # sm_table = body.find_element_by_class_name('coll-1 name')
# #sm_table = body.find_element_by_xpath("/html/body/main/div/div/div/div[2]/div[1]/table/tbody/tr[1]/td[1]")
#
# for cell in sm_table:
# href_link = cell.find_element_by_xpath("/html/body/main/div/div/div/div[2]/div[1]/table/tbody/tr[1]/td[1]")
# print(href_link.text)
## ORIGINAL CODE ###
# OS = os.name
# # s.environ['PATH'] += '/Users/macbook/Documents/CS/PROJECT/AutoDownloader/TEST_DOWNLOADS/fileexteniontest.torrenttorrent.torrent'
# driver = webdriver.Chrome(r'/Users/macbook/Documents/CS/PROJECT/AutoDownloader/TEST_DOWNLOADS/fileexteniontest.torrenttorrent.torrent/chromedriver')
# driver.get('https://1337x.to/')
#################### EXPLICIT WAIT ###########################
###### USE WHEN DOWNLOAD COMPLETES ######### (23:00)
#### use when you want to wait some to for executution
## explicit wait -- waits until condition is returned true.
## driver, 30 --> how long to wait till true
# ## use body class to find element
# ## nest elements in a tuple
# print(f"my_element")
# WebDriverWait(driver, 30).until(
# EC.text_to_b_present_in_element(
# (by.CLASS_NAME, 'progress-label'),## element filtration (class name, class name vaue as a tuple
# 'complete' ## expected text as a string
#
# )
#
# )
# my_element00 = driver.find_element_by_class_name('') ## <--- pass in class value #-> class styling method
# print(my_element00)
#
# #### DROP DOWN CLASSES FOR MAGNET / TORRENT DOWNLOAD ##
# <ul class="lfa750b508ad7d04e3fc96bae2ea94a5d121e6607 lcafae12a818cf41a5873ad374b98e79512c946c6">
# <li><a class="l4702248fa49fbaf25efd33c5904b4b3175b29571 l0e850ee5d16878d261dd01e2486970eda4fb2b0c l8680f3a1872d2d50e0908459a4bfa4dc04f0e610" href="magnet:?xt=urn:btih:F5BC20E9AA709CFC32BE63B2F6BEE56882EB7BD2&dn=The+Titanic+Secret+by+Clive+Cussler%2C+Jack+Du+Brul+EPUB&tr=udp%3A%2F%2Ftracker.coppersurfer.tk%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.leechers-paradise.org%3A6969%2Fannounce&tr=udp%3A%2F%2F9.rarbg.to%3A2710%2Fannounce&tr=udp%3A%2F%2Fexodus.desync.com%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.uw0.xyz%3A6969%2Fannounce&tr=udp%3A%2F%2Fopen.stealth.si%3A80%2Fannounce&tr=udp%3A%2F%2Ftracker.tiny-vps.com%3A6969%2Fannounce&tr=udp%3A%2F%2Fopen.demonii.si%3A1337%2Fannounce&tr=udp%3A%2F%2Ftracker.nibba.trade%3A1337%2Fannounce&tr=udp%3A%2F%2Fopentracker.sktorrent.org%3A6969%2Fannounce&tr=udp%3A%2F%2Fexplodie.org%3A6969%2Fannounce&tr=udp%3A%2F%2Fbt.xxx-tracker.com%3A2710%2Fannounce&tr=udp%3A%2F%2Fzephir.monocul.us%3A6969%2Fannounce&tr=udp%3A%2F%2Famigacity.xyz%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.zer0day.to%3A1337%2Fannounce&tr=udp%3A%2F%2Ftracker.leechers-paradise.org%3A6969%2Fannounce&tr=udp%3A%2F%2Fcoppersurfer.tk%3A6969%2Fannounce" onclick="javascript: count(this);"><span class="icon"><i class="flaticon-ld08a4206c278863eddc1bf813faa024ef55ce0ef"></i></span>Magnet Download</a> </li>
# <li class="dropdown">
# <a data-toggle="dropdown" class="l4702248fa49fbaf25efd33c5904b4b3175b29571 l0e850ee5d16878d261dd01e2486970eda4fb2b0c le41399670fcf7cac9ad72cbf1af20d76a1fa16ad" onclick="javascript: count(this);" href="#"><span class="icon"><i class="flaticon-le9f40194aef2ed76d8d0f7f1be7fe5aad6fce5e6"></i></span>Torrent Download</a>
# <ul class="dropdown-menu" aria-labelledby="dropdownMenu1">
# <li><a class="l4702248fa49fbaf25efd33c5904b4b3175b29571 l0e850ee5d16878d261dd01e2486970eda4fb2b0c l13bf8e2d22d06c362f67b795686b16d022e80098" target="_blank" href="http://itorrents.org/torrent/F5BC20E9AA709CFC32BE63B2F6BEE56882EB7BD2.torrent"><span class="icon"><i class="flaticon-lbebff891414215bfc65d51afbd7677e45be19fad"></i></span>ITORRENTS MIRROR</a> </li>
# <li><a class="l4702248fa49fbaf25efd33c5904b4b3175b29571 l0e850ee5d16878d261dd01e2486970eda4fb2b0c l13bf8e2d22d06c362f67b795686b16d022e80098" target="_blank" href="http://torrage.info/torrent.php?h=F5BC20E9AA709CFC32BE63B2F6BEE56882EB7BD2"><span class="icon"><i class="flaticon-lbebff891414215bfc65d51afbd7677e45be19fad"></i></span>TORRAGE MIRROR</a></li>
# <li><a class="l4702248fa49fbaf25efd33c5904b4b3175b29571 l0e850ee5d16878d261dd01e2486970eda4fb2b0c l13bf8e2d22d06c362f67b795686b16d022e80098" target="_blank" href="http://btcache.me/torrent/F5BC20E9AA709CFC32BE63B2F6BEE56882EB7BD2"><span class="icon"><i class="flaticon-lbebff891414215bfc65d51afbd7677e45be19fad"></i></span>BTCACHE MIRROR</a></li>
# <li><a class="l4702248fa49fbaf25efd33c5904b4b3175b29571 l0e850ee5d16878d261dd01e2486970eda4fb2b0c l8680f3a1872d2d50e0908459a4bfa4dc04f0e610" href="magnet:?xt=urn:btih:F5BC20E9AA709CFC32BE63B2F6BEE56882EB7BD2&dn=The+Titanic+Secret+by+Clive+Cussler%2C+Jack+Du+Brul+EPUB&tr=udp%3A%2F%2Ftracker.coppersurfer.tk%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.leechers-paradise.org%3A6969%2Fannounce&tr=udp%3A%2F%2F9.rarbg.to%3A2710%2Fannounce&tr=udp%3A%2F%2Fexodus.desync.com%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.uw0.xyz%3A6969%2Fannounce&tr=udp%3A%2F%2Fopen.stealth.si%3A80%2Fannounce&tr=udp%3A%2F%2Ftracker.tiny-vps.com%3A6969%2Fannounce&tr=udp%3A%2F%2Fopen.demonii.si%3A1337%2Fannounce&tr=udp%3A%2F%2Ftracker.nibba.trade%3A1337%2Fannounce&tr=udp%3A%2F%2Fopentracker.sktorrent.org%3A6969%2Fannounce&tr=udp%3A%2F%2Fexplodie.org%3A6969%2Fannounce&tr=udp%3A%2F%2Fbt.xxx-tracker.com%3A2710%2Fannounce&tr=udp%3A%2F%2Fzephir.monocul.us%3A6969%2Fannounce&tr=udp%3A%2F%2Famigacity.xyz%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.zer0day.to%3A1337%2Fannounce&tr=udp%3A%2F%2Ftracker.leechers-paradise.org%3A6969%2Fannounce&tr=udp%3A%2F%2Fcoppersurfer.tk%3A6969%2Fannounce"><span class="icon"><i class="flaticon-ld08a4206c278863eddc1bf813faa024ef55ce0ef"></i></span>None Working? Use Magnet</a></li>
#
| 57.298851 | 1,381 | 0.768907 |
f893a81b68249d96ab59017996d9f35493423f0f
| 8,644 |
py
|
Python
|
training/MNISTFashionMicroservice/src/server/training.py
|
UMass-Rescue/CombinedTechStack
|
b3447b174d9798f3baf9bf6509b4cc14a5bd225a
|
[
"MIT"
] | null | null | null |
training/MNISTFashionMicroservice/src/server/training.py
|
UMass-Rescue/CombinedTechStack
|
b3447b174d9798f3baf9bf6509b4cc14a5bd225a
|
[
"MIT"
] | 32 |
2021-03-17T13:17:22.000Z
|
2021-05-04T14:25:31.000Z
|
training/MNISTFashionMicroservice/src/server/training.py
|
UMass-Rescue/CombinedTechStack
|
b3447b174d9798f3baf9bf6509b4cc14a5bd225a
|
[
"MIT"
] | 1 |
2021-03-24T13:47:44.000Z
|
2021-03-24T13:47:44.000Z
|
import os
import tempfile
import shutil
import requests
import sys
import logging
import json
from src.server.dependency import ModelData
import tensorflow as tf
def train_model(training_id, model_data: ModelData):
    """
    Train one or more model variants for a hyperparameter sweep and report the
    best result to the coordinating server.

    The sweep covers every combination of optimizer class and learning rate
    supplied in ``model_data.optimizer``; the variant with the highest
    validation accuracy wins.  Results (and optionally the SavedModel archive)
    are POSTed to the server at ``host.docker.internal:$SERVER_PORT``.

    :param training_id: unique id for this training job; used for the log file
        name, result reporting, and the SavedModel archive name.
    :param model_data: job configuration (model JSON, optimizer spec, loss
        spec, split/seed/batch_size/n_epochs, save flag).
    """
    # SET LOGGER TO PRINT TO STDOUT AND WRITE TO FILE
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s [%(levelname)s] %(message)s",
        handlers=[
            logging.FileHandler("/log/{}.log".format(training_id)),
            logging.StreamHandler(sys.stdout)
        ]
    )
    log = logging.getLogger('db_microservice_logger')
    # StreamToLogger is defined elsewhere in this module -- presumably it
    # redirects raw stdout/stderr writes into the logger; TODO confirm.
    sys.stdout = StreamToLogger(log,logging.INFO)
    sys.stderr = StreamToLogger(log,logging.ERROR)
    # get API KEY from the environment file
    API_KEY = os.getenv('API_KEY')
    # Best-so-far metrics across the sweep; -1 sentinels guarantee the first
    # trained variant is always accepted.
    best_acc = -1
    best_val_acc = -1
    best_loss = -1
    best_val_loss = -1
    best_model = None
    best_config = None
    best_optimizer = None
    best_loss_fn = None
    # print("Save:" + str(model_data.save))
    logging.info("Save:" + str(model_data.save))
    try:
        # print('[Training] Starting to train model ID: ' + training_id)
        logging.info('[Training] Starting to train model ID: ' + training_id)
        dataset_root = '/app/src/public_dataset'
        img_height = 28
        img_width = 28
        # Build train/validation splits from the same directory using the same
        # seed so the two subsets are complementary.
        train_ds = tf.keras.preprocessing.image_dataset_from_directory(
            dataset_root,
            validation_split=model_data.split,
            subset="training",
            seed=model_data.seed,
            image_size=(img_height, img_width),
            batch_size=model_data.batch_size
        )
        validation_ds = tf.keras.preprocessing.image_dataset_from_directory(
            dataset_root,
            validation_split=model_data.split,
            subset="validation",
            seed=model_data.seed,
            image_size=(img_height, img_width),
            batch_size=model_data.batch_size
        )
        # Cache + prefetch so the input pipeline overlaps with training.
        autotune_buf_size = tf.data.AUTOTUNE
        train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=autotune_buf_size)
        validation_ds = validation_ds.cache().prefetch(buffer_size=autotune_buf_size)
        optimizer_dict = model_data.optimizer.dict()
        config = {}
        if "config" in optimizer_dict and optimizer_dict["config"]:
            # convert all float config from string to float
            # (convert_data_type is defined elsewhere in this module)
            convert_data_type(optimizer_dict["config"])
            config = optimizer_dict["config"]
        # if learning_rate is not defined, it will use the optimizor's default value
        learning_rate_list = [None]
        if model_data.optimizer.learning_rate:
            learning_rate_list = model_data.optimizer.learning_rate
        # get loss function object
        loss_dict = model_data.loss_function.dict()
        if loss_dict["config"] is None:
            loss_dict["config"] = {}
        else:
            convert_data_type(loss_dict["config"])
        loss_fn = tf.keras.losses.get(loss_dict)
        logging.info(loss_fn)
        # create all hyperparameters combination (cartesian product of
        # optimizer class names and learning rates)
        optimizer_class = model_data.optimizer.dict()
        hyperparameters = [[o,lr] for o in optimizer_dict["class_name"]
                           for lr in learning_rate_list]
        # loop through all hyperparameters
        for hp in hyperparameters:
            # load model from json file -- fresh weights for every variant
            model = tf.keras.models.model_from_json(model_data.model_structure)
            optimizer_obj = {
                "class_name": hp[0],
                "config": config
            }
            # set learning rate if not None
            if hp[1]:
                optimizer_obj["config"]["learning_rate"] = hp[1]
            optimizer = tf.keras.optimizers.get(optimizer_obj)
            n_epochs = model_data.n_epochs
            # train the model; fit() is defined elsewhere in this module and
            # returns (acc, val_acc, loss, val_loss, trained_model)
            (acc, val_acc, loss, val_loss, model) = fit(model, loss_fn, optimizer, train_ds, validation_ds, n_epochs)
            # CHECK FOR THE BEST MODEL (from validation accuracy)
            if val_acc > best_val_acc:
                best_acc = acc
                best_val_acc = val_acc
                best_loss = loss
                best_val_loss = val_loss
                best_model = model
                best_optimizer = optimizer.get_config()
                best_loss_fn = loss_fn.get_config()
        # END LOOP
        logging.info('[Training] Completed training on model ID: ' + training_id)
        # If we are saving the model, we must save it to folder, zip that folder,
        # and then send the zip file to the server via HTTP requests
        if model_data.save:
            # print('[Training] Preparing to save Model data on model ID: ' + training_id)
            logging.info('[Training] Preparing to save Model data on model ID: ' + training_id)
            # Create temp dir and save model to it
            tmpdir = tempfile.mkdtemp()
            model_save_path = os.path.join(tmpdir, training_id)
            # Save model nested 1 more layer down to facilitate unzipping
            tf.saved_model.save(best_model, os.path.join(model_save_path, training_id))
            shutil.make_archive(model_save_path, 'zip', model_save_path)
            print(tmpdir)
            files = {'model': open(model_save_path+'.zip', 'rb')}
            requests.post(
                'http://host.docker.internal:' + str(os.getenv('SERVER_PORT')) + '/training/model',
                headers={'api_key': API_KEY},
                params={'training_id': training_id},
                files=files
            )
            # print('[Training] Sent SavedModel file data on model ID: ' + training_id)
            logging.info('[Training] Sent SavedModel file data on model ID: ' + training_id)
    except:
        # NOTE(review): bare except -- any failure (including KeyboardInterrupt)
        # is logged and then the sentinel -1 metrics are still reported below.
        # print('[Training] Critical error on training: ' + training_id)
        logging.exception('[Training] Critical error on training: ' + training_id)
    # Report the best variant's metrics (or the -1 sentinels on failure).
    result = {
        'training_accuracy': best_acc,
        'validation_accuracy': best_val_acc,
        'training_loss': best_loss,
        'validation_loss': best_val_loss,
        'optimizer_config': str(best_optimizer),
        'loss_config': str(best_loss_fn)
    }
    logging.info('[Training] results: ' + str(result))
    # Send HTTP request to server with the statistics on this training
    r = requests.post(
        'http://host.docker.internal:' + str(os.getenv('SERVER_PORT')) + '/training/result',
        headers={'api_key': API_KEY},
        json={
            'dataset_name': os.getenv('DATASET_NAME'),
            'training_id': training_id,
            'results': result
        })
    r.raise_for_status()
    # print("[Training Results] Sent training results to server.")
    logging.info("[Training Results] Sent training results to server.")
| 32.618868 | 117 | 0.614762 |
f894286d87c8139bf9e7bda1448f050c5b02eb70
| 3,287 |
py
|
Python
|
app.py
|
pythonlittleboy/python_gentleman_crawler
|
751b624d22a5024746c256080ea0815a9986e3d7
|
[
"Apache-2.0"
] | 1 |
2017-05-03T12:18:31.000Z
|
2017-05-03T12:18:31.000Z
|
app.py
|
pythonlittleboy/python_gentleman_crawler
|
751b624d22a5024746c256080ea0815a9986e3d7
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
pythonlittleboy/python_gentleman_crawler
|
751b624d22a5024746c256080ea0815a9986e3d7
|
[
"Apache-2.0"
] | 1 |
2020-10-29T04:00:04.000Z
|
2020-10-29T04:00:04.000Z
|
from flask import Flask
from flask import render_template
from flask import request
from model import MovieWebDAO
import json
from ml import Forcast
app = Flask(__name__)
if __name__ == '__main__':
    # Dev entry point: print the local URL, then start Flask's debug server
    # listening on all interfaces at port 15001.
    print("http://localhost:15001")
    app.run(host='0.0.0.0', debug=True, port=15001)
| 31.009434 | 79 | 0.703377 |
f89a1bc1e1f90da376a0c1761bee56b1db485561
| 1,438 |
py
|
Python
|
remove_negative_from_positive_augmented_samples.py
|
DarkElement75/object-detection-experiments
|
fc638f361f76d7bbb6e5cde9a3480c656b486ad6
|
[
"MIT"
] | null | null | null |
remove_negative_from_positive_augmented_samples.py
|
DarkElement75/object-detection-experiments
|
fc638f361f76d7bbb6e5cde9a3480c656b486ad6
|
[
"MIT"
] | null | null | null |
remove_negative_from_positive_augmented_samples.py
|
DarkElement75/object-detection-experiments
|
fc638f361f76d7bbb6e5cde9a3480c656b486ad6
|
[
"MIT"
] | null | null | null |
import h5py
import numpy as np
import cv2
"""
Just gets rid of the negatives by only reading the positives, then writing them to replace the existing archive
"""
archive_dir="positive_augmented_samples.h5"
x,y = read_new(archive_dir)
write_new(archive_dir, x, y)
| 35.95 | 112 | 0.632823 |
f89ade1e452186e4d101ccde6adaccc57996d66d
| 646 |
py
|
Python
|
Automate_Whatsapp_Sending_Text.py
|
IshvinaKapoor/Automate-WhatsApp
|
f499db0540c56b74152a368af1fa361ecea69806
|
[
"MIT"
] | null | null | null |
Automate_Whatsapp_Sending_Text.py
|
IshvinaKapoor/Automate-WhatsApp
|
f499db0540c56b74152a368af1fa361ecea69806
|
[
"MIT"
] | null | null | null |
Automate_Whatsapp_Sending_Text.py
|
IshvinaKapoor/Automate-WhatsApp
|
f499db0540c56b74152a368af1fa361ecea69806
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Automate WhatsApp - Sending WhatsApp message
@author: DELL Ishvina Kapoor
"""
#importing the necessary modules
import pywhatkit as pkt
import getpass as gp
#displaying a welcome message
print("Automating Whatsapp!")
#capturing the target phone number from the user
phone_num = gp.getpass(prompt = 'Enter the phone number(with country code) : ', stream = None)
#capture the message
message = "Hi IK this side"
#call the method
#the time is in 24 hr format
pkt.sendwhatmsg(phone_num, message, 22 , 33)
#will be displayed once whatsapp is automated
print("Delivered to the target user")
| 23.925926 | 96 | 0.716718 |
f89c748dd51197d30a5af7af230eb9f70959fb01
| 894 |
py
|
Python
|
transonic/analyses/beniget.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 88 |
2019-01-08T16:39:08.000Z
|
2022-02-06T14:19:23.000Z
|
transonic/analyses/beniget.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 13 |
2019-06-20T15:53:10.000Z
|
2021-02-09T11:03:29.000Z
|
transonic/analyses/beniget.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 1 |
2019-11-05T03:03:14.000Z
|
2019-11-05T03:03:14.000Z
|
import gast as ast
from beniget import Ancestors, DefUseChains as DUC, UseDefChains
from beniget.beniget import Def
__all__ = ["Ancestors", "DefUseChains", "UseDefChains"]
# this import has to be after the definition of DefUseChains
from transonic.analyses.extast import CommentLine # noqa: E402
| 29.8 | 64 | 0.659955 |
f89ffb26ee589ce79dc400f7f5cf4afa16b557b3
| 88 |
py
|
Python
|
view/resources/__init__.py
|
surfaceanalytics/inelasticscattering
|
da549dde788a55084c565bbc5f89ebf9cbae4263
|
[
"MIT"
] | null | null | null |
view/resources/__init__.py
|
surfaceanalytics/inelasticscattering
|
da549dde788a55084c565bbc5f89ebf9cbae4263
|
[
"MIT"
] | 3 |
2021-09-08T03:02:25.000Z
|
2022-03-12T01:00:06.000Z
|
view/resources/__init__.py
|
surfaceanalytics/inelasticscattering
|
da549dde788a55084c565bbc5f89ebf9cbae4263
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 3 16:43:28 2020
@author: nicholls
"""
| 11 | 35 | 0.568182 |
f8a219513d5df677c7712f374a4d0f79bdc2f13b
| 2,401 |
py
|
Python
|
2020/python/16.py
|
gcp825/advent_of_code
|
b4ea17572847e1a9044487041b3e12a0da58c94b
|
[
"MIT"
] | 1 |
2021-12-29T09:32:08.000Z
|
2021-12-29T09:32:08.000Z
|
2020/python/16.py
|
gcp825/advent_of_code
|
b4ea17572847e1a9044487041b3e12a0da58c94b
|
[
"MIT"
] | null | null | null |
2020/python/16.py
|
gcp825/advent_of_code
|
b4ea17572847e1a9044487041b3e12a0da58c94b
|
[
"MIT"
] | null | null | null |
from collections import Counter
print(main('day16.txt'))
| 24.752577 | 96 | 0.43107 |
f8a565676ba40410367b887bd52120b87f5a4d60
| 9,512 |
py
|
Python
|
MODEL3.CNN.py
|
alhasacademy96/finalyearproject
|
1f8f21dea55e45807767e465c27b225e2fc5c082
|
[
"MIT"
] | 2 |
2020-09-15T18:10:12.000Z
|
2021-01-25T21:54:04.000Z
|
MODEL3.CNN.py
|
alhasacademy96/finalyearproject
|
1f8f21dea55e45807767e465c27b225e2fc5c082
|
[
"MIT"
] | null | null | null |
MODEL3.CNN.py
|
alhasacademy96/finalyearproject
|
1f8f21dea55e45807767e465c27b225e2fc5c082
|
[
"MIT"
] | null | null | null |
# Author: Ibrahim Alhas - ID: 1533204.
# MODEL 3: CNN with built-in tensorflow tokenizer.
# This is the final version of the model (not the base).
# Packages and libraries used for this model.
# ** Install these if not installed already **.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import datetime
from time import time
import re
from sklearn.metrics import accuracy_score, confusion_matrix, precision_score, recall_score, f1_score, roc_curve, \
classification_report
from tensorflow import keras
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras import layers
from keras.models import Sequential
from sklearn.model_selection import train_test_split, cross_validate
import tensorflow as tf
import seaborn as sns
import warnings
import keras
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, Activation, BatchNormalization
from keras.layers.noise import GaussianNoise
from keras.layers import Conv2D, MaxPooling2D
warnings.filterwarnings('ignore')
# plt.style.use('ggplot')
# Basic data visualisation and analysis ------------------------------------------------------------------------------
# We see that the title column is from news articles, and the text column forms the twitter tweet extracts.
# Load the two halves of the fake-news dataset from local CSVs.
true = pd.read_csv('True.csv')
false = pd.read_csv('Fake.csv')
# We drop the columns we do not need. See chapter 3, model CNN for more details.
true = true.drop('title', axis=1)
true = true.drop('subject', axis=1)
true = true.drop('date', axis=1)
false = false.drop('title', axis=1)
false = false.drop('subject', axis=1)
false = false.drop('date', axis=1)
# We set the labels for each data instance, where factual = 1, otherwise 0.
false['label'] = 0
true['label'] = 1
# We merge the two divided datasets (true and fake) into a singular dataset.
data = pd.concat([true, false], ignore_index=True)
texts = data['text']
labels = data['label']
x = texts
y = labels
# We incorporate the publishers feature from title and text instances, and place it into the dataset manually.
# First Creating list of index that do not have publication part. We can use this as a new feature.
# Rows in True.csv start with "PUBLISHER - text"; split on the first " -".
unknown_publishers = []
for index, row in enumerate(true.text.values):
    try:
        record = row.split(" -", maxsplit=1)
        # if no text part is present, following will give error
        print(record[1])
        # if len of piblication part is greater than 260
        # following will give error, ensuring no text having "-" in between is counted
        assert (len(record[0]) < 260)
    except:
        unknown_publishers.append(index)
# We print the instances where publication information is absent or different.
print(true.iloc[unknown_publishers].text)
# We want to use the publication information as a new feature.
# Split each row into (publisher, body); rows flagged above get "Unknown".
publisher = []
tmp_text = []
for index, row in enumerate(true.text.values):
    if index in unknown_publishers:
        # Append unknown publisher:
        tmp_text.append(row)
        publisher.append("Unknown")
        continue
    record = row.split(" -", maxsplit=1)
    publisher.append(record[0])
    tmp_text.append(record[1])
# Replace text column with new text + add a new feature column called publisher/source.
true["publisher"] = publisher
true["text"] = tmp_text
del publisher, tmp_text, record, unknown_publishers
# Validate that the publisher/source column has been added to the dataset.
print(true.head())
# Check for missing values, then drop them for both datasets.
print([index for index, text in enumerate(true.text.values) if str(text).strip() == ''])
# NOTE(review): 8970 is a hard-coded row index -- presumably the single empty
# row printed above; this breaks if the CSV changes. TODO confirm.
true = true.drop(8970, axis=0)
fakeEmptyIndex = [index for index, text in enumerate(false.text.values) if str(text).strip() == '']
print(f"No of empty rows: {len(fakeEmptyIndex)}")
# NOTE(review): empty rows in `false` are only inspected here, not dropped.
false.iloc[fakeEmptyIndex].tail()
# -
# For CNNs, we have to vectorize the text into 2d integers (tensors).
# Hyperparameters for tokenization, embedding, and the train/test split.
MAX_SEQUENCE_LENGTH = 5000
MAX_NUM_WORDS = 25000
EMBEDDING_DIM = 300
TEST_SPLIT = 0.2
epochs = 1
# We tokenize the text, just like all other models--------------------------------------------------------------------
tokenizer = Tokenizer(num_words=MAX_NUM_WORDS)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
num_words = min(MAX_NUM_WORDS, len(word_index)) + 1
# Pre-pad/truncate every sequence to a fixed length for the Conv1D input.
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH, padding='pre', truncating='pre')
# Print the total number of tokens:
print('Found %s tokens.' % len(word_index))
# We partition our dataset into train/test.
x_train, x_val, y_train, y_val = train_test_split(data, labels.apply(lambda x: 0 if x == 0 else 1),
                                                  test_size=TEST_SPLIT)
log_dir = "logs\\model\\"
# A custom callbacks function, which initially included tensorboard.
# ReduceLROnPlateau halves the LR after 2 stale epochs; EarlyStopping restores
# the best weights when validation loss stops improving.
mycallbacks = [
    tf.keras.callbacks.ReduceLROnPlateau(monitor='val_accuracy', patience=2, verbose=1, factor=0.5, min_lr=0.00001),
    tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True),  # Restoring the best
    # ...weights will help keep the optimal weights.
    # tf.keras.callbacks.TensorBoard(log_dir="./logs"), # NEWLY ADDED - CHECK.
    # tf.keras.callbacks.TensorBoard(log_dir=log_dir.format(time())), # NEWLY ADDED - CHECK.
    # tensorboard --logdir logs --> to check tensorboard feedback.
]
# Parameters for our model. We experimented with some combinations and settled on this configuration------------------
# Embedding -> Conv1D -> global max pool -> dense head with sigmoid output
# (binary fake/real classification).
model = Sequential(
    [
        # Word/sequence processing:
        layers.Embedding(num_words, EMBEDDING_DIM, input_length=MAX_SEQUENCE_LENGTH, trainable=True),
        # The layers:
        layers.Conv1D(128, 5, activation='relu'),
        layers.GlobalMaxPooling1D(),
        # We classify our model here:
        layers.Dense(128, activation='relu'),
        layers.Dense(1, activation='sigmoid')
    ])
# We compile our model and run, with the loss function crossentropy, and optimizer rmsprop (we experimented with adam,
# ...but rmsprop produced better results).
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
model.summary()
print("Model weights:")
print(model.weights)
# tensorboard_callback = keras.callbacks.TensorBoard(log_dir="./logs")
history = model.fit(x_train, y_train, batch_size=256, epochs=epochs, validation_data=(x_val, y_val),
                    callbacks=mycallbacks)
# Produce a figure, for every epoch, and show performance metrics.
# NOTE: `epochs` is rebound here from the int above to a list of epoch indices.
epochs = [i for i in range(1)]
fig, ax = plt.subplots(1, 2)
train_acc = history.history['accuracy']
train_loss = history.history['loss']
val_acc = history.history['val_accuracy']
val_loss = history.history['val_loss']
fig.set_size_inches(20, 10)
ax[0].plot(epochs, train_acc, 'go-', label='Training Accuracy')
ax[0].plot(epochs, val_acc, 'ro-', label='Testing Accuracy')
ax[0].set_title('Training & Testing Accuracy')
ax[0].legend()
ax[0].set_xlabel("Epochs")
ax[0].set_ylabel("Accuracy")
ax[1].plot(epochs, train_loss, 'go-', label='Training Loss')
ax[1].plot(epochs, val_loss, 'ro-', label='Testing Loss')
ax[1].set_title('Training & Testing Loss')
ax[1].legend()
ax[1].set_xlabel("Epochs")
ax[1].set_ylabel("Loss")
plt.show()
'''
history_dict = history.history
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = history.epoch
plt.figure(figsize=(12, 9))
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss', size=20)
plt.xlabel('Epochs', size=20)
plt.ylabel('Loss', size=20)
plt.legend(prop={'size': 20})
plt.show()
plt.figure(figsize=(12, 9))
plt.plot(epochs, acc, 'g', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy', size=20)
plt.xlabel('Epochs', size=20)
plt.ylabel('Accuracy', size=20)
plt.legend(prop={'size': 20})
plt.ylim((0.5, 1))
plt.show()
'''
# We evaluate our model by predicting a few instances from our test data (the first 5)--------------------------------
print("Evaluation:")
print(model.evaluate(x_val, y_val))
# We predict a few instances (up to 5).
pred = model.predict(x_val)
print(pred[:5])
# Threshold the sigmoid outputs at 0.5 to obtain hard 0/1 class labels.
binary_predictions = [1 if p >= 0.5 else 0 for p in pred]
# We print performance metrics.
# FIX: sklearn metrics take (y_true, y_pred) in that order.  The original code
# passed the predictions first, which silently swapped precision and recall and
# was inconsistent with the correctly-ordered confusion_matrix /
# classification_report calls below.
print('Accuracy on test set:', accuracy_score(y_val, binary_predictions))
print('Precision on test set:', precision_score(y_val, binary_predictions))
print('Recall on test set:', recall_score(y_val, binary_predictions))
print('F1 on test set:', f1_score(y_val, binary_predictions))
# We print the classification report (as an extra):
print(classification_report(y_val, pred.round(), target_names=['Fact', 'Fiction']))
# We print the confusion matrix.
cmm = confusion_matrix(y_val, pred.round())
print(cmm)
print("Ibrahim Alhas")
# Render the confusion matrix as a labelled heatmap (rows = actual, cols = predicted).
cmm = pd.DataFrame(cmm, index=['Fake', 'Original'], columns=['Fake', 'Original'])
plt.figure(figsize=(10, 10))
sns.heatmap(cmm, cmap="Blues", linecolor='black', linewidth=1, annot=True, fmt='', xticklabels=['Fake', 'Original'],
            yticklabels=['Fake', 'Original'])
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.show()
# End----------------------------------------------------
| 37.746032 | 120 | 0.700694 |
f8a57061a44b4ce6c14481e8a79c00cddf4bc7c8
| 40,857 |
py
|
Python
|
tn/old_scripts/old_md_to_pdf/export_md_to_pdf.py
|
unfoldingWord-dev/tools
|
7251d64b4750f1615125dab3c09d6d00a9c284b4
|
[
"MIT"
] | 6 |
2015-07-27T21:50:39.000Z
|
2020-06-25T14:32:35.000Z
|
tn/old_scripts/old_md_to_pdf/export_md_to_pdf.py
|
unfoldingWord-dev/tools
|
7251d64b4750f1615125dab3c09d6d00a9c284b4
|
[
"MIT"
] | 89 |
2015-06-24T09:35:40.000Z
|
2022-02-13T14:40:31.000Z
|
tn/old_scripts/old_md_to_pdf/export_md_to_pdf.py
|
unfoldingWord-dev/tools
|
7251d64b4750f1615125dab3c09d6d00a9c284b4
|
[
"MIT"
] | 12 |
2015-07-13T17:31:04.000Z
|
2021-08-06T06:50:21.000Z
|
#!/usr/bin/env python2
# -*- coding: utf8 -*-
#
# Copyright (c) 2017 unfoldingWord
# http://creativecommons.org/licenses/MIT/
# See LICENSE file for details.
#
# Contributors:
# Richard Mahn <[email protected]>
"""
This script generates the HTML tN documents for each book of the Bible
"""
from __future__ import unicode_literals, print_function
import os
import sys
import re
import pprint
import logging
import argparse
import tempfile
import markdown
import shutil
import subprocess
import csv
import codecs
import markdown2
import json
from glob import glob
from bs4 import BeautifulSoup
from usfm_tools.transform import UsfmTransform
from ...general_tools.file_utils import write_file, read_file, load_json_object, unzip, load_yaml_object
from ...general_tools.url_utils import download_file
from ...general_tools.bible_books import BOOK_NUMBERS, BOOK_CHAPTER_VERSES
from ...general_tools.usfm_utils import usfm3_to_usfm2
_print = print
def main(ta_tag, tn_tag, tw_tag, ust_tag, ult_tag, ugnt_tag, lang_code, books, working_dir, output_dir):
    """
    Build a TnConverter for the requested resources and run the export.

    :param ta_tag: tA (translationAcademy) release tag (CLI default: v10)
    :param tn_tag: tN (translationNotes) release tag (CLI default: v13)
    :param tw_tag: tW (translationWords) release tag (CLI default: v9)
    :param ust_tag: UST resource tag (CLI default: master)
    :param ult_tag: ULT resource tag (CLI default: master)
    :param ugnt_tag: UGNT release tag (CLI default: v0.4)
    :param lang_code: language code, e.g. 'en'
    :param books: list of Bible book ids to process (CLI default: None)
    :param working_dir: working directory for intermediate files
    :param output_dir: directory where the generated output is written
    :return: None
    """
    # NOTE(review): TnConverter takes (working_dir, output_dir) before
    # (lang_code, books) — argument order differs from this function's own
    # signature; keep this in mind when editing either side.
    tn_converter = TnConverter(ta_tag, tn_tag, tw_tag, ust_tag, ult_tag, ugnt_tag, working_dir, output_dir,
                               lang_code, books)
    tn_converter.run()
if __name__ == '__main__':
    # Command-line entry point: collect resource tags/paths and delegate to
    # main(). Defaults below are the pinned release tags for each resource.
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-l', '--lang', dest='lang_code', default='en', required=False, help="Language Code")
    parser.add_argument('-b', '--book_id', dest='books', nargs='+', default=None, required=False, help="Bible Book(s)")
    parser.add_argument('-w', '--working', dest='working_dir', default=False, required=False, help="Working Directory")
    parser.add_argument('-o', '--output', dest='output_dir', default=False, required=False, help="Output Directory")
    parser.add_argument('--ta-tag', dest='ta', default='v10', required=False, help="tA Tag")
    parser.add_argument('--tn-tag', dest='tn', default='v13', required=False, help="tN Tag")
    parser.add_argument('--tw-tag', dest='tw', default='v9', required=False, help="tW Tag")
    parser.add_argument('--ust-tag', dest='ust', default='master', required=False, help="UST Tag")
    parser.add_argument('--ult-tag', dest='ult', default='master', required=False, help="ULT Tag")
    parser.add_argument('--ugnt-tag', dest='ugnt', default='v0.4', required=False, help="UGNT Tag")
    # sys.argv[0] (the program name) is excluded from parsing.
    args = parser.parse_args(sys.argv[1:])
    main(args.ta, args.tn, args.tw, args.ust, args.ult, args.ugnt, args.lang_code, args.books, args.working_dir, args.output_dir)
| 49.46368 | 254 | 0.548303 |
f8a59fce72ffcde75ac9e9b378c6906ab092d7dd
| 2,565 |
py
|
Python
|
mudi/interp/bootstrap_aucell.py
|
getzlab/mudi
|
eda170119708e59920c23a03834af915ecca24ce
|
[
"MIT"
] | 1 |
2021-11-04T00:08:00.000Z
|
2021-11-04T00:08:00.000Z
|
mudi/interp/bootstrap_aucell.py
|
getzlab/mudi
|
eda170119708e59920c23a03834af915ecca24ce
|
[
"MIT"
] | null | null | null |
mudi/interp/bootstrap_aucell.py
|
getzlab/mudi
|
eda170119708e59920c23a03834af915ecca24ce
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from tqdm import tqdm
import argparse
from pyscenic.aucell import aucell
from .aucell import create_gene_signatures
from .aucell import assign_bootstrap
if __name__ == "__main__":
main()
| 28.5 | 91 | 0.617934 |
f8a65542e1ebb18eabea4f393380c912f8314bfc
| 696 |
py
|
Python
|
network/topo-custom.py
|
kstough/pox
|
152625fcd40fc5ddfce87b7632fd40777507205c
|
[
"Apache-2.0"
] | null | null | null |
network/topo-custom.py
|
kstough/pox
|
152625fcd40fc5ddfce87b7632fd40777507205c
|
[
"Apache-2.0"
] | null | null | null |
network/topo-custom.py
|
kstough/pox
|
152625fcd40fc5ddfce87b7632fd40777507205c
|
[
"Apache-2.0"
] | null | null | null |
"""Custom topology example
s7 ---- s8 ---- s9
/ \ / \ / \
h1 h2 h3 h4 h5 h6
"""
from mininet.topo import Topo
print('Loading MyTopo')
topos = {'mytopo': (lambda: MyTopo())}
| 19.885714 | 79 | 0.570402 |
f8a77e8060730c4c9bc76d9c5c083f084aed00b7
| 2,383 |
py
|
Python
|
test_alarms.py
|
ajaynema/rule-engine
|
99cd5d54dd45e1223d0eec2a65bc6d5f0ef3da51
|
[
"MIT"
] | null | null | null |
test_alarms.py
|
ajaynema/rule-engine
|
99cd5d54dd45e1223d0eec2a65bc6d5f0ef3da51
|
[
"MIT"
] | null | null | null |
test_alarms.py
|
ajaynema/rule-engine
|
99cd5d54dd45e1223d0eec2a65bc6d5f0ef3da51
|
[
"MIT"
] | null | null | null |
from rule_condition import Condition
from rule_action import Action
from rule_template import RuleTemplate
from rule_engine import RuleEngine
from rule import Rule
from rule_data import Data
from rule_scope import Scope
from action_handler_send_email import SendEmailHandler
from action_handler_report_alarm import ReportAlarmHandler
if __name__=="__main__":
main()
| 29.419753 | 81 | 0.660512 |
f8a7cc80262619abcc2b85bf1530f105f8f8ce34
| 362 |
py
|
Python
|
agri/urls.py
|
Bhavesh0327/Agriblock
|
72015e1765214b153771dbc3868eae01fe8898b3
|
[
"MIT"
] | 1 |
2020-10-01T08:28:57.000Z
|
2020-10-01T08:28:57.000Z
|
agri/urls.py
|
Bhavesh0327/Agriblock
|
72015e1765214b153771dbc3868eae01fe8898b3
|
[
"MIT"
] | 14 |
2020-06-05T20:37:13.000Z
|
2022-02-26T22:51:36.000Z
|
agri/urls.py
|
Bhavesh0327/Agriblock
|
72015e1765214b153771dbc3868eae01fe8898b3
|
[
"MIT"
] | 3 |
2020-01-29T04:34:28.000Z
|
2020-09-30T21:48:30.000Z
|
from django.urls import path
from .views import *

# (route, view, url name) triples for the REST endpoints exposed by this app.
_ROUTES = [
    ('login/', login, 'login'),
    ('issue_asset/', issue_asset, 'issue_asset'),
    ('buy/', buy, 'buy'),
    ('get_assets/', get_assets, 'get_assets'),
    ('get_transactions/', get_transactions, 'get_transactions'),
]
# Expand each triple into a django.urls.path() entry.
rest_urls = [path(route, view, name=name) for route, view, name in _ROUTES]
urlpatterns = rest_urls
| 25.857143 | 63 | 0.638122 |
f8a96eee4517afeca4532922b8ea2f6d38dc101a
| 4,898 |
py
|
Python
|
lib/utils_monai.py
|
octaviomtz/Growing-Neural-Cellular-Automata
|
a6f91661e35f7bd0d7b90ac4347f4d56c9351d0b
|
[
"MIT"
] | null | null | null |
lib/utils_monai.py
|
octaviomtz/Growing-Neural-Cellular-Automata
|
a6f91661e35f7bd0d7b90ac4347f4d56c9351d0b
|
[
"MIT"
] | null | null | null |
lib/utils_monai.py
|
octaviomtz/Growing-Neural-Cellular-Automata
|
a6f91661e35f7bd0d7b90ac4347f4d56c9351d0b
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import monai
import math
import torch
import glob
from skimage.morphology import remove_small_holes, remove_small_objects
from monai.transforms import (
LoadImaged,
AddChanneld,
Orientationd,
Spacingd,
ScaleIntensityRanged,
SpatialPadd,
RandAffined,
RandCropByPosNegLabeld,
RandGaussianNoised,
RandFlipd,
RandFlipd,
RandFlipd,
CastToTyped,
)
def get_xforms_scans_or_synthetic_lesions(mode="scans", keys=("image", "label")):
    """Build a composed MONAI transform pipeline for scans or synthetic lesions.

    Both modes load the keyed images, add a channel dimension, reorient to
    LPS and resample to a (1.25, 1.25, 5.0) spacing. In "synthetic" mode an
    intensity rescale from HU [-1000, 500] to [0, 1] is added and the image
    is cast to float32 instead of int16; the label is always cast to uint8.
    """
    pipeline = [
        LoadImaged(keys),
        AddChanneld(keys),
        Orientationd(keys, axcodes="LPS"),
        Spacingd(keys, pixdim=(1.25, 1.25, 5.0), mode=("bilinear", "nearest")[: len(keys)]),
    ]
    # Default cast types: raw scans stay integer-valued.
    cast_dtypes = (np.int16, np.uint8)
    if mode == "synthetic":
        pipeline.append(
            ScaleIntensityRanged(keys[0], a_min=-1000.0, a_max=500.0, b_min=0.0, b_max=1.0, clip=True)
        )
        cast_dtypes = (np.float32, np.uint8)
    pipeline.append(CastToTyped(keys, dtype=cast_dtypes))
    return monai.transforms.Compose(pipeline)
def get_xforms_load(mode="load", keys=("image", "label")):
    """Return a composed transform that loads and intensity-rescales images.

    Loads the keyed images and rescales intensities from HU [-1000, 500] to
    [0, 1], then casts image/label to (float32, uint8).

    Fix: previously ``dtype`` was only bound when ``mode == "load"``, so any
    other mode raised ``NameError`` at the ``CastToTyped`` step. The cast
    types are now always defined; behaviour for the default mode is unchanged.
    """
    xforms = [
        LoadImaged(keys),
        ScaleIntensityRanged(keys[0], a_min=-1000.0, a_max=500.0, b_min=0.0, b_max=1.0, clip=True),
    ]
    # Always define the cast types (float image, uint8 label).
    dtype = (np.float32, np.uint8)
    xforms.extend([CastToTyped(keys, dtype=dtype)])
    return monai.transforms.Compose(xforms)
| 39.5 | 111 | 0.669661 |
f8ab0286f449987129eeade795e566330ff36d18
| 867 |
py
|
Python
|
api/leaderboard/tests/test_views.py
|
individuo7/wololo-tournaments-api
|
5be6284064373e99346d39c78844e454c41c501d
|
[
"MIT"
] | 2 |
2019-12-09T10:19:36.000Z
|
2020-01-11T11:48:41.000Z
|
api/leaderboard/tests/test_views.py
|
individuo7/wololo-tournaments-api
|
5be6284064373e99346d39c78844e454c41c501d
|
[
"MIT"
] | null | null | null |
api/leaderboard/tests/test_views.py
|
individuo7/wololo-tournaments-api
|
5be6284064373e99346d39c78844e454c41c501d
|
[
"MIT"
] | null | null | null |
import json
import pytest
from unittest import TestCase
from rest_framework.test import APIClient
from ..models import Group, Prediction
| 27.967742 | 63 | 0.704729 |
f8ab70b04aa64ecaf4843be345aba0efec2cfc69
| 414 |
py
|
Python
|
sapextractor/utils/string_matching/distances.py
|
aarkue/sap-meta-explorer
|
613bf657bbaa72a3781a84664e5de7626516532f
|
[
"Apache-2.0"
] | 2 |
2021-02-10T08:09:35.000Z
|
2021-05-21T06:25:34.000Z
|
sapextractor/utils/string_matching/distances.py
|
aarkue/sap-meta-explorer
|
613bf657bbaa72a3781a84664e5de7626516532f
|
[
"Apache-2.0"
] | null | null | null |
sapextractor/utils/string_matching/distances.py
|
aarkue/sap-meta-explorer
|
613bf657bbaa72a3781a84664e5de7626516532f
|
[
"Apache-2.0"
] | 3 |
2021-11-22T13:27:00.000Z
|
2022-03-16T22:08:51.000Z
|
import stringdist
def levenshtein(stru1, stru2):
    """Compute the Levenshtein (edit) distance between two strings.

    Parameters
    ----------
    stru1 : str
        First string.
    stru2 : str
        Second string.

    Returns
    -------
    levens_dist
        The Levenshtein distance, as returned by ``stringdist.levenshtein``.
    """
    # Delegate to the C-backed implementation in the stringdist package.
    levens_dist = stringdist.levenshtein(stru1, stru2)
    return levens_dist
| 16.56 | 57 | 0.601449 |
f8acaa7460d221225a0bd79d4a5ca48dc091b0af
| 2,873 |
py
|
Python
|
components/aws/sagemaker/delete_simulation_app/src/robomaker_delete_simulation_app_spec.py
|
Strasser-Pablo/pipelines
|
a1d513eb412f3ffd44edf82af2fa7edb05c3b952
|
[
"Apache-2.0"
] | 2,860 |
2018-05-24T04:55:01.000Z
|
2022-03-31T13:49:56.000Z
|
components/aws/sagemaker/delete_simulation_app/src/robomaker_delete_simulation_app_spec.py
|
Strasser-Pablo/pipelines
|
a1d513eb412f3ffd44edf82af2fa7edb05c3b952
|
[
"Apache-2.0"
] | 7,331 |
2018-05-16T09:03:26.000Z
|
2022-03-31T23:22:04.000Z
|
components/aws/sagemaker/delete_simulation_app/src/robomaker_delete_simulation_app_spec.py
|
Strasser-Pablo/pipelines
|
a1d513eb412f3ffd44edf82af2fa7edb05c3b952
|
[
"Apache-2.0"
] | 1,359 |
2018-05-15T11:05:41.000Z
|
2022-03-31T09:42:09.000Z
|
"""Specification for the RoboMaker delete. simulation application component."""
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import List
from common.sagemaker_component_spec import SageMakerComponentSpec
from common.common_inputs import (
COMMON_INPUTS,
SageMakerComponentCommonInputs,
SageMakerComponentInput as Input,
SageMakerComponentOutput as Output,
SageMakerComponentBaseOutputs,
SageMakerComponentInputValidator as InputValidator,
SageMakerComponentOutputValidator as OutputValidator,
)
class RoboMakerDeleteSimulationAppSpec(
    SageMakerComponentSpec[
        RoboMakerDeleteSimulationAppInputs, RoboMakerDeleteSimulationAppOutputs
    ]
):
    """Component spec declaring the validated inputs and outputs of the
    RoboMaker delete-simulation-application component."""
    # Component-specific inputs, merged with the shared SageMaker component
    # inputs (COMMON_INPUTS). Only `arn` is required; `version` is optional.
    INPUTS: RoboMakerDeleteSimulationAppInputs = RoboMakerDeleteSimulationAppInputs(
        arn=InputValidator(
            input_type=str,
            required=True,
            description="The Amazon Resource Name (ARN) of the simulation application.",
            default="",
        ),
        version=InputValidator(
            input_type=str,
            required=False,
            description="The version of the simulation application.",
            default=None,
        ),
        **vars(COMMON_INPUTS),
    )
    # Single output: the ARN of the (deleted) simulation application.
    OUTPUTS = RoboMakerDeleteSimulationAppOutputs(
        arn=OutputValidator(
            description="The Amazon Resource Name (ARN) of the simulation application."
        ),
    )
| 32.280899 | 88 | 0.725374 |
f8aeac4c82055a9ca0856652e23d45a0af0bcf39
| 7,595 |
py
|
Python
|
ckanext-hdx_theme/ckanext/hdx_theme/util/jql_queries.py
|
alexandru-m-g/hdx-ckan
|
647f1f23f0505fa195601245b758edcaf4d25985
|
[
"Apache-2.0"
] | null | null | null |
ckanext-hdx_theme/ckanext/hdx_theme/util/jql_queries.py
|
alexandru-m-g/hdx-ckan
|
647f1f23f0505fa195601245b758edcaf4d25985
|
[
"Apache-2.0"
] | null | null | null |
ckanext-hdx_theme/ckanext/hdx_theme/util/jql_queries.py
|
alexandru-m-g/hdx-ckan
|
647f1f23f0505fa195601245b758edcaf4d25985
|
[
"Apache-2.0"
] | null | null | null |
DOWNLOADS_PER_DATASET = '''
/* VER 1.2
used for total downloads from 2016-08-01 which is used to sort datasets by "most downloads" for the "XXX downloads" counter on /search and on each individual dataset
gets all download events and counts occurrences of unique combinations of user, resource, and dataset, and day, then counts the number of occurrences of dataset by week. In other words, if a user downloaded all 3 resources on a dataset 2 different times on the same day (6 total downloads), the result of this query would be 3. It answers the question "What is the total number of downloads of any resource on a given dataset, ignorning repeated downloads from the same user the same day?"*/
function main() {{
return Events({{
from_date: '{}',
to_date: '{}',
event_selectors: [{{event: "resource download"}}]
}})
.groupBy(["distinct_id","properties.resource id","properties.dataset id",mixpanel.numeric_bucket('time',mixpanel.daily_time_buckets)],mixpanel.reducer.count())
.groupBy(["key.2"], mixpanel.reducer.count())
.map(function(r){{
return {{
dataset_id: r.key[0],
value: r.value
}};
}});
}}
'''
PAGEVIEWS_PER_DATASET = '''
/* VER 1.0
gets all page view events and counts the occurrence of each unique dataset. It answers the question "How many times has this dataset page been viewed?"*/
/* Note: as of 12-july-2017, this query fails (or at least doesn't return what is expected), because there are no dataset IDs being sent with the page view event.*/
function main() {{
return Events({{
from_date: '{}',
to_date: '{}',
event_selectors: [{{event: "page view"}}]
}})
.groupBy(["properties.dataset id"],mixpanel.reducer.count())
.map(function(r){{
return {{
dataset_id: r.key[0],
value: r.value
}};
}});
}}
'''
DOWNLOADS_PER_DATASET_PER_WEEK = '''
/* VER 1.0
selects all download events, counts unique combinations of week, user, resource, and dataset, then counts the number of those unique combinations by dataset. That is to say if a single user downloaded 10 different resources two times each (20 total downloads) from a single dataset in a given week, the count returned by this query would be 10*/
function main() {{
return Events({{
from_date: '{}',
to_date: '{}',
event_selectors: [{{event: "resource download"}}]
}})
.groupBy(["distinct_id","properties.resource id","properties.dataset id",mixpanel.numeric_bucket('time',mixpanel.daily_time_buckets)],mixpanel.reducer.count())
.groupBy(["key.2",(mixpanel.numeric_bucket('key.3',mixpanel.weekly_time_buckets))],mixpanel.reducer.count())
.sortAsc(function(row){{return row.key[1]}})
.map(function(r){{
return {{
dataset_id: r.key[0],
date: new Date(r.key[1]).toISOString().substring(0,10),
value: r.value
}};
}});
}}
'''
PAGEVIEWS_PER_ORGANIZATION = '''
/* VER 1.0
gets all page view events and counts unique combinations of user and org. This is to say, if a single user looked at 3 different datasets from a single organization and then looked at the organization page as well (4 total page views), the count returned by this query would be 1. It answers the question "How many individuals looked at one or more of an organization's content."*/
function main() {{
return Events({{
from_date: '{}',
to_date: '{}',
event_selectors: [{{event: "page view"}}]
}})
.groupBy(["distinct_id","properties.org id"],mixpanel.reducer.count())
.groupBy([function(row) {{return row.key.slice(1)}}],mixpanel.reducer.count())
.map(function(r){{
return {{
org_id: r.key[0],
value: r.value
}};
}});
}}
'''
DOWNLOADS_PER_ORGANIZATION = '''
/* VER 1.0
gets all download events and counts unique combinations of user and org. This is to say, if a single user downloaded 5 resources 2 times from datasets belonging to a given organization (10 total downloads), the count returned by this query would be 1. It answers the question "How many individuals one or more resources from an organization's datasets."*/
function main() {{
return Events({{
from_date: '{}',
to_date: '{}',
event_selectors: [{{event: "resource download"}}]
}})
.groupBy(["distinct_id","properties.org id"],mixpanel.reducer.count())
.groupBy([function(row) {{return row.key.slice(1)}}],mixpanel.reducer.count())
.map(function(r){{
return {{
org_id: r.key[0],
value: r.value
}};
}});
}}
'''
PAGEVIEWS_PER_ORGANIZATION_PER_WEEK = '''
/* VER 1.0
gets all page view events and counts unique combinations of week and org. This is to say, if a single user looked at 3 different datasets from a single organization and then looked at the organization page as well (4 total page views) in a given week, the count returned by this query for that week would be 4. It answers the question "How many page views did an organization's content receive in a given week."*/
function main() {{
return Events({{
from_date: '{}',
to_date: '{}',
event_selectors: [{{event: "page view", selector: 'properties["org id"] != ""'}}]
}})
.groupBy(["properties.org id",mixpanel.numeric_bucket('time',mixpanel.weekly_time_buckets)],mixpanel.reducer.count())
.sortAsc(function(row){{return row.key[1]}})
.map(function(r){{
return {{
org_id: r.key[0],
date: new Date(r.key[1]).toISOString().substring(0,10),
value: r.value
}};
}});
}}
'''
DOWNLOADS_PER_ORGANIZATION_PER_WEEK = '''
/* VER 1.0
selects all download events, counts unique combinations of week, user, resource, and org, then counts the number of those unique combinations by org. That is to say if a single user downloaded 10 different resources two times each (20 total downloads) from a given org in a given week, the count returned by this query would be 10*/
function main() {{
return Events({{
from_date: '{}',
to_date: '{}',
event_selectors: [{{event: "resource download"}}]
}})
.groupBy(["distinct_id","properties.resource id","properties.org id",mixpanel.numeric_bucket('time',mixpanel.daily_time_buckets)],mixpanel.reducer.count())
.groupBy(["key.2",(mixpanel.numeric_bucket('key.3',mixpanel.weekly_time_buckets))],mixpanel.reducer.count())
.sortAsc(function(row){{return row.key[1]}})
.map(function(r){{
return {{
org_id: r.key[0],
date: new Date(r.key[1]).toISOString().substring(0,10),
value: r.value
}};
}});
}}
'''
DOWNLOADS_PER_ORGANIZATION_PER_DATASET = '''
/* VER 1.0
unique (by distinct id, resource id, dataset id, org id) downloads by dataset id (24 weeks, used for top downloads on org page)*/
/*selects all download events, counts unique combinations of day, user, resource, dataset, and org, then counts the number of those unique combinations by dataset. That is to say if a single user downloaded 10 different resources two times each (20 total downloads) from a single dataset in a given day (and on no other days), the count returned by this query would be 10*/
function main() {{
return Events({{
from_date: '{}',
to_date: '{}',
event_selectors: [{{event: "resource download"}}]
}})
.groupBy(["distinct_id","properties.resource id",mixpanel.numeric_bucket('time',mixpanel.daily_time_buckets),"properties.dataset id", "properties.org id"],mixpanel.reducer.count())
.groupBy([function(row) {{return row.key.slice(4)}}, function(row) {{return row.key.slice(3)}}],mixpanel.reducer.count())
.map(function(r){{
return {{
org_id: r.key[0],
dataset_id: r.key[1],
value: r.value
}};
}})
.sortDesc('value');
}}
'''
| 42.194444 | 493 | 0.688743 |
f8afd1b0a1d62c5e20c07db83d59c2c494f17348
| 13,343 |
py
|
Python
|
source/rttov_test/profile-datasets-py/div83/077.py
|
bucricket/projectMAScorrection
|
89489026c8e247ec7c364e537798e766331fe569
|
[
"BSD-3-Clause"
] | null | null | null |
source/rttov_test/profile-datasets-py/div83/077.py
|
bucricket/projectMAScorrection
|
89489026c8e247ec7c364e537798e766331fe569
|
[
"BSD-3-Clause"
] | 1 |
2022-03-12T12:19:59.000Z
|
2022-03-12T12:19:59.000Z
|
source/rttov_test/profile-datasets-py/div83/077.py
|
bucricket/projectMAScorrection
|
89489026c8e247ec7c364e537798e766331fe569
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Profile ../profile-datasets-py/div83/077.py
file automaticaly created by prof_gen.py script
"""
self["ID"] = "../profile-datasets-py/div83/077.py"
self["Q"] = numpy.array([ 3.01408100e+00, 3.40341800e+00, 3.94918400e+00,
4.08209300e+00, 4.65722800e+00, 5.59385900e+00,
5.96882400e+00, 5.96578400e+00, 6.02361400e+00,
6.13266200e+00, 5.61561800e+00, 5.17541300e+00,
4.73120800e+00, 4.38244100e+00, 4.13858300e+00,
3.94732400e+00, 3.82339500e+00, 3.74146600e+00,
3.68389600e+00, 3.64322700e+00, 3.61384700e+00,
3.58783700e+00, 3.57544700e+00, 3.57424700e+00,
3.57814700e+00, 3.57652700e+00, 3.56295700e+00,
3.53513800e+00, 3.51090800e+00, 3.50409800e+00,
3.51977800e+00, 3.54417700e+00, 3.53987700e+00,
3.51452800e+00, 3.48830800e+00, 3.47651800e+00,
3.48119800e+00, 3.49274800e+00, 3.50137800e+00,
3.50850800e+00, 3.52815800e+00, 3.56910700e+00,
3.61097700e+00, 3.71830600e+00, 3.89014500e+00,
3.89370500e+00, 3.85655500e+00, 3.87925500e+00,
3.95365400e+00, 4.00917400e+00, 4.16308300e+00,
4.52899900e+00, 5.18923300e+00, 6.26899100e+00,
7.92153700e+00, 1.00846000e+01, 1.24507400e+01,
1.47046800e+01, 1.67259200e+01, 1.84705600e+01,
1.96999100e+01, 2.08678600e+01, 2.23955000e+01,
2.44190000e+01, 2.71340600e+01, 3.11191300e+01,
3.80605500e+01, 4.93422700e+01, 7.03837500e+01,
1.05079000e+02, 1.47056400e+02, 1.80304500e+02,
2.22368500e+02, 2.73803000e+02, 3.33293900e+02,
4.05331600e+02, 4.94623200e+02, 6.04438400e+02,
7.36045800e+02, 8.86931700e+02, 1.05317000e+03,
1.23561100e+03, 1.43888700e+03, 1.66709600e+03,
1.91848200e+03, 2.17581600e+03, 2.42905500e+03,
2.65031700e+03, 2.83038600e+03, 2.95328200e+03,
2.87015800e+03, 2.97041000e+03, 3.22605900e+03,
3.13244700e+03, 3.04276300e+03, 2.95681100e+03,
2.87439400e+03, 2.79532400e+03, 2.71943500e+03,
2.64656700e+03, 2.57657400e+03])
self["P"] = numpy.array([ 5.00000000e-03, 1.61000000e-02, 3.84000000e-02,
7.69000000e-02, 1.37000000e-01, 2.24400000e-01,
3.45400000e-01, 5.06400000e-01, 7.14000000e-01,
9.75300000e-01, 1.29720000e+00, 1.68720000e+00,
2.15260000e+00, 2.70090000e+00, 3.33980000e+00,
4.07700000e+00, 4.92040000e+00, 5.87760000e+00,
6.95670000e+00, 8.16550000e+00, 9.51190000e+00,
1.10038000e+01, 1.26492000e+01, 1.44559000e+01,
1.64318000e+01, 1.85847000e+01, 2.09224000e+01,
2.34526000e+01, 2.61829000e+01, 2.91210000e+01,
3.22744000e+01, 3.56505000e+01, 3.92566000e+01,
4.31001000e+01, 4.71882000e+01, 5.15278000e+01,
5.61260000e+01, 6.09895000e+01, 6.61253000e+01,
7.15398000e+01, 7.72396000e+01, 8.32310000e+01,
8.95204000e+01, 9.61138000e+01, 1.03017000e+02,
1.10237000e+02, 1.17778000e+02, 1.25646000e+02,
1.33846000e+02, 1.42385000e+02, 1.51266000e+02,
1.60496000e+02, 1.70078000e+02, 1.80018000e+02,
1.90320000e+02, 2.00989000e+02, 2.12028000e+02,
2.23442000e+02, 2.35234000e+02, 2.47408000e+02,
2.59969000e+02, 2.72919000e+02, 2.86262000e+02,
3.00000000e+02, 3.14137000e+02, 3.28675000e+02,
3.43618000e+02, 3.58966000e+02, 3.74724000e+02,
3.90893000e+02, 4.07474000e+02, 4.24470000e+02,
4.41882000e+02, 4.59712000e+02, 4.77961000e+02,
4.96630000e+02, 5.15720000e+02, 5.35232000e+02,
5.55167000e+02, 5.75525000e+02, 5.96306000e+02,
6.17511000e+02, 6.39140000e+02, 6.61192000e+02,
6.83667000e+02, 7.06565000e+02, 7.29886000e+02,
7.53628000e+02, 7.77790000e+02, 8.02371000e+02,
8.27371000e+02, 8.52788000e+02, 8.78620000e+02,
9.04866000e+02, 9.31524000e+02, 9.58591000e+02,
9.86067000e+02, 1.01395000e+03, 1.04223000e+03,
1.07092000e+03, 1.10000000e+03])
self["CO2"] = numpy.array([ 376.9289, 376.9267, 376.9235, 376.9185, 376.9102, 376.8979,
376.8878, 376.8858, 376.8987, 376.9157, 376.9379, 376.967 ,
377.0032, 377.0483, 377.0994, 377.1415, 377.1806, 377.2196,
377.2566, 377.2936, 377.3406, 377.4116, 377.4967, 377.5807,
377.6576, 377.7326, 377.7957, 377.8617, 377.9087, 377.9647,
378.0677, 378.1777, 378.3247, 378.4827, 378.5467, 378.5667,
378.6177, 378.7377, 378.8647, 379.1987, 379.5707, 379.8876,
380.1336, 380.3936, 380.7865, 381.1975, 381.5395, 381.8335,
382.1145, 382.2825, 382.4564, 382.5853, 382.705 , 382.8186,
382.922 , 383.0401, 383.2172, 383.4014, 383.6806, 383.9769,
384.2704, 384.569 , 384.8374, 385.0826, 385.3235, 385.554 ,
385.7733, 385.971 , 386.1468, 386.2794, 386.3942, 386.4873,
386.57 , 386.6411, 386.7101, 386.7782, 386.8426, 386.902 ,
386.948 , 386.9845, 387.009 , 387.0222, 387.0443, 387.0946,
387.2027, 387.3873, 387.5952, 387.7785, 388.0156, 388.3936,
388.8278, 389.5115, 390.0167, 390.5358, 390.9229, 391.1729,
391.2791, 391.3101, 391.3399, 391.3685, 391.3959])
self["CO"] = numpy.array([ 0.4988025 , 0.4837694 , 0.4549212 , 0.4091083 , 0.3466384 ,
0.2724125 , 0.2529705 , 0.3556049 , 0.3436299 , 0.3118041 ,
0.2360657 , 0.1332083 , 0.06529029, 0.04917818, 0.04630671,
0.04344553, 0.04531133, 0.04861692, 0.05090421, 0.05133911,
0.05167021, 0.04959452, 0.04651663, 0.04325405, 0.04006766,
0.03693337, 0.03511297, 0.03345558, 0.03285768, 0.03228319,
0.03236039, 0.03244319, 0.03296888, 0.03355668, 0.03477178,
0.03638897, 0.03844617, 0.04135936, 0.04467554, 0.05192792,
0.06128788, 0.07390404, 0.09132347, 0.1136636 , 0.1258555 ,
0.1400065 , 0.1455584 , 0.1438834 , 0.1412344 , 0.1340595 ,
0.1269835 , 0.1253564 , 0.1252244 , 0.1268982 , 0.131127 ,
0.1360076 , 0.1437272 , 0.1521708 , 0.1615463 , 0.1718538 ,
0.1819464 , 0.192305 , 0.2043344 , 0.2183397 , 0.2317967 ,
0.2427394 , 0.2529074 , 0.2590382 , 0.2647594 , 0.2679568 ,
0.2707162 , 0.2713961 , 0.2720315 , 0.2725024 , 0.2734588 ,
0.2752834 , 0.2772678 , 0.279404 , 0.2813058 , 0.2830447 ,
0.2835061 , 0.2839138 , 0.2842594 , 0.2847725 , 0.2852078 ,
0.2855673 , 0.2859427 , 0.2860628 , 0.2855026 , 0.2845641 ,
0.2836705 , 0.2825133 , 0.28085 , 0.2789235 , 0.2766895 ,
0.2739117 , 0.2711194 , 0.2683139 , 0.265496 , 0.262667 ,
0.2598288 ])
self["T"] = numpy.array([ 197.478, 204.431, 217.518, 232.024, 240.06 , 241.488,
237.615, 229.648, 222.059, 220.002, 221.016, 222.004,
224.079, 226.704, 229.151, 230.726, 232.026, 233.278,
234.389, 235.37 , 236.325, 237.27 , 238.285, 239.125,
239.562, 239.408, 239.074, 238.623, 237.788, 236.618,
235.366, 234.287, 233.649, 232.492, 231.082, 230.178,
230.011, 230.065, 229.721, 228.916, 227.9 , 226.942,
226.202, 225.266, 224.187, 223.613, 222.971, 222.094,
221.208, 220.74 , 220.537, 220.284, 219.887, 219.382,
218.843, 218.328, 217.832, 217.287, 216.618, 215.885,
215.461, 215.505, 215.981, 216.806, 217.881, 219.112,
220.38 , 221.707, 223.156, 224.773, 226.616, 228.678,
230.824, 232.99 , 235.092, 237.21 , 239.397, 241.632,
243.87 , 246.04 , 248.097, 250.061, 251.959, 253.803,
255.582, 257.221, 258.709, 259.968, 261.008, 261.803,
262.785, 263.963, 265.207, 265.207, 265.207, 265.207,
265.207, 265.207, 265.207, 265.207, 265.207])
self["N2O"] = numpy.array([ 0.00386999, 0.00306999, 0.00246999, 0.00239999, 0.00190999,
0.00132999, 0.00145999, 0.00159999, 0.00196999, 0.00296998,
0.00447997, 0.00696996, 0.01015995, 0.01528993, 0.02053991,
0.02663989, 0.03359987, 0.04270984, 0.05238981, 0.06860975,
0.0840797 , 0.1006696 , 0.1174596 , 0.1335495 , 0.1466895 ,
0.1590294 , 0.1708994 , 0.1853993 , 0.2003893 , 0.2148792 ,
0.2275592 , 0.2346592 , 0.2415291 , 0.2481891 , 0.2543991 ,
0.2593091 , 0.2640791 , 0.2687091 , 0.272769 , 0.276369 ,
0.279779 , 0.282829 , 0.285849 , 0.2887289 , 0.2917289 ,
0.2947289 , 0.2977089 , 0.3006588 , 0.3035488 , 0.3063688 ,
0.3090987 , 0.3116986 , 0.3141584 , 0.316448 , 0.3185375 ,
0.3203968 , 0.321996 , 0.3226453 , 0.3232446 , 0.323774 ,
0.3242236 , 0.3245932 , 0.3248727 , 0.3250421 , 0.3251012 ,
0.3250999 , 0.3250976 , 0.325094 , 0.3250871 , 0.3250758 ,
0.3250622 , 0.3250514 , 0.3250377 , 0.325021 , 0.3250016 ,
0.3249782 , 0.3249492 , 0.3249135 , 0.3248707 , 0.3248216 ,
0.3247676 , 0.3247083 , 0.3246422 , 0.324568 , 0.3244863 ,
0.3244026 , 0.3243203 , 0.3242484 , 0.3241898 , 0.3241499 ,
0.3241769 , 0.3241443 , 0.3240612 , 0.3240916 , 0.3241208 ,
0.3241487 , 0.3241755 , 0.3242012 , 0.3242259 , 0.3242496 ,
0.3242723 ])
self["O3"] = numpy.array([ 0.4650166 , 0.3722967 , 0.25801 , 0.3565255 , 0.5657804 ,
0.8310854 , 1.275442 , 1.941668 , 2.751043 , 3.509408 ,
4.226426 , 4.982314 , 5.571684 , 5.950054 , 6.172944 ,
6.354005 , 6.459525 , 6.548995 , 6.644776 , 6.735845 ,
6.790695 , 6.827586 , 6.854715 , 6.862505 , 6.844556 ,
6.799816 , 6.769176 , 6.719646 , 6.545257 , 6.195258 ,
5.70893 , 5.274831 , 4.976922 , 4.668544 , 4.294395 ,
3.893666 , 3.566958 , 3.348398 , 3.139029 , 2.84919 ,
2.451261 , 2.077143 , 1.888043 , 1.731914 , 1.409245 ,
1.303475 , 1.292365 , 1.148696 , 0.9682352 , 0.8581456 ,
0.728699 , 0.5758034 , 0.4443007 , 0.3535498 , 0.2938877 ,
0.2527305 , 0.2229512 , 0.1999561 , 0.178937 , 0.1568281 ,
0.1353713 , 0.1190565 , 0.1065156 , 0.09549277, 0.08532788,
0.07889094, 0.07722016, 0.07431793, 0.06841198, 0.05924887,
0.04912847, 0.04243675, 0.03862361, 0.03721341, 0.03721039,
0.03694232, 0.03630673, 0.03556769, 0.03509875, 0.03521114,
0.03578028, 0.03636931, 0.03676782, 0.03692234, 0.03683649,
0.03651109, 0.03619457, 0.03624089, 0.03710717, 0.03880576,
0.04014684, 0.03936333, 0.03682412, 0.03682758, 0.03683089,
0.03683407, 0.03683711, 0.03684003, 0.03684284, 0.03684553,
0.03684811])
self["CH4"] = numpy.array([ 0.3005231, 0.2351152, 0.1864963, 0.1572414, 0.1760812,
0.1975499, 0.2255547, 0.2531465, 0.2866593, 0.31921 ,
0.3721429, 0.4494437, 0.5428354, 0.6650551, 0.7814038,
0.8957425, 0.9909472, 1.054496 , 1.115956 , 1.182386 ,
1.245675 , 1.300155 , 1.349285 , 1.396355 , 1.424055 ,
1.448405 , 1.471845 , 1.501435 , 1.532305 , 1.537285 ,
1.542625 , 1.548345 , 1.554454 , 1.551955 , 1.549555 ,
1.547285 , 1.545195 , 1.543345 , 1.549515 , 1.556025 ,
1.562874 , 1.570084 , 1.577644 , 1.617574 , 1.657994 ,
1.700273 , 1.730303 , 1.751553 , 1.771163 , 1.779313 ,
1.787803 , 1.793322 , 1.798201 , 1.802289 , 1.805406 ,
1.808492 , 1.811087 , 1.813773 , 1.81631 , 1.818906 ,
1.821484 , 1.824092 , 1.826669 , 1.829225 , 1.83172 ,
1.834133 , 1.83637 , 1.838309 , 1.84005 , 1.841406 ,
1.842689 , 1.843857 , 1.84505 , 1.846294 , 1.847644 ,
1.84909 , 1.850584 , 1.85209 , 1.853525 , 1.854883 ,
1.856113 , 1.857182 , 1.858302 , 1.859615 , 1.861771 ,
1.864195 , 1.866575 , 1.868445 , 1.869743 , 1.871178 ,
1.873019 , 1.875702 , 1.877623 , 1.879713 , 1.881288 ,
1.882308 , 1.882753 , 1.882902 , 1.883045 , 1.883183 ,
1.883315 ])
self["CTP"] = 500.0
self["CFRACTION"] = 0.0
self["IDG"] = 0
self["ISH"] = 0
self["ELEVATION"] = 0.0
self["S2M"]["T"] = 265.207
self["S2M"]["Q"] = 2576.57411645
self["S2M"]["O"] = 0.0368481128494
self["S2M"]["P"] = 876.30151
self["S2M"]["U"] = 0.0
self["S2M"]["V"] = 0.0
self["S2M"]["WFETC"] = 100000.0
self["SKIN"]["SURFTYPE"] = 0
self["SKIN"]["WATERTYPE"] = 1
self["SKIN"]["T"] = 265.207
self["SKIN"]["SALINITY"] = 35.0
self["SKIN"]["FOAM_FRACTION"] = 0.0
self["SKIN"]["FASTEM"] = numpy.array([ 3. , 5. , 15. , 0.1, 0.3])
self["ZENANGLE"] = 0.0
self["AZANGLE"] = 0.0
self["SUNZENANGLE"] = 0.0
self["SUNAZANGLE"] = 0.0
self["LATITUDE"] = 60.824
self["GAS_UNITS"] = 2
self["BE"] = 0.0
self["COSBK"] = 0.0
self["DATE"] = numpy.array([2006, 12, 10])
self["TIME"] = numpy.array([0, 0, 0])
| 57.512931 | 92 | 0.566739 |
f8b003880b2b0c817a1e02d7db8475b7ea56eada
| 2,624 |
py
|
Python
|
xos/synchronizers/monitoring_channel/templates/sflow_pub_sub/sflow_sub_records.py
|
xmaruto/mcord
|
3678a3d10c3703c2b73f396c293faebf0c82a4f4
|
[
"Apache-2.0"
] | null | null | null |
xos/synchronizers/monitoring_channel/templates/sflow_pub_sub/sflow_sub_records.py
|
xmaruto/mcord
|
3678a3d10c3703c2b73f396c293faebf0c82a4f4
|
[
"Apache-2.0"
] | null | null | null |
xos/synchronizers/monitoring_channel/templates/sflow_pub_sub/sflow_sub_records.py
|
xmaruto/mcord
|
3678a3d10c3703c2b73f396c293faebf0c82a4f4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import fnmatch
import logging
sflow_sub_database=[]
| 41 | 91 | 0.62843 |
f8b2fa45ad6aa0b508fe2d6b2b81fce66e566e4c
| 3,148 |
py
|
Python
|
scripts/gcorr/run_xfaster.py
|
SPIDER-CMB/xfaster
|
1b8e56d775f2c3a8693d1372ae461392c21da7ca
|
[
"MIT"
] | 1 |
2021-03-25T14:15:44.000Z
|
2021-03-25T14:15:44.000Z
|
scripts/gcorr/run_xfaster.py
|
annegambrel/xfaster
|
03d5a2971d3cc19ae360d78995e3575f3f678d6e
|
[
"MIT"
] | 7 |
2021-04-20T23:34:38.000Z
|
2021-08-24T00:00:53.000Z
|
scripts/gcorr/run_xfaster.py
|
SPIDER-CMB/xfaster
|
1b8e56d775f2c3a8693d1372ae461392c21da7ca
|
[
"MIT"
] | 1 |
2021-05-18T16:43:54.000Z
|
2021-05-18T16:43:54.000Z
|
"""
A script to run XFaster for gcorr calculation. Called by iterate.py.
"""
import os
import xfaster as xf
import argparse as ap
from configparser import ConfigParser
# Change XFaster options here to suit your purposes
opts = dict(
likelihood=False,
residual_fit=False,
foreground_fit=False,
# change options below for your purposes
tbeb=True,
bin_width=25,
lmin=2,
lmax=500,
)
# Change submit options here to fit your system
submit_opts = dict(nodes=1, ppn=1, mem=6, omp_threads=10, wallt=4)
P = ap.ArgumentParser()
P.add_argument("--gcorr-config", help="The config file for gcorr computation")
P.add_argument("-f", "--first", default=0, type=int, help="First sim index to run")
P.add_argument("-n", "--num", default=1, type=int, help="Number of sims to run")
P.add_argument(
"-o", "--output", default="xfaster_gcal", help="Name of output subdirectory"
)
P.add_argument(
"--no-gcorr",
dest="gcorr",
default=True,
action="store_false",
help="Don't apply a g-gcorrection",
)
P.add_argument(
"--reload-gcorr", default=False, action="store_true", help="Reload the gcorr factor"
)
P.add_argument("--check-point", default="bandpowers", help="XFaster checkpoint")
P.add_argument(
"--no-submit", dest="submit", action="store_false", help="Don't submit, run locally"
)
P.add_argument(
"--omp",
default=None,
type=int,
help="Number of omp threads, if submit. Overwrites value in config file",
)
args = P.parse_args()
# start by loading up gcorr config file and parsing it
assert os.path.exists(args.gcorr_config), "Missing config file {}".format(
args.gcorr_config
)
g_cfg = ConfigParser()
g_cfg.read(args.gcorr_config)
# set all user-specific xfaster opts
for k, v in g_cfg["xfaster_opts"].items():
opts[k] = v
null = g_cfg.getboolean("gcorr_opts", "null")
tags = g_cfg["gcorr_opts"]["map_tags"].split(",")
# null tests should use noise sims. signal shouldn't.
if null:
opts["noise_type"] = g_cfg["xfaster_opts"]["noise_type"]
opts["sim_data_components"] = ["signal", "noise"]
else:
opts["noise_type"] = None
opts["sim_data_components"] = ["signal"]
opts["output_root"] = os.path.join(g_cfg["gcorr_opts"]["output_root"], args.output)
# update opts with command line args
opts["apply_gcorr"] = args.gcorr
opts["reload_gcorr"] = args.reload_gcorr
opts["checkpoint"] = args.check_point
seeds = list(range(args.first, args.first + args.num))
for tag in tags:
opts["sim_data"] = True
opts["output_tag"] = tag
opts["gcorr_file"] = os.path.abspath(
os.path.join(
g_cfg["gcorr_opts"]["output_root"],
"xfaster_gcal",
tag,
"gcorr_{}_total.npz".format(tag),
)
)
opts["data_subset"] = os.path.join(
g_cfg["gcorr_opts"]["data_subset"], "*{}".format(tag)
)
if args.omp is not None:
submit_opts["omp_threads"] = args.omp
if args.submit:
opts.update(**submit_opts)
for s in seeds:
opts["sim_index_default"] = s
if args.submit:
xf.xfaster_submit(**opts)
else:
xf.xfaster_run(**opts)
| 28.618182 | 88 | 0.661055 |
f8b309e5e28868df32235aef95ba627c1ca50e48
| 1,888 |
py
|
Python
|
tests/examples/c_decisions/tests_decisions.py
|
MSGP117/acc-cosc-1336-spring-2022-MSGP117
|
46fdfa5da8f8eb887d2c79fe205b8a0064d6903d
|
[
"MIT"
] | null | null | null |
tests/examples/c_decisions/tests_decisions.py
|
MSGP117/acc-cosc-1336-spring-2022-MSGP117
|
46fdfa5da8f8eb887d2c79fe205b8a0064d6903d
|
[
"MIT"
] | null | null | null |
tests/examples/c_decisions/tests_decisions.py
|
MSGP117/acc-cosc-1336-spring-2022-MSGP117
|
46fdfa5da8f8eb887d2c79fe205b8a0064d6903d
|
[
"MIT"
] | 1 |
2022-02-12T03:50:32.000Z
|
2022-02-12T03:50:32.000Z
|
import unittest
from src.examples.c_decisions.decisions import is_letter_consonant, logical_op_precedence, num_is_not_in_range_or, number_is_in_range_and, test_config
from src.examples.c_decisions.decisions import get_letter_grade
from src.examples.c_decisions.decisions import logical_op_precedence
from src.examples.c_decisions.decisions import number_is_not_in_range
| 46.04878 | 150 | 0.743114 |
f8b46b6ff72b56497017d6f934899df81b96c51a
| 32 |
py
|
Python
|
badge/__init__.py
|
krisgesling/swag-badge-skill
|
7640264880d8ae14f9c49c3ba40c6e388e58dcaf
|
[
"Apache-2.0"
] | 1 |
2021-01-24T01:42:15.000Z
|
2021-01-24T01:42:15.000Z
|
badge/__init__.py
|
krisgesling/swag-badge-skill
|
7640264880d8ae14f9c49c3ba40c6e388e58dcaf
|
[
"Apache-2.0"
] | null | null | null |
badge/__init__.py
|
krisgesling/swag-badge-skill
|
7640264880d8ae14f9c49c3ba40c6e388e58dcaf
|
[
"Apache-2.0"
] | null | null | null |
from .client import MQTT_Client
| 16 | 31 | 0.84375 |
f8b5ae0ccaf93b252b0712f888f73a49ece568a6
| 23,824 |
py
|
Python
|
easy_server/_server_file.py
|
andy-maier/secureserveraccess
|
24f4817b2066401451840b3c7b308e1792eb3e60
|
[
"Apache-2.0"
] | 1 |
2021-03-29T22:09:47.000Z
|
2021-03-29T22:09:47.000Z
|
easy_server/_server_file.py
|
andy-maier/secureserveraccess
|
24f4817b2066401451840b3c7b308e1792eb3e60
|
[
"Apache-2.0"
] | 49 |
2021-03-29T20:13:28.000Z
|
2021-05-01T10:38:19.000Z
|
easy_server/_server_file.py
|
andy-maier/secureserveraccess
|
24f4817b2066401451840b3c7b308e1792eb3e60
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Support for server files.
"""
from __future__ import absolute_import, print_function
import os
import yaml
import jsonschema
from ._server import Server
from ._vault_file import VaultFile
__all__ = ['ServerFile', 'ServerFileException',
'ServerFileOpenError', 'ServerFileFormatError',
'ServerFileUserDefinedFormatError',
'ServerFileUserDefinedSchemaError',
'ServerFileGroupUserDefinedFormatError',
'ServerFileGroupUserDefinedSchemaError']
# JSON schema describing the structure of the server files
SERVER_FILE_SCHEMA = {
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "JSON schema for easy-server server files",
"definitions": {},
"type": "object",
"required": [
"servers",
],
"additionalProperties": False,
"properties": {
"vault_file": {
"type": "string",
"description":
"Path name of vault file. Relative path names are relative to "
"the directory of the server file",
},
"servers": {
"type": "object",
"description": "The servers in the server file",
"additionalProperties": False,
"patternProperties": {
"^[a-zA-Z0-9_]+$": {
"type": "object",
"description": "Nickname of the server",
"required": [
"description",
],
"additionalProperties": False,
"properties": {
"description": {
"type": "string",
"description": "Short description of the server",
},
"contact_name": {
"type": "string",
"description":
"Name of technical contact for the server",
},
"access_via": {
"type": "string",
"description":
"Short reminder on the "
"network/firewall/proxy/vpn used to access the "
"server",
},
"user_defined": {
"type": "object",
"description":
"User-defined properties of the server. "
"This object can have an arbitrary "
"user-defined structure",
},
},
},
},
},
"server_groups": {
"type": "object",
"description": "The server groups in the server file",
"additionalProperties": False,
"patternProperties": {
"^[a-zA-Z0-9_]+$": {
"type": "object",
"description": "Nickname of the server group",
"required": [
"description",
"members",
],
"additionalProperties": False,
"properties": {
"description": {
"type": "string",
"description":
"Short description of the server group",
},
"members": {
"type": "array",
"description":
"List of members of the server group. "
"Those can be servers or other server groups.",
"items": {
"type": "string",
"description":
"Nickname of server or server group in "
"this file",
},
},
"user_defined": {
"type": "object",
"description":
"User-defined properties of the server group. "
"This object can have an arbitrary "
"user-defined structure",
},
},
},
},
},
"default": {
"type": "string",
"description": "Nickname of default server or server group",
},
},
}
def get_server(self, nickname):
"""
Get server for a given server nickname.
Parameters:
nickname (:term:`unicode string`): Server nickname.
Returns:
:class:`~easy_server.Server`:
Server with the specified nickname.
Raises:
:exc:`py:KeyError`: Nickname not found
"""
try:
server_dict = self._servers[nickname]
except KeyError:
new_exc = KeyError(
"Server with nickname {!r} not found in server "
"file {!r}".
format(nickname, self._filepath))
new_exc.__cause__ = None
raise new_exc # KeyError
if self._vault:
try:
secrets_dict = self._vault.get_secrets(nickname)
except KeyError:
secrets_dict = None
else:
secrets_dict = None
return Server(nickname, server_dict, secrets_dict)
def list_servers(self, nickname):
"""
List the servers for a given server or server group nickname.
Parameters:
nickname (:term:`unicode string`): Server or server group nickname.
Returns:
list of :class:`~easy_server.Server`:
List of servers.
Raises:
:exc:`py:KeyError`: Nickname not found
"""
if nickname in self._servers:
return [self.get_server(nickname)]
if nickname in self._server_groups:
sd_list = list() # of Server objects
sd_nick_list = list() # of server nicknames
sg_item = self._server_groups[nickname]
for member_nick in sg_item['members']:
member_sds = self.list_servers(member_nick)
for sd in member_sds:
if sd.nickname not in sd_nick_list:
sd_nick_list.append(sd.nickname)
sd_list.append(sd)
return sd_list
raise KeyError(
"Server or server group with nickname {!r} not found in server "
"definition file {!r}".
format(nickname, self._filepath))
def list_default_servers(self):
"""
List the servers for the default server or group.
An omitted 'default' element in the server file results in
an empty list.
Returns:
list of :class:`~easy_server.Server`:
List of servers.
"""
if self._default is None:
return []
return self.list_servers(self._default)
def list_all_servers(self):
"""
List all servers.
Returns:
list of :class:`~easy_server.Server`:
List of servers.
"""
return [self.get_server(nickname) for nickname in self._servers]
def _load_server_file(
filepath, user_defined_schema=None, group_user_defined_schema=None):
"""
Load the server file, validate its format and default some
optional elements.
Returns:
dict: Python dict representing the file content.
Raises:
ServerFileOpenError: Error opening server file
ServerFileFormatError: Invalid server file content
ServerFileUserDefinedFormatError: Invalid format of user-defined
portion of server items in the server file
ServerFileUserDefinedSchemaError: Invalid JSON schema for validating
user-defined portion of server items in the server file
ServerFileGroupUserDefinedFormatError: Invalid format of user-defined
portion of group items in the server file
ServerFileGroupUserDefinedSchemaError: Invalid JSON schema for
validating user-defined portion of group items in the server file
"""
# Load the server file (YAML)
try:
with open(filepath, 'r') as fp:
data = yaml.safe_load(fp)
except (OSError, IOError) as exc:
new_exc = ServerFileOpenError(
"Cannot open server file: {fn}: {exc}".
format(fn=filepath, exc=exc))
new_exc.__cause__ = None
raise new_exc # ServerFileOpenError
except yaml.YAMLError as exc:
new_exc = ServerFileFormatError(
"Invalid YAML syntax in server file {fn}: {exc}".
format(fn=filepath, exc=exc))
new_exc.__cause__ = None
raise new_exc # ServerFileFormatError
# Schema validation of server file content
try:
jsonschema.validate(data, SERVER_FILE_SCHEMA)
# Raises jsonschema.exceptions.SchemaError if JSON schema is invalid
except jsonschema.exceptions.ValidationError as exc:
if exc.absolute_path:
elem_str = "element '{}'". \
format('.'.join(str(e) for e in exc.absolute_path))
else:
elem_str = 'top-level element'
new_exc = ServerFileFormatError(
"Invalid format in server file {fn}: Validation "
"failed on {elem}: {exc}".
format(fn=filepath, elem=elem_str, exc=exc))
new_exc.__cause__ = None
raise new_exc # ServerFileFormatError
# Establish defaults for optional top-level elements
if 'server_groups' not in data:
data['server_groups'] = {}
if 'default' not in data:
data['default'] = None
if 'vault_file' not in data:
data['vault_file'] = None
# Schema validation of user-defined portion of server items
if user_defined_schema:
for server_nick, server_item in data['servers'].items():
user_defined = server_item.get('user_defined', None)
if user_defined is None:
new_exc = ServerFileUserDefinedFormatError(
"Missing user_defined element for server {srv} "
"in server file {fn}".
format(srv=server_nick, fn=filepath))
new_exc.__cause__ = None
raise new_exc # ServerFileUserDefinedFormatError
try:
jsonschema.validate(user_defined, user_defined_schema)
except jsonschema.exceptions.SchemaError as exc:
new_exc = ServerFileUserDefinedSchemaError(
"Invalid JSON schema for validating user-defined portion "
"of server items in server file: {exc}".
format(exc=exc))
new_exc.__cause__ = None
raise new_exc # ServerFileUserDefinedSchemaError
except jsonschema.exceptions.ValidationError as exc:
if exc.absolute_path:
elem_str = "element '{}'". \
format('.'.join(str(e) for e in exc.absolute_path))
else:
elem_str = "top-level of user-defined item"
new_exc = ServerFileUserDefinedFormatError(
"Invalid format in user-defined portion of item for "
"server {srv} in server file {fn}: "
"Validation failed on {elem}: {exc}".
format(srv=server_nick, fn=filepath, elem=elem_str,
exc=exc))
new_exc.__cause__ = None
raise new_exc # ServerFileUserDefinedFormatError
# Schema validation of user-defined portion of group items
if group_user_defined_schema:
for group_nick, group_item in data['server_groups'].items():
user_defined = group_item.get('user_defined', None)
if user_defined is None:
new_exc = ServerFileGroupUserDefinedFormatError(
"Missing user_defined element for group {grp} "
"in server file {fn}".
format(grp=group_nick, fn=filepath))
new_exc.__cause__ = None
raise new_exc # ServerFileGroupUserDefinedFormatError
try:
jsonschema.validate(user_defined, group_user_defined_schema)
except jsonschema.exceptions.SchemaError as exc:
new_exc = ServerFileGroupUserDefinedSchemaError(
"Invalid JSON schema for validating user-defined portion "
"of group items in server file: {exc}".
format(exc=exc))
new_exc.__cause__ = None
raise new_exc # ServerFileGroupUserDefinedSchemaError
except jsonschema.exceptions.ValidationError as exc:
if exc.absolute_path:
elem_str = "element '{}'". \
format('.'.join(str(e) for e in exc.absolute_path))
else:
elem_str = "top-level of user-defined item"
new_exc = ServerFileGroupUserDefinedFormatError(
"Invalid format in user-defined portion of item for "
"group {grp} in server file {fn}: "
"Validation failed on {elem}: {exc}".
format(grp=group_nick, fn=filepath, elem=elem_str,
exc=exc))
new_exc.__cause__ = None
raise new_exc # ServerFileGroupUserDefinedFormatError
# Check dependencies in the file
server_nicks = list(data['servers'].keys())
group_nicks = list(data['server_groups'].keys())
all_nicks = server_nicks + group_nicks
default_nick = data['default']
if default_nick and default_nick not in all_nicks:
new_exc = ServerFileFormatError(
"Default nickname '{n}' not found in servers or groups in "
"server file {fn}".
format(n=default_nick, fn=filepath))
new_exc.__cause__ = None
raise new_exc # ServerFileFormatError
for group_nick in group_nicks:
sg_item = data['server_groups'][group_nick]
for member_nick in sg_item['members']:
if member_nick not in all_nicks:
new_exc = ServerFileFormatError(
"Nickname '{n}' in server group '{g}' not found in "
"servers or groups in server file {fn}".
format(n=member_nick, g=group_nick, fn=filepath))
new_exc.__cause__ = None
raise new_exc # ServerFileFormatError
return data
| 37.815873 | 80 | 0.580423 |
f8b628877707fa6132110ae047367879935e3268
| 444 |
py
|
Python
|
mezzanine_pagedown/defaults.py
|
eht16/mezzanine-pagedown
|
e6090f5713097e664e85b279a4a17febb73b00a1
|
[
"BSD-2-Clause"
] | 94 |
2015-01-28T15:46:02.000Z
|
2020-11-02T12:56:15.000Z
|
mezzanine_pagedown/defaults.py
|
eht16/mezzanine-pagedown
|
e6090f5713097e664e85b279a4a17febb73b00a1
|
[
"BSD-2-Clause"
] | 56 |
2015-04-05T03:18:41.000Z
|
2021-08-29T00:50:57.000Z
|
mezzanine_pagedown/defaults.py
|
eht16/mezzanine-pagedown
|
e6090f5713097e664e85b279a4a17febb73b00a1
|
[
"BSD-2-Clause"
] | 38 |
2015-08-26T08:10:12.000Z
|
2021-06-11T19:36:31.000Z
|
from mezzanine.conf import register_setting
register_setting(
name="PAGEDOWN_SERVER_SIDE_PREVIEW",
description="Render previews on the server using the same "
"converter that generates the actual pages.",
editable=False,
default=False,
)
register_setting(
name="PAGEDOWN_MARKDOWN_EXTENSIONS",
description="A tuple specifying enabled python-markdown extensions.",
editable=False,
default=(),
)
| 24.666667 | 73 | 0.725225 |
f8b864241fa615529ec19943c7bf44bcc5c33cfb
| 4,274 |
py
|
Python
|
solutions/2021/day5/day5.py
|
teije01/adventofcode
|
2742985f8437e9784e7ec5430e3846a755b5d386
|
[
"MIT"
] | null | null | null |
solutions/2021/day5/day5.py
|
teije01/adventofcode
|
2742985f8437e9784e7ec5430e3846a755b5d386
|
[
"MIT"
] | null | null | null |
solutions/2021/day5/day5.py
|
teije01/adventofcode
|
2742985f8437e9784e7ec5430e3846a755b5d386
|
[
"MIT"
] | null | null | null |
"""
--- Day 5: Hydrothermal Venture ---
You come across a field of hydrothermal vents on the ocean floor! These vents constantly produce
large, opaque clouds, so it would be best to avoid them if possible.
They tend to form in lines; the submarine helpfully produces a list of nearby lines of vents (your
puzzle input) for you to review. For example:
0,9 -> 5,9
8,0 -> 0,8
9,4 -> 3,4
2,2 -> 2,1
7,0 -> 7,4
6,4 -> 2,0
0,9 -> 2,9
3,4 -> 1,4
0,0 -> 8,8
5,5 -> 8,2
Each line of vents is given as a line segment in the format x1,y1 -> x2,y2 where x1,y1 are the
coordinates of one end the line segment and x2,y2 are the coordinates of the other end. These line
segments include the points at both ends. In other words:
An entry like 1,1 -> 1,3 covers points 1,1, 1,2, and 1,3.
An entry like 9,7 -> 7,7 covers points 9,7, 8,7, and 7,7.
For now, only consider horizontal and vertical lines: lines where either x1 = x2 or y1 = y2.
So, the horizontal and vertical lines from the above list would produce the following diagram:
.......1..
..1....1..
..1....1..
.......1..
.112111211
..........
..........
..........
..........
222111....
In this diagram, the top left corner is 0,0 and the bottom right corner is 9,9. Each position is
shown as the number of lines which cover that point or . if no line covers that point. The top-left
pair of 1s, for example, comes from 2,2 -> 2,1; the very bottom row is formed by the overlapping
lines 0,9 -> 5,9 and 0,9 -> 2,9.
To avoid the most dangerous areas, you need to determine the number of points where at least two
lines overlap. In the above example, this is anywhere in the diagram with a 2 or larger - a total
of 5 points.
Consider only horizontal and vertical lines. At how many points do at least two lines overlap?
--- Part Two ---
Unfortunately, considering only horizontal and vertical lines doesn't give you the full picture;
you need to also consider diagonal lines.
Because of the limits of the hydrothermal vent mapping system, the lines in your list will only
ever be horizontal, vertical, or a diagonal line at exactly 45 degrees. In other words:
An entry like 1,1 -> 3,3 covers points 1,1, 2,2, and 3,3.
An entry like 9,7 -> 7,9 covers points 9,7, 8,8, and 7,9.
Considering all lines from the above example would now produce the following diagram:
1.1....11.
.111...2..
..2.1.111.
...1.2.2..
.112313211
...1.2....
..1...1...
.1.....1..
1.......1.
222111....
You still need to determine the number of points where at least two lines overlap. In the above
example, this is still anywhere in the diagram with a 2 or larger - now a total of 12 points.
Consider all of the lines. At how many points do at least two lines overlap?
"""
import numpy as np
if __name__ == "__main__":
with open("solutions/2021/day5/input.txt", "r") as f:
lines = [Line.from_puzzle_input(line) for line in f.readlines()]
straight_field = np.zeros((1000, 1000), dtype=int)
diagonal_field = straight_field.copy()
for line in lines:
field_index = (slice(line.ymin, line.ymax + 1), slice(line.xmin, line.xmax + 1))
if line.x1 == line.x2 or line.y1 == line.y2:
straight_field[field_index] += 1
else:
is_identity = (line.x2 - line.x1 > 0) == (line.y2 - line.y1 > 0)
diag_slice = slice(None, None, None if is_identity else -1)
diagonal_field[field_index] += np.diag(np.ones((line.xmax - line.xmin + 1), dtype=int))[diag_slice]
field = straight_field + diagonal_field
print(f"Answer 1: {np.sum(straight_field > 1)}")
print(f"Answer 2: {np.sum(field > 1)}")
| 31.426471 | 111 | 0.653486 |
f8b88aa220e765ebad5849f646d7fa3f22e031df
| 1,316 |
py
|
Python
|
sort_array_by_parity_ii_alt.py
|
tusharsadhwani/leetcode
|
a17a8a7587c5654f05fcd13ae7cdf47263ab2ea8
|
[
"MIT"
] | 6 |
2021-05-21T01:10:42.000Z
|
2021-12-16T16:12:30.000Z
|
sort_array_by_parity_ii_alt.py
|
tusharsadhwani/leetcode
|
a17a8a7587c5654f05fcd13ae7cdf47263ab2ea8
|
[
"MIT"
] | null | null | null |
sort_array_by_parity_ii_alt.py
|
tusharsadhwani/leetcode
|
a17a8a7587c5654f05fcd13ae7cdf47263ab2ea8
|
[
"MIT"
] | null | null | null |
from typing import Callable
tests = [
(
([4, 2, 5, 7],),
[4, 5, 2, 7],
),
(
([2, 3],),
[2, 3],
),
(
([2, 3, 1, 1, 4, 0, 0, 4, 3, 3],),
[2, 3, 4, 1, 4, 3, 0, 1, 0, 3],
),
]
| 24.830189 | 77 | 0.50152 |
f8ba3fd25de458d4df99e4ca579804ce22c8dbdc
| 112 |
py
|
Python
|
database.py
|
anthonypang99/TakeNote
|
7da668d48b72ee825f3fb9f503f8d4d6fe2ff644
|
[
"MIT"
] | null | null | null |
database.py
|
anthonypang99/TakeNote
|
7da668d48b72ee825f3fb9f503f8d4d6fe2ff644
|
[
"MIT"
] | null | null | null |
database.py
|
anthonypang99/TakeNote
|
7da668d48b72ee825f3fb9f503f8d4d6fe2ff644
|
[
"MIT"
] | null | null | null |
from flask_sqlalchemy import SQLAlchemy
# Initialize the Flask-SQLAlchemy extension instance
db = SQLAlchemy()
| 22.4 | 52 | 0.830357 |
f8ba6e975ac143461562e6b418e4b0a0aee2b105
| 4,285 |
py
|
Python
|
alfred/Alfred.alfredpreferences/workflows/user.workflow.99DE3F5C-7CB4-4E0B-9195-7782AADC167B/converter/constants.py
|
karamfil/saphe
|
f1c56dcf11613808e07f462d50f20881aef7fbdc
|
[
"MIT"
] | 2 |
2019-09-17T10:20:20.000Z
|
2020-02-10T11:46:33.000Z
|
alfred/Alfred.alfredpreferences/workflows/user.workflow.99DE3F5C-7CB4-4E0B-9195-7782AADC167B/converter/constants.py
|
karamfil/saphe
|
f1c56dcf11613808e07f462d50f20881aef7fbdc
|
[
"MIT"
] | null | null | null |
alfred/Alfred.alfredpreferences/workflows/user.workflow.99DE3F5C-7CB4-4E0B-9195-7782AADC167B/converter/constants.py
|
karamfil/saphe
|
f1c56dcf11613808e07f462d50f20881aef7fbdc
|
[
"MIT"
] | null | null | null |
import re
UNITS_XML_FILE = 'poscUnits22.xml'
UNITS_PICKLE_FILE = 'units.pickle'
OUTPUT_DECIMALS = 6
SOURCE_PATTERN = r'^(?P<quantity>.*[\d.]+)\s*(?P<from>[^\d\s]([^\s]*|.+?))'
SOURCE_RE = re.compile(SOURCE_PATTERN + '$', re.IGNORECASE | re.VERBOSE)
FULL_PATTERN = r'(\s+as|\s+to|\s+in|\s*>|\s*=)\s(?P<to>[^\d\s][^\s]*)$'
FULL_RE = re.compile(SOURCE_PATTERN + FULL_PATTERN + '$',
re.IGNORECASE | re.VERBOSE)
ICONS = {
'length': 'scale6.png',
'height': 'scale6.png',
'distance': 'scale6.png',
'area': 'scaling1.png',
'time': 'round27.png',
'thermodynamic temperature': 'thermometer19.png',
'volume': 'measuring3.png',
'mass': 'weight4.png',
'velocity': 'timer18.png',
'level of power intensity': 'treble2.png',
'digital storage': 'binary9.png',
}
DEFAULT_ICON = 'ruler9.png'
ANNOTATION_REPLACEMENTS = {
'litre': ('liter', 'liters', 'l'),
'metre': ('meter', 'm'),
'm2': ('meter^3',),
'dm': ('decimeter',),
'dm2': ('dm^2', 'decimeter^2',),
'dm3': ('dm^3', 'decimeter^3',),
'cm': ('centimeter',),
'cm2': ('cm^2', 'centimeter^2',),
'cm3': ('cm^3', 'centimeter^3',),
'mm': ('milimeter',),
'mm2': ('mm^2', 'milimeter^2'),
'mm3': ('mm^3', 'milimeter^3'),
'degF': ('f', 'fahrenheit', 'farhenheit', 'farenheit'),
'degC': ('c', 'celsius', 'celcius'),
'byte': ('B', 'bytes',),
'bit': ('b', 'bits',),
'kbyte': ('KB', 'kB', 'kb', 'kilobyte',),
'Mbyte': ('MB', 'megabyte',),
'ozm': ('oz', 'ounce', 'ounces'),
'lbm': ('lb', 'lbs', 'pound', 'pounds'),
'miPh': ('mph',),
'ftPh': ('fps',),
'foot': ("'",),
'square': ('sq',),
'ft2': ('ft^2', 'foot^2'),
'ft3': ('ft^3', 'foot^3'),
'inch': ('inches', '"'),
'inch2': ('inch^2', 'square inch'),
'inch3': ('inch^3', 'cube inch'),
'flozUS': ('flus', 'floz', 'fl', 'fl oz', 'fl oz uk'),
'flozUK': ('fluk', 'fl oz uk', 'fl uk'),
}
EXPANSIONS = {
'foot': ('feet', 'ft'),
'mili': ('milli',),
'meter': ('metres', 'meter', 'meters'),
'^2': ('sq', 'square'),
'^3': ('cube', 'cubed'),
}
for annotation, items in ANNOTATION_REPLACEMENTS.items():
items = set(items)
items.add(annotation)
for key, expansions in EXPANSIONS.iteritems():
for expansion in expansions:
for item in set(items):
items.add(item.replace(key, expansion))
ANNOTATION_REPLACEMENTS[annotation] = sorted(items)
# Mostly for language specific stuff, defaulting to US for now since I'm not
# easily able to detect the language in a fast way from within alfred
LOCALIZED_UNITS = (
('metre', 'meter'),
('litre', 'liter'),
)
RIGHT_TRIMABLE_OPERATORS = '/+*- (.^'
FUNCTION_ALIASES = {
'deg': 'degrees',
'rad': 'radians',
'ln': 'log',
'arccos': 'acos',
'arcsin': 'asin',
'arctan': 'atan',
}
FUNCTION_ALIASES_RE = re.compile(r'\b(%s)\(' % '|'.join(FUNCTION_ALIASES))
FOOT_INCH_RE = re.compile(r'''(\d+)'(\d+)"?''')
FOOT_INCH_REPLACE = r'(\1*12)+\2 inch'
POWER_UNIT_RE = re.compile(r'([a-z])\^([23])\b')
POWER_UNIT_REPLACEMENT = r'\g<1>\g<2>'
PRE_EVAL_REPLACEMENTS = {
'^': '**',
}
# Known safe math functions
MATH_FUNCTIONS = [
# Number theoretic and representation functions
'ceil',
'copysign',
'fabs',
'factorial',
'floor',
'fmod',
'frexp',
'isinf',
'isnan',
'ldexp',
'modf',
'trunc',
# Power and logarithmic functions
'exp',
'expm1',
'log',
'log1p',
'log10',
'pow',
'sqrt',
# Trigonometric functions
'acos',
'asin',
'atan',
'atan2',
'cos',
'hypot',
'sin',
'tan',
# Angular conversion functions
'degrees',
'radians',
# Hyperbolic functions
'acosh',
'asinh',
'atanh',
'cosh',
'sinh',
'tanh',
# Special functions
'erf',
'erfc',
'gamma',
'lgamma',
# Missing functions won't break anything but won't do anything either
'this_function_definitely_does_not_exist',
]
| 22.792553 | 76 | 0.54189 |
f8bab3dc683ade4a29b7e25ca1a99e68f49ac849
| 462 |
py
|
Python
|
30/00/1.py
|
pylangstudy/201707
|
c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6
|
[
"CC0-1.0"
] | null | null | null |
30/00/1.py
|
pylangstudy/201707
|
c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6
|
[
"CC0-1.0"
] | 46 |
2017-06-30T22:19:07.000Z
|
2017-07-31T22:51:31.000Z
|
30/00/1.py
|
pylangstudy/201707
|
c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6
|
[
"CC0-1.0"
] | null | null | null |
print('----- or -----')
RetT() or RetF()
RetF() or RetT() #
print('----- and -----')
RetT() and RetF() #
RetF() and RetT() #
print('----- not -----')
print(not True and True)
print(False or not True)
print(not True == True)
#print(True == not True) #SyntaxError: invalid syntax
print(True == (not True))
| 25.666667 | 53 | 0.645022 |
f8bc9f66b7afd106a2727f0668012f3210c6ab27
| 1,548 |
py
|
Python
|
tests/test_click.py
|
maxmouchet/mtoolbox
|
977f3af1e3fe6e6403a26fcca3a30a1285eb28c2
|
[
"MIT"
] | null | null | null |
tests/test_click.py
|
maxmouchet/mtoolbox
|
977f3af1e3fe6e6403a26fcca3a30a1285eb28c2
|
[
"MIT"
] | 2 |
2020-07-19T21:03:34.000Z
|
2020-09-11T14:56:34.000Z
|
tests/test_click.py
|
maxmouchet/mtoolbox
|
977f3af1e3fe6e6403a26fcca3a30a1285eb28c2
|
[
"MIT"
] | null | null | null |
from enum import Enum
from pathlib import Path
import click
from mbox.click import EnumChoice, ParsedDate, PathParam
| 24.1875 | 72 | 0.623385 |
f8bdfba3ce0bde25189979ebc289968a2512c766
| 1,400 |
py
|
Python
|
util/plot_pbt.py
|
Linus4world/3D-MRI-style-transfer
|
6747f0b235b8a6e773a941c222d594d9eedc6a35
|
[
"BSD-3-Clause"
] | 1 |
2022-01-03T16:08:35.000Z
|
2022-01-03T16:08:35.000Z
|
util/plot_PBT.py
|
Linus4world/mrs-gan
|
64669251584a7421cce3a5173983a2275dcb438a
|
[
"BSD-2-Clause"
] | null | null | null |
util/plot_PBT.py
|
Linus4world/mrs-gan
|
64669251584a7421cce3a5173983a2275dcb438a
|
[
"BSD-2-Clause"
] | 1 |
2022-02-11T13:26:38.000Z
|
2022-02-11T13:26:38.000Z
|
import math
import matplotlib.pyplot as plt
import json
import os
import warnings
warnings.filterwarnings("ignore")
if __name__ == "__main__":
plotPBT('/home/kreitnerl/mrs-gan/ray_results/test_feat/')
| 31.111111 | 96 | 0.594286 |
f8bec2e6574c370927ccaaf8971ce34b58a52c44
| 497 |
py
|
Python
|
Cap_9/ex9.23/ex9.23.py
|
gguilherme42/Livro-de-Python
|
465a509d50476fd1a87239c71ed741639d58418b
|
[
"MIT"
] | 4 |
2020-04-07T00:38:46.000Z
|
2022-03-10T03:34:42.000Z
|
Cap_9/ex9.23/ex9.23.py
|
gguilherme42/Livro-de-Python
|
465a509d50476fd1a87239c71ed741639d58418b
|
[
"MIT"
] | null | null | null |
Cap_9/ex9.23/ex9.23.py
|
gguilherme42/Livro-de-Python
|
465a509d50476fd1a87239c71ed741639d58418b
|
[
"MIT"
] | 1 |
2021-04-22T02:45:38.000Z
|
2021-04-22T02:45:38.000Z
|
import agenda23
agenda23.le('Agenda.txt')
while True:
opcao = agenda23.menu()
if opcao == 0:
break
elif opcao == 1:
agenda23.novo()
elif opcao == 2:
agenda23.altera()
elif opcao == 3:
agenda23.apaga()
elif opcao == 4:
agenda23.lista()
elif opcao == 5:
agenda23.grava()
elif opcao == 6:
agenda23.le()
elif opcao == 7:
agenda23.ordena()
else:
print('Opo invlida! Digite novamente.')
| 20.708333 | 50 | 0.539235 |
f8c2cf6aa69e132e2490580fb1e86b3b369033e8
| 192 |
py
|
Python
|
src/qualtrutils/__init__.py
|
emanuele-albini/qualtrutils
|
7f1fb08221edb220738445bf3e8a92a1a037bb76
|
[
"MIT"
] | null | null | null |
src/qualtrutils/__init__.py
|
emanuele-albini/qualtrutils
|
7f1fb08221edb220738445bf3e8a92a1a037bb76
|
[
"MIT"
] | null | null | null |
src/qualtrutils/__init__.py
|
emanuele-albini/qualtrutils
|
7f1fb08221edb220738445bf3e8a92a1a037bb76
|
[
"MIT"
] | null | null | null |
try:
import os
import pkg_resources # part of setuptools
__version__ = pkg_resources.get_distribution(os.path.dirname(__file__)).version
except:
pass
from .qualtrics import *
| 24 | 83 | 0.744792 |
f8c3a6ea72bf6a5606bd1c07a86991ac84f55edc
| 97 |
py
|
Python
|
image_web/image_app/apps.py
|
datasciencee/Image_web
|
28d0d8a3006e64da05bb6ede03b037daaa2897b2
|
[
"MIT"
] | null | null | null |
image_web/image_app/apps.py
|
datasciencee/Image_web
|
28d0d8a3006e64da05bb6ede03b037daaa2897b2
|
[
"MIT"
] | null | null | null |
image_web/image_app/apps.py
|
datasciencee/Image_web
|
28d0d8a3006e64da05bb6ede03b037daaa2897b2
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
| 16.166667 | 34 | 0.721649 |
f8c595e3b1f5711087075fef4510b16d73d51d2b
| 9,650 |
py
|
Python
|
pyramboia/tasks/views.py
|
bicofino/Pyramboia
|
1c291b8fdc71f057a99e7ffbfaa8ba4e713346fd
|
[
"MIT"
] | 1 |
2016-03-09T13:40:06.000Z
|
2016-03-09T13:40:06.000Z
|
pyramboia/tasks/views.py
|
bicofino/Pyramboia
|
1c291b8fdc71f057a99e7ffbfaa8ba4e713346fd
|
[
"MIT"
] | null | null | null |
pyramboia/tasks/views.py
|
bicofino/Pyramboia
|
1c291b8fdc71f057a99e7ffbfaa8ba4e713346fd
|
[
"MIT"
] | 1 |
2018-03-24T18:03:09.000Z
|
2018-03-24T18:03:09.000Z
|
import time
import requests
import xml.dom.minidom
from lxml import etree
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib import messages
from django.core.urlresolvers import reverse_lazy
from django.views.generic import ListView, DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.template import RequestContext, loader
from .models import Project, Target, Header, Argument, Task, History
from django.shortcuts import render, render_to_response, RequestContext, HttpResponseRedirect
from .forms import TaskForm, ProjectForm, ArgumentForm, HeaderForm, TargetForm
from django.views.generic.edit import FormView
from django.db.models import Count
from jsonrpc import jsonrpc_method
from django.views.generic.edit import CreateView, UpdateView, DeleteView
# Create your views here.
class ProjectDeleteView(DeleteView):
model = Project
success_url = reverse_lazy('project-list')
class ProjectListView(ListView):
model = Project
template_name_suffix = '_list'
class ProjectCreateView(CreateView):
template_name_suffix = '_create'
model = Project
form_class = ProjectForm
class ProjectDetailView(DetailView):
model = Project
class ProjectUpdateView(UpdateView):
template_name_suffix = '_create'
model = Project
form_class = ProjectForm
def all_projects(request):
projects = Project.objects.all()
return render_to_response('all_projects.html', locals(), context_instance=RequestContext(request))
def addTask(request):
form = TasksForm(request.POST or None)
if form.is_valid():
save_it = form.save(commit=False)
save_it.save()
message = 'Add a new task'
messages.success(request, 'Your task has been added.')
return HttpResponseRedirect('/')
return render_to_response('addtask.html', locals(), context_instance=RequestContext(request))
def addArguments(request):
form = ArgumentsForm(request.POST or None)
if form.is_valid():
save_it = form.save(commit=False)
save_it.save()
message = 'Add a new argument'
messages.success(request, 'Your argument has been added.')
return HttpResponseRedirect('/')
return render_to_response('addargs.html', locals(), context_instance=RequestContext(request))
class HeaderDeleteView(DeleteView):
model = Header
success_url = reverse_lazy('header-list')
class HeaderListView(ListView):
model = Header
template_name_suffix = '_list'
class HeaderCreateView(CreateView):
template_name_suffix = '_create'
model = Header
form_class = HeaderForm
class HeaderDetailView(DetailView):
model = Header
class HeaderUpdateView(UpdateView):
template_name_suffix = '_create'
model = Header
form_class = HeaderForm
# Target Views
# Argument Views
# Task Views
| 31.129032 | 108 | 0.684249 |
f8c5dfc6d0cdbf14f1da548855e712503b3de0e5
| 230 |
py
|
Python
|
frameworks/MLNet/__init__.py
|
Ennosigaeon/automlbenchmark
|
bd3e529d641b64300a075d59408203d537311b7e
|
[
"MIT"
] | 282 |
2018-09-19T09:45:46.000Z
|
2022-03-30T04:05:51.000Z
|
frameworks/MLNet/__init__.py
|
Ennosigaeon/automlbenchmark
|
bd3e529d641b64300a075d59408203d537311b7e
|
[
"MIT"
] | 267 |
2018-11-02T11:43:11.000Z
|
2022-03-31T08:58:16.000Z
|
frameworks/MLNet/__init__.py
|
Ennosigaeon/automlbenchmark
|
bd3e529d641b64300a075d59408203d537311b7e
|
[
"MIT"
] | 104 |
2018-10-17T19:32:36.000Z
|
2022-03-19T22:47:59.000Z
|
from amlb.utils import call_script_in_same_dir
| 20.909091 | 66 | 0.721739 |
f8c6b59947b8e1e01fbc267420d89e101ab3f722
| 932 |
py
|
Python
|
util_test.py
|
svennickel/itunes-app-scraper
|
14b857bd40a237825cb6bd93be388e6bcd083c01
|
[
"MIT"
] | 10 |
2020-08-12T06:47:04.000Z
|
2021-12-04T03:06:19.000Z
|
util_test.py
|
svennickel/itunes-app-scraper
|
14b857bd40a237825cb6bd93be388e6bcd083c01
|
[
"MIT"
] | 5 |
2020-11-19T07:53:19.000Z
|
2022-03-16T15:06:37.000Z
|
util_test.py
|
iaine/itunes-app-scraper
|
de60c8c0b369e78d4c87a0cb11284b2ef576c090
|
[
"MIT"
] | 11 |
2020-08-12T06:47:31.000Z
|
2022-03-19T23:36:18.000Z
|
from itunes_app_scraper.util import AppStoreException, AppStoreCollections, AppStoreCategories, AppStoreUtils
import json
import pytest
import os
| 33.285714 | 109 | 0.769313 |
f8c6f95465da9e6fd5b7017053c85eda97db68b6
| 802 |
py
|
Python
|
natasha/span.py
|
baltachev/natasha
|
b326631c510384b1ce3ac198bce8ed11818ec784
|
[
"MIT"
] | 822 |
2017-09-05T08:38:42.000Z
|
2022-03-31T16:08:48.000Z
|
natasha/span.py
|
baltachev/natasha
|
b326631c510384b1ce3ac198bce8ed11818ec784
|
[
"MIT"
] | 81 |
2017-09-12T12:49:00.000Z
|
2022-03-25T18:21:12.000Z
|
natasha/span.py
|
baltachev/natasha
|
b326631c510384b1ce3ac198bce8ed11818ec784
|
[
"MIT"
] | 90 |
2017-09-05T08:38:49.000Z
|
2022-03-29T12:09:22.000Z
|
from .record import Record
| 21.105263 | 52 | 0.516209 |
f8c7ce0b20cdca0b81d121ae696bffeb609cd523
| 7,297 |
py
|
Python
|
bingads/v13/bulk/entities/bulk_offline_conversion.py
|
pawelulita/BingAds-Python-SDK
|
e7b5a618e87a43d0a5e2c79d9aa4626e208797bd
|
[
"MIT"
] | 86 |
2016-02-29T03:24:28.000Z
|
2022-03-29T09:30:21.000Z
|
bingads/v13/bulk/entities/bulk_offline_conversion.py
|
pawelulita/BingAds-Python-SDK
|
e7b5a618e87a43d0a5e2c79d9aa4626e208797bd
|
[
"MIT"
] | 135 |
2016-04-12T13:31:28.000Z
|
2022-03-29T02:18:51.000Z
|
bingads/v13/bulk/entities/bulk_offline_conversion.py
|
pawelulita/BingAds-Python-SDK
|
e7b5a618e87a43d0a5e2c79d9aa4626e208797bd
|
[
"MIT"
] | 154 |
2016-04-08T04:11:27.000Z
|
2022-03-29T21:21:07.000Z
|
from __future__ import print_function
from bingads.service_client import _CAMPAIGN_OBJECT_FACTORY_V13
from bingads.v13.internal.bulk.string_table import _StringTable
from bingads.v13.internal.bulk.entities.single_record_bulk_entity import _SingleRecordBulkEntity
from bingads.v13.internal.bulk.mappings import _SimpleBulkMapping
from bingads.v13.internal.extensions import *
| 34.582938 | 139 | 0.628752 |
f8c98cbdffeb6bc1eca9320791dd78a1cefdb9cd
| 4,320 |
py
|
Python
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/lti_provider/tests/test_tasks.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 3 |
2021-12-15T04:58:18.000Z
|
2022-02-06T12:15:37.000Z
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/lti_provider/tests/test_tasks.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | null | null | null |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/lti_provider/tests/test_tasks.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 1 |
2019-01-02T14:38:50.000Z
|
2019-01-02T14:38:50.000Z
|
"""
Tests for the LTI outcome service handlers, both in outcomes.py and in tasks.py
"""
from unittest.mock import MagicMock, patch
import ddt
from django.test import TestCase
from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator
import lms.djangoapps.lti_provider.tasks as tasks
from common.djangoapps.student.tests.factories import UserFactory
from lms.djangoapps.lti_provider.models import GradedAssignment, LtiConsumer, OutcomeService
| 31.532847 | 92 | 0.634491 |
f8c9d560d993e370d3b1363238c43807ccc5dfd5
| 1,954 |
py
|
Python
|
agents/dumbagent.py
|
dbelliss/Starcraft2AI
|
a3044f0eb3c1bb18084fa59265a430ddcdfab80b
|
[
"MIT"
] | 2 |
2018-04-17T00:37:40.000Z
|
2018-04-30T03:04:20.000Z
|
agents/dumbagent.py
|
dbelliss/Starcraft2AI
|
a3044f0eb3c1bb18084fa59265a430ddcdfab80b
|
[
"MIT"
] | null | null | null |
agents/dumbagent.py
|
dbelliss/Starcraft2AI
|
a3044f0eb3c1bb18084fa59265a430ddcdfab80b
|
[
"MIT"
] | null | null | null |
from loser_agent import *
if __name__ == '__main__':
main()
| 41.574468 | 129 | 0.666837 |
f8caa3e778c29557bd6611746d149fdf5e4f18a9
| 113 |
py
|
Python
|
juno/server/http/handler/api.py
|
DSciLab/juno
|
1d572c8d3fd06a6c1fcc51b42a6539dd3ae0927e
|
[
"MIT"
] | null | null | null |
juno/server/http/handler/api.py
|
DSciLab/juno
|
1d572c8d3fd06a6c1fcc51b42a6539dd3ae0927e
|
[
"MIT"
] | null | null | null |
juno/server/http/handler/api.py
|
DSciLab/juno
|
1d572c8d3fd06a6c1fcc51b42a6539dd3ae0927e
|
[
"MIT"
] | null | null | null |
from .base.api_handler import APIBaseHandler
| 16.142857 | 44 | 0.725664 |
f8cc12080c230a16858bbc18a05bcd5b93430fe7
| 317 |
py
|
Python
|
Python/mathematics/find_missing_number.py
|
RCubedClub/cp_algo
|
ec254055ef745224b0a1c766ef16709a3eea7087
|
[
"MIT"
] | null | null | null |
Python/mathematics/find_missing_number.py
|
RCubedClub/cp_algo
|
ec254055ef745224b0a1c766ef16709a3eea7087
|
[
"MIT"
] | null | null | null |
Python/mathematics/find_missing_number.py
|
RCubedClub/cp_algo
|
ec254055ef745224b0a1c766ef16709a3eea7087
|
[
"MIT"
] | null | null | null |
import random
if __name__ == '__main__':
main()
| 14.409091 | 43 | 0.577287 |
f8cda283a32c2452d1728b137c6f236a6921d3ec
| 1,099 |
py
|
Python
|
091. Decode Ways.py
|
joshlyman/Josh-LeetCode
|
cc9e2cc406d2cbd5a90ee579efbcaeffb842c5ed
|
[
"MIT"
] | null | null | null |
091. Decode Ways.py
|
joshlyman/Josh-LeetCode
|
cc9e2cc406d2cbd5a90ee579efbcaeffb842c5ed
|
[
"MIT"
] | null | null | null |
091. Decode Ways.py
|
joshlyman/Josh-LeetCode
|
cc9e2cc406d2cbd5a90ee579efbcaeffb842c5ed
|
[
"MIT"
] | null | null | null |
# Problem Reduction: variation of n-th staircase with n = [1, 2] steps.
# Approach: We generate a bottom up DP table.
# The tricky part is handling the corner cases (e.g. s = "30").
# Most elegant way to deal with those error/corner cases, is to allocate an extra space, dp[0].
# Let dp[ i ] = the number of ways to parse the string s[1: i + 1]
# For example:
# s = "231"
# index 0: extra base offset. dp[0] = 1
# index 1: # of ways to parse "2" => dp[1] = 1
# index 2: # of ways to parse "23" => "2" and "23", dp[2] = 2
# index 3: # of ways to parse "231" => "2 3 1" and "23 1" => dp[3] = 2
| 28.921053 | 95 | 0.497725 |
f8cddb9ef6bd722c93b8a3657d1eaf9e8803d45f
| 4,525 |
py
|
Python
|
Scripts/Cutter.py
|
rhong3/CPTAC-UCEC
|
ec83fbee234b5ad3df6524cdd960b5f0f3da9ea9
|
[
"MIT"
] | 4 |
2019-01-04T21:11:03.000Z
|
2020-12-11T16:56:15.000Z
|
Scripts/Cutter.py
|
rhong3/CPTAC-UCEC
|
ec83fbee234b5ad3df6524cdd960b5f0f3da9ea9
|
[
"MIT"
] | null | null | null |
Scripts/Cutter.py
|
rhong3/CPTAC-UCEC
|
ec83fbee234b5ad3df6524cdd960b5f0f3da9ea9
|
[
"MIT"
] | null | null | null |
"""
Tile svs/scn files
Created on 11/01/2018
@author: RH
"""
import time
import matplotlib
import os
import shutil
import pandas as pd
matplotlib.use('Agg')
import Slicer
import staintools
import re
# Get all images in the root directory
# cut; each level is 2 times difference (20x, 10x, 5x)
# Run as main
if __name__ == "__main__":
if not os.path.isdir('../tiles'):
os.mkdir('../tiles')
cut()
| 29.769737 | 106 | 0.478232 |
f8cde62d3add298d347b197159cd3ef0fad71443
| 2,850 |
py
|
Python
|
brake.py
|
tensorpro/AutonomousBraking
|
9861e5c0423d8ca1a2f3f640003b3581a3074459
|
[
"MIT"
] | 8 |
2017-05-04T22:04:48.000Z
|
2020-03-27T13:06:39.000Z
|
brake.py
|
tensorpro/AutonomousBraking
|
9861e5c0423d8ca1a2f3f640003b3581a3074459
|
[
"MIT"
] | null | null | null |
brake.py
|
tensorpro/AutonomousBraking
|
9861e5c0423d8ca1a2f3f640003b3581a3074459
|
[
"MIT"
] | 2 |
2019-07-22T02:19:57.000Z
|
2020-09-29T21:00:00.000Z
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
m = 4
b = -.2
bl = -.1
br = -.1
sh = .13
from visualizations import show_bboxes
| 26.635514 | 75 | 0.561404 |
f8d06ccac9f1d3cb709d0653c37332224ffae6f4
| 20,941 |
py
|
Python
|
pysnmp/DPS-MIB-V38.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11 |
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/DPS-MIB-V38.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75 |
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/DPS-MIB-V38.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10 |
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module DPS-MIB-V38 (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DPS-MIB-V38
# Produced by pysmi-0.3.4 at Mon Apr 29 18:39:21 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
iso, ModuleIdentity, Unsigned32, Counter64, Gauge32, ObjectIdentity, IpAddress, enterprises, NotificationType, Integer32, Bits, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "ModuleIdentity", "Unsigned32", "Counter64", "Gauge32", "ObjectIdentity", "IpAddress", "enterprises", "NotificationType", "Integer32", "Bits", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Counter32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
dpsInc = MibIdentifier((1, 3, 6, 1, 4, 1, 2682))
dpsAlarmControl = MibIdentifier((1, 3, 6, 1, 4, 1, 2682, 1))
tmonXM = MibIdentifier((1, 3, 6, 1, 4, 1, 2682, 1, 1))
tmonIdent = MibIdentifier((1, 3, 6, 1, 4, 1, 2682, 1, 1, 1))
tmonIdentManufacturer = MibScalar((1, 3, 6, 1, 4, 1, 2682, 1, 1, 1, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmonIdentManufacturer.setStatus('mandatory')
tmonIdentModel = MibScalar((1, 3, 6, 1, 4, 1, 2682, 1, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmonIdentModel.setStatus('mandatory')
tmonIdentSoftwareVersion = MibScalar((1, 3, 6, 1, 4, 1, 2682, 1, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmonIdentSoftwareVersion.setStatus('mandatory')
tmonAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 2682, 1, 1, 2), )
if mibBuilder.loadTexts: tmonAlarmTable.setStatus('mandatory')
tmonAlarmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2682, 1, 1, 2, 1), ).setIndexNames((0, "DPS-MIB-V38", "tmonAIndex"))
if mibBuilder.loadTexts: tmonAlarmEntry.setStatus('mandatory')
tmonAIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 1, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmonAIndex.setStatus('mandatory')
tmonASite = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 1, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(30, 30)).setFixedLength(30)).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmonASite.setStatus('mandatory')
tmonADesc = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 1, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(40, 40)).setFixedLength(40)).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmonADesc.setStatus('mandatory')
tmonAState = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 1, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmonAState.setStatus('mandatory')
tmonASeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 1, 2, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmonASeverity.setStatus('mandatory')
tmonAChgDate = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 1, 2, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmonAChgDate.setStatus('mandatory')
tmonAChgTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 1, 2, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmonAChgTime.setStatus('mandatory')
tmonAAuxDesc = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 1, 2, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(30, 30)).setFixedLength(30)).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmonAAuxDesc.setStatus('mandatory')
tmonADispDesc = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 1, 2, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(14, 14)).setFixedLength(14)).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmonADispDesc.setStatus('mandatory')
tmonAPntType = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 1, 2, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmonAPntType.setStatus('mandatory')
tmonAPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 1, 2, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmonAPort.setStatus('mandatory')
tmonAAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 1, 2, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmonAAddress.setStatus('mandatory')
tmonADisplay = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 1, 2, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmonADisplay.setStatus('mandatory')
tmonAPoint = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 1, 2, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmonAPoint.setStatus('mandatory')
tmonCommandGrid = MibIdentifier((1, 3, 6, 1, 4, 1, 2682, 1, 1, 3))
tmonCPType = MibScalar((1, 3, 6, 1, 4, 1, 2682, 1, 1, 3, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmonCPType.setStatus('mandatory')
tmonCPort = MibScalar((1, 3, 6, 1, 4, 1, 2682, 1, 1, 3, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmonCPort.setStatus('mandatory')
tmonCAddress = MibScalar((1, 3, 6, 1, 4, 1, 2682, 1, 1, 3, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmonCAddress.setStatus('mandatory')
tmonCDisplay = MibScalar((1, 3, 6, 1, 4, 1, 2682, 1, 1, 3, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmonCDisplay.setStatus('mandatory')
tmonCPoint = MibScalar((1, 3, 6, 1, 4, 1, 2682, 1, 1, 3, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmonCPoint.setStatus('mandatory')
tmonCEvent = MibScalar((1, 3, 6, 1, 4, 1, 2682, 1, 1, 3, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmonCEvent.setStatus('mandatory')
tmonCAction = MibScalar((1, 3, 6, 1, 4, 1, 2682, 1, 1, 3, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 17, 18, 19))).clone(namedValues=NamedValues(("latch", 1), ("release", 2), ("momentary", 3), ("ack", 17), ("tag", 18), ("untag", 19)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmonCAction.setStatus('mandatory')
tmonCAuxText = MibScalar((1, 3, 6, 1, 4, 1, 2682, 1, 1, 3, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(30, 30)).setFixedLength(30)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmonCAuxText.setStatus('mandatory')
tmonCResult = MibScalar((1, 3, 6, 1, 4, 1, 2682, 1, 1, 3, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("success", 1), ("failure", 2), ("pending", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmonCResult.setStatus('mandatory')
dpsRTU = MibIdentifier((1, 3, 6, 1, 4, 1, 2682, 1, 2))
dpsRTUIdent = MibIdentifier((1, 3, 6, 1, 4, 1, 2682, 1, 2, 1))
dpsRTUManufacturer = MibScalar((1, 3, 6, 1, 4, 1, 2682, 1, 2, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(30, 30)).setFixedLength(30)).setMaxAccess("readonly")
if mibBuilder.loadTexts: dpsRTUManufacturer.setStatus('mandatory')
dpsRTUModel = MibScalar((1, 3, 6, 1, 4, 1, 2682, 1, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(30, 30)).setFixedLength(30)).setMaxAccess("readonly")
if mibBuilder.loadTexts: dpsRTUModel.setStatus('mandatory')
dpsRTUFirmwareVersion = MibScalar((1, 3, 6, 1, 4, 1, 2682, 1, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(20, 20)).setFixedLength(20)).setMaxAccess("readonly")
if mibBuilder.loadTexts: dpsRTUFirmwareVersion.setStatus('mandatory')
dpsRTUDateTime = MibScalar((1, 3, 6, 1, 4, 1, 2682, 1, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(23, 23)).setFixedLength(23)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dpsRTUDateTime.setStatus('mandatory')
dpsRTUSyncReq = MibScalar((1, 3, 6, 1, 4, 1, 2682, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("sync", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dpsRTUSyncReq.setStatus('mandatory')
dpsRTUDisplayGrid = MibTable((1, 3, 6, 1, 4, 1, 2682, 1, 2, 2), )
if mibBuilder.loadTexts: dpsRTUDisplayGrid.setStatus('mandatory')
dpsRTUDisplayEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2682, 1, 2, 2, 1), ).setIndexNames((0, "DPS-MIB-V38", "dpsRTUPort"), (0, "DPS-MIB-V38", "dpsRTUAddress"), (0, "DPS-MIB-V38", "dpsRTUDisplay"))
if mibBuilder.loadTexts: dpsRTUDisplayEntry.setStatus('mandatory')
dpsRTUPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 2, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dpsRTUPort.setStatus('mandatory')
dpsRTUAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 2, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dpsRTUAddress.setStatus('mandatory')
dpsRTUDisplay = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 2, 2, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dpsRTUDisplay.setStatus('mandatory')
dpsRTUDispDesc = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 2, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(20, 20)).setFixedLength(20)).setMaxAccess("readonly")
if mibBuilder.loadTexts: dpsRTUDispDesc.setStatus('mandatory')
dpsRTUPntMap = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 2, 2, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(71, 71)).setFixedLength(71)).setMaxAccess("readonly")
if mibBuilder.loadTexts: dpsRTUPntMap.setStatus('mandatory')
dpsRTUControlGrid = MibIdentifier((1, 3, 6, 1, 4, 1, 2682, 1, 2, 3))
dpsRTUCPort = MibScalar((1, 3, 6, 1, 4, 1, 2682, 1, 2, 3, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dpsRTUCPort.setStatus('mandatory')
dpsRTUCAddress = MibScalar((1, 3, 6, 1, 4, 1, 2682, 1, 2, 3, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dpsRTUCAddress.setStatus('mandatory')
dpsRTUCDisplay = MibScalar((1, 3, 6, 1, 4, 1, 2682, 1, 2, 3, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dpsRTUCDisplay.setStatus('mandatory')
dpsRTUCPoint = MibScalar((1, 3, 6, 1, 4, 1, 2682, 1, 2, 3, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dpsRTUCPoint.setStatus('mandatory')
dpsRTUCAction = MibScalar((1, 3, 6, 1, 4, 1, 2682, 1, 2, 3, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("latch", 1), ("release", 2), ("momentary", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dpsRTUCAction.setStatus('mandatory')
dpsRTUAlarmGrid = MibTable((1, 3, 6, 1, 4, 1, 2682, 1, 2, 5), )
if mibBuilder.loadTexts: dpsRTUAlarmGrid.setStatus('mandatory')
dpsRTUAlarmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2682, 1, 2, 5, 1), ).setIndexNames((0, "DPS-MIB-V38", "dpsRTUAPort"), (0, "DPS-MIB-V38", "dpsRTUAAddress"), (0, "DPS-MIB-V38", "dpsRTUADisplay"), (0, "DPS-MIB-V38", "dpsRTUAPoint"))
if mibBuilder.loadTexts: dpsRTUAlarmEntry.setStatus('mandatory')
dpsRTUAPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 2, 5, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dpsRTUAPort.setStatus('mandatory')
dpsRTUAAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 2, 5, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dpsRTUAAddress.setStatus('mandatory')
dpsRTUADisplay = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 2, 5, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dpsRTUADisplay.setStatus('mandatory')
dpsRTUAPoint = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 2, 5, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dpsRTUAPoint.setStatus('mandatory')
dpsRTUAPntDesc = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 2, 5, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(21, 21)).setFixedLength(21)).setMaxAccess("readonly")
if mibBuilder.loadTexts: dpsRTUAPntDesc.setStatus('mandatory')
dpsRTUAState = MibTableColumn((1, 3, 6, 1, 4, 1, 2682, 1, 2, 5, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readonly")
if mibBuilder.loadTexts: dpsRTUAState.setStatus('mandatory')
tmonCRalarmSet = NotificationType((1, 3, 6, 1, 4, 1, 2682, 1, 1) + (0,10)).setObjects(("DPS-MIB-V38", "tmonASite"), ("DPS-MIB-V38", "tmonADesc"), ("DPS-MIB-V38", "tmonAState"), ("DPS-MIB-V38", "tmonASeverity"), ("DPS-MIB-V38", "tmonAChgDate"), ("DPS-MIB-V38", "tmonAChgTime"), ("DPS-MIB-V38", "tmonAAuxDesc"), ("DPS-MIB-V38", "tmonADispDesc"), ("DPS-MIB-V38", "tmonAPntType"), ("DPS-MIB-V38", "tmonAPort"), ("DPS-MIB-V38", "tmonAAddress"), ("DPS-MIB-V38", "tmonADisplay"), ("DPS-MIB-V38", "tmonAPoint"), ("DPS-MIB-V38", "tmonCEvent"))
tmonCRalarmClr = NotificationType((1, 3, 6, 1, 4, 1, 2682, 1, 1) + (0,11)).setObjects(("DPS-MIB-V38", "tmonASite"), ("DPS-MIB-V38", "tmonADesc"), ("DPS-MIB-V38", "tmonAState"), ("DPS-MIB-V38", "tmonASeverity"), ("DPS-MIB-V38", "tmonAChgDate"), ("DPS-MIB-V38", "tmonAChgTime"), ("DPS-MIB-V38", "tmonAAuxDesc"), ("DPS-MIB-V38", "tmonADispDesc"), ("DPS-MIB-V38", "tmonAPntType"), ("DPS-MIB-V38", "tmonAPort"), ("DPS-MIB-V38", "tmonAAddress"), ("DPS-MIB-V38", "tmonADisplay"), ("DPS-MIB-V38", "tmonAPoint"), ("DPS-MIB-V38", "tmonCEvent"))
tmonMJalarmSet = NotificationType((1, 3, 6, 1, 4, 1, 2682, 1, 1) + (0,12)).setObjects(("DPS-MIB-V38", "tmonASite"), ("DPS-MIB-V38", "tmonADesc"), ("DPS-MIB-V38", "tmonAState"), ("DPS-MIB-V38", "tmonASeverity"), ("DPS-MIB-V38", "tmonAChgDate"), ("DPS-MIB-V38", "tmonAChgTime"), ("DPS-MIB-V38", "tmonAAuxDesc"), ("DPS-MIB-V38", "tmonADispDesc"), ("DPS-MIB-V38", "tmonAPntType"), ("DPS-MIB-V38", "tmonAPort"), ("DPS-MIB-V38", "tmonAAddress"), ("DPS-MIB-V38", "tmonADisplay"), ("DPS-MIB-V38", "tmonAPoint"), ("DPS-MIB-V38", "tmonCEvent"))
tmonMJalarmClr = NotificationType((1, 3, 6, 1, 4, 1, 2682, 1, 1) + (0,13)).setObjects(("DPS-MIB-V38", "tmonASite"), ("DPS-MIB-V38", "tmonADesc"), ("DPS-MIB-V38", "tmonAState"), ("DPS-MIB-V38", "tmonASeverity"), ("DPS-MIB-V38", "tmonAChgDate"), ("DPS-MIB-V38", "tmonAChgTime"), ("DPS-MIB-V38", "tmonAAuxDesc"), ("DPS-MIB-V38", "tmonADispDesc"), ("DPS-MIB-V38", "tmonAPntType"), ("DPS-MIB-V38", "tmonAPort"), ("DPS-MIB-V38", "tmonAAddress"), ("DPS-MIB-V38", "tmonADisplay"), ("DPS-MIB-V38", "tmonAPoint"), ("DPS-MIB-V38", "tmonCEvent"))
tmonMNalarmSet = NotificationType((1, 3, 6, 1, 4, 1, 2682, 1, 1) + (0,14)).setObjects(("DPS-MIB-V38", "tmonASite"), ("DPS-MIB-V38", "tmonADesc"), ("DPS-MIB-V38", "tmonAState"), ("DPS-MIB-V38", "tmonASeverity"), ("DPS-MIB-V38", "tmonAChgDate"), ("DPS-MIB-V38", "tmonAChgTime"), ("DPS-MIB-V38", "tmonAAuxDesc"), ("DPS-MIB-V38", "tmonADispDesc"), ("DPS-MIB-V38", "tmonAPntType"), ("DPS-MIB-V38", "tmonAPort"), ("DPS-MIB-V38", "tmonAAddress"), ("DPS-MIB-V38", "tmonADisplay"), ("DPS-MIB-V38", "tmonAPoint"), ("DPS-MIB-V38", "tmonCEvent"))
tmonMNalarmClr = NotificationType((1, 3, 6, 1, 4, 1, 2682, 1, 1) + (0,15)).setObjects(("DPS-MIB-V38", "tmonASite"), ("DPS-MIB-V38", "tmonADesc"), ("DPS-MIB-V38", "tmonAState"), ("DPS-MIB-V38", "tmonASeverity"), ("DPS-MIB-V38", "tmonAChgDate"), ("DPS-MIB-V38", "tmonAChgTime"), ("DPS-MIB-V38", "tmonAAuxDesc"), ("DPS-MIB-V38", "tmonADispDesc"), ("DPS-MIB-V38", "tmonAPntType"), ("DPS-MIB-V38", "tmonAPort"), ("DPS-MIB-V38", "tmonAAddress"), ("DPS-MIB-V38", "tmonADisplay"), ("DPS-MIB-V38", "tmonAPoint"), ("DPS-MIB-V38", "tmonCEvent"))
tmonSTalarmSet = NotificationType((1, 3, 6, 1, 4, 1, 2682, 1, 1) + (0,16)).setObjects(("DPS-MIB-V38", "tmonASite"), ("DPS-MIB-V38", "tmonADesc"), ("DPS-MIB-V38", "tmonAState"), ("DPS-MIB-V38", "tmonASeverity"), ("DPS-MIB-V38", "tmonAChgDate"), ("DPS-MIB-V38", "tmonAChgTime"), ("DPS-MIB-V38", "tmonAAuxDesc"), ("DPS-MIB-V38", "tmonADispDesc"), ("DPS-MIB-V38", "tmonAPntType"), ("DPS-MIB-V38", "tmonAPort"), ("DPS-MIB-V38", "tmonAAddress"), ("DPS-MIB-V38", "tmonADisplay"), ("DPS-MIB-V38", "tmonAPoint"), ("DPS-MIB-V38", "tmonCEvent"))
tmonSTalarmClr = NotificationType((1, 3, 6, 1, 4, 1, 2682, 1, 1) + (0,17)).setObjects(("DPS-MIB-V38", "tmonASite"), ("DPS-MIB-V38", "tmonADesc"), ("DPS-MIB-V38", "tmonAState"), ("DPS-MIB-V38", "tmonASeverity"), ("DPS-MIB-V38", "tmonAChgDate"), ("DPS-MIB-V38", "tmonAChgTime"), ("DPS-MIB-V38", "tmonAAuxDesc"), ("DPS-MIB-V38", "tmonADispDesc"), ("DPS-MIB-V38", "tmonAPntType"), ("DPS-MIB-V38", "tmonAPort"), ("DPS-MIB-V38", "tmonAAddress"), ("DPS-MIB-V38", "tmonADisplay"), ("DPS-MIB-V38", "tmonAPoint"), ("DPS-MIB-V38", "tmonCEvent"))
dpsRTUPointSet = NotificationType((1, 3, 6, 1, 4, 1, 2682, 1, 2) + (0,20)).setObjects(("DPS-MIB-V38", "sysDescr"), ("DPS-MIB-V38", "sysLocation"), ("DPS-MIB-V38", "dpsRTUDateTime"), ("DPS-MIB-V38", "dpsRTUAPort"), ("DPS-MIB-V38", "dpsRTUAAddress"), ("DPS-MIB-V38", "dpsRTUADisplay"), ("DPS-MIB-V38", "dpsRTUAPoint"), ("DPS-MIB-V38", "dpsRTUAPntDesc"), ("DPS-MIB-V38", "dpsRTUAState"))
dpsRTUPointClr = NotificationType((1, 3, 6, 1, 4, 1, 2682, 1, 2) + (0,21)).setObjects(("DPS-MIB-V38", "sysDescr"), ("DPS-MIB-V38", "sysLocation"), ("DPS-MIB-V38", "dpsRTUDateTime"), ("DPS-MIB-V38", "dpsRTUAPort"), ("DPS-MIB-V38", "dpsRTUCAddress"), ("DPS-MIB-V38", "dpsRTUADisplay"), ("DPS-MIB-V38", "dpsRTUAPoint"), ("DPS-MIB-V38", "dpsRTUAPntDesc"), ("DPS-MIB-V38", "dpsRTUAState"))
dpsRTUsumPSet = NotificationType((1, 3, 6, 1, 4, 1, 2682, 1, 2) + (0,101)).setObjects(("DPS-MIB-V38", "sysDescr"), ("DPS-MIB-V38", "sysLocation"), ("DPS-MIB-V38", "dpsRTUDateTime"))
dpsRTUsumPClr = NotificationType((1, 3, 6, 1, 4, 1, 2682, 1, 2) + (0,102)).setObjects(("DPS-MIB-V38", "sysDescr"), ("DPS-MIB-V38", "sysLocation"), ("DPS-MIB-V38", "dpsRTUDateTime"))
dpsRTUcomFailed = NotificationType((1, 3, 6, 1, 4, 1, 2682, 1, 2) + (0,103)).setObjects(("DPS-MIB-V38", "sysDescr"), ("DPS-MIB-V38", "sysLocation"), ("DPS-MIB-V38", "dpsRTUDateTime"))
dpsRTUcomRestored = NotificationType((1, 3, 6, 1, 4, 1, 2682, 1, 2) + (0,104)).setObjects(("DPS-MIB-V38", "sysDescr"), ("DPS-MIB-V38", "sysLocation"), ("DPS-MIB-V38", "dpsRTUDateTime"))
mibBuilder.exportSymbols("DPS-MIB-V38", tmonAlarmEntry=tmonAlarmEntry, tmonAIndex=tmonAIndex, tmonAChgDate=tmonAChgDate, tmonCAddress=tmonCAddress, tmonSTalarmClr=tmonSTalarmClr, dpsRTUFirmwareVersion=dpsRTUFirmwareVersion, tmonAChgTime=tmonAChgTime, dpsRTUCPoint=dpsRTUCPoint, dpsRTU=dpsRTU, dpsRTUPntMap=dpsRTUPntMap, dpsRTUsumPSet=dpsRTUsumPSet, tmonASite=tmonASite, tmonAlarmTable=tmonAlarmTable, dpsRTUcomFailed=dpsRTUcomFailed, tmonCRalarmSet=tmonCRalarmSet, dpsRTUDisplayEntry=dpsRTUDisplayEntry, dpsRTUAlarmGrid=dpsRTUAlarmGrid, tmonAAddress=tmonAAddress, dpsRTUADisplay=dpsRTUADisplay, dpsRTUDisplayGrid=dpsRTUDisplayGrid, tmonADispDesc=tmonADispDesc, dpsRTUManufacturer=dpsRTUManufacturer, dpsRTUModel=dpsRTUModel, dpsRTUDispDesc=dpsRTUDispDesc, tmonCPoint=tmonCPoint, tmonMJalarmSet=tmonMJalarmSet, dpsRTUAPntDesc=dpsRTUAPntDesc, dpsInc=dpsInc, dpsAlarmControl=dpsAlarmControl, tmonAPort=tmonAPort, dpsRTUAlarmEntry=dpsRTUAlarmEntry, dpsRTUSyncReq=dpsRTUSyncReq, tmonIdent=tmonIdent, tmonASeverity=tmonASeverity, tmonMNalarmClr=tmonMNalarmClr, dpsRTUcomRestored=dpsRTUcomRestored, tmonCAction=tmonCAction, tmonIdentSoftwareVersion=tmonIdentSoftwareVersion, tmonIdentModel=tmonIdentModel, dpsRTUCAction=dpsRTUCAction, tmonMNalarmSet=tmonMNalarmSet, tmonADesc=tmonADesc, tmonCEvent=tmonCEvent, tmonSTalarmSet=tmonSTalarmSet, tmonADisplay=tmonADisplay, dpsRTUIdent=dpsRTUIdent, dpsRTUAPort=dpsRTUAPort, dpsRTUAAddress=dpsRTUAAddress, dpsRTUAddress=dpsRTUAddress, dpsRTUCPort=dpsRTUCPort, tmonAPntType=tmonAPntType, dpsRTUCAddress=dpsRTUCAddress, dpsRTUCDisplay=dpsRTUCDisplay, dpsRTUAState=dpsRTUAState, tmonCResult=tmonCResult, tmonXM=tmonXM, dpsRTUDateTime=dpsRTUDateTime, dpsRTUAPoint=dpsRTUAPoint, dpsRTUsumPClr=dpsRTUsumPClr, tmonCommandGrid=tmonCommandGrid, tmonCPType=tmonCPType, tmonAState=tmonAState, dpsRTUPort=dpsRTUPort, tmonMJalarmClr=tmonMJalarmClr, dpsRTUDisplay=dpsRTUDisplay, dpsRTUPointSet=dpsRTUPointSet, dpsRTUPointClr=dpsRTUPointClr, tmonAPoint=tmonAPoint, 
tmonCRalarmClr=tmonCRalarmClr, tmonIdentManufacturer=tmonIdentManufacturer, tmonCAuxText=tmonCAuxText, dpsRTUControlGrid=dpsRTUControlGrid, tmonAAuxDesc=tmonAAuxDesc, tmonCPort=tmonCPort, tmonCDisplay=tmonCDisplay)
| 146.440559 | 2,200 | 0.719641 |
f8d0c7ea7f201118a072a6fce98f54b42edb4e97
| 524 |
py
|
Python
|
Lab5/load_graph.py
|
YuryMalyshev/CAD-with-Python
|
ecbb82b8efb436e7089b0895dc898cf956351046
|
[
"MIT"
] | null | null | null |
Lab5/load_graph.py
|
YuryMalyshev/CAD-with-Python
|
ecbb82b8efb436e7089b0895dc898cf956351046
|
[
"MIT"
] | null | null | null |
Lab5/load_graph.py
|
YuryMalyshev/CAD-with-Python
|
ecbb82b8efb436e7089b0895dc898cf956351046
|
[
"MIT"
] | null | null | null |
import numpy as np
| 32.75 | 101 | 0.532443 |
f8d0d6ecca8d12cee0a53f9628644c363e8839b3
| 1,055 |
py
|
Python
|
python/smqtk/utils/simple_timer.py
|
jbeezley/SMQTK
|
e6b00f94be95f39bbca52a7983ac3d6d1f86f847
|
[
"BSD-3-Clause"
] | 82 |
2015-01-07T15:33:29.000Z
|
2021-08-11T18:34:05.000Z
|
python/smqtk/utils/simple_timer.py
|
jbeezley/SMQTK
|
e6b00f94be95f39bbca52a7983ac3d6d1f86f847
|
[
"BSD-3-Clause"
] | 230 |
2015-04-08T14:36:51.000Z
|
2022-03-14T17:55:30.000Z
|
python/smqtk/utils/simple_timer.py
|
DigitalCompanion/SMQTK
|
fc9404b69150ef44f24423844bc80735c0c2b669
|
[
"BSD-3-Clause"
] | 65 |
2015-01-04T15:00:16.000Z
|
2021-11-19T18:09:11.000Z
|
import time
from smqtk.utils import SmqtkObject
| 27.051282 | 74 | 0.525118 |
f8d1533d26da78b1c9ff1203760b4a5ae33a69c7
| 206 |
py
|
Python
|
1546.py
|
ShawonBarman/URI-Online-judge-Ad-Hoc-level-problem-solution-in-python
|
9a0f0ad5efd4a9e73589c357ab4b34b7c73a11da
|
[
"MIT"
] | 1 |
2022-01-14T08:45:32.000Z
|
2022-01-14T08:45:32.000Z
|
1546.py
|
ShawonBarman/URI-Online-judge-Ad-Hoc-level-problem-solution-in-python
|
9a0f0ad5efd4a9e73589c357ab4b34b7c73a11da
|
[
"MIT"
] | null | null | null |
1546.py
|
ShawonBarman/URI-Online-judge-Ad-Hoc-level-problem-solution-in-python
|
9a0f0ad5efd4a9e73589c357ab4b34b7c73a11da
|
[
"MIT"
] | null | null | null |
arr = ['', 'Rolien', 'Naej', 'Elehcim', 'Odranoel']
n = int(input())
while n != 0:
n -= 1
k = int(input())
while k != 0:
k -= 1
num = int(input())
print(arr[num])
| 22.888889 | 52 | 0.417476 |
f8d1e3f53857745560685cc9254effe945b354f9
| 3,314 |
py
|
Python
|
portl.py
|
blackc8/portl
|
8be36d67db2041071d5169204902ec9fff6aabe9
|
[
"MIT"
] | null | null | null |
portl.py
|
blackc8/portl
|
8be36d67db2041071d5169204902ec9fff6aabe9
|
[
"MIT"
] | 1 |
2020-10-31T15:32:31.000Z
|
2020-10-31T15:33:11.000Z
|
portl.py
|
blackc8/portl
|
8be36d67db2041071d5169204902ec9fff6aabe9
|
[
"MIT"
] | null | null | null |
import socket, time, sys
import argparse
__version__="0.1"
min_port=0
#max_port=65535
max_port=10000
parser = argparse.ArgumentParser(description="a simple python port scanner",epilog="author: blackc8")
parser.add_argument("hostname",metavar="<hostname>",help="host to scan")
parser.add_argument("-dp","--ddport",help="do not display port",action="store_true")
parser.add_argument("-sF","--show_filtered",help="show filtered ports",action="store_true")
parser.add_argument("-b","--banner",help="grab the banners of ports",action="store_true")
parser.add_argument("-v","--version",help="dispaly version",action="version",version="%(prog)s ("+__version__+")")
args=parser.parse_args()
if __name__ == "__main__":
scan(args.hostname,args.ddport,args.banner,args.show_filtered)
| 30.971963 | 114 | 0.624925 |
f8d25c456ce1d78680f761522a288c787f746b68
| 4,730 |
py
|
Python
|
Python/MachineLearning_Ng/examples/ex2.py
|
Ritetsu/lizhe_Notes
|
4c465b5e23c1e520f9508314cfda7f26517d6dd3
|
[
"MIT"
] | null | null | null |
Python/MachineLearning_Ng/examples/ex2.py
|
Ritetsu/lizhe_Notes
|
4c465b5e23c1e520f9508314cfda7f26517d6dd3
|
[
"MIT"
] | null | null | null |
Python/MachineLearning_Ng/examples/ex2.py
|
Ritetsu/lizhe_Notes
|
4c465b5e23c1e520f9508314cfda7f26517d6dd3
|
[
"MIT"
] | 1 |
2021-07-07T12:01:42.000Z
|
2021-07-07T12:01:42.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 16 20:15:55 2019
@author: Shinelon
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
path='ex2data1.txt'
data=pd.read_csv(path,header=None,names=['Exam1','Exam2','Admitted'])
data.head()
#
positive=data[data['Admitted'].isin([1])]
negative=data[data['Admitted'].isin([0])]
fig,ax=plt.subplots(figsize=(12,8))
#c=colors20
ax.scatter(positive['Exam1'],positive['Exam2'],c='b',marker='o',label='Admitted')
ax.scatter(negative['Exam1'],negative['Exam2'],c='r',marker='o',label='Unadimitted')
ax.legend(loc=4)
ax.set_xlabel('Exam1 Score');ax.set_ylabel('Exam2 Score')
plt.show()#
#sigmoid
#/sigmoid
nums=np.arange(-10,10,1)
fig,ax=plt.subplots(figsize=(12,8))
ax.plot(nums,sigmoid(nums),'r')
plt.show()
data.insert(0,'ones',1)#011
cols=data.shape[1]
X=data.iloc[:,0:cols-1]
y=data.iloc[:,cols-1:cols]
X=np.array(X.values)
y=np.array(y.values)
theta=np.zeros(3)
cost(theta,X,y)
gradientDescent(theta,X,y)#theta
#SciPyTruncatedNewton
import scipy.optimize as opt
result=opt.fmin_tnc(func=cost,x0=theta,fprime=gradientDescent,args=(X,y))
result#theta
cost(result[0],X,y)
#
theta_min=np.matrix(result[0])#theta_min1x3
predictions=predict(theta_min,X)
correct=[1 if((a==1 and b==1) or (a==0 and b==0))\
else 0 for (a,b) in zip(predictions,y)]
accuracy=(sum(map(int,correct))/len(correct))
print('accuracy={}'.format(accuracy))#
path2='ex2data2.txt'
data2=pd.read_csv(path2,header=None,names=['Test1','Test2','Accepted'])
data2.head()
positive=data2[data2['Accepted'].isin([1])]
negative=data2[data2['Accepted'].isin([0])]
fig,ax=plt.subplots(figsize=(12,8))
ax.scatter(positive['Test1'],positive['Test2'],s=50,c='b',\
marker='o',label='Accepted')
ax.scatter(negative['Test1'],negative['Test2'],s=50,c='r',\
marker='x',label='Rejected')
ax.legend()
ax.set_xlabel('Test1 Score')
ax.set_ylabel('Test2 Score')
plt.show()
#
degree=5
x1=data2['Test1']
x2=data2['Test2']
data2.insert(3,'ones',1)
for i in range(1,degree):
for j in range(0,i):
data2['F'+str(i)+str(j)]=np.power(x1,i-j)*np.power(x2,j)
data2.drop('Test1',axis=1,inplace=True)#axis=01TRUE
data2.drop('Test2',axis=1,inplace=True)
data2.head()
#
#theta
cols=data2.shape[1]
X2=data2.iloc[:,1:cols]
y2=data2.iloc[:,0:1]
X2=np.array(X2.values)
y2=np.array(y2.values)
theta2=np.zeros(11)
learningRate=1
costReg(theta2,X2,y2,learningRate)
gradientReg(theta2,X2,y2,learningRate)
result2=opt.fmin_tnc(func=costReg,x0=theta2,fprime=gradientReg,\
args=(X2,y2,learningRate))
result2
#
theta_min=np.matrix(result2[0])
predictions=predict(theta_min,X2)
correct=[1 if ((a==1 and b==1) or (a==0 and b==0))\
else 0 for (a,b) in zip(predictions,y2)]
accuracy=(sum(map(int,correct))/len(correct))
print('accuracy2={}%'.format(accuracy*100))
#sklearn
from sklearn import linear_model
model=linear_model.LogisticRegression(penalty='l2',\
C=1.0,solver='liblinear')
model.fit(X2,y2.ravel())
model.score(X2,y2)
| 31.533333 | 84 | 0.679281 |
6ef1130a48a6b7d1320ed14916f6226e73d226a4
| 855 |
py
|
Python
|
questioning/trade/migrations/0003_auto_20190901_1710.py
|
PythonerKK/questioning
|
c30fb2d31a221398df392f4b8faab539d6921ac0
|
[
"MIT"
] | 6 |
2019-12-31T05:23:27.000Z
|
2021-03-12T19:23:34.000Z
|
questioning/trade/migrations/0003_auto_20190901_1710.py
|
PythonerKK/questioning
|
c30fb2d31a221398df392f4b8faab539d6921ac0
|
[
"MIT"
] | 4 |
2020-05-12T13:39:40.000Z
|
2021-02-08T20:35:18.000Z
|
questioning/trade/migrations/0003_auto_20190901_1710.py
|
PythonerKK/questioning
|
c30fb2d31a221398df392f4b8faab539d6921ac0
|
[
"MIT"
] | 1 |
2019-09-02T07:15:16.000Z
|
2019-09-02T07:15:16.000Z
|
# Generated by Django 2.1.9 on 2019-09-01 09:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 29.482759 | 129 | 0.661988 |
6ef13886b158bd50cda282a2108b24f47033b23c
| 3,557 |
py
|
Python
|
Ch5/rbf.py
|
jason-168/MLCode
|
429c17e004fb41ba16c371416c8f73833ab8fc1d
|
[
"Xnet",
"X11"
] | 146 |
2016-05-24T02:55:53.000Z
|
2022-03-23T14:54:42.000Z
|
Ch5/rbf.py
|
coky/MarslandMLAlgo
|
4277b24db88c4cb70d6b249921c5d21bc8f86eb4
|
[
"Xnet",
"X11"
] | 1 |
2017-08-17T23:07:39.000Z
|
2017-08-18T08:27:19.000Z
|
Ch5/rbf.py
|
coky/MarslandMLAlgo
|
4277b24db88c4cb70d6b249921c5d21bc8f86eb4
|
[
"Xnet",
"X11"
] | 94 |
2016-05-06T12:34:33.000Z
|
2022-03-30T03:31:04.000Z
|
# Code from Chapter 5 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by Stephen Marsland (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# Stephen Marsland, 2008, 2014
import numpy as np
import pcn
import kmeans
| 34.872549 | 127 | 0.576047 |
6ef1936814cf84ad0a8d2c89da28ee0ee1c74c44
| 929 |
py
|
Python
|
_correlation_grad.py
|
jgorgenucsd/corr_tf
|
ad777821283f7d18d8bdd04d584e12df9a3fba69
|
[
"BSD-2-Clause"
] | 22 |
2017-12-27T07:37:14.000Z
|
2021-11-15T05:51:44.000Z
|
_correlation_grad.py
|
jgorgenucsd/corr_tf
|
ad777821283f7d18d8bdd04d584e12df9a3fba69
|
[
"BSD-2-Clause"
] | 4 |
2018-04-03T17:08:43.000Z
|
2019-08-07T08:55:24.000Z
|
_correlation_grad.py
|
jgorgenucsd/corr_tf
|
ad777821283f7d18d8bdd04d584e12df9a3fba69
|
[
"BSD-2-Clause"
] | 12 |
2018-02-06T02:35:12.000Z
|
2022-03-02T07:18:19.000Z
|
#!/usr/bin/env python3
"""
Gradients for inner product.
"""
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
correlation_grad_module = tf.load_op_library('./build/libcorrelation_grad.so')
| 38.708333 | 163 | 0.750269 |
6ef32ce891baec71eb1386e2c2b81b8e89a8b9a4
| 123 |
py
|
Python
|
src/hcrystalball/metrics/__init__.py
|
betatim/hcrystalball
|
693b9b406f05afa23cfc4647c43260166a7076fe
|
[
"MIT"
] | 1 |
2021-04-12T17:08:17.000Z
|
2021-04-12T17:08:17.000Z
|
src/hcrystalball/metrics/__init__.py
|
betatim/hcrystalball
|
693b9b406f05afa23cfc4647c43260166a7076fe
|
[
"MIT"
] | null | null | null |
src/hcrystalball/metrics/__init__.py
|
betatim/hcrystalball
|
693b9b406f05afa23cfc4647c43260166a7076fe
|
[
"MIT"
] | 1 |
2022-01-03T16:02:35.000Z
|
2022-01-03T16:02:35.000Z
|
from ._scorer import make_ts_scorer
from ._scorer import get_scorer
__all__ = [
"get_scorer",
"make_ts_scorer",
]
| 15.375 | 35 | 0.723577 |
6ef9b4082cb1779ade1e3f88552ad789562c6383
| 2,776 |
py
|
Python
|
tests/selenium/auth/test_user.py
|
bodik/sner4-web
|
cb054d79c587b2f8468c73a88754b7c0d5cd5a95
|
[
"MIT"
] | 9 |
2019-05-15T11:33:43.000Z
|
2022-02-17T04:05:28.000Z
|
tests/selenium/auth/test_user.py
|
bodik/sner4
|
cb054d79c587b2f8468c73a88754b7c0d5cd5a95
|
[
"MIT"
] | 1 |
2019-03-01T11:48:13.000Z
|
2019-03-01T11:48:13.000Z
|
tests/selenium/auth/test_user.py
|
bodik/sner4-web
|
cb054d79c587b2f8468c73a88754b7c0d5cd5a95
|
[
"MIT"
] | 3 |
2020-03-03T21:06:37.000Z
|
2021-01-11T14:40:56.000Z
|
# This file is part of sner4 project governed by MIT license, see the LICENSE.txt file.
"""
auth.views.user selenium tests
"""
from flask import url_for
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from sner.server.auth.models import User
from sner.server.extensions import db
from tests.selenium import dt_inrow_delete, dt_rendered, webdriver_waituntil
def test_user_list_route(live_server, sl_admin, user): # pylint: disable=unused-argument
"""simple test ajaxed datatable rendering"""
sl_admin.get(url_for('auth.user_list_route', _external=True))
dt_rendered(sl_admin, 'user_list_table', user.username)
def test_user_list_route_inrow_delete(live_server, sl_admin, user): # pylint: disable=unused-argument
"""delete user inrow button"""
user_id = user.id
db.session.expunge(user)
sl_admin.get(url_for('auth.user_list_route', _external=True))
# in this test-case there are multiple items in the table (current_user, test_user), hence index which to delete has to be used
dt_inrow_delete(sl_admin, 'user_list_table', 1)
assert not User.query.get(user_id)
def test_user_apikey_route(live_server, sl_admin, user): # pylint: disable=unused-argument
"""apikey generation/revoking feature tests"""
sl_admin.get(url_for('auth.user_list_route', _external=True))
dt_rendered(sl_admin, 'user_list_table', user.username)
# disable fade, the timing interferes with the test
sl_admin.execute_script('$("div#modal-global").toggleClass("fade")')
sl_admin.find_element_by_xpath('//a[@data-url="%s"]' % url_for('auth.user_apikey_route', user_id=user.id, action='generate')).click()
webdriver_waituntil(sl_admin, EC.visibility_of_element_located((By.XPATH, '//h4[@class="modal-title" and text()="Apikey operation"]')))
sl_admin.find_element_by_xpath('//div[@id="modal-global"]//button[@class="close"]').click()
webdriver_waituntil(sl_admin, EC.invisibility_of_element_located((By.XPATH, '//div[@class="modal-global"')))
dt_rendered(sl_admin, 'user_list_table', user.username)
db.session.refresh(user)
assert user.apikey
sl_admin.find_element_by_xpath('//a[@data-url="%s"]' % url_for('auth.user_apikey_route', user_id=user.id, action='revoke')).click()
webdriver_waituntil(sl_admin, EC.visibility_of_element_located((By.XPATH, '//h4[@class="modal-title" and text()="Apikey operation"]')))
sl_admin.find_element_by_xpath('//div[@id="modal-global"]//button[@class="close"]').click()
webdriver_waituntil(sl_admin, EC.invisibility_of_element_located((By.XPATH, '//div[@class="modal-global"')))
dt_rendered(sl_admin, 'user_list_table', user.username)
db.session.refresh(user)
assert not user.apikey
| 46.266667 | 139 | 0.747839 |
6efaa56371bdc91af714b2ef343d987547b208e3
| 936 |
py
|
Python
|
isobmff/media_file.py
|
kentoku24/isobmff
|
6877505a75915caf440bbb80b6024ba6bf9f3baa
|
[
"MIT"
] | 6 |
2017-08-31T01:55:37.000Z
|
2018-12-26T03:03:24.000Z
|
isobmff/media_file.py
|
kentoku24/isobmff
|
6877505a75915caf440bbb80b6024ba6bf9f3baa
|
[
"MIT"
] | 4 |
2017-08-29T03:47:16.000Z
|
2017-09-05T09:00:17.000Z
|
isobmff/media_file.py
|
m-hiki/isbmff
|
0724b9892884ae35bdd0796a97a9506098c4cd25
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .box import indent
from .box import read_box
| 26.742857 | 53 | 0.459402 |
6efc120e05e2c5cbca1587bd26026c11c811582c
| 2,257 |
py
|
Python
|
python homework 2.py
|
pkpatricia/python34
|
5ee4f864444b8835f8ee6cf416bc9fd1d969595b
|
[
"bzip2-1.0.6"
] | null | null | null |
python homework 2.py
|
pkpatricia/python34
|
5ee4f864444b8835f8ee6cf416bc9fd1d969595b
|
[
"bzip2-1.0.6"
] | null | null | null |
python homework 2.py
|
pkpatricia/python34
|
5ee4f864444b8835f8ee6cf416bc9fd1d969595b
|
[
"bzip2-1.0.6"
] | null | null | null |
Python 3.4.1 (v3.4.1:c0e311e010fc, May 18 2014, 10:38:22) [MSC v.1600 32 bit (Intel)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> ================================ RESTART ================================
>>>
>>> ================================ RESTART ================================
>>>
What is your favorite color?blue
I like that color too
>>> ================================ RESTART ================================
>>>
What is your favorite color?black
I do not care too much for that color
>>> ================================ RESTART ================================
>>>
What is your favorite color?green
That is my favorite color.
>>> ================================ RESTART ================================
>>>
What is your favorite color?green
That is my 2nd favorite color.
>>> ================================ RESTART ================================
>>>
What is your favorite color?violet
Traceback (most recent call last):
File "C:/Users/P/Desktop/two_b.py", line 7, in <module>
rank = color.index(pick) + 1
ValueError: 'violet' is not in list
>>> ================================ RESTART ================================
>>>
What is your favorite color?violet
I do not care too much for that color
>>> ================================ RESTART ================================
>>>
What is your favorite color?yello
I do not care too much for that color
>>> ================================ RESTART ================================
>>>
What is your favorite color?yellow
That is my 6th favorite color.
>>> ================================ RESTART ================================
>>>
Bach
Antheil
Chopin
Mozart
Handel
>>> ================================ RESTART ================================
>>>
Please enter a lower bound: 4
Please enter an upper bound: 23
2**4=16
2**5=32
2**6=64
2**7=128
2**8=256
2**9=512
2**10=1024
2**11=2048
2**12=4096
2**13=8192
2**14=16384
2**15=32768
2**16=65536
2**17=131072
2**18=262144
2**19=524288
2**20=1048576
2**21=2097152
2**22=4194304
>>> ================================ RESTART ================================
>>>
Please enter a lower bound: 0
Please enter an upper bound: 6
2**0 = 1
2**1 = 2
2**2 = 4
2**3 = 8
2**4 = 16
2**5 = 32
2**6 = 64
>>>
| 27.52439 | 94 | 0.438635 |
6efc25feb8365613f08bcea149b9338afcb635e2
| 3,690 |
py
|
Python
|
mlw/build_database.py
|
imjoseangel/hacktheplanet2021
|
bffc4f9a4f821fcfe2215244f5b563effe6982e5
|
[
"MIT"
] | 1 |
2021-02-24T12:05:06.000Z
|
2021-02-24T12:05:06.000Z
|
mlw/build_database.py
|
imjoseangel/hacktheplanet2021
|
bffc4f9a4f821fcfe2215244f5b563effe6982e5
|
[
"MIT"
] | null | null | null |
mlw/build_database.py
|
imjoseangel/hacktheplanet2021
|
bffc4f9a4f821fcfe2215244f5b563effe6982e5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from glob import glob
import logging
import os
from os.path import abspath, dirname, normpath
import re
from shutil import rmtree
import sqlite3
import sys
import folium
from folium.plugins import FastMarkerCluster
from zipfile import ZipFile
import pandas as pd
import requests
from config import db
from models import MarinaLitterWatch
CLEAN_FILES = ('./CSV_1', './CSV_2')
ZIP_FILE = 'fme.zip'
DB_FILE = 'mlw.db'
MAP_FILE = 'locations.html'
# Set Logging
logging.basicConfig(format="%(asctime)s %(levelname)s: %(message)s",
datefmt="%d-%b-%y %H:%M:%S", stream=sys.stdout, level=logging.INFO)
# Set local path
here = normpath(abspath(dirname(__file__)))
# Download data
logging.info("Downloading data...")
response = requests.get(
'http://fme.discomap.eea.europa.eu/fmedatadownload/MarineLitter/MLWPivotExport.fmw'
'?CommunityCode=&FromDate=2010-01-01&ToDate=2022-12-31'
'&opt_showresult=false&opt_servicemode=sync')
downloadlink = re.search(
r"<a\s+(?:[^>]*?\s+)?href=([\"'])(.*?)\1>", response.content.decode()).group(2)
logging.info("Saving data...")
zipfile = requests.get(downloadlink)
open(f'{here}/{ZIP_FILE}', 'wb').write(zipfile.content)
logging.info("Uzipping data...")
zipObject = ZipFile(f'{here}/{ZIP_FILE}', 'r')
zipObject.extractall(path=here)
logging.info("Loading data...")
# Data to initialize database with
data = pd.read_csv(
f'{here}/CSV_1/MLW_PivotExport/MLW_Data.csv', encoding="ISO-8859-1")
# Delete database file if it exists currently
if os.path.exists(f'{here}/{DB_FILE}'):
os.remove(f'{here}/{DB_FILE}')
# Create the database
db.create_all()
# populate the database
conn = sqlite3.connect(f'{here}/{DB_FILE}')
data.to_sql('mlw', conn, if_exists='append')
db.session.commit()
# Create Map
folium_map = folium.Map(location=[40.416729, -3.703339],
zoom_start=3, min_zoom=3,
tiles='Stamen Terrain')
callback = ('function (row) {'
'var marker = L.marker(new L.LatLng(row[0], row[1]), {color: "red"});'
'var icon = L.AwesomeMarkers.icon({'
"icon: 'info-sign',"
"iconColor: 'white',"
"markerColor: 'red',"
"prefix: 'glyphicon',"
"extraClasses: 'fa-rotate-0'"
'});'
'marker.setIcon(icon);'
"var popup = L.popup({maxWidth: '300'});"
"const display_text = {text: row[2]};"
"var mytext = $(`<div id='mytext' class='display_text' style='width: 100.0%; height: 100.0%;'> ${display_text.text}</div>`)[0];"
"popup.setContent(mytext);"
"marker.bindPopup(popup);"
'return marker};')
FastMarkerCluster(data=list(
zip(data['lat_y1'].values, data['lon_x1'].values, data['BeachName'].values)), callback=callback).add_to(folium_map)
folium.LayerControl().add_to(folium_map)
folium_map.save(f'{here}/templates/{MAP_FILE}')
# Clean files
logging.info("Cleaning files...")
for path_spec in CLEAN_FILES:
# Make paths absolute and relative to this path
abs_paths = glob(os.path.normpath(
os.path.join(here, path_spec)))
for path in [str(p) for p in abs_paths]:
if not path.startswith(here):
# Die if path in CLEAN_FILES is absolute + outside this directory
raise ValueError(
"%s is not a path inside %s" % (path, here))
logging.info(f'removing {os.path.relpath(path)}')
rmtree(path)
logging.info(f'removing {ZIP_FILE}')
os.remove(f'{here}/{ZIP_FILE}')
| 31.810345 | 140 | 0.644986 |
6efcad9f388b05b3d7f79c0c4ad5c784bb1826e5
| 3,486 |
py
|
Python
|
domotica/configuration.py
|
jjmartinr01/gauss3
|
1c71c44430e0f15fb2f3f83d32ad66bb1b7e3e94
|
[
"MIT"
] | null | null | null |
domotica/configuration.py
|
jjmartinr01/gauss3
|
1c71c44430e0f15fb2f3f83d32ad66bb1b7e3e94
|
[
"MIT"
] | null | null | null |
domotica/configuration.py
|
jjmartinr01/gauss3
|
1c71c44430e0f15fb2f3f83d32ad66bb1b7e3e94
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
TIPO = 'selectable' # 'basic' or 'selectable'. 'basic': necesario para el funcionamiento del programa
# 'selectable': No necesario. Aade nuevas funcionalidades al programa
# Por ejemplo autenticar es 'basic', pero actas es prescindible
# El code_menu debe ser nico y se configurar como un permiso del sistema
MENU_DEFAULT = [
{'code_menu': 'acceso_domotica',
'texto_menu': 'Domtica',
'href': '',
'nivel': 1,
'tipo': 'Accesible',
'pos': 1,
},
{'code_menu': 'acceso_grupos_domotica',
'texto_menu': 'Agrupaciones de dispositivos',
'href': 'grupos_domotica',
'nivel': 2,
'tipo': 'Accesible',
'pos': 1,
'parent': 'acceso_domotica'
},
{'code_menu': 'acceso_configura_domotica',
'texto_menu': 'Configurar domtica',
'href': 'configura_domotica',
'nivel': 2,
'tipo': 'Accesible',
'pos': 2,
'parent': 'acceso_domotica'
}
]
# Se aaden otros permisos para el usuario
PERMISOS = [{'code_nombre': 'crea_grupos_domotica',
'nombre': 'Permiso para crear un grupo de dispositivos domticos',
'menu': 'acceso_grupos_domotica'
},
{'code_nombre': 'borra_grupos_domotica',
'nombre': 'Permiso para borrar cualquier grupo que contiene domtica',
'menu': 'acceso_grupos_domotica'
},
{'code_nombre': 'edita_grupos_domotica',
'nombre': 'Permiso para modificar cualquier grupo que contiene domtica',
'menu': 'acceso_grupos_domotica'
},
{'code_nombre': 'crea_dispositivos_domotica',
'nombre': 'Permiso para crear un dispositivo domtico',
'menu': 'acceso_configura_domotica'
},
{'code_nombre': 'borra_dispositivos_domotica',
'nombre': 'Permiso para borrar cualquier dispositivo domtico',
'menu': 'acceso_configura_domotica'
},
{'code_nombre': 'edita_dispositivos_domotica',
'nombre': 'Permiso para editar cualquier dispositivo domtico',
'menu': 'acceso_configura_domotica'
},
{'code_nombre': 'crea_secuencias_domotica',
'nombre': 'Permiso para crear una secuencia domtica',
'menu': 'acceso_configura_domotica'
},
{'code_nombre': 'borra_secuencias_domotica',
'nombre': 'Permiso para borrar cualquier secuencia domtica',
'menu': 'acceso_configura_domotica'
},
{'code_nombre': 'edita_secuencias_domotica',
'nombre': 'Permiso para modificar cualquier secuencia domtica',
'menu': 'acceso_configura_domotica'
},
{'code_nombre': 'crea_conjuntos_domotica',
'nombre': 'Permiso para crear un conjunto de dispositivos domticos',
'menu': 'acceso_configura_domotica'
},
{'code_nombre': 'borra_conjuntos_domotica',
'nombre': 'Permiso para borrar cualquier conjunto de dispositivos domticos',
'menu': 'acceso_configura_domotica'
},
{'code_nombre': 'edita_conjuntos_domotica',
'nombre': 'Permiso para modificar cualquier conjunto de dispositivos domticos',
'menu': 'acceso_configura_domotica'
}
]
| 41.011765 | 103 | 0.592943 |
6efceaaf9fe7bf6e6a3d8409b3f03d38e6342a11
| 5,944 |
py
|
Python
|
eval.py
|
itisianlee/hawk-facedet
|
55774ac5619f9a4c76a3a872ff11940a874b32d1
|
[
"Apache-2.0"
] | null | null | null |
eval.py
|
itisianlee/hawk-facedet
|
55774ac5619f9a4c76a3a872ff11940a874b32d1
|
[
"Apache-2.0"
] | null | null | null |
eval.py
|
itisianlee/hawk-facedet
|
55774ac5619f9a4c76a3a872ff11940a874b32d1
|
[
"Apache-2.0"
] | null | null | null |
import os
import cv2
import fire
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from configs.common import config as cfg
from hawkdet.models.build import build_detor
from hawkdet.lib.numpy_nms import np_nms
from hawkdet.lib.box_utils import decode, decode_landm
from hawkdet.lib.prior_box import PriorBox
if __name__ == '__main__':
fire.Fire({"run": run})
exit()
| 33.206704 | 105 | 0.57924 |
6efe244e5a0524f99c737e6f3d3da045c5866cd7
| 101 |
py
|
Python
|
hello.py
|
zhuiyue568/test27
|
f96438c29711b62000eb363ff32c059529a0e142
|
[
"MIT"
] | null | null | null |
hello.py
|
zhuiyue568/test27
|
f96438c29711b62000eb363ff32c059529a0e142
|
[
"MIT"
] | null | null | null |
hello.py
|
zhuiyue568/test27
|
f96438c29711b62000eb363ff32c059529a0e142
|
[
"MIT"
] | null | null | null |
name="zhuiyue"
num="123456"
num=111
num3=333
str="keep going"
num4=666
num5=888
num5=777
num6=999
| 7.769231 | 16 | 0.722772 |
3e00ea020dca2ee0cd420f43a2015391aba2eabc
| 2,491 |
py
|
Python
|
src/keydra/providers/contentful.py
|
jangroth/keydra
|
9bab1b21e025ceb6ae074ea936d693e36efae5a4
|
[
"MIT"
] | 12 |
2021-05-04T10:47:02.000Z
|
2022-03-10T13:25:04.000Z
|
src/keydra/providers/contentful.py
|
jangroth/keydra
|
9bab1b21e025ceb6ae074ea936d693e36efae5a4
|
[
"MIT"
] | 17 |
2021-05-04T00:53:49.000Z
|
2022-01-18T10:01:49.000Z
|
src/keydra/providers/contentful.py
|
jangroth/keydra
|
9bab1b21e025ceb6ae074ea936d693e36efae5a4
|
[
"MIT"
] | 9 |
2021-05-04T00:46:38.000Z
|
2022-02-16T02:55:50.000Z
|
from keydra.clients.contentful import ContentfulClient
from keydra.providers.base import BaseProvider
from keydra.providers.base import exponential_backoff_retry
from keydra.exceptions import DistributionException
from keydra.exceptions import RotationException
from keydra.logging import get_logger
LOGGER = get_logger()
PW_FIELD = 'secret'
| 28.965116 | 79 | 0.566439 |
3e017ed1492cc6fe4bfc5ac25bc91b6acc5c2bd6
| 1,266 |
py
|
Python
|
numbas_lti/migrations/0063_auto_20210211_1307.py
|
jhoobergs/numbas-lti-provider
|
9d673e0ec8dcb085bd783e949c3ee179e507be5c
|
[
"Apache-2.0"
] | 6 |
2016-12-12T14:41:33.000Z
|
2021-04-18T01:04:23.000Z
|
numbas_lti/migrations/0063_auto_20210211_1307.py
|
jhoobergs/numbas-lti-provider
|
9d673e0ec8dcb085bd783e949c3ee179e507be5c
|
[
"Apache-2.0"
] | 206 |
2016-08-24T13:53:07.000Z
|
2022-03-31T09:14:43.000Z
|
numbas_lti/migrations/0063_auto_20210211_1307.py
|
jhoobergs/numbas-lti-provider
|
9d673e0ec8dcb085bd783e949c3ee179e507be5c
|
[
"Apache-2.0"
] | 13 |
2016-10-23T04:53:30.000Z
|
2022-02-17T09:25:00.000Z
|
# Generated by Django 2.2.13 on 2021-02-11 13:07
from django.db import migrations, models
import django.db.models.deletion
| 33.315789 | 156 | 0.650869 |
3e035da887a72ca05d47f4e04f4fd021e19671d0
| 1,356 |
py
|
Python
|
sahyun_bot/utils_session.py
|
TheGoodlike13/sahyun-bot
|
8ebc3d4e58a0acf9bde3c9ea8339145abcc53fcb
|
[
"MIT"
] | 1 |
2022-02-21T18:55:34.000Z
|
2022-02-21T18:55:34.000Z
|
sahyun_bot/utils_session.py
|
TheGoodlike13/sahyun-bot
|
8ebc3d4e58a0acf9bde3c9ea8339145abcc53fcb
|
[
"MIT"
] | null | null | null |
sahyun_bot/utils_session.py
|
TheGoodlike13/sahyun-bot
|
8ebc3d4e58a0acf9bde3c9ea8339145abcc53fcb
|
[
"MIT"
] | null | null | null |
from requests import Session
from requests.adapters import HTTPAdapter
from urllib3 import Retry
from sahyun_bot.utils_logging import HttpDump
DEFAULT_RETRY_COUNT = 3
RETRY_ON_METHOD = frozenset([
'HEAD', 'GET', 'POST', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'
])
RETRY_ON_STATUS = frozenset([
403, 429, 500, 502, 503, 504
])
| 30.133333 | 117 | 0.668142 |
3e03fc65e12b6935503f8e6630624fed1809bd0e
| 5,763 |
py
|
Python
|
EzLibrarianApplication/DAO/BookCirculationDAO.py
|
coregameHD/SmartLib_Librarian
|
31b58a4aab648ee9110ba6a78d5fcab942267380
|
[
"MIT"
] | null | null | null |
EzLibrarianApplication/DAO/BookCirculationDAO.py
|
coregameHD/SmartLib_Librarian
|
31b58a4aab648ee9110ba6a78d5fcab942267380
|
[
"MIT"
] | null | null | null |
EzLibrarianApplication/DAO/BookCirculationDAO.py
|
coregameHD/SmartLib_Librarian
|
31b58a4aab648ee9110ba6a78d5fcab942267380
|
[
"MIT"
] | 2 |
2018-10-01T14:08:25.000Z
|
2020-09-30T03:02:15.000Z
|
import json
import requests
from datetime import datetime, timedelta
from BookCirculation import BookCirculation
from DAO.AbstractDAO import AbstractDAO
from DAO.BookDAO import BookDAO
from DAO.UserDAO import UserDAO
from constant import *
from datetime import datetime
if __name__ == "__main__":
bookCirculationDAO = BookCirculationDAO()
for circulation in bookCirculationDAO.getAllCirculations():
print(str(circulation))
| 39.472603 | 143 | 0.622245 |
3e063c3a08ca1b49f1f08adcb5b79cf09de3aefe
| 4,128 |
py
|
Python
|
flask_mm/managers/__init__.py
|
szkkteam/flask_mm
|
ea96899a41a0573e51792f1554550c6d77f22a07
|
[
"MIT"
] | 1 |
2021-03-21T18:46:36.000Z
|
2021-03-21T18:46:36.000Z
|
flask_mm/managers/__init__.py
|
szkkteam/flask_mm
|
ea96899a41a0573e51792f1554550c6d77f22a07
|
[
"MIT"
] | null | null | null |
flask_mm/managers/__init__.py
|
szkkteam/flask_mm
|
ea96899a41a0573e51792f1554550c6d77f22a07
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Common Python library imports
import os
# Pip package imports
from six.moves.urllib.parse import urljoin
from flask import url_for, request, abort
from werkzeug import secure_filename, FileStorage, cached_property
# Internal package imports
from flask_mm.utils import UuidNameGen
from flask_mm.files import extension, lower_extension
from flask_mm.storages import BaseStorage
DEFAULT_MANAGER = 'file'
| 35.282051 | 107 | 0.66376 |
3e07225d9f986640eeceeb3fecfcd08a0bbf84a5
| 1,627 |
py
|
Python
|
web/api/user/core.py
|
cclrobotics/ARTBot
|
a0bffabebbc09361bf7748741fe3d30c78af8fbd
|
[
"MIT"
] | 5 |
2020-12-04T19:28:42.000Z
|
2021-12-07T16:14:28.000Z
|
web/api/user/core.py
|
cclrobotics/ARTBot
|
a0bffabebbc09361bf7748741fe3d30c78af8fbd
|
[
"MIT"
] | 50 |
2019-10-08T19:47:24.000Z
|
2021-07-26T05:43:37.000Z
|
web/api/user/core.py
|
cclrobotics/ARTBot
|
a0bffabebbc09361bf7748741fe3d30c78af8fbd
|
[
"MIT"
] | 4 |
2019-10-23T04:14:49.000Z
|
2021-08-01T01:22:37.000Z
|
from functools import partial
from marshmallow import ValidationError
from web.extensions import db
from .validators import validate_user_token
from .serializers import SuperUserSchema
from .exceptions import InvalidUsage
from .user import SuperUser
def delete_superuser(id, created_at_timestamp):
"""
Delete a user record from the SuperUser table
For added security, must provide exact creation datetime
of the user, in timestamp format
"""
s_user = SuperUser.get_by_id(id)
validate_user_token(s_user, created_at_timestamp)
s_user.delete()
db.session.commit()
return s_user.email, True
| 31.288462 | 95 | 0.761524 |
3e07f9fff3837dd41ad8b264e8c09d1d22e6939d
| 12,853 |
py
|
Python
|
digsby/src/util/auxencodings.py
|
ifwe/digsby
|
f5fe00244744aa131e07f09348d10563f3d8fa99
|
[
"Python-2.0"
] | 35 |
2015-08-15T14:32:38.000Z
|
2021-12-09T16:21:26.000Z
|
digsby/src/util/auxencodings.py
|
niterain/digsby
|
16a62c7df1018a49eaa8151c0f8b881c7e252949
|
[
"Python-2.0"
] | 4 |
2015-09-12T10:42:57.000Z
|
2017-02-27T04:05:51.000Z
|
digsby/src/util/auxencodings.py
|
niterain/digsby
|
16a62c7df1018a49eaa8151c0f8b881c7e252949
|
[
"Python-2.0"
] | 15 |
2015-07-10T23:58:07.000Z
|
2022-01-23T22:16:33.000Z
|
'''
Registers auxillary encodings in the codecs module.
>>> 'x\x9cK\xc9L/N\xaa\x04\x00\x08\x9d\x02\x83'.decode('zip')
'digsby'
'''
from peak.util.imports import lazyModule
sys = lazyModule('sys')
warnings = lazyModule('warnings')
locale = lazyModule('locale')
collections = lazyModule('collections')
urllib = lazyModule('urllib')
urllib2 = lazyModule('urllib2')
codecs = lazyModule('codecs')
StringIO = lazyModule('StringIO')
zipfile = lazyModule('zipfile')
gzip = lazyModule('gzip')
htmlentitydefs = lazyModule('htmlentitydefs')
base64 = lazyModule('base64')
#pylzma = lazyModule('pylzma')
HAVE_LZMA = False #until proven otherwise
ENCODE_LZMA = False
__simplechars_enc = {
ord('<') : 'lt',
ord('>') : 'gt',
#ord("'") : 'apos',
ord('"') : 'quot',
ord('&') : 'amp',
}
__simplechars_dec = dict((v, unichr(k)) for k,v in __simplechars_enc.items())
__simplechars_dec['apos'] = unichr(ord("'"))
_encodings = [
lambda: locale.getpreferredencoding(),
lambda: sys.getfilesystemencoding(),
lambda: sys.getdefaultencoding(),
]
_to_register = [
]
def register_codec(name, encode, decode):
'An easy way to register a pair of encode/decode functions with a name.'
global _to_register
_to_register.append(_search)
def fuzzydecode(s, encoding = None, errors = 'strict'):
'''
Try decoding the string using several encodings, in this order.
- the one(s) you give as "encoding"
- the system's "preferred" encoding
'''
if isinstance(s, unicode):
import warnings; warnings.warn('decoding unicode is not supported!')
return s
encodings = [enc() for enc in _encodings]
if isinstance(encoding, basestring):
encodings.insert(0, encoding)
elif encoding is None:
pass
else:
encodings = list(encoding) + encodings
assert all(isinstance(e, basestring) for e in encodings)
for e in encodings:
try:
res = s.decode(e, errors)
except (UnicodeDecodeError, LookupError), _ex:
# LookupError will catch missing encodings
import warnings; warnings.warn("Exception when fuzzydecoding %r: %r" % (s, _ex))
else:
return res
return s.decode(encoding, 'replace')
register_codec('xml', _xml_encode, _xml_decode)
_to_register.append(search)
del search
_to_register.append(search)
del search
_to_register.append(search)
del search
__locale_encoding = lambda: locale.getpreferredencoding()
register_codec('locale',
lambda s, errors = 'strict': (s.encode(__locale_encoding()), len(s)),
lambda s, errors = 'strict': (s.decode(__locale_encoding()), len(s)))
__filesysencoding = lambda: sys.getfilesystemencoding()
register_codec('filesys',
_filesys_encode,
_filesys_decode)
del _filesys_encode
del _filesys_decode
register_codec('url', _url_encode, _url_decode)
# codec: utf8url
# encode = utf8 encode -> url encode
# decode = url decode -> utf8 decode
register_codec('utf8url', _utf8url_encode, _utf8url_decode)
b64_codecs = {}
b64_names = frozenset(('b64', 'b32', 'b16'))
_to_register.append(search_base64)
del search_base64
register_codec('binary', _binary_encode, _binary_decode)
__all__ = []
if __name__ == '__main__':
install()
strings = [gen_rand_str() for x in xrange(100)]
results1 = []
results2 = []
from time import clock
print 'xml', timeit(lambda: foo('xml', results1))
print 'xml2', timeit(lambda: foo('xml2', results2))
assert results1 == results2
| 29.821346 | 119 | 0.561503 |
3e08ccba7d47176de06f3bb412445c1550a56baf
| 463 |
py
|
Python
|
jaxfg/core/__init__.py
|
AvanDavad/jaxfg
|
6d1559126ba872b452eca6a13c2688349f1c5f7e
|
[
"MIT"
] | 120 |
2020-11-28T19:43:31.000Z
|
2022-03-29T02:35:46.000Z
|
jaxfg/core/__init__.py
|
AvanDavad/jaxfg
|
6d1559126ba872b452eca6a13c2688349f1c5f7e
|
[
"MIT"
] | 12 |
2021-05-24T09:02:12.000Z
|
2022-03-30T19:51:40.000Z
|
jaxfg/core/__init__.py
|
AvanDavad/jaxfg
|
6d1559126ba872b452eca6a13c2688349f1c5f7e
|
[
"MIT"
] | 9 |
2021-05-06T15:31:23.000Z
|
2022-03-23T12:06:44.000Z
|
from ._factor_base import FactorBase
from ._factor_stack import FactorStack
from ._stacked_factor_graph import StackedFactorGraph
from ._storage_metadata import StorageMetadata
from ._variable_assignments import VariableAssignments
from ._variables import RealVectorVariable, VariableBase
__all__ = [
"FactorStack",
"FactorBase",
"StackedFactorGraph",
"StorageMetadata",
"VariableAssignments",
"RealVectorVariable",
"VariableBase",
]
| 27.235294 | 56 | 0.792657 |
3e09b3a92c71458b7e09905c1beec58ee515ed7a
| 106 |
py
|
Python
|
cython/wrap_c/test_cython_wrapper.py
|
tleonhardt/Python_Interface_Cpp
|
398eab0c6e7f5e0358edb6644c71b5fdc6b2606a
|
[
"MIT"
] | 64 |
2017-03-10T09:32:22.000Z
|
2022-01-25T08:44:06.000Z
|
cython/wrap_c/test_cython_wrapper.py
|
tleonhardt/Python_Interface_Cpp
|
398eab0c6e7f5e0358edb6644c71b5fdc6b2606a
|
[
"MIT"
] | null | null | null |
cython/wrap_c/test_cython_wrapper.py
|
tleonhardt/Python_Interface_Cpp
|
398eab0c6e7f5e0358edb6644c71b5fdc6b2606a
|
[
"MIT"
] | 13 |
2017-03-13T23:28:56.000Z
|
2021-06-07T08:37:03.000Z
|
# coding=utf-8
import cyfib
| 15.142857 | 57 | 0.726415 |
3e0aba9a6fd99c2588436a872d706b50b1c4f2cd
| 1,612 |
py
|
Python
|
Server/server.py
|
mjbogusz/CCVR
|
65b11d39c1412134f8a695b30955368eb43c2518
|
[
"MIT"
] | null | null | null |
Server/server.py
|
mjbogusz/CCVR
|
65b11d39c1412134f8a695b30955368eb43c2518
|
[
"MIT"
] | null | null | null |
Server/server.py
|
mjbogusz/CCVR
|
65b11d39c1412134f8a695b30955368eb43c2518
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from http.server import SimpleHTTPRequestHandler, HTTPServer
from urllib.parse import parse_qs
import time
if __name__ == "__main__":
from sys import argv
if len(argv) == 3:
run(port = int(argv[1]), hostName = str(argv[2]))
elif len(argv) == 2:
run(port = int(argv[1]))
else:
run()
| 25.587302 | 66 | 0.673077 |
3e0adca23e72763263f72a46a3ff5aad270ff8c2
| 4,907 |
py
|
Python
|
dags/dag_update.py
|
alyildiz/btc_forecast
|
b1e70431c9f18bee0afda71b96805f6194072548
|
[
"MIT"
] | 5 |
2021-09-06T08:42:02.000Z
|
2021-11-15T15:04:57.000Z
|
dags/dag_update.py
|
alyildiz/sncf_forecast
|
b1e70431c9f18bee0afda71b96805f6194072548
|
[
"MIT"
] | null | null | null |
dags/dag_update.py
|
alyildiz/sncf_forecast
|
b1e70431c9f18bee0afda71b96805f6194072548
|
[
"MIT"
] | null | null | null |
import os
from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators.docker_operator import DockerOperator
from docker.types import Mount
default_args = {
"owner": "airflow",
"description": "Use of the DockerOperator",
"depend_on_past": False,
"start_date": datetime(2021, 5, 1),
"email_on_failure": False,
"email_on_retry": False,
"retries": 1,
"retry_delay": timedelta(minutes=5),
}
BASE_DIR = "/home/baris/PROJECTS/sncf_forecast/"
dic_env = {
"API_KEY": os.environ["API_KEY"],
"API_KEY_SECRET": os.environ["API_KEY_SECRET"],
"ACCESS_TOKEN": os.environ["ACCESS_TOKEN"],
"ACCESS_TOKEN_SECRET": os.environ["ACCESS_TOKEN_SECRET"],
"MONGODB_HOST": os.environ["MONGODB_HOST"],
"MONGODB_PORT": os.environ["MONGODB_PORT"],
"MONGO_INITDB_ROOT_USERNAME": os.environ["MONGO_INITDB_ROOT_USERNAME"],
"MONGO_INITDB_ROOT_PASSWORD": os.environ["MONGO_INITDB_ROOT_PASSWORD"],
}
with DAG("daily_update_new", default_args=default_args, schedule_interval="0 2 * * *", catchup=False) as dag:
update_db = DockerOperator(
task_id="task_____daily_update_dbmongo",
image="sncf_forecast_update",
environment=dic_env,
container_name="task_____daily_update_dbmongo",
api_version="auto",
auto_remove=True,
command="python3 /workdir/update.py",
docker_url="unix://var/run/docker.sock",
working_dir="/workdir",
mount_tmp_dir=False,
mounts=[
Mount(source=BASE_DIR + "shared", target="/workdir/shared", type="bind"),
Mount(source=BASE_DIR + "backend/modeling/src", target="/workdir/src", type="bind"),
Mount(source=BASE_DIR + "backend/update", target="/workdir", type="bind"),
],
network_mode="sncf_forecast_default",
)
update_lstm = DockerOperator(
task_id="task_____daily_update_lstm",
image="sncf_forecast_modeling",
environment=dic_env,
container_name="task_____daily_update_lstm",
api_version="auto",
auto_remove=True,
command="python3 /workdir/bin/train_model.py -m lstm",
docker_url="unix://var/run/docker.sock",
working_dir="/workdir",
mount_tmp_dir=False,
mounts=[
Mount(source=BASE_DIR + "backend/modeling/bin", target="/workdir/bin", type="bind"),
Mount(source=BASE_DIR + "backend/modeling/src", target="/workdir/src", type="bind"),
Mount(source=BASE_DIR + "shared", target="/workdir/shared", type="bind"),
Mount(source=BASE_DIR + "mlflow/db", target="/workdir/data", type="bind"),
Mount(source=BASE_DIR + "mlflow/artifacts", target="/workdir/artifacts", type="bind"),
],
network_mode="sncf_forecast_default",
)
update_baseline = DockerOperator(
task_id="task_____daily_update_baseline",
image="sncf_forecast_modeling",
environment=dic_env,
container_name="task_____daily_update_baseline",
api_version="auto",
auto_remove=True,
command="python3 /workdir/bin/train_model.py -m baseline",
docker_url="unix://var/run/docker.sock",
working_dir="/workdir",
mount_tmp_dir=False,
mounts=[
Mount(source=BASE_DIR + "backend/modeling/bin", target="/workdir/bin", type="bind"),
Mount(source=BASE_DIR + "backend/modeling/src", target="/workdir/src", type="bind"),
Mount(source=BASE_DIR + "shared", target="/workdir/shared", type="bind"),
Mount(source=BASE_DIR + "mlflow/db", target="/workdir/data", type="bind"),
Mount(source=BASE_DIR + "mlflow/artifacts", target="/workdir/artifacts", type="bind"),
],
network_mode="sncf_forecast_default",
)
update_autoencoder = DockerOperator(
task_id="task_____daily_update_autoencoder",
image="sncf_forecast_modeling",
environment=dic_env,
container_name="task_____daily_update_autoencoder",
api_version="auto",
auto_remove=True,
command="python3 /workdir/bin/train_model.py -m autoencoder",
docker_url="unix://var/run/docker.sock",
working_dir="/workdir",
mount_tmp_dir=False,
mounts=[
Mount(source=BASE_DIR + "backend/modeling/bin", target="/workdir/bin", type="bind"),
Mount(source=BASE_DIR + "backend/modeling/src", target="/workdir/src", type="bind"),
Mount(source=BASE_DIR + "shared", target="/workdir/shared", type="bind"),
Mount(source=BASE_DIR + "mlflow/db", target="/workdir/data", type="bind"),
Mount(source=BASE_DIR + "mlflow/artifacts", target="/workdir/artifacts", type="bind"),
],
network_mode="sncf_forecast_default",
)
update_db >> update_lstm
update_db >> update_baseline
update_db >> update_autoencoder
| 41.584746 | 109 | 0.651314 |
3e0b03ec64f84131a309427f748ab4fc729497d0
| 1,723 |
py
|
Python
|
sustainableCityManagement/main_project/Population_API/views_population.py
|
Josh-repository/Dashboard-CityManager-
|
6287881be9fb2c6274a755ce5d75ad355346468a
|
[
"RSA-MD"
] | null | null | null |
sustainableCityManagement/main_project/Population_API/views_population.py
|
Josh-repository/Dashboard-CityManager-
|
6287881be9fb2c6274a755ce5d75ad355346468a
|
[
"RSA-MD"
] | null | null | null |
sustainableCityManagement/main_project/Population_API/views_population.py
|
Josh-repository/Dashboard-CityManager-
|
6287881be9fb2c6274a755ce5d75ad355346468a
|
[
"RSA-MD"
] | 1 |
2021-05-13T16:33:18.000Z
|
2021-05-13T16:33:18.000Z
|
from django.http import JsonResponse
from django.http import HttpResponse
from rest_framework.views import APIView
from .store_population import StorePopulation
import time as processTiming
import uuid
# API to fetch Ireland population used by frontend. The result consist of population estimate and year.
# API to fetch Dublin population used by frontend. The result consist of population estimate and year.
| 39.159091 | 103 | 0.62101 |
3e105c7bee23ddd23731ff6b0bc65a97faa40678
| 2,536 |
py
|
Python
|
examples/tutorial7.py
|
fangj99/gifmaze
|
fd0f7fbf592537a26b13359ccf87dab836d9b1b3
|
[
"MIT"
] | 7 |
2018-04-28T17:25:25.000Z
|
2021-08-15T17:52:11.000Z
|
examples/tutorial7.py
|
fangj99/gifmaze
|
fd0f7fbf592537a26b13359ccf87dab836d9b1b3
|
[
"MIT"
] | null | null | null |
examples/tutorial7.py
|
fangj99/gifmaze
|
fd0f7fbf592537a26b13359ccf87dab836d9b1b3
|
[
"MIT"
] | 2 |
2019-10-30T03:40:50.000Z
|
2022-01-02T05:44:33.000Z
|
# -*- coding: utf-8 -*-
"""
This script shows how to embed the animation into a
background image (it's also possible to embed the animation
into another animation, but that's too complicated to implement
in a simple program ...)
"""
from colorsys import hls_to_rgb
import gifmaze as gm
from gifmaze.algorithms import wilson, bfs
from gifmaze.utils import generate_text_mask
# firstly define the size and color_depth of the image.
width, height = 600, 400
color_depth = 8
# define a surface to draw on.
surface = gm.GIFSurface.from_image('teacher.png', color_depth)
# set the 0-th color to be the same with the blackboard's.
palette = [52, 51, 50, 200, 200, 200, 255, 0, 255]
for i in range(256):
rgb = hls_to_rgb((i / 360.0) % 1, 0.5, 1.0)
palette += [int(round(255 * x)) for x in rgb]
surface.set_palette(palette)
# next define an animation environment to run the algorithm.
anim = gm.Animation(surface)
# set the speed, delay, and transparent color we want.
anim.set_control(speed=50, delay=2, trans_index=3)
# add a maze instance.
mask = generate_text_mask(surface.size, 'UST', 'ubuntu.ttf', 350)
# specify the region that where the animation is embedded.
left, top, right, bottom = 66, 47, 540, 343
maze = anim.create_maze_in_region(cell_size=4,
region=(left, top, right, bottom),
mask=mask)
anim.pad_delay_frame(100)
# paint the blackboard
surface.rectangle(left, top, right - left + 1, bottom - top + 1, 0)
# in the first algorithm only 4 colors occur in the image, so we can use
# a smaller minimum code length, this can help reduce the file size significantly.
surface.set_lzw_compress(2)
# pad one second delay, get ready!
anim.pad_delay_frame(100)
# the animation runs here.
wilson(maze, root=(0, 0))
# pad three seconds delay to see the result clearly.
anim.pad_delay_frame(300)
# now we run the maze solving algorithm.
# this time we use full 256 colors, hence the minimum code length is 8.
surface.set_lzw_compress(8)
# the tree and wall are unchanged throughout the maze solving algorithm hence
# it's safe to use 0 as the transparent color and color the wall and tree transparent.
anim.set_colormap({0: 0, 1: 0, 2: 2, 3: 3})
anim.set_control(speed=30, delay=5, trans_index=0)
# run the maze solving algorithm.
bfs(maze,
start=(0, 0),
end=(maze.size[0] - 1, maze.size[1] - 1))
# pad five seconds delay to see the path clearly.
anim.pad_delay_frame(500)
# save the result.
surface.save('wilson_bfs.gif')
surface.close()
| 31.308642 | 86 | 0.714117 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.