max_stars_repo_path (stringlengths 3–269) | max_stars_repo_name (stringlengths 4–119) | max_stars_count (int64 0–191k) | id (stringlengths 1–7) | content (stringlengths 6–1.05M) | score (float64 0.23–5.13) | int_score (int64 0–5) |
---|---|---|---|---|---|---|
tests/test_examples.py | fizmat/ef_python | 1 | 12792351 | <filename>tests/test_examples.py
import os
import subprocess
from shutil import copytree
import inject
import pytest
from ef.config.config import Config
from ef.runner import Runner
from ef.util.testing import assert_dataclass_eq
_examples_conf = [("examples/axially_symmetric_beam_contour/contour.conf", pytest.mark.slow),
("examples/minimal_working_example/minimal_conf.conf", ()),
("examples/single_particle_in_free_space/single_particle_in_free_space.conf", pytest.mark.slowish),
("examples/single_particle_in_radial_electric_field/single_particle_in_radial_electric_field.conf",
()),
("examples/single_particle_in_magnetic_field/single_particle_in_magnetic_field.conf",
pytest.mark.slow),
("examples/single_particle_in_magnetic_field/large_time_step.conf", pytest.mark.slow),
("examples/single_particle_in_magnetic_field/long_simulation_time.conf", pytest.mark.slow),
("examples/ribbon_beam_contour/contour_bin.conf", pytest.mark.slowish),
("examples/drift_tube_potential/pot.conf", pytest.mark.slow),
("examples/ribbon_beam_contour/contour.conf", pytest.mark.slow),
("examples/tube_source_test/contour.conf", pytest.mark.slow)
]
_pytest_params_example_conf = [pytest.param(f.replace('/', os.path.sep), marks=m) for f, m in _examples_conf]
@pytest.mark.parametrize("fname", _pytest_params_example_conf)
def test_example_conf(fname, tmpdir, monkeypatch, backend_and_solver):
sim = Config.from_fname(fname).make()
monkeypatch.chdir(tmpdir)
Runner(sim).start()
def run_jupyter(dir, fname, path=None, copy_dir=False):
dir = dir.replace('/', os.path.sep)
fname = os.path.join(dir, fname)
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
with open(fname) as f:
nb = nbformat.read(f, as_version=4)
if copy_dir:
copytree(dir, path)
ep = ExecutePreprocessor(timeout=600)
ep.preprocess(nb, {'metadata': {'path': path}} if path is not None else {})
inject.clear()
@pytest.mark.slowish
@pytest.mark.jupyter_examples
def test_all_examples_visualize():
run_jupyter("examples/jupyter", "visualize_examples.ipynb", 'examples/jupyter/')
@pytest.mark.slow
@pytest.mark.jupyter_examples
def test_axially_symmetric_beam_contour(tmpdir):
run_jupyter("examples/axially_symmetric_beam_contour", "axially_symmetric_beam_contour.ipynb", tmpdir)
@pytest.mark.slow
@pytest.mark.jupyter_examples
def test_drift_tube_potential(tmpdir):
run_jupyter("examples/drift_tube_potential", "potential.ipynb", tmpdir.join('newdir'), True)
@pytest.mark.slow
@pytest.mark.jupyter_examples
def test_ribbon_beam_contour(tmpdir):
run_jupyter("examples/ribbon_beam_contour", "beam.ipynb", tmpdir.join('newdir'), True)
@pytest.mark.slow
@pytest.mark.jupyter_examples
def test_tube_source(tmpdir):
run_jupyter("examples/tube_source_test", "plot.ipynb", tmpdir.join('newdir'), True)
@pytest.mark.slowish
@pytest.mark.jupyter_examples
def test_single_particle_in_free_space(tmpdir):
run_jupyter("examples/single_particle_in_free_space", "single_particle_in_free_space.ipynb",
tmpdir.join('newdir'), True)
assert_dataclass_eq(Config.from_fname(tmpdir.join('newdir').join('config.ini')),
Config.from_fname(tmpdir.join('newdir').join('single_particle_in_free_space.conf')))
@pytest.mark.slowish
@pytest.mark.jupyter_examples
def test_single_particle_in_uniform_electric_field(tmpdir):
run_jupyter("examples/single_particle_in_electric_field", "single_particle_in_uniform_electric_field.ipynb",
tmpdir)
@pytest.mark.slowish
@pytest.mark.jupyter_examples
def test_single_particle_in_radial_electric_field(tmpdir):
run_jupyter("examples/single_particle_in_radial_electric_field", "plot.ipynb", tmpdir.join('newdir'), True)
@pytest.mark.slow
@pytest.mark.jupyter_examples
def test_particle_in_magnetic_field(tmpdir):
run_jupyter("examples/single_particle_in_magnetic_field", "Single Particle in Uniform Magnetic Field.ipynb", tmpdir)
run_jupyter("examples/single_particle_in_magnetic_field", "single_particle_in_magnetic_field.ipynb",
tmpdir.join('newdir'), True)
@pytest.mark.requires_install
@pytest.mark.parametrize("fname", _pytest_params_example_conf)
def test_main_shell(fname, tmpdir, monkeypatch):
basedir = os.path.join(os.path.dirname(__file__), '..')
monkeypatch.chdir(tmpdir)
result = subprocess.run(['ef', os.path.join(basedir, fname)], check=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
for line in result.stderr.split("\n"):
assert line == '' or line.startswith("WARNING:")
assert result.stdout != ""
| 2.15625 | 2 |
build/lib.macosx-10.9-x86_64-3.9/gators/encoders/tests/test_base_encoder.py | Aditya-Kapadiya/gators | 4 | 12792352 | # License: Apache-2.0
from gators.encoders import WOEEncoder
import pytest
def test_init():
with pytest.raises(TypeError):
WOEEncoder(dtype=str)
| 1.734375 | 2 |
python/regex_and_parsing/validating_and_parsing_email_addresses.py | avenet/hackerrank | 0 | 12792353 | from email.utils import formataddr, parseaddr
import re
emails_to_validate = int(input())
EMAIL_RE = re.compile(
r'^[a-zA-Z][a-zA-Z0-9._-]*@[a-zA-Z]+\.[a-zA-Z]{1,3}$'
)
def validate_email(email_guess):
return bool(EMAIL_RE.match(email_guess))
for i in range(emails_to_validate):
real_name, email_address = parseaddr(input())
if not real_name and not email_address:
continue
if not validate_email(email_address):
continue
print(formataddr((real_name, email_address)))
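# Illustrative behaviour of the filter above (hypothetical HackerRank-style sample, not part of the original submission):
#   DEXTER <dexter@hotmail.com>   -> printed: the address matches EMAIL_RE
#   VIRUS <virus!@variable.:p>    -> skipped: '!' is not allowed by EMAIL_RE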
| 3.71875 | 4 |
migrations/versions/78ac40739c16_model_post_title_add.py | Renzf2015/myblog | 0 | 12792354 | <reponame>Renzf2015/myblog
"""model-post-title add
Revision ID: 78ac40739c16
Revises: <PASSWORD>
Create Date: 2016-09-05 17:54:40.327821
"""
# revision identifiers, used by Alembic.
revision = '78ac40739c16'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('posts', sa.Column('title', sa.String(length=64), nullable=True))
op.create_index(op.f('ix_posts_title'), 'posts', ['title'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_posts_title'), table_name='posts')
op.drop_column('posts', 'title')
### end Alembic commands ###
| 1.101563 | 1 |
item_59/waste_memory.py | nickaigi/effective_python_tips | 0 | 12792355 | <filename>item_59/waste_memory.py
import os
import hashlib
class MyObject(object):
def __init__(self):
self.x = os.urandom(100)
self.y = hashlib.sha1(self.x).hexdigest()
def get_data():
values = []
for _ in range(100):
obj = MyObject()
values.append(obj)
return values
def run():
deep_values = []
for _ in range(100):
deep_values.append(get_data())
return deep_values
| 2.921875 | 3 |
data/process_data.py | ranjeetraj2005/Disaster_Response_System | 0 | 12792356 | import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""loads the specified message and category data
Args:
messages_filepath (string): The file path of the messages csv
categories_filepath (string): The file path of the categories csv
Returns:
df (pandas dataframe): The combined messages and categories df
"""
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
return pd.merge(messages, categories, on='id')
def clean_data(df):
"""Cleans the data:
- drops duplicates
- removes messages missing classes
- cleans up the categories column
Args:
df (pandas dataframe): combined categories and messages df
Returns:
df (pandas dataframe): Cleaned dataframe with split categories
"""
# expand the categories column
categories = df.categories.str.split(';', expand=True)
row = categories[:1]
# get the category names
category_colnames = row.applymap(lambda s: s[:-2]).iloc[0, :].tolist()
categories.columns = category_colnames
# get only the last value in each value as an integer
categories = categories.applymap(lambda s: int(s[-1]))
# add the categories back to the original df
df.drop('categories', axis=1, inplace=True)
df = pd.concat([df, categories], axis=1)
# clean up the final data
df.drop_duplicates(subset='message', inplace=True)
df.dropna(subset=category_colnames, inplace=True)
df.related.replace(2, 0, inplace=True)
return df
def save_data(df, database_filename):
"""Saves the resulting data to a sqlite db
Args:
df (pandas dataframe): The cleaned dataframe
database_filename (string): the file path to save the db
Returns:
None
"""
engine = create_engine('sqlite:///'+database_filename)
df.to_sql('labeled_messages', engine, index=False, if_exists='replace')
engine.dispose()
def main():
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main()
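# Hypothetical sanity check (not part of the original script): read the table written by
# save_data() back out of the SQLite file, using the table and file names shown above.
# import pandas as pd
# df = pd.read_sql_table('labeled_messages', 'sqlite:///DisasterResponse.db')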
| 3.296875 | 3 |
bartender/mongo_pruner.py | TheFoxAtWork/bartender | 0 | 12792357 | <filename>bartender/mongo_pruner.py<gh_stars>0
import logging
from datetime import datetime, timedelta
from mongoengine import Q
from brewtils.stoppable_thread import StoppableThread
class MongoPruner(StoppableThread):
def __init__(self, tasks=None, run_every=timedelta(minutes=15)):
self.logger = logging.getLogger(__name__)
self.display_name = "Mongo Pruner"
self._run_every = run_every.total_seconds()
self._tasks = tasks or []
super(MongoPruner, self).__init__(logger=self.logger, name="Remover")
def add_task(
self, collection=None, field=None, delete_after=None, additional_query=None
):
self._tasks.append(
{
"collection": collection,
"field": field,
"delete_after": delete_after,
"additional_query": additional_query,
}
)
def run(self):
self.logger.info(self.display_name + " is started")
while not self.wait(self._run_every):
current_time = datetime.utcnow()
for task in self._tasks:
delete_older_than = current_time - task["delete_after"]
query = Q(**{task["field"] + "__lt": delete_older_than})
if task.get("additional_query", None):
query = query & task["additional_query"]
self.logger.debug(
"Removing %ss older than %s"
% (task["collection"].__name__, str(delete_older_than))
)
task["collection"].objects(query).delete()
self.logger.info(self.display_name + " is stopped")
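# Hypothetical usage sketch (the Request collection name is illustrative, not from this module);
# MongoPruner is a StoppableThread, i.e. a threading.Thread subclass, so start() runs run() above
# in the background.
# pruner = MongoPruner(run_every=timedelta(minutes=15))
# pruner.add_task(collection=Request, field="created_at", delete_after=timedelta(days=7))
# pruner.start()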
| 2.375 | 2 |
Nifty Data/server.py | gautam7-github/Thrifty-Nifty | 1 | 12792358 | from flask import Flask, jsonify
from flask.templating import render_template
from threading import Thread
from data import *
# config
app = Flask(__name__)
app.config["JSON_SORT_KEYS"] = False
app.config["JSONIFY_PRETTYPRINT_REGULAR"] = True
# home route
@app.route("/")
def home():
return render_template("index.html")
# f"/{KEY}/data/nifty/index/all/sort/sortby"
@app.route("/help")
def help():
return render_template("help.html")
# index route
@app.route(f"/{KEY}/data/nifty/<string:index>/all/sort/<string:sortby>")
def send_nifty_index_data(index, sortby):
index = str(index).lower()
sortby = str(sortby).lower()
indices = {
'it': fetch_nifty_index_data("it", sortby),
'bank': fetch_nifty_index_data("bank", sortby),
'50': fetch_nifty_index_data("50", sortby),
'auto': fetch_nifty_index_data("auto", sortby),
'pharma': fetch_nifty_index_data("pharma", sortby),
'fmcg': fetch_nifty_index_data("fmcg", sortby)
}
if index in indices:
return jsonify(indices[index])
# all index route
@app.route(f"/{KEY}/data/nifty/indices/all")
def send_all_nifty_indices_data():
return jsonify(fetch_all_indices_data())
def run():
app.run(host='0.0.0.0', port=8080)
def main():
t = Thread(target=run)
t.start()
if __name__ == "__main__":
main()
| 2.703125 | 3 |
test_pytube.py | Tom-Niesytto/YouTubeDownload | 0 | 12792359 | from pytube import YouTube
YouTube('http://youtube.com/watch?v=9bZkp7q19f0').streams[0].download() | 2.21875 | 2 |
pyforchange/egg/__init__.py | PythonForChange/pyforchange | 1 | 12792360 | from pyforchange.egg.resources.modules import *
from pyforchange.egg.resources.console import *
from pyforchange.egg.resources.constants import *
from pyforchange.egg.resources.extensions import *
from pyforchange.egg.app import * | 1.085938 | 1 |
src/scripts/apply_json_metadata.py | charlottestanton/covid-19-open-data | 430 | 12792361 | <filename>src/scripts/apply_json_metadata.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.cloud import storage
import sys
def apply_json_metadata(bucket_name, prefix_name):
"""
Applies Content-Type and gzip Content-Encoding to json files in a bucket prefix
In order to allow for decompressive transcoding and serving of gzipped assets to clients
who can decompress themselves, both the content type and content encoding meta data need
to be set on JSON objects. Most methods of transferring objects into a bucket do not
correctly set this meta data, so we have this utility to correct for this after the fact.
See also: https://cloud.google.com/storage/docs/transcoding
"""
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
for blob in bucket.list_blobs(prefix=prefix_name):
if(blob.name.endswith("json")):
print(blob.name)
if(blob.content_type != "application/json" or
blob.content_encoding != "gzip" or
blob.content_disposition != "inline"):
blob.content_type = "application/json"
blob.content_encoding = "gzip"
blob.content_disposition = "inline"
blob.patch()
if __name__ == "__main__":
if(len(sys.argv) != 3):
print("Usage: apply_json_meta [bucket_name] [prefix_name]")
else:
apply_json_metadata(sys.argv[1],sys.argv[2])
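# Hypothetical follow-up check (not in the original script): confirm that one object was
# patched as expected; the bucket and blob names here are placeholders.
# client = storage.Client()
# blob = client.bucket("my-bucket").get_blob("v2/latest/index.json")
# assert blob.content_type == "application/json"
# assert blob.content_encoding == "gzip"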
| 2.578125 | 3 |
run.py | talos-org/server | 1 | 12792362 | from app import app
app.run(host='0.0.0.0', port="5000")
| 1.507813 | 2 |
quad9_plot.py | lindsayad/python | 0 | 12792363 | <filename>quad9_plot.py
import yt
ds = yt.load("/home/lindsayad/projects/moose/modules/navier_stokes/tests/ins/lid_driven/gold/lid_driven_out.e", step=-1)
slc = yt.SlicePlot(ds, 'z', ('connect1', 'vel_y'))
slc.set_log(('connect1','vel_y'), False)
slc.set_width((1, 1))
slc.save()
| 1.921875 | 2 |
mandrill_logger/utils.py | willandskill/mandrill_logger | 0 | 12792364 | from django.contrib.auth import get_user_model
from .enums import LogReason, LogStatus
from .models import Log
class MandrillLogger():
REASON_TRANSLATOR = {
'hard-bounce': LogReason.HARD_BOUNCE,
'soft-bounce': LogReason.SOFT_BOUNCE,
'spam': LogReason.SPAM,
'unsub': LogReason.UNSUB,
'custom': LogReason.CUSTOM,
'invalid-sender': LogReason.INVALID_SENDER,
'invalid': LogReason.INVALID,
'test-mode-limit': LogReason.TEST_MODE_LIMIT,
'unsigned': LogReason.UNSIGNED,
'rule': LogReason.RULE,
}
# The sending status of the recipient - either "sent", "queued", "scheduled", "rejected", or "invalid"
STATUS_TRANSLATOR = {
'sent': LogStatus.SENT,
'queued': LogStatus.QUEUED,
'scheduled': LogStatus.SCHEDULED,
'rejected': LogStatus.REJECTED,
'invalid': LogStatus.INVALID,
}
def __init__(self):
pass
def log_email(self, email):
for recipient in email.to:
_data = {}
_data['template'] = email.template_name
_data['email'] = recipient
_data['user'] = self.get_user_from_email(recipient)
try:
mandrill_response = email.mandrill_response[0]
_data['mandrill_id'] = mandrill_response['_id']
_data['meta_data'] = mandrill_response
_data['status'] = self.get_status_enum(mandrill_response.get('status', None))
_data['reason'] = self.get_reason_enum(mandrill_response.get('reject_reason', None))
except Exception as e:
pass
self.save_log(_data)
def save_log(self, _data):
Log.objects.create(**_data)
def get_user_from_email(self, email):
user = get_user_model()
try:
return user.objects.get(email=email)
except Exception as e:
print(e)
return None
def get_reason_enum(self, reason):
return self.translate_enum(self.REASON_TRANSLATOR, reason, LogReason.NA)
def get_status_enum(self, status):
return self.translate_enum(self.STATUS_TRANSLATOR, status, LogStatus.DEFAULT)
def translate_enum(self, _dict, _str, _default=None):
try:
return _dict[_str]
except Exception as e:
return _default
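# Hypothetical usage sketch: log_email() above assumes a Mandrill-backed message object that
# exposes .to, .template_name and .mandrill_response; the variable name below is illustrative.
# MandrillLogger().log_email(sent_message)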
| 2.078125 | 2 |
utils/scripts/OOOlevelGen/src/sprites/ABTargetScrollFocusSprite.py | fullscreennl/bullettime | 0 | 12792365 | <reponame>fullscreennl/bullettime
import PhysicsMixin
import ID
BODIES = """
<dict>
<key>body</key>
<dict>
<key>x</key>
<integer>%(x)s</integer>
<key>y</key>
<integer>%(y)s</integer>
<key>width</key>
<integer>10</integer>
<key>height</key>
<integer>10</integer>
<key>sheet_id</key>
<integer>5</integer>
<key>id</key>
<integer>%(__objID__)s</integer>
<key>name</key>
<string>%(name)s</string>
<key>classname</key>
<string>%(classname)s</string>
<key>static</key>
<true/>
</dict>
<key>shapes</key>
<array>
<dict>
<key>x</key>
<integer>0</integer>
<key>y</key>
<integer>0</integer>
<key>width</key>
<integer>%(width)s</integer>
<key>height</key>
<integer>%(height)s</integer>
<key>type</key>
<string>circ</string>
<key>friction</key>
<real>1</real>
<key>density</key>
<integer>1</integer>
<key>restitution</key>
<real>0</real>
<key>sensor</key>
<true/>
</dict>
</array>
</dict>
"""
JOINTS = """"""
CONTACTS = """"""
class ABTargetScrollFocusSprite(PhysicsMixin.PhysicsMixin):
def __init__(self,**kwargs):
self.params = kwargs
self.params['name'] = "ScrollTarget"
self.process(kwargs)
self.addDefault('classname','')
self.params['__objID__'] = ID.next()
def render(self):
return( BODIES%self.params, JOINTS%self.params,CONTACTS%self.params)
if __name__ == "__main__":
print "no test avaiable"
| 2.34375 | 2 |
project/s2dataprocess.py | 4theKnowledge/literaturesieve | 0 | 12792366 | <gh_stars>0
"""
@author: <NAME>
"""
from tqdm import tqdm
import gzip
import json
from langdetect import detect
import time
from pprint import pprint
import concurrent.futures
import codecs
import re
def timeit(method):
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
print('%r %2.2f seconds' % (method.__name__, (te - ts)))
return result
return timed
@timeit
def unzipS2Contents(url):
f = gzip.open(url, 'rb')
file_content = f.read().decode('utf-8')
f.close()
fileContentsList = file_content.split('\n')
return fileContentsList
def getManifest(fileLocation):
# Get list of .gz documents from S2 manifest
manifestFile = open(fileLocation, "r")
manifestFile = manifestFile.read()
manifestFileList = manifestFile.split('\n')
manifestFileList = [file for file in manifestFileList if 's2' in file]
return manifestFileList
def convertToJson(file):
try:
docJSON = json.loads(file)
return docJSON
except Exception as e:
# print(f'ERROR: {e}')
pass
def getEnglishDoc(file):
try:
docJSON = json.loads(file)
if detect(docJSON["title"]) == 'en': # can have errors if there isn't a title specified.
return docJSON
# return f'Year: {docJSON["year"]} - Title: {docJSON["title"]} - ID: {docJSON["id"]}'
except Exception as e:
# print(f'ERROR: {e}')
pass
def matchDocWithKeyTerms(file):
# print(file)
# keyTermsList = ['summarisation', 'summarization', 'nlg', 'extractive', 'summeries', 'summarizatio'] # spelling problems...; removed automatic as it's too general when it matches by itself
docJSON = json.loads(json.dumps(file)) # dumps if input is dict...
# keyTermsMatched = set(docJSON["title"].lower().split(' ')).intersection(set(keyTermsList))
pattern = "(summarization|summarisation|text|nlg|nlp)|(automatic)[^.]*(generation|summary|summarization|summarisation)|(abstractive|extractive)[^.]*(model|summary|modelling|modeling|summarization|summarisation|processing)|(semantic)[^.]*(retrieval|graph|model|modelling|modeling|summarization|summarisation|processing|representations)|(natural|language)[^.]*(language|generation|processing)|(information)[^.]*(retrieval|graph|summary|summarization|summarisation)"
if (re.match(pattern, docJSON["title"].lower())) or (re.match(pattern, docJSON["paperAbstract"].lower())):
print(f'DOC MATCHED: {docJSON["title"]}')
return docJSON
else:
pass
@timeit
def main():
s2CorpusList = getManifest(r"data/sample/semantic scholar manifest 2020-03.txt")
for s2Corpus in s2CorpusList:
print(f'Processing: {s2Corpus}')
s2CorpusUrl = fr"C:\Users\22917746\Desktop\Semantic Scholar EDA\data\raw\{s2Corpus}"
fileContentsList = unzipS2Contents(s2CorpusUrl)
print(f'Number of files: {len(fileContentsList)}')
# Batch processing files rather than distributing massive amounts to each core
batchSize = 10000
chunks = (len(fileContentsList)-1) // batchSize + 1
batchCount = 1
for i in range(chunks):
start = time.time()
batch = fileContentsList[i*batchSize:(i+1)*batchSize]
print(f'\nProcessing batch {batchCount} - Size: {len(batch)}')
# # English Sieving
# docListEnglish = []
# with concurrent.futures.ProcessPoolExecutor() as executor:
# results = executor.map(getEnglishDoc, batch)
# # resultCount = 1
# for result in results:
# if result is not None:
# # print(f'{resultCount} - {result}')
# docListEnglish.append(result)
# else:
# # print(f'{resultCount} - Non-English Title')
# pass
# # resultCount += 1
# finishCheckEng = time.time()
# outputFileEnglishDoc = codecs.open(f"englishDocs_{batchCount}.txt", "w", "utf-8")
# for englishDocContent in docListEnglish:
# outputFileEnglishDoc.writelines(f'{englishDocContent}\n')
# outputFileEnglishDoc.close()
# finishWriteToDisk = time.time()
# print(f'Time to check english {finishCheckEng-start:0.1f}')
# print(f'Time to write to disk {finishWriteToDisk-start:0.1f}')
docListJSON = []
with concurrent.futures.ProcessPoolExecutor() as executor:
results = executor.map(convertToJson, batch)
for result in results:
if result is not None:
docListJSON.append(result)
else:
pass
finishJSONConvert = time.time()
print(f'Time to convert to JSON {finishJSONConvert-start:0.1f}')
# Key Term Matching
start1 = time.time()
docKeyTermMatched = []
for doc in docListJSON: #docListEnglish:
result = matchDocWithKeyTerms(doc)
if result is not None:
# print(result)
docKeyTermMatched.append(result)
s2CorpusFname = s2Corpus.split('.')[0]
outputFileKeyTermMatched = codecs.open(f"englishDocsKeyTermMatched_{s2CorpusFname}_{batchCount}.txt", "w", "utf-8")
for docMatched in docKeyTermMatched:
outputFileKeyTermMatched.writelines(f'{docMatched}\n')
outputFileKeyTermMatched.close()
finish1 = time.time()
print(f'Time to extract keyterms {finish1-start1:0.1f}')
batchCount += 1
if __name__ == '__main__':
main() | 2.375 | 2 |
apps/linux/vim/plugins/ultisnips/markdown_snippets.py | ma-anwar/knausj_talon | 1 | 12792367 | from talon import Context
ctx = Context()
ctx.matches = r"""
tag: user.vim_ultisnips
mode: user.markdown
mode: command
and code.language: markdown
"""
# spoken name -> ultisnips snippet name
ctx.lists["user.snippets"] = {
# Sections and Paragraphs #
"section": "sec",
"sub section": "ssec",
"sub sub section": "sssec",
"paragraph": "par",
"sub paragraph": "spar",
# Text formatting #
"italics": "*",
"bold": "**",
"bold italics": "***",
"strike through": "~~",
"comment": "/*",
# Common stuff #
"link": "link",
"image": "img",
"inline code": "ilc",
"code block": "cbl",
"shell block": "shellcbl",
"reference link": "refl",
"footnote": "fnt",
"detail": "detail",
}
| 2.078125 | 2 |
helper.py | tangtai/2D_ICON_Generation_with_DCGAN_DEMO | 0 | 12792368 | <filename>helper.py<gh_stars>0
import numpy as np
from PIL import Image
import math
def get_image(image_path, width, height, mode):
"""
Read image
:param image_path: path of image
:param width: width of image
:param height: height of image
:param mode: image mode
:return Image data
"""
image = Image.open(image_path)
return np.array(image.convert(mode))
def get_batch(images, width, height, mode):
data_batch = np.array(
[get_image(files, width, height, mode) for files in images]).astype(np.float32)
# Make sure the images are in 4 dimensions
if len(data_batch.shape) < 4:
data_batch = data_batch.reshape(data_batch.shape + (1,))
return data_batch
def images_to_grid(images, mode):
# size of grid
size = math.floor(np.sqrt(images.shape[0]))
# scale 0 to 255 rgb color
images = (((images - images.min()) * 255) / (images.max() - images.min())).astype(np.uint8)
# arrange images in square
images_in_square = np.reshape(
images[:size * size],
(size, size, images.shape[1], images.shape[2], images.shape[3]))
if mode == 'L':
images_in_square = np.squeeze(images_in_square, 4)
new_im = Image.new(mode, (images.shape[1] * size, images.shape[2] * size))
# combine images
for col_i, col_images in enumerate(images_in_square):
for image_i, image in enumerate(col_images):
im = Image.fromarray(image, mode)
new_im.paste(im, (col_i * images.shape[1], image_i * images.shape[2]))
return new_im
print(new_im, images.shape[1] * size) | 2.625 | 3 |
bonus2/collateral/PYLINT_EX/ncclient2.py | ksannedhi/pyplus_course | 39 | 12792369 | <reponame>ksannedhi/pyplus_course
import ipdb
from ncclient import manager
from getpass import getpass
from ncclient.xml_ import new_ele
conn = manager.connect(
host="srx2.lasthop.io",
username="pyclass",
password=getpass(),
device_params={"name": "junos"},
hostkey_verify=False,
allow_agent=False,
look_for_keys=False,
port=830,
timeout=60,
)
ipdb.set_trace()
rpc = new_ele("get-software-information")
nc_out = conn.rpc(rpc)
filter = """
<filter type="subtree">
<configuration>
<interfaces>
</interfaces>
</configuration>
</filter>
"""
# It is an XML like thing
print(nc_out.tostring.decode())
print(nc_out.find(".//product-name"))
print(nc_out.find(".//product-name").text)
print(nc_out.find(".//product-model").text)
config = conn.get_config(source="running")
config_xml = config.data_xml
| 2.25 | 2 |
appserver/subscription/models.py | sinag/SWE574-Horuscope | 0 | 12792370 | from django.db import models
from community.models import Community
from root import settings
"""
Subscription object model
"""
class Subscription(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT, blank=False, null=False, db_index=True)
community = models.ForeignKey(Community, on_delete=models.PROTECT, blank=False, null=False, db_index=True)
created_on = models.DateTimeField(auto_now_add=True, blank=False, null=False)
def __str__(self):
return str(self.id)
class Meta:
verbose_name_plural = "subscriptions"
| 2.359375 | 2 |
moontamer/four_wheel_steering_teleop/scripts/teleop_four_wheel_steering_joy.py | cagrikilic/simulation-environment | 1 | 12792371 | <reponame>cagrikilic/simulation-environment<gh_stars>1-10
#!/usr/bin/env python
import rospy
from math import pi
from four_wheel_steering_msgs.msg import FourWheelSteering
from sensor_msgs.msg import Joy
class TeleopFourWheelSteeringJoy():
def __init__(self):
self.axis_dead_zone = 0.05
self.axis_linear_forward = 5 # Right Trigger
self.axis_linear_reverse = 2 # Left Trigger
self.scale_linear = 1
self.axis_front_steering = 0 # Right left-right stick
self.axis_rear_steering = 3 # Left left-right stick
self.scale_steering = pi/10.0
self.enable_button = [4,5]
self.sent_disable_msg = False
self.is_trigger_forward_init = False
self.is_trigger_reverse_init = False
self.last_forward_speed = 0.0
self.last_reverse_speed = 0.0
rospy.Subscriber("joy", Joy, self.callback)
self.pub = rospy.Publisher('cmd_four_wheel_steering', FourWheelSteering, queue_size=10)
rospy.spin()
def callback(self, data):
four_wheel_steering_msg = FourWheelSteering()
new_cmd = False
for button in self.enable_button:
if data.buttons[button]:
new_cmd = True
if new_cmd:
#rospy.loginfo(rospy.get_caller_id() + " axes" + str(data.axes))
linear_forward = 1.0
linear_reverse = 1.0
if self.is_trigger_forward_init:
linear_forward = data.axes[self.axis_linear_forward]
elif abs(self.last_forward_speed - data.axes[self.axis_linear_forward]) > 0.01:
self.is_trigger_forward_init = True
else:
self.last_forward_speed = data.axes[self.axis_linear_forward]
if self.is_trigger_reverse_init:
linear_reverse = data.axes[self.axis_linear_reverse]
elif abs(self.last_reverse_speed - data.axes[self.axis_linear_reverse]) > 0.01:
self.is_trigger_reverse_init = True
else:
self.last_reverse_speed = data.axes[self.axis_linear_reverse]
speed = (-linear_forward + linear_reverse)/2.0
#rospy.loginfo(rospy.get_caller_id() + " speed %s", speed)
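# Worked example for the speed mapping above: the trigger-init logic assumes the trigger
# axes rest at +1.0 and read -1.0 when fully pressed, so at rest
# speed = (-1.0 + 1.0)/2 = 0, with the forward trigger fully pressed
# speed = (1.0 + 1.0)/2 = 1.0, and with the reverse trigger pressed speed = -1.0
# (each then multiplied by scale_linear).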
if abs(speed) > self.axis_dead_zone:
four_wheel_steering_msg.speed = speed*self.scale_linear
if abs(data.axes[self.axis_front_steering]) > self.axis_dead_zone:
four_wheel_steering_msg.front_steering_angle = data.axes[self.axis_front_steering]*self.scale_steering
else:
four_wheel_steering_msg.front_steering_angle = 0.0
if abs(data.axes[self.axis_rear_steering]) > self.axis_dead_zone:
four_wheel_steering_msg.rear_steering_angle = data.axes[self.axis_rear_steering]*self.scale_steering
else:
four_wheel_steering_msg.rear_steering_angle = 0.0
self.pub.publish(four_wheel_steering_msg)
self.sent_disable_msg = False
else:
if self.sent_disable_msg == False:
self.pub.publish(four_wheel_steering_msg)
self.sent_disable_msg = True
if __name__ == '__main__':
rospy.init_node('teleop_four_wheel_steering_joy', anonymous=False)
try:
teleop_four_wheel_steering_joy = TeleopFourWheelSteeringJoy()
except rospy.ROSInterruptException: pass
| 2.640625 | 3 |
backend/StudentVillage/serializer.py | henrikhorluck/tdt4140-washlists | 0 | 12792372 | from rest_framework import serializers
from SIFUser.serializer import UserSerializer
from .models import StudentVillage
class StudentVillageSerializer(serializers.ModelSerializer):
managers = UserSerializer(many=True)
dormrooms = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
class Meta:
model = StudentVillage
fields = "__all__"
| 2.03125 | 2 |
vulncat/__main__.py | techtimeflies/vulncat_scrapper | 1 | 12792373 | import argparse
import vulncat
import logging
import os
loglevel='DEBUG'
logpath=f'{os.getcwd()}/log'
# create the log directory if it does not exist
if os.path.exists(logpath) == False: os.mkdir(logpath)
logging.basicConfig(
level=loglevel,
filename=f'{logpath}/app.log',
format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S'
)
parser = argparse.ArgumentParser(description='Vulncat web parser cli')
#parser.add_argument('-h', "--help", help='cli helper')
parser.add_argument('-category', action='store_true', help='list all the categories of vulnerabilities')
parser.add_argument('-kingdom', action='store_true', help='list all the kingdoms')
parser.add_argument('-language', action='store_true', help='list all the languages')
args = parser.parse_args()
if args.category:
vulncat.scrape_filters('category')
if args.kingdom:
vulncat.scrape_filters('kingdom')
if args.language:
vulncat.scrape_filters('codelang') | 2.53125 | 3 |
pair_sne.py | jcorbettfrank/find_pisne | 0 | 12792374 | <filename>pair_sne.py
#%%
from lightCurve import lightCurve
import pandas as pd
import numpy as np
import os
class pairSNE(lightCurve):
def __init__(self, name):
lightCurve.__init__(self, name)
def loadData(self, dir):
fn = [f for f in os.listdir(dir)
if f[-4:]=='spec']
numT = len(fn)
if numT == 0:
self.rawDat = None
self.tKey = None
self.lambdaKey = None
return None
self.tKey = [float(f.split("_")[1][1:]) for f in fn]
fn = [os.path.join(dir,f) for f in fn]
#lets read first file to see how many bins we have in spectra
arr = np.loadtxt(fn[0],skiprows=1,usecols=[0,1,4])
numBins = arr.shape[0]
#3D array will hold all the data; Each 2d plane holds two cols
#specific luminosity (erg/s/angstrom) and number of photons of that wavelength
#rows are labeled by wavelength.
#Each plane corresponds to a day. We will store the days and wavelengths once
#since they are the same for each day
holder = np.zeros((numBins, 2, numT))
self.lambdaKey = arr[:,0] #angstroms, rows,
#have tKey which is in days and correponds to planes
#now load in the actual data
for idx, f in enumerate(fn):
arr = np.loadtxt(f, skiprows=1, usecols=[0,1,4])
holder[:,:,idx] = arr[:,1:]
self.rawDat = holder
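# Indexing sketch for the array built above: self.rawDat[i, 0, t] is the specific
# luminosity (erg/s/angstrom) and self.rawDat[i, 1, t] the photon count at wavelength
# self.lambdaKey[i] on day self.tKey[t].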
#%%
if __name__=='__main__':
data_dir = os.path.join(os.path.join(os.getcwd(), "data"), 'B200')
pair = pairSNE("red_p")
pair.loadData(data_dir)
#%%
if __name__=='__main__':
print(pair.rawDat)
import matplotlib.pyplot as plt
plt.plot(pair.rawDat[:,0,100])
print("name is {}".format(pair.name))
# %%
| 2.59375 | 3 |
main.py | CypherToad/seed_print | 6 | 12792375 | #!/usr/bin/env python3
import json
import base64
import random
from io import BytesIO
import socket
from binascii import unhexlify, hexlify
# Trusting SeedSigner's embit library
# https://github.com/SeedSigner/embit
from embit import bip32
from embit import bip39
from embit import wordlists
from embit import script
from embit.networks import NETWORKS
# Trusting qrcode library as offline qr code creation
import qrcode
# Trusting Flask as simple web interface
from flask import Flask, render_template, request
app = Flask(__name__)
wordlist = wordlists.bip39.WORDLIST
def is_online():
"""
Check if we are online
Thanks @KeithMukai for the suggestion!
https://twitter.com/KeithMukai/status/1470571942000443392
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect(('1.1.1.1', 53))
return True
except OSError:
return False
def seed_qr_string(words):
"""
Return the string value of our SeedQR.
"""
return ''.join([str(wordlist.index(w)).zfill(4) for w in words.split()])
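# Worked example, assuming the standard English BIP-39 wordlist loaded above: "abandon"
# has index 0 and "ability" index 1, so the two-word phrase "abandon ability" yields
# "0000" + "0001" == "00000001".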
def seed_qr_base64(words):
"""
Return a base64 PNG image of our SeedQR.
"""
# create a qrcode of our seed_qr_string
img = qrcode.make(
seed_qr_string(words))
# generate a base64 encoding of our png image
# https://jdhao.github.io/2020/03/17/base64_opencv_pil_image_conversion/
im_file = BytesIO()
img.save(im_file, format="PNG")
im_b64 = base64.b64encode(im_file.getvalue())
return im_b64.decode()
def get_seed_phrase(entropy):
"""
Generate random seedphrase
"""
words = bip39.mnemonic_from_bytes(entropy)
return words
@app.route("/")
def home():
"""
Main home page which generates random seed phrases
"""
if is_online():
return render_template('panic.html')
params = {}
# generate a random seed phrase
params['entropy'] = random.randbytes(32)
# seedQR our our entropy
params['words'] = get_seed_phrase(params['entropy'])
params['seed_qr_string'] = seed_qr_string(params['words'])
params['seed_qr_base64'] = seed_qr_base64(params['words'])
params['seed'] = bip39.mnemonic_to_seed(params['words'])
params['derivation_path'] = "m/84'/0'/0'"
version = bip32.detect_version(params['derivation_path'], default="xpub", network=NETWORKS['main'])
root = bip32.HDKey.from_seed(params['seed'], NETWORKS['main']['xprv'])
params['fingerprint'] = hexlify(root.child(0).fingerprint).decode()
xpriv = root.derive(params['derivation_path'])
xpub = xpriv.to_public()
params['xpriv'] = xpriv
params['xpub'] = xpub.to_string(version=version)
return render_template('index.html', **params, wordlist=wordlist)
if __name__ == "__main__":
app.run(debug=True)
| 2.578125 | 3 |
registrations/migrations/0001_initial.py | IFRCGo/ifrcgo-api | 11 | 12792376 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-02-07 21:09
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Pending',
fields=[
('user', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
('created_at', models.DateTimeField(auto_now_add=True)),
('token', models.CharField(editable=False, max_length=32)),
('admin_contact_1', models.EmailField(blank=True, max_length=254, null=True)),
('admin_contact_2', models.EmailField(blank=True, max_length=254, null=True)),
('admin_token', models.CharField(editable=False, max_length=32, null=True)),
('email_verified', models.BooleanField(default=False, editable=False)),
],
options={
'verbose_name': 'Pending user',
'verbose_name_plural': 'Pending users',
},
),
migrations.CreateModel(
name='Recovery',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('token', models.CharField(editable=False, max_length=32)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 1.648438 | 2 |
tools/testerize.py | alex65536/contest-template | 0 | 12792377 | #!/usr/bin/env python3
import os
import sys
import json
in_fmt = '%d.in'
out_fmt = '%d.out'
def get_test(path, test_num, test_fmt):
return os.path.join(path, 'tests', test_fmt % test_num)
def testerize(path):
src_prob = json.loads(open(os.path.join(path, 'problem.json'), 'r').read())['problem']
dst_prob = {}
dst_prob['InputFile'] = 'stdin' if src_prob['input'] == '' else src_prob['input']
dst_prob['OutputFile'] = 'stdout' if src_prob['output'] == '' else src_prob['output']
dst_prob['TimeLimit'] = int(src_prob['timeLimit'] * 1000)
dst_prob['MemoryLimit'] = int(src_prob['memoryLimit'] * 1024)
dst_prob['StopAfterFirstFail'] = False
if os.path.exists(os.path.join(path, 'checker.cpp')):
dst_prob['Checker'] = {
'Type': 'TTextChecker',
'Value': {
'CheckerFileName': 'checker.exe',
'ParamsPolicy': 'secpInOutAns'
}
}
else:
dst_prob['Checker'] = {
'Type': 'TFileCompareChecker',
'Value': {
'StripSpaces': True
}
}
dst_prob['Version'] = {
'Build': 129,
'Major': 1,
'Minor': 2,
'Release': 3,
'Tag': ''
}
dst_prob['TestList'] = []
test_id = 1
while os.path.exists(get_test(path, test_id, in_fmt)) or os.path.exists(get_test(path, test_id, out_fmt)):
cur_test = {
'Cost': 1.0,
}
cur_test['InputFile'] = in_fmt % test_id
cur_test['OutputFile'] = out_fmt % test_id
dst_prob['TestList'] += [cur_test]
test_id += 1
test_cost = 0 if test_id == 1 else 100.0 / (test_id - 1)
for i in range(test_id - 1):
dst_prob['TestList'][i]['Cost'] = test_cost
open(os.path.join(path, 'tests', 'props.json'), 'w').write(json.dumps(dst_prob, indent=2))
def main(args):
if len(args) <= 1:
print('Usage: ./testerize.py <problem directory>')
return 1
testerize(args[1])
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 2.59375 | 3 |
uncertainty_baselines/datasets/clinc_intent_test.py | gmum/uncertainty-baselines | 1 | 12792378 | <gh_stars>1-10
# coding=utf-8
# Copyright 2020 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for ClincIntentDetectionDataset."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import uncertainty_baselines as ub
from uncertainty_baselines.datasets import base
from uncertainty_baselines.datasets import clinc_intent
class ClincIntentDetectionDatasetTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('Train', base.Split.TRAIN),
('Validation', base.Split.VAL),
('Test', base.Split.TEST))
def testDatasetSize(self, split):
batch_size = 9
eval_batch_size = 5
dataset_builder = ub.datasets.ClincIntentDetectionDataset(
batch_size=batch_size,
eval_batch_size=eval_batch_size,
shuffle_buffer_size=20)
dataset = dataset_builder.build(split).take(1)
element = next(iter(dataset))
features = element['features']
labels = element['labels']
expected_batch_size = (
batch_size if split == base.Split.TRAIN else eval_batch_size)
feature_shape, _ = features.shape
labels_shape = labels.shape
self.assertEqual(feature_shape, expected_batch_size)
self.assertEqual(labels_shape, (expected_batch_size,))
@parameterized.named_parameters(('IND', 'ind', clinc_intent._NUM_TRAIN_IND),
('OOD', 'ood', clinc_intent._NUM_TRAIN_OOD),
('All', 'all', clinc_intent._NUM_TRAIN_ALL))
def testDataMode(self, data_mode, num_train_examples_expected):
"""Tests if all data modes can be loaded correctly."""
batch_size = 9
eval_batch_size = 5
split = base.Split.TRAIN
dataset_builder = ub.datasets.ClincIntentDetectionDataset(
batch_size=batch_size,
eval_batch_size=eval_batch_size,
shuffle_buffer_size=20,
data_mode=data_mode)
num_train_examples = dataset_builder.info['num_train_examples']
dataset = dataset_builder.build(split).take(1)
element = next(iter(dataset))
features = element['features']
_, features_length = features.shape
self.assertEqual(features_length, clinc_intent._FEATURE_LENGTH)
self.assertEqual(num_train_examples, num_train_examples_expected)
def testTokenizer(self):
"""Tests if tokenizer is loaded correctly."""
dataset_builder = ub.datasets.ClincIntentDetectionDataset(
batch_size=9,
eval_batch_size=5,
shuffle_buffer_size=20,
)
# The number of valid tokens.
vocab_size = dataset_builder.tokenizer.num_words
self.assertEqual(vocab_size, 7291)
def testNumTokens(self):
"""Tests if num_tokens field is loaded correctly."""
batch_size = 9
eval_batch_size = 5
split = base.Split.TRAIN
dataset_builder = ub.datasets.ClincIntentDetectionDataset(
batch_size=batch_size,
eval_batch_size=eval_batch_size,
shuffle_buffer_size=20,)
dataset = dataset_builder.build(split).take(1)
element = next(iter(dataset))
features = element['features']
num_tokens = element['num_tokens']
# compute number of tokens expected
num_tokens_expected = np.sum(features.numpy() != 0, axis=-1)
num_tokens_loaded = num_tokens.numpy()
np.testing.assert_array_equal(num_tokens_loaded, num_tokens_expected)
if __name__ == '__main__':
tf.test.main()
| 2.109375 | 2 |
setup.py | luckygadfy/birthdayrate | 0 | 12792379 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
File Name: setup.py
Author: gadfy
"""
from setuptools import setup, find_packages  # pip install setuptools if this package is missing
with open("README.md", "r", encoding="utf-8") as f:
long_description = f.read()
setup(
name = "samebirthdayrate", #这里是pip项目发布的名称
version = "1.0.0", #版本号,数值大的会优先被pip
keywords = ("pip","samebirthdayrate"),
description = "caculate same birthday rate",
long_description = long_description,
license = "MIT Licence",
url = "https://narwelplists.herokuapp.com/", #项目相关文件地址,一般是github
author = "gadfy",
author_email = "<EMAIL>",
packages = find_packages(),
include_package_data = True,
platforms = "any",
install_requires = [] # third-party libraries this project depends on
)
| 1.742188 | 2 |
test/programytest/storage/stores/sql/store/test_braintree.py | motazsaad/fit-bot-fb-clt | 5 | 12792380 | <gh_stars>1-10
import unittest
from programy.storage.stores.sql.engine import SQLStorageEngine
from programy.storage.stores.sql.config import SQLStorageConfiguration
import programytest.storage.engines as Engines
class SQLBraintreeStoreTests(unittest.TestCase):
@unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
def test_initialise(self):
config = SQLStorageConfiguration()
engine = SQLStorageEngine(config)
engine.initialise()
with self.assertRaises(NotImplementedError):
engine.braintree_store()
| 2.25 | 2 |
Methods/Machine/Magnet/comp_volume.py | Superomeg4/pyleecan | 2 | 12792381 | # -*- coding: utf-8 -*-
"""@package Methods.Machine.MagnetType10.comp_volume
Compute the Magnet volume method
@date Created on Wed Dec 17 14:56:19 2014
@copyright (C) 2014-2015 EOMYS ENGINEERING.
@author pierre_b
"""
def comp_volume(self):
"""Compute the Magnet volume (by analytical computation)
Parameters
----------
self : Magnet
A Magnet object
Returns
-------
V: float
Magnet volume [m**3]
"""
return self.comp_surface() * self.Lmag
| 3.25 | 3 |
server/app/database.py | MozammilKhan/tdsb-project | 12 | 12792382 | <filename>server/app/database.py<gh_stars>10-100
import pymongo
from pymongo import MongoClient
from bson.objectid import ObjectId
from gridfs import GridFS
class DB(object):
URI = "mongodb+srv://TDSB301:<EMAIL>/test?retryWrites=true&w=majority"
@staticmethod
def init():
client = MongoClient(DB.URI)
DB.DATABASE = client['TDSB']
DB.FS = GridFS(DB.DATABASE)
@staticmethod
def insert(collection, data):
DB.DATABASE[collection].insert(data)
@staticmethod
def find_one(collection, query):
return DB.DATABASE[collection].find_one(query)
@staticmethod
def find(collection, query):
return DB.DATABASE[collection].find(query)
#need delete and update methods
@staticmethod
def remove(collection, query):
DB.DATABASE[collection].remove(query)
@staticmethod
def update(collection, query, update, option=False):
DB.DATABASE[collection].update(query, update, option)
@staticmethod
def save_file(file, filename):
file_id = DB.FS.put(file, filename=filename)
return file_id
@staticmethod
def get_file(file_id):
return DB.FS.get(ObjectId(file_id))
| 2.578125 | 3 |
example.py | amaargiru/pylogger | 0 | 12792383 | <filename>example.py
import pathlib
from pylogger import PyLogger
# Path to logs
log_file_path = "logs//example.log"
# Max file size
log_max_file_size = 1024 ** 2
# Max number of files
log_max_file_count = 10
if __name__ == '__main__':
# Create a path to the log file if it doesn't exist
path = pathlib.Path(log_file_path)
path.parent.mkdir(parents=True, exist_ok=True)
logger = PyLogger.get_logger(log_file_path, log_max_file_size, log_max_file_count)
logger.debug("Sample of DEBUG message")
logger.info("Sample of INFO message")
logger.warning("Sample of WARNING message")
logger.error("Sample of ERROR message")
logger.critical("Sample of CRITICAL message")
| 2.96875 | 3 |
listeler.py | bekirglr/Python_Temelleri | 0 | 12792384 | meyveler = ["Alma","Armut","Üzüm","Çilek","Karpuz","Muz"]
print(meyveler) # to print the list;
print(meyveler[0])
print(meyveler[1])
print(meyveler[2])
print(meyveler[3]) # in Python, list indexing starts from zero.
# to find out how many elements a list contains, we use the "len" command.
print(len(meyveler)) # length
meyveler[0]= "Elma"
# this is how to change a list item: here we change the element at index zero to "Elma". (The index number is the position of an item in the list.)
print(meyveler)
| 4.03125 | 4 |
tests/tests_context.py | theatlantic/v8-cffi | 39 | 12792385 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
from unittest.mock import patch, Mock
except ImportError:
from mock import patch, Mock
import unittest
import logging
import os
import tempfile
from contextlib import contextmanager
import six
from v8cffi.platform import platform
from v8cffi.vm import VM
from v8cffi import exceptions
from v8cffi import context
logging.disable(logging.CRITICAL)
class StringTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_with(self):
"""
It should support with statement
"""
with context._String() as s:
self.assertIsInstance(s, context._String)
self.assertEqual(
context.ffi.typeof('char **'),
context.ffi.typeof(s.string_ptr))
self.assertEqual(
context.ffi.typeof('size_t *'),
context.ffi.typeof(s.len_ptr))
self.assertEqual(s.string_ptr[0], context.ffi.NULL)
self.assertEqual(s.len_ptr[0], 0)
def test_to_str(self):
"""
It should support str call
"""
with context._String() as s:
string_ptr = s.string_ptr
s.string_ptr = [context.ffi.new('char[]', b'foo')]
s.len_ptr[0] = 3
self.assertEqual(six.text_type(s), 'foo')
s.string_ptr = string_ptr
def test_to_bytes(self):
"""
It should return the string bytes
"""
with context._String() as s:
string_ptr = s.string_ptr
s.string_ptr = [context.ffi.new('char[]', b'foo')]
s.len_ptr[0] = 3
self.assertEqual(s.to_bytes(), b'foo')
s.string_ptr = string_ptr
def test_free(self):
"""
It should free the string
"""
with patch('v8cffi.context.lib', autospec=True) as r:
s = context._String()
s.__enter__()
free = Mock()
r.v8cffi_free = free
s.__exit__()
self.assertTrue(free.called)
def test_assert_on_re_enter(self):
"""
It should fail to re enter
"""
s = context._String()
with s as _:
self.assertRaises(AssertionError, s.__enter__)
def test_assert_on_re_exit(self):
"""
It should fail to re exit
"""
s = context._String()
self.assertRaises(AssertionError, s.__exit__)
with s as _:
pass
self.assertRaises(AssertionError, s.__exit__)
def test_assert_on_re_create(self):
"""
It should allow to re create
"""
s = context._String()
with s as _:
self.assertIsNotNone(s.string_ptr)
self.assertIsNone(s.string_ptr)
with s as _:
self.assertIsNotNone(s.string_ptr)
@contextmanager
def js_file(data):
temp = tempfile.NamedTemporaryFile(delete=False)
temp.write(data)
temp.close()
try:
yield temp.name
finally:
os.remove(temp.name)
class ContextTest(unittest.TestCase):
def setUp(self):
self.vm = VM(platform)
self.vm.set_up()
def tearDown(self):
self.vm.tear_down()
def test_keep_vm(self):
"""
It should keep a reference to the VM
"""
ctx = context.Context(self.vm)
self.assertIsInstance(ctx._vm, VM)
def test_with(self):
"""
It should support with statement
"""
with context.Context(self.vm) as ctx:
self.assertIsInstance(ctx, context.Context)
def test_set_up(self):
"""
It should call __enter__
"""
ctx = context.Context(self.vm)
with patch.object(ctx, '__enter__', autospec=True) as r:
r.return_value = 'foo'
self.assertEqual(ctx.set_up(), 'foo')
r.assert_called_once_with()
def test_tear_down(self):
"""
It should call __exit__
"""
ctx = context.Context(self.vm)
with patch.object(ctx, '__exit__', autospec=True) as r:
ctx.tear_down()
r.assert_called_once_with()
def test_load_libs(self):
"""
It should run the script file content on V8
"""
script = b'var foo = "foo";'
with js_file(script) as path:
with context.Context(self.vm) as ctx:
with patch.object(ctx, 'run_script', autospec=True) as r:
ctx.load_libs([path])
r.assert_called_once_with(script, identifier=path)
def test_run_script(self):
"""
It should run the script on V8
"""
script_foo = b'var foo = "foo!";'
script_bar = 'var bar = "bar!";'
script_special = 'var txt = "áéíóú";'
with context.Context(self.vm) as ctx:
ctx.run_script(script_foo)
ctx.run_script(script_bar)
ctx.run_script(script_special)
self.assertEqual("foo!", ctx.run_script(b'foo'))
self.assertEqual("bar!", ctx.run_script('bar'))
self.assertEqual("áéíóú", ctx.run_script('txt'))
self.assertRaises(exceptions.V8JSError, ctx.run_script, 'baz')
self.assertRaises(exceptions.V8JSError, ctx.run_script, 'function[]();')
with context.Context(self.vm) as ctx:
self.assertRaises(exceptions.V8JSError, ctx.run_script, 'foo')
def test_builtin_libs(self):
"""
It should pre-load builtin libraries
"""
with context.Context(self.vm) as ctx:
self.assertEqual("20", ctx.run_script('Math.max(10, 20);'))
def test_run_script_trace_back(self):
"""
It should run the script on V8\
and get a useful traceback
"""
def get_exception_message(ctx, script):
try:
return ctx.run_script(script)
except exceptions.V8JSError as ex:
return six.text_type(ex)
script_oops = (
'function oops() {\n'
' thereMayBeErrors();\n'
' var my_var_2;\n'
'}')
script_oops2 = (
'function oops2() {\n'
' thereMayBeMoreErrors();\n'
' var my_var_2;\n'
'}')
var_a = 'var a;'
script_long = (
'function oops3() {\n' +
var_a * 100 + 'thereMayBeMoreErrors();' + var_a * 100 + '\n'
'}')
# todo: trim source line when too long
with context.Context(self.vm) as ctx:
ctx.run_script(script_oops, identifier='my_file_áéíóú.js')
ctx.run_script(script_oops2, identifier='my_other_file.js')
ctx.run_script(script_long)
self.assertEqual(
'my_file_áéíóú.js:2\n'
' thereMayBeErrors();\n'
' ^\n'
'ReferenceError: thereMayBeErrors is not defined\n'
' at oops (my_file_áéíóú.js:2:3)\n'
' at <anonymous>:1:1',
get_exception_message(ctx, 'oops()'))
self.assertEqual(
'my_other_file.js:2\n'
' thereMayBeMoreErrors();\n'
' ^\n'
'ReferenceError: thereMayBeMoreErrors is not defined\n'
' at oops2 (my_other_file.js:2:3)\n'
' at <anonymous>:1:1',
get_exception_message(ctx, 'oops2()'))
self.assertEqual(
'<anonymous>:2\n'
' ~Line too long to display.\n'
'ReferenceError: thereMayBeMoreErrors is not defined\n'
' at oops3 (<anonymous>:2:601)\n'
' at <anonymous>:1:1',
get_exception_message(ctx, 'oops3()'))
self.assertEqual(
'<anonymous>:1\n'
' nonExistentFunc();\n'
' ^\n'
'ReferenceError: nonExistentFunc is not defined\n'
' at <anonymous>:1:1',
get_exception_message(ctx, 'nonExistentFunc();'))
self.assertEqual(
'<anonymous>:1\n'
' function[]();\n'
' ^\n'
'SyntaxError: Unexpected token [',
get_exception_message(ctx, 'function[]();'))
# Has no .stack property
self.assertEqual(
'<anonymous>:2\n'
' throw "myException";\n'
' ^\n'
'myException',
get_exception_message(
ctx,
'(function() {\n'
' throw "myException";\n'
'})();'))
| 2.671875 | 3 |
ch09/fig09-04_format-logging-temperature.py | ricelee-com/pico-starter-kit | 0 | 12792386 | #!/usr/bin/python3
#+-+-+-+-+-+-+-+-+-+-+-+
#|R|i|c|e|L|e|e|.|c|o|m|
#+-+-+-+-+-+-+-+-+-+-+-+
# Copyright (c) 2021, <EMAIL>
# All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Origin: p111 at https://hackspace.raspberrypi.org/books/micropython-pico
import machine
import utime
sensor_temp = machine.ADC(machine.ADC.CORE_TEMP)
conversion_factor = 3.3 / (65535)
file = open("temps.txt", "w")
while True:
reading = sensor_temp.read_u16() * conversion_factor
temperature = 27 - (reading - 0.706)/0.001721
file.write(str(temperature) + "\n")
file.flush()
utime.sleep(10)
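# Sanity check on the conversion above (RP2040 on-chip temperature sensor): a reading of
# 0.706 V corresponds to 27 °C, and each further 1.721 mV drop in the reading adds
# roughly +1 °C.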
| 3.078125 | 3 |
meal_app/migrations/0001_initial.py | praveen868686/meals_myproject | 0 | 12792387 | # Generated by Django 3.2 on 2021-04-18 06:36
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Meal',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=50, null=True)),
('category', models.CharField(blank=True, max_length=10, null=True)),
('instructions', models.CharField(blank=True, max_length=4000, null=True)),
('region', models.CharField(blank=True, max_length=20, null=True)),
('slug', models.SlugField(default='test')),
('image_url', models.CharField(blank=True, max_length=50, null=True)),
],
),
]
| 1.875 | 2 |
annotations/gen_json.py | EricJoraskie/evidence-inference | 42 | 12792388 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 10 11:32:52 2018
@author: Eric
"""
import glob
import pandas as pd
files = glob.glob("./split_files/*.csv")
for file_name in files:
new_name = file_name.split(".csv")[0] + '.json'
df = pd.read_csv(file_name, engine = 'python', encoding = 'utf-8')
df.to_json(new_name) | 2.921875 | 3 |
lldb/test/API/symbol_ondemand/shared_library/TestSharedLibOnDemand.py | ornata/llvm-project | 0 | 12792389 | """Test that types defined in shared libraries work correctly."""
import lldb
import unittest2
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
import lldbsuite.test.lldbutil as lldbutil
class SharedLibTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.source = "shared.c"
self.line = line_number(self.source, "// Set breakpoint 0 here.")
self.shlib_names = ["foo"]
def common_setup(self):
# Run in synchronous mode
self.dbg.SetAsync(False)
self.runCmd("settings set symbols.load-on-demand true")
# Create a target by the debugger.
self.target = self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self.assertTrue(self.target, VALID_TARGET)
# Register our shared libraries for remote targets so they get
# automatically uploaded
self.environment = self.registerSharedLibrariesWithTarget(
self.target, self.shlib_names
)
ctx = self.platformContext
self.shared_lib_name = ctx.shlib_prefix + "foo." + ctx.shlib_extension
@skipIfWindows
def test_source_line_breakpoint(self):
self.build()
self.common_setup()
lldbutil.run_break_set_by_file_and_line(
self, "foo.c", 4, num_expected_locations=1, loc_exact=True
)
# Now launch the process, and do not stop at entry point.
process = self.target.LaunchSimple(
None, self.environment, self.get_process_working_directory()
)
self.assertTrue(process, PROCESS_IS_VALID)
# The stop reason of the thread should be breakpoint.
self.expect(
"thread list",
STOPPED_DUE_TO_BREAKPOINT,
substrs=["stopped", "stop reason = breakpoint"],
)
# The breakpoint should have a hit count of 1.
lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1)
thread = process.GetSelectedThread()
stack_frames = lldbutil.get_stack_frames(thread)
self.assertGreater(len(stack_frames), 2)
leaf_frame = stack_frames[0]
self.assertEqual("foo.c", leaf_frame.GetLineEntry().GetFileSpec().GetFilename())
self.assertEqual(4, leaf_frame.GetLineEntry().GetLine())
parent_frame = stack_frames[1]
self.assertEqual(
"shared.c", parent_frame.GetLineEntry().GetFileSpec().GetFilename()
)
self.assertEqual(7, parent_frame.GetLineEntry().GetLine())
@skipIfWindows
def test_symbolic_breakpoint(self):
self.build()
self.common_setup()
lldbutil.run_break_set_by_symbol(
self, "foo", sym_exact=True, num_expected_locations=1
)
# Now launch the process, and do not stop at entry point.
process = self.target.LaunchSimple(
None, self.environment, self.get_process_working_directory()
)
self.assertTrue(process, PROCESS_IS_VALID)
# The stop reason of the thread should be breakpoint.
self.expect(
"thread list",
STOPPED_DUE_TO_BREAKPOINT,
substrs=["stopped", "stop reason = breakpoint"],
)
# The breakpoint should have a hit count of 1.
lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1)
thread = process.GetSelectedThread()
stack_frames = lldbutil.get_stack_frames(thread)
self.assertGreater(len(stack_frames), 2)
leaf_frame = stack_frames[0]
self.assertEqual("foo.c", leaf_frame.GetLineEntry().GetFileSpec().GetFilename())
self.assertEqual(4, leaf_frame.GetLineEntry().GetLine())
parent_frame = stack_frames[1]
self.assertEqual(
"shared.c", parent_frame.GetLineEntry().GetFileSpec().GetFilename()
)
self.assertEqual(7, parent_frame.GetLineEntry().GetLine())
@skipIfWindows
def test_global_variable_hydration(self):
self.build()
self.common_setup()
lldbutil.run_break_set_by_file_and_line(
self, self.source, self.line, num_expected_locations=1, loc_exact=True
)
# Now launch the process, and do not stop at entry point.
process = self.target.LaunchSimple(
None, self.environment, self.get_process_working_directory()
)
self.assertTrue(process, PROCESS_IS_VALID)
# The stop reason of the thread should be breakpoint.
self.expect(
"thread list",
STOPPED_DUE_TO_BREAKPOINT,
substrs=["stopped", "stop reason = breakpoint"],
)
# The breakpoint should have a hit count of 1.
lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1)
self.expect(
"target variable --shlib a.out",
"Breakpoint in a.out should have hydrated the debug info",
substrs=["global_shared = 897"],
)
self.expect(
"target variable --shlib " + self.shared_lib_name,
"shared library should not have debug info by default",
matching=False,
substrs=["global_foo"],
)
self.expect(
"target variable global_foo --shlib " + self.shared_lib_name,
"Match global_foo in symbol table should hydrate debug info",
matching=True,
substrs=["global_foo = 321"],
)
| 2.109375 | 2 |
test/runtests.py | hwjsnc/SenSchema | 3 | 12792390 | import os
import pathlib
import platform
import subprocess
import unittest
# Ensure we're running in the correct folder so we don't destroy anything important
cwd = pathlib.Path(os.getcwd())
if cwd.name == "SenSchema":
os.chdir("test")
cwd = pathlib.Path(os.getcwd())
assert cwd.name == "test"
assert cwd.parent.name == "SenSchema"
current_os = platform.system()
if not pathlib.Path("kaitaistruct.py").exists():
print("Creating link to kaitai struct compiler")
if current_os == "Windows":
os.link(
"3rdparty/kaitai_struct_python_runtime/kaitaistruct.py", "kaitaistruct.py"
)
else:
os.symlink(
"3rdparty/kaitai_struct_python_runtime/kaitaistruct.py", "kaitaistruct.py"
)
print("Cleaning up...")
for file in cwd.glob("cs3tbl/*.py"):
if file.name == "__init__.py":
continue
print(f"Removing {file}")
file.unlink()
print("Generating parser code")
os.chdir("cs3tbl")
executable_file = (
"kaitai-struct-compiler.bat"
if current_os == "Windows"
else "kaitai-struct-compiler"
)
proc = subprocess.run(
[
executable_file,
"-t",
"python",
"--python-package",
".",
"-I",
"../../schemas/",
"../../schemas/cs3.ksy",
]
)
if proc.returncode != 0:
raise RuntimeError(f"kaitai-struct-compiler returned {proc.returncode}")
os.chdir("..")
print("Setup done.")
print("Loading tests.")
suite = unittest.TestSuite(
unittest.TestLoader().discover(start_dir=path, pattern="test_*.py")
for path in pathlib.Path("tbl").iterdir()
)
print("Running tests.")
unittest.TextTestRunner().run(suite)
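
# Illustrative follow-up (not part of the original script): the run result could be
# turned into a process exit code so that CI systems notice failing tests, e.g.
#   import sys
#   result = unittest.TextTestRunner().run(suite)
#   sys.exit(0 if result.wasSuccessful() else 1)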
| 2.40625 | 2 |
message_type.py | VladislavKorecky/pylogs | 0 | 12792391 | from abc import ABC
class MessageType(ABC):
"""
Interface for message types.
"""
def get_name(self):
"""
Return the name of the message type.
Returns:
str: The name of the message type.
"""
pass
def get_color_code(self):
"""
Return the color code of the message type.
Returns:
str: The color code of the message type.
"""
pass
| 3.609375 | 4 |
backend/SIFUser/migrations/0006_auto_20200229_1420.py | henrikhorluck/tdt4140-washlists | 0 | 12792392 | # Generated by Django 3.0.3 on 2020-02-29 14:20
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("Dormroom", "0004_auto_20200229_1420"),
("SIFUser", "0005_merge_20200228_1005"),
]
operations = [
migrations.AlterField(
model_name="user",
name="dormroom",
field=models.ForeignKey(
blank=True,
help_text="Kollektivet personen bor i",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="residents",
to="Dormroom.Dormroom",
),
)
]
| 1.6875 | 2 |
services/showcase/api/db/models/sandbox.py | Open-Earth-Foundation/traction | 0 | 12792393 | import uuid
from datetime import datetime
from typing import Optional, List
import pydantic
from sqlalchemy import Column, JSON
from sqlmodel import Field, Relationship
from api.db.models.base import BaseModel, BaseTable
class SchemaDef(pydantic.BaseModel):
id: Optional[str] = None
name: Optional[str] = None
version: Optional[str] = None
attributes: Optional[List[str]] = []
class Governance(pydantic.BaseModel):
schema_def: Optional[SchemaDef] = None
cred_def_id: Optional[str] = None
cred_def_tag: Optional[str] = None
class SandboxBase(BaseModel):
tag: Optional[str] = Field(nullable=True)
governance: dict = Field(default={}, sa_column=Column(JSON))
governance_cas: dict = Field(default={}, sa_column=Column(JSON))
class Sandbox(SandboxBase, BaseTable, table=True):
lobs: List["Lob"] = Relationship(back_populates="sandbox") # noqa: F821
students: List["Student"] = Relationship(back_populates="sandbox") # noqa: F821
applicants: List["Applicant"] = Relationship(back_populates="sandbox") # noqa: F821
class SandboxCreate(SandboxBase):
tag: Optional[str] = None
governance: Optional[Governance] = None
governance_cas: Optional[Governance] = None
class SandboxUpdate(SandboxBase):
id: uuid.UUID
tag: Optional[str] = None
governance: Optional[Governance] = None
governance_cas: Optional[Governance] = None
class SandboxRead(SandboxBase):
id: uuid.UUID
created_at: datetime
updated_at: datetime
tag: Optional[str] = None
governance: Optional[Governance] = None
governance_cas: Optional[Governance] = None
| 2.515625 | 3 |
magic_missile.py | forthedice/magic-missile | 0 | 12792394 |
import random
class MagicMissile:
def __init__(self, spell_slot_lvl, spell_mode):
try:
self.lvl = int(spell_slot_lvl)
        except (TypeError, ValueError):
            raise TypeError("spell_slot_lvl should be an integer")
if spell_mode == "roll_die" or spell_mode == "roll_dice":
self.mode = spell_mode
else:
raise Exception("spell_mode should be 'roll_die', or 'roll_dice'")
def _dart_num(self):
if self.lvl == 0:
print("You clearly have no magic ability,\
and are utterly weak")
exit()
elif self.lvl == 1:
return 3
else:
bonus = self.lvl - 1
return (3 + bonus)
def _attack_damage(self):
        # roll a single d4
        return random.randint(1, 4)
def _damage_roll_die(self):
dart_num = self._dart_num()
base_damage = self._attack_damage()
damage_per_dart = (base_damage + 1)
total_damage = damage_per_dart * dart_num
return { "darts_fired": dart_num,
"base_damage": base_damage,
"damage_per_dart": damage_per_dart,
"total_damage": total_damage }
def _damage_roll_dice(self):
dart_num = self._dart_num()
base_damage_per_dart = {}
total_damage_per_dart = {}
for dart in range(dart_num):
damage = self._attack_damage()
base_damage_per_dart["dart_{}".format(dart + 1)]\
= (damage)
total_damage_per_dart["dart_{}".format(dart + 1)]\
= (damage + 1)
total_damage = sum(total_damage_per_dart.values())
return { "darts_fired": dart_num,
"base_damage_by_dart": base_damage_per_dart,
"total_damage_by_dart": total_damage_per_dart,
"total_damage_all_darts": total_damage }
def cast(self):
if self.mode == "roll_die":
return self._damage_roll_die()
elif self.mode == "roll_dice":
return self._damage_roll_dice()
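

# Example usage (illustrative, not part of the original module): cast from a level 1
# spell slot, rolling each dart separately.
if __name__ == "__main__":
    spell = MagicMissile(1, "roll_dice")
    print(spell.cast())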
| 3.40625 | 3 |
code/Gibbs_sampling_horseshoe_prior.py | MariosNT/ML_Extreme_Climate_Events | 0 | 12792395 | <gh_stars>0
"""
Improved sampling code, with horseshoe prior
"""
import copy
from scipy.stats import invgamma as invgamma
import numpy as np
from scipy.stats import gamma, multivariate_normal
import pylab as plt
from Sampler import EllipticalSliceSampling
from timeseries_cp import cptimeseries
from timeseries_cp_extreme import cptimeseries_extreme
import sys
from joblib import Parallel, delayed
year = 2000 #For now, we're focusing on a single year
extreme_case = True
location = 'C:\\Users\\klera\\Documents\\GitHub\\ML_Extreme_Climate_Events\\code\\images\\year_'+str(year)+"\\"
# Model fields
X = np.load('C:\\Users\\klera\\Documents\\GitHub\\ML_Extreme_Climate_Events\\Data\\Data\\model_fields_Cardiff_{}.npy'.format(year))
# Rain fall
Y = np.load('C:\\Users\\klera\\Documents\\GitHub\\ML_Extreme_Climate_Events\\Data\\Data\\Rainfall_Cardiff_{}.npy'.format(year))
print(Y.shape)
##### Defining the priors from Sherman's paper .... without prior on sigmas, so just taking mean for them
if extreme_case:
theta_0 = np.concatenate(([-0.46, 0, 0, 0, 0, 0, 1.44, 0, 0, 0, 0, 0, -0.45, 0, 0, 0, 0, 0], np.zeros(shape=(32,))))
true_theta = theta_0
Sigma_0 = np.diag(np.concatenate(((1/6)*np.ones(shape=(30,)), (1/(1.3*65))*np.ones(shape=(20,)))))
else:
theta_0 = np.concatenate(([-0.46, 0, 0, 0, 0, 0, 1.44, 0, 0, 0, 0, 0, -0.45, 0, 0, 0, 0, 0], np.zeros(shape=(20,))))
## Realistic priors ##
# Sampling from prior to define a true_theta
beta_lambda, beta_mu, beta_omega = np.random.normal(size=(6,), loc=[-0.46, 0, 0, 0, 0, 0], scale=1/6), \
np.random.normal(size=(6,), loc=[1.44, 0, 0, 0, 0, 0], scale=1/6), \
np.random.normal(size=(6,), loc=[-0.45, 0, 0, 0, 0, 0], scale=1/6)
phi_lambda, phi_mu, gamma_lambda, gamma_mu = np.random.normal(size=(5,), scale=1/(1.3*65)),\
np.random.normal(size=(5,), scale=1/(1.3*65)),\
np.random.normal(size=(5,), scale=1/(1.3*65)),\
np.random.normal(size=(5,), scale=1/(1.3*65))
true_theta = np.array([])
for array in [beta_lambda, beta_mu, beta_omega, phi_lambda, phi_mu, gamma_lambda, gamma_mu]:
true_theta = np.concatenate([true_theta, array])
Sigma_0 = np.diag(np.concatenate(((1/6)*np.ones(shape=(18,)), (1/(1.3*65))*np.ones(shape=(20,)))))
print(np.diag(Sigma_0).shape)
#### Simulated data
if extreme_case:
z, y, lambda_t, _, _ = cptimeseries_extreme(true_theta).simulate(X)
print(cptimeseries_extreme(true_theta).loglikelihood(z, y, X))
else:
z, y, lambda_t, _, _ = cptimeseries(true_theta).simulate(X)
print(cptimeseries(true_theta).loglikelihood(z, y, X))
#### Now we want to implement a Gibbs sampler where we update theta and z one after another
# number of Gibbs steps we want to use
n_step_Gibbs = 1
### Lists to store the samples
Theta, Z, Lambda, Nu, Tau, Eta = [], [], [], [], [], []
# Extract zero/non-zero indices of y
en = np.arange(len(Y))
bool_y_zero = (Y==0)
zero_y_indices = en[bool_y_zero]
nonzero_y_indices = en[np.invert(bool_y_zero)]
## Lets first initialize theta and z for a Markov chain ##
#### For non-zero y, get distribution of rainfalls and calculate quantiles
#### Then use this to initialise z (1, 2, 3, 4), based on the quantiles
y_non_zero = Y[Y>0]
edge1 = np.quantile(y_non_zero, 0.25)
edge2 = np.quantile(y_non_zero, 0.5)
edge3 = np.quantile(y_non_zero, 0.75)
edge4 = np.max(Y)
bin_2 = (edge1<=Y) & (Y<=edge2)
bin_3 = (edge2<Y) & (Y<=edge3)
bin_4 = (edge3<Y) & (Y<=edge4)
z_state = np.ones(shape=Y.shape)
z_state[bin_2] = 2
z_state[bin_3] = 3
z_state[bin_4] = 4
z_state[zero_y_indices] = 0  # z_state is 0 on dry days and 1-4 on wet days (quantile bin)
theta_state = theta_0
lambda_square_array_state = np.diag(Sigma_0)
nu_array_state = invgamma.rvs(a=1, scale=1 + (1 / lambda_square_array_state))
eta_state = invgamma.rvs(a=0.5, scale=1)
tau_square_state = invgamma.rvs(a=((lambda_square_array_state.shape[0] + 1) / 2),
scale=(1 / eta_state + 0.5 * (
np.sum(np.power(theta_state, 2) / (lambda_square_array_state)))))
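
# Editor note on the hierarchy being sampled: the horseshoe prior places
# theta_j ~ N(0, lambda_j^2 * tau^2) with half-Cauchy priors on the local scales
# lambda_j and on the global scale tau. The inverse-gamma auxiliary variables nu_j and
# eta turn every conditional into an inverse-gamma draw (in the spirit of the
# Makalic & Schmidt (2016) sampler), which is what Steps 3-6 inside the Gibbs loop update.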
# Add to stored samples
Theta.append(copy.deepcopy(theta_state))
Z.append(copy.deepcopy(z_state))
Lambda.append(copy.deepcopy(lambda_square_array_state))
Nu.append(copy.deepcopy(nu_array_state))
Tau.append(copy.deepcopy(tau_square_state))
Eta.append(copy.deepcopy(eta_state))
#### Parallel Case
def parallel_indices(ind_non, ind_z, possible_z, loglikelihood_z):
possible_z[ind_non] = ind_z + 1
#### This is wrong - include the prior in z inside the loglikelihood (final step)
prob_z[ind_z] = loglikelihood_z(possible_z) #[0]*np.log(np.random.poisson(loglikelihood_z(possible_z)[1]))
return prob_z
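
# Note (editor comment): parallel_indices mutates the module-level prob_z array that is
# created inside the Gibbs loop below; because Parallel is called with prefer="threads",
# all workers share and write into that same array.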
perc = 0.1
for ind_Gibbs in range(n_step_Gibbs):
#print(ind_Gibbs)
##### Copy the present state of the variables to sample .. ####
theta_state = copy.deepcopy(Theta[-1])
z_state = copy.deepcopy(Z[-1])
lambda_square_array_state = copy.deepcopy(Lambda[-1])
nu_array_state = copy.deepcopy(Nu[-1])
tau_square_state = copy.deepcopy(Tau[-1])
eta_state = copy.deepcopy(Eta[-1])
Sigma_0 = np.diag(lambda_square_array_state * tau_square_state)
while True:
try:
#### Step 1: Sample theta using Elliptic Slice Sampler ####
if extreme_case:
# define conditional likelihood for theta
loglikelihood_theta = lambda theta: cptimeseries_extreme(theta).loglikelihood(z_state, Y, X)
# Sample/Update theta
## Here Mean and Sigma are the mean and var-cov matrix of Multivariate normal used as the prior.
## f_0 defines the present state of the Markov chain
Samples = EllipticalSliceSampling(LHD=loglikelihood_theta, n=1, Mean=theta_0, Sigma=Sigma_0,
f_0=theta_state)
theta_state = Samples[-1]
# define conditional likelihood for z
loglikelihood_z = lambda z: cptimeseries_extreme(theta_state).loglikelihood(z, Y, X)
else:
# define conditional likelihood for theta
loglikelihood_theta = lambda theta: cptimeseries(theta).loglikelihood(z_state, Y, X)
# Sample/Update theta
## Here Mean and Sigma are the mean and var-cov matrix of Multivariate normal used as the prior.
## f_0 defines the present state of the Markov chain
Samples = EllipticalSliceSampling(LHD=loglikelihood_theta, n=1, Mean=theta_0, Sigma=Sigma_0,
f_0=theta_state)
theta_state = Samples[-1]
# define conditional likelihood for z
loglikelihood_z = lambda z: cptimeseries(theta_state).loglikelihood(z, Y, X)
# Step 2: Sample/Update z
possible_z = z_state
nonzero_y = np.random.choice(nonzero_y_indices, size=int(perc*len(nonzero_y_indices)))
for ind_nonzero in nonzero_y:
prob_z = np.zeros(9)
prob_z = Parallel(n_jobs=4, prefer="threads")(delayed(parallel_indices)(ind_nonzero, ind_z, possible_z, loglikelihood_z)\
for ind_z in range(9))
prob_z = np.sum(prob_z, axis=0)
#print(prob_z)
finite_indices = np.isfinite(prob_z)
prob_z = np.exp(prob_z[finite_indices] - np.min(prob_z[finite_indices]))
possible_z[ind_nonzero] = np.random.choice(a=np.arange(1, 10)[finite_indices],
p=prob_z / np.sum(prob_z))
z_state = possible_z
except (RuntimeError, ValueError, TypeError, NameError, ZeroDivisionError, OSError):
continue
break
#### Step 3: Sample lambda_aray ###
lambda_square_array_state = invgamma.rvs(a=np.ones(lambda_square_array_state.shape[0]),
scale=(1 / nu_array_state) + (theta_state ** 2) / (2 * tau_square_state))
#### Step 4: Sample tau ###
tau_square_state = invgamma.rvs(a=((lambda_square_array_state.shape[0] + 1) / 2),
scale=(1 / eta_state + 0.5 * (
np.sum(np.power(theta_state, 2) / (lambda_square_array_state)))))
#### Step 5: Sample nu_array ###
nu_array_state = invgamma.rvs(a=1, scale=1 + (1 / lambda_square_array_state))
#### Step 6: Sample eta ###
eta_state = invgamma.rvs(a=1, scale=1 + (1 / tau_square_state))
print(str(ind_Gibbs)+'-st/th iteration successfully finished' )
# Add to stored samples
Theta.append(copy.deepcopy(theta_state))
Z.append(copy.deepcopy(z_state))
Lambda.append(copy.deepcopy(lambda_square_array_state))
Nu.append(copy.deepcopy(nu_array_state))
Tau.append(copy.deepcopy(tau_square_state))
Eta.append(copy.deepcopy(eta_state))
if extreme_case:
print(str(ind_Gibbs)+'-st/th sample LogLikeliHood: '+str(cptimeseries_extreme(Theta[ind_Gibbs]).loglikelihood(Z[ind_Gibbs],Y, X)))
else:
print(str(ind_Gibbs)+'-st/th sample LogLikeliHood: '+str(cptimeseries(Theta[ind_Gibbs]).loglikelihood(Z[ind_Gibbs],Y, X)))
| 1.976563 | 2 |
leetcode/70/70.climbing-stairs.py | Yu-Ren-NEU/Leetcode | 1 | 12792396 | #
# @lc app=leetcode id=70 lang=python3
#
# [70] Climbing Stairs
#
# @lc code=start
class Solution:
def climbStairs(self, n: int) -> int:
        # No matter how many stairs there are, each move is either 1 step or 2 steps.
        # So the only ways to arrive at stair n are:
        # take 1 step from stair n-1, or take 2 steps from stair n-2.
        # Hence the number of ways to reach stair n is the sum of those two cases,
        # i.e. the recurrence dp[n] = dp[n-1] + dp[n-2] (the Fibonacci sequence).
        # x=1 -> 1
        # x=2 -> 2
if n == 1: return 1
if n == 2: return 2
dp = [0]*(n+1)
dp[1] = 1
dp[2] = 2
idx = 3
while(idx <= n):
# print(idx)
dp[idx] = dp[idx-1] + dp[idx-2]
idx += 1
return dp[n]
def test(self):
assert(self.climbStairs(2) == 2)
assert(self.climbStairs(3) == 3)
assert(self.climbStairs(4) == 5)
sol = Solution()
sol.test()
# @lc code=end
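
# Illustrative space-optimized variant (added for comparison; not part of the original
# submission): the same dp[n] = dp[n-1] + dp[n-2] recurrence kept in two variables.
def climb_stairs_constant_space(n: int) -> int:
    prev, curr = 1, 1  # ways to reach step 0 and step 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr

assert climb_stairs_constant_space(4) == sol.climbStairs(4)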
| 3.359375 | 3 |
payments/migrations/0001_initial.py | fcurella/checkout | 0 | 12792397 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
import markupfield.fields
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='PaymentRequest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=100, blank=True)),
('description', markupfield.fields.MarkupField(rendered_field=True, blank=True)),
('description_markup_type', models.CharField(default=b'markdown', max_length=30, editable=False, blank=True, choices=[(b'', b'--'), (b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured Text')])),
('amount', models.FloatField()),
('_description_rendered', models.TextField(editable=False)),
('currency', models.CharField(default=b'USD', max_length=3, choices=[(b'USD', b'USD'), (b'EUR', b'EUR')])),
('settled', models.BooleanField(default=False)),
('created_on', models.DateTimeField(auto_now_add=True)),
('paid_on', models.DateTimeField(default=datetime.datetime.now, null=True, blank=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
]
| 1.804688 | 2 |
nei/server.py | pyviz/nei | 13 | 12792398 | #!/usr/bin/env python
import logging
import tornado
import tornado.web
from tornado import httpserver
from tornado import ioloop
from tornado import websocket
import os
import sys
import json
import webbrowser
import nbformat
from queue import Queue
from .execute import ThreadedExecutor
from .cells import ExecutableNotebook
STATIC_PATH = os.path.join(os.path.split(__file__)[0], 'client')
ExecutableNotebook.STATIC_PATH = STATIC_PATH
class Session(object):
"""
Global state of the server that doesn't belong to any single
websocket connection but to the whole server session.
"""
def __init__(self):
self._browser = None
self._editor = None
self.notebook = None
self.buffers = {}
def reset(self):
self._browser = None
        self._editor = None
self.notebook = None
self.buffers = {}
@property
def browser(self):
return self._browser
@browser.setter
def browser(self, connection):
if self._browser is not None:
logging.info("WARNING: Only one browser connection expected")
self._browser = connection
@property
def editor(self):
return self._editor
@editor.setter
def editor(self, connection):
if self._editor is not None:
logging.info("WARNING: Only editor browser connection expected")
self._editor = connection
session = Session()
class PeriodicOutputCallback(object):
"""
Sets up a periodic callback to push output to cells by polling from
the queue pushed to by the ThreadedExecutor.
"""
def __init__(self, server, period=20):
self.server = server
self.notebook = None
self.period = period
def switch_notebook(self, notebook):
self.notebook = notebook
def start(self):
self.callback = ioloop.PeriodicCallback(self.__call__, self.period)
self.callback.start()
def stop(self):
self.callback.stop()
def __call__(self):
"Processes queue pushed to by ThreadedExecutor"
try:
val = self.server.queue.get_nowait()
self.server.queue.task_done()
result, status = val
except:
return
if status == 'completion':
position = self.notebook.completion_info['position']
relative_position = self.notebook.completion_info['relative_position']
# Adjusted for emacs point position
start_delta = relative_position - result['cursor_start']
end_delta = relative_position - result['cursor_end']
result['cursor_start'] = position - start_delta
result['cursor_end'] = position - end_delta
session.editor.write_message(json.dumps({'cmd':'completion',
'data': result}))
self.notebook.completion_info = None
return
if session.browser and (status == 'comm_open'):
logging.info("REQUEST TO OPEN COMM FOR JS: %s" % result)
self.notebook.message(session.browser, 'comm_open', result['content'])
# e.g:
# {'data': {}, 'comm_id': 'ee0a39d3728945cdb4ad30848b7856fc',
# 'target_name': 'ZOO', 'target_module': None}
return
elif session.browser and (status == 'comm_msg'):
buffers = result['buffers']
metadata = result.get('metadata', {})
self.notebook.message(session.browser, 'comm_msg', # FIXME: redundant 'comm_msg'
{'msg_type': 'comm_msg',
'metadata': metadata,
'content': result['content']},
buffers=buffers)
return
else:
outnode, execution_count = result, status
if session.browser:
cell = self.notebook.find_cell(execution_count)
if cell is None: return # There may be no cell if running a silent execution
position = self.notebook.cell_position(cell)
if execution_count is None:
# For silent execution before *any* output
return
# What about silent execution after start?
self.notebook.update_cell_outputs(session.browser, position, outnode)
class WS(websocket.WebSocketHandler):
def open(self):
self.queue = Queue()
self.output_callback = PeriodicOutputCallback(self)
self.output_callback.start()
logging.info("Connection opened")
def toggle_notebook(self, name):
notebook = session.buffers.get(name, None)
if notebook is None: # Create notebook
# Note that there are multiple connections and we want only one notebook!
# (for now)
notebook = ExecutableNotebook(
(ThreadedExecutor, "threaded-kernel", self.queue),
name=name, cells=list())
session.buffers[name] = notebook
session.notebook = notebook
self.output_callback.switch_notebook(notebook)
def on_message(self, message):
"Websocket on_message handler. Tracks connection type."
try:
payload = json.loads(message)
except Exception as e:
logging.info('JSON parse exception: %s' % str(e))
return
if 'cmd' in payload:
if payload['cmd'] in ['start_mirror']: # Verbose commands
logging.info(u"Received %s command" % payload['cmd'])
else:
logging.info(u"Received message: {0:<.100}".format(message))
if payload.get('cmd') == 'reset_server':
self.output_callback.stop()
session.reset()
return
if payload.get('init', False) == 'editor':
logging.info('Added editor client connection')
session.editor = self
return
if payload.get('init', False) == 'browser':
session.browser = self
logging.info('Added browser client connection')
if session.notebook and len(session.notebook.cells) > 0: # TODO: Needs updating
logging.info("Restart with previously opened notebook")
session.notebook.reload(self)
# If you hit reload in the browser, the CSS needs to be re-sent
session.notebook.update_theme(self, css=None)
return
# SOME COMMANDS (e.g mirroring) should happen even without a browser tab open!
self.toggle_notebook(payload['name'])
if payload.get('cmd', False) == 'reload_page':
# Reload over the browser connection (currently assuming only one)
if session.browser is not None:
session.notebook.reload(session.browser)
return
editor_msg = session.notebook.dispatch(session.browser, payload)
if (editor_msg is not None) and (session.editor is not None):
session.editor.write_message(json.dumps(editor_msg))
def check_origin(self, origin):
return True
def on_close(self):
logging.info("ON_CLOSE")
if self is session.browser:
session.browser = None
self.output_callback.stop()
def serve(ws_port=9999, html_port=8000, host='0.0.0.0'):
import logging
logging.basicConfig(level=logging.INFO)
html_handler = (r'/(.*)', tornado.web.StaticFileHandler,
{'path': STATIC_PATH})
tornado.web.Application([html_handler]).listen(html_port)
ws_server = httpserver.HTTPServer(tornado.web.Application([(r"/", WS)]))
ws_server.listen(ws_port, host)
logging.info("STARTED: Server started and listening")
ioloop.IOLoop.instance().start()
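
if __name__ == '__main__':
    # Illustrative entry point (not present in the original module): start the HTTP and
    # websocket servers on their default ports.
    serve()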
| 2.453125 | 2 |
correios/__init__.py | rennancockles/rastreio-correios | 2 | 12792399 | <reponame>rennancockles/rastreio-correios
from correios.entities import Objeto
from correios.main import Correios
__version__ = "0.1.4"
__all__ = ["Objeto", "Correios"]
| 1.023438 | 1 |
fline/losses/research/connections.py | asromahin/fline | 5 | 12792400 | import torch
class BboxLoss(torch.nn.Module):
def __init__(self, device):
super(BboxLoss, self).__init__()
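        # Note (editor comment): IouDotsLoss is assumed to be provided elsewhere in the
        # fline package; it is neither defined nor imported in this snippet.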
self.loss = IouDotsLoss(device)
def forward(
self,
pred_vectors: torch.Tensor,
target_bboxes: torch.Tensor,
):
pred_count = pred_bboxes.shape[1]
target_count = target_bboxes.shape[1]
k_keys = list(range(pred_count))
res_loss = None
count_loss = 0
for i in range(target_count):
target_bbox = target_bboxes[:, i, :]
corrects_mask = target_bbox.max(dim=1)[0] != 0
target_bbox = target_bbox[corrects_mask]
cur_loss = None
select_k = None
for k in k_keys:
pred_bbox = pred_bboxes[:, k, :][corrects_mask]
#print(target_bbox.shape, pred_bbox.shape, corrects_mask.shape)
tloss = self.loss(pred_bbox, target_bbox)
if cur_loss is None:
cur_loss = tloss
select_k = k
else:
if tloss < cur_loss and tloss >= 0:
cur_loss = tloss
select_k = k
if cur_loss is not None:
k_keys.remove(select_k)
count_loss += 1
if res_loss is None:
res_loss = cur_loss
else:
res_loss += cur_loss
for k in k_keys:
res_loss += 1
count_loss += 1
return res_loss/count_loss | 2.4375 | 2 |
points/migrations/0007_alter_spend_receipt.py | JXIong15/fetch-points | 0 | 12792401 | <gh_stars>0
# Generated by Django 4.0.1 on 2022-01-13 17:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('points', '0006_alter_spend_receipt'),
]
operations = [
migrations.AlterField(
model_name='spend',
name='receipt',
field=models.JSONField(),
),
]
| 1.25 | 1 |
main/cifar_10_resnet.py | xiaonanQua/experiment | 0 | 12792402 | import torch
import torch.nn as nn
from torchvision.datasets import CIFAR10
from torch.optim import Adam
from torchvision.models import resnet50
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
import os
import time
# --------------------- Configuration ---------------------
# Dataset root, project root and training-output directories (lab machine; adjust to your environment)
root_dataset = '/home/team/xiaonan/Dataset/'
root_project = '/home/team/xiaonan/experients/'
root_data_save = '/home/team/xiaonan/data_save/'
# Dataset root, project root and training-output directories (local machine)
# self.root_dataset = '/home/xiaonan/Dataset/'
# self.root_project = '/home/xiaonan/experients/'
# self.root_data_save = '/home/xiaonan/data_save/'
# Dataset root, project root and training-output directories (server)
# root_dataset = 'Dataset/'
# root_project = ''
# root_data_save = 'data_save/'
# Directories for saving model checkpoints and log files
model_dir = root_data_save + 'checkpoints/'
log_dir = root_data_save + 'log/'
# Create the directories if they do not exist
if os.path.exists(root_data_save) is False:
os.mkdir(root_data_save)
if os.path.exists(model_dir) is False:
os.mkdir(model_dir)
if os.path.exists(log_dir) is False:
os.mkdir(log_dir)
# CIFAR-10 dataset directory; checkpoint and log names; number of classes
cifar_10_dir = root_dataset + 'cifar-10/'
model_dir = model_dir + 'cifar10_resnet50_v1' + '.pth'
log_dir = log_dir + 'cifar10_resnet50_v1'
num_classes = 10
if os.path.exists(log_dir) is False:
os.mkdir(log_dir)
# Check which device is available
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('device:{}'.format(device))
# Hyperparameters
epochs = 200
batch_size = 32
learning_rate = 0.1
lr_step_size = 30
weight_decay = 1e-4
momentum = 0.9
# Normalization mean and standard deviation (ImageNet statistics)
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
# ----------------------------- Load the dataset --------------------------------
# Preprocessing for the training, validation and test sets
train_data_preprocess = transforms.Compose([transforms.Resize(size=(224, 224)),
#transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(brightness=0.4, saturation=0.4,
hue=0.4, contrast=0.4),
transforms.ToTensor(),
transforms.Normalize(mean=mean,
std=std)])
valid_data_preprocess = transforms.Compose([transforms.Resize(size=(224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=mean,
std=std)])
# Build the training and test datasets
train_dataset = CIFAR10(root=cifar_10_dir, train=True, transform=train_data_preprocess)
test_dataset = CIFAR10(root=cifar_10_dir, train=False, transform=valid_data_preprocess)
# Data loaders for the training and test sets
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True, num_workers=4)
test_loader = DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=True, num_workers=4)
# ------------------------ Build the network, define the loss function and optimizer ------------------------
net = resnet50()
print(net)
# Replace the final fully connected layer
fc_in_features = net.fc.in_features  # number of input features of the network's final layer
print(fc_in_features)
net.fc = nn.Linear(in_features=fc_in_features, out_features=num_classes)
print(net)
# Move the network to the GPU
net.to(device)
# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = Adam(params=net.parameters(), weight_decay=weight_decay)
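# Note (editor comment): learning_rate, lr_step_size and momentum are defined above but
# are not passed to the optimizer, so Adam runs with its default learning rate and no
# learning-rate schedule.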
# ---------------------- Train the network ------------------------------------
print('Starting training....')
# Create the writer that logs training information for TensorBoard
writer = SummaryWriter(log_dir)
# ------------------ Training and validation helper functions --------------------
# Training helper
def _train(train_loader, num_step):
print(' training stage....')
    # Switch the network to training mode; reset the gradients
    net.train()
    optimizer.zero_grad()
    # Accumulators for accuracy, loss, number of batches and number of samples
train_acc = 0.0
train_loss = 0.0
num_batch = 0
num_samples = 0
    # Iterate over the training data
    for index, data in enumerate(train_loader, start=0):
        # Get the batch and move it to the GPU
images, labels = data
images = images.to(device)
labels = labels.to(device)
        # Forward pass; apply softmax so the outputs are probabilities in [0, 1]; compute the loss
outputs = net(images)
outputs = F.softmax(outputs, dim=1)
loss = criterion(outputs, labels)
        # Index of the highest-probability class for each prediction
        preds = torch.argmax(outputs, dim=1)
        # Batch accuracy: fraction of samples whose prediction matches the label
        # Accumulate accuracy, loss, batch count and sample count
acc = torch.sum(preds == labels).item()
train_acc += acc
train_loss += loss
num_batch += 1
num_samples += images.size(0)
        # Backward pass (compute gradients); optimizer step (update parameters); reset the gradients
loss.backward()
optimizer.step()
optimizer.zero_grad()
        # Periodically report the loss and accuracy
        if (index + 1) % 30 == 0:
            # Print the batch loss and accuracy
print(' batch:{}, batch_loss:{:.4f}, batch_acc:{:.4f}\n'.
format(index, loss, acc / images.size(0)))
            # Log the batch loss and accuracy
            # writer.add_scalar('Train/Loss', scalar_value=loss, global_step=index)  # single tag
writer.add_scalars(main_tag='Train(batch)',
tag_scalar_dict={'batch_loss': loss,
'batch_accuracy': acc / images.size(0)},
global_step=num_step)
            # Advance the global step
num_step += 1
    # Epoch-level training accuracy and loss
train_acc = train_acc / num_samples
train_loss = train_loss / num_batch
return train_acc, train_loss, num_step
# Validation helper
def _valid(valid_loader):
    print(' valid stage...')
    # Switch the network to evaluation mode; accumulators for accuracy, loss, batch count and sample count
net.eval()
valid_acc = 0.0
valid_loss = 0.0
num_batch = 0
num_samples = 0
    # Evaluate on the test set
    with torch.no_grad():  # no gradient tracking, which reduces memory usage
for images, labels in valid_loader:
            # Move the batch to the GPU
            images, labels = images.to(device), labels.to(device)
            # Forward pass; apply softmax so the outputs are probabilities in [0, 1]
outputs = net(images)
outputs = F.softmax(outputs, dim=1)
            # Index of the highest-probability class for each prediction; compute the loss
            pred = torch.argmax(outputs, dim=1)
            loss = criterion(outputs, labels)
            # Count correct predictions; accumulate the loss
valid_acc += torch.sum((pred == labels)).item()
valid_loss += loss
num_batch += 1
num_samples += images.size(0)
    # Validation accuracy and loss
    valid_acc = valid_acc / num_samples
    valid_loss = valid_loss / num_batch
    return valid_acc, valid_loss
# ---------------------------- Run the training epochs --------------------------------
# Training start time, best validation accuracy (used to save the best model), global training-step counter
start_time = time.time()
best_acc = 0.0
num_step = 0
# Start the epoch loop
for epoch in range(epochs):
    # Record the epoch start time and print the epoch header
epoch_start_time = time.time()
print('Epoch {}/{}'.format(epoch, epochs - 1))
print('-' * 20)
    # Train
    train_acc, train_loss, num_step = _train(train_loader, num_step)
    # Validate
    valid_acc, valid_loss = _valid(test_loader)
    # Report the average training and validation loss and accuracy for this epoch
epoch_time = time.time() - epoch_start_time
print(' epoch:{}/{}, time:{:.0f}m {:.0f}s'.
format(epoch, epochs, epoch_time // 60, epoch_time % 60))
print(' train_loss:{:.4f}, train_acc:{:.4f}\n valid_loss:{:.4f}, valid_acc:{:.4f}'.
format(train_loss, train_acc, valid_loss, valid_acc))
    # Log the epoch results
writer.add_scalars(main_tag='Train(epoch)',
tag_scalar_dict={'train_loss': train_loss, 'train_acc': train_acc,
'valid_loss': valid_loss, 'valid_acc': valid_acc},
global_step=epoch)
    # Keep the best model parameters
    if valid_acc > best_acc:
        # Update the best accuracy and save the best model parameters
best_acc = valid_acc
torch.save(net.state_dict(), model_dir)
print(' epoch:{}, update model...'.format(epoch))
print()
# Total training time and best accuracy
end_time = time.time() - start_time
print('Training complete in {:.0f}m {:.0f}s'.format(end_time // 60, end_time % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# Close the writer
writer.close()
| 2 | 2 |
src/cogs/owner/owner.py | m1ten/sentry-py | 0 | 12792403 | import discord
from discord import app_commands
from discord.ext import commands
class owner(commands.Cog):
def __init__(self, bot: commands.Bot) -> None:
self.bot = bot
@app_commands.command(name='eval', description='evaluate')
async def eval(self, interaction: discord.Interaction, code: str) -> None:
if await interaction.client.is_owner(interaction.user):
try:
result = eval(code)
await interaction.response.send_message(f'> {result}', ephemeral=True)
except Exception as e:
await interaction.response.send_message(f'`{e}`', ephemeral=True)
else:
await interaction.response.send_message(f'You are not the owner of me.', ephemeral=True)
@app_commands.command(name='load', description='load or reload a cog')
async def load(self, interaction: discord.Interaction, cog: str, reload: bool) -> None:
if await interaction.client.is_owner(interaction.user):
try:
if reload:
await interaction.response.send_message(f'Reloading `{cog}`...', ephemeral=True)
await self.bot.reload_extension(cog)
await interaction.edit_original_message(content=f'Reloaded `{cog}`!')
else:
await interaction.response.send_message(f'Loading `{cog}`...', ephemeral=True)
await self.bot.load_extension(cog)
await interaction.edit_original_message(content=f'Loaded `{cog}`!')
except Exception as e:
await interaction.response.send_message(f'`{e}`', ephemeral=True)
else:
await interaction.response.send_message(f'You are not the owner of me.', ephemeral=True)
@app_commands.command(name='check_perms', description='check the permissions of the user')
async def check_perms(self, interaction: discord.Interaction, permissions: str, user: discord.User) -> None:
from src.perms import perms
# check if author is bot owner
if await perms(interaction, 'bot_owner'):
# run perms
result = await perms(interaction, permissions, user)
# send result
await interaction.response.send_message(f'{result}', ephemeral=True)
else:
await interaction.response.send_message(f'You are not the owner of me.', ephemeral=True)
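

# Illustrative extension entry point (an assumption, not part of the original file):
# discord.py 2.x loads a cog file via an async setup coroutine such as this one.
async def setup(bot: commands.Bot) -> None:
    await bot.add_cog(owner(bot))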
| 2.421875 | 2 |
autogalaxy/analysis/analysis.py | jonathanfrawley/PyAutoGalaxy | 0 | 12792404 | from astropy import cosmology as cosmo
import autofit as af
from autoarray import preloads as pload
from autoarray.exc import PixelizationException, InversionException, GridException
from autoarray.inversion import pixelizations as pix, inversions as inv
from autofit.exc import FitException
from autogalaxy.analysis import result as res
from autogalaxy.analysis import visualizer as vis
from autogalaxy.fit import fit_imaging, fit_interferometer
from autogalaxy.galaxy import galaxy as g
from autogalaxy.plane import plane as pl
class Analysis(af.Analysis):
def __init__(self, hyper_result=None, cosmology=cosmo.Planck15):
self.hyper_result = hyper_result
self.cosmology = cosmology
class AnalysisDataset(Analysis):
def __init__(
self,
dataset,
hyper_result=None,
cosmology=cosmo.Planck15,
settings_pixelization=pix.SettingsPixelization(),
settings_inversion=inv.SettingsInversion(),
preloads=pload.Preloads(),
):
super().__init__(hyper_result=hyper_result, cosmology=cosmology)
self.dataset = dataset
if self.hyper_result is not None:
if hyper_result.search is not None:
hyper_result.search.paths = None
self.set_hyper_dataset(result=self.hyper_result)
else:
self.hyper_galaxy_image_path_dict = None
self.hyper_model_image = None
self.settings_pixelization = settings_pixelization
self.settings_inversion = settings_inversion
self.preloads = preloads
def set_hyper_dataset(self, result):
self.hyper_galaxy_image_path_dict = result.hyper_galaxy_image_path_dict
self.hyper_model_image = result.hyper_model_image
def hyper_image_sky_for_instance(self, instance):
if hasattr(instance, "hyper_image_sky"):
return instance.hyper_image_sky
def hyper_background_noise_for_instance(self, instance):
if hasattr(instance, "hyper_background_noise"):
return instance.hyper_background_noise
def plane_for_instance(self, instance):
return pl.Plane(galaxies=instance.galaxies)
def associate_hyper_images(self, instance: af.ModelInstance) -> af.ModelInstance:
"""
Takes images from the last result, if there is one, and associates them with galaxies in this search
where full-path galaxy names match.
If the galaxy collection has a different name then an association is not made.
e.g.
galaxies.lens will match with:
galaxies.lens
but not with:
galaxies.lens
galaxies.source
Parameters
----------
instance
A model instance with 0 or more galaxies in its tree
Returns
-------
instance
The input instance with images associated with galaxies where possible.
"""
if self.hyper_galaxy_image_path_dict is not None:
for galaxy_path, galaxy in instance.path_instance_tuples_for_class(
g.Galaxy
):
if galaxy_path in self.hyper_galaxy_image_path_dict:
galaxy.hyper_model_image = self.hyper_model_image
galaxy.hyper_galaxy_image = self.hyper_galaxy_image_path_dict[
galaxy_path
]
return instance
def save_attributes_for_aggregator(self, paths: af.DirectoryPaths):
paths.save_object("data", self.dataset.data)
paths.save_object("noise_map", self.dataset.noise_map)
paths.save_object("settings_dataset", self.dataset.settings)
paths.save_object("settings_inversion", self.settings_inversion)
paths.save_object("settings_pixelization", self.settings_pixelization)
paths.save_object("cosmology", self.cosmology)
if self.hyper_model_image is not None:
paths.save_object("hyper_model_image", self.hyper_model_image)
if self.hyper_galaxy_image_path_dict is not None:
paths.save_object(
"hyper_galaxy_image_path_dict", self.hyper_galaxy_image_path_dict
)
class AnalysisImaging(AnalysisDataset):
def __init__(
self,
dataset,
hyper_result=None,
cosmology=cosmo.Planck15,
settings_pixelization=pix.SettingsPixelization(),
settings_inversion=inv.SettingsInversion(),
preloads=pload.Preloads(),
):
super().__init__(
dataset=dataset,
hyper_result=hyper_result,
cosmology=cosmology,
settings_pixelization=settings_pixelization,
settings_inversion=settings_inversion,
preloads=preloads,
)
self.dataset = dataset
@property
def imaging(self):
return self.dataset
def log_likelihood_function(self, instance):
"""
Determine the fit of a lens galaxy and source galaxy to the imaging in this lens.
Parameters
----------
instance
A model instance with attributes
Returns
-------
fit : Fit
A fractional value indicating how well this model fit and the model imaging itself
"""
self.associate_hyper_images(instance=instance)
plane = self.plane_for_instance(instance=instance)
hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance)
hyper_background_noise = self.hyper_background_noise_for_instance(
instance=instance
)
try:
fit = self.fit_imaging_for_plane(
plane=plane,
hyper_image_sky=hyper_image_sky,
hyper_background_noise=hyper_background_noise,
)
return fit.figure_of_merit
except (PixelizationException, InversionException, GridException) as e:
raise FitException from e
def fit_imaging_for_plane(
self, plane, hyper_image_sky, hyper_background_noise, use_hyper_scalings=True
):
return fit_imaging.FitImaging(
imaging=self.dataset,
plane=plane,
hyper_image_sky=hyper_image_sky,
hyper_background_noise=hyper_background_noise,
use_hyper_scalings=use_hyper_scalings,
settings_pixelization=self.settings_pixelization,
settings_inversion=self.settings_inversion,
)
def visualize(self, paths: af.DirectoryPaths, instance, during_analysis):
instance = self.associate_hyper_images(instance=instance)
plane = self.plane_for_instance(instance=instance)
hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance)
hyper_background_noise = self.hyper_background_noise_for_instance(
instance=instance
)
fit = self.fit_imaging_for_plane(
plane=plane,
hyper_image_sky=hyper_image_sky,
hyper_background_noise=hyper_background_noise,
)
visualizer = vis.Visualizer(visualize_path=paths.image_path)
visualizer.visualize_imaging(imaging=self.imaging)
visualizer.visualize_fit_imaging(fit=fit, during_analysis=during_analysis)
if fit.inversion is not None:
visualizer.visualize_inversion(
inversion=fit.inversion, during_analysis=during_analysis
)
visualizer.visualize_hyper_images(
hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict,
hyper_model_image=self.hyper_model_image,
plane=plane,
)
if visualizer.plot_fit_no_hyper:
fit = self.fit_imaging_for_plane(
plane=plane,
hyper_image_sky=None,
hyper_background_noise=None,
use_hyper_scalings=False,
)
visualizer.visualize_fit_imaging(
fit=fit, during_analysis=during_analysis, subfolders="fit_no_hyper"
)
def make_result(
self, samples: af.PDFSamples, model: af.Collection, search: af.NonLinearSearch
):
return res.ResultImaging(
samples=samples, model=model, analysis=self, search=search
)
def save_attributes_for_aggregator(self, paths: af.DirectoryPaths):
super().save_attributes_for_aggregator(paths=paths)
paths.save_object("psf", self.dataset.psf_unormalized)
paths.save_object("mask", self.dataset.mask)
class AnalysisInterferometer(AnalysisDataset):
def __init__(
self,
dataset,
hyper_result=None,
cosmology=cosmo.Planck15,
settings_pixelization=pix.SettingsPixelization(),
settings_inversion=inv.SettingsInversion(),
preloads=pload.Preloads(),
):
super().__init__(
dataset=dataset,
hyper_result=hyper_result,
cosmology=cosmology,
settings_pixelization=settings_pixelization,
settings_inversion=settings_inversion,
preloads=preloads,
)
if self.hyper_result is not None:
self.set_hyper_dataset(result=self.hyper_result)
else:
self.hyper_galaxy_visibilities_path_dict = None
self.hyper_model_visibilities = None
def set_hyper_dataset(self, result):
super().set_hyper_dataset(result=result)
self.hyper_model_visibilities = result.hyper_model_visibilities
self.hyper_galaxy_visibilities_path_dict = (
result.hyper_galaxy_visibilities_path_dict
)
@property
def interferometer(self):
return self.dataset
def log_likelihood_function(self, instance):
"""
Determine the fit of a lens galaxy and source galaxy to the interferometer in this lens.
Parameters
----------
instance
A model instance with attributes
Returns
-------
fit : Fit
A fractional value indicating how well this model fit and the model interferometer itself
"""
self.associate_hyper_images(instance=instance)
plane = self.plane_for_instance(instance=instance)
hyper_background_noise = self.hyper_background_noise_for_instance(
instance=instance
)
try:
fit = self.fit_interferometer_for_plane(
plane=plane, hyper_background_noise=hyper_background_noise
)
return fit.figure_of_merit
except (PixelizationException, InversionException, GridException) as e:
raise FitException from e
def associate_hyper_visibilities(
self, instance: af.ModelInstance
) -> af.ModelInstance:
"""
Takes visibilities from the last result, if there is one, and associates them with galaxies in this search
where full-path galaxy names match.
If the galaxy collection has a different name then an association is not made.
e.g.
galaxies.lens will match with:
galaxies.lens
but not with:
galaxies.lens
galaxies.source
Parameters
----------
instance
A model instance with 0 or more galaxies in its tree
Returns
-------
instance
The input instance with visibilities associated with galaxies where possible.
"""
if self.hyper_galaxy_visibilities_path_dict is not None:
for galaxy_path, galaxy in instance.path_instance_tuples_for_class(
g.Galaxy
):
if galaxy_path in self.hyper_galaxy_visibilities_path_dict:
galaxy.hyper_model_visibilities = self.hyper_model_visibilities
galaxy.hyper_galaxy_visibilities = self.hyper_galaxy_visibilities_path_dict[
galaxy_path
]
return instance
def fit_interferometer_for_plane(
self, plane, hyper_background_noise, use_hyper_scalings=True
):
return fit_interferometer.FitInterferometer(
interferometer=self.dataset,
plane=plane,
hyper_background_noise=hyper_background_noise,
use_hyper_scalings=use_hyper_scalings,
settings_pixelization=self.settings_pixelization,
settings_inversion=self.settings_inversion,
)
def visualize(self, paths: af.DirectoryPaths, instance, during_analysis):
self.associate_hyper_images(instance=instance)
plane = self.plane_for_instance(instance=instance)
hyper_background_noise = self.hyper_background_noise_for_instance(
instance=instance
)
fit = self.fit_interferometer_for_plane(
plane=plane, hyper_background_noise=hyper_background_noise
)
visualizer = vis.Visualizer(visualize_path=paths.image_path)
visualizer.visualize_interferometer(interferometer=self.interferometer)
visualizer.visualize_fit_interferometer(
fit=fit, during_analysis=during_analysis
)
if fit.inversion is not None:
visualizer.visualize_inversion(
inversion=fit.inversion, during_analysis=during_analysis
)
visualizer.visualize_hyper_images(
hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict,
hyper_model_image=self.hyper_model_image,
plane=plane,
)
if visualizer.plot_fit_no_hyper:
fit = self.fit_interferometer_for_plane(
plane=plane, hyper_background_noise=None, use_hyper_scalings=False
)
visualizer.visualize_fit_interferometer(
fit=fit, during_analysis=during_analysis, subfolders="fit_no_hyper"
)
def make_result(
self, samples: af.PDFSamples, model: af.Collection, search: af.NonLinearSearch
):
return res.ResultInterferometer(
samples=samples, model=model, analysis=self, search=search
)
def save_attributes_for_aggregator(self, paths: af.DirectoryPaths):
super().save_attributes_for_aggregator(paths=paths)
paths.save_object("uv_wavelengths", self.dataset.uv_wavelengths)
paths.save_object("real_space_mask", self.dataset.real_space_mask)
| 2.109375 | 2 |
example.py | KostyaKow/PyWebKitGtk-wrapper | 0 | 12792405 | <reponame>KostyaKow/PyWebKitGtk-wrapper<gh_stars>0
#!/usr/bin/env python
import sys, time
sys.path.append('..')
import pwkg
def setupElectron(index_path, on_event):
w = pwkg.Window(100, 100, "PWKG Window", debug=True)
w.load(index_path)
w.on_gui_event += on_event
return w
def sleep(x):
time.sleep(x)
def on_js_event(msg):
print(msg)
print(msg['testdict'])
#TODO: this blocks main thread
def on_update(w):
#from multiprocessing import Process
#p = Process(target=lambda:adblib.screenshot(pic_path))
#p.start()
print("yo")
line = "hi" #sys.stdin.readline()
print(line)
w.exec_js('console.log("hi")');
def main():
w = setupElectron('index.html', on_js_event)
w.run(on_update, 1000)
main()
| 1.820313 | 2 |
customer/views.py | RFNshare/StraightIntLtd | 0 | 12792406 | from django.shortcuts import render
from django.urls import reverse_lazy
from customer.owner import *
from .filters import *
from .forms import *
# class CustomerIndexView(LoginRequiredMixin, View):
# login_url = '/login/'
# redirect_field_name = 'redirect_to'
class CustomerCreateView(OwnerCreateView):
form_class = CustomerCreateForm
paginate_by = 10
template_name = 'customer/create_customer.html'
success_message = "%(name)s was created successfully."
success_url = reverse_lazy("customer:customer_list")
class CustomerListView(OwnerListView):
filterset_class = CustomerFilter
queryset = Customer.objects.filter(is_deleted=False)
template_name = 'customer/customer_list.html'
paginate_by = 10
class CustomerUpdateView(OwnerUpdateView):
model = Customer
template_name = 'customer/customer_edit.html'
form_class = CustomerUpdateForm
success_message = "%(name)s was updated successfully."
success_url = reverse_lazy("customer:customer_list")
class CustomerDeleteView(OwnerDeleteView):
model = Customer
template_name = 'customer/customer_delete.html'
success_url = reverse_lazy('customer:customer_list')
success_message = "Session %(name)s was removed successfully"
class CustomerDetailsView(OwnerDetailView):
model = Customer
template_name = 'customer/customer_details.html'
def CustomerLedgerView(request, pk):
customer = Customer.objects.get(id=pk)
ctx = {'customer': customer}
return render(request, 'customer/customer_ledger.html', ctx)
| 2.203125 | 2 |
testcases/footers_test.py | praveenpj29/selenium_test | 0 | 12792407 | from selenium import webdriver
from selenium.webdriver.common.by import By
from pageObjects.footer import Footer
from utilities.customLogger import LogGen
from utilities.siteConfig import siteconfig
from selenium.common.exceptions import ElementNotInteractableException
import pandas as pd
import time
class Test_1:
base_url = siteconfig.getsiteURl()
base_xpath = siteconfig.getfooterXPATH()
logger = LogGen.loggen()
def test_footer_links(self, setup):
self.logger.info("*************** footer links test started **************")
driver = setup
driver.get(self.base_url)
driver.maximize_window()
time.sleep(3)
footers = Footer(driver)
footer_xpath = footers.footer_links_xpath(self.base_xpath)
time.sleep(3)
df = {
"S.no": [],
"Link_name": [],
"XPATH": [],
"URL": [],
"Directed URL": [],
"Validation": []
}
result = []
for n, i in enumerate(footer_xpath):
footer = driver.find_element_by_xpath(i)
footer.location_once_scrolled_into_view
href = footer.get_attribute("href")
text = footer.get_attribute("text")
try:
footer.click()
except ElementNotInteractableException as e:
self.logger.info("**************** {} ************".format(e))
self.logger.info("************ {} footer failed *********".format(text))
df["Validation"].append("failed")
result.append("failed")
else:
current_url = driver.current_url
time.sleep(1)
driver.back()
df["Validation"].append("passed")
self.logger.info("************ {} footer passed *********".format(text))
result.append("passed")
finally:
df["S.no"].append(n + 1)
df["Link_name"].append(text)
df["URL"].append(href)
df["Directed URL"].append(current_url)
df["XPATH"].append(i)
Data = pd.DataFrame(df, index=df["S.no"])
output = pd.ExcelWriter(".//reports/footers_links_validation.xlsx")
Data.to_excel(output)
output.save()
if "failed" not in result:
assert True
else:
assert False
driver.close()
| 2.59375 | 3 |
pyega3/libs/data_file.py | lvarin/ega-download-client | 47 | 12792408 | import concurrent.futures
import logging
import logging.handlers
import os
import re
import shutil
import sys
import time
import urllib
import htsget
import psutil
from tqdm import tqdm
from pyega3.libs import utils
DOWNLOAD_FILE_MEMORY_BUFFER_SIZE = 32 * 1024
class DataFile:
DEFAULT_SLICE_SIZE = 100 * 1024 * 1024
temporary_files_should_be_deleted = False
def __init__(self, data_client, file_id,
display_file_name=None,
file_name=None,
size=None,
unencrypted_checksum=None,
status=None):
self.data_client = data_client
self.id = file_id
self.temporary_files = set()
self._display_file_name = display_file_name
self._file_name = file_name
self._file_size = size
self._unencrypted_checksum = unencrypted_checksum
self._file_status = status
def load_metadata(self):
res = self.data_client.get_json(f"/metadata/files/{self.id}")
# If the user does not have access to the file then the server returns HTTP code 200 but the JSON payload has
# all the fields empty
if res['displayFileName'] is None or res['unencryptedChecksum'] is None:
raise RuntimeError(f"Metadata for file id '{self.id}' could not be retrieved. " +
"This is probably because your account does not have access to this file. "
"You can check which datasets your account has access to at "
"'https://ega-archive.org/my-datasets.php' after logging in.")
self._display_file_name = res['displayFileName']
self._file_name = res['fileName']
self._file_size = res['fileSize']
self._unencrypted_checksum = res['unencryptedChecksum']
self._file_status = res['fileStatus']
@property
def display_name(self):
if self._display_file_name is None:
self.load_metadata()
return self._display_file_name
@property
def name(self):
if self._file_name is None:
self.load_metadata()
return self._file_name
@property
def size(self):
if self._file_size is None:
self.load_metadata()
return self._file_size
@property
def unencrypted_checksum(self):
if self._unencrypted_checksum is None:
self.load_metadata()
return self._unencrypted_checksum
@property
def status(self):
if self._file_status is None:
self.load_metadata()
return self._file_status
@staticmethod
def print_local_file_info(prefix_str, file, md5):
logging.info(f"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, md5={md5})")
def download_file(self, output_file, num_connections=1, max_slice_size=DEFAULT_SLICE_SIZE):
"""Download an individual file"""
file_size = self.size
check_sum = self.unencrypted_checksum
options = {"destinationFormat": "plain"}
        file_size -= 16  # 16 bytes IV not necessary in plain mode
if os.path.exists(output_file) and utils.md5(output_file, file_size) == check_sum:
DataFile.print_local_file_info('Local file exists:', output_file, check_sum)
return
num_connections = max(num_connections, 1)
num_connections = min(num_connections, 128)
if file_size < 100 * 1024 * 1024:
num_connections = 1
logging.info(f"Download starting [using {num_connections} connection(s), file size {file_size} and chunk "
f"length {max_slice_size}]...")
chunk_len = max_slice_size
temporary_directory = os.path.join(os.path.dirname(output_file), ".tmp_download")
os.makedirs(temporary_directory, exist_ok=True)
with tqdm(total=int(file_size), unit='B', unit_scale=True) as pbar:
params = [
(os.path.join(temporary_directory, self.id), chunk_start_pos,
min(chunk_len, file_size - chunk_start_pos), options, pbar)
for chunk_start_pos in range(0, file_size, chunk_len)]
for file in os.listdir(temporary_directory):
match = re.match(r"(.*)-from-(\d*)-len-(\d*).*", file)
file_id = match.group(1)
file_from = match.group(2)
file_length = match.group(3)
if file_id != self.id:
continue
if (file_from, file_length) in [(param[1], param[2]) for param in params]:
continue
logging.warning(f'Deleting the leftover {file} temporary file because the MAX_SLICE_SIZE parameter ('
f'and thus the slice sizes) have been modified since the last run.')
os.remove(os.path.join(temporary_directory, file))
results = []
with concurrent.futures.ThreadPoolExecutor(max_workers=num_connections) as executor:
for part_file_name in executor.map(self.download_file_slice_, params):
results.append(part_file_name)
pbar.close()
downloaded_file_total_size = sum(os.path.getsize(f) for f in results)
if downloaded_file_total_size == file_size:
utils.merge_bin_files_on_disk(output_file, results, downloaded_file_total_size)
not_valid_server_md5 = len(str(check_sum or '')) != 32
logging.info("Calculating md5 (this operation can take a long time depending on the file size)")
received_file_md5 = utils.md5(output_file, file_size)
logging.info("Verifying file checksum")
if received_file_md5 == check_sum or not_valid_server_md5:
DataFile.print_local_file_info('Saved to : ', output_file, check_sum)
if not_valid_server_md5:
logging.info(
f"WARNING: Unable to obtain valid MD5 from the server (received: {check_sum})."
f" Can't validate download. Please contact EGA helpdesk on <EMAIL>")
with open(utils.get_fname_md5(output_file), 'wb') as f: # save good md5 in aux file for future re-use
f.write(received_file_md5.encode())
else:
os.remove(output_file)
raise Exception(f"Download process expected md5 value '{check_sum}' but got '{received_file_md5}'")
def download_file_slice_(self, args):
return self.download_file_slice(*args)
def download_file_slice(self, file_name, start_pos, length, options=None, pbar=None):
        if start_pos < 0:
            raise ValueError("start_pos must be non-negative")
        if length <= 0:
            raise ValueError("length must be positive")
path = f"/files/{self.id}"
if options is not None:
path += '?' + urllib.parse.urlencode(options)
final_file_name = f'{file_name}-from-{str(start_pos)}-len-{str(length)}.slice'
file_name = final_file_name + '.tmp'
self.temporary_files.add(file_name)
existing_size = os.stat(file_name).st_size if os.path.exists(file_name) else 0
        if existing_size > length:
            # A leftover slice larger than the requested length cannot be resumed; discard it.
            os.remove(file_name)
            existing_size = 0
if pbar:
pbar.update(existing_size)
if existing_size == length:
return file_name
try:
with self.data_client.get_stream(path,
{
'Range': f'bytes={start_pos + existing_size}-{start_pos + length - 1}'}) as r:
with open(file_name, 'ba') as file_out:
for chunk in r.iter_content(DOWNLOAD_FILE_MEMORY_BUFFER_SIZE):
file_out.write(chunk)
if pbar:
pbar.update(len(chunk))
total_received = os.path.getsize(file_name)
if total_received != length:
raise Exception(f"Slice error: received={total_received}, requested={length}, file='{file_name}'")
except Exception:
if os.path.exists(file_name):
os.remove(file_name)
raise
os.rename(file_name, final_file_name)
return final_file_name
@staticmethod
def is_genomic_range(genomic_range_args):
if not genomic_range_args:
return False
return genomic_range_args[0] is not None or genomic_range_args[1] is not None
def generate_output_filename(self, folder, genomic_range_args):
file_name = self.display_name
ext_to_remove = ".cip"
if file_name.endswith(ext_to_remove):
file_name = file_name[:-len(ext_to_remove)]
name, ext = os.path.splitext(os.path.basename(file_name))
genomic_range = ''
if DataFile.is_genomic_range(genomic_range_args):
genomic_range = "_genomic_range_" + (genomic_range_args[0] or genomic_range_args[1])
            genomic_range += '_' + str(genomic_range_args[2] or 0)
            genomic_range += '_' + str(genomic_range_args[3] or '')
format_ext = '.' + (genomic_range_args[4] or '').strip().lower()
if format_ext != ext and len(format_ext) > 1:
ext += format_ext
ret_val = os.path.join(folder, self.id, name + genomic_range + ext)
logging.debug(f"Output file:'{ret_val}'")
return ret_val
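    # Illustrative example (values are made up): for a file with id 'EGAF00001',
    # display_name 'sample.bam.cip', folder 'downloads' and no genomic range,
    # this method strips the '.cip' suffix and returns 'downloads/EGAF00001/sample.bam'.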
@staticmethod
def print_local_file_info_genomic_range(prefix_str, file, gr_args):
logging.info(
f"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, referenceName={gr_args[0]},"
f" referenceMD5={gr_args[1]}, start={gr_args[2]}, end={gr_args[3]}, format={gr_args[4]})"
)
def download_file_retry(self, num_connections, output_dir, genomic_range_args, max_retries, retry_wait,
max_slice_size=DEFAULT_SLICE_SIZE):
if self.name.endswith(".gpg"):
logging.info(
"GPG files are currently not supported."
" Please email EGA Helpdesk at <EMAIL>")
return
logging.info(f"File Id: '{self.id}'({self.size} bytes).")
output_file = self.generate_output_filename(output_dir, genomic_range_args)
temporary_directory = os.path.join(os.path.dirname(output_file), ".tmp_download")
if not os.path.exists(temporary_directory):
os.makedirs(temporary_directory)
hdd = psutil.disk_usage(os.getcwd())
logging.info(f"Total space : {hdd.total / (2 ** 30):.2f} GiB")
logging.info(f"Used space : {hdd.used / (2 ** 30):.2f} GiB")
logging.info(f"Free space : {hdd.free / (2 ** 30):.2f} GiB")
# If file is bigger than free space, warning
if hdd.free < self.size:
logging.warning(f"The size of the file that you want to download is bigger than your free space in this "
f"location")
if DataFile.is_genomic_range(genomic_range_args):
with open(output_file, 'wb') as output:
htsget.get(
f"{self.data_client.htsget_url}/files/{self.id}",
output,
reference_name=genomic_range_args[0], reference_md5=genomic_range_args[1],
start=genomic_range_args[2], end=genomic_range_args[3],
data_format=genomic_range_args[4],
max_retries=sys.maxsize if max_retries < 0 else max_retries,
retry_wait=retry_wait,
bearer_token=self.data_client.auth_client.token)
DataFile.print_local_file_info_genomic_range('Saved to : ', output_file, genomic_range_args)
return
done = False
num_retries = 0
while not done:
try:
self.download_file(output_file, num_connections, max_slice_size)
done = True
except Exception as e:
                if isinstance(e, ConnectionError):
                    logging.info("Failed to connect to data service. Check that the necessary ports are open in your "
                                 "firewall. See the documentation for more information.")
logging.exception(e)
if num_retries == max_retries:
if DataFile.temporary_files_should_be_deleted:
self.delete_temporary_folder(temporary_directory)
raise e
time.sleep(retry_wait)
num_retries += 1
logging.info(f"retry attempt {num_retries}")
def delete_temporary_folder(self, temporary_directory):
try:
shutil.rmtree(temporary_directory)
except FileNotFoundError as ex:
logging.error(f'Could not delete the temporary folder: {ex}')
| 2.265625 | 2 |
bot/utils/__init__.py | famaxth/Russian-Qiwi-Bot | 0 | 12792409 | <filename>bot/utils/__init__.py
from . import db_api
from . import misc
from .notify_admins import on_startup_notify
| 1.257813 | 1 |
subtitle_sync.py | apiad/sublime-subtitle-sync | 4 | 12792410 | import sublime_plugin
import sublime
SUB_RE = r'\d\d:\d\d:\d\d,\d\d\d'
def find_subtitles(view):
subs = []
sel = view.sel()
for match in view.find_all(SUB_RE):
if sel.contains(match):
subs.append(match)
# sel.clear()
# sel.add_all(subs)
return subs
def convert_to_time(sub):
h, m, s = sub.split(':')
return int(h) * 3600 + int(m) * 60 + float(s.replace(',', '.'))
def convert_to_string(time):
h = int(time / 3600)
m = int((time % 3600) / 60)
s = time % 60
return str(h).zfill(2) + ':' + str(m).zfill(2) + ':' + ("%.3f" % s).zfill(6).replace('.', ',')
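# Illustrative round trip (derived from the two helpers above):
#   convert_to_time('00:01:30,500') -> 90.5 seconds
#   convert_to_string(90.5)         -> '00:01:30,500'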
class SubtitleSyncCommand(sublime_plugin.TextCommand):
def run(self, edit, delta):
subs = find_subtitles(self.view)
for sub in subs: # self.view.sel():
time = convert_to_time(self.view.substr(sub))
time += delta
if time < 0:
time = 0
time = convert_to_string(time)
self.view.replace(edit, sub, time)
| 2.8125 | 3 |
tool/run.py | David-Loibl/gistemp | 1 | 12792411 | #!/usr/local/bin/python3.4
#
# run.cgi -- run steps of the GISTEMP algorithm
#
# <NAME>, 2009-12-08
# <NAME>, Revision 2016-01-06
"""run.cgi [options] -- run steps of the GISTEMP algorithm.
Options:
--help Print this text.
--steps=STEPS Specify which steps to run, as a comma-separated list of
numbers from 0 to 5. For example, --steps=2,3,5
The steps are run in the order you specify.
If this option is omitted, run all steps in order.
"""
# http://www.python.org/doc/2.4.4/lib/module-os.html
import os
# http://docs.python.org/release/2.4.4/lib/module-re.html
import re
# http://www.python.org/doc/2.4.4/lib/module-sys.html
import sys
try:
rootdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if os.getcwd() != rootdir:
print("The GISTEMP procedure must be run from the root "
"directory of the project.\nPlease change directory "
"to %s and try again." % rootdir)
sys.exit()
except Exception:
    sys.exit()
sys.path.append(os.getcwd())
from settings import *
# Clear Climate Code
import gio
class Fatal(Exception):
pass
# Record the original standard output so we can log to it; in steps 2 and 5
# we'll be changing the value of sys.stdout before calling other modules that
# use "print" to generate their output.
logfile = sys.stdout
def log(msg):
print(msg, file=logfile)
    with open(PROGRESS_DIR + 'progress.txt', 'a') as progress:
        progress.write(msg + '\n\n')
def mkdir(path):
"""mkdir(PATH): create the directory PATH, and all intermediate-level
directories needed to contain it, unless it already exists."""
if not os.path.isdir(path):
log("... creating directory %s" % path)
os.makedirs(path)
# Each of the run_stepN functions below takes a data object, its input,
# and produces a data object, its output. Ordinarily the data objects
# are iterators, either produced from the previous step, or an iterator
# that feeds from a file.
def run_step0(data):
from steps import step0
if data is None:
data = gio.step0_input()
result = step0.step0(data)
return gio.step0_output(result)
def run_step1(data):
from steps import step1
from extension import step1 as estep1
if data is None:
data = gio.step1_input()
pre = estep1.pre_step1(data)
result = step1.step1(pre)
post = estep1.post_step1(result)
return gio.step1_output(post)
def run_step2(data):
from steps import step2
if data is None:
data = gio.step2_input()
result = step2.step2(data)
return gio.step2_output(result)
def run_step3(data):
from steps import step3
if data is None:
data = gio.step3_input()
result = step3.step3(data)
return gio.step3_output(result)
def run_step3c(data):
"""An alternative to Step 3 that reads (copies) the output file
    created by the ordinary Step 3. Effectively using the data produced
by Step 3 without re-running it."""
if data:
raise Fatal("Expect to run 3c first in pipeline.")
return gio.step3c_input()
def run_step4(data):
from steps import step4
# Unlike earlier steps, Step 4 always gets input data, ocean
# temperatures, from disk; data from earlier stages is land data and
# is zipped up.
data = gio.step4_input(data)
result = step4.step4(data)
return gio.step4_output(result)
def run_step5(data):
from steps import step5
# Step 5 takes a land mask as optional input, this is all handled in
# the step5_input() function.
data = gio.step5_input(data)
result = step5.step5(data)
return gio.step5_output(result)
def parse_steps(steps):
"""Parse the -s, steps, option. Produces a list of strings."""
steps = steps.strip()
if not steps:
return [str(x) for x in range(6)]
result = set()
for part in steps.split(','):
# Part can be integer number with an optional letter suffix...
if re.match(r'^\d+[a-z]?$', part):
result.add(part)
else:
# Or a range in the form '1-3'.
try:
l, r = part.split('-')
result.update(str(s) for s in range(int(l), int(r) + 1))
except ValueError:
# Expect to catch both
# "ValueError: too many values to unpack" when the split
# produces too many values ("1-3-"), and
# "ValueError: invalid literal for int() with base 10: 'a'"
# when int fails ("1,a")
raise Fatal("Can't understand steps argument.")
return list(sorted(result))
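# Illustrative results (derived from parse_steps above):
#   parse_steps('')      -> ['0', '1', '2', '3', '4', '5']
#   parse_steps('1-3,5') -> ['1', '2', '3', '5']
#   parse_steps('3c,4')  -> ['3c', '4']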
def parse_options(arglist):
import optparse
usage = "usage: %prog [options]"
parser = optparse.OptionParser(usage)
parser.add_option("-s", "--steps", action="store", metavar="S[,S]", default="", help="Select range of steps to run")
parser.add_option('-p', '--parameter', action='append', help="Redefine parameter from parameters/*.py during run")
parser.add_option("--no-work_files", "--suppress-work-files", action="store_false", default=True, dest="save_work",
help="Do not save intermediate files in the work sub-directory")
options, args = parser.parse_args(arglist)
if len(args) != 0:
parser.error("Unexpected arguments")
options.steps = parse_steps(options.steps)
return options, args
def update_parameters(parm):
"""Take a parameter string from the command line and update the
parameters module."""
if not parm:
return
import parameters
for p in parm:
try:
key, value = p.split('=', 1)
except ValueError:
raise Fatal("Can't understand parameter option: %r" % p)
if not hasattr(parameters, key):
raise Fatal("Ignoring unknown parameter %r" % key)
# Coerce value, a string, to the same type as the existing parameter
# value. That works nicely for strings, ints, and floats...
x = getattr(parameters, key)
# ... but we need a hack for bool.
if type(x) == bool:
try:
value = ['false', 'true'].index(value.lower())
except ValueError:
raise Fatal("Boolean parameter %r must be True or False"
% key)
# Now value is 0 or 1 and the default case will correctly
# coerce it.
elif value[0] == '(' and value[-1] == ')':
value = value[1:-1]
value = [int(x) for x in value.split(',')]
value = type(x)(value)
setattr(parameters, key, value)
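# Illustrative command-line overrides handled by update_parameters (the
# parameter names below are hypothetical, not actual GISTEMP parameters):
#   -p radius=1200            value coerced to the existing int/float type
#   -p use_all_stations=true  booleans accept 'true' / 'false'
#   -p box=(8,16)             parenthesised tuples of ints are accepted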
# Download input files
def dl_input_files():
import fetch
fetcher = fetch.Fetcher()
fetcher.fetch()
def main(argv=None):
import time
import os
if argv is None:
argv = sys.argv
options, args = parse_options(argv[1:])
update_parameters(options.parameter)
step_list = list(options.steps)
# overwrite progress popup
if not os.path.exists(PROGRESS_DIR):
os.makedirs(PROGRESS_DIR)
progress = open(PROGRESS_DIR + "progress.txt", 'w')
progress.write("Setting up parameters...\n\n")
# Create all the temporary directories we're going to use.
for d in ['log', 'result', 'work', "input"]:
mkdir(TMP_DIR + '/' + d)
# delete files in /tmp/input to re-download the input data files
# otherwise the files in /tmp/input will be used.
dl_input_files()
step_fn = {
'0': run_step0,
'1': run_step1,
'2': run_step2,
'3': run_step3,
'3c': run_step3c,
'4': run_step4,
'5': run_step5,
}
# Record start time now, and ending times for each step.
start_time = time.time()
cannot = [s for s in step_list if s not in step_fn]
if cannot:
raise Fatal("Can't run steps %s" % str(cannot))
# Create a message for stdout.
if len(step_list) == 1:
logit = "STEP %s" % step_list[0]
else:
assert len(step_list) >= 2
t = [str(s) for s in range(int(step_list[0]), int(step_list[-1]) + 1)]
if step_list == t:
logit = "STEPS %s to %s" % (step_list[0], step_list[-1])
else:
logit = "STEPS %s" % ', '.join(step_list)
log("====> %s ====" % logit)
data = None
for step in step_list:
data = step_fn[step](data)
# Consume the data in whatever the last step was, in order to
# write its output, and hence suck data through the whole
# pipeline.
for _ in data:
pass
end_time = time.time()
log("====> Timing Summary ====")
log("Run took %.1f seconds" % (end_time - start_time))
return 0
if __name__ == '__main__':
sys.exit(main())
| 2.734375 | 3 |
MSE430Funcs/CrysStrucFuncs.py | KCMak653/MSE430Notebooks | 0 | 12792412 | <reponame>KCMak653/MSE430Notebooks<filename>MSE430Funcs/CrysStrucFuncs.py<gh_stars>0
def make_supercell(cell, diff_species):
"""Append all sites in a unit cell to a structure - must have cubic lattice"""
#diff_species: Boolean, if true, make different species diff colors. If false, make one basis group one color.
#Get a copy of the structure defining the basis
basis=cell.copy()
superCell = cell.copy()
#Create a list of all the lattice points in the cubic unit cell (i.e all the corners)
f=[[i,j,k] for i in range(2) for j in range(2) for k in range(2)]
#Remove the lattice point associated with the basis [0,0,0]
f=f[1:]
#Add a basis at each of the unit cell lattice points
if diff_species:
[superCell.append(atom.species, atom.frac_coords+f[site]) for atom in basis for site in range(len(f))]
else:
[superCell.append(atom.specie, atom.frac_coords+f[site]) for site in range(len(f)) for atom in basis]
return(superCell)
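# Illustrative usage sketch (assumes pymatgen is installed; the BCC iron
# structure below is made up for demonstration):
#   from pymatgen import Structure, Lattice
#   bcc_fe = Structure(Lattice.cubic(2.87), ['Fe', 'Fe'],
#                      [[0, 0, 0], [0.5, 0.5, 0.5]])
#   sc = make_supercell(bcc_fe, diff_species=False)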
def cubicCell(cell, a3):
"""Append all sites in a unit cell to a structure"""
from pymatgen import Structure, Lattice
import numpy as np
import nglview as ngl
basis=cell.copy()
superCell = cell.copy()
prim_vec = np.asarray(cell.lattice.as_dict().get('matrix'))
    # Append atoms translated by integer combinations of the primitive lattice
    # vectors; a fixed -3..2 range of translations replaces the original
    # a3-based threshold loop.
    for i in range(-3, 3):
        for j in range(-3, 3):
            for k in range(-3, 3):
                new_coord = prim_vec[0] * i + prim_vec[1] * j + prim_vec[2] * k
                for atom in basis:
                    superCell.append(atom.species, atom.coords + new_coord, coords_are_cartesian=True)
    return superCell
def visLattice(lattice):
from pymatgen import Structure, Lattice
import nglview as ngl
unit_cell= Structure(lattice, ['Cl'], [[0,0,0]], to_unit_cell=True, coords_are_cartesian=False)
view6=ngl.show_pymatgen(unit_cell)
view6.clear_representations()
view6.add_unitcell()
return(view6)
def visUC(SC, a3):
from pymatgen import Structure, Lattice
import nglview as ngl
selec=[]
for ind, site in enumerate(SC.sites):
if all(site.coords>=-a3*.15) & all(site.coords<=a3*1.15):
selec=selec+[ind]
view6 = ngl.show_pymatgen(SC)
view6.clear_representations()
#view6.add_representation('ball+stick', aspectRatio=10, selection=selec)
[view6.add_representation('ball+stick', aspectRatio=5, selection=[i]) for i in selec]
return(view6) | 2.953125 | 3 |
research/pyqtConfig/util.py | FXTD-ODYSSEY/QBinder | 13 | 12792413 | # coding:utf-8
from __future__ import print_function
__author__ = 'timmyliang'
__email__ = '<EMAIL>'
__date__ = '2019-12-12 20:09:01'
"""
Library imports
"""
from Qt import QtGui
from Qt import QtCore
from Qt import QtWidgets
# NOTE replaceWidget ----------------------------------------------------------------------------
def replaceWidget(src,dst):
u"""replaceWidget 替换组件
Parameters
----------
src : QWidget
源组件
dst : QWidget
目标组件
Returns
-------
QWidget
[description]
"""
updateWidgetState(src,dst)
layout = src.parent().layout()
layout,index = getTargetLayoutIndex(layout,src)
if not layout:
print (u"没有找到 %s 的 Layout,替换失败" % src)
return src
layout.insertWidget(index,dst)
src.setParent(None)
return dst
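# Illustrative usage sketch (widget names are made up; assumes a running
# QApplication and that `old_label` already sits inside a parent layout):
#   old_label = ui.placeholder_label
#   new_edit = QtWidgets.QLineEdit()
#   new_edit = replaceWidget(old_label, new_edit)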
def updateWidgetState(src,dst):
u"""updateWidgetState 同步组件状态
Parameters
----------
src : QWidget
源组件
dst : QWidget
目标组件
"""
if src.acceptDrops() : dst.setAcceptDrops(src.acceptDrops())
if src.accessibleDescription() : dst.setAccessibleDescription(src.accessibleDescription())
if src.backgroundRole() : dst.setBackgroundRole(src.backgroundRole())
if src.baseSize() : dst.setBaseSize(src.baseSize())
if src.contentsMargins() : dst.setContentsMargins(src.contentsMargins())
if src.contextMenuPolicy() : dst.setContextMenuPolicy(src.contextMenuPolicy())
if src.cursor() : dst.setCursor(src.cursor())
if src.focusPolicy() : dst.setFocusPolicy(src.focusPolicy())
if src.focusProxy() : dst.setFocusProxy(src.focusProxy())
if src.font() : dst.setFont(src.font())
if src.foregroundRole() : dst.setForegroundRole(src.foregroundRole())
if src.geometry() : dst.setGeometry(src.geometry())
if src.inputMethodHints() : dst.setInputMethodHints(src.inputMethodHints())
if src.layout() : dst.setLayout(src.layout())
if src.layoutDirection() : dst.setLayoutDirection(src.layoutDirection())
if src.locale() : dst.setLocale(src.locale())
if src.mask() : dst.setMask(src.mask())
if src.maximumSize() : dst.setMaximumSize(src.maximumSize())
if src.minimumSize() : dst.setMinimumSize(src.minimumSize())
if src.hasMouseTracking () : dst.setMouseTracking(src.hasMouseTracking ())
if src.palette() : dst.setPalette(src.palette())
if src.parent() : dst.setParent(src.parent())
if src.sizeIncrement() : dst.setSizeIncrement(src.sizeIncrement())
if src.sizePolicy() : dst.setSizePolicy(src.sizePolicy())
if src.statusTip() : dst.setStatusTip(src.statusTip())
if src.style() : dst.setStyle(src.style())
if src.toolTip() : dst.setToolTip(src.toolTip())
if src.updatesEnabled() : dst.setUpdatesEnabled(src.updatesEnabled())
if src.whatsThis() : dst.setWhatsThis(src.whatsThis())
if src.windowFilePath() : dst.setWindowFilePath(src.windowFilePath())
if src.windowFlags() : dst.setWindowFlags(src.windowFlags())
if src.windowIcon() : dst.setWindowIcon(src.windowIcon())
if src.windowIconText() : dst.setWindowIconText(src.windowIconText())
if src.windowModality() : dst.setWindowModality(src.windowModality())
if src.windowOpacity() : dst.setWindowOpacity(src.windowOpacity())
if src.windowRole() : dst.setWindowRole(src.windowRole())
if src.windowState() : dst.setWindowState(src.windowState())
def getTargetLayoutIndex(layout,target):
u"""getTargetLayoutIndex 获取目标 Layout 和 序号
Parameters
----------
layout : QLayout
通过 QLayout 递归遍历下属的组件
target : QWidget
要查询的组件
Returns
-------
layout : QLayout
查询组件所在的 Layout
i : int
查询组件所在的 Layout 的序号
"""
count = layout.count()
for i in range(count):
item = layout.itemAt(i).widget()
if item == target:
return layout,i
else:
for child in layout.children():
layout,i = getTargetLayoutIndex(child,target)
if layout:
return layout,i
return [None,None]
# NOTE traverseChildren ----------------------------------------------------------------------------
def traverseChildren(parent,childCallback=None,printCallback=None,indent=4,prefix="",log=False):
"""traverseChildren
Traverse into the widget children | print the children hierarchy
:param parent: traverse widget
:type parent: QWidget
:param indent: indentation space, defaults to ""
:type indent: str, optional
:param log: print the data, defaults to False
:type log: bool, optional
"""
if callable(printCallback):
printCallback(prefix,parent)
elif log:
print (prefix,parent)
if not hasattr(parent,"children"):
return
prefix = "".join([" " for _ in range(indent)]) + prefix
for child in parent.children():
traverse_func = lambda:traverseChildren(child,indent=indent,prefix=prefix,childCallback=childCallback,printCallback=printCallback,log=log)
if callable(childCallback) :
childCallback(child,traverse_func)
else:
traverse_func()
| 2.296875 | 2 |
Heap/TestCircularDoublyLinkedList.py | kopok2/DataStructures | 0 | 12792414 | <filename>Heap/TestCircularDoublyLinkedList.py
# -*- coding: utf-8 -*-
import unittest
from CircularDoublyLinkedList import CircularDoublyLinkedList
class TestCircularDoublyLinkedList(unittest.TestCase):
def test_simple_list(self):
cdll = CircularDoublyLinkedList(17)
cdll.insert_new_node(24)
cdll.insert_new_node(23)
cdll.insert_new_node(7)
cdll.insert_new_node(3)
self.assertEqual(cdll.get_items(), [17, 24, 23, 7, 3])
def test_delete_node(self):
cdll = CircularDoublyLinkedList(17)
cdll.insert_new_node(24)
cdll.insert_new_node(23)
cdll.insert_new_node(7)
cdll.insert_new_node(3)
cdll.delete_node(cdll.first)
self.assertEqual(cdll.get_items(), [24, 23, 7, 3])
def test_merge_cdlls(self):
cdll1 = CircularDoublyLinkedList(17)
cdll1.insert_new_node(24)
cdll1.insert_new_node(23)
cdll1.insert_new_node(7)
cdll1.insert_new_node(3)
cdll2 = CircularDoublyLinkedList(17)
cdll2.insert_new_node(24)
cdll2.insert_new_node(23)
cdll2.insert_new_node(7)
cdll2.insert_new_node(3)
cdll1.merge_cdlls(cdll2)
self.assertEqual(cdll1.get_items(), [17, 24, 23, 7, 3, 24, 23, 7, 3, 17])
if __name__ == "__main__":
unittest.main()
| 3.25 | 3 |
sw/3rd_party/VTK-7.1.0/Imaging/Hybrid/Testing/Python/shepards.py | esean/stl_voro_fill | 4 | 12792415 | <reponame>esean/stl_voro_fill<gh_stars>1-10
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# create some points
#
math = vtk.vtkMath()
points = vtk.vtkPoints()
i = 0
while i < 50:
points.InsertPoint(i,math.Random(0,1),math.Random(0,1),math.Random(0,1))
i = i + 1
scalars = vtk.vtkFloatArray()
i = 0
while i < 50:
scalars.InsertValue(i,math.Random(0,1))
i = i + 1
profile = vtk.vtkPolyData()
profile.SetPoints(points)
profile.GetPointData().SetScalars(scalars)
# triangulate them
#
shepard = vtk.vtkShepardMethod()
shepard.SetInputData(profile)
shepard.SetModelBounds(0,1,0,1,.1,.5)
# shepard SetMaximumDistance .1
shepard.SetNullValue(1)
shepard.SetSampleDimensions(20,20,20)
shepard.Update()
map = vtk.vtkDataSetMapper()
map.SetInputConnection(shepard.GetOutputPort())
block = vtk.vtkActor()
block.SetMapper(map)
block.GetProperty().SetColor(1,0,0)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(block)
ren1.SetBackground(1,1,1)
renWin.SetSize(400,400)
ren1.ResetCamera()
cam1 = ren1.GetActiveCamera()
cam1.Azimuth(160)
cam1.Elevation(30)
cam1.Zoom(1.5)
ren1.ResetCameraClippingRange()
renWin.Render()
# render the image
#
renWin.Render()
# prevent the tk window from showing up then start the event loop
threshold = 15
# --- end of script --
#iren.Start()
| 1.96875 | 2 |
soe/python/checkClientWaitState.py | nicksstuff/sample.voice.gateway | 0 | 12792416 | # ------------------------------------------------
# IMPORTS ----------------------------------------
# ------------------------------------------------
#####
# Python dist and 3rd party libraries
#####
import os, requests, json, string, datetime, logging, time
from os.path import join, dirname
from weblogger import addLogEntry
import voiceProxySettings
from voiceProxyUtilities import setEarlyReturn, earlyReturn
logging_comp_name = "checkClientWaitState"
# This is because the user wants Watson to wait until they are ready to talk again.
# Need some keywords or a phrase to jump out of this state.
# Will also need the RespTimeout signal to be updated.
#--- This is for the user to put Watson in a wait state ------
def waitState(message):
message = preCheckClientWaitState(message)
message = checkClientWaitState(message)
message = postCheckClientWaitState(message)
return message
def preCheckClientWaitState(message):
return message
def checkClientWaitState(message):
return message
def postCheckClientWaitState(message):
return message
#------ End From Client Wait state Methods --------------------- | 2.53125 | 3 |
swarm_cli/cli_swarm.py | sungazer-io/swarm-cli | 0 | 12792417 | <gh_stars>0
from typing import List
import click
from swarm_cli.lib import SwarmModeState, load_env_files, run_cmd
@click.group()
@click.option('--environment', '-e', multiple=True, required=False)
@click.pass_context
def swarm(ctx: click.Context, environment: List[str]):
load_env_files(environment)
state = SwarmModeState()
state.initFromFile('swarm-config.yml')
ctx.obj = state
@swarm.group()
@click.pass_context
def preset(ctx: click.Context):
state: SwarmModeState = ctx.obj
@preset.command('ls')
@click.option('--preset', '-p', help="Select a preset", required=False)
@click.pass_context
def preset_ls(ctx: click.Context, preset: str):
state: SwarmModeState = ctx.obj
if preset:
state.ensure_preset(preset)
for k, v in state.cfg['presets'][preset]['stacks'].items():
click.secho("{}:{}".format(k, v['variant']))
else:
for preset in state.cfg['presets'].keys():
click.secho("Preset {}".format(preset))
for k, v in state.cfg['presets'][preset]['stacks'].items():
click.secho(" - {}:{}".format(k, v['variant']))
@preset.command('deploy')
@click.option('--preset', '-p', help="Select a preset", required=True)
@click.option('--dry-run', is_flag=True)
@click.pass_context
def preset_deploy(ctx: click.Context, preset: str = None, dry_run=False):
state: SwarmModeState = ctx.obj
state.ensure_preset(preset)
preset_data = state.cfg['presets'][preset]
load_env_files(preset_data.get('env_files', []), ignore_missing=True)
stacks = state.cfg['presets'][preset]['stacks']
for k, v in stacks.items():
name, variant = k, v['variant']
cmd = ' '.join(['docker', 'stack', 'deploy', state.build_deploy_sequence_for_stack(name, variant), name])
run_cmd(cmd, dry_run=dry_run, env=state.get_environment_for_stack(preset, name, variant))
@preset.command('build')
@click.option('--preset', '-p', help="Select a preset", required=True)
@click.option('--dry-run', is_flag=True)
@click.pass_context
def preset_build(ctx: click.Context, preset: str = None, dry_run=False):
state: SwarmModeState = ctx.obj
state.ensure_preset(preset)
preset_data = state.cfg['presets'][preset]
load_env_files(preset_data.get('env_files', []), ignore_missing=True)
stacks = state.cfg['presets'][preset]['stacks']
for k, v in stacks.items():
name, variant = k, v['variant']
state.prepare_build_folder(preset, name, variant)
cmd = ' '.join(['docker-compose', state.build_compose_sequence_for_stack(name, variant), 'build'])
run_cmd(cmd,
dry_run=dry_run,
cwd=state.get_build_folder(preset, name, variant),
env=state.get_environment_for_stack(preset, name, variant)
)
@preset.command('push')
@click.option('--preset', '-p', help="Select a preset", required=True)
@click.option('--dry-run', is_flag=True)
@click.pass_context
def preset_push(ctx: click.Context, preset: str = None, dry_run=False):
state: SwarmModeState = ctx.obj
state.ensure_preset(preset)
preset_data = state.cfg['presets'][preset]
load_env_files(preset_data.get('env_files', []), ignore_missing=True)
stacks = state.cfg['presets'][preset]['stacks']
for k, v in stacks.items():
name, variant = k, v['variant']
cmd = ' '.join(['docker-compose', state.build_compose_sequence_for_stack(name, variant), 'push'])
run_cmd(cmd,
dry_run=dry_run,
env=state.get_environment_for_stack(preset, name, variant)
)
# @swarm.group()
# def stack():
# pass
#
#
# @stack.command('ls')
# @click.pass_context
# def stack_ls(ctx: click.Context):
# state: SwarmModeState = ctx.obj
# click.echo('Available stacks:')
# for stack_name in sorted(state.layered_stacks.keys()):
# click.echo(stack_name)
# for stack_variant in sorted(state.layered_stacks[stack_name].keys()):
# click.echo("\t {}".format(stack_variant))
#
#
# @stack.command('deploy')
# @click.argument('name_variant', nargs=-1)
# @click.option('--dump-cmd', is_flag=True)
# @click.pass_context
# def stack_deploy(ctx: click.Context, name_variant: str, dump_cmd: str):
# state: SwarmModeState = ctx.obj
# for name_variant_elem in name_variant:
# name, variant = name_variant_elem.split(':')
# state.ensure_stack_exists(name, variant)
# cmd = ' '.join(['docker', 'stack', 'deploy', state.build_deploy_sequence_for_stack(name, variant), name])
# env = state.get_environment_for_stack(preset, name, variant)
# run_cmd(cmd, dry_run=dump_cmd, env=env)
#
#
# @stack.command('setup')
# @click.argument('name_variant', nargs=-1)
# @click.option('--dump-cmd', is_flag=True)
# @click.pass_context
# def stack_setup(ctx: click.Context, name_variant: str, dump_cmd: str):
# state: SwarmModeState = ctx.obj
# for name_variant_elem in name_variant:
# name, variant = name_variant_elem.split(':')
# state.ensure_stack_exists(name, variant)
# state.ensure_preconditions(name, variant, dump_cmd=dump_cmd)
| 2.203125 | 2 |
b3j0f/schema/lang/python.py | b3j0f/schema | 0 | 12792418 | # -*- coding: utf-8 -*-
# --------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --------------------------------------------------------------------
"""Python language schemas utilities."""
from re import compile as re_compile
from b3j0f.utils.version import OrderedDict
from b3j0f.utils.path import lookup
from ..base import Schema, DynamicValue
from .factory import SchemaBuilder, build
from ..elementary import ElementarySchema, ArraySchema, OneOfSchema, TypeSchema
from ..utils import updatecontent, data2schema, datatype2schemacls, RefSchema
from types import (
FunctionType, MethodType, LambdaType, BuiltinFunctionType,
BuiltinMethodType, MemberDescriptorType
)
from six import get_function_globals
from inspect import getargspec, getsourcelines, isclass, isbuiltin
from functools import wraps
__all__ = [
'PythonSchemaBuilder', 'FunctionSchema', 'buildschema', 'ParamSchema'
]
class PythonSchemaBuilder(SchemaBuilder):
"""In charge of building python classes."""
__name__ = 'python'
def build(self, _resource, **kwargs):
if not isclass(_resource):
raise TypeError(
'Wrong type {0}, \'type\' expected'.format(_resource)
)
if issubclass(_resource, Schema):
result = _resource
else:
result = datatype2schemacls(_datatype=_resource, _force=False)
if result is None:
resname = _resource.__name__
if 'name' not in kwargs:
kwargs['name'] = resname
for attrname in dir(_resource):
if (
attrname and attrname[0] != '_' and
attrname not in kwargs and
not hasattr(Schema, attrname)
):
attr = getattr(_resource, attrname)
if not isinstance(attr, MemberDescriptorType):
kwargs[attrname] = attr
result = type(resname, (Schema,), kwargs)
return result
def getresource(self, schemacls):
result = None
if hasattr(schemacls, 'mro'):
for mro in schemacls.mro():
if issubclass(mro, Schema):
result = mro
break
return result
def buildschema(_cls=None, **kwargs):
"""Class decorator used to build a schema from the decorate class.
:param type _cls: class to decorate.
:param kwargs: schema attributes to set.
:rtype: type
:return: schema class.
"""
if _cls is None:
return lambda _cls: buildschema(_cls=_cls, **kwargs)
result = build(_cls, **kwargs)
return result
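# Illustrative usage sketch (the class and its attributes are made up):
#   @buildschema(name='PersonSchema')
#   class Person(object):
#       firstname = ''
#       age = 0
# The decorator returns a Schema subclass built from Person, with its public
# attributes picked up by the python SchemaBuilder.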
class ParamTypeSchema(Schema):
"""In charge of embedding a parameter type which met a problem while
generating a schema."""
type = TypeSchema()
def _validate(self, data, *args, **kwargs):
super(ParamTypeSchema, self)._validate(data=data, *args, **kwargs)
if not isinstance(data, self.type):
raise TypeError(
'Wrong type of {0}. {1} expected.'.format(data, self.type)
)
@updatecontent
class ParamSchema(RefSchema):
"""Function parameter schema."""
#: if true (default), update self ref when default is given.
autotype = True
mandatory = True #: if true (default), parameter value is mandatory.
def _setvalue(self, schema, value, *args, **kwargs):
super(ParamSchema, self)._setvalue(schema, value, *args, **kwargs)
if schema.name == 'default':
if self.autotype and self.ref is None:
self.ref = None if value is None else data2schema(value)
if value is not None:
self.mandatory = False
class FunctionSchema(ElementarySchema):
"""Function schema.
Dedicated to describe functions, methods and lambda objects.
"""
_PDESC = r':param (?P<ptype1>[\w_,]+) (?P<pname1>\w+):'
_PTYPE = r':type (?P<pname2>[\w_]+):(?P<ptype2>[^\n]+)'
_RTYPE = r':rtype:(?P<rtype>[^\n]+)'
_REC = re_compile('{0}|{1}|{2}'.format(_PDESC, _PTYPE, _RTYPE))
__data_types__ = [
FunctionType, MethodType, LambdaType, BuiltinFunctionType,
BuiltinMethodType
]
params = ArraySchema(itemtype=ParamSchema())
rtype = Schema()
impl = ''
impltype = ''
safe = False
varargs = False
def _validate(self, data, owner, *args, **kwargs):
ElementarySchema._validate(self, data=data, *args, **kwargs)
if data != self.default or data is not self.default:
errormsg = 'Error while validating {0} with {1}'.format(data, self)
if data.__name__ != self.name:
raise TypeError(
'{0}. Wrong function name {1}. {2} expected.'.format(
errormsg, data.__name__, self.name
)
)
params, rtype, vargs, kwargs = self._getparams_rtype(function=data)
var = self.varargs or vargs or kwargs
if (not var) and len(params) != len(self.params):
raise TypeError(
'{0}. Wrong param length: {1}. {2} expected.'.format(
errormsg, len(params), len(self.params)
)
)
if self.rtype is not None and type(self.rtype) != type(rtype):
                raise TypeError(
                    '{0}. Wrong rtype {1}. {2} expected.'.format(
                        errormsg, rtype, self.rtype
                    )
                )
for index, pkwargs in enumerate(params.values()):
name = pkwargs['name']
default = pkwargs.get('default')
param = self.params[index]
if param.name != name:
raise TypeError(
'{0}. Wrong param {1} at {2}. {3} expected.'.format(
errormsg, name, index, param.name
)
)
val = param.default
if isinstance(val, DynamicValue):
val = val()
if (
val is not None and default is not None and val != default
):
raise TypeError(
'{0}. Wrong val {1}/{2} at {3}. Expected {4}.'.format(
errormsg, name, default, index, val
)
)
def _setvalue(self, schema, value):
if schema.name == 'default':
self._setter(obj=self, value=value)
def _setter(self, obj, value, *args, **kwargs):
if hasattr(self, 'olddefault'):
if self.olddefault is value:
return
self.olddefault = value
ElementarySchema._setter(self, obj, value, *args, **kwargs)
pkwargs, self.rtype, vargs, kwargs = self._getparams_rtype(value)
self.vargs = vargs or kwargs
params = []
selfparams = {}
for selfparam in self.params:
selfparams[selfparam.name] = selfparam
index = 0
for index, pkwarg in enumerate(pkwargs.values()):
name = pkwarg['name']
selfparam = None # old self param
if name in selfparams:
selfparam = selfparams[name]
if selfparam is None:
selfparam = ParamSchema(**pkwarg)
else:
for key in pkwarg:
val = pkwarg[key]
if val is not None:
setattr(selfparam, key, val)
params.append(selfparam)
self.params = params
self.impltype = 'python'
try:
self.impl = str(getsourcelines(value))
except TypeError:
self.impl = ''
@classmethod
def _getparams_rtype(cls, function):
"""Get function params from input function and rtype.
:return: OrderedDict, rtype, vargs and kwargs.
:rtype: tuple
"""
try:
args, vargs, kwargs, default = getargspec(function)
except TypeError:
args, vargs, kwargs, default = (), (), (), ()
indexlen = len(args) - (0 if default is None else len(default))
params = OrderedDict()
for index, arg in enumerate(args):
pkwargs = {
'name': arg,
'mandatory': True
} # param kwargs
if index >= indexlen: # has default value
value = default[index - indexlen]
pkwargs['default'] = value
pkwargs['ref'] = None if value is None else data2schema(value)
pkwargs['mandatory'] = False
params[arg] = pkwargs
rtype = None
# parse docstring
if function.__doc__ is not None and not isbuiltin(function):
scope = get_function_globals(function)
for match in cls._REC.findall(function.__doc__):
if rtype is None:
rrtype = match[4].strip() or None
if rrtype:
rtypes = rrtype.split(',')
schemas = []
for rtype_ in rtypes:
rtype_ = rtype_.strip()
islist = False
try:
lkrtype = lookup(rtype_, scope=scope)
except ImportError:
islist = True
try:
if rtype_[-1] == 's':
lkrtype = lookup(
rtype_[:-1], scope=scope
)
elif rtype_.startswith('list of '):
lkrtype = lookup(
rtype_[8:], scope=scope
)
else:
raise
except ImportError:
msg = 'rtype "{0}" ({1}) from {2} not found.'
raise ImportError(
msg.format(rtype_, rrtype, function)
)
try:
schemacls = datatype2schemacls(lkrtype)
except TypeError:
schemacls = ParamTypeSchema(type=lkrtype)
rschema = schemacls()
if islist:
rschema = ArraySchema(itemtype=rschema)
schemas.append(rschema)
if len(rtypes) > 1:
rtype = OneOfSchema(schemas=schemas, nullable=True)
else:
rtype = schemas[0]
continue
pname = (match[1] or match[2]).strip()
if pname and pname in params:
ptype = (match[0] or match[3]).strip()
ptypes = ptype.split(',')
schemas = []
for ptype in ptypes:
ptype = ptype.strip()
islist = False
try:
lkptype = lookup(ptype, scope=scope)
except ImportError:
islist = True
try:
if ptype[-1] == 's':
lkptype = lookup(ptype[:-1], scope=scope)
elif ptype.startswith('list of '):
lkptype = lookup(ptype[8:], scope=scope)
else:
raise
except ImportError:
msg = 'Error on ptype "{0}" ({1}) from {2} not found.'
raise ImportError(
msg.format(pname, ptype, function)
)
try:
schemacls = datatype2schemacls(lkptype)
except TypeError:
schemacls = ParamTypeSchema(type=lkptype)
pschema = schemacls()
if islist:
pschema = ArraySchema(itemtype=pschema)
schemas.append(pschema)
if len(ptypes) > 1:
pschema = OneOfSchema(schemas=schemas, nullable=True)
else:
pschema = schemas[0]
params[pname]['ref'] = pschema
return params, rtype, vargs, kwargs
def __call__(self, *args, **kwargs):
return self.default(*args, **kwargs)
def _getter(self, obj, *args, **kwargs):
func = ElementarySchema._getter(self, obj, *args, **kwargs)
@wraps(func)
def result(*args, **kwargs):
try:
result = func(obj, *args, **kwargs)
except TypeError:
result = func(*args, **kwargs)
return result
result.source = func
return result
def funcschema(default=None, *args, **kwargs):
"""Decorator to use in order to transform a function into a schema."""
if default is None:
return lambda default: funcschema(default=default, *args, **kwargs)
return FunctionSchema(default=default, *args, **kwargs)
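# Illustrative usage sketch (the function below is made up):
#   @funcschema
#   def add(a, b=0):
#       return a + b
# `add` becomes a FunctionSchema whose params (a mandatory, b with default 0)
# are inferred from the signature; calling add(1, 2) still returns 3 because
# FunctionSchema.__call__ delegates to the wrapped function.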
| 1.28125 | 1 |
tests/test_statefulset_partitioner.py | Qotto/tonga | 1 | 12792419 | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
import uuid
import pytest
from tonga.services.coordinator.partitioner.statefulset_partitioner import StatefulsetPartitioner
from tonga.errors import OutsideInstanceNumber
def test_statefulset_partitioner_with_str_uuid_key():
statefulset_partitioner = StatefulsetPartitioner(instance=1)
for i in range(0, 100):
assert statefulset_partitioner(uuid.uuid4().hex, [0, 1, 2, 3], [0, 1, 2, 3]) == 1
def test_statefulset_partitioner_bad_instance():
statefulset_partitioner = StatefulsetPartitioner(instance=100)
with pytest.raises(OutsideInstanceNumber):
statefulset_partitioner.__call__(uuid.uuid4().hex, [0, 1, 2, 3], [0, 1, 2, 3])
| 2.3125 | 2 |
reprlearn/data/__init__.py | cocoaaa/ReprLearn | 0 | 12792420 | <reponame>cocoaaa/ReprLearn<gh_stars>0
def data_fn():
print("src.data.__init__.py")
# data_fn() | 1.21875 | 1 |
eris/decorators/__init__.py | xesxen/eris | 1 | 12792421 | <filename>eris/decorators/__init__.py
""" Decorator module.
Contains various decorators for hook callbacks.
"""
class BaseDecorator:
""" Base class for decorators in Eris. """
# The interface for hooks means that events will always be the first argument, anything else
# will be passed as payloads for the events.
_EVENT_OFFSET: int = 1
| 2.015625 | 2 |
tempoMAGE.py | pkhoueiry/TempoMAGE | 1 | 12792422 | #!/usr/bin/python
""" TempMAGE model architecture """
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers import Conv2D,MaxPool2D,Flatten,Conv1D,MaxPooling1D,Dense,Dropout
def set_metrics():
""" metrics used to evaluate the model's perfromance """
METRICS = [
keras.metrics.TruePositives(name='tp'),
keras.metrics.FalsePositives(name='fp'),
keras.metrics.TrueNegatives(name='tn'),
keras.metrics.FalseNegatives(name='fn'),
keras.metrics.BinaryAccuracy(name='accuracy'),
keras.metrics.Precision(name='precision'),
keras.metrics.Recall(name='recall'),
keras.metrics.AUC(name='ROC_auc'),
keras.metrics.AUC(name='PR_auc', curve = "PR"),
keras.losses.BinaryCrossentropy(from_logits=True, name='binary_crossentropy')
]
return METRICS
def tempoMAGE(metrics, output_bias= None):
if output_bias is not None:
output_bias = tf.keras.initializers.Constant(output_bias)
seq_input = keras.Input(shape=(400,5,1), name='sequence_conv')
x = keras.layers.Conv2D(filters=32, kernel_size=(10,5), padding='valid', activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.0001))(seq_input)
x = keras.layers.MaxPooling2D(pool_size=(2,1))(x)
x = keras.layers.Conv2D(filters=64, kernel_size=(2,1), padding='valid', activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x)
x = keras.layers.MaxPooling2D(pool_size=(2,1))(x)
x = keras.layers.Conv2D(filters=128, kernel_size=(2,1), padding='valid', activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.0005) )(x)
x = keras.layers.MaxPooling2D(pool_size=(2,1))(x)
sequence_features = keras.layers.Flatten()(x)
depth_input = keras.Input(shape=(400,1), name= 'depth')
x = keras.layers.Conv1D(filters= 32, kernel_size=(5), padding='valid', activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.0001))(depth_input)
x = keras.layers.MaxPooling1D(pool_size=(2))(x)
x = keras.layers.Conv1D(filters= 64, kernel_size=(2), padding='valid', activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x)
x = keras.layers.MaxPooling1D(pool_size=(2))(x)
x = keras.layers.Conv1D(filters= 128, kernel_size=(2), padding='valid', activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x)
x = keras.layers.MaxPooling1D(pool_size=(2))(x)
depth_features = keras.layers.Flatten()(x)
x = layers.concatenate([sequence_features, depth_features])
conv_dense = keras.layers.Dense(108, activation = 'relu')(x)
expression_input = keras.Input(shape=(20,1), name= 'expression')
expression_features = keras.layers.Flatten()(expression_input)
weight_input = keras.Input(shape=(1,1), name= 'weight')
weight_features = keras.layers.Flatten()(weight_input)
x = layers.concatenate([expression_features, weight_features])
data_dense = keras.layers.Dense(20,activation = 'relu')(x)
x = layers.concatenate([conv_dense, data_dense])
x = keras.layers.Dense(128, activation = 'relu',
kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x)
x = keras.layers.Dropout(0.3)(x)
seq_pred = keras.layers.Dense(1, activation='sigmoid',bias_initializer= output_bias)(x)
model = keras.Model(inputs=[seq_input,depth_input,expression_input, weight_input], outputs= seq_pred)
model.compile(loss='binary_crossentropy',
optimizer=keras.optimizers.Adam(0.001),
metrics= metrics)
return model
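# Illustrative usage sketch (shapes follow the Input layers above; the numpy
# arrays are random placeholders, not real data):
#   import numpy as np
#   model = tempoMAGE(metrics=set_metrics())
#   batch = {'sequence_conv': np.random.rand(8, 400, 5, 1),
#            'depth': np.random.rand(8, 400, 1),
#            'expression': np.random.rand(8, 20, 1),
#            'weight': np.random.rand(8, 1, 1)}
#   preds = model.predict(batch)  # shape (8, 1), sigmoid probabilities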
| 2.59375 | 3 |
geoloc-server/test.py | muzammilar/passport | 1 | 12792423 | # -*- coding: utf-8 -*-
"""
test.py
~~~~~~~~~~~
Internal file for dummy function checking.
:author: <NAME>
:copyright: Northeastern University © 2018.
:license: Custom BSD, see LICENSE for more details.
:email: <EMAIL>
"""
import time
import configs.system
print configs.system.PROJECT_ROOT
time.sleep(100) | 1.132813 | 1 |
tests/run_test_Recommenders.py | damicoedoardo/NNMF | 2 | 12792424 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 22/11/2018
@author: XXX
"""
import unittest
import os, shutil
from RecSysFramework.Recommender.NonPersonalized import TopPop, Random, GlobalEffects
from RecSysFramework.Recommender.KNN import UserKNNCF
from RecSysFramework.Recommender.KNN import ItemKNNCF
from RecSysFramework.Recommender.SLIM.BPR import SLIM as SLIM_BPR
from RecSysFramework.Recommender.SLIM.ElasticNet import SLIM as SLIM_RMSE
from RecSysFramework.Recommender.GraphBased.P3alphaRecommender import P3alpha
from RecSysFramework.Recommender.GraphBased.RP3betaRecommender import RP3beta
from RecSysFramework.Recommender.MatrixFactorization import BPRMF, FunkSVD, AsySVD
from RecSysFramework.Recommender.MatrixFactorization import PureSVD
from RecSysFramework.Recommender.MatrixFactorization import IALS
from RecSysFramework.Recommender.MatrixFactorization import NMF
from RecSysFramework.Evaluation.Evaluator import EvaluatorHoldout
from RecSysFramework.DataManager.Reader import Movielens1MReader
from RecSysFramework.DataManager.Splitter import Holdout
from RecSysFramework.Utils import EarlyStoppingModel
class RecommenderTestCase(unittest.TestCase):
recommender_class = None
def setUp(self):
self.dataset = Movielens1MReader().load_data()
self.splitter = Holdout(train_perc=0.8, test_perc=0.2, validation_perc=0.0)
self.train, self.test = self.splitter.split(self.dataset)
def common_test_recommender(self, recommender_class):
temp_save_file_folder = self.dataset.get_complete_folder() + os.sep + "__temp__"
os.makedirs(temp_save_file_folder, exist_ok=True)
URM_train = self.train.get_URM()
URM_test = self.test.get_URM()
recommender_object = recommender_class(URM_train)
if isinstance(recommender_object, EarlyStoppingModel):
fit_params = {"epochs": 10}
else:
fit_params = {}
recommender_object.fit(**fit_params)
evaluator = EvaluatorHoldout([5], exclude_seen=True)
metrics_handler = evaluator.evaluateRecommender(recommender_object, URM_test=URM_test)
recommender_object.save_model(temp_save_file_folder, file_name="temp_model")
recommender_object = recommender_class(URM_train)
recommender_object.load_model(temp_save_file_folder, file_name="temp_model")
evaluator = EvaluatorHoldout([5], exclude_seen=True)
metrics_handler = evaluator.evaluateRecommender(recommender_object, URM_test=URM_test)
shutil.rmtree(temp_save_file_folder, ignore_errors=True)
class RandomRecommenderTestCase(RecommenderTestCase):
def test_recommender(self):
self.common_test_recommender(Random)
class TopPopRecommenderTestCase(RecommenderTestCase):
def test_recommender(self):
self.common_test_recommender(TopPop)
class GlobalEffectsRecommenderTestCase(RecommenderTestCase):
def test_recommender(self):
self.common_test_recommender(GlobalEffects)
class UserKNNCFRecommenderTestCase(RecommenderTestCase):
def test_recommender(self):
self.common_test_recommender(UserKNNCF)
class ItemKNNCFRecommenderTestCase(RecommenderTestCase):
def test_recommender(self):
self.common_test_recommender(ItemKNNCF)
class P3alphaRecommenderTestCase(RecommenderTestCase):
def test_recommender(self):
self.common_test_recommender(P3alpha)
class RP3betaRecommenderTestCase(RecommenderTestCase):
def test_recommender(self):
self.common_test_recommender(RP3beta)
class SLIM_BPRRecommenderTestCase(RecommenderTestCase):
def test_recommender(self):
self.common_test_recommender(SLIM_BPR)
class SLIM_RMSERecommenderTestCase(RecommenderTestCase):
def test_recommender(self):
self.common_test_recommender(SLIM_RMSE)
class BPRMFRecommenderTestCase(RecommenderTestCase):
def test_recommender(self):
self.common_test_recommender(BPRMF)
class FunkSVDRecommenderTestCase(RecommenderTestCase):
def test_recommender(self):
self.common_test_recommender(FunkSVD)
class AsySVDRecommenderTestCase(RecommenderTestCase):
def test_recommender(self):
self.common_test_recommender(AsySVD)
class PureSVDRecommenderTestCase(RecommenderTestCase):
def test_recommender(self):
self.common_test_recommender(PureSVD)
class NMFRecommenderTestCase(RecommenderTestCase):
def test_recommender(self):
self.common_test_recommender(NMF)
class IALSRecommenderTestCase(RecommenderTestCase):
def test_recommender(self):
self.common_test_recommender(IALS)
if __name__ == '__main__':
unittest.main()
| 1.84375 | 2 |
attributes/continuous_integration/__init__.py | Lufedi/reaper | 106 | 12792425 | <reponame>Lufedi/reaper
class CiService(object):
@staticmethod
def is_enabled(path):
raise NotImplementedError()
| 1.484375 | 1 |
scts/__init__.py | deniscapeto/SimpleCorreiosTrackingService | 0 | 12792426 | from django import setup
setup()
from scts.factory.build_app import build_app # noqa
app = build_app()
| 1.304688 | 1 |
bc_website/__main__.py | beginner-codes/website | 1 | 12792427 | import uvicorn
uvicorn.run("bc_website.app:app", host="localhost", port=5000, reload=True)
| 1.679688 | 2 |
web_wrapper/context_processors.py | musicmetadata/web-wrapper | 1 | 12792428 | <filename>web_wrapper/context_processors.py
from django.conf import settings
def features(request):
return {
'CWR2_AVAILABLE': settings.CWR2_AVAILABLE,
'CWR3_AVAILABLE': settings.CWR3_AVAILABLE,
}
| 1.671875 | 2 |
src/__main__.py | andreimaximov/course-planner | 0 | 12792429 | import argparse
import list
import depth
VERSION = '1.0.2'
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='version', version=VERSION)
subparsers = parser.add_subparsers()
list.init(subparsers)
depth.init(subparsers)
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()
| 2.671875 | 3 |
Python3/HTTPRequests/first_request.py | norbertosanchezdichi/TIL | 0 | 12792430 | <reponame>norbertosanchezdichi/TIL
import requests
url = "http://www.google.com"
response = requests.get(url)
print(f"your request to {url} came back w/ status code {response.status_code}")
print(response.text) | 3.125 | 3 |
compositional-zs/compose_data.py | ZhihaoAIRobotic/tafe-net | 56 | 12792431 | <filename>compositional-zs/compose_data.py
import torch
import torch.utils.data as data
from os.path import join, exists
def load_word_embeddings(emb_file, vocab):
vocab = [v.lower() for v in vocab]
embeds = {}
for line in open(emb_file, 'r'):
line = line.strip().split(' ')
wvec = torch.FloatTensor(list(map(float, line[1:])))
embeds[line[0]] = wvec
new_embs = []
for k in vocab:
if '-' in k:
ks = k.split('-')
emb = torch.stack([embeds[it] for it in ks]).mean(dim=0)
else:
emb = embeds[k]
new_embs.append(emb)
embeds = torch.stack(new_embs)
return embeds
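# The embedding file is expected in plain GloVe text format, one token per
# line: "<word> <dim_1> ... <dim_n>". Illustrative usage sketch (paths and
# vocabulary are placeholders):
#   embs = load_word_embeddings('glove/glove.6B.300d.txt', ['red', 'tomato'])
#   embs.shape  # -> torch.Size([2, 300])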
class CompositionDataset(data.Dataset):
def __init__(self, root, phase, split='compositional-split',
emb_file='glove/glove.6B.300d.txt', triple=False,
feat='resnet18'):
self.root = root # default is `data/compositional-zs`
self.phase = phase
self.split = split
self.emb_file = join(root, emb_file)
self.triple = triple # SPO triplet is used in StanfordVRD
self.feat = feat
feat_file = '{}/{}_features.t7'.format(root, feat)
activation_data = torch.load(feat_file)
self.activations = dict(
zip(activation_data['files'], activation_data['features']))
self.feat_dim = activation_data['features'].size(1)
# load splits
self.attrs, self.objs, self.pairs, self.train_pairs, self.test_pairs = \
self.parse_split()
assert len(set(self.train_pairs) & set(self.test_pairs)) == 0, \
'train and test are not mutually exclusive'
# load pretrained word2vec embeddings
if self.emb_file is not None:
att_emb_file = join(root, 'attrs_embs.t7')
if exists(att_emb_file):
self.attrs_embds = torch.load(att_emb_file)
else:
self.attrs_embds = load_word_embeddings(
self.emb_file, self.attrs)
torch.save(self.attrs_embds, att_emb_file)
obj_emb_file = join(root, 'objs_embs.t7')
if exists(obj_emb_file):
self.objs_embds = torch.load(obj_emb_file)
else:
self.objs_embds = load_word_embeddings(
self.emb_file, self.objs)
torch.save(self.objs_embds, obj_emb_file)
else:
raise NotImplementedError
self.curr_pairs = self.train_pairs if self.phase == 'train' \
else self.test_pairs
self.pair2idx = {pair: idx for idx, pair in enumerate(self.curr_pairs)}
if self.triple:
self.embeddings = [torch.cat([
self.objs_embds[self.objs.index(sub)],
self.attrs_embds[self.attrs.index(verb)],
self.objs_embds[self.objs.index(obj)]])
for (sub, verb, obj) in self.curr_pairs]
else:
self.embeddings = [torch.cat([
self.attrs_embds[self.attrs.index(attr)],
self.objs_embds[self.objs.index(obj)]])
for (attr, obj) in self.curr_pairs]
self.embeddings = torch.stack(self.embeddings)
self.data, self.labels = self.get_split_info()
def get_split_info(self):
data = torch.load(self.root + '/metadata.t7')
images, targets = [], []
for instance in data:
if self.triple:
image, sub, verb, obj = instance['image'], instance['sub'], \
instance['pred'], instance['obj']
key = (sub, verb, obj)
else:
image, attr, obj = instance['image'], instance['attr'], \
instance['obj']
key = (attr, obj)
if key in self.curr_pairs:
images.append(image)
targets.append(self.curr_pairs.index(key))
return images, targets
def parse_split(self):
def parse_pairs(pair_list):
with open(pair_list, 'r') as f:
pairs = f.read().strip().split('\n')
pairs = [t.split() for t in pairs]
pairs = list(map(tuple, pairs))
if self.triple:
subs, verbs, objs = zip(*pairs)
return subs, verbs, objs, pairs
else:
attrs, objs = zip(*pairs)
return attrs, objs, pairs
if self.triple:
tr_subs, tr_verbs, tr_objs, tr_pairs = parse_pairs(
'{}/{}/train_pairs.txt'.format(self.root, self.split))
ts_subs, ts_verbs, ts_objs, ts_pairs = parse_pairs(
'{}/{}/test_pairs.txt'.format(self.root, self.split))
# The attributes are now the `verbs` and the subjects and objects
# share the same label space
all_attrs, all_objs = sorted(list(set(tr_verbs + ts_verbs))), \
sorted(list(set(tr_objs + ts_objs +
tr_subs + ts_subs)))
else:
tr_attrs, tr_objs, tr_pairs = parse_pairs(
'{}/{}/train_pairs.txt'.format(self.root, self.split))
ts_attrs, ts_objs, ts_pairs = parse_pairs(
'{}/{}/test_pairs.txt'.format(self.root, self.split))
all_attrs, all_objs = sorted(list(set(tr_attrs + ts_attrs))), \
sorted(list(set(tr_objs + ts_objs)))
all_pairs = sorted(list(set(tr_pairs + ts_pairs)))
return all_attrs, all_objs, all_pairs, tr_pairs, ts_pairs
def __getitem__(self, index):
image, label = self.data[index], self.labels[index]
feat = self.activations[image]
return feat, label
def __len__(self):
return len(self.data)
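# Illustrative instantiation sketch (assumes the default data layout described
# in __init__, i.e. metadata.t7, resnet18_features.t7 and the split files
# under data/compositional-zs):
#   train_set = CompositionDataset(root='data/compositional-zs', phase='train')
#   feat, label = train_set[0]  # feature vector and index into curr_pairs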
| 2.375 | 2 |
breadp/benchmarks/example.py | tgweber/breadp | 0 | 12792432 | <filename>breadp/benchmarks/example.py
################################################################################
# Copyright: <NAME> 2020
#
# Apache 2.0 License
#
# This file contains code related to the DataCite best practice guide benchmark
#
# Basis for this benchmark: 10.5281/zenodo.3559800
#
################################################################################
import sys
from breadp.benchmarks import Benchmark
from breadp.checks.pid import IsValidDoiCheck, DoiResolvesCheck
from breadp.checks.metadata import \
CreatorsOrcidCheck, \
CreatorsFamilyAndGivenNameCheck, \
CreatorsContainInstitutionsCheck, \
ContributorsOrcidCheck, \
ContributorsFamilyAndGivenNameCheck, \
ContributorsContainInstitutionsCheck, \
ContributorsTypeCheck, \
DatesInformationCheck, \
DatesIssuedYearCheck, \
DatesTypeCheck, \
DescriptionsTypeCheck, \
DescriptionsLanguageCheck, \
DescriptionsLengthCheck, \
DescriptionsNumberCheck, \
FormatsAreValidMediaTypeCheck, \
LanguageSpecifiedCheck, \
PublicationYearCheck, \
RelatedResourceMetadataCheck, \
RelatedResourceTypeCheck, \
RightsAreOpenCheck, \
RightsHaveValidSPDXIdentifierCheck, \
RightsHasAtLeastOneLicenseCheck, \
SizesNumberCheck, \
SizesByteSizeCheck, \
SubjectsNumberCheck, \
SubjectsHaveDdcCheck, \
SubjectsAreQualifiedCheck, \
SubjectsHaveWikidataKeywordsCheck, \
TitlesJustAFileNameCheck, \
TitlesLanguageCheck, \
TitlesTypeCheck, \
VersionSpecifiedCheck
from breadp.evaluations import \
ContainsAllEvaluation, \
ContainsAtLeastOneEvaluation, \
ContainsItemExactlyNTimesEvaluation, \
DoesNotContainEvaluation, \
Evaluation, \
FalseEvaluation, \
FunctionEvaluation, \
InListEvaluation, \
IsBetweenEvaluation, \
IsIdenticalToEvaluation, \
TheMoreTrueTheBetterEvaluation, \
TheMoreFalseTheBetterEvaluation, \
TrueEvaluation
from rdp import Rdp
def skip(e: Evaluation, rdp: Rdp) -> bool:
# Evaluating language make no sense for these types
if rdp.metadata.type in ("Image", "PhysicalObject"):
for c in e.checks:
if type(c).__name__ == "LanguageSpecifiedCheck":
return True
# Version is optional
if rdp.metadata.version is None:
for c in e.checks:
if type(c).__name__ == "VersionSpecifiedCheck":
return True
# Contributors are optional
if len(rdp.metadata.contributors) == 0:
# if the license is non-open, we need to check Rightsholder!
if [c.name for c in e.checks] == ["RightsAreOpenCheck",
"ContributorsTypeCheck"]:
return False
for c in e.checks:
if c.name.startswith("Contributors"):
return True
# Related Resources are optional
if len(rdp.metadata.relatedResources) == 0:
for c in e.checks:
if type(c).__name__.startswith("RelatedResource"):
return True
return False
class BPGBenchmark(Benchmark):
""" This benchmark is inspired by the DataCite best practice guide
(DOI: 10.5281/zenodo.3559799)
"""
def __init__(self, name=None):
        Benchmark.__init__(self, name or "Best Practice Benchmark")
self.skip = skip
self.version = "0.0.1"
self.id = "BPG"
# PID
isValidDoiCheck = IsValidDoiCheck()
doiResolvesCheck = DoiResolvesCheck()
self.add_evaluation(TrueEvaluation([isValidDoiCheck]))
self.add_evaluation(TrueEvaluation([doiResolvesCheck]))
# CREATOR
creatorsOrcidCheck = CreatorsOrcidCheck()
creatorsFamilyAndGivenNameCheck = CreatorsFamilyAndGivenNameCheck()
creatorsContainInstitutionsCheck = CreatorsContainInstitutionsCheck()
self.add_evaluation(
TheMoreTrueTheBetterEvaluation([creatorsOrcidCheck])
)
self.add_evaluation(
TheMoreTrueTheBetterEvaluation([creatorsFamilyAndGivenNameCheck])
)
self.add_evaluation(
TheMoreFalseTheBetterEvaluation([creatorsContainInstitutionsCheck])
)
# TITLE
titlesJustAFileNameCheck = TitlesJustAFileNameCheck()
titlesTypeCheck = TitlesTypeCheck()
titlesLanguageCheck = TitlesLanguageCheck()
self.add_evaluation(FalseEvaluation([titlesJustAFileNameCheck]))
self.add_evaluation(
ContainsItemExactlyNTimesEvaluation(
[titlesTypeCheck], None, 1
)
)
self.add_evaluation(
ContainsAllEvaluation(
[titlesLanguageCheck], ["en"]
)
)
# SUBJECT
subjectsAreQualifiedCheck = SubjectsAreQualifiedCheck()
subjectsNumberCheck = SubjectsNumberCheck()
subjectsHaveDdcCheck = SubjectsHaveDdcCheck()
subjectsHaveWikidataKeywordsCheck = SubjectsHaveWikidataKeywordsCheck()
self.add_evaluation(
TheMoreTrueTheBetterEvaluation([subjectsAreQualifiedCheck])
)
self.add_evaluation(
IsBetweenEvaluation([subjectsNumberCheck], 1, sys.float_info.max)
)
self.add_evaluation(TrueEvaluation([subjectsHaveDdcCheck]))
self.add_evaluation(TrueEvaluation([subjectsHaveWikidataKeywordsCheck]))
# CONTRIBUTOR
contributorsOrcidCheck = ContributorsOrcidCheck()
contributorsFamilyAndGivenNameCheck = ContributorsFamilyAndGivenNameCheck()
contributorsContainInstitutionsCheck = ContributorsContainInstitutionsCheck()
contributorsTypeCheck = ContributorsTypeCheck()
def allow_person_related_tests_to_be_skipped(checks, pid):
evaluation = 0
isInstitution = checks[0].get_last_result(pid).outcome
booleanCheckResult = checks[1].get_last_result(pid).outcome
newTotal = isInstitution.count(False)
for idx, inst in enumerate(isInstitution):
if inst:
continue
if booleanCheckResult[idx]:
evaluation += 1/newTotal
return evaluation
self.add_evaluation(
FunctionEvaluation(
[
contributorsContainInstitutionsCheck,
contributorsOrcidCheck
],
allow_person_related_tests_to_be_skipped
)
)
self.add_evaluation(
FunctionEvaluation(
[
contributorsContainInstitutionsCheck,
contributorsFamilyAndGivenNameCheck
],
allow_person_related_tests_to_be_skipped
)
)
def allow_type_to_enforce_institution(checks, pid):
evaluation = 0
isInstitution = checks[0].get_last_result(pid).outcome
contributorTypes = checks[1].get_last_result(pid).outcome
for idx, inst in enumerate(isInstitution):
if inst:
if contributorTypes[idx] == "HostingInstitution":
evaluation += 1/len(isInstitution)
else:
evaluation += 1/len(isInstitution)
return evaluation
self.add_evaluation(
FunctionEvaluation(
[
contributorsContainInstitutionsCheck,
contributorsTypeCheck
],
allow_type_to_enforce_institution
)
)
self.add_evaluation(
InListEvaluation(
[contributorsTypeCheck],
["ContactPerson",
"DataCollector",
"DataCurator",
"HostingInstitution",
"ProjectLeader",
"ProjectManager",
"ProjectMember",
"Researcher",
"RightsHolder",
"WorkPackageLeader"
],
)
)
# DATES
datesTypeCheck = DatesTypeCheck()
publicationYearCheck = PublicationYearCheck()
datesIssuedYearCheck = DatesIssuedYearCheck()
datesInformationCheck = DatesInformationCheck()
self.add_evaluation(
ContainsAtLeastOneEvaluation([datesTypeCheck], ["Created", "Collected"])
)
def publishedEqualsIssued(checks, pid):
return checks[0].get_last_result(pid).outcome \
== checks[1].get_last_result(pid).outcome
self.add_evaluation(
FunctionEvaluation(
[publicationYearCheck, datesIssuedYearCheck],
publishedEqualsIssued,
)
)
def duplicatesHaveInformation(checks, pid):
r1 = checks[0].get_last_result(pid)
r2 = checks[1].get_last_result(pid)
if 0 in (len(r1.outcome), len(r2.outcome)):
return 0
dups = 0
dupsWithInfo = 0
dateTypesHasInformation = {}
for idx, t in enumerate(r1.outcome):
if dateTypesHasInformation.get(t) is not None:
dups += 1
if dateTypesHasInformation[t] and r2.outcome[idx] is not None:
dupsWithInfo += 1
dateTypesHasInformation[t] = r2.outcome[idx] is not None
if dups == 0:
return 1
else:
return dupsWithInfo/dups
self.add_evaluation(
FunctionEvaluation(
[datesTypeCheck, datesInformationCheck],
duplicatesHaveInformation
)
)
# LANGUAGE
languageSpecifiedCheck = LanguageSpecifiedCheck()
self.add_evaluation(TrueEvaluation([languageSpecifiedCheck]))
# RELATED RESOURCES
relatedResourceMetadataCheck = RelatedResourceMetadataCheck()
relatedResourceTypeCheck = RelatedResourceTypeCheck()
self.add_evaluation(TrueEvaluation([relatedResourceMetadataCheck]))
self.add_evaluation(
InListEvaluation(
[relatedResourceTypeCheck],
[
"Describes",
"IsDescribedBy",
"HasPart",
"IsPartOf",
"HasMetadata",
"IsMetadataFor",
"HasVersion",
"IsVersionOf",
"IsNewVersionOf",
"IsPreviousVersionOf",
"IsSourceOf",
"IsDerivedFrom",
"References",
"IsReferencedBy",
"IsVariantFormOf",
"IsIdenticalTo",
"IsSupplementTo",
"IsSupplementedBy",
"Documents",
"IsDocumentedBy"
]
)
)
# SIZE
sizesNumberCheck = SizesNumberCheck()
sizesByteSizeCheck = SizesByteSizeCheck()
self.add_evaluation(
IsIdenticalToEvaluation([sizesNumberCheck], 1)
)
self.add_evaluation(
ContainsItemExactlyNTimesEvaluation(
[sizesByteSizeCheck],
True,
1
)
)
# FORMAT
formatsAreValidMediaTypeCheck = FormatsAreValidMediaTypeCheck()
self.add_evaluation(
TheMoreTrueTheBetterEvaluation([formatsAreValidMediaTypeCheck])
)
# VERSION
versionSpecifiedCheck = VersionSpecifiedCheck()
self.add_evaluation(TrueEvaluation([versionSpecifiedCheck]))
# RIGHTS
rightsHasAtLeastOneLicenseCheck = RightsHasAtLeastOneLicenseCheck()
rightsHaveValidSPDXIdentifierCheck = RightsHaveValidSPDXIdentifierCheck()
rightsAreOpenCheck = RightsAreOpenCheck()
self.add_evaluation(TrueEvaluation([rightsHasAtLeastOneLicenseCheck]))
self.add_evaluation(
TheMoreTrueTheBetterEvaluation([rightsHaveValidSPDXIdentifierCheck])
)
# DESCRIPTIONS
descriptionsNumberCheck = DescriptionsNumberCheck()
descriptionsLengthCheck = DescriptionsLengthCheck()
descriptionsLanguageCheck = DescriptionsLanguageCheck()
descriptionsTypeCheck = DescriptionsTypeCheck()
self.add_evaluation(
IsBetweenEvaluation(
[descriptionsNumberCheck], 1, sys.float_info.max
)
)
self.add_evaluation(
IsBetweenEvaluation(
[descriptionsLengthCheck], 1, 300
)
)
self.add_evaluation(
ContainsAllEvaluation(
[descriptionsLanguageCheck], ["en"]
)
)
self.add_evaluation(
ContainsAllEvaluation(
[descriptionsTypeCheck], ["Abstract"]
)
)
self.add_evaluation(
DoesNotContainEvaluation(
[descriptionsTypeCheck],
["SeriesInformation", "TableOfContents", "Other", None]
)
)
# MIXED
def rightsHolderIfRightsClosed(checks, pid):
rightsAreOpen = checks[0].get_last_result(pid).outcome
contributorsType = checks[1].get_last_result(pid).outcome
if rightsAreOpen:
return 1
elif "RightsHolder" in contributorsType:
return 1
else:
return 0
self.add_evaluation(
FunctionEvaluation(
[rightsAreOpenCheck, contributorsTypeCheck],
rightsHolderIfRightsClosed
)
)
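# Illustrative usage sketch; everything except BPGBenchmark() and add_evaluation
# is an assumption about the surrounding breadp/rdp API, not taken from it:
#   benchmark = BPGBenchmark()
#   rdp = Rdp(...)              # hypothetical constructor args for the data product
#   # benchmark.run(rdp)        # hypothetical: execute all checks and evaluations
#   # benchmark.score(rdp)      # hypothetical: aggregate score of the evaluations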
| 1.515625 | 2 |
oreilly_python1/final.py | myleneh/code | 0 | 12792433 | #!/usr/local/bin/python3
"""
This program takes a filename as input and displays, for each word length, the number of words of that length, ignoring digits, punctuation, and other non-letter characters.
"""
import re, sys
word_table = {}
with open(sys.argv[1]) as f:
for line in f:
for word in line.split():
stripped = re.sub("[^a-zA-Z ]", "", word)
length = len(stripped)
            if length > 0:
                word_table[length] = word_table.get(length, 0) + 1
print("Length Count")
for key in sorted(word_table):
    print("{0:<6} {1}".format(key, word_table[key]))
| 4.3125 | 4 |
test_damm.py | jdmacleod/damm | 0 | 12792434 | <reponame>jdmacleod/damm<filename>test_damm.py
from damm import encode, check
import unittest
class GeneralTest(unittest.TestCase):
def test_some_known_numbers(self):
self.assertEqual(encode(572), 4)
self.assertTrue(check(5724))
self.assertEqual(encode('43881234567'), 9)
self.assertFalse(check(5723))
class SwitchTest(unittest.TestCase):
def test_simple_switch(self):
"""Test that a single digit switch changes the result."""
for i in range(9):
si = str(i)
for j in range(i+1, 10):
sj = str(j)
self.assertNotEqual(encode(si+sj), encode(sj+si))
def test_single_switch_offset(self):
"""Test that a single digit switch changes the result, no matter the digit before."""
for k in range(10):
sk = str(k)
for i in range(9):
si = str(i)
for j in range(i+1, 10):
sj = str(j)
self.assertNotEqual(encode(sk+si+sj), encode(sk+sj+si))
class DigitTest(unittest.TestCase):
def test_simple_digit(self):
"""Check that changing a single digit changes the check-digit."""
for i in range(10):
si = str(i)
for j in range(10):
sj = str(j)
for k in range(10):
sk = str(k)
if j != k:
self.assertNotEqual(
encode(si+sj),
encode(si+sk))
class PhoneticTest(unittest.TestCase):
def test_phonetic(self):
"""Check that a range of phonetic errors are caught by the check digit."""
self.assertNotEqual(encode(13), encode(30))
self.assertNotEqual(encode(14), encode(40))
self.assertNotEqual(encode(15), encode(50))
self.assertNotEqual(encode(16), encode(60))
self.assertNotEqual(encode(17), encode(70))
self.assertNotEqual(encode(18), encode(80))
self.assertNotEqual(encode(19), encode(90))
def count_singles():
"""Count the fraction of single-digit errors missed."""
checks = 0
errors = 0
for i in range(10):
si = str(i)
for j in range(10):
sj = str(j)
for k in range(10):
sk = str(k)
if j != k:
checks += 1
if encode(si+sj) == encode(si+sk):
errors += 1
return errors / float(checks)
def count_switches():
"""Count the fraction of adjacent-digit-switch errors missed."""
checks = 0
errors = 0
for k in range(10):
sk = str(k)
for i in range(9):
si = str(i)
for j in range(i+1, 10):
sj = str(j)
checks += 1
if encode(sk+si+sj) == encode(sk+sj+si):
errors += 1
return errors / float(checks)
def count_phonetics():
"""Count the fraction of phonetic errors missed."""
checks = 7
errors = 0
if encode(13) == encode(30):
errors += 1
if encode(14) == encode(40):
errors += 1
if encode(15) == encode(50):
errors += 1
if encode(16) == encode(60):
errors += 1
if encode(17) == encode(70):
errors += 1
if encode(18) == encode(80):
errors += 1
if encode(19) == encode(90):
errors += 1
return errors / float(checks)
def count_twins():
"""Count the number of twin errors missed (aa -> bb)."""
checks = 0
errors = 0
for k in range(10):
sk = str(k)
for i in range(10):
si = str(i)
for j in range(10):
sj = str(j)
checks += 1
if encode(sk+si+si) == encode(sk+sj+sj):
errors += 1
return errors / float(checks)
def count_jump_switch():
""" abc -> cba """
checks = 0
errors = 0
for k in range(10):
sk = str(k)
for l in range(10):
sl = str(l)
for a in range(9):
sa = str(a)
for b in range(a+1,10):
sb = str(b)
checks += 1
if encode(sk+sa+sl+sb) == encode(sk+sb+sl+sa):
errors += 1
return errors / float(checks)
def count_jump_twins():
""" Xaka -> Xbkb """
checks = 0
errors = 0
for k in range(10):
sk = str(k)
for l in range(10):
sl = str(l)
for a in range(10):
sa = str(a)
for b in range(10):
sb = str(b)
checks += 1
if encode(sk+sa+sl+sa) == encode(sk+sb+sl+sb):
errors += 1
return errors / float(checks)
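if __name__ == '__main__':
    # Illustrative driver (an addition, not part of the original suite): report
    # the fraction of each error class the Damm check digit fails to catch,
    # then run the unit tests above.
    print('single-digit errors missed:', count_singles())
    print('adjacent switches missed:  ', count_switches())
    print('phonetic errors missed:    ', count_phonetics())
    print('twin errors missed:        ', count_twins())
    print('jump switches missed:      ', count_jump_switch())
    print('jump twins missed:         ', count_jump_twins())
    unittest.main()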
| 3 | 3 |
nemo/collections/nlp/models/glue_benchmark/metrics_for_glue.py | vinayphadnis/NeMo | 4,145 | 12792435 | # Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2020, <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
__all__ = ['compute_metrics']
def accuracy(preds: List[int], labels: List[int]):
return {"acc": (preds == labels).mean()}
def acc_and_f1(preds: List[int], labels: List[int]):
accuracy = (preds == labels).mean()
f1 = f1_score(y_true=labels, y_pred=preds)
return {"acc": accuracy, "f1": f1}
def mcc(preds: List[int], labels: List[int]):
return {"mcc": matthews_corrcoef(labels, preds)}
def pearson_and_spearman(preds: List[int], labels: List[int]):
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {"pearson": pearson_corr, "spearmanr": spearman_corr, "pear+spear av": (pearson_corr + spearman_corr) / 2}
def compute_metrics(task_name: str, preds: List[int], labels: List[int]) -> Dict[str, float]:
"""
Computes metrics for GLUE tasks
Args:
task_name: GLUE task name
preds: model predictions
labels: golden labels
Returns:
metrics
"""
if len(preds) != len(labels):
raise ValueError("Predictions and labels must have the same length")
metric_fn = accuracy
if task_name == 'cola':
metric_fn = mcc
elif task_name in ['mrpc', 'qqp']:
metric_fn = acc_and_f1
elif task_name == 'sts-b':
metric_fn = pearson_and_spearman
return metric_fn(preds, labels)
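if __name__ == '__main__':
    # Illustrative self-check (an addition, not part of the original module).
    import numpy as np
    preds = np.array([1, 0, 1, 1])
    labels = np.array([1, 1, 1, 0])
    print(compute_metrics('mrpc', preds, labels))  # -> {'acc': 0.5, 'f1': 0.666...}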
| 2.1875 | 2 |
tests/test_populate.py | bio2bel/drugbank | 6 | 12792436 | <reponame>bio2bel/drugbank
# -*- coding: utf-8 -*-
"""Suites for testing the populated database."""
from pybel import BELGraph
from pybel.constants import CITATION, CITATION_REFERENCE, CITATION_TYPE, CITATION_TYPE_PUBMED
from tests.constants import PopulatedTemporaryCacheClassMixin
class TestPopulation(PopulatedTemporaryCacheClassMixin):
"""Tests the database is populated correctly."""
def test_count(self):
"""Tests the correct number of drugs."""
self.assertEqual(4, self.manager.count_drugs())
self.assertLessEqual(23, self.manager.count_articles())
def test_article(self):
"""Test lookup of an article."""
article = self.manager.get_article_by_pmid('10505536')
self.assertIsNotNone(article)
dpis = list(article.drug_protein_interactions)
self.assertNotEqual(0, len(dpis))
def test_bel(self):
"""Test adding a DTI to a graph."""
article = self.manager.get_article_by_pmid('10505536')
drug_protein_interaction = article.drug_protein_interactions.all()[0]
protein = drug_protein_interaction.protein
self.assertEqual('P00734', protein.uniprot_id)
drug = drug_protein_interaction.drug
self.assertEqual('DB00001', drug.drugbank_id)
self.assertEqual('Lepirudin', drug.name)
graph = BELGraph()
drug_protein_interaction.add_to_graph(graph)
self.assertEqual(2, graph.number_of_nodes())
self.assertEqual(6, graph.number_of_edges())
_, _, data = list(graph.edges(data=True))[0]
self.assertIn(CITATION, data)
self.assertIn(CITATION_TYPE, data[CITATION])
self.assertEqual(CITATION_TYPE_PUBMED, data[CITATION][CITATION_TYPE])
self.assertIn(CITATION_REFERENCE, data[CITATION])
self.assertEqual('10505536', data[CITATION][CITATION_REFERENCE])
| 2.578125 | 3 |
Correlation_Coefficient.py | Priority-At-Next-Intersection/Corner_Detection | 5 | 12792437 | """
There are two useful functions:
1. correlationCoef will tell you the correlation coefficient of two patches of the same size;
   the greater this coefficient is, the more similar the two patches are.
2. matchTemplate will automatically go through the whole input 'img' with a sliding window
   and apply correlationCoef to every window, comparing it to the template.
"""
import cv2
import numpy as np
from matplotlib import pyplot as plt
def correlationCoef(g1,g2):
"""
Parameters:
g1: graph one, grayscale(0-255)
g2: graph two, grayscale(0-255)
Return:
Correlation coefficient(float).
"""
#1. make sure I read the correct patches
if(g1.shape!=g2.shape):
print('Invalid patch. Patch should be in same size')
print('Size of graph 1:',(g1.shape))
print('Size of graph 2:',(g2.shape))
return 0
#2. Calculate Statistic Infomation
std_g1=np.std(g1)
std_g2=np.std(g2)
array1=g1.ravel()
array2=g2.ravel()
cov_matrix=np.cov(array1,array2)
cov=cov_matrix[1,0]
#3. Calculate coefficient(float)
coef=cov/(std_g1*std_g2)
return coef
def matchTemplate(img,template):
"""
Parameters:
img: image, such as a cat, grayscale(0-255)
template: your target, such as a cat's paw, grayscale(0-255)
Return:
        a float image consisting of the correlation coefficient at each window position.
"""
win_w,win_h=template.shape[::-1]
w,h=img.shape[::-1]
result=np.zeros(img.shape)
for row in range(h-win_h):
for col in range(w-win_w):
t_patch=img[row:row+win_h,col:col+win_w]
result[row,col]=correlationCoef(template,t_patch)
return result
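if __name__ == '__main__':
    # Illustrative self-check on synthetic data (an addition, not part of the
    # original module): a patch should correlate (almost) perfectly with itself.
    rng = np.random.default_rng(0)
    img = rng.integers(0, 256, size=(64, 64)).astype(np.uint8)
    template = img[10:26, 10:26]
    # Slightly above 1.0 because np.cov uses ddof=1 while np.std uses ddof=0.
    print('self correlation:', correlationCoef(template, template))
    response = matchTemplate(img, template)
    print('peak at:', np.unravel_index(np.argmax(response), response.shape))  # expected near (10, 10)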
| 3.171875 | 3 |
opentapioca/tagger.py | heathersherry/opentapioca | 191 | 12792438 | <reponame>heathersherry/opentapioca<filename>opentapioca/tagger.py
import json
import requests
import logging
import re
from math import log
from .languagemodel import BOWLanguageModel
from .wikidatagraph import WikidataGraph
from .tag import Tag
from .mention import Mention
# solr_collection = 'wd_multilingual'
logger = logging.getLogger(__name__)
class Tagger(object):
"""
The tagger indexes a Wikidata dump in Solr
and uses it to detect efficiently mentions of Wikidata
items in text.
"""
def __init__(self, solr_collection, bow, graph):
"""
Creates a tagger from:
- a solr collection name, which has been adequately initialized with a compatible index and filled with documents
- a bag of words language model, adequately trained, which will be used to evaluate the likelihood of phrases
- a wikidata graph, adequately loaded, which will be used to compute the page rank and the edges between items
"""
self.bow = bow
self.graph = graph
self.solr_endpoint = 'http://localhost:8983/solr/{}/tag'.format(solr_collection)
self.prune_re = re.compile(r'^(\w\w?|[\d ]{,4})$')
self.max_length = 10000
def tag_and_rank(self, phrase, prune=True):
"""
Given some text, use the solr index to retrieve candidate items mentioned in the text.
:param prune: if True, ignores lowercase mentions shorter than 3 characters
"""
# Tag
phrase = phrase[:self.max_length]
logger.debug('Tagging text with solr (length {})'.format(len(phrase)))
r = requests.post(self.solr_endpoint,
params={'overlaps':'NO_SUB',
'tagsLimit':500,
'fl':'id,label,aliases,extra_aliases,desc,nb_statements,nb_sitelinks,edges,types',
'wt':'json',
'indent':'off',
},
headers ={'Content-Type':'text/plain'},
data=phrase.encode('utf-8'))
r.raise_for_status()
logger.debug('Tagging succeeded')
resp = r.json()
# Enhance mentions with page rank and edge similarity
mentions_json = [
self._dictify(mention)
for mention in resp.get('tags', [])
]
docs = {
doc['id']:doc
for doc in resp.get('response', {}).get('docs', [])
}
mentions = [
self._create_mention(phrase, mention, docs, mentions_json)
for mention in mentions_json
]
pruned_mentions = [
mention
for mention in mentions
if not self.prune_phrase(mention.phrase)
]
return pruned_mentions
def prune_phrase(self, phrase):
"""
Should this phrase be pruned? It happens when
it is shorter than 3 characters and appears in lowercase in the text,
or only consists of digits.
This is mostly introduced to remove matches of Wikidata items about characters,
or to prevent short words such as "of" or "in" to match with initials "OF", "IN",
as well as sport scores, postcodes, and so on.
"""
return self.prune_re.match(phrase) is not None and phrase.lower() == phrase
def _create_mention(self, phrase, mention, docs, mentions):
"""
Adds more info to the mentions returned from Solr, to prepare
them for ranking by the classifier.
:param phrase: the original document
:param mention: the JSON mention to enhance with scores
:param docs: dictionary from qid to item
:param mentions: the list of all mentions in the document
:returns: the enhanced mention, as a Mention object
"""
start = mention['startOffset']
end = mention['endOffset']
surface = phrase[start:end]
surface_score = self.bow.log_likelihood(surface)
ranked_tags = []
for qid in mention['ids']:
item = dict(docs[qid].items())
item['rank'] = 23. + log(self.graph.get_pagerank(qid))
ranked_tags.append(Tag(**item))
return Mention(
phrase=surface,
start=start,
end=end,
log_likelihood=-surface_score,
tags=sorted(ranked_tags, key=lambda tag: -tag.rank)[:10],
)
def _dictify(self, lst):
"""
Converts a list of [key1,val1,key2,val2,...] to a dict
"""
return {
lst[2*k]: lst[2*k+1]
for k in range(len(lst)//2)
}
if __name__ == '__main__':
import sys
fname = sys.argv[1]
print('Loading '+fname)
bow = BOWLanguageModel()
bow.load(fname)
print('Loading '+sys.argv[2])
graph = WikidataGraph()
graph.load_pagerank(sys.argv[2])
    # The Tagger needs a Solr collection name; take it from argv or fall back
    # to the collection hinted at the top of this module.
    solr_collection = sys.argv[3] if len(sys.argv) > 3 else 'wd_multilingual'
    tagger = Tagger(solr_collection, bow, graph)
    while True:
        phrase = input('>>> ')
        mentions = tagger.tag_and_rank(phrase)
        # tag_and_rank returns Mention objects rather than plain dicts; this
        # summary assumes Mention/Tag expose their constructor fields as attributes.
        for mention in mentions:
            print(mention.phrase)
            for tag in mention.tags:
                print('    rank {:.2f}: {}'.format(tag.rank, tag))
| 2.625 | 3 |
other/q2.py | pengfei-chen/algorithm_qa | 79 | 12792439 | <filename>other/q2.py
"""
Problem: given two nonzero integers M and N, find the greatest common divisor of M and N.
"""
def get_bigest_public_num(m, n):
if n == 0:
return m
else:
return get_bigest_public_num(n, m % n)
if __name__ == '__main__':
print(get_bigest_public_num(10, 23)) | 2.828125 | 3 |
codes/20200209_person_reid/src/optimize_params.py | ryoichiro3816/testpy | 4 | 12792440 | '''Train CIFAR10 with PyTorch.'''
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from sklearn.metrics import classification_report
import pandas as pd
import optuna
from datasets import market1501
import metrics
import torchvision.models as models
def opt():
study = optuna.create_study(direction='maximize')
#study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=1000)
print('Number of finished trials: ', len(study.trials))
print('Best trial:')
trial = study.best_trial
print(' Value: ', trial.value)
print(' Params: ')
for key, value in trial.params.items():
print(' {}: {}'.format(key, value))
def objective(trial):
# Parse arguments.
args = parse_args()
# Set device.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Load dataset.
if os.path.exists(args.anno_path) == False:
market1501.make_train_anno(args.data_dir, args.anno_path)
train_loader, class_names = market1501.load_train_data(args.anno_path, args.n_batch)
# Set a model.
# cf. https://qiita.com/perrying/items/857df46bb6cdc3047bd8
n_feats = trial.suggest_categorical('n_feats', [256*1, 256*2])
model = models.resnet50(pretrained=True)
model.fc = nn.Linear(2048, n_feats)
model = model.to(device)
#print(model)
# Set a metric
"""
'n_feats': 256, 'norm': 5, 'margin': 0.0005883992558471014, 'easy_margin': 0, 'lr': 0.08620634410578862, 'weight_decay': 0.009787166658749052}.
"""
norm = trial.suggest_int('norm', 0, 30)
margin = trial.suggest_uniform('margin', 0.0, 1e-3)
easy_margin = trial.suggest_categorical('easy_margin', [0, 1])
metric = metrics.ArcMarginProduct(n_feats, len(class_names), s=norm, m=margin, easy_margin=easy_margin)
metric.to(device)
# Set loss function and optimization function.
lr = trial.suggest_uniform('lr', 1e-3, 1e-1)
weight_decay = trial.suggest_uniform('weight_decay', 1e-3, 1e-1)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD([{'params': model.parameters()}, {'params': metric.parameters()}],
lr=lr,
weight_decay=weight_decay)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
# Train and test.
for epoch in range(args.n_epoch):
# Train and test a model.
train_acc, train_loss = train(device, train_loader, args.n_batch, model, metric, criterion, optimizer, scheduler)
#test_acc, test_loss = test(device, test_loader, model, metric, criterion)
# Output score.
#stdout_temp = 'epoch: {:>3}, train acc: {:<8}, train loss: {:<8}, test acc: {:<8}, test loss: {:<8}'
#print(stdout_temp.format(epoch+1, train_acc, train_loss, test_acc, test_loss))
stdout_temp = 'epoch: {:>3}, train acc: {:<8}, train loss: {:<8}'
print(stdout_temp.format(epoch+1, train_acc, train_loss))
# Save a model checkpoint.
#model_ckpt_path = args.model_ckpt_path_temp.format(args.dataset_name, args.model_name, epoch+1)
#torch.save(model.state_dict(), model_ckpt_path)
#print('Saved a model checkpoint at {}'.format(model_ckpt_path))
#print('')
return train_acc
def train(device, train_loader, n_batch, model, metric_fc, criterion, optimizer, scheduler):
model.train()
output_list = []
target_list = []
running_loss = 0.0
for batch_idx, (inputs, targets) in enumerate(train_loader):
# Forward processing.
inputs, targets = inputs.to(device), targets.to(device).long()
features = model(inputs)
outputs = metric_fc(features, targets)
loss = criterion(outputs, targets)
# Backward processing.
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
# Set data to calculate score.
output_list += [int(o.argmax()) for o in outputs]
target_list += [int(t) for t in targets]
running_loss += loss.item()
# Calculate score at present.
train_acc, train_loss = calc_score(output_list, target_list, running_loss, n_batch, batch_idx, train_loader)
if (batch_idx % 100 == 0 and batch_idx != 0) or (batch_idx == len(train_loader)):
stdout_temp = 'batch: {:>3}/{:<3}, train acc: {:<8}, train loss: {:<8}'
print(stdout_temp.format(batch_idx, len(train_loader), train_acc, train_loss))
# Calculate score.
#train_acc, train_loss = calc_score(output_list, target_list, running_loss, train_loader)
return train_acc, train_loss
def test(device, test_loader, model, metric_fc, criterion):
model.eval()
output_list = []
target_list = []
running_loss = 0.0
for batch_idx, (inputs, targets) in enumerate(test_loader):
# Forward processing.
inputs, targets = inputs.to(device), targets.to(device)
features = model(inputs)
outputs = metric_fc(features, targets)
loss = criterion(outputs, targets)
# Set data to calculate score.
output_list += [int(o.argmax()) for o in outputs]
target_list += [int(t) for t in targets]
running_loss += loss.item()
    test_acc, test_loss = calc_score(output_list, target_list, running_loss, test_loader.batch_size, batch_idx, test_loader)
return test_acc, test_loss
def calc_score(output_list, target_list, running_loss, n_batch, batch_idx, data_loader):
# Calculate accuracy.
result = classification_report(output_list, target_list, output_dict=True)
acc = round(result['weighted avg']['f1-score'], 6)
#loss = round(running_loss / len(data_loader.dataset), 6)
if n_batch * batch_idx < len(data_loader.dataset):
loss = running_loss / (n_batch * (batch_idx+1))
else:
loss = running_loss / len(data_loader.dataset)
return acc, loss
def parse_args():
# Set arguments.
arg_parser = argparse.ArgumentParser(description="Image Classification")
arg_parser.add_argument('--dataset_name', default='Market1501')
arg_parser.add_argument('--data_dir', default='D:/workspace/datasets/Market-1501-v15.09.15/')
arg_parser.add_argument('--anno_dir', default='../data/annos/')
arg_parser.add_argument('--anno_path', default='../data/annos/anno_market1501_train.csv')
arg_parser.add_argument('--n_batch', default=32, type=int)
arg_parser.add_argument("--model_name", type=str, default='ResNet50')
arg_parser.add_argument("--model_ckpt_dir", type=str, default='../experiments/models/checkpoints/')
arg_parser.add_argument("--model_ckpt_path_temp", type=str, default='../experiments/models/checkpoints/{}_{}_epoch={}.pth')
arg_parser.add_argument('--n_epoch', default=12, type=int, help='The number of epoch')
arg_parser.add_argument('--lr', default=0.001, type=float, help='Learning rate')
arg_parser.add_argument('--n_feats', default=581, type=int, help='The number of base model output')
arg_parser.add_argument('--easy_margin', default=1, type=int, help='0 is False, 1 is True')
arg_parser.add_argument('--weight_decay', default=5e-4, type=float, help='')
arg_parser.add_argument('--norm', default=1, type=int, help='ArcFace: norm of input feature')
arg_parser.add_argument('--margin', default=0.0008, type=float, help='ArcFace: margin')
arg_parser.add_argument('--step_size', default=10, type=int, help='Learning Rate: step size')
arg_parser.add_argument('--gamma', default=0.1, type=float, help='Learning Rate: gamma')
"""
{'n_feats': 256, 'norm': 5, 'margin': 0.0005883992558471014, 'easy_margin': 0, 'lr': 0.08620634410578862, 'weight_decay': 0.009787166658749052}.
"""
args = arg_parser.parse_args()
# Make directory.
os.makedirs(args.anno_dir, exist_ok=True)
os.makedirs(args.model_ckpt_dir, exist_ok=True)
# Validate paths.
assert os.path.exists(args.data_dir)
assert os.path.exists(args.anno_dir)
assert os.path.exists(args.model_ckpt_dir)
return args
if __name__ == "__main__":
opt()
#main()
| 2.765625 | 3 |
src/cogs/image.py | Alone-ankush/Credo | 0 | 12792441 | import discord
from discord.ext import commands
import aiohttp
import requests
class Image(commands.Cog, name='Image'):
def __init__(self, bot):
self.bot = bot
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def cat(self, ctx):
"""Gives You Random Image Of Cat"""
async with ctx.channel.typing():
async with aiohttp.ClientSession() as cs:
async with cs.get('http://aws.random.cat/meow') as r:
data = await r.json()
em = discord.Embed(
title='Cat', timestamp=ctx.message.created_at, color=self.bot.color)
em.set_image(url=data['file'])
em.set_footer(icon_url=ctx.author.avatar_url,
text=f"Requested By {ctx.author.name}")
await ctx.send(embed=em)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def dog(self, ctx):
"""Gives You Random Image Of Dog"""
async with ctx.channel.typing():
async with aiohttp.ClientSession() as cs:
async with cs.get('http://random.dog/woof.json') as r:
data = await r.json()
em = discord.Embed(
title='Dog', timestamp=ctx.message.created_at, color=self.bot.color)
em.set_image(url=data['url'])
em.set_footer(icon_url=ctx.author.avatar_url,
text=f"Requested By {ctx.author.name}")
await ctx.send(embed=em)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def fox(self, ctx):
"""Gives You Random Image Of Fox"""
async with ctx.channel.typing():
async with aiohttp.ClientSession() as cs:
async with cs.get('https://some-random-api.ml/img/fox') as r:
data = await r.json()
em = discord.Embed(
title='Fox', timestamp=ctx.message.created_at, color=self.bot.color)
em.set_image(url=data['link'])
em.set_footer(icon_url=ctx.author.avatar_url,
text=f"Requested By {ctx.author.name}")
await ctx.send(embed=em)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def panda(self, ctx):
"""Gives You Random Image Of Panda"""
async with ctx.channel.typing():
async with aiohttp.ClientSession() as cs:
async with cs.get('https://some-random-api.ml/img/panda') as r:
data = await r.json()
em = discord.Embed(
title='Panda', timestamp=ctx.message.created_at, color=self.bot.color)
em.set_image(url=data['link'])
em.set_footer(icon_url=ctx.author.avatar_url,
text=f"Requested By {ctx.author.name}")
await ctx.send(embed=em)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def red_panda(self, ctx):
"""Gives You Random Image Of Red Panda"""
async with ctx.channel.typing():
async with aiohttp.ClientSession() as cs:
async with cs.get('https://some-random-api.ml/img/red_panda') as r:
data = await r.json()
em = discord.Embed(
title='Red Panda', timestamp=ctx.message.created_at, color=self.bot.color)
em.set_image(url=data['link'])
em.set_footer(icon_url=ctx.author.avatar_url,
text=f"Requested By {ctx.author.name}")
await ctx.send(embed=em)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def bird(self, ctx):
"""Gives You Random Image Of Bird"""
async with ctx.channel.typing():
async with aiohttp.ClientSession() as cs:
async with cs.get('https://some-random-api.ml/img/birb') as r:
data = await r.json()
em = discord.Embed(
title='Bird', timestamp=ctx.message.created_at, color=self.bot.color)
em.set_image(url=data['link'])
em.set_footer(icon_url=ctx.author.avatar_url,
text=f"Requested By {ctx.author.name}")
await ctx.send(embed=em)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def kola(self, ctx):
"""Gives You Random Image Of Kola"""
async with ctx.channel.typing():
async with aiohttp.ClientSession() as cs:
async with cs.get('https://some-random-api.ml/img/koala') as r:
data = await r.json()
em = discord.Embed(
                    title='Koala', timestamp=ctx.message.created_at, color=self.bot.color)
em.set_image(url=data['link'])
em.set_footer(icon_url=ctx.author.avatar_url,
text=f"Requested By {ctx.author.name}")
await ctx.send(embed=em)
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def pikachu(self, ctx):
"""Gives You Random Image Or GIF Of Pikachu"""
async with ctx.channel.typing():
async with aiohttp.ClientSession() as cs:
async with cs.get('https://some-random-api.ml/img/pikachu') as r:
data = await r.json()
em = discord.Embed(
title='Pikachu', timestamp=ctx.message.created_at, color=self.bot.color)
em.set_image(url=data['link'])
em.set_footer(icon_url=ctx.author.avatar_url,
text=f"Requested By {ctx.author.name}")
await ctx.send(embed=em)
# @commands.command()
# @commands.cooldown(1, 10, commands.BucketType.user)
# async def yt(self,ctx,comment:str):
# """Comments On Youtube"""
# url = f"https://some-random-api.ml/canvas/youtube-comment?avatar={ctx.author.avatar_url_as(format='png')}&username={ctx.author}&comment={comment}"
# em = discord.Embed(color = ctx.author.color)
# em.set_image(url=url)
# em.set_footer(text=f"Requested by {ctx.author}", icon_url=ctx.author.avatar_url)
# await ctx.send(embed=em)
def setup(bot):
bot.add_cog(Image(bot))
| 2.75 | 3 |
mlmo/interfaces/base_i_prod.py | prashantlv/mltoolkit | 1 | 12792442 | class BaseIProd(object):
pass
| 1.117188 | 1 |
oss_client/fileobj.py | olivetree123/oss-client | 0 | 12792443 | import os
from oss_client.utils import content_md5
class FileObject(object):
def __init__(self, name="", obj=None, hash_value=None, storage=None):
if not (obj or hash_value):
raise ValueError("obj and hash_value both are None")
self.obj = obj
self.name = name
self.suffix = ""
self.length = 0
self.hash_value = hash_value
self.storage = storage
names = name.split(".")
if len(names) > 1:
self.suffix = names[-1]
if not self.hash_value and self.obj:
content = self.obj.read()
self.length = len(content)
self.hash_value = content_md5(content)
self.obj.seek(0, os.SEEK_SET)
def __str__(self):
return self.hash_value
def key(self):
if self.suffix:
return ".".join([self.hash_value, self.suffix])
return self.hash_value
def content(self, range=None):
if self.obj:
return self.obj.read()
if self.storage:
return self.storage.read(self.key(), range)
raise Exception("can not find content")
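if __name__ == '__main__':
    # Illustrative self-check (an addition, not part of the original module);
    # assumes oss_client.utils.content_md5 accepts the bytes returned by read().
    import io
    fo = FileObject(name='photo.jpg', obj=io.BytesIO(b'hello world'))
    print(fo.key(), fo.length)   # '<md5-of-content>.jpg' 11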
| 2.796875 | 3 |
bot/utils/utils.py | BANanaD3V/kuzaku | 0 | 12792444 | <filename>bot/utils/utils.py
from disnake.ext.commands.cooldowns import BucketType, Cooldown, CooldownMapping
from disnake.ext.commands import Command
def cooldoown(rate, per, type=BucketType.default, premium: bool = False):
def decorator(func):
if isinstance(func, Command):
func._buckets = CooldownMapping(Cooldown(rate, per, type))
else:
if not premium:
func.__commands_cooldown__ = Cooldown(rate, per, type)
else:
func.__commands_cooldown__ = Cooldown(0, per, type)
return func
return decorator
def humanbytes(B):
"Return the given bytes as a human friendly KB, MB, GB, or TB string"
B = float(B)
KB = float(1024)
MB = float(KB ** 2) # 1,048,576
GB = float(KB ** 3) # 1,073,741,824
TB = float(KB ** 4) # 1,099,511,627,776
if B < KB:
return "{0} {1}".format(B, "Bytes" if 0 == B > 1 else "Byte")
elif KB <= B < MB:
return "{0:.2f} KB".format(B / KB)
elif MB <= B < GB:
return "{0:.2f} MB".format(B / MB)
elif GB <= B < TB:
return "{0:.2f} GB".format(B / GB)
elif TB <= B:
return "{0:.2f} TB".format(B / TB)
| 2.453125 | 2 |
cloudbutton_geospatial/io_utils/plot.py | berkevaroll/geospatial-usecase | 0 | 12792445 | from matplotlib import pyplot as plt
from ibm_botocore.client import Config, ClientError
import rasterio
import random
import ibm_boto3
def plot_random_blocks(bucket, item, num):
"""
Plot num random blocks from IBM COS item located at bucket
"""
fig, axs = plt.subplots(num, figsize=(20,30))
cos = ibm_boto3.resource("s3",
config=Config(signature_version="oauth"),
endpoint_url="https://s3.eu-de.cloud-object-storage.appdomain.cloud"
)
obj = cos.Object(bucket, item)
with rasterio.open(obj.get()['Body']) as src:
for j in range(0,num):
ij, window = random.choice(list(src.block_windows()))
arr = src.read(1, window=window)
            plt.subplot((num + 1) // 2, 2, j + 1)
plt.gca().set_title(item)
plt.imshow(arr)
plt.colorbar(shrink=0.5)
plt.show()
def plot_results(bucket, results):
"""
Plot an array of COS from IBM Cloud
"""
size = len(results)
fig, axs = plt.subplots(len(results), figsize=(20,30))
cos = ibm_boto3.resource("s3",
config=Config(signature_version="oauth"),
endpoint_url="https://s3.eu-de.cloud-object-storage.appdomain.cloud"
)
i = 1
for item in results:
obj = cos.Object(bucket, item)
with rasterio.open(obj.get()['Body']) as src:
arr = src.read(1, out_shape=(src.height//10, src.width//10))
            plt.subplot((size + 1) // 2, 2, i)
plt.gca().set_title(item)
plt.imshow(arr)
plt.colorbar(shrink=0.5)
i += 1
plt.show()
def tiff_overview(tiff_url):
"""
    Plot a small overview version of the map (thumbnail)
"""
with rasterio.open(tiff_url) as dataset:
oviews = dataset.overviews(1) # list of overviews from biggest to smallest
oview = oviews[-1] # let's look at the smallest thumbnail
print('Decimation factor= {}'.format(oview))
# NOTE this is using a 'decimated read' (http://rasterio.readthedocs.io/en/latest/topics/resampling.html)
thumbnail = dataset.read(1, out_shape=(1, int(dataset.height // oview), int(dataset.width // oview)))
print('array type: ', type(thumbnail))
plt.figure(figsize=(5, 5))
plt.imshow(thumbnail)
plt.colorbar()
plt.title('Overview - Band 4 {}'.format(thumbnail.shape))
plt.xlabel('Column #')
plt.ylabel('Row #')
def plot_map(image, title, x_label="", y_label=""):
plt.figure(figsize=(10, 15))
plt.imshow(image)
plt.colorbar(shrink=0.5)
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
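# Illustrative usage (bucket, object keys and URL below are assumptions):
#   plot_random_blocks('geo-bucket', 'tiles/dem_cog.tif', num=4)
#   plot_results('geo-bucket', ['out/ndvi_1.tif', 'out/ndvi_2.tif'])
#   tiff_overview('https://example.com/cog/scene_B04.tif')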
| 2.65625 | 3 |
tests/test_graindist.py | eblur/newdust | 4 | 12792446 | import pytest
import numpy as np
from scipy.integrate import trapz
from newdust.graindist import *
from . import percent_diff
MD = 1.e-5 # g cm^-2
RHO = 3.0  # g cm^-3
SDEFAULT = 'Powerlaw'
CDEFAULT = 'Silicate'
ALLOWED_SIZES = ['Grain','Powerlaw','ExpCutoff']
ALLOWED_COMPS = ['Drude','Silicate','Graphite']
# Test that the helper function runs on all types
@pytest.mark.parametrize('sstring', ALLOWED_SIZES)
def test_sstring(sstring):
test = GrainDist(sstring, CDEFAULT)
assert isinstance(test, GrainDist)
@pytest.mark.parametrize('cstring', ALLOWED_COMPS)
def test_cstring(cstring):
test = GrainDist(SDEFAULT, cstring)
assert isinstance(test, GrainDist)
# Test that the helper function does not run on weird strings
def test_catch_exception():
ss, cc = 'foo', 'bar'
with pytest.raises(AssertionError):
GrainDist(ss, CDEFAULT)
GrainDist(SDEFAULT, cc)
# Test the basic properties and functions of GrainDist
@pytest.mark.parametrize('sstring', ALLOWED_SIZES)
def test_GrainDist(sstring):
test = GrainDist(sstring, CDEFAULT, md=MD)
assert isinstance(test.a, np.ndarray)
assert len(test.a) == len(test.ndens)
assert len(test.a) == len(test.mdens)
if isinstance(test.size, sizedist.Grain):
mtot = test.mdens
else:
mtot = trapz(test.mdens, test.a)
assert percent_diff(mtot, MD) <= 0.01
# Test that doubling the dust mass column doubles the total mass
MD2 = 2.0 * MD
def test_dmass():
for ss in ALLOWED_SIZES:
for cc in ALLOWED_COMPS:
test1 = GrainDist(ss, cc, md=MD)
test2 = GrainDist(ss, cc, md=MD2)
if isinstance(test1.size, sizedist.Grain):
mtot1, mtot2 = test1.mdens, test2.mdens
else:
mtot1 = trapz(test1.mdens, test1.a)
mtot2 = trapz(test2.mdens, test2.a)
assert percent_diff(mtot2, 2.0 * mtot1) <= 0.01
# Test that doubling the dust grain material density halves the total number
RHO2 = 2.0 * RHO
def test_ndens():
for ss in ALLOWED_SIZES:
for cc in ALLOWED_COMPS:
test1 = GrainDist(ss, cc, md=MD, rho=RHO)
test2 = GrainDist(ss, cc, md=MD, rho=RHO2)
if isinstance(test1.size, sizedist.Grain):
nd1, nd2 = test1.ndens, test2.ndens
else:
nd1 = trapz(test1.ndens, test1.a)
nd2 = trapz(test2.ndens, test2.a)
assert percent_diff(nd2, 0.5 * nd1) <= 0.01
| 2.125 | 2 |
allauth/socialaccount/providers/noa/views.py | Supratix/django-allauth | 0 | 12792447 | <filename>allauth/socialaccount/providers/noa/views.py
import requests
import jwt
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import NoaProvider
# {"issuer":"https://noaidentitydev.azurewebsites.net/authorization",
# "jwks_uri":"https://noaidentitydev.azurewebsites.net/authorization/.well-known/openid-configuration/jwks",
# "authorization_endpoint":"https://noaidentitydev.azurewebsites.net/authorization/connect/authorize",
# "token_endpoint":"https://noaidentitydev.azurewebsites.net/authorization/connect/token",
# "userinfo_endpoint":"https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo",
# "end_session_endpoint":"https://noaidentitydev.azurewebsites.net/authorization/connect/endsession",
# "check_session_iframe":"https://noaidentitydev.azurewebsites.net/authorization/connect/checksession",
# "revocation_endpoint":"https://noaidentitydev.azurewebsites.net/authorization/connect/revocation",
# "introspection_endpoint":"https://noaidentitydev.azurewebsites.net/authorization/connect/introspect",
# "device_authorization_endpoint":"https://noaidentitydev.azurewebsites.net/authorization/connect/deviceauthorization",
# "frontchannel_logout_supported":true,
# "frontchannel_logout_session_supported":true,
# "backchannel_logout_supported":true,
# "backchannel_logout_session_supported":true,
# "scopes_supported":["profile","openid","email","api","offline_access"],
# "claims_supported":["name","family_name","given_name","middle_name","nickname","preferred_username","profile","picture","website","gender","birthdate","zoneinfo","locale","updated_at","sub","email","email_verified"],
# "grant_types_supported":["authorization_code","client_credentials","refresh_token","implicit","password","urn:ietf:params:oauth:grant-type:device_code"],
# "response_types_supported":["code","token","id_token","id_token token","code id_token","code token","code id_token token"],
# "response_modes_supported":["form_post","query","fragment"],
# "token_endpoint_auth_methods_supported":["client_secret_basic","client_secret_post"],
# "id_token_signing_alg_values_supported":["RS256"],
# "subject_types_supported":["public"],
# "code_challenge_methods_supported":["plain","S256"],
# "request_parameter_supported":true}
from ..oauth2.client import OAuth2Error
class NoaOAuth2Adapter(OAuth2Adapter):
provider_id = NoaProvider.id
access_token_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/token'
authorize_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/authorize'
profile_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo'
def complete_login(self, request, app, token, response):
# extra_data_token = requests.post(self.access_token_url, params={
# 'client_id': app.client_id,
# 'grant_type': 'authorization_code',
# 'code': token.token,
# 'redirect_uri': 'https://noahow.com/accounts/noa/login/callback/',
# })
# public_key = (
# b"-----BEGIN PUBLIC KEY-----\n"
# b"<KEY>"
# b"<KEY>"
# b"<KEY>"
# b"<KEY>"
# b"cfnCXzIwLqkFhBWk941IaRVvy5xp4wXcch45T6pYKCkBF2pj6mreMKExg1uMYY1n"
# b"<KEY>"
# b"IwIDAQAB"
# b"\n-----END PUBLIC KEY-----"
# )
#
# extra_data = jwt.decode(
# extra_data_token, public_key, audience="api", algorithms="RS256"
# )
        # Fetch the user's profile from the userinfo endpoint (the JWT-based
        # approach above is left commented for reference).
        extra_data = requests.get(
            self.profile_url,
            headers={'Authorization': 'Bearer {}'.format(token.token)},
        )
return self.get_provider().sociallogin_from_response(
request,
extra_data.json()
)
oauth2_login = OAuth2LoginView.adapter_view(NoaOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(NoaOAuth2Adapter)
| 1.875 | 2 |
Day 3/IntegerToBinary.py | RajShashwat/100daysofcode | 0 | 12792448 | #imported the Stack.py
from Stack import Stack
def intToBinary(num: int) -> str:
    if num == 0:
        return "0"
    stack = Stack()
    while num > 0:
        remainder = num % 2
        stack.push(remainder)
        num = num // 2
    # Remainders were pushed least-significant bit first, so popping
    # yields the most-significant bit first.
    binary = ""
    while not stack.is_empty():
        binary += str(stack.pop())
    return binary
num = int(input("Enter a Number: "))
if num < 0:
print("Enter a Positive Number")
quit()
result = intToBinary(num)
print("Binary: ",result)
| 3.9375 | 4 |
lesson01/monkey/t1.py | herrywen-nanj/51reboot | 0 | 12792449 |
x1 = 1.5
x2 = 3
x3 = "123"
x4 = True
x5 = False
##################################################
xx1 = int(x1)
xx3 = int(x3)
xx4 = int(x4)
xx5 = int(x5)
print(type(xx1))
print(type(xx3))
print(type(xx4), xx4)
print(type(xx5), xx5)
xx2 = float(x2)
print(type(xx2), xx2)
# x3 = "123"
xxx3 = float(x3)
print(type(xxx3), xxx3)
# -> str
xxxx1 = str("123")
print(type(xxxx1), xxxx1)
xxxx2 = str(123)
print(type(xxxx2), xxxx2)
xxxx3 = str(1.23)
print(type(xxxx3), xxxx3)
x1 = "123"
x1 = "234"
print(x1)
| 3.5625 | 4 |
lambda/write_id_index_lambda.py | jhuapl-boss/boss-tools | 1 | 12792450 | <reponame>jhuapl-boss/boss-tools
# Lambda to write the morton index of a cuboid object key to the id in the
# DynamoDB id index table.
#
# If there are failures, uses the decorrelated jitter backoff algorithm described in:
# https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
#
# It expects to get from events dictionary
# {
# 'id_index_table': ...,
# 's3_index_table': ...,
# 'id_count_table': ...,
# 'cuboid_bucket': ...,
# 'id_index_new_chunk_threshold': ...,
# 'cuboid_object_key': '...',
# 'id_group': '...',
# 'version': '...',
# 'write_id_index_status': {
# 'done': False,
# 'delay': 0,
# 'retries_left': int # How many retries left in case of an error.
# }
# }
import botocore
from bossutils.aws import get_region
import json
import random
from spdb.spatialdb.object_indices import ObjectIndices
from time import sleep
BASE_DELAY_TIME_SECS = 5
"""
These derived exceptions of botocore.exceptions.ClientError will be not be
retried by the step function that calls this lambda. Since this lambda
controls retries via event['write_id_index_status']['retries_left'] the step
function should proceed to its catch handling when it receives one of these
exceptions.
Derived exceptions of ClientError that are not part of this list get wrapped
in DynamoClientError to ensure the step function goes to its catch handling
step.
The error information available to the step function isn't as useful when
wrapped, so the expected errors are enumerated below and in the step function's
retry statement.
"""
DO_NOT_WRAP_THESE_EXCEPTIONS = [
'ClientError',
'ConditionalCheckFailedException',
'GlobalTableNotFoundException',
'InternalServerError',
'ItemCollectionSizeLimitExceededException',
'LimitExceededException',
'ProvisionedThroughputExceededException',
'ReplicaAlreadyExistsException',
'ReplicaNotFoundException',
'ResourceInUseException',
'ResourceNotFoundException',
'TableNotFoundException'
]
class DynamoClientError(Exception):
"""
Wrap boto3 ClientError exceptions so the step function can fail when
event['write_id_index_status']['retries_left'] == 0.
"""
def __init__(self, message):
super().__init__(message)
def handler(event, context):
id_index_table = event['id_index_table']
s3_index_table = event['s3_index_table']
id_count_table = event['id_count_table']
cuboid_bucket = event['cuboid_bucket']
write_id_index_status = event['write_id_index_status']
id_index_new_chunk_threshold = (event['id_index_new_chunk_threshold'])
obj_ind = ObjectIndices(
s3_index_table, id_index_table, id_count_table, cuboid_bucket,
get_region())
try:
for obj_id in event['id_group']:
obj_ind.write_id_index(
id_index_new_chunk_threshold,
event['cuboid_object_key'], obj_id, event['version'])
write_id_index_status['done'] = True
except botocore.exceptions.ClientError as ex:
# Probably had a throttle or a ConditionCheckFailed.
print('ClientError caught: {}'.format(ex))
if int(write_id_index_status['retries_left']) < 1:
if get_class_name(ex.__class__) in DO_NOT_WRAP_THESE_EXCEPTIONS:
raise
msg = '{}: {}'.format(type(ex), ex)
raise DynamoClientError(msg) from ex
event['result'] = str(ex)
prep_for_retry(write_id_index_status)
return event
def prep_for_retry(write_id_index_status):
"""
Update the given dictionary so the step function knows to retry.
Args:
write_id_index_status (dict): Update this dict.
"""
write_id_index_status['done'] = False
write_id_index_status['retries_left'] = (
int(write_id_index_status['retries_left']) - 1)
# Prepare decorrelated jitter backoff delay.
last_delay = int(write_id_index_status['delay'])
if last_delay < BASE_DELAY_TIME_SECS:
last_delay = BASE_DELAY_TIME_SECS
write_id_index_status['delay'] = round(
random.uniform(BASE_DELAY_TIME_SECS, last_delay * 3))
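    # e.g. successive delays might look like 5 -> 13 -> 31 -> 70 seconds:
    # roughly geometric growth with randomized (decorrelated) jitter.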
def get_class_name(type_):
"""
    Get just the class name (without the module prefix) from the type.
Args:
type_ (type): Class as a type.
Returns:
(str|None): Just the name of the class or None.
"""
try:
return str(type_).rsplit('.', 1)[1].rstrip("'>")
except IndexError:
return None
| 1.828125 | 2 |